language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
huggingface__transformers
|
src/transformers/models/speecht5/modeling_speecht5.py
|
{
"start": 57999,
"end": 59317
}
|
class ____(SpeechT5PreTrainedModel):
"""
Wrapper around SpeechT5Encoder that applies SpeechT5TextEncoderPrenet to convert the input_ids to hidden features.
"""
def __init__(self, config: SpeechT5Config):
super().__init__(config)
self.prenet = SpeechT5TextEncoderPrenet(config)
self.wrapped_encoder = SpeechT5Encoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.prenet.get_input_embeddings()
def set_input_embeddings(self, value):
self.prenet.set_input_embeddings(value)
def forward(
self,
input_values: torch.FloatTensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
hidden_states = self.prenet(input_values)
outputs = self.wrapped_encoder(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return outputs
|
SpeechT5EncoderWithTextPrenet
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/lite_test.py
|
{
"start": 2667,
"end": 3109
}
|
class ____(LiteTest):
def assertValidDebugInfo(self, debug_info):
"""Verify the DebugInfo is valid."""
file_names = set()
for file_path in debug_info.files:
file_names.add(os.path.basename(file_path))
# To make the test independent on how the nodes are created, we only assert
# the name of this test file.
self.assertIn('lite_test.py', file_names)
self.assertNotIn('lite_v2_test.py', file_names)
|
TestModels
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/events.py
|
{
"start": 49568,
"end": 49812
}
|
class ____(Response):
"""
Response of events.download_task_log endpoint.
"""
_service = "events"
_action = "download_task_log"
_version = "2.13"
_schema = {"definitions": {}, "type": "string"}
|
DownloadTaskLogResponse
|
python
|
ansible__ansible
|
packaging/release.py
|
{
"start": 11527,
"end": 48273
}
|
class ____(enum.Enum):
"""How to handle the ansible-core version."""
DEFAULT = enum.auto()
"""Do not allow development versions. Do not allow post release versions."""
STRIP_POST = enum.auto()
"""Do not allow development versions. Strip the post release from the version if present."""
REQUIRE_POST = enum.auto()
"""Do not allow development versions. Require a post release version."""
REQUIRE_DEV_POST = enum.auto()
"""Require a development or post release version."""
ALLOW_DEV_POST = enum.auto()
"""Allow development and post release versions."""
def apply(self, version: Version) -> Version:
"""Apply the mode to the given version and return the result."""
original_version = version
release_component_count = 3
if len(version.release) != release_component_count:
raise ApplicationError(f"Version {version} contains {version.release} release components instead of {release_component_count}.")
if version.epoch:
raise ApplicationError(f"Version {version} contains an epoch component: {version.epoch}")
if version.local is not None:
raise ApplicationError(f"Version {version} contains a local component: {version.local}")
if version.is_devrelease and version.is_postrelease:
raise ApplicationError(f"Version {version} is a development and post release version.")
if self == VersionMode.ALLOW_DEV_POST:
return version
if self == VersionMode.REQUIRE_DEV_POST:
if not version.is_devrelease and not version.is_postrelease:
raise ApplicationError(f"Version {version} is not a development or post release version.")
return version
if version.is_devrelease:
raise ApplicationError(f"Version {version} is a development release: {version.dev}")
if self == VersionMode.STRIP_POST:
if version.is_postrelease:
version = Version(str(version).removesuffix(f".post{version.post}"))
display.warning(f"Using version {version} by stripping the post release suffix from version {original_version}.")
return version
if self == VersionMode.REQUIRE_POST:
if not version.is_postrelease:
raise ApplicationError(f"Version {version} is not a post release version.")
return version
if version.is_postrelease:
raise ApplicationError(f"Version {version} is a post release.")
if self == VersionMode.DEFAULT:
return version
raise NotImplementedError(self)
@t.overload
def git(*args: t.Any, capture_output: t.Literal[True]) -> CompletedProcess: ...
@t.overload
def git(*args: t.Any, capture_output: t.Literal[False]) -> None: ...
@t.overload
def git(*args: t.Any) -> None: ...
def git(*args: t.Any, capture_output: bool = False) -> CompletedProcess | None:
"""Run the specified git command."""
return run("git", *args, env=None, cwd=CHECKOUT_DIR, capture_output=capture_output)
def get_commit(rev: str | None = None) -> str:
"""Return the commit associated with the given rev, or HEAD if no rev is given."""
try:
return git("rev-parse", "--quiet", "--verify", "--end-of-options", f"{rev or 'HEAD'}^{{commit}}", capture_output=True).stdout.strip()
except CalledProcessError as ex:
if ex.status == 1 and not ex.stdout and not ex.stderr:
raise ApplicationError(f"Could not find commit: {rev}") from None
raise
def prepare_pull_request(version: Version, branch: str, title: str, add: t.Iterable[pathlib.Path | str], allow_stale: bool) -> PullRequest:
"""Return pull request parameters using the provided details."""
git_state = get_git_state(version, allow_stale)
if not git("status", "--porcelain", "--untracked-files=no", capture_output=True).stdout.strip():
raise ApplicationError("There are no changes to commit. Did you skip a step?")
upstream_branch = get_upstream_branch(version)
body = create_pull_request_body(title)
git("checkout", "-b", branch)
git("add", *add)
git("commit", "-m", title)
git("push", "--set-upstream", git_state.remotes.fork.name, branch)
git("checkout", git_state.branch or git_state.commit)
git("branch", "-d", branch)
pr = PullRequest(
upstream_user=git_state.remotes.upstream.user,
upstream_repo=git_state.remotes.upstream.repo,
upstream_branch=upstream_branch,
user=git_state.remotes.fork.user,
repo=git_state.remotes.fork.repo,
branch=branch,
title=title,
body=body,
)
return pr
def create_github_release(release: GitHubRelease) -> None:
"""Open a browser tab for creating the given GitHub release."""
# See: https://docs.github.com/en/repositories/releasing-projects-on-github/automation-for-release-forms-with-query-parameters
params = dict(
tag=release.tag,
target=release.target,
title=release.title,
body=release.body,
prerelease=1 if release.pre_release else 0,
)
query_string = urllib.parse.urlencode(params)
url = f"https://github.com/{release.user}/{release.repo}/releases/new?{query_string}"
display.show("Opening release creation page in new tab using default browser ...")
webbrowser.open_new_tab(url)
def create_pull_request(pr: PullRequest) -> None:
"""Open a browser tab for creating the given pull request."""
# See: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/using-query-parameters-to-create-a-pull-request # noqa
params = dict(
quick_pull=1,
title=pr.title,
body=pr.body,
)
query_string = urllib.parse.urlencode(params)
url = f"https://github.com/{pr.upstream_user}/{pr.upstream_repo}/compare/{pr.upstream_branch}...{pr.user}:{pr.repo}:{pr.branch}?{query_string}"
display.show("Opening pull request in new tab using default browser ...")
webbrowser.open_new_tab(url)
def create_pull_request_body(title: str) -> str:
"""Return a simple pull request body created from the given title."""
body = f"""
##### SUMMARY
{title}
##### ISSUE TYPE
Feature Pull Request
"""
return body.lstrip()
def get_remote(name: str, push: bool) -> Remote:
"""Return details about the specified remote."""
remote_url = git("remote", "get-url", *(["--push"] if push else []), name, capture_output=True).stdout.strip()
remote_match = re.search(r"[@/]github[.]com[:/](?P<user>[^/]+)/(?P<repo>[^.]+)(?:[.]git)?$", remote_url)
if not remote_match:
raise RuntimeError(f"Unable to identify the user and repo in the '{name}' remote: {remote_url}")
remote = Remote(
name=name,
user=remote_match.group("user"),
repo=remote_match.group("repo"),
)
return remote
@functools.cache
def get_remotes() -> Remotes:
"""Return details about the remotes we need to use."""
# assume the devel branch has its upstream remote pointing to the user's fork
fork_remote_name = git("branch", "--list", "devel", "--format=%(upstream:remotename)", capture_output=True).stdout.strip()
if not fork_remote_name:
raise ApplicationError("Could not determine the remote for your fork of Ansible.")
display.show(f"Detected '{fork_remote_name}' as the remote for your fork of Ansible.")
# assume there is only one ansible org remote, which would allow release testing using another repo in the same org without special configuration
all_remotes = git("remote", "-v", capture_output=True).stdout.strip().splitlines()
ansible_remote_names = set(line.split()[0] for line in all_remotes if re.search(r"[@/]github[.]com[:/]ansible/", line))
if not ansible_remote_names:
raise ApplicationError(f"Could not determine the remote which '{fork_remote_name}' was forked from.")
if len(ansible_remote_names) > 1:
raise ApplicationError(f"Found multiple candidates for the remote from which '{fork_remote_name}' was forked from: {', '.join(ansible_remote_names)}")
upstream_remote_name = ansible_remote_names.pop()
display.show(f"Detected '{upstream_remote_name}' as the remote from which '{fork_remote_name}' was forked from.")
if fork_remote_name == upstream_remote_name:
raise ApplicationError("The remote for your fork of Ansible cannot be the same as the remote from which it was forked.")
remotes = Remotes(
fork=get_remote(fork_remote_name, push=True),
upstream=get_remote(upstream_remote_name, push=False),
)
return remotes
def get_upstream_branch(version: Version) -> str:
"""Return the upstream branch name for the given version."""
return f"stable-{version.major}.{version.minor}"
def get_git_state(version: Version, allow_stale: bool) -> GitState:
"""Return information about the current state of the git repository."""
remotes = get_remotes()
upstream_branch = get_upstream_branch(version)
git("fetch", remotes.upstream.name, upstream_branch)
upstream_ref = f"{remotes.upstream.name}/{upstream_branch}"
upstream_commit = get_commit(upstream_ref)
commit = get_commit()
if commit != upstream_commit:
with suppress_when(allow_stale):
raise ApplicationError(f"The current commit ({commit}) does not match {upstream_ref} ({upstream_commit}).")
branch = git("branch", "--show-current", capture_output=True).stdout.strip() or None
state = GitState(
remotes=remotes,
branch=branch,
commit=commit,
)
return state
@functools.cache
def ensure_venv(requirements_content: str) -> dict[str, t.Any]:
"""Ensure the release venv is ready and return the env vars needed to use it."""
requirements_hash = hashlib.sha256(requirements_content.encode()).hexdigest()[:8]
python_version = ".".join(map(str, sys.version_info[:2]))
venv_dir = VENV_DIR / python_version / requirements_hash
venv_bin_dir = venv_dir / "bin"
venv_requirements_file = venv_dir / "requirements.txt"
venv_marker_file = venv_dir / "marker.txt"
env = os.environ.copy()
env.pop("PYTHONPATH", None) # avoid interference from ansible being injected into the environment
env.update(
PATH=os.pathsep.join((str(venv_bin_dir), env["PATH"])),
)
if not venv_marker_file.exists():
display.show(f"Creating a Python {python_version} virtual environment ({requirements_hash}) ...")
if venv_dir.exists():
shutil.rmtree(venv_dir)
venv.create(venv_dir, with_pip=True)
venv_requirements_file.write_text(requirements_content)
run("pip", "install", "-r", venv_requirements_file, env=env | PIP_ENV, cwd=CHECKOUT_DIR)
venv_marker_file.touch()
return env
def get_pypi_project(repository: str, project: str, version: Version | None = None) -> dict[str, t.Any]:
"""Return the project JSON from PyPI for the specified repository, project and version (optional)."""
endpoint = PYPI_ENDPOINTS[repository]
if version:
url = f"{endpoint}/{project}/{version}/json"
else:
url = f"{endpoint}/{project}/json"
opener = urllib.request.build_opener()
response: http.client.HTTPResponse
try:
with opener.open(url) as response:
data = json.load(response)
except urllib.error.HTTPError as ex:
if version:
target = f'{project!r} version {version}'
else:
target = f'{project!r}'
if ex.status == http.HTTPStatus.NOT_FOUND:
raise ApplicationError(f"Could not find {target} on PyPI.") from None
raise RuntimeError(f"Failed to get {target} from PyPI.") from ex
return data
def get_ansible_version(version: str | None = None, /, commit: str | None = None, mode: VersionMode = VersionMode.DEFAULT) -> Version:
"""Parse and return the current ansible-core version, the provided version or the version from the provided commit."""
if version and commit:
raise ValueError("Specify only one of: version, commit")
if version:
source = ""
else:
if commit:
current = git("show", f"{commit}:{ANSIBLE_RELEASE_FILE.relative_to(CHECKOUT_DIR)}", capture_output=True).stdout
else:
current = ANSIBLE_RELEASE_FILE.read_text()
if not (match := ANSIBLE_VERSION_PATTERN.search(current)):
raise RuntimeError("Failed to get the ansible-core version.")
version = match.group("version")
source = f" in '{ANSIBLE_RELEASE_FILE}'"
try:
parsed_version = Version(version)
except InvalidVersion:
raise ApplicationError(f"Invalid version{source}: {version}") from None
parsed_version = mode.apply(parsed_version)
return parsed_version
def get_next_version(version: Version, /, final: bool = False, pre: str | None = None, mode: VersionMode = VersionMode.DEFAULT) -> Version:
"""Return the next version after the specified version."""
# TODO: consider using development versions instead of post versions after a release is published
pre = pre or ""
micro = version.micro
if version.is_devrelease:
# The next version of a development release is the same version without the development component.
if final:
pre = ""
elif not pre and version.pre is not None:
pre = f"{version.pre[0]}{version.pre[1]}"
elif not pre:
pre = "b1" # when there is no existing pre and none specified, advance to b1
elif version.is_postrelease:
# The next version of a post release is the next pre-release *or* micro release component.
if final:
pre = ""
elif not pre and version.pre is not None:
pre = f"{version.pre[0]}{version.pre[1] + 1}"
elif not pre:
pre = "rc1" # when there is no existing pre and none specified, advance to rc1
if version.pre is None:
micro = version.micro + 1
else:
raise ApplicationError(f"Version {version} is not a development or post release version.")
version = f"{version.major}.{version.minor}.{micro}{pre}"
return get_ansible_version(version, mode=mode)
def check_ansible_version(current_version: Version, requested_version: Version) -> None:
"""Verify the requested version is valid for the current version."""
if requested_version.release[:2] != current_version.release[:2]:
raise ApplicationError(f"Version {requested_version} does not match the major and minor portion of the current version: {current_version}")
if requested_version < current_version:
raise ApplicationError(f"Version {requested_version} is older than the current version: {current_version}")
# TODO: consider additional checks to avoid mistakes when incrementing the release version
def set_ansible_version(current_version: Version, requested_version: Version) -> None:
"""Set the current ansible-core version."""
check_ansible_version(current_version, requested_version)
if requested_version == current_version:
return
display.show(f"Updating version {current_version} to {requested_version} ...")
current = ANSIBLE_RELEASE_FILE.read_text()
updated = ANSIBLE_VERSION_PATTERN.sub(ANSIBLE_VERSION_FORMAT.format(version=requested_version), current)
if current == updated:
raise RuntimeError("Failed to set the ansible-core version.")
ANSIBLE_RELEASE_FILE.write_text(updated)
def get_latest_setuptools_version() -> Version:
"""Return the latest setuptools version found on PyPI."""
data = get_pypi_project('pypi', 'setuptools')
version = Version(data['info']['version'])
return version
def set_setuptools_upper_bound(requested_version: Version) -> None:
"""Set the upper bound on setuptools in pyproject.toml."""
current = ANSIBLE_PYPROJECT_TOML_FILE.read_text()
pattern = re.compile(r'^(?P<begin>requires = \["setuptools >= )(?P<lower>[^,]+)(?P<middle>, <= )(?P<upper>[^"]+)(?P<end>".*)$', re.MULTILINE)
match = pattern.search(current)
if not match:
raise ApplicationError(f"Unable to find the 'requires' entry in: {ANSIBLE_PYPROJECT_TOML_FILE.relative_to(CHECKOUT_DIR)}")
current_version = Version(match.group('upper'))
if requested_version == current_version:
return
display.show(f"Updating setuptools upper bound from {current_version} to {requested_version} ...")
updated = pattern.sub(fr'\g<begin>\g<lower>\g<middle>{requested_version}\g<end>', current)
if current == updated:
raise RuntimeError("Failed to set the setuptools upper bound.")
ANSIBLE_PYPROJECT_TOML_FILE.write_text(updated)
def create_reproducible_sdist(original_path: pathlib.Path, output_path: pathlib.Path, mtime: int) -> None:
"""Read the specified sdist and write out a new copy with uniform file metadata at the specified location."""
with tarfile.open(original_path) as original_archive:
with tempfile.TemporaryDirectory() as temp_dir:
tar_file = pathlib.Path(temp_dir) / "sdist.tar"
with tarfile.open(tar_file, mode="w") as tar_archive:
for original_info in original_archive.getmembers(): # type: tarfile.TarInfo
tar_archive.addfile(create_reproducible_tar_info(original_info, mtime), original_archive.extractfile(original_info))
with tar_file.open("rb") as tar_archive:
with gzip.GzipFile(output_path, "wb", mtime=mtime) as output_archive:
shutil.copyfileobj(tar_archive, output_archive)
def create_reproducible_tar_info(original: tarfile.TarInfo, mtime: int) -> tarfile.TarInfo:
"""Return a copy of the given TarInfo with uniform file metadata."""
sanitized = tarfile.TarInfo()
sanitized.name = original.name
sanitized.size = original.size
sanitized.mtime = mtime
sanitized.mode = (original.mode & ~(stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)) | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
sanitized.type = original.type
sanitized.linkname = original.linkname
sanitized.uid = 0
sanitized.gid = 0
sanitized.uname = "root"
sanitized.gname = "root"
if original.mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
sanitized.mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
return sanitized
def test_built_artifact(path: pathlib.Path) -> None:
"""Test the specified built artifact by installing it in a venv and running some basic commands."""
with tempfile.TemporaryDirectory() as temp_dir_name:
temp_dir = pathlib.Path(temp_dir_name)
venv_dir = temp_dir / "venv"
venv_bin_dir = venv_dir / "bin"
venv.create(venv_dir, with_pip=True)
env = os.environ.copy()
env.pop("PYTHONPATH", None) # avoid interference from ansible being injected into the environment
env.update(
PATH=os.pathsep.join((str(venv_bin_dir), env["PATH"])),
)
run("pip", "install", path, env=env | PIP_ENV, cwd=CHECKOUT_DIR)
run("ansible", "--version", env=env, cwd=CHECKOUT_DIR)
run("ansible-test", "--version", env=env, cwd=CHECKOUT_DIR)
def get_sdist_path(version: Version, dist_dir: pathlib.Path = DIST_DIR) -> pathlib.Path:
"""Return the path to the sdist file."""
return dist_dir / f"ansible_core-{version}.tar.gz"
def get_wheel_path(version: Version, dist_dir: pathlib.Path = DIST_DIR) -> pathlib.Path:
"""Return the path to the wheel file."""
return dist_dir / f"ansible_core-{version}-py3-none-any.whl"
def calculate_digest(path: pathlib.Path) -> str:
"""Return the digest for the specified file."""
with open(path, "rb") as f:
digest = hashlib.file_digest(f, DIGEST_ALGORITHM)
return digest.hexdigest()
@functools.cache
def get_release_artifact_details(repository: str, version: Version, validate: bool) -> list[ReleaseArtifact]:
"""Return information about the release artifacts hosted on PyPI."""
data = get_pypi_project(repository, 'ansible-core', version)
artifacts = [describe_release_artifact(version, item, validate) for item in data["urls"]]
expected_artifact_types = {"bdist_wheel", "sdist"}
found_artifact_types = set(artifact.package_type for artifact in artifacts)
if found_artifact_types != expected_artifact_types:
raise RuntimeError(f"Expected {expected_artifact_types} artifact types, but found {found_artifact_types} instead.")
return artifacts
def describe_release_artifact(version: Version, item: dict[str, t.Any], validate: bool) -> ReleaseArtifact:
"""Return release artifact details extracted from the given PyPI data."""
package_type = item["packagetype"]
# The artifact URL is documented as stable, so is safe to put in release notes.
# See: https://github.com/pypi/warehouse/blame/c95be4a1055f4b36a8852715eb80318c81fc00ca/docs/api-reference/integration-guide.rst#L86-L90
url = item["url"]
pypi_size = item["size"]
pypi_digest = item["digests"][DIGEST_ALGORITHM]
if package_type == "bdist_wheel":
local_artifact_file = get_wheel_path(version)
package_label = "Built Distribution"
elif package_type == "sdist":
local_artifact_file = get_sdist_path(version)
package_label = "Source Distribution"
else:
raise NotImplementedError(f"Package type '{package_type}' is not supported.")
if validate:
try:
local_size = local_artifact_file.stat().st_size
local_digest = calculate_digest(local_artifact_file)
except FileNotFoundError:
raise ApplicationError(f"Missing local artifact: {local_artifact_file.relative_to(CHECKOUT_DIR)}") from None
if local_size != pypi_size:
raise ApplicationError(f"The {version} local {package_type} size {local_size} does not match the PyPI size {pypi_size}.")
if local_digest != pypi_digest:
raise ApplicationError(f"The {version} local {package_type} digest '{local_digest}' does not match the PyPI digest '{pypi_digest}'.")
return ReleaseArtifact(
package_type=package_type,
package_label=package_label,
url=url,
size=pypi_size,
digest=pypi_digest,
digest_algorithm=DIGEST_ALGORITHM.upper(),
)
def get_next_release_date(start: datetime.date, step: int, after: datetime.date) -> datetime.date:
"""Return the next release date."""
if start > after:
raise ValueError(f"{start=} is greater than {after=}")
current_delta = after - start
release_delta = datetime.timedelta(days=(math.floor(current_delta.days / step) + 1) * step)
release = start + release_delta
return release
def create_template_environment() -> jinja2.Environment:
"""Create and return a jinja2 environment."""
env = jinja2.Environment()
env.filters.update(
basename=os.path.basename,
)
return env
def create_github_release_notes(upstream: Remote, repository: str, version: Version, validate: bool) -> str:
"""Create and return GitHub release notes."""
env = create_template_environment()
template = env.from_string(GITHUB_RELEASE_NOTES_TEMPLATE)
variables = dict(
version=version,
releases=get_release_artifact_details(repository, version, validate),
changelog=f"https://github.com/{upstream.user}/{upstream.repo}/blob/v{version}/changelogs/CHANGELOG-v{version.major}.{version.minor}.rst",
)
release_notes = template.render(**variables).strip()
return release_notes
# endregion
# region Templates
GITHUB_RELEASE_NOTES_TEMPLATE = """
# Changelog
See the [full changelog]({{ changelog }}) for the changes included in this release.
# Release Artifacts
{%- for release in releases %}
* {{ release.package_label }}: [{{ release.url|basename }}]({{ release.url }}) - ‌{{ release.size }} bytes
* {{ release.digest }} ({{ release.digest_algorithm }})
{%- endfor %}
"""
# endregion
# region Commands
command = CommandFramework(
repository=dict(metavar="REPO", choices=tuple(PYPI_ENDPOINTS), default="pypi", help="PyPI repository to use: %(choices)s [%(default)s]"),
version=dict(exclusive="version", help="version to set"),
pre=dict(exclusive="version", help="increment version to the specified pre-release (aN, bN, rcN)"),
final=dict(exclusive="version", action="store_true", help="increment version to the next final release"),
commit=dict(help="commit to tag"),
validate=dict(name="--no-validate", action="store_false", help="disable validation of PyPI artifacts against local ones"),
prompt=dict(name="--no-prompt", action="store_false", help="disable interactive prompt before publishing with twine"),
setuptools=dict(name='--no-setuptools', action="store_false", help="disable updating setuptools upper bound"),
allow_tag=dict(action="store_true", help="allow an existing release tag (for testing)"),
allow_stale=dict(action="store_true", help="allow a stale checkout (for testing)"),
allow_dirty=dict(action="store_true", help="allow untracked files and files with changes (for testing)"),
)
@command
def instructions() -> None:
"""Show instructions for the release process."""
message = """
Releases must be performed using an up-to-date checkout of a fork of the Ansible repository.
1. Make sure your checkout is up-to-date.
2. Run the `prepare` command [1], then:
a. Submit the PR opened in the browser.
b. Wait for CI to pass.
c. Merge the PR.
3. Update your checkout to include the commit from the PR which was just merged.
4. Run the `complete` command [2], then:
a. Submit the GitHub release opened in the browser.
b. Submit the PR opened in the browser.
c. Wait for CI to pass.
d. Merge the PR.
[1] Use the `--final`, `--pre` or `--version` option for control over the version.
[2] During the `publish` step, `twine` may prompt for credentials.
"""
display.show(message.strip())
@command
def show_version(final: bool = False, pre: str | None = None) -> None:
"""Show the current and next ansible-core version."""
current_version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
display.show(f"Current version: {current_version}")
try:
next_version = get_next_version(current_version, final=final, pre=pre)
except ApplicationError as ex:
display.show(f" Next version: Unknown - {ex}")
else:
display.show(f" Next version: {next_version}")
check_ansible_version(current_version, next_version)
@command
def check_state(allow_stale: bool = False) -> None:
"""Verify the git repository is in a usable state for creating a pull request."""
get_git_state(get_ansible_version(), allow_stale)
# noinspection PyUnusedLocal
@command
def prepare(final: bool = False, pre: str | None = None, version: str | None = None, setuptools: bool | None = None) -> None:
"""Prepare a release."""
command.run(
update_version,
update_setuptools,
check_state,
generate_summary,
generate_changelog,
create_release_pr,
)
@command
def update_version(final: bool = False, pre: str | None = None, version: str | None = None) -> None:
"""Update the version embedded in the source code."""
current_version = get_ansible_version(mode=VersionMode.REQUIRE_DEV_POST)
if version:
requested_version = get_ansible_version(version)
else:
requested_version = get_next_version(current_version, final=final, pre=pre)
set_ansible_version(current_version, requested_version)
@command
def update_setuptools(setuptools: bool) -> None:
"""Update the setuptools upper bound in pyproject.toml."""
if not setuptools:
return
requested_version = get_latest_setuptools_version()
set_setuptools_upper_bound(requested_version)
@command
def generate_summary() -> None:
"""Generate a summary changelog fragment for this release."""
version = get_ansible_version()
release_date = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d")
summary_path = CHANGELOGS_FRAGMENTS_DIR / f"{version}_summary.yaml"
major_minor = f"{version.major}.{version.minor}"
content = f"""
release_summary: |
| Release Date: {release_date}
| `Porting Guide <https://docs.ansible.com/ansible-core/{major_minor}/porting_guides/porting_guide_core_{major_minor}.html>`__
"""
summary_path.write_text(content.lstrip())
@command
def generate_changelog() -> None:
"""Generate the changelog and validate the results."""
changelog_requirements = (
ANSIBLE_CHANGELOG_REQUIREMENTS_FILE.read_text()
+ ANSIBLE_REQUIREMENTS_FILE.read_text() # TODO: consider pinning the ansible requirements and dependencies
)
env = ensure_venv(changelog_requirements)
env.update(
PATH=os.pathsep.join((str(ANSIBLE_BIN_DIR), env["PATH"])),
PYTHONPATH=ANSIBLE_LIB_DIR,
)
# TODO: consider switching back to the original changelog generator instead of using antsibull-changelog
run("antsibull-changelog", "release", "-vv", "--use-ansible-doc", env=env, cwd=CHECKOUT_DIR)
run("antsibull-changelog", "generate", "-vv", "--use-ansible-doc", env=env, cwd=CHECKOUT_DIR)
run("ansible-test", "sanity", CHANGELOGS_DIR, ANSIBLE_RELEASE_FILE, env=env, cwd=CHECKOUT_DIR)
@command
def create_release_pr(allow_stale: bool = False) -> None:
"""Create a branch and open a browser tab for creating a release pull request."""
version = get_ansible_version()
pr = prepare_pull_request(
version=version,
branch=f"release-{version}-{secrets.token_hex(4)}",
title=f"New release v{version}",
add=(
CHANGELOGS_DIR,
ANSIBLE_RELEASE_FILE,
ANSIBLE_PYPROJECT_TOML_FILE,
),
allow_stale=allow_stale,
)
create_pull_request(pr)
# noinspection PyUnusedLocal
@command
def complete(repository: str, allow_dirty: bool = False) -> None:
"""Complete a release after the prepared changes have been merged."""
command.run(
check_state,
build,
test,
publish,
tag_release,
post_version,
create_post_pr,
)
@command
def build(allow_dirty: bool = False) -> None:
"""Build the sdist and wheel."""
version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
# TODO: consider pinning the build requirement and its dependencies
build_requirements = """
build
"""
env = ensure_venv(build_requirements)
dirty = git("status", "--porcelain", "--untracked-files=all", capture_output=True).stdout.strip().splitlines()
if dirty:
with suppress_when(allow_dirty):
raise ApplicationError(f"There are {len(dirty)} files which are untracked and/or have changes, which will be omitted from the build.")
sdist_file = get_sdist_path(version)
wheel_file = get_wheel_path(version)
with tempfile.TemporaryDirectory(dir=DIST_DIR, prefix=f"build-{version}-", suffix=".tmp") as temp_dir_name:
temp_dir = pathlib.Path(temp_dir_name)
dist_dir = temp_dir / "dist"
commit_time = int(git("show", "-s", "--format=%ct", capture_output=True).stdout)
env.update(
SOURCE_DATE_EPOCH=commit_time,
)
git("worktree", "add", "-d", temp_dir)
try:
run("python", "-m", "build", env=env, cwd=temp_dir)
create_reproducible_sdist(get_sdist_path(version, dist_dir), sdist_file, commit_time)
get_wheel_path(version, dist_dir).rename(wheel_file)
finally:
git("worktree", "remove", temp_dir)
@command
def test() -> None:
"""Test the sdist and wheel."""
command.run(
test_sdist,
test_wheel,
)
@command
def test_sdist() -> None:
"""Test the sdist."""
version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
sdist_file = get_sdist_path(version)
with tempfile.TemporaryDirectory() as temp_dir_name:
temp_dir = pathlib.Path(temp_dir_name)
with contextlib.ExitStack() as stack:
try:
sdist = stack.enter_context(tarfile.open(sdist_file))
except FileNotFoundError:
raise ApplicationError(f"Missing sdist: {sdist_file.relative_to(CHECKOUT_DIR)}") from None
sdist.extractall(temp_dir, filter='data')
pyc_glob = "*.pyc*"
pyc_files = sorted(path.relative_to(temp_dir) for path in temp_dir.rglob(pyc_glob))
if pyc_files:
raise ApplicationError(f"Found {len(pyc_files)} '{pyc_glob}' file(s): {', '.join(map(str, pyc_files))}")
test_built_artifact(sdist_file)
@command
def test_wheel() -> None:
"""Test the wheel."""
version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST)
wheel_file = get_wheel_path(version)
with tempfile.TemporaryDirectory() as temp_dir_name:
temp_dir = pathlib.Path(temp_dir_name)
with contextlib.ExitStack() as stack:
try:
wheel = stack.enter_context(zipfile.ZipFile(wheel_file))
except FileNotFoundError:
raise ApplicationError(f"Missing wheel for version {version}: {wheel_file}") from None
wheel.extractall(temp_dir)
test_built_artifact(wheel_file)
@command
def publish(repository: str, prompt: bool = True) -> None:
"""Publish to PyPI."""
version = get_ansible_version()
sdist_file = get_sdist_path(version)
wheel_file = get_wheel_path(version)
# TODO: consider pinning the twine requirement and its dependencies
publish_requirements = """
twine
"""
env = ensure_venv(publish_requirements)
if prompt:
try:
while input(f"Do you want to publish {version} to the '{repository}' repository?\nEnter the repository name to confirm: ") != repository:
pass
except KeyboardInterrupt:
display.show("")
raise ApplicationError("Publishing was aborted by the user.") from None
run("twine", "upload", "-r", repository, sdist_file, wheel_file, env=env, cwd=CHECKOUT_DIR)
@command
def tag_release(repository: str, commit: str | None = None, validate: bool = True, allow_tag: bool = False) -> None:
"""Create a GitHub release using the current or specified commit."""
upstream = get_remotes().upstream
if commit:
git("fetch", upstream.name) # fetch upstream to make sure the commit can be found
commit = get_commit(commit)
version = get_ansible_version(commit=commit)
tag = f"v{version}"
if upstream_tag := git("ls-remote", "--tags", upstream.name, tag, capture_output=True).stdout.strip():
with suppress_when(allow_tag):
raise ApplicationError(f"Version {version} has already been tagged: {upstream_tag}")
upstream_branch = get_upstream_branch(version)
upstream_refs = git("branch", "-r", "--format=%(refname)", "--contains", commit, capture_output=True).stdout.strip().splitlines()
upstream_ref = f"refs/remotes/{upstream.name}/{upstream_branch}"
if upstream_ref not in upstream_refs:
raise ApplicationError(f"Commit {upstream_ref} not found. Found {len(upstream_refs)} upstream ref(s): {', '.join(upstream_refs)}")
body = create_github_release_notes(upstream, repository, version, validate)
release = GitHubRelease(
user=upstream.user,
repo=upstream.repo,
target=commit,
tag=tag,
title=tag,
body=body,
pre_release=version.pre is not None,
)
create_github_release(release)
@command
def post_version() -> None:
"""Set the post release version."""
current_version = get_ansible_version()
requested_version = get_ansible_version(f"{current_version}.post0", mode=VersionMode.REQUIRE_POST)
set_ansible_version(current_version, requested_version)
@command
def create_post_pr(allow_stale: bool = False) -> None:
"""Create a branch and open a browser tab for creating a post release pull request."""
version = get_ansible_version(mode=VersionMode.REQUIRE_POST)
pr = prepare_pull_request(
version=version,
branch=f"release-{version}-{secrets.token_hex(4)}",
title=f"Update Ansible release version to v{version}.",
add=(ANSIBLE_RELEASE_FILE,),
allow_stale=allow_stale,
)
create_pull_request(pr)
# endregion
if __name__ == "__main__":
command.main()
|
VersionMode
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/tools/flatbuffer_utils_test.py
|
{
"start": 11100,
"end": 12419
}
|
class ____(test_util.TensorFlowTestCase):
op: schema.Operator
op_t: schema.OperatorT
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.op = test_utils.build_operator_with_options()
cls.op_t = schema.OperatorT.InitFromObj(cls.op)
def test_get_options(self):
ty = schema.StableHLOCompositeOptionsT
opts = flatbuffer_utils.get_options_as(self.op, ty)
self.assertIsNotNone(opts)
self.assertIsInstance(opts, ty)
self.assertEqual(opts.decompositionSubgraphIndex, 10)
def test_get_options_obj(self):
ty = schema.StableHLOCompositeOptionsT
opts = flatbuffer_utils.get_options_as(self.op_t, ty)
self.assertIsNotNone(opts)
self.assertIsInstance(opts, ty)
self.assertEqual(opts.decompositionSubgraphIndex, 10)
def test_get_options_not_schema_type_raises(self):
with self.assertRaises(ValueError):
flatbuffer_utils.get_options_as(self.op, int)
def test_get_options_not_object_type_raises(self):
with self.assertRaises(ValueError):
flatbuffer_utils.get_options_as(self.op, schema.StableHLOCompositeOptions)
def test_get_options_op_type_does_not_match(self):
ty = schema.Conv2DOptionsT
opts = flatbuffer_utils.get_options_as(self.op, ty)
self.assertIsNone(opts)
if __name__ == '__main__':
test.main()
|
GetOptionsTest
|
python
|
pytorch__pytorch
|
test/distributed/_tools/test_mod_tracker.py
|
{
"start": 270,
"end": 7020
}
|
class ____(TestCase):
# "https://github.com/pytorch/pytorch/issues/127112
@xfailIfTorchDynamo
def test_module_hierarchy(self):
seen_fw = []
seen_bw = []
class Foo(torch.nn.Module):
def forward(self, x):
x = x["a"].relu_()
seen_fw.append((copy(tracker.parents), tracker.is_bw))
x.register_hook(
lambda grad: seen_bw.append((copy(tracker.parents), tracker.is_bw))
)
return {"a": torch.mm(x, x)}
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = Foo()
self.b = torch.nn.ModuleDict({"nest": Foo()})
self.c = torch.nn.ModuleList([Foo()])
def forward(self, x):
x = self.c[0](x)
return self.b["nest"](self.a(x))
mod = Mod()
with ModTracker() as tracker:
mod({"a": torch.randn(10, 10, requires_grad=True).clone()})[
"a"
].sum().backward()
mod({"a": torch.randn(10, 10, requires_grad=True).clone()})[
"a"
].sum().backward()
self.assertEqual(
seen_fw,
[
({"Global", "Mod", "Mod.c.0"}, False),
({"Global", "Mod", "Mod.a"}, False),
({"Global", "Mod", "Mod.b.nest"}, False),
({"Global", "Mod", "Mod.c.0"}, False),
({"Global", "Mod", "Mod.a"}, False),
({"Global", "Mod", "Mod.b.nest"}, False),
],
)
self.assertEqual(
seen_bw,
[
({"Global", "Mod", "Mod.b.nest"}, True),
({"Global", "Mod", "Mod.a"}, True),
({"Global", "Mod", "Mod.c.0"}, True),
({"Global", "Mod", "Mod.b.nest"}, True),
({"Global", "Mod", "Mod.a"}, True),
({"Global", "Mod", "Mod.c.0"}, True),
],
)
def test_bw_detection(self):
mod = torch.nn.Linear(2, 2)
with ModTracker() as tracker:
mod(torch.rand(2, requires_grad=True)).sum().backward()
self.assertFalse(tracker.is_bw)
self.assertEqual(tracker.parents, {"Global"})
@xfailIfTorchDynamo
def test_user_hooks(self):
class Bar(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(10, 10)
def forward(self, x):
return self.foo(x).relu_()
mt = ModTracker()
test_op = []
def hook(mod, hook_name):
mfqn = mt.get_known_fqn(mod) if mod is not None else None
test_op.append((hook_name, mfqn, mfqn in mt.parents, mt.is_bw))
mod = Bar()
mt.register_user_hooks(
lambda m, inp: hook(m, "pre_fw"),
lambda m, inp, op: hook(m, "post_fw"),
lambda m, gop: hook(m, "pre_bw"),
lambda m, ginp: hook(m, "post_bw"),
)
with mt:
mod(torch.rand(10, 10, requires_grad=True)).sum().backward()
expected_op = [
("pre_fw", "Bar", True, False),
("pre_fw", "Bar.foo", True, False),
("post_fw", "Bar.foo", True, False),
("post_fw", "Bar", True, False),
("pre_bw", "Bar", True, True),
("pre_bw", "Bar.foo", True, True),
("post_bw", "Bar", True, True),
("post_bw", "Bar.foo", True, True),
]
self.assertEqual(test_op, expected_op)
with self.assertRaises(AssertionError):
mt.register_user_hooks(lambda x, y: x, None, None, None)
test_op.clear()
with mt:
loss = mod(torch.rand(10, 10, requires_grad=True)).sum()
del mod
loss.backward()
expected_op = [
("pre_fw", "Bar", True, False),
("pre_fw", "Bar.foo", True, False),
("post_fw", "Bar.foo", True, False),
("post_fw", "Bar", True, False),
("pre_bw", None, False, True),
("pre_bw", None, False, True),
("post_bw", None, False, True),
("post_bw", None, False, True),
]
self.assertEqual(test_op, expected_op)
@xfailIfTorchDynamo
def test_ac(self):
class Foo(torch.nn.Module):
def __init__(self, n_layers: int, dim: int, use_ac: bool = False):
super().__init__()
self.linears = torch.nn.ModuleList()
self.use_ac = use_ac
for _ in range(n_layers):
self.linears.append(torch.nn.Linear(dim, dim))
def forward(self, x):
for i, block in enumerate(self.linears):
if i >= 1 and self.use_ac:
x = checkpoint(
block, x, preserve_rng_state=True, use_reentrant=False
)
else:
x = block(x)
assert x is not None
x = torch.nn.functional.relu(x)
return x
bsz = 2
dim = 8
n_layers = 2
test_op = []
def hook(mod, mt, hook_name):
mfqn = mt.get_known_fqn(mod) if mod is not None else None
test_op.append((hook_name, mfqn, mfqn in mt.parents, mt.is_bw))
mt = ModTracker()
mt.register_user_hooks(
lambda m, i: hook(m, mt, "pre_fw"),
lambda m, i, o: hook(m, mt, "post_fw"),
lambda m, go: hook(m, mt, "pre_bw"),
lambda m, gi: hook(m, mt, "post_bw"),
)
model = Foo(n_layers, dim, True)
x = torch.randn(bsz, dim)
with mt:
model(x).sum().backward()
expected_op = [
("pre_fw", "Foo", True, False),
("pre_fw", "Foo.linears.0", True, False),
("post_fw", "Foo.linears.0", True, False),
("pre_fw", "Foo.linears.1", True, False),
("post_fw", "Foo.linears.1", True, False),
("post_fw", "Foo", True, False),
("pre_bw", "Foo", True, True),
("pre_bw", "Foo.linears.1", True, True),
("pre_fw", "Foo.linears.1", True, True),
("post_fw", "Foo.linears.1", True, True),
("post_bw", "Foo.linears.1", True, True),
("pre_bw", "Foo.linears.0", True, True),
("post_bw", "Foo.linears.0", True, True),
("post_bw", "Foo", True, True),
]
self.assertEqual(test_op, expected_op)
if __name__ == "__main__":
run_tests()
|
TestModTracker
|
python
|
pytorch__pytorch
|
test/inductor/test_select_algorithm.py
|
{
"start": 2288,
"end": 18272
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
if not is_big_gpu():
return self.skipTest("Need a big GPU to run max_autotune=True")
# Clear preprocessing functions to ensure clean state
select_algorithm.clear_preprocessing_fns()
@patches
def test_linear_relu(self):
@torch.compile
def foo(input, weight, bias):
return F.relu(F.linear(input, weight, bias))
foo(
torch.randn(64, 32, device=GPU_TYPE),
torch.randn(16, 32, device=GPU_TYPE),
torch.randn(1, 16, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
# It would be nice to assert this got fused into a single kernel, but that
# only happens if we select a triton template (and not aten).
@patches
def test_addmm(self):
@torch.compile
def foo(input, weight, bias):
return torch.addmm(bias, input, weight)
inps = (
torch.randn(20, 33, device=GPU_TYPE),
torch.randn(33, 16, device=GPU_TYPE),
torch.randn(20, 16, device=GPU_TYPE),
)
foo(*inps)
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
def test_preprocessing_single_choice(self):
# pass a list to the preprocessing function to assert that it was
# actually called
func_called = [False]
# Register a preprocessing function that returns only the first choice
# This in turn will lead to autotuning being skipped as it's a single
# choice, and the counter itself will not be bumped
def return_first_choice_only(choices):
func_called[0] = True
return choices[:1] if choices else []
select_algorithm.add_preprocessing_fn(return_first_choice_only)
@torch.compile
def foo(input, weight, bias):
return torch.addmm(bias, input, weight)
inps = (
torch.randn(20, 33, device=GPU_TYPE),
torch.randn(33, 16, device=GPU_TYPE),
torch.randn(20, 16, device=GPU_TYPE),
)
foo(*inps)
# Since we only have one choice, autotuning should be skipped
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 0)
# The preprocessing function should have been called
self.assertTrue(func_called[0])
@patch.object(select_algorithm, "VERIFY", dict(atol=5e-2, rtol=5e-2))
@patches
def test_addmm_fp16(self):
@torch.compile
def foo(input, weight, bias):
return torch.addmm(bias, input, weight)
inps = (
torch.randn(2, 320, device=GPU_TYPE, dtype=torch.half),
torch.randn(320, 320, device=GPU_TYPE, dtype=torch.half).t(),
torch.empty(320, device=GPU_TYPE, dtype=torch.half),
)
foo(*inps)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
def test_mm(self):
@torch.compile
def foo(a, b):
return torch.mm(a, b)
foo(
torch.randn(8, 32, device=GPU_TYPE),
torch.randn(32, 8, device=GPU_TYPE),
)
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
def test__int_mm(self):
@torch.compile
def foo(a, b):
return torch._int_mm(a, b)
foo(
torch.randint(-10, 10, (64, 32), device=GPU_TYPE, dtype=torch.int8),
torch.randint(-10, 10, (32, 64), device=GPU_TYPE, dtype=torch.int8),
)
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
def test_mm_skip(self):
@torch.compile
def foo(a, b):
return torch.mm(a, b)
foo(
torch.randn(8, 32, device=GPU_TYPE, dtype=torch.float64),
torch.randn(32, 8, device=GPU_TYPE, dtype=torch.float64),
)
# float64 not supported by tl.dot()
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 0)
@patches
def test_bmm(self):
@torch.compile
def foo(a, b):
return torch.bmm(a, b)
foo(
torch.randn(2, 8, 32, device=GPU_TYPE),
torch.randn(2, 32, 8, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
def test_mm_not_even_k(self):
@torch.compile
def foo(a, b):
return torch.mm(a, b)
foo(
torch.randn(11, 22, device=GPU_TYPE),
torch.randn(22, 33, device=GPU_TYPE),
)
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
def test_baddbmm(self):
@torch.compile
def foo(a, b, c):
return torch.baddbmm(c, a, b)
foo(
torch.randn(2, 8, 32, device=GPU_TYPE),
torch.randn(2, 32, 8, device=GPU_TYPE),
torch.randn(2, 1, 8, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
def test_mm_plus_mm(self):
@torch.compile
def foo(a, b, c, d):
return (a @ b) + (c @ d)
foo(
torch.randn(32, 32, device=GPU_TYPE),
torch.randn(32, 32, device=GPU_TYPE),
torch.randn(32, 32, device=GPU_TYPE),
torch.randn(32, 32, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
# TODO: fix accuracy failure of the triton template on XPU.
# and enable this test case.
@patches
def test_mm_plus_mm2(self):
@torch.compile
def foo(a, b, c, d):
return (a @ b) + (c @ d)
foo(
torch.randn(512, 512, device=GPU_TYPE),
torch.randn(512, 512, device=GPU_TYPE),
torch.randn(512, 512, device=GPU_TYPE),
torch.randn(512, 512, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@expectedFailureDynamicWrapper
@patches
def test_mm_plus_mm3(self):
@torch.compile
def foo(a, b, c, d):
return (a @ b) + (c @ d)
foo(
torch.randn(512, 32, device=GPU_TYPE),
torch.randn(32, 8, device=GPU_TYPE),
torch.randn(512, 32, device=GPU_TYPE),
torch.randn(32, 8, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
def test_mm_dup_args(self):
@torch.compile
def foo(a):
return torch.mm(a, a)
foo(torch.randn(32, 32, device=GPU_TYPE))
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
def test_mm_dup_args_view(self):
@torch.compile
def foo(a):
q = a[:32, :]
k = a[32:, :]
return torch.mm(q, k.transpose(0, 1))
foo(torch.randn(64, 64, device=GPU_TYPE))
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@expectedFailureDynamicWrapper
@patches
def test_convolution1(self):
@torch.compile
def foo(x, w, b):
return aten.convolution(
x + 1,
w,
b,
stride=(2, 3),
padding=(4, 5),
dilation=(1, 1),
transposed=False,
output_padding=(0, 0),
groups=1,
)
foo(
torch.randn(2, 33, 34, 41, device=GPU_TYPE),
torch.randn(34, 33, 3, 3, device=GPU_TYPE),
torch.randn(34, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@skipIfRocm
@patches
def test_mm_dropout(self):
@torch.compile
def fn(x1, x2, seed):
mm_4 = torch.ops.aten.mm.default(x2, x1)
rnd = torch.ops.prims.inductor_random.default(mm_4.shape, seed, "rand")
return mm_4 * rnd
if GPU_TYPE == "xpu":
patcher = patch.object(
select_algorithm, "VERIFY", dict(atol=1e-3, rtol=1e-3)
)
fn = patcher(fn)
# sizes picked so triton autotuning wins
fn(
torch.randn(512, 1024, dtype=torch.float16, device=GPU_TYPE),
torch.randn(384, 512, dtype=torch.float16, device=GPU_TYPE),
torch.tensor(12345, device=GPU_TYPE),
)
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
@torch._inductor.config.patch(conv_1x1_as_mm=False)
def test_convolution2(self):
@torch.compile
def foo(x, w, b):
return aten.convolution(
x,
w,
b,
stride=(1, 1),
padding=(0, 0),
dilation=(1, 1),
transposed=False,
output_padding=(0, 0),
groups=1,
)
foo(
torch.randn(1, 33, 16, 16, device=GPU_TYPE),
torch.randn(34, 33, 1, 1, device=GPU_TYPE),
torch.randn(34, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
@torch._inductor.config.patch(conv_1x1_as_mm=True)
def test_convolution_as_mm(self):
@torch.compile
def foo(x, w, b):
return aten.convolution(
x + 1,
w,
b,
stride=(1, 1),
padding=(0, 0),
dilation=(1, 1),
transposed=False,
output_padding=(0, 0),
groups=1,
)
foo(
torch.randn(2, 33, 16, 16, device=GPU_TYPE),
torch.randn(34, 33, 1, 1, device=GPU_TYPE),
torch.randn(34, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
@patches
@torch._inductor.config.patch(
{"conv_1x1_as_mm": True, "max_autotune_gemm_backends": "TRITON"}
)
def test_convolution_as_mm_triton_only(self):
# To convert the 1x1 conv to matmul, x is converted to a channels last
# tensor and the channels dimension is permuted to be innermost. This
# prologue should not be fused with the matmul since the prologue writes
# discontiguously, whilst the mm template currently only supports reading
# the input contiguously.
#
# Before the change associated with this PR, fusion would occur because the actual kernel
# input nodes (which don't include views e.g. permute) would be passed to the
# `TritonTemplateCaller` rather than the input nodes that include views.
# For example after x is converted to channels last, its layout is shape @ stride
# [2, 33, 16, 16] @ [8432, 1, 528, 33], or [2, 33, 256] @ [8432, 1, 33], and the
# prologue writes this value discontiguously.
# After the permute, the mm template fixes the layout to [512, 33] @ [33, 1] and
# reads the input contiguously. If the kernel input node for x is passed to the
# `TritonTemplateCaller`, then the scheduler will fuse the prologue since the
# write is compatible with the read. If however the viewed input is passed
# to `TritonTemplateCaller`, then the write won't be compatible with the read,
# and the prologue won't be fused.
def foo(x, w, b):
return aten.convolution(
x + 1,
w,
b,
stride=(1, 1),
padding=(0, 0),
dilation=(1, 1),
transposed=False,
output_padding=(0, 0),
groups=1,
)
x = torch.randn(2, 33, 16, 16, device=GPU_TYPE)
w = torch.randn(34, 33, 1, 1, device=GPU_TYPE)
b = torch.randn(34, device=GPU_TYPE)
class SingleMMConfigChoice(InductorChoices):
def get_template_configs(
self,
kernel_inputs: KernelInputs,
templates: list[Union[KernelTemplate, ExternKernelChoice]],
op_name: str,
kwarg_overrides: Optional[dict[str, dict[str, Any]]] = None,
):
return super().get_template_configs(
kernel_inputs, templates, op_name, kwarg_overrides
)[:1]
with V.set_choices_handler(SingleMMConfigChoice()):
result_compile = torch.compile(foo)(x, w, b)
result_eager = foo(x, w, b)
# If the prologue has been fused this should fail
torch.testing.assert_close(result_compile, result_eager)
# There should not be any autotuning
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 0)
@patches
@torch._inductor.config.patch(conv_1x1_as_mm=False)
def test_convolution2_group(self):
@torch.compile
def foo(x, w, b):
return aten.convolution(
x,
w,
b,
stride=(1, 1),
padding=(1, 1),
dilation=(1, 1),
transposed=False,
output_padding=(0, 0),
groups=32, # group is not 1
)
foo(
torch.randn(1, 32, 16, 16, device=GPU_TYPE),
torch.randn(32, 1, 3, 3, device=GPU_TYPE),
torch.randn(32, device=GPU_TYPE),
)
# Autotuning checks correctness of each version
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
def test_TritonTemplateCaller_str(self):
"""
Make sure str(TritonTemplateCaller) does not raise exceptions.
"""
module_path = "abc.py"
bmreq = TritonBenchmarkRequest(
module_path=module_path,
module_cache_key=None,
kernel_name=None,
extra_args=None,
num_stages=None,
num_warps=None,
num_consumer_groups=None,
num_buffers_warp_spec=None,
input_tensor_meta=None,
output_tensor_meta=None,
)
caller = select_algorithm.TritonTemplateCaller(
None, None, None, None, "extra", bmreq
)
caller_str = str(caller)
self.assertEqual(caller_str, f"TritonTemplateCaller({module_path}, extra)")
@contextlib.contextmanager
def patch_lowering(lowering_overrides) -> Callable[[], None]:
import torch._inductor.lowering as inductor_lowering
with unittest.mock.patch.dict(inductor_lowering.lowerings):
for fn, (
decomp_fn,
broadcast,
type_promotion_kind,
convert_input_to_bool,
) in lowering_overrides.items():
inductor_lowering._register_lowering(
fn,
decomp_fn,
broadcast=broadcast,
type_promotion_kind=type_promotion_kind,
convert_input_to_bool=convert_input_to_bool,
lowering_dict=inductor_lowering.lowerings,
)
yield
|
TestSelectAlgorithm
|
python
|
openai__openai-python
|
tests/test_client.py
|
{
"start": 1110,
"end": 1734
}
|
class ____(Protocol):
request: httpx.Request
def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]:
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
return dict(url.params)
def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float:
return 0.1
def _get_open_connections(client: OpenAI | AsyncOpenAI) -> int:
transport = client._client._transport
assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport)
pool = transport._pool
return len(pool._requests)
|
MockRequestCall
|
python
|
cython__cython
|
Cython/Compiler/TypeSlots.py
|
{
"start": 16351,
"end": 16544
}
|
class ____(GCDependentSlot):
def slot_code(self, scope):
if scope.needs_tp_clear():
return GCDependentSlot.slot_code(self, scope)
return "0"
|
GCClearReferencesSlot
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 943310,
"end": 946758
}
|
class ____(Predicate):
"""
FieldEqualPredicate schema wrapper.
Parameters
----------
equal : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`
The value that the field should be equal to.
field : str, :class:`FieldName`
Field to be tested.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit for the field to be tested.
"""
_schema = {"$ref": "#/definitions/FieldEqualPredicate"}
def __init__(
self,
equal: Optional[
str | bool | float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
field: Optional[str | SchemaBase] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
**kwds,
):
super().__init__(equal=equal, field=field, timeUnit=timeUnit, **kwds)
|
FieldEqualPredicate
|
python
|
mitmproxy__pdoc
|
pdoc/__init__.py
|
{
"start": 659,
"end": 5080
}
|
class ____:
"""🐕"""
name: str
"""The name of our dog."""
friends: list["Dog"]
"""The friends of our dog."""
def __init__(self, name: str):
"""Make a Dog without any friends (yet)."""
self.name = name
self.friends = []
def bark(self, loud: bool = True):
"""*woof*"""
```
We can invoke pdoc to take our docstrings and render them into a standalone HTML document:
```shell
pdoc ./demo.py # or: pdoc my_module_name
```
This opens a browser with our module documentation. Here's a copy of what you should see:
<iframe style="
width: 100%;
height: 250px;
border: solid gray 1px;
display: block;
margin: 1rem auto;
border-radius: 5px;"
title="rendered demo.py documentation"
src="https://pdoc.dev/docs/demo-standalone.html"></iframe>
If you look closely, you'll notice that docstrings are interpreted as Markdown.
For example, \`pdoc\` is rendered as `pdoc`. Additionally, identifiers such as the type annotation
for `Dog.friends` are automatically linked.
If we edit `demo.py` now, the page will reload automatically.
Once we are happy with everything, we can export the documentation to an HTML file:
```shell
pdoc ./demo.py -o ./docs
```
This will create an HTML file at `docs/demo.html` which contains our module documentation. 🎉
## Customizing pdoc
We can optionally configure pdoc's output via command line flags.
For example, we can add a project logo to the documentation:
```shell
pdoc ./demo.py --logo "https://placedog.net/300?random"
```
To get a list of all available rendering options, run:
```shell
pdoc --help
```
If you need more advanced customization options, see [*How can I edit pdoc's HTML template?*](#edit-pdocs-html-template).
## Deploying to GitHub Pages
*In this example we'll deploy pdoc's documentation to GitHub Pages. Of course, you can distribute
the generated documentation however you want! pdoc's job is to "just" produce self-contained HTML files for you.*
A very simple way to host your API documentation is to set up a continuous integration job which
pushes your documentation to GitHub Pages. This keeps your docs updated automatically.
1. Enable GitHub Actions and GitHub Pages for your project.
2. In the GitHub Pages settings, select GitHub Actions as your build and deployment source.
3. Copy pdoc's GitHub Actions workflow into your own repository and adjust it to how you build your docs:
[`.github/workflows/docs.yml`](https://github.com/mitmproxy/pdoc/blob/main/.github/workflows/docs.yml)
That's it – no need to fiddle with any secrets or set up any `gh-pages` branches. 🥳
# How can I ... ?
## ...add documentation?
In Python, objects like modules, functions and classes have
a special attribute named `__doc__` which contains that object's
*docstring*. The docstring comes from a special placement of a string
in your source code. For example, the following code shows how to
define a function with a docstring and access the contents of that
docstring:
```python
>>> def test():
... """This is a docstring."""
... pass
...
>>> test.__doc__
'This is a docstring.'
```
Something similar can be done for classes and modules too. For classes,
the docstring should come on the line immediately following `class
...`. For modules, the docstring should start on the first line of
the file. These docstrings are what you see for each module, class,
function and method listed in the documentation produced by pdoc.
## ...document variables?
Python itself [does not attach docstrings to
variables](https://www.python.org/dev/peps/pep-0224/). For example:
```python
variable = "SomeValue"
"""Docstring for variable."""
```
The resulting `variable` will have no `__doc__` attribute.
To compensate, pdoc will read the abstract syntax tree (an abstract representation of the source code)
and include all assignment statements immediately followed by a docstring. This approach is not formally standardized,
but followed by many tools, including Sphinx's autodoc extension in case you ever decide to migrate off pdoc.
Docstring detection is limited to the current module, docstrings for variables imported from other modules are not
picked up.
Something similar is done for instance variables, which are either type-annotated in the class
or defined in a class's `__init__`. Here is an example showing both conventions detected by pdoc:
```python
|
Dog
|
python
|
apache__airflow
|
dev/airflow_perf/dags/elastic_dag.py
|
{
"start": 4004,
"end": 6122
}
|
class ____(Enum):
"""
Define shape of the Dag that will be used for testing.
"""
NO_STRUCTURE = "no_structure"
LINEAR = "linear"
BINARY_TREE = "binary_tree"
STAR = "star"
GRID = "grid"
DAG_PREFIX = os.environ.get("PERF_DAG_PREFIX", "perf_scheduler")
DAG_COUNT = int(os.environ["PERF_DAGS_COUNT"])
TASKS_COUNT = int(os.environ["PERF_TASKS_COUNT"])
START_DATE_ENV = os.environ.get("PERF_START_AGO", "1h")
START_DATE = datetime.now() - parse_time_delta(START_DATE_ENV)
SCHEDULE_INTERVAL_ENV = os.environ.get("PERF_SCHEDULE_INTERVAL", "@once")
SCHEDULE_INTERVAL = parse_schedule_interval(SCHEDULE_INTERVAL_ENV)
SHAPE = DagShape(os.environ["PERF_SHAPE"])
args = {"owner": "airflow", "start_date": START_DATE}
if "PERF_MAX_RUNS" in os.environ:
if isinstance(SCHEDULE_INTERVAL, str):
raise ValueError("Can't set max runs with string-based schedule_interval")
num_runs = int(os.environ["PERF_MAX_RUNS"])
args["end_date"] = START_DATE + (SCHEDULE_INTERVAL * (num_runs - 1))
for dag_no in range(1, DAG_COUNT + 1):
dag = DAG(
dag_id=safe_dag_id(
"__".join(
[
DAG_PREFIX,
f"SHAPE={SHAPE.name.lower()}",
f"DAGS_COUNT={dag_no}_of_{DAG_COUNT}",
f"TASKS_COUNT=${TASKS_COUNT}",
f"START_DATE=${START_DATE_ENV}",
f"SCHEDULE=${SCHEDULE_INTERVAL_ENV}",
]
)
),
is_paused_upon_creation=False,
default_args=args,
schedule=SCHEDULE_INTERVAL,
)
elastic_dag_tasks = [
BashOperator(task_id="__".join(["tasks", f"{i}_of_{TASKS_COUNT}"]), bash_command="echo test", dag=dag)
for i in range(1, TASKS_COUNT + 1)
]
shape_function_map = {
DagShape.LINEAR: chain,
DagShape.BINARY_TREE: chain_as_binary_tree,
DagShape.STAR: chain_as_star,
DagShape.GRID: chain_as_grid,
}
if SHAPE != DagShape.NO_STRUCTURE:
shape_function_map[SHAPE](*elastic_dag_tasks)
globals()[f"dag_{dag_no}"] = dag
|
DagShape
|
python
|
getlogbook__logbook
|
src/logbook/more.py
|
{
"start": 14822,
"end": 16786
}
|
class ____(Handler):
"""A handler that deduplicates log messages.
It emits each unique log record once, along with the number of times it was
emitted.
Example:::
with logbook.more.DedupHandler():
logbook.error("foo")
logbook.error("bar")
logbook.error("foo")
The expected output:
.. code-block:: text
message repeated 2 times: foo
message repeated 1 times: bar
"""
def __init__(
self, format_string="message repeated {count} times: {message}", *args, **kwargs
):
Handler.__init__(self, *args, bubble=False, **kwargs)
self._format_string = format_string
self.clear()
def clear(self):
self._message_to_count = defaultdict(int)
self._unique_ordered_records = []
def pop_application(self):
Handler.pop_application(self)
self.flush()
def pop_thread(self):
Handler.pop_thread(self)
self.flush()
def pop_context(self):
Handler.pop_context(self)
self.flush()
def pop_greenlet(self):
Handler.pop_greenlet(self)
self.flush()
def handle(self, record):
if record.message not in self._message_to_count:
self._unique_ordered_records.append(record)
self._message_to_count[record.message] += 1
return True
def flush(self):
for record in self._unique_ordered_records:
record.message = self._format_string.format(
message=record.message, count=self._message_to_count[record.message]
)
# record.dispatcher is the logger who created the message,
# it's sometimes supressed (by logbook.info for example)
if record.dispatcher is not None:
dispatch = record.dispatcher.call_handlers
else:
dispatch = dispatch_record
dispatch(record)
self.clear()
|
DedupHandler
|
python
|
huggingface__transformers
|
tests/models/pvt/test_image_processing_pvt.py
|
{
"start": 1005,
"end": 2709
}
|
class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.485, 0.456, 0.406],
image_std=[0.229, 0.224, 0.225],
):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
|
PvtImageProcessingTester
|
python
|
qdrant__qdrant-client
|
qdrant_client/grpc/collections_service_pb2_grpc.py
|
{
"start": 211,
"end": 4422
}
|
class ____(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/qdrant.Collections/Get',
request_serializer=collections__pb2.GetCollectionInfoRequest.SerializeToString,
response_deserializer=collections__pb2.GetCollectionInfoResponse.FromString,
)
self.List = channel.unary_unary(
'/qdrant.Collections/List',
request_serializer=collections__pb2.ListCollectionsRequest.SerializeToString,
response_deserializer=collections__pb2.ListCollectionsResponse.FromString,
)
self.Create = channel.unary_unary(
'/qdrant.Collections/Create',
request_serializer=collections__pb2.CreateCollection.SerializeToString,
response_deserializer=collections__pb2.CollectionOperationResponse.FromString,
)
self.Update = channel.unary_unary(
'/qdrant.Collections/Update',
request_serializer=collections__pb2.UpdateCollection.SerializeToString,
response_deserializer=collections__pb2.CollectionOperationResponse.FromString,
)
self.Delete = channel.unary_unary(
'/qdrant.Collections/Delete',
request_serializer=collections__pb2.DeleteCollection.SerializeToString,
response_deserializer=collections__pb2.CollectionOperationResponse.FromString,
)
self.UpdateAliases = channel.unary_unary(
'/qdrant.Collections/UpdateAliases',
request_serializer=collections__pb2.ChangeAliases.SerializeToString,
response_deserializer=collections__pb2.CollectionOperationResponse.FromString,
)
self.ListCollectionAliases = channel.unary_unary(
'/qdrant.Collections/ListCollectionAliases',
request_serializer=collections__pb2.ListCollectionAliasesRequest.SerializeToString,
response_deserializer=collections__pb2.ListAliasesResponse.FromString,
)
self.ListAliases = channel.unary_unary(
'/qdrant.Collections/ListAliases',
request_serializer=collections__pb2.ListAliasesRequest.SerializeToString,
response_deserializer=collections__pb2.ListAliasesResponse.FromString,
)
self.CollectionClusterInfo = channel.unary_unary(
'/qdrant.Collections/CollectionClusterInfo',
request_serializer=collections__pb2.CollectionClusterInfoRequest.SerializeToString,
response_deserializer=collections__pb2.CollectionClusterInfoResponse.FromString,
)
self.CollectionExists = channel.unary_unary(
'/qdrant.Collections/CollectionExists',
request_serializer=collections__pb2.CollectionExistsRequest.SerializeToString,
response_deserializer=collections__pb2.CollectionExistsResponse.FromString,
)
self.UpdateCollectionClusterSetup = channel.unary_unary(
'/qdrant.Collections/UpdateCollectionClusterSetup',
request_serializer=collections__pb2.UpdateCollectionClusterSetupRequest.SerializeToString,
response_deserializer=collections__pb2.UpdateCollectionClusterSetupResponse.FromString,
)
self.CreateShardKey = channel.unary_unary(
'/qdrant.Collections/CreateShardKey',
request_serializer=collections__pb2.CreateShardKeyRequest.SerializeToString,
response_deserializer=collections__pb2.CreateShardKeyResponse.FromString,
)
self.DeleteShardKey = channel.unary_unary(
'/qdrant.Collections/DeleteShardKey',
request_serializer=collections__pb2.DeleteShardKeyRequest.SerializeToString,
response_deserializer=collections__pb2.DeleteShardKeyResponse.FromString,
)
|
CollectionsStub
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/control_flow/py_func_test.py
|
{
"start": 18805,
"end": 31569
}
|
class ____(PyFuncTestBase):
"""Encapsulates tests for eager_py_func only."""
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputInt32(self):
a = array_ops.ones((3, 3), dtype=dtypes.int32)
x = array_ops.ones((3, 1), dtype=dtypes.int32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.int32)
ret = self.evaluate(output)
self.assertAllEqual(ret, [[3], [3], [3]])
@test_util.run_in_graph_and_eager_modes
def testRenamedDeviceInTestClusterCorrectlyIdentifiedAsLocalhost(self):
if context.executing_eagerly():
self.skipTest("b/126565353: We don't test eager's remote execution.")
workers, _ = test_util.create_local_cluster(num_workers=1, num_ps=0)
worker = workers[0]
session = session_lib.Session(worker.target)
with ops.device("/job:worker/task:0/cpu:0"):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
ret = session.run(output)
self.assertAllClose(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
def testEagerSingleOutputFloat32(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
ret = self.evaluate(output)
self.assertAllClose(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
def testEagerArrayOutput(self):
with test_util.device(use_gpu=True):
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
output = script_ops.eager_py_func(
lambda a, x: [matmul(a, x)], inp=[a, x], Tout=[dtypes.float32])
ret = self.evaluate(output)
self.assertAllEqual(ret, [[[3.0], [3.0], [3.0]]])
@test_util.run_in_graph_and_eager_modes
def testEagerReturnNone(self):
with test_util.device(use_gpu=True):
def no_return_value():
return
output = script_ops.eager_py_func(no_return_value, inp=[], Tout=[])
ret = self.evaluate(output)
if context.executing_eagerly():
self.assertEqual(len(ret), 0)
else:
self.assertIsNone(ret)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_tfrt("b/180469928")
def testEagerPyFuncInDefun(self):
with test_util.device(use_gpu=True):
def wrapper():
a = array_ops.ones((3, 3), dtype=dtypes.float32)
x = array_ops.ones((3, 1), dtype=dtypes.float32)
return script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)
wrapped = def_function.function(wrapper)
ret = self.evaluate(wrapped())
self.assertAllEqual(ret, [[3.0], [3.0], [3.0]])
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testEagerExceptionHandling(self):
with test_util.device(use_gpu=True):
self.verifyExceptionHandling(
ValueError, errors.InvalidArgumentError, eager=True)
self.verifyExceptionHandling(
TypeError, errors.InvalidArgumentError, eager=True)
self.verifyExceptionHandling(
StopIteration, errors.OutOfRangeError, eager=True)
self.verifyExceptionHandling(
MemoryError, errors.ResourceExhaustedError, eager=True)
self.verifyExceptionHandling(
NotImplementedError, errors.UnimplementedError, eager=True)
class WeirdError(Exception):
pass
self.verifyExceptionHandling(WeirdError, errors.UnknownError, eager=True)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testEagerReturningVariableRaisesError(self):
def return_variable():
return resource_variable_ops.ResourceVariable(0.0)
with self.assertRaisesRegex(errors.UnknownError,
"Attempting to return a variable"):
output = script_ops.eager_py_func(
return_variable, inp=[], Tout=dtypes.float32)
self.evaluate(output)
@test_util.run_in_graph_and_eager_modes
def testTapeCache(self):
# Testing for b/198962664 (gh:#51839)
old_cache_size = len(script_ops.tape_cache)
def f(x):
return x**2
x = constant_op.constant(3.0)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
# No cache if there is no active tape
self.assertEqual(len(script_ops.tape_cache), old_cache_size)
with backprop.GradientTape() as tape:
tape.watch(x)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
# A new cache entry is created when running eagerly.
if context.executing_eagerly():
self.assertEqual(len(script_ops.tape_cache), old_cache_size + 1)
else:
self.assertEqual(len(script_ops.tape_cache), old_cache_size)
dy_dx = tape.gradient(y, x)
# Force a evaluation.
self.evaluate(dy_dx)
# Cache entry consumed after gradient calculation.
self.assertEqual(len(script_ops.tape_cache), old_cache_size)
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTape(self):
def f(x):
return x**2
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
tape.watch(x)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
dy_dx = tape.gradient(y, x)
self.assertAllClose(self.evaluate(dy_dx), 6.0)
# Test complex values
x = constant_op.constant(3.0 + 3.0j)
with backprop.GradientTape() as tape:
tape.watch(x)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.complex128)
dy_dx = tape.gradient(y, x)
# Gradient of complex will be the conj
self.assertAllClose(self.evaluate(dy_dx), 6.0 - 6.0j)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraph(self):
def f(x):
return x**2
x = constant_op.constant(3.0)
y = script_ops.eager_py_func(f, inp=[x], Tout=dtypes.float32)
dy_dx = gradients_impl.gradients(y, x)[0]
self.assertEqual(self.evaluate(dy_dx), 6.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphTwoOutputs(self):
def f(x, y):
return x * y, x / y
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
fa, fb = script_ops.eager_py_func(f, inp=[x, y],
Tout=[dtypes.float32, dtypes.float32])
dy_dx = gradients_impl.gradients(fa + fb, x)[0]
self.assertEqual(self.evaluate(dy_dx), 2.5)
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTapeMultipleArgs(self):
def f(x, y):
return x**2 + y**2
x = constant_op.constant(3.0)
y = constant_op.constant(4.0)
with backprop.GradientTape() as tape:
tape.watch(x)
tape.watch(y)
z = script_ops.eager_py_func(f, inp=[x, y], Tout=dtypes.float32)
dz_dx, dz_dy = tape.gradient(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphMultipleArgs(self):
def f(x, y):
return x**2 + y**2
x = constant_op.constant(3.0)
y = constant_op.constant(4.0)
z = script_ops.eager_py_func(f, inp=[x, y], Tout=dtypes.float32)
dz_dx, dz_dy = gradients_impl.gradients(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 6.0)
self.assertEqual(self.evaluate(dz_dy), 8.0)
@test_util.run_v1_only("b/120545219")
def testEagerGradientGraphLogHuber(self):
def log_huber(x, m):
if math_ops.abs(x) <= m:
return x**2
else:
return m**2 * (1 - 2 * math_ops.log(m) + math_ops.log(x**2))
x = array_ops.placeholder(dtypes.float32)
m = array_ops.placeholder(dtypes.float32)
y = script_ops.eager_py_func(
func=log_huber, inp=[x, m], Tout=dtypes.float32)
dy_dx = gradients_impl.gradients(y, x)[0]
with self.cached_session() as sess:
# Takes the first branch of log_huber.
y, dy_dx = sess.run([y, dy_dx], feed_dict={x: 1.0, m: 2.0})
self.assertEqual(y, 1.0)
self.assertEqual(dy_dx, 2.0)
@test_util.run_v1_only("b/120545219")
def testEagerRespectsDevicePlacementOfOp(self):
def f(x):
return math_ops.square(x)
def g(x):
return math_ops.add(x, x)
with ops.device("/CPU:0"):
# Explicitly ask for the py_funcs to execute on CPU, even if
# a GPU is available.
x = array_ops.placeholder(dtypes.float32)
y = script_ops.eager_py_func(func=f, inp=[x], Tout=dtypes.float32)
z = script_ops.eager_py_func(func=g, inp=[y], Tout=dtypes.float32)
with self.session() as sess:
output = sess.run(z, feed_dict={x: 3.0})
self.assertEqual(output, 18.0)
@test_util.run_in_graph_and_eager_modes
def testEagerPyFuncOnGPUWithStrings(self):
def fn(a):
return str(a.dtype)
x = constant_op.constant("x", dtype=dtypes.string)
output = script_ops.eager_py_func(fn, inp=[x], Tout=dtypes.string)
self.assertEqual(self.evaluate(output), "<dtype: 'string'>".encode("utf8"))
@test_util.run_in_graph_and_eager_modes
def testEagerPyFuncNotACallable(self):
x = constant_op.constant("x", dtype=dtypes.string)
with self.assertRaisesRegex(ValueError, "callable"):
_ = script_ops.eager_py_func(x, inp=[x], Tout=dtypes.string)
def testUnsupportedToutType(self):
with self.assertRaisesRegex(
TypeError, "Cannot convert .* to a TensorFlow DType."):
script_ops.eager_py_func(lambda x: x, [1], [{}])
def testRaggedTensorArg(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6]])
y, = script_ops.eager_py_func(math_ops.reduce_sum, [x], [dtypes.int32])
self.assertAllEqual(y, 21)
def testRaggedTensorReturn(self):
def fn(v, l):
return ragged_tensor.RaggedTensor.from_row_lengths(v, l)
values = [1, 2, 3, 4, 5, 6]
lengths = constant_op.constant([3, 1, 2], dtypes.int64)
out_signature = [ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)]
y, = script_ops.eager_py_func(fn, [values, lengths], out_signature)
self.assertIsInstance(y, ragged_tensor.RaggedTensor)
self.assertAllEqual(y, [[1, 2, 3], [4], [5, 6]])
def testRaggedTensorBroadcast(self):
# Check that eager_py_func preserves output shape information, which is
# required by broadcasting.
def fn(x):
return 2 * x
def foo(x):
spec = ragged_tensor.RaggedTensorSpec.from_value(x)
res = script_ops.eager_py_func(fn, [x], spec)
return x + res
x = ragged_factory_ops.constant([[1.0, 2.0], [3.0]])
expected_result = [[3.0, 6.0], [9.0]]
result1 = foo(x)
self.assertAllEqual(result1, expected_result)
result2 = def_function.function(foo)(x)
self.assertAllEqual(result2, expected_result)
def testRaggedExpectedListGotList(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6]])
x_spec = type_spec.type_spec_from_value(x)
y, = script_ops.eager_py_func(lambda v: [v], [x], [x_spec])
self.assertAllEqual(y, x)
def testRaggedExpectedListGotTuple(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6]])
x_spec = type_spec.type_spec_from_value(x)
y, = script_ops.eager_py_func(lambda v: (v,), [x], [x_spec])
self.assertAllEqual(y, x)
def testRaggedExpectedListGotSingleValue(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6]])
x_spec = type_spec.type_spec_from_value(x)
y, = script_ops.eager_py_func(lambda v: v, [x], [x_spec])
self.assertAllEqual(y, x)
def testRaggedNoReturnValue(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6]])
result = self.evaluate(script_ops.eager_py_func(lambda v: None, [x], []))
if context.executing_eagerly():
self.assertEqual(result, [])
else:
self.assertIsNone(result)
def testRaggedBadReturnTypeExpectedTensorReturnedRagged(self):
rt = ragged_factory_ops.constant([[1, 2], [3, 4, 5]])
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"py_function: func=.* returned .* which did not match Tout=.*"):
result = script_ops.eager_py_func(lambda x: x + 3, [rt], [dtypes.int32])
self.evaluate(result)
def testRaggedBadReturnTypeExpectedRaggedReturnedTensor(self):
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"py_function: func=.* returned .* which did not match Tout=.*"):
result = script_ops.eager_py_func(
func=lambda x: x,
inp=[constant_op.constant([[1, 2, 3]])],
Tout=[ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)])
self.evaluate(result)
if __name__ == "__main__":
test.main()
|
EagerPyFuncTest
|
python
|
keras-team__keras
|
keras/src/ops/nn.py
|
{
"start": 14090,
"end": 14896
}
|
class ____(Operation):
def call(self, x):
return backend.nn.selu(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.selu", "keras.ops.nn.selu"])
def selu(x):
"""Scaled Exponential Linear Unit (SELU) activation function.
It is defined as:
`f(x) = scale * alpha * (exp(x) - 1.) for x < 0`,
`f(x) = scale * x for x >= 0`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_selu = keras.ops.selu(x)
>>> print(x_selu)
array([-1.11133055, 0., 1.05070098], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Selu().symbolic_call(x)
return backend.nn.selu(x)
|
Selu
|
python
|
davidhalter__jedi
|
test/completion/goto.py
|
{
"start": 1353,
"end": 1404
}
|
class ____:
def foo(self):
print("foo")
|
Foo
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_xcom.py
|
{
"start": 6907,
"end": 13803
}
|
class ____:
@pytest.fixture
def setup_for_xcom_get_one(self, task_instance, push_simple_json_xcom):
push_simple_json_xcom(ti=task_instance, key="xcom_1", value={"key": "value"})
@pytest.mark.usefixtures("setup_for_xcom_get_one")
def test_xcom_get_one(self, session, task_instance):
stored_value = session.execute(
XComModel.get_many(
key="xcom_1",
dag_ids=task_instance.dag_id,
task_ids=task_instance.task_id,
run_id=task_instance.run_id,
).with_only_columns(XComModel.value)
).first()
assert XComModel.deserialize_value(stored_value) == {"key": "value"}
@pytest.fixture
def tis_for_xcom_get_one_from_prior_date(self, task_instance_factory, push_simple_json_xcom):
date1 = timezone.datetime(2021, 12, 3, 4, 56)
ti1 = task_instance_factory(dag_id="dag", logical_date=date1, task_id="task_1")
ti2 = task_instance_factory(
dag_id="dag",
logical_date=date1 + datetime.timedelta(days=1),
task_id="task_1",
)
# The earlier run pushes an XCom, but not the later run, but the later
# run can get this earlier XCom with ``include_prior_dates``.
push_simple_json_xcom(ti=ti1, key="xcom_1", value={"key": "value"})
return ti1, ti2
@pytest.fixture
def tis_for_xcom_get_one_from_prior_date_without_logical_date(
self, task_instance_factory, push_simple_json_xcom
):
date1 = timezone.datetime(2021, 12, 3, 4, 56)
ti1 = task_instance_factory(dag_id="dag", logical_date=None, task_id="task_1", run_after=date1)
ti2 = task_instance_factory(
dag_id="dag",
logical_date=None,
run_after=date1 + datetime.timedelta(days=1),
task_id="task_1",
)
# The earlier run pushes an XCom, but not the later run, but the later
# run can get this earlier XCom with ``include_prior_dates``.
push_simple_json_xcom(ti=ti1, key="xcom_1", value={"key": "value"})
return ti1, ti2
def test_xcom_get_one_from_prior_date(self, session, tis_for_xcom_get_one_from_prior_date):
_, ti2 = tis_for_xcom_get_one_from_prior_date
retrieved_value = session.execute(
XComModel.get_many(
run_id=ti2.run_id,
key="xcom_1",
task_ids="task_1",
dag_ids="dag",
include_prior_dates=True,
).with_only_columns(XComModel.value)
).first()
assert XComModel.deserialize_value(retrieved_value) == {"key": "value"}
def test_xcom_get_one_from_prior_date_with_no_logical_dates(
self, session, tis_for_xcom_get_one_from_prior_date_without_logical_date
):
_, ti2 = tis_for_xcom_get_one_from_prior_date_without_logical_date
retrieved_value = session.execute(
XComModel.get_many(
run_id=ti2.run_id,
key="xcom_1",
task_ids="task_1",
dag_ids="dag",
include_prior_dates=True,
).with_only_columns(XComModel.value)
).first()
assert XComModel.deserialize_value(retrieved_value) == {"key": "value"}
@pytest.fixture
def setup_for_xcom_get_many_single_argument_value(self, task_instance, push_simple_json_xcom):
push_simple_json_xcom(ti=task_instance, key="xcom_1", value={"key": "value"})
@pytest.mark.usefixtures("setup_for_xcom_get_many_single_argument_value")
def test_xcom_get_many_single_argument_value(self, session, task_instance):
stored_xcoms = session.scalars(
XComModel.get_many(
key="xcom_1",
dag_ids=task_instance.dag_id,
task_ids=task_instance.task_id,
run_id=task_instance.run_id,
)
).all()
assert len(stored_xcoms) == 1
assert stored_xcoms[0].key == "xcom_1"
assert stored_xcoms[0].value == json.dumps({"key": "value"})
@pytest.fixture
def setup_for_xcom_get_many_multiple_tasks(self, task_instances, push_simple_json_xcom):
ti1, ti2 = task_instances
push_simple_json_xcom(ti=ti1, key="xcom_1", value={"key1": "value1"})
push_simple_json_xcom(ti=ti2, key="xcom_1", value={"key2": "value2"})
@pytest.mark.usefixtures("setup_for_xcom_get_many_multiple_tasks")
def test_xcom_get_many_multiple_tasks(self, session, task_instance):
stored_xcoms = session.scalars(
XComModel.get_many(
key="xcom_1",
dag_ids=task_instance.dag_id,
task_ids=["task_1", "task_2"],
run_id=task_instance.run_id,
)
).all()
sorted_values = [x.value for x in sorted(stored_xcoms, key=operator.attrgetter("task_id"))]
assert sorted_values == [json.dumps({"key1": "value1"}), json.dumps({"key2": "value2"})]
@pytest.fixture
def tis_for_xcom_get_many_from_prior_dates(self, task_instance_factory, push_simple_json_xcom):
date1 = timezone.datetime(2021, 12, 3, 4, 56)
date2 = date1 + datetime.timedelta(days=1)
ti1 = task_instance_factory(dag_id="dag", task_id="task_1", logical_date=date1)
ti2 = task_instance_factory(dag_id="dag", task_id="task_1", logical_date=date2)
push_simple_json_xcom(ti=ti1, key="xcom_1", value={"key1": "value1"})
push_simple_json_xcom(ti=ti2, key="xcom_1", value={"key2": "value2"})
return ti1, ti2
def test_xcom_get_many_from_prior_dates(self, session, tis_for_xcom_get_many_from_prior_dates):
ti1, ti2 = tis_for_xcom_get_many_from_prior_dates
session.add(ti1) # for some reason, ti1 goes out of the session scope
stored_xcoms = session.scalars(
XComModel.get_many(
run_id=ti2.run_id,
key="xcom_1",
dag_ids="dag",
task_ids="task_1",
include_prior_dates=True,
)
).all()
# The retrieved XComs should be ordered by logical date, latest first.
assert [x.value for x in stored_xcoms] == list(
map(lambda j: json.dumps(j), [{"key2": "value2"}, {"key1": "value1"}])
)
assert [x.logical_date for x in stored_xcoms] == [ti2.logical_date, ti1.logical_date]
def test_xcom_get_invalid_key(self, session, task_instance):
"""Test that getting an XCom with an invalid key raises a ValueError."""
with pytest.raises(ValueError, match="XCom key must be a non-empty string. Received: ''"):
XComModel.get_many(
key="", # Invalid key
dag_ids=task_instance.dag_id,
task_ids=task_instance.task_id,
run_id=task_instance.run_id,
)
|
TestXComGet
|
python
|
optuna__optuna
|
optuna/visualization/_slice.py
|
{
"start": 1235,
"end": 1330
}
|
class ____(NamedTuple):
target_name: str
subplots: list[_SliceSubplotInfo]
|
_SlicePlotInfo
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta/beta_tool_param.py
|
{
"start": 661,
"end": 1911
}
|
class ____(TypedDict, total=False):
input_schema: Required[InputSchema]
"""[JSON schema](https://json-schema.org/draft/2020-12) for this tool's input.
This defines the shape of the `input` that your tool accepts and that the model
will produce.
"""
name: Required[str]
"""Name of the tool.
This is how the tool will be called by the model and in `tool_use` blocks.
"""
allowed_callers: List[Literal["direct", "code_execution_20250825"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
defer_loading: bool
"""If true, tool will not be included in initial system prompt.
Only loaded when returned via tool_reference from tool search.
"""
description: str
"""Description of what this tool does.
Tool descriptions should be as detailed as possible. The more information that
the model has about what the tool is and how to use it, the better it will
perform. You can use natural language descriptions to reinforce important
aspects of the tool input JSON schema.
"""
input_examples: Iterable[Dict[str, object]]
strict: bool
type: Optional[Literal["custom"]]
|
BetaToolParam
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_workflows.py
|
{
"start": 10729,
"end": 11935
}
|
class ____:
@mock.patch(BASE_PATH.format("Execution"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
op = WorkflowsCancelExecutionOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
execution_id=EXECUTION_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
context = mock.MagicMock()
result = op.execute(context=context)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.cancel_execution.assert_called_once_with(
workflow_id=WORKFLOW_ID,
execution_id=EXECUTION_ID,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == mock_object.to_dict.return_value
|
TestWorkflowExecutionsCancelExecutionOperator
|
python
|
mlflow__mlflow
|
mlflow/catboost/__init__.py
|
{
"start": 12662,
"end": 13236
}
|
class ____:
def __init__(self, cb_model):
self.cb_model = cb_model
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.cb_model
def predict(self, dataframe, params: dict[str, Any] | None = None):
"""
Args:
dataframe: Model input data.
params: Additional parameters to pass to the model for inference.
Returns:
Model predictions.
"""
return self.cb_model.predict(dataframe)
# TODO: Support autologging
|
_CatboostModelWrapper
|
python
|
django__django
|
django/test/html.py
|
{
"start": 6278,
"end": 6511
}
|
class ____(Element):
def __init__(self):
super().__init__(None, ())
def __str__(self):
return "".join(
[html.escape(c) if isinstance(c, str) else str(c) for c in self.children]
)
|
RootElement
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/ruff/RUF045.py
|
{
"start": 109,
"end": 752
}
|
class ____:
# Errors
no_annotation = r"foo"
missing = MISSING
field = field()
# No errors
__slots__ = ("foo", "bar")
__radd__ = __add__
_private_attr = 100
with_annotation: str
with_annotation_and_default: int = 42
with_annotation_and_field_specifier: bytes = field()
class_var_no_arguments: ClassVar = 42
class_var_with_arguments: ClassVar[int] = 42
init_var_no_arguments: InitVar = "lorem"
init_var_with_arguments: InitVar[str] = "ipsum"
kw_only: KW_ONLY
tu, ple, [unp, ack, ing] = (0, 1, 2, [3, 4, 5])
mul, [ti, ple] = (a, ssign), ment = {1: b"3", "2": 4}, [6j, 5]
|
C
|
python
|
davidhalter__jedi
|
jedi/plugins/django.py
|
{
"start": 10289,
"end": 10656
}
|
class ____(ValueWrapper):
def __init__(self, method, model_cls):
super().__init__(method)
self._model_cls = model_cls
def py__get__(self, instance, class_value):
return ValueSet({QuerySetBoundMethodWrapper(v, self._model_cls)
for v in self._wrapped_value.py__get__(instance, class_value)})
|
QuerySetMethodWrapper
|
python
|
chroma-core__chroma
|
chromadb/utils/embedding_functions/schemas/bm25_tokenizer.py
|
{
"start": 2569,
"end": 3277
}
|
class ____:
"""Adapter that provides the uniform `stem` API used across languages."""
def __init__(self) -> None:
try:
import snowballstemmer
except ImportError:
raise ValueError(
"The snowballstemmer python package is not installed. Please install it with `pip install snowballstemmer`"
)
self._stemmer = snowballstemmer.stemmer("english")
def stem(self, token: str) -> str:
return cast(str, self._stemmer.stemWord(token))
@lru_cache(maxsize=1)
def get_english_stemmer() -> SnowballStemmer:
"""Return a cached Snowball stemmer for English."""
return _SnowballStemmerAdapter()
|
_SnowballStemmerAdapter
|
python
|
getsentry__sentry
|
tests/sentry/seer/fetch_issues/test_by_function_name.py
|
{
"start": 3056,
"end": 9860
}
|
class ____(CreateEventTestCase):
def setUp(self):
super().setUp()
self.event_timestamp_start = datetime.now(UTC) - timedelta(days=NUM_DAYS_AGO)
self.event_timestamp_end = datetime.now(UTC)
def test_empty_projects_list(self):
result = _get_issues_for_file(
projects=[],
sentry_filenames=["foo.py"],
function_names=["test_func"],
event_timestamp_start=self.event_timestamp_start,
event_timestamp_end=self.event_timestamp_end,
)
assert result == []
@patch("sentry.seer.fetch_issues.by_function_name.raw_snql_query")
def test_snuba_query_exception(self, mock_query):
mock_query.side_effect = Exception("Snuba error")
result = _get_issues_for_file(
projects=[self.project],
sentry_filenames=["foo.py"],
function_names=["test_func"],
event_timestamp_start=self.event_timestamp_start,
event_timestamp_end=self.event_timestamp_end,
)
assert result == []
def test_basic_matching(self):
# Create events that should match our search criteria
group = self._create_event(
function_names=["target_func", "other_func"],
filenames=["test.py", "other.py"],
user_id="1",
).group
result = _get_issues_for_file(
projects=[self.project],
sentry_filenames=["test.py"],
function_names=["target_func"],
event_timestamp_start=self.event_timestamp_start,
event_timestamp_end=self.event_timestamp_end,
)
# Should find the matching issue
assert len(result) > 0
group_ids = [issue["group_id"] for issue in result]
assert group.id in group_ids
def test_filename_mismatch(self):
# Create event with different filename
group = self._create_event(
function_names=["target_func"],
filenames=["other.py"],
user_id="1",
).group
result = _get_issues_for_file(
projects=[self.project],
sentry_filenames=["test.py"], # Different filename
function_names=["target_func"],
event_timestamp_start=self.event_timestamp_start,
event_timestamp_end=self.event_timestamp_end,
)
# Should not find the issue due to filename mismatch
group_ids = [issue["group_id"] for issue in result]
assert group.id not in group_ids
def test_function_name_mismatch(self):
# Create event with different function name
group = self._create_event(
function_names=["other_func"],
filenames=["test.py"],
user_id="1",
).group
result = _get_issues_for_file(
projects=[self.project],
sentry_filenames=["test.py"],
function_names=["target_func"], # Different function name
event_timestamp_start=self.event_timestamp_start,
event_timestamp_end=self.event_timestamp_end,
)
# Should not find the issue due to function name mismatch
group_ids = [issue["group_id"] for issue in result]
assert group.id not in group_ids
    def test_event_too_old(self):
        """Events older than the search window (NUM_DAYS_AGO) are excluded."""
        # Create old event - use a smaller offset to avoid timestamp validation errors
        group = self._create_event(
            function_names=["target_func"],
            filenames=["test.py"],
            # One day past the window boundary, so it falls outside the query range.
            timestamp=before_now(days=NUM_DAYS_AGO + 1).isoformat(),
            user_id="1",
        ).group
        result = _get_issues_for_file(
            projects=[self.project],
            sentry_filenames=["test.py"],
            function_names=["target_func"],
            event_timestamp_start=self.event_timestamp_start,
            event_timestamp_end=self.event_timestamp_end,
        )
        # Should not find the old event
        group_ids = [issue["group_id"] for issue in result]
        assert group.id not in group_ids
    def test_javascript_simple(self):
        """Matching works for JavaScript frames too — the helper is language-agnostic."""
        # Test with JavaScript files to ensure language-agnostic functionality
        group = self._create_event(
            function_names=["component.blue", "world"],
            filenames=["foo.js", "baz.js"],
            user_id="1",
        ).group
        result = _get_issues_for_file(
            projects=[self.project],
            sentry_filenames=["baz.js"],
            function_names=["world"],
            event_timestamp_start=self.event_timestamp_start,
            event_timestamp_end=self.event_timestamp_end,
        )
        # Should find the matching JS issue
        assert len(result) > 0
        group_ids = [issue["group_id"] for issue in result]
        assert group.id in group_ids
    def test_stackframe_limit_edge_case(self):
        """A function sitting exactly on the last searchable stackframe is still found."""
        # Create event with function name within the searchable stackframe range
        # The query searches the last STACKFRAME_COUNT frames (negative indices)
        # So put our target function in the last frame (which will be index -1)
        function_names = ["other_func" for _ in range(STACKFRAME_COUNT - 1)] + ["world"]
        filenames = ["other.py" for _ in range(STACKFRAME_COUNT - 1)] + ["test.py"]
        group = self._create_event(
            function_names=function_names,
            filenames=filenames,
            user_id="1",
        ).group
        result = _get_issues_for_file(
            projects=[self.project],
            sentry_filenames=["test.py"],
            function_names=["world"],  # Should find this in last frame
            event_timestamp_start=self.event_timestamp_start,
            event_timestamp_end=self.event_timestamp_end,
        )
        # Should find the issue if the function is within stackframe limit
        group_ids = [issue["group_id"] for issue in result]
        assert group.id in group_ids
    def test_multiple_matching_issues(self):
        """All matching groups are returned, not just the first hit."""
        # Create multiple events that should match.  Distinct culprits/users keep
        # them from being grouped into a single issue.
        group1 = self._create_event(
            function_names=["target_func"], filenames=["test.py"], user_id="1", culprit="issue1"
        ).group
        group2 = self._create_event(
            function_names=["target_func"], filenames=["test.py"], user_id="2", culprit="issue2"
        ).group
        result = _get_issues_for_file(
            projects=[self.project],
            sentry_filenames=["test.py"],
            function_names=["target_func"],
            event_timestamp_start=self.event_timestamp_start,
            event_timestamp_end=self.event_timestamp_end,
        )
        # Should find both matching issues
        group_ids = [issue["group_id"] for issue in result]
        assert group1.id in group_ids
        assert group2.id in group_ids
|
TestGetIssuesForFile
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatterpolar/marker/colorbar/title/_font.py
|
{
"start": 233,
"end": 9974
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolar.marker.colorbar.title"
_path_str = "scatterpolar.marker.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterpolar.m
arker.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolar.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.marker.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Font
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0015_add_project_allow_promos.py
|
{
"start": 100,
"end": 604
}
|
class ____(migrations.Migration):
    """Add the ``Project.allow_promos`` boolean (sponsor-advertisement opt-in).

    Defaults to ``True`` so existing projects keep showing sponsor ads
    unless they explicitly opt out.
    """

    # Adding a nullable/defaulted column is non-destructive, so it can run
    # after the new application code is already deployed.
    safe = Safe.after_deploy()

    dependencies = [
        ("projects", "0014_add-state-tracking"),
    ]

    operations = [
        migrations.AddField(
            model_name="project",
            name="allow_promos",
            field=models.BooleanField(
                default=True,
                help_text="Allow sponsor advertisements on my project documentation",
                verbose_name="Sponsor advertisements",
            ),
        ),
    ]
|
Migration
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/metrics/multicolumn_map_metrics/multicolumn_sum_equal.py
|
{
"start": 521,
"end": 1722
}
|
class ____(MulticolumnMapMetricProvider):
    """Row-level map metric: true where the values in ``column_list`` sum to ``sum_total``.

    Provides one condition implementation per execution engine (pandas,
    SQLAlchemy, Spark); each returns a boolean row-wise expression.
    """

    condition_metric_name = "multicolumn_sum.equal"
    condition_domain_keys = (
        "batch_id",
        "table",
        "column_list",
        "row_condition",
        "condition_parser",
        "ignore_row_if",
    )
    # The expected per-row total is passed as a value key.
    condition_value_keys = ("sum_total",)

    @multicolumn_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column_list, **kwargs):
        sum_total = kwargs.get("sum_total")
        # skipna=False: a row containing NaN sums to NaN, so it compares unequal
        # and is reported as non-matching.
        row_wise_cond = column_list.sum(axis=1, skipna=False) == sum_total
        return row_wise_cond

    @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
    def _sqlalchemy(cls, column_list, **kwargs):
        sum_total = kwargs.get("sum_total")
        # Builtin sum() over SQLAlchemy column expressions builds "col1 + col2 + ...";
        # SQL NULL propagates, so rows with NULLs evaluate to NULL (not matched).
        row_wise_cond = sum(column_list) == sum_total
        return row_wise_cond

    @multicolumn_condition_partial(engine=SparkDFExecutionEngine)
    def _spark(cls, column_list, **kwargs):
        sum_total = kwargs.get("sum_total")
        # NOTE(review): COALESCE(col, 0) treats NULLs as zero here, whereas the
        # pandas/SQLAlchemy implementations treat rows with missing values as
        # non-matching — verify this engine divergence is intentional.
        expression = "+".join(
            [f"COALESCE({column_name}, 0)" for column_name in column_list.columns]
        )
        row_wise_cond = F.expr(expression) == F.lit(sum_total)
        return row_wise_cond
|
MulticolumnSumEqual
|
python
|
great-expectations__great_expectations
|
tests/experimental/metric_repository/test_column_filter.py
|
{
"start": 1094,
"end": 12814
}
|
class ____:
"""Test cases for ColumnFilter class."""
@pytest.mark.unit
def test_init_with_defaults(self):
"""Test initialization with default parameters."""
column_filter = ColumnFilter()
assert column_filter._include_column_names == []
assert column_filter._exclude_column_names == []
assert column_filter._include_column_name_suffixes == []
assert column_filter._exclude_column_name_suffixes == []
assert column_filter._include_semantic_types == []
assert column_filter._exclude_semantic_types == []
@pytest.mark.unit
def test_init_with_parameters(self):
"""Test initialization with specific parameters."""
column_filter = ColumnFilter(
include_column_names=["col1", "col2"],
exclude_column_names=["col3"],
include_semantic_types=[SemanticDomainTypes.NUMERIC],
exclude_semantic_types=[SemanticDomainTypes.TEXT],
)
assert column_filter._include_column_names == ["col1", "col2"]
assert column_filter._exclude_column_names == ["col3"]
assert column_filter._include_semantic_types == [SemanticDomainTypes.NUMERIC]
assert column_filter._exclude_semantic_types == [SemanticDomainTypes.TEXT]
@pytest.mark.unit
def test_normalize_semantic_types_none(self):
"""Test semantic type normalization with None."""
column_filter = ColumnFilter()
result = column_filter._normalize_semantic_types(None)
assert result == []
@pytest.mark.unit
def test_normalize_semantic_types_single(self):
"""Test semantic type normalization with single type."""
column_filter = ColumnFilter()
result = column_filter._normalize_semantic_types(SemanticDomainTypes.NUMERIC)
assert result == [SemanticDomainTypes.NUMERIC]
@pytest.mark.unit
def test_normalize_semantic_types_list(self):
"""Test semantic type normalization with list."""
column_filter = ColumnFilter()
types = [SemanticDomainTypes.NUMERIC, SemanticDomainTypes.TEXT]
result = column_filter._normalize_semantic_types(types)
assert result == types
@pytest.mark.unit
def test_get_table_column_names(self, mock_validator, sample_column_names):
"""Test getting table column names."""
mock_validator.get_metric.return_value = sample_column_names
column_filter = ColumnFilter()
result = column_filter._get_table_column_names(mock_validator)
assert result == sample_column_names
mock_validator.get_metric.assert_called_once()
@pytest.mark.unit
def test_apply_column_name_filters_no_filters(self, sample_column_names):
"""Test column name filtering with no filters applied."""
column_filter = ColumnFilter()
result = column_filter._apply_column_name_filters(sample_column_names)
assert result == sample_column_names
@pytest.mark.unit
def test_apply_column_name_filters_include_names(self, sample_column_names):
"""Test column name filtering with include names."""
column_filter = ColumnFilter(include_column_names=["id", "name"])
result = column_filter._apply_column_name_filters(sample_column_names)
assert result == ["id", "name"]
@pytest.mark.unit
def test_apply_column_name_filters_exclude_names(self, sample_column_names):
"""Test column name filtering with exclude names."""
column_filter = ColumnFilter(exclude_column_names=["id", "description"])
result = column_filter._apply_column_name_filters(sample_column_names)
expected = ["name", "age", "salary", "created_at", "is_active"]
assert result == expected
@pytest.mark.unit
def test_apply_column_name_filters_include_suffixes(self, sample_column_names):
"""Test column name filtering with include suffixes."""
column_filter = ColumnFilter(include_column_name_suffixes=["_at", "age"])
result = column_filter._apply_column_name_filters(sample_column_names)
assert result == ["age", "created_at"]
@pytest.mark.unit
def test_apply_column_name_filters_exclude_suffixes(self, sample_column_names):
"""Test column name filtering with exclude suffixes."""
column_filter = ColumnFilter(exclude_column_name_suffixes=["_at", "ion"])
result = column_filter._apply_column_name_filters(sample_column_names)
expected = ["id", "name", "age", "salary", "is_active"]
assert result == expected
@pytest.mark.unit
def test_infer_semantic_type_numeric_integer(self, sample_column_types):
"""Test semantic type inference for integer columns."""
column_filter = ColumnFilter()
result = column_filter._infer_semantic_type_from_column_type(sample_column_types, "id")
assert result == SemanticDomainTypes.NUMERIC
@pytest.mark.unit
def test_infer_semantic_type_numeric_decimal(self, sample_column_types):
"""Test semantic type inference for decimal columns."""
column_filter = ColumnFilter()
result = column_filter._infer_semantic_type_from_column_type(sample_column_types, "salary")
assert result == SemanticDomainTypes.NUMERIC
@pytest.mark.unit
def test_infer_semantic_type_text(self, sample_column_types):
"""Test semantic type inference for text columns."""
column_filter = ColumnFilter()
result = column_filter._infer_semantic_type_from_column_type(sample_column_types, "name")
assert result == SemanticDomainTypes.TEXT
@pytest.mark.unit
def test_infer_semantic_type_datetime(self, sample_column_types):
"""Test semantic type inference for datetime columns."""
column_filter = ColumnFilter()
result = column_filter._infer_semantic_type_from_column_type(
sample_column_types, "created_at"
)
assert result == SemanticDomainTypes.DATETIME
@pytest.mark.unit
def test_infer_semantic_type_boolean(self, sample_column_types):
"""Test semantic type inference for boolean columns."""
column_filter = ColumnFilter()
result = column_filter._infer_semantic_type_from_column_type(
sample_column_types, "is_active"
)
assert result == SemanticDomainTypes.LOGIC
@pytest.mark.unit
def test_infer_semantic_type_unknown(self, sample_column_types):
"""Test semantic type inference for unknown columns."""
column_filter = ColumnFilter()
result = column_filter._infer_semantic_type_from_column_type(
sample_column_types, "nonexistent"
)
assert result == SemanticDomainTypes.UNKNOWN
@pytest.mark.unit
def test_infer_semantic_type_spark_backticks(self):
"""Test semantic type inference with Spark backtick column names."""
column_types = [{"name": "`column_name`", "type": "INTEGER"}]
column_filter = ColumnFilter()
result = column_filter._infer_semantic_type_from_column_type(column_types, "column_name")
assert result == SemanticDomainTypes.NUMERIC
@pytest.mark.unit
def test_build_semantic_type_map(self, mock_validator, sample_column_types):
"""Test building semantic type map."""
mock_validator.get_metric.return_value = sample_column_types
column_filter = ColumnFilter()
column_names = ["id", "name", "created_at"]
result = column_filter._build_semantic_type_map(mock_validator, column_names)
expected = {
"id": SemanticDomainTypes.NUMERIC,
"name": SemanticDomainTypes.TEXT,
"created_at": SemanticDomainTypes.DATETIME,
}
assert result == expected
@pytest.mark.unit
def test_apply_semantic_type_filters_include(self):
"""Test semantic type filtering with include types."""
column_filter = ColumnFilter(include_semantic_types=[SemanticDomainTypes.NUMERIC])
column_names = ["id", "name", "age"]
semantic_type_map = {
"id": SemanticDomainTypes.NUMERIC,
"name": SemanticDomainTypes.TEXT,
"age": SemanticDomainTypes.NUMERIC,
}
result = column_filter._apply_semantic_type_filters(column_names, semantic_type_map)
assert result == ["id", "age"]
@pytest.mark.unit
def test_apply_semantic_type_filters_exclude(self):
"""Test semantic type filtering with exclude types."""
column_filter = ColumnFilter(exclude_semantic_types=[SemanticDomainTypes.TEXT])
column_names = ["id", "name", "age"]
semantic_type_map = {
"id": SemanticDomainTypes.NUMERIC,
"name": SemanticDomainTypes.TEXT,
"age": SemanticDomainTypes.NUMERIC,
}
result = column_filter._apply_semantic_type_filters(column_names, semantic_type_map)
assert result == ["id", "age"]
@pytest.mark.unit
def test_get_filtered_column_names_no_semantic_filtering(
self, mock_validator, sample_column_names
):
"""Test getting filtered column names without semantic filtering."""
mock_validator.get_metric.return_value = sample_column_names
column_filter = ColumnFilter(exclude_column_names=["description"])
result = column_filter.get_filtered_column_names(mock_validator)
expected = ["id", "name", "age", "salary", "created_at", "is_active"]
assert result == expected
@pytest.mark.unit
def test_get_filtered_column_names_with_semantic_filtering(
self, mock_validator, sample_column_names, sample_column_types
):
"""Test getting filtered column names with semantic filtering."""
# Mock the validator to return column names and types
def mock_get_metric(metric):
if metric.metric_name == "table.columns":
return sample_column_names
elif metric.metric_name == "table.column_types":
return sample_column_types
return None
mock_validator.get_metric.side_effect = mock_get_metric
column_filter = ColumnFilter(
include_semantic_types=[SemanticDomainTypes.NUMERIC], exclude_column_names=["id"]
)
result = column_filter.get_filtered_column_names(mock_validator)
# Should include numeric columns (age, salary) but exclude 'id'
expected = ["age", "salary"]
assert result == expected
@pytest.mark.unit
def test_get_filtered_column_names_complex_filtering(
self, mock_validator, sample_column_names, sample_column_types
):
"""Test getting filtered column names with complex filtering."""
# Mock the validator to return column names and types
def mock_get_metric(metric):
if metric.metric_name == "table.columns":
return sample_column_names
elif metric.metric_name == "table.column_types":
return sample_column_types
return None
mock_validator.get_metric.side_effect = mock_get_metric
column_filter = ColumnFilter(
include_semantic_types=[SemanticDomainTypes.NUMERIC, SemanticDomainTypes.TEXT],
exclude_column_names=["description"],
include_column_name_suffixes=["e", "y"],
)
result = column_filter.get_filtered_column_names(mock_validator)
# Should include columns ending with 'e' or 'y' that are numeric or text,
# but exclude 'description'
# name (ends with 'e', TEXT), age (ends with 'e', NUMERIC), salary (ends with 'y', NUMERIC)
expected = ["name", "age", "salary"]
assert result == expected
|
TestColumnFilter
|
python
|
streamlit__streamlit
|
lib/streamlit/errors.py
|
{
"start": 21789,
"end": 22199
}
|
class ____(LocalizableStreamlitException):
    """Raised when a parameter receives a value outside its supported set.

    The rendered message names the offending parameter and lists every
    accepted value.
    """

    def __init__(self, parameter: str, valid_values: list[str]) -> None:
        # Pre-render the comma-separated list once before handing it to the
        # localizable-message machinery.
        supported = ", ".join(valid_values)
        super().__init__(
            "Invalid `{parameter}` value. Supported values: {valid_values}.",
            parameter=parameter,
            valid_values=supported,
        )
# config
|
StreamlitValueError
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_slots/SLOT002.py
|
{
"start": 89,
"end": 157
}
|
class ____(namedtuple("foo", ["str", "int"])):  # SLOT002
    # Lint-rule fixture: subclasses a namedtuple without declaring __slots__,
    # so the SLOT002 diagnostic is expected on the class line above.
    pass
|
Bad
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/views/private.py
|
{
"start": 24450,
"end": 24737
}
|
class ____(ProjectAdminMixin, PrivateViewMixin):
    """Shared configuration for the project webhook CRUD views."""

    model = WebHook
    lookup_url_kwarg = "webhook_pk"
    form_class = WebHookForm

    def get_success_url(self):
        """After a successful form submit, go back to the project's webhook list."""
        project_slug = self.get_project().slug
        return reverse("projects_webhooks", args=[project_slug])
|
WebHookMixin
|
python
|
spyder-ide__spyder
|
spyder/plugins/outlineexplorer/widgets.py
|
{
"start": 7585,
"end": 32728
}
|
class ____(OneColumnTree):
# Used only for debug purposes
sig_tree_updated = Signal()
sig_display_spinner = Signal()
sig_hide_spinner = Signal()
sig_update_configuration = Signal()
CONF_SECTION = 'outline_explorer'
def __init__(self, parent):
if hasattr(parent, 'CONTEXT_NAME'):
self.CONTEXT_NAME = parent.CONTEXT_NAME
self.show_fullpath = self.get_conf('show_fullpath')
self.show_all_files = self.get_conf('show_all_files')
self.group_cells = self.get_conf('group_cells')
self.show_comments = self.get_conf('show_comments')
self.sort_files_alphabetically = self.get_conf(
'sort_files_alphabetically')
self.follow_cursor = self.get_conf('follow_cursor')
self.display_variables = self.get_conf('display_variables')
super().__init__(parent)
self.freeze = False # Freezing widget to avoid any unwanted update
self.editor_items = {}
self.editor_tree_cache = {}
self.editor_ids = {}
self.update_timers = {}
self.starting = {}
self.editors_to_update = {}
self.ordered_editor_ids = []
self._current_editor = None
self._languages = []
self.is_visible = False
self._symbols_expanded_state: dict[str, bool] = {}
self.currentItemChanged.connect(self.selection_switched)
self.itemExpanded.connect(self.tree_item_expanded)
self.itemCollapsed.connect(self.tree_item_collapsed)
# ---- SpyderWidgetMixin API
# ------------------------------------------------------------------------
@property
def current_editor(self):
"""Get current editor."""
return self._current_editor
@current_editor.setter
def current_editor(self, value):
"""Set current editor and connect the necessary signals."""
if self._current_editor == value:
return
# Disconnect previous editor
self.connect_current_editor(False)
self._current_editor = value
# Connect new editor
self.connect_current_editor(True)
def __hide_or_show_root_items(self, item):
"""
show_all_files option is disabled: hide all root items except *item*
show_all_files option is enabled: do nothing
"""
for _it in self.get_top_level_items():
_it.setHidden(_it is not item and not self.show_all_files)
@on_conf_change(option='show_fullpath')
def toggle_fullpath_mode(self, state):
self.show_fullpath = state
self.setTextElideMode(Qt.ElideMiddle if state else Qt.ElideRight)
for index in range(self.topLevelItemCount()):
self.topLevelItem(index).set_text(fullpath=self.show_fullpath)
@on_conf_change(option='show_all_files')
def toggle_show_all_files(self, state):
self.show_all_files = state
current_editor = self.current_editor
if current_editor is not None:
editor_id = self.editor_ids[current_editor]
item = self.editor_items[editor_id].node
self.__hide_or_show_root_items(item)
self.__sort_toplevel_items()
if self.show_all_files is False:
self.root_item_selected(
self.editor_items[self.editor_ids[current_editor]])
self.do_follow_cursor()
@on_conf_change(option='show_comments')
def toggle_show_comments(self, state):
self.show_comments = state
self.sig_update_configuration.emit()
self.update_editors(language='python')
@on_conf_change(option='group_cells')
def toggle_group_cells(self, state):
self.group_cells = state
self.sig_update_configuration.emit()
self.update_editors(language='python')
@on_conf_change(option='display_variables')
def toggle_variables(self, state):
self.display_variables = state
for editor in self.editor_ids.keys():
self.update_editor(editor.info, editor)
@on_conf_change(option='sort_files_alphabetically')
def toggle_sort_files_alphabetically(self, state):
self.sort_files_alphabetically = state
self.__sort_toplevel_items()
@on_conf_change(option='follow_cursor')
def toggle_follow_cursor(self, state):
"""Follow the cursor."""
self.follow_cursor = state
self.do_follow_cursor()
@Slot()
def do_follow_cursor(self):
"""Go to cursor position."""
if self.follow_cursor:
self.go_to_cursor_position()
@Slot()
def go_to_cursor_position(self):
if self.current_editor is not None:
editor_id = self.editor_ids[self.current_editor]
line = self.current_editor.get_cursor_line_number()
tree = self.editor_tree_cache[editor_id]
root = self.editor_items[editor_id]
overlap = tree[line - 1]
if len(overlap) == 0:
item = root.node
self.setCurrentItem(item)
self.scrollToItem(item)
self.expandItem(item)
else:
sorted_nodes = sorted(overlap)
# The last item of the sorted elements correspond to the
# current node if expanding, otherwise it is the first stopper
# found
idx = -1
self.switch_to_node(sorted_nodes, idx)
def switch_to_node(self, sorted_nodes, idx):
"""Given a set of tree nodes, highlight the node on index `idx`."""
item_interval = sorted_nodes[idx]
item_ref = item_interval.data
item = item_ref.node
self.setCurrentItem(item)
self.scrollToItem(item)
self.expandItem(item)
def connect_current_editor(self, state):
"""Connect or disconnect the editor from signals."""
editor = self.current_editor
if editor is None:
return
# Connect syntax highlighter
sig_update = editor.sig_outline_explorer_data_changed
sig_move = editor.sig_cursor_position_changed
sig_display_spinner = editor.sig_start_outline_spinner
if state:
sig_update.connect(self.update_editor)
sig_move.connect(self.do_follow_cursor)
sig_display_spinner.connect(self.sig_display_spinner)
self.do_follow_cursor()
else:
try:
sig_update.disconnect(self.update_editor)
sig_move.disconnect(self.do_follow_cursor)
sig_display_spinner.disconnect(self.sig_display_spinner)
except TypeError:
# This catches an error while performing
# teardown in one of our tests.
pass
def clear(self):
"""Reimplemented Qt method"""
self.set_title('')
OneColumnTree.clear(self)
def set_current_editor(self, editor, update):
"""Bind editor instance"""
editor_id = editor.get_id()
# Don't fail if editor doesn't exist anymore. This
# happens when switching projects.
try:
item = self.editor_items[editor_id].node
except KeyError:
return
if not self.freeze:
self.scrollToItem(item)
self.root_item_selected(item)
self.__hide_or_show_root_items(item)
logger.debug(f"Set current editor to file {editor.fname}")
self.current_editor = editor
if (
self.is_visible
and (editor.get_language().lower() in self._languages)
and not editor.is_tree_updated
):
if editor.info is not None:
self.update_editor(editor.info)
def register_editor(self, editor):
"""
Register editor attributes and create basic objects associated
to it.
"""
editor_id = editor.get_id()
self.editor_ids[editor] = editor_id
self.ordered_editor_ids.append(editor_id)
this_root = SymbolStatus(editor.fname, None, None, editor.fname)
self.editor_items[editor_id] = this_root
root_item = FileRootItem(editor.fname, this_root,
self, editor.is_python())
this_root.node = root_item
root_item.set_text(fullpath=self.show_fullpath)
self.resizeColumnToContents(0)
if not self.show_all_files:
root_item.setHidden(True)
editor_tree = IntervalTree()
self.editor_tree_cache[editor_id] = editor_tree
self.__sort_toplevel_items()
def file_renamed(self, editor, new_filename):
"""File was renamed, updating outline explorer tree"""
if editor is None:
# This is needed when we can't find an editor to attach
# the outline explorer to.
# Fixes spyder-ide/spyder#8813.
return
editor_id = editor.get_id()
if editor_id in list(self.editor_ids.values()):
items = self.editor_items[editor_id]
# Set path for items
items.set_path(new_filename)
# Change path of root item (i.e. the file name)
root_item = items.node
root_item.set_path(new_filename, fullpath=self.show_fullpath)
# Clear and re-populate the tree again.
# Fixes spyder-ide/spyder#15517
items.delete()
editor.request_symbols()
# Resort root items
self.__sort_toplevel_items()
def update_editors(self, language):
"""
Update all editors for a given language sequentially.
This is done through a timer to avoid lags in the interface.
"""
if self.editors_to_update.get(language):
editor = self.editors_to_update[language][0]
if editor.info is not None:
# Editor could be not there anymore after switching
# projects
try:
self.update_editor(editor.info, editor)
except KeyError:
pass
self.editors_to_update[language].remove(editor)
self.update_timers[language].start()
else:
if self.starting.get(language):
logger.debug("Finish updating files at startup")
self.starting[language] = False
def update_all_editors(self, reset_info=False):
"""Update all editors with LSP support."""
for language in self._languages:
self.set_editors_to_update(language, reset_info=reset_info)
self.update_timers[language].start()
@Slot(list)
def update_editor(self, items, editor=None):
"""
Update the outline explorer for `editor` preserving the tree
state.
"""
if items is None:
return
if editor is None:
editor = self.current_editor
# Only perform an update if the widget is visible.
if not self.is_visible:
logger.debug(
f"Don't update tree of file {editor.fname} because plugin is "
f"not visible"
)
self.sig_hide_spinner.emit()
return
update = self.update_tree(items, editor)
if update:
self.save_expanded_state()
self.restore_expanded_state()
self.do_follow_cursor()
def merge_interval(self, parent, node):
    """
    Add node into an existing tree structure.

    Walks up from `parent` until an ancestor whose span still covers
    `node`'s start position is found, detaches `node` from any previous
    parent, and re-attaches it under that ancestor.
    """
    match = False
    start, end = node.position
    while parent.parent is not None and not match:
        parent_start, parent_end = parent.position
        if parent_end <= start:
            # `parent` ends before `node` starts, so it cannot contain
            # it; keep climbing.
            parent = parent.parent
        else:
            match = True

    # Detach `node` (and its widget node) from any previous parent
    # before re-attaching, to avoid duplicated children.
    if node.parent is not None:
        node.parent.remove_node(node)
        node.parent = None
        if node.node.parent is not None:
            node.node.parent.remove_children(node.node)

    parent.add_node(node)
    node.refresh()
    return node
def update_tree(self, items, editor):
    """
    Update tree with new items that come from the LSP.

    Returns True when the cached tree was rebuilt, False when the
    incoming symbols match the cached ones and no update was needed.
    """
    editor_id = editor.get_id()
    language = editor.get_language()
    current_tree = self.editor_tree_cache[editor_id]
    root = self.editor_items[editor_id]
    tree_info = []

    # Create tree with items that come from the LSP
    for symbol in items:
        symbol_name = symbol['name']
        symbol_kind = symbol['kind']
        if language.lower() == 'python':
            # Filter symbols that only add noise for Python files.
            if symbol_kind == SymbolKind.MODULE:
                continue
            if (symbol_kind == SymbolKind.VARIABLE and
                    not self.display_variables):
                continue
            if (symbol_kind == SymbolKind.FIELD and
                    not self.display_variables):
                continue
        # NOTE: This could be also a DocumentSymbol
        symbol_range = symbol['location']['range']
        symbol_start = symbol_range['start']['line']
        symbol_end = symbol_range['end']['line']
        symbol_repr = SymbolStatus(symbol_name, symbol_kind,
                                   (symbol_start, symbol_end), None)
        # Restore the previous expanded/collapsed state of this symbol
        # (keyed by name), defaulting new symbols to collapsed.
        if self._symbols_expanded_state.get(symbol_name):
            symbol_repr.status = True
        else:
            self._symbols_expanded_state[symbol_name] = False
            symbol_repr.status = False
        tree_info.append((symbol_start, symbol_end + 1, symbol_repr))

    tree = IntervalTree.from_tuples(tree_info)

    # We must update the tree if the editor's root doesn't have children
    # yet but we have symbols for it saved in the cache
    must_update = root.node.childCount() == 0 and len(current_tree) > 0

    if not must_update:
        # Compare with current tree to check if it's necessary to update
        # it.
        if tree == current_tree:
            logger.debug(
                f"Current and new trees for file {editor.fname} are the "
                f"same, so no update is necessary"
            )
            editor.is_tree_updated = True
            self.sig_hide_spinner.emit()
            return False

    logger.debug(f"Updating tree for file {editor.fname}")

    # Create nodes with new tree
    for entry in sorted(tree):
        entry.data.create_node()

    # Remove previous tree to create the new one.
    # NOTE: This is twice as fast as detecting the symbols that changed
    # and updating only those in current_tree.
    self.editor_items[editor_id].delete()

    # Recreate tree structure by merging overlapping symbol intervals
    # under their containing symbol (see merge_interval).
    tree_copy = IntervalTree(tree)
    tree_copy.merge_overlaps(
        data_reducer=self.merge_interval,
        data_initializer=root
    )

    # Save new tree and finish
    self.editor_tree_cache[editor_id] = tree
    editor.is_tree_updated = True
    self.sig_tree_updated.emit()
    self.sig_hide_spinner.emit()
    return True
def remove_editor(self, editor):
    """Drop all bookkeeping for `editor` (e.g. when its file is closed)."""
    if editor in self.editor_ids:
        if self.current_editor is editor:
            self.current_editor = None
        logger.debug(f"Removing tree of file {editor.fname}")
        editor_id = self.editor_ids.pop(editor)
        language = editor.get_language().lower()
        if editor_id in self.ordered_editor_ids:
            self.ordered_editor_ids.remove(editor_id)

        # Remove editor from the list that it's waiting to be updated
        # because it's not necessary anymore.
        if (
            language in self._languages
            and editor in self.editors_to_update[language]
        ):
            self.editors_to_update[language].remove(editor)

        # Several editors can share the same id (splits / new windows);
        # only tear down the tree when the last one is gone.
        if editor_id not in list(self.editor_ids.values()):
            root_item = self.editor_items.pop(editor_id)
            self.editor_tree_cache.pop(editor_id)
            try:
                self.takeTopLevelItem(
                    self.indexOfTopLevelItem(root_item.node))
            except RuntimeError:
                # item has already been removed
                pass
def set_editor_ids_order(self, ordered_editor_ids):
    """
    Order the root file items in the Outline Explorer following the
    provided list of editor ids.
    """
    if self.ordered_editor_ids == ordered_editor_ids:
        # Order is unchanged; nothing to do.
        return
    self.ordered_editor_ids = ordered_editor_ids
    if self.sort_files_alphabetically is False:
        self.__sort_toplevel_items()
def __sort_toplevel_items(self):
    """
    Sort the root file items in alphabetical order if
    'sort_files_alphabetically' is True, else order the items as
    specified in the 'self.ordered_editor_ids' list.
    """
    if self.show_all_files is False:
        return

    current_ordered_items = [self.topLevelItem(index) for index in
                             range(self.topLevelItemCount())]

    # Convert list to a dictionary in order to remove duplicated entries
    # when having multiple editors (splitted or in new windows).
    # See spyder-ide/spyder#14646
    current_ordered_items_dict = {
        item.path.lower(): item for item in current_ordered_items}

    if self.sort_files_alphabetically:
        new_ordered_items = sorted(
            current_ordered_items_dict.values(),
            key=lambda item: osp.basename(item.path.lower()))
    else:
        # Keep the order given by `ordered_editor_ids`, skipping ids that
        # no longer have a root item.
        new_ordered_items = [
            self.editor_items.get(e_id).node for e_id in
            self.ordered_editor_ids if
            self.editor_items.get(e_id) is not None]

    # PySide <= 5.15.0 doesn't support == and != comparison for the data
    # types inside the compared lists (see [1], [2]), so always update
    # on those versions.
    #
    # [1] https://bugreports.qt.io/browse/PYSIDE-74
    # [2] https://codereview.qt-project.org/c/pyside/pyside-setup/+/312945
    update = (
        (PYSIDE2 and parse(PYSIDE_VERSION) <= parse("5.15.0"))
        or (current_ordered_items != new_ordered_items)
    )

    if update:
        selected_items = self.selectedItems()
        self.save_expanded_state()
        # Take every item out, then re-insert in the new order.
        for index in range(self.topLevelItemCount()):
            self.takeTopLevelItem(0)
        for index, item in enumerate(new_ordered_items):
            self.insertTopLevelItem(index, item)
        self.restore_expanded_state()
        # Re-apply the previous selection (re-inserting clears it).
        self.clearSelection()
        if selected_items:
            selected_items[-1].setSelected(True)
def root_item_selected(self, item):
    """Root item has been selected: expanding it and collapsing others"""
    if self.show_all_files:
        return
    for top_item in self.get_top_level_items():
        action = self.expandItem if top_item is item else self.collapseItem
        action(top_item)
def restore(self):
    """Reimplemented OneColumnTree method"""
    editor = self.current_editor
    if editor is None:
        return
    self.collapseAll()
    root_node = self.editor_items[self.editor_ids[editor]].node
    self.root_item_selected(root_node)
def get_root_item(self, item):
    """Return the root item of the specified item."""
    node = item
    # Climb the parent chain; top-level items do not have a
    # QTreeWidgetItem parent.
    while isinstance(node.parent(), QTreeWidgetItem):
        node = node.parent()
    return node
def get_visible_items(self):
    """Return a list of all visible items in the treewidget."""
    items = []
    iterator = QTreeWidgetItemIterator(self)
    while iterator.value():
        item = iterator.value()
        if not item.isHidden():
            # A child item only counts as visible when its parent is
            # expanded; top-level items (no parent) always count.
            if item.parent():
                if item.parent().isExpanded():
                    items.append(item)
            else:
                items.append(item)
        iterator += 1
    return items
def on_item_activated(self, item):
    """Double-click event"""
    editor_root = self.editor_items.get(
        self.editor_ids.get(self.current_editor))
    root_item = editor_root.node
    text = ''
    if isinstance(item, FileRootItem):
        line = None
        if id(root_item) != id(item):
            root_item = item
    else:
        # Symbol item: jump to its 1-based start line.
        line = item.ref.position[0] + 1
        text = item.ref.name

    path = item.ref.path
    # Freeze cursor-follow while asking the editor to jump, to avoid
    # feedback into this widget.
    self.freeze = True
    if line:
        self.parent().edit_goto.emit(path, line, text)
    else:
        self.parent().edit.emit(path)
    self.freeze = False

    # Sync `current_editor` with the activated file.
    # NOTE(review): the inner loop breaks on the first editor without
    # comparing `_id` against `editor_id`, so it appears to pick an
    # arbitrary editor when several are open -- confirm intent.
    for editor_id, i_item in list(self.editor_items.items()):
        if i_item.path == path:
            for editor, _id in list(self.editor_ids.items()):
                self.current_editor = editor
                break
            break
def on_item_clicked(self, item):
    """Single-click handler: delegates to the activation behavior."""
    is_file_root = isinstance(item, FileRootItem)
    if is_file_root:
        self.root_item_selected(item)
    self.on_item_activated(item)
def selection_switched(self, current_item, previous_item):
    """Mirror the tree selection state onto the items' underlying refs."""
    for tree_item, is_selected in ((current_item, True),
                                   (previous_item, False)):
        if tree_item is not None:
            tree_item.ref.selected = is_selected
def tree_item_collapsed(self, item):
    """Remember that the symbol behind *item* is now collapsed."""
    symbol = item.ref
    symbol.status = False
    self._symbols_expanded_state[symbol.name] = False
def tree_item_expanded(self, item):
    """Remember that the symbol behind *item* is now expanded."""
    symbol = item.ref
    symbol.status = True
    self._symbols_expanded_state[symbol.name] = True
def set_editors_to_update(self, language, reset_info=False):
    """Collect the open editors whose language matches *language*."""
    matching = []
    for editor in self.editor_ids:
        if editor.get_language().lower() == language:
            matching.append(editor)
            if reset_info:
                # Force a fresh symbol request for this editor.
                editor.info = None
    self.editors_to_update[language] = matching
def start_symbol_services(self, language):
    """
    Show symbols for all `language` files.

    Called when the LSP for `language` becomes available.
    """
    logger.debug(f"Start symbol services for language {language}")

    # Save all languages that can send info to this pane.
    self._languages.append(language)

    # Update all files associated to `language` through a timer
    # that allows to wait a bit between updates. That doesn't block
    # the interface at startup.
    timer = QTimer(self)
    timer.setSingleShot(True)
    timer.setInterval(150)
    timer.timeout.connect(lambda: self.update_editors(language))
    self.update_timers[language] = timer
    self.starting[language] = True

    # Set editors that need to be updated per language
    self.set_editors_to_update(language)

    # Start timer
    timer.start()
def stop_symbol_services(self, language):
    """Disable LSP symbols functionality."""
    logger.debug(f"Stop symbol services for language {language}")
    # The language may have already been removed; ignore that case.
    if language in self._languages:
        self._languages.remove(language)
    # Invalidate cached symbol info for all matching editors.
    for editor in self.editor_ids:
        if editor.get_language().lower() == language:
            editor.info = None
def change_visibility(self, is_visible):
    """Actions to take when the widget visibility has changed."""
    self.is_visible = is_visible

    if is_visible:
        # Update outdated trees for all LSP languages
        for language in self._languages:
            # Don't run this while trees are being updated after their LSP
            # started.
            if not self.starting[language]:
                # Check which editors need to be updated
                for editor in self.editor_ids.keys():
                    if (
                        editor.get_language().lower() == language
                        and not editor.is_tree_updated
                        and editor not in self.editors_to_update[language]
                    ):
                        self.editors_to_update[language].append(editor)

                # Update editors
                if self.editors_to_update[language]:
                    logger.debug(
                        f"Updating outdated trees for {language} files "
                        f"because the plugin has become visible"
                    )
                    self.update_editors(language)

        # Update current tree if it has info available
        ce = self.current_editor
        if ce and ce.info and not ce.is_tree_updated:
            self.update_editor(ce.info, ce)
|
OutlineExplorerTreeWidget
|
python
|
has2k1__plotnine
|
plotnine/composition/_wrap.py
|
{
"start": 96,
"end": 1447
}
|
class ____(Compose):
    """
    Wrap plots or compositions into a grid

    **Usage**

        plot + plot
        plot + composition
        composition + plot
        composition + composition

    Typically, you will use this class through the `+` operator.

    Parameters
    ----------
    items:
        The objects to be arranged (composed)
    nrow:
        Number of rows in the composition
    ncol:
        Number of cols in the composition

    See Also
    --------
    plotnine.composition.Beside : To arrange plots side by side
    plotnine.composition.Stack : To arrange plots vertically
    plotnine.composition.plot_spacer : To add a blank space between plots
    plotnine.composition.Compose : For more on composing plots
    """

    def __add__(self, rhs):
        """
        Add rhs into the wrapping composition
        """
        if not isinstance(rhs, (ggplot, Compose)):
            # Not a plot or composition; let the base class decide.
            return super().__add__(rhs)
        # Rebuild the wrap with rhs appended, then re-apply the stored
        # layout (`self.layout` -- presumably provided by Compose;
        # not visible here).
        return Wrap([*self, rhs]) + self.layout

    def __or__(self, rhs: ggplot | Compose) -> Compose:
        """
        Add rhs as a column
        """
        # Local import, presumably to avoid a circular import.
        from ._beside import Beside

        return Beside([self, rhs])

    def __truediv__(self, rhs: ggplot | Compose) -> Compose:
        """
        Add rhs as a row
        """
        # Local import, presumably to avoid a circular import.
        from ._stack import Stack

        return Stack([self, rhs])
|
Wrap
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py
|
{
"start": 2508,
"end": 2562
}
|
# Lint fixture for flake8-bugbear B024: an ABC with only a bare
# class-level name and no abstract methods must be flagged ("# error").
class ____(ABC): # error
    foo
|
abc_set_class_variable_4
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/data_structures/dynamic_stitch_op_test.py
|
{
"start": 9180,
"end": 13559
}
|
class ____(DynamicStitchTestBase, test.TestCase):
    """Runs the shared DynamicStitch tests against parallel_dynamic_stitch."""

    def __init__(self, *test_case_args):
        test.TestCase.__init__(self, *test_case_args)
        # Point the shared test base at the parallel op implementation.
        DynamicStitchTestBase.__init__(self, data_flow_ops.parallel_dynamic_stitch)

    def testScalar(self):
        with test_util.use_gpu():
            indices = [constant_op.constant(0), constant_op.constant(1)]
            data = [constant_op.constant(40.0), constant_op.constant(60.0)]
            # Exercise both index orders.
            for step in -1, 1:
                stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
                stitched_val = self.evaluate(stitched_t)
                self.assertAllEqual([40.0, 60.0][::step], stitched_val)
                # Dimension 0 is max(flatten(indices))+1.
                self.assertEqual([2], stitched_t.get_shape().as_list())

    @test_util.run_deprecated_v1
    def testHigherRank(self):
        indices = [
            constant_op.constant(6),
            constant_op.constant([4, 1]),
            constant_op.constant([[5, 2], [0, 3]])
        ]
        data = [
            constant_op.constant([61, 62], dtype=dtypes.float32),
            constant_op.constant([[41, 42], [11, 12]], dtype=dtypes.float32),
            constant_op.constant(
                [[[51, 52], [21, 22]], [[1, 2], [31, 32]]], dtype=dtypes.float32)
        ]
        stitched_t = data_flow_ops.dynamic_stitch(indices, data)
        stitched_val = self.evaluate(stitched_t)
        # Row i of the result should be [10*i + 1, 10*i + 2].
        correct = 10 * np.arange(7)[:, None] + [1.0, 2.0]
        self.assertAllEqual(correct, stitched_val)
        self.assertEqual([7, 2], stitched_t.get_shape().as_list())
        # Test gradients
        stitched_grad = 7 * stitched_val
        grads = gradients_impl.gradients(stitched_t, indices + data,
                                         stitched_grad)
        self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
        for datum, grad in zip(data, self.evaluate(grads[3:])):
            self.assertAllEqual(7.0 * self.evaluate(datum), grad)

    # GPU version unit tests
    def testScalarGPU(self):
        indices = [constant_op.constant(0), constant_op.constant(1)]
        data = [constant_op.constant(40.0), constant_op.constant(60.0)]
        for step in -1, 1:
            stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
            stitched_val = self.evaluate(stitched_t)
            self.assertAllEqual([40.0, 60.0][::step], stitched_val)
            # Dimension 0 is max(flatten(indices))+1.
            self.assertEqual([2], stitched_t.get_shape().as_list())

    @test_util.run_deprecated_v1
    def testHigherRankGPU(self):
        indices = [
            constant_op.constant(6),
            constant_op.constant([4, 1]),
            constant_op.constant([[5, 2], [0, 3]])
        ]
        data = [
            constant_op.constant([61, 62], dtype=dtypes.float32),
            constant_op.constant([[41, 42], [11, 12]], dtype=dtypes.float32),
            constant_op.constant(
                [[[51, 52], [21, 22]], [[1, 2], [31, 32]]], dtype=dtypes.float32)
        ]
        stitched_t = data_flow_ops.dynamic_stitch(indices, data)
        stitched_val = self.evaluate(stitched_t)
        correct = 10 * np.arange(7)[:, None] + [1.0, 2.0]
        self.assertAllEqual(correct, stitched_val)
        self.assertEqual([7, 2], stitched_t.get_shape().as_list())
        # Test gradients
        stitched_grad = 7 * stitched_val
        grads = gradients_impl.gradients(stitched_t, indices + data,
                                         stitched_grad)
        self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
        for datum, grad in zip(data, self.evaluate(grads[3:])):
            self.assertAllEqual(7.0 * self.evaluate(datum), grad)

    @test_util.run_in_graph_and_eager_modes
    def testMismatchedDataAndIndexListSizes(self):
        # The op must reject mismatched indices/data list lengths.
        indices = [
            constant_op.constant([2]),
            constant_op.constant([1]),
            constant_op.constant([0]),
            constant_op.constant([3]),
        ]
        data = [
            constant_op.constant([1.0]),
            constant_op.constant([2.0]),
            constant_op.constant([3.0]),
            constant_op.constant([4.0])
        ]
        with self.assertRaisesRegex(
            (ValueError, errors.InvalidArgumentError),
            "expected inputs .* do not match|List argument .* must match"):
            self.evaluate(data_flow_ops.dynamic_stitch(indices[0:2], data))
        with self.assertRaisesRegex(
            (ValueError, errors.InvalidArgumentError),
            "expected inputs .* do not match|List argument .* must match"):
            self.evaluate(data_flow_ops.dynamic_stitch(indices, data[0:2]))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    test.main()
|
ParallelDynamicStitchTest
|
python
|
plotly__plotly.py
|
_plotly_utils/basevalidators.py
|
{
"start": 68855,
"end": 69504
}
|
class ____(BaseValidator):
    """
    Validator for readonly literal values
    """

    def __init__(self, plotly_name, parent_name, val, **kwargs):
        super(LiteralValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
        # The single value this read-only property is allowed to hold.
        self.val = val

    def validate_coerce(self, v):
        # Accept only the exact literal value; any other assignment is an
        # attempt to change a read-only property.
        if v != self.val:
            raise ValueError(
                """\
The '{plotly_name}' property of {parent_name} is read-only""".format(
                    plotly_name=self.plotly_name, parent_name=self.parent_name
                )
            )
        else:
            return v
|
LiteralValidator
|
python
|
apache__airflow
|
airflow-core/tests/unit/executors/test_executor_utils.py
|
{
"start": 1243,
"end": 5608
}
|
class ____:
    """Tests for ExecutorName: construction, repr, equality and hashing."""

    @pytest.fixture
    def core_executor(self):
        return ExecutorName(alias=CORE_EXEC_ALIAS, module_path=CORE_EXEC_MODULE_PATH)

    @pytest.fixture
    def core_executor_team_name(self):
        return ExecutorName(
            alias=CORE_EXEC_ALIAS, module_path=CORE_EXEC_MODULE_PATH, team_name=CORE_EXEC_TEAM_NAME
        )

    @pytest.fixture
    def custom_executor(self):
        return ExecutorName(module_path=CUSTOM_EXEC_MODULE_PATH)

    @pytest.fixture
    def custom_executor_alias(self):
        return ExecutorName(module_path=CUSTOM_EXEC_MODULE_PATH, alias=CUSTOM_EXEC_ALIAS)

    @pytest.fixture
    def custom_executor_team_name(self):
        return ExecutorName(module_path=CUSTOM_EXEC_MODULE_PATH, team_name=CUSTOM_EXEC_TEAM_NAME)

    @pytest.fixture
    def custom_executor_team_name_alias(self):
        return ExecutorName(
            module_path=CUSTOM_EXEC_MODULE_PATH, alias=CUSTOM_EXEC_ALIAS, team_name=CUSTOM_EXEC_TEAM_NAME
        )

    def test_initialization(
        self,
        core_executor,
        core_executor_team_name,
        custom_executor,
        custom_executor_team_name,
        custom_executor_alias,
        custom_executor_team_name_alias,
    ):
        # Core executors are detected by their module path and get the
        # CORE connector source; everything else is CUSTOM_PATH.
        assert core_executor.module_path == CORE_EXEC_MODULE_PATH
        assert core_executor.alias is CORE_EXEC_ALIAS
        assert core_executor.team_name is None
        assert core_executor.connector_source == ConnectorSource.CORE

        assert core_executor_team_name.module_path == CORE_EXEC_MODULE_PATH
        assert core_executor_team_name.alias is CORE_EXEC_ALIAS
        assert core_executor_team_name.team_name == CORE_EXEC_TEAM_NAME
        assert core_executor_team_name.connector_source == ConnectorSource.CORE

        assert custom_executor.module_path == CUSTOM_EXEC_MODULE_PATH
        assert custom_executor.alias is None
        assert custom_executor.team_name is None
        assert custom_executor.connector_source == ConnectorSource.CUSTOM_PATH

        assert custom_executor_team_name.module_path == CUSTOM_EXEC_MODULE_PATH
        assert custom_executor_team_name.alias is None
        assert custom_executor_team_name.team_name == CUSTOM_EXEC_TEAM_NAME
        assert custom_executor_team_name.connector_source == ConnectorSource.CUSTOM_PATH

        assert custom_executor_alias.module_path == CUSTOM_EXEC_MODULE_PATH
        assert custom_executor_alias.alias == CUSTOM_EXEC_ALIAS
        assert custom_executor_alias.team_name is None
        assert custom_executor_alias.connector_source == ConnectorSource.CUSTOM_PATH

        assert custom_executor_team_name_alias.module_path == CUSTOM_EXEC_MODULE_PATH
        assert custom_executor_team_name_alias.alias == CUSTOM_EXEC_ALIAS
        assert custom_executor_team_name_alias.team_name == CUSTOM_EXEC_TEAM_NAME
        assert custom_executor_team_name_alias.connector_source == ConnectorSource.CUSTOM_PATH

    def test_repr_all(self, core_executor, core_executor_team_name, custom_executor_team_name_alias):
        # repr format is "<team>:<alias>:<module_path>", with the module
        # path omitted for core executors.
        assert repr(core_executor) == f":{CORE_EXEC_ALIAS}:"
        assert repr(core_executor_team_name) == f"{CORE_EXEC_TEAM_NAME}:{CORE_EXEC_ALIAS}:"
        assert (
            repr(custom_executor_team_name_alias)
            == f"{CUSTOM_EXEC_TEAM_NAME}:{CUSTOM_EXEC_ALIAS}:{CUSTOM_EXEC_MODULE_PATH}"
        )

    def test_eq_same(self, core_executor_team_name):
        compare_exec = ExecutorName(
            alias=CORE_EXEC_ALIAS, module_path=CORE_EXEC_MODULE_PATH, team_name=CORE_EXEC_TEAM_NAME
        )
        assert core_executor_team_name == compare_exec

    def test_eq_different(self, core_executor, core_executor_team_name, custom_executor_team_name):
        assert core_executor != core_executor_team_name
        assert core_executor_team_name != custom_executor_team_name

    def test_hash_same(self, core_executor_team_name):
        compare_exec = ExecutorName(
            alias=CORE_EXEC_ALIAS, module_path=CORE_EXEC_MODULE_PATH, team_name=CORE_EXEC_TEAM_NAME
        )
        assert hash(core_executor_team_name) == hash(compare_exec)

    def test_hash_different(self, core_executor, core_executor_team_name, custom_executor_team_name_alias):
        assert hash(core_executor) != hash(core_executor_team_name)
        assert hash(core_executor_team_name) != hash(custom_executor_team_name_alias)
|
TestExecutorName
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 8435,
"end": 8538
}
|
class ____(admin.ModelAdmin):
    # Render these FK widgets as raw-ID inputs instead of select boxes.
    raw_id_fields = ("inquisition", "defendant0", "defendant1")
|
SketchAdmin
|
python
|
langchain-ai__langchain
|
libs/partners/anthropic/tests/unit_tests/middleware/test_file_search.py
|
{
"start": 5813,
"end": 8068
}
|
class ____:
    """Test Grep content search."""

    def test_grep_files_with_matches_mode(self) -> None:
        """Test grep with files_with_matches output mode."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["def foo():", "    pass"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/src/utils.py": {
                    "content": ["def bar():", "    return None"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/README.md": {
                    "content": ["# Documentation", "No code here"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }

        # Only the two Python files contain the pattern.
        result = middleware._handle_grep_search(
            pattern=r"def \w+\(\):",
            path="/",
            include=None,
            output_mode="files_with_matches",
            state=state,
        )

        assert isinstance(result, str)
        assert "/src/main.py" in result
        assert "/src/utils.py" in result
        assert "/README.md" not in result
        # Should only have file paths, not line content

    def test_grep_invalid_include_pattern(self) -> None:
        """Return error when include glob is invalid."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["def foo():"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                }
            },
        }

        # "*.{py" has an unterminated brace group and must be rejected.
        result = middleware._handle_grep_search(
            pattern=r"def",
            path="/",
            include="*.{py",
            output_mode="files_with_matches",
            state=state,
        )

        assert result == "Invalid include pattern"
|
TestGrepSearch
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/io_manager.py
|
{
"start": 9524,
"end": 10947
}
|
class ____:
    """
    Stores the arguments passed to the io_manager decorator; calling the
    instance with the decorated function builds the IOManagerDefinition.
    """

    def __init__(
        self,
        config_schema: CoercableToConfigSchema = None,
        description: Optional[str] = None,
        output_config_schema: CoercableToConfigSchema = None,
        input_config_schema: CoercableToConfigSchema = None,
        required_resource_keys: Optional[set[str]] = None,
        version: Optional[str] = None,
    ):
        # type validation happens in IOManagerDefinition
        self.config_schema = config_schema
        self.description = description
        self.required_resource_keys = required_resource_keys
        self.version = version
        self.output_config_schema = output_config_schema
        self.input_config_schema = input_config_schema

    def __call__(self, fn: IOManagerFunction) -> IOManagerDefinition:
        check.callable_param(fn, "fn")

        io_manager_def = IOManagerDefinition(
            resource_fn=fn,
            config_schema=self.config_schema,
            description=self.description,
            required_resource_keys=self.required_resource_keys,
            version=self.version,
            output_config_schema=self.output_config_schema,
            input_config_schema=self.input_config_schema,
        )

        # `update_wrapper` typing cannot currently handle a Union of Callables correctly
        update_wrapper(io_manager_def, wrapped=fn)  # type: ignore
        return io_manager_def
|
_IOManagerDecoratorCallable
|
python
|
google__pytype
|
pytype/metrics.py
|
{
"start": 4159,
"end": 5102
}
|
class ____(metaclass=_RegistryMeta):
    """Abstract base class for metrics."""

    def __init__(self, name):
        """Initialize the metric and register it under the specified name."""
        if name is None:
            # We do not want to register this metric (e.g. we are deserializing a
            # metric from file and need to merge it into the existing metric with the
            # same name.)
            return
        _validate_metric_name(name)
        # Names are globally unique across all registered metrics.
        if name in _registered_metrics:
            raise ValueError(f"Metric {name} has already been defined.")
        self._name = name
        _registered_metrics[name] = self

    @property
    def name(self):
        # Registered name; unset when constructed with name=None.
        return self._name

    def _summary(self):
        """Return a string summarizing the value of the metric."""
        raise NotImplementedError

    def _merge(self, other):
        """Merge data from another metric of the same type."""
        raise NotImplementedError

    def __str__(self):
        return f"{self._name}: {self._summary()}"
|
Metric
|
python
|
pytest-dev__pytest-django
|
tests/test_unittest.py
|
{
"start": 2356,
"end": 4262
}
|
class ____:
    """Django test tags are only converted to Pytest markers if actually
    Django tests. Use pytest markers directly for pytest tests."""

    @pytest.fixture(autouse=True)
    def gimme_my_markers(self, request: pytest.FixtureRequest) -> None:
        # Collect the names of every marker applied to the running test.
        self.markers = {m.name for m in request.node.iter_markers()}

    @tag("tag2")  # type: ignore[misc]
    def test_1(self) -> None:
        # On a non-Django class, @tag must NOT be translated to a marker.
        assert not self.markers
def test_sole_test(django_pytester: DjangoPytester) -> None:
    """
    Make sure the database is configured when only Django TestCase classes
    are collected, without the django_db marker.

    Also ensures that the DB is available after a failure (#824).
    """
    django_pytester.create_test_module(
        """
        import os

        from django.test import TestCase
        from django.conf import settings

        from .app.models import Item

        class TestFoo(TestCase):
            def test_foo(self):
                # Make sure we are actually using the test database
                _, db_name = os.path.split(settings.DATABASES['default']['NAME'])
                assert db_name.startswith('test_') or db_name == ':memory:' \\
                    or 'file:memorydb' in db_name

                # Make sure it is usable
                assert Item.objects.count() == 0

                assert 0, "trigger_error"

        class TestBar(TestCase):
            def test_bar(self):
                assert Item.objects.count() == 0
        """
    )
    # test_foo fails on purpose; test_bar must still see a working DB.
    result = django_pytester.runpytest_subprocess("-v")
    result.stdout.fnmatch_lines(
        [
            "*::test_foo FAILED",
            "*::test_bar PASSED",
            '> assert 0, "trigger_error"',
            "E AssertionError: trigger_error",
            "E assert 0",
            "*= 1 failed, 1 passed*",
        ]
    )
    assert result.ret == 1
|
TestNonDjangoClassWithTags
|
python
|
Pylons__pyramid
|
tests/test_traversal.py
|
{
"start": 44051,
"end": 45565
}
|
class ____(unittest.TestCase):
    """Tests for pyramid.traversal._join_path_tuple."""

    def _callFUT(self, tup):
        from pyramid.traversal import _join_path_tuple

        return _join_path_tuple(tup)

    def test_empty_tuple(self):
        # tests "or '/'" case
        result = self._callFUT(())
        self.assertEqual(result, '/')

    def test_nonempty_tuple(self):
        result = self._callFUT(('x',))
        self.assertEqual(result, 'x')

    def test_segments_with_unsafes(self):
        # These characters are expected to pass through unescaped.
        safe_segments = tuple(
            "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
            "-._~!$&'()*+,;=:@"
        )
        result = self._callFUT(safe_segments)
        self.assertEqual(result, '/'.join(safe_segments))

        # Everything else (plus a non-ASCII char) must be percent-encoded
        # byte-by-byte from its UTF-8 encoding.
        unsafe_segments = tuple(
            chr(i) for i in range(0x20, 0x80) if not chr(i) in safe_segments
        ) + ('あ',)
        result = self._callFUT(unsafe_segments)
        self.assertEqual(
            result,
            '/'.join(
                ''.join(
                    '%%%02X' % (ord(c) if isinstance(c, str) else c)
                    for c in unsafe_segment.encode('utf-8')
                )
                for unsafe_segment in unsafe_segments
            ),
        )
def make_traverser(result):
    """Build a dummy traverser class whose instances always yield *result*.

    Instantiating the returned class marks the given context with
    ``wascontext = True``; calling the instance records the request on the
    context and returns *result*.
    """
    class DummyTraverser:
        def __init__(self, context):
            context.wascontext = True
            self.context = context

        def __call__(self, request):
            self.context.request = request
            return result

    return DummyTraverser
|
Test__join_path_tuple
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/stateful.py
|
{
"start": 20702,
"end": 22473
}
|
class ____:
    """
    Internal description of a stateful-testing rule: the bundles it
    targets, the function to execute, its argument strategies and its
    preconditions.
    """
    # NOTE(review): field(...) and dataclasses.fields(...) are used below,
    # so this class is presumably decorated as a dataclass at the
    # definition site (decorator not visible here) -- confirm.
    targets: Any
    function: Any
    arguments: Any
    preconditions: Any
    # Derived in __post_init__ from the Bundle-valued arguments.
    bundles: tuple["Bundle", ...] = field(init=False)
    # Lazily-computed caches backing __hash__ / __repr__.
    _cached_hash: int | None = field(init=False, default=None)
    _cached_repr: str | None = field(init=False, default=None)
    arguments_strategies: dict[Any, Any] = field(init=False, default_factory=dict)

    def __post_init__(self):
        bundles = []
        for k, v in sorted(self.arguments.items()):
            assert not isinstance(v, BundleReferenceStrategy)
            if isinstance(v, Bundle):
                bundles.append(v)
                # Replace direct Bundle references with a lightweight
                # name-based reference strategy.
                consume = isinstance(v, BundleConsumer)
                v = BundleReferenceStrategy(v.name, consume=consume)
            self.arguments_strategies[k] = v
        self.bundles = tuple(bundles)

    def __repr__(self) -> str:
        if self._cached_repr is None:
            # Only include fields with truthy values.
            bits = [
                f"{field.name}="
                f"{get_pretty_function_description(getattr(self, field.name))}"
                for field in dataclasses.fields(self)
                if getattr(self, field.name)
            ]
            self._cached_repr = f"{self.__class__.__name__}({', '.join(bits)})"
        return self._cached_repr

    def __hash__(self):
        # sampled_from uses hash in calc_label, and we want this to be fast when
        # sampling stateful rules, so we cache here.
        if self._cached_hash is None:
            self._cached_hash = hash(
                (
                    self.targets,
                    self.function,
                    tuple(self.arguments.items()),
                    self.preconditions,
                    self.bundles,
                )
            )
        return self._cached_hash
# Strategy resolving to the currently-executing state machine instance.
self_strategy = st.runner()
|
Rule
|
python
|
viewflow__viewflow
|
viewflow/workflow/nodes/end.py
|
{
"start": 283,
"end": 1921
}
|
class ____(Activation):
    """Activation that finishes the flow process."""

    @Activation.status.transition(
        source=STATUS.DONE,
        target=STATUS.CANCELED,
        conditions=[process_not_cancelled],
        permission=has_manage_permission,
    )
    def undo(self):
        # Undoing the end node reopens the process before reverting the
        # task itself via the base implementation.
        self.process.finished = None
        self.process.status = PROCESS.NEW
        self.process.save()
        super().undo.original()

    @Activation.status.super()
    def activate(self):
        """
        Finalize the flow. If there is no active task, process marked as finished.
        """
        with transaction.atomic(savepoint=True), self.exception_guard():
            self.task.started = now()

            task_started.send(
                sender=self.flow_class, process=self.process, task=self.task
            )

            # Count other unfinished tasks of this process, excluding the
            # end task itself.
            active_tasks_count = (
                self.flow_class.task_class._default_manager.filter(
                    process=self.process, finished__isnull=True
                ).exclude(pk=self.task.pk)
            ).count()

            if active_tasks_count == 0:
                self.process.status = STATUS.DONE
                self.process.finished = now()
                self.process.save()

            task_finished.send(
                sender=self.flow_class, process=self.process, task=self.task
            )
            if active_tasks_count == 0:
                flow_finished.send(
                    sender=self.flow_class, process=self.process, task=self.task
                )

    @Activation.status.super()
    def create_next(self):
        """Do nothing"""
        return []
|
EndActivation
|
python
|
getsentry__sentry
|
src/sentry/auth/authenticators/base.py
|
{
"start": 1079,
"end": 1241
}
|
class ____(ActivationResult):
    # Result used when authenticator activation requires the client to
    # answer a challenge payload before completing.
    type = "challenge"

    def __init__(self, challenge: bytes) -> None:
        self.challenge = challenge
|
ActivationChallengeResult
|
python
|
wandb__wandb
|
wandb/docker/__init__.py
|
{
"start": 193,
"end": 8663
}
|
class ____(Error):
    """Raised when attempting to execute a docker command."""

    def __init__(
        self,
        command_launched: List[str],
        return_code: int,
        stdout: Optional[bytes] = None,
        stderr: Optional[bytes] = None,
    ) -> None:
        # Assemble the error report piece by piece: command, exit code,
        # then whatever output streams were captured.
        cmd = " ".join(command_launched)
        msg = f"The docker command executed was `{cmd}`.\n"
        msg += f"It returned with code {return_code}\n"
        if stdout is None:
            msg += (
                "The content of stdout can be found above the "
                "stacktrace (it wasn't captured).\n"
            )
        else:
            msg += f"The content of stdout is '{stdout.decode()}'\n"
        if stderr is None:
            msg += (
                "The content of stderr can be found above the "
                "stacktrace (it wasn't captured)."
            )
        else:
            msg += f"The content of stderr is '{stderr.decode()}'\n"
        super().__init__(msg)
entrypoint = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "wandb-entrypoint.sh"
)
log = logging.getLogger(__name__)
def shell(cmd: List[str]) -> Optional[str]:
    """Run ``docker <cmd>`` and return its stripped output, or None on error."""
    try:
        raw = subprocess.check_output(["docker"] + cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e)  # noqa: T201
        return None
    return raw.decode("utf8").strip()
# Cached result of the buildx probe; None means "not checked yet".
_buildx_installed = None


def is_buildx_installed() -> bool:
    """Return `True` if docker buildx is installed and working."""
    global _buildx_installed
    if _buildx_installed is None:
        if shutil.which("docker"):
            # Probe buildx through its help output.
            help_output = shell(["buildx", "--help"])
            _buildx_installed = bool(help_output) and "buildx" in help_output
        else:
            _buildx_installed = False
    return _buildx_installed  # type: ignore
def is_docker_installed() -> bool:
    """Return `True` if docker is installed and working, else `False`."""
    try:
        completed = subprocess.run(
            ["docker", "--version"],
            capture_output=True,
        )
    except FileNotFoundError:
        # The docker binary is not on PATH at all.
        return False
    return completed.returncode == 0
def build(
    tags: List[str], file: str, context_path: str, platform: Optional[str] = None
) -> str:
    """Build a docker image via ``docker build`` / ``docker buildx build``.

    Args:
        tags: Image tags to apply; each becomes a ``-t`` argument.
        file: Path to the Dockerfile.
        context_path: Build context directory.
        platform: Optional target platform(s), comma separated.

    Returns:
        The captured build output.
    """
    use_buildx = is_buildx_installed()
    command = ["buildx", "build"] if use_buildx else ["build"]
    if use_buildx and should_add_load_argument(platform):
        # --load makes buildx put the result into the local image store.
        command.append("--load")
    if platform:
        command.extend(["--platform", platform])
    tag_args: List[str] = []
    for image_tag in tags:
        tag_args.extend(["-t", image_tag])
    full_command = ["docker"] + command + tag_args + ["-f", file, context_path]
    return run_command_live_output(full_command)
def should_add_load_argument(platform: Optional[str]) -> bool:
    """Decide whether ``--load`` may be passed to ``docker buildx build``.

    ``--load`` does not work when multiple platforms are specified:
    https://github.com/docker/buildx/issues/59
    """
    if platform is None:
        return True
    # An empty platform string disables --load (matches historical
    # behavior); a single platform (no comma) allows it.
    return bool(platform) and "," not in platform
def run_command_live_output(args: List[Any]) -> str:
    """Run *args*, echoing output live while capturing a transcript.

    Chunks containing a carriage return (e.g. self-overwriting progress
    bars) are echoed to the terminal but NOT accumulated into the returned
    transcript; only plain chunks are captured.

    Raises:
        DockerError: If the process exits with a nonzero return code.
    """
    with subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        bufsize=1,
    ) as process:
        stdout = ""
        while True:
            # Read raw bytes from the pipe so partial lines (progress bars)
            # arrive promptly instead of waiting for a newline.
            chunk = os.read(process.stdout.fileno(), 4096)  # type: ignore
            if not chunk:
                break
            index = chunk.find(b"\r")
            if index != -1:
                # Carriage-return chunk: echo in place, don't keep it.
                print(chunk.decode(), end="")  # noqa: T201
            else:
                stdout += chunk.decode()
                print(chunk.decode(), end="\r")  # noqa: T201
        print(stdout)  # noqa: T201
    # The context manager already waited for stream shutdown; wait() here
    # just retrieves the final exit status.
    return_code = process.wait()
    if return_code != 0:
        raise DockerError(args, return_code, stdout.encode())
    return stdout
def run(
    args: List[Any],
    capture_stdout: bool = True,
    capture_stderr: bool = True,
    input: Optional[bytes] = None,
    return_stderr: bool = False,
    env: Optional[Dict[str, str]] = None,
) -> Union[str, Tuple[str, str]]:
    """Run a command to completion, raising DockerError on failure.

    Args:
        args: Full argv to execute (e.g. ``["docker", "build", ...]``);
            elements are coerced to ``str``.
        capture_stdout: Capture stdout instead of inheriting the parent's.
        capture_stderr: Capture stderr instead of inheriting the parent's.
        input: Optional bytes fed to the process's stdin.
        return_stderr: If True, return ``(stdout, stderr)`` instead of
            just stdout.
        env: Extra environment variables layered over ``os.environ``.

    Returns:
        The post-processed stdout string, or a ``(stdout, stderr)`` tuple
        when ``return_stderr`` is True.

    Raises:
        DockerError: If the process exits with a nonzero return code.
    """
    args = [str(x) for x in args]
    subprocess_env = dict(os.environ)
    subprocess_env.update(env or {})
    # `docker buildx` may need the experimental CLI flag on older docker
    # versions. Guard the index so a short argv can't raise IndexError.
    if len(args) > 1 and args[1] == "buildx":
        subprocess_env["DOCKER_CLI_EXPERIMENTAL"] = "enabled"
    stdout_dest: Optional[int] = subprocess.PIPE if capture_stdout else None
    stderr_dest: Optional[int] = subprocess.PIPE if capture_stderr else None
    completed_process = subprocess.run(
        args, input=input, stdout=stdout_dest, stderr=stderr_dest, env=subprocess_env
    )
    if completed_process.returncode != 0:
        raise DockerError(
            args,
            completed_process.returncode,
            completed_process.stdout,
            completed_process.stderr,
        )
    if return_stderr:
        return (
            _post_process_stream(completed_process.stdout),
            _post_process_stream(completed_process.stderr),
        )
    return _post_process_stream(completed_process.stdout)
def _post_process_stream(stream: Optional[bytes]) -> str:
if stream is None:
return ""
decoded_stream = stream.decode()
if len(decoded_stream) != 0 and decoded_stream[-1] == "\n":
decoded_stream = decoded_stream[:-1]
return decoded_stream
def default_image(gpu: bool = False) -> str:
    """Return the default deepo image reference for the requested device."""
    suffix = "all" if gpu else "all-cpu"
    return f"wandb/deepo:{suffix}"
def parse_repository_tag(repo_name: str) -> Tuple[str, Optional[str]]:
    """Split an image reference into (repository, tag-or-digest).

    A ``@digest`` suffix wins over a ``:tag`` suffix; a colon that is part
    of a registry host (i.e. followed by a ``/``) is not treated as a tag.
    """
    if "@" in repo_name:
        repo, _, digest = repo_name.rpartition("@")
        return repo, digest
    repo, sep, candidate = repo_name.rpartition(":")
    if sep and "/" not in candidate:
        return repo, candidate
    return repo_name, None
def parse(image_name: str) -> Tuple[str, str, str]:
    """Split an image reference into ``(registry, repository, tag)``."""
    repository, tag = parse_repository_tag(image_name)
    registry, repo_name = names.resolve_repository_name(repository)
    if registry == "docker.io":
        # Normalize the Docker Hub alias to the actual index host.
        registry = "index.docker.io"
    return registry, repo_name, tag or "latest"
def image_id_from_registry(image_name: str) -> Optional[str]:
    """Query the image manifest to get its full ID including the digest.

    Args:
        image_name: The image name, such as "wandb/local".

    Returns:
        The image name followed by its digest, like "wandb/local@sha256:...",
        or None when the inspect call fails.
    """
    # https://docs.docker.com/reference/cli/docker/buildx/imagetools/inspect
    return shell(
        [
            "buildx",
            "imagetools",
            "inspect",
            image_name,
            "--format",
            r"{{.Name}}@{{.Manifest.Digest}}",
        ]
    )
def image_id(image_name: str) -> Optional[str]:
    """Retrieve the image id from the local docker daemon or remote registry."""
    if "@sha256:" in image_name:
        # Already pinned to a digest; nothing to resolve.
        return image_name
    digests = shell(["inspect", image_name, "--format", "{{json .RepoDigests}}"])
    if digests is not None:
        try:
            return json.loads(digests)[0]
        except (ValueError, IndexError):
            # Unparseable or empty RepoDigests; fall back to the registry.
            pass
    return image_id_from_registry(image_name)
def get_image_uid(image_name: str) -> int:
    """Retrieve the image default uid through brute force.

    Runs ``id -u`` inside the image; returns -1 when that fails.
    """
    uid_output = shell(["run", image_name, "id", "-u"])
    if not uid_output:
        return -1
    return int(uid_output)
def push(image: str, tag: str) -> Optional[str]:
    """Push an image to a remote registry."""
    target = f"{image}:{tag}"
    return shell(["push", target])
def login(username: str, password: str, registry: str) -> Optional[str]:
    """Login to a registry."""
    cmd = ["login", "--username", username, "--password", password, registry]
    return shell(cmd)
def tag(image_name: str, tag: str) -> Optional[str]:
    """Tag an image."""
    cmd = ["tag", image_name, tag]
    return shell(cmd)
# Public API of this module; internal helpers (e.g. _post_process_stream,
# run_command_live_output) are intentionally omitted.
__all__ = [
    "shell",
    "build",
    "run",
    "image_id",
    "image_id_from_registry",
    "is_docker_installed",
    "parse",
    "parse_repository_tag",
    "default_image",
    "get_image_uid",
    "push",
    "login",
    "tag",
]
|
DockerError
|
python
|
great-expectations__great_expectations
|
great_expectations/metrics/column/values_non_null.py
|
{
"start": 283,
"end": 424
}
|
class ____(ColumnMetric[ColumnValuesNonNullResult]):
name = f"column_values.nonnull.{MetricNameSuffix.CONDITION.value}"
|
ColumnValuesNonNull
|
python
|
kamyu104__LeetCode-Solutions
|
Python/circular-permutation-in-binary-representation.py
|
{
"start": 31,
"end": 254
}
|
class ____(object):
def circularPermutation(self, n, start):
"""
:type n: int
:type start: int
:rtype: List[int]
"""
return [start ^ (i>>1) ^ i for i in xrange(1<<n)]
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/saving/saved_model/model_serialization.py
|
{
"start": 1029,
"end": 2504
}
|
class ____(layer_serialization.LayerSavedModelSaver):
"""Model SavedModel serialization."""
@property
def object_identifier(self):
return constants.MODEL_IDENTIFIER
def _python_properties_internal(self):
metadata = super(ModelSavedModelSaver, self)._python_properties_internal()
# Network stateful property is dependent on the child layers.
metadata.pop('stateful')
metadata['is_graph_network'] = self.obj._is_graph_network # pylint: disable=protected-access
metadata['save_spec'] = self.obj._get_save_spec(dynamic_batch=False) # pylint: disable=protected-access
metadata.update(
saving_utils.model_metadata(
self.obj, include_optimizer=True, require_config=False))
return metadata
def _get_serialized_attributes_internal(self, serialization_cache):
default_signature = None
# Create a default signature function if this is the only object in the
# cache (i.e. this is the root level object).
if len(serialization_cache[constants.KERAS_CACHE_KEY]) == 1:
default_signature = save_impl.default_save_signature(self.obj)
# Other than the default signature function, all other attributes match with
# the ones serialized by Layer.
objects, functions = (
super(ModelSavedModelSaver, self)._get_serialized_attributes_internal(
serialization_cache))
functions['_default_save_signature'] = default_signature
return objects, functions
|
ModelSavedModelSaver
|
python
|
pandas-dev__pandas
|
setup.py
|
{
"start": 2831,
"end": 5388
}
|
class ____(Command):
"""Custom command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self) -> None:
self.all = True
self._clean_me = []
self._clean_trees = []
base = pjoin("pandas", "_libs", "src")
parser = pjoin(base, "parser")
vendored = pjoin(base, "vendored")
dt = pjoin(base, "datetime")
ujson_python = pjoin(vendored, "ujson", "python")
ujson_lib = pjoin(vendored, "ujson", "lib")
self._clean_exclude = [
pjoin(vendored, "numpy", "datetime", "np_datetime.c"),
pjoin(vendored, "numpy", "datetime", "np_datetime_strings.c"),
pjoin(dt, "date_conversions.c"),
pjoin(parser, "tokenizer.c"),
pjoin(parser, "io.c"),
pjoin(ujson_python, "ujson.c"),
pjoin(ujson_python, "objToJSON.c"),
pjoin(ujson_python, "JSONtoObj.c"),
pjoin(ujson_lib, "ultrajsonenc.c"),
pjoin(ujson_lib, "ultrajsondec.c"),
pjoin(dt, "pd_datetime.c"),
pjoin(parser, "pd_parser.c"),
]
for root, dirs, files in os.walk("pandas"):
for f in files:
filepath = pjoin(root, f)
if filepath in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in (
".pyc",
".so",
".o",
".pyo",
".pyd",
".c",
".cpp",
".orig",
):
self._clean_me.append(filepath)
self._clean_trees.append(pjoin(root, d) for d in dirs if d == "__pycache__")
# clean the generated pxi files
for pxifile in _pxifiles:
pxifile_replaced = pxifile.replace(".pxi.in", ".pxi")
self._clean_me.append(pxifile_replaced)
self._clean_trees.append(d for d in ("build", "dist") if os.path.exists(d))
def finalize_options(self) -> None:
pass
def run(self) -> None:
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except OSError:
pass
for clean_tree in self._clean_trees:
try:
shutil.rmtree(clean_tree)
except OSError:
pass
# we need to inherit from the versioneer
# class as it encodes the version info
sdist_class = cmdclass["sdist"]
|
CleanCommand
|
python
|
astropy__astropy
|
astropy/convolution/kernels.py
|
{
"start": 764,
"end": 2778
}
|
class ____(Kernel1D):
"""
1D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
stddev : number
Standard deviation of the Gaussian kernel.
x_size : int, optional
Size of the kernel array. Default = ⌊8*stddev+1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin. Very slow.
factor : number, optional
Factor of oversampling. Default factor = 10. If the factor
is too large, evaluation can be very slow.
See Also
--------
Box1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian1DKernel
gauss_1D_kernel = Gaussian1DKernel(10)
plt.plot(gauss_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, stddev, **kwargs):
self._model = models.Gaussian1D(1.0 / (np.sqrt(2 * np.pi) * stddev), 0, stddev)
self._default_size = _round_up_to_odd_integer(8 * stddev)
super().__init__(**kwargs)
self.normalize()
|
Gaussian1DKernel
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1beta1_service_cidr_status.py
|
{
"start": 383,
"end": 3794
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'conditions': 'list[V1Condition]'
}
attribute_map = {
'conditions': 'conditions'
}
def __init__(self, conditions=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ServiceCIDRStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._conditions = None
self.discriminator = None
if conditions is not None:
self.conditions = conditions
@property
def conditions(self):
"""Gets the conditions of this V1beta1ServiceCIDRStatus. # noqa: E501
conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state # noqa: E501
:return: The conditions of this V1beta1ServiceCIDRStatus. # noqa: E501
:rtype: list[V1Condition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1beta1ServiceCIDRStatus.
conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state # noqa: E501
:param conditions: The conditions of this V1beta1ServiceCIDRStatus. # noqa: E501
:type: list[V1Condition]
"""
self._conditions = conditions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ServiceCIDRStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ServiceCIDRStatus):
return True
return self.to_dict() != other.to_dict()
|
V1beta1ServiceCIDRStatus
|
python
|
pikepdf__pikepdf
|
tests/test_pdf.py
|
{
"start": 3190,
"end": 4440
}
|
class ____:
def test_some_permissions_missing(self, resources):
with Pdf.open(resources / 'graph-encrypted.pdf', password='owner') as pdf:
assert not pdf.allow.print_highres
assert not pdf.allow.modify_annotation
assert pdf.allow.print_lowres
def test_all_true_not_encrypted(self, trivial):
assert all(trivial.allow)
def test_omit_encryption_removes_encryption(self, resources, outdir):
with Pdf.open(resources / 'graph-encrypted.pdf', password='owner') as pdf:
pdf.save(outdir / 'true.pdf')
with Pdf.open(outdir / 'true.pdf') as pdf_copy:
assert pdf.allow != pdf_copy.allow
@pytest.mark.parametrize('encryption, expected', [[True, True], [False, False]])
@pytest.mark.filterwarnings('ignore:A password was provided')
def test_permissions_preserved_on_save(
self, resources, outdir, encryption, expected
):
with Pdf.open(resources / 'graph-encrypted.pdf', password='owner') as pdf:
pdf.save(outdir / 'true.pdf', encryption=encryption)
with Pdf.open(outdir / 'true.pdf', password='owner') as pdf_copy:
assert (pdf.allow == pdf_copy.allow) == expected
|
TestPermissions
|
python
|
getsentry__sentry
|
tests/acceptance/test_explore_spans.py
|
{
"start": 409,
"end": 2900
}
|
class ____(AcceptanceTestCase, SpanTestCase, SnubaTestCase):
viewname = "sentry-api-0-organization-events"
def setUp(self) -> None:
super().setUp()
self.start = self.day_ago = before_now(days=1).replace(
hour=10, minute=0, second=0, microsecond=0
)
self.start_minus_one_minute = self.start - timedelta(minutes=1)
self.start_minus_two_minutes = self.start - timedelta(minutes=2)
self.organization = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(
organization=self.organization, name="Mariachi Band", members=[self.user]
)
self.project = self.create_project(
organization=self.organization, teams=[self.team], name="Bengal"
)
self.login_as(self.user)
self.page = ExploreSpansPage(self.browser, self.client)
self.dismiss_assistant(which="tour.explore.spans")
@patch("django.utils.timezone.now")
def test_spans_table_loads_all_events(self, mock_now: MagicMock) -> None:
mock_now.return_value = self.start
assert (
self.browser.driver.get_window_size().get("width") == 1680
) # This test makes assertions based on the current default window size.
with self.feature(FEATURE_FLAGS):
spans = [
self.create_span(
{"description": "foo", "sentry_tags": {"status": "success"}},
start_ts=self.start_minus_one_minute,
),
self.create_span(
{
"description": "bar",
"sentry_tags": {"status": "invalid_argument"},
},
start_ts=self.start_minus_two_minutes,
),
]
self.store_spans(
spans,
is_eap=True,
)
self.page.visit_explore_spans(self.organization.slug)
for span in spans:
span_row = self.page.get_spans_row_with_id(span["span_id"][:8])
column_objects = self.page.get_spans_row_columns(span_row)
row_text = [element.text for element in column_objects]
# Just checking that the attrs of the span are here so test isn't dependent on the order of columns
assert span["span_id"][:8] in row_text
assert span["description"] in row_text
|
ExploreSpansTest
|
python
|
huggingface__transformers
|
src/transformers/models/emu3/modular_emu3.py
|
{
"start": 1612,
"end": 3075
}
|
class ____(LlamaDecoderLayer):
def __init__(self, config: Emu3Config, layer_idx: int):
super().__init__(config, layer_idx)
self.dropout = nn.Dropout(config.attention_dropout)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + self.dropout(hidden_states)
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
return hidden_states
|
Emu3DecoderLayer
|
python
|
psf__black
|
tests/test_black.py
|
{
"start": 119288,
"end": 124034
}
|
class ____(BlackBaseTestCase):
def check_ast_equivalence(
self, source: str, dest: str, *, should_fail: bool = False
) -> None:
# If we get a failure, make sure it's not because the code itself
# is invalid, since that will also cause assert_equivalent() to throw
# ASTSafetyError.
source = textwrap.dedent(source)
dest = textwrap.dedent(dest)
black.parse_ast(source)
black.parse_ast(dest)
if should_fail:
with self.assertRaises(ASTSafetyError):
black.assert_equivalent(source, dest)
else:
black.assert_equivalent(source, dest)
def test_assert_equivalent_basic(self) -> None:
self.check_ast_equivalence("{}", "None", should_fail=True)
self.check_ast_equivalence("1+2", "1 + 2")
self.check_ast_equivalence("hi # comment", "hi")
def test_assert_equivalent_del(self) -> None:
self.check_ast_equivalence("del (a, b)", "del a, b")
def test_assert_equivalent_strings(self) -> None:
self.check_ast_equivalence('x = "x"', 'x = " x "', should_fail=True)
self.check_ast_equivalence(
'''
"""docstring """
''',
'''
"""docstring"""
''',
)
self.check_ast_equivalence(
'''
"""docstring """
''',
'''
"""ddocstring"""
''',
should_fail=True,
)
self.check_ast_equivalence(
'''
class A:
"""
docstring
"""
''',
'''
class A:
"""docstring"""
''',
)
self.check_ast_equivalence(
"""
def f():
" docstring "
""",
'''
def f():
"""docstring"""
''',
)
self.check_ast_equivalence(
"""
async def f():
" docstring "
""",
'''
async def f():
"""docstring"""
''',
)
self.check_ast_equivalence(
"""
if __name__ == "__main__":
" docstring-like "
""",
'''
if __name__ == "__main__":
"""docstring-like"""
''',
)
self.check_ast_equivalence(r'def f(): r" \n "', r'def f(): "\\n"')
self.check_ast_equivalence('try: pass\nexcept: " x "', 'try: pass\nexcept: "x"')
self.check_ast_equivalence(
'def foo(): return " x "', 'def foo(): return "x"', should_fail=True
)
def test_assert_equivalent_fstring(self) -> None:
major, minor = sys.version_info[:2]
if major < 3 or (major == 3 and minor < 12):
pytest.skip("relies on 3.12+ syntax")
# https://github.com/psf/black/issues/4268
self.check_ast_equivalence(
"""print(f"{"|".join([a,b,c])}")""",
"""print(f"{" | ".join([a,b,c])}")""",
should_fail=True,
)
self.check_ast_equivalence(
"""print(f"{"|".join(['a','b','c'])}")""",
"""print(f"{" | ".join(['a','b','c'])}")""",
should_fail=True,
)
def test_equivalency_ast_parse_failure_includes_error(self) -> None:
with pytest.raises(ASTSafetyError) as err:
black.assert_equivalent("a«»a = 1", "a«»a = 1")
err.match("--safe")
# Unfortunately the SyntaxError message has changed in newer versions so we
# can't match it directly.
err.match("invalid character")
err.match(r"\(<unknown>, line 1\)")
try:
with open(black.__file__, encoding="utf-8") as _bf:
black_source_lines = _bf.readlines()
except UnicodeDecodeError:
if not black.COMPILED:
raise
def tracefunc(
frame: types.FrameType, event: str, arg: Any
) -> Callable[[types.FrameType, str, Any], Any]:
"""Show function calls `from black/__init__.py` as they happen.
Register this with `sys.settrace()` in a test you're debugging.
"""
if event != "call":
return tracefunc
stack = len(inspect.stack()) - 19
stack *= 2
filename = frame.f_code.co_filename
lineno = frame.f_lineno
func_sig_lineno = lineno - 1
funcname = black_source_lines[func_sig_lineno].strip()
while funcname.startswith("@"):
func_sig_lineno += 1
funcname = black_source_lines[func_sig_lineno].strip()
if "black/__init__.py" in filename:
print(f"{' ' * stack}{lineno}:{funcname}")
return tracefunc
|
TestASTSafety
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_deadline.py
|
{
"start": 7525,
"end": 18687
}
|
class ____:
@staticmethod
def setup_method():
_clean_db()
@staticmethod
def teardown_method():
_clean_db()
@pytest.mark.parametrize(
("column", "conditions", "expected_query"),
[
pytest.param(
DagRun.logical_date,
{"dag_id": DAG_ID},
"SELECT dag_run.logical_date \nFROM dag_run \nWHERE dag_run.dag_id = :dag_id_1",
id="single_condition_logical_date",
),
pytest.param(
DagRun.queued_at,
{"dag_id": DAG_ID},
"SELECT dag_run.queued_at \nFROM dag_run \nWHERE dag_run.dag_id = :dag_id_1",
id="single_condition_queued_at",
),
pytest.param(
DagRun.logical_date,
{"dag_id": DAG_ID, "state": "running"},
"SELECT dag_run.logical_date \nFROM dag_run \nWHERE dag_run.dag_id = :dag_id_1 AND dag_run.state = :state_1",
id="multiple_conditions",
),
],
)
@mock.patch("sqlalchemy.orm.Session")
def test_fetch_from_db_success(self, mock_session, column, conditions, expected_query):
"""Test successful database queries."""
mock_session.scalar.return_value = DEFAULT_DATE
result = _fetch_from_db(column, session=mock_session, **conditions)
assert isinstance(result, datetime)
mock_session.scalar.assert_called_once()
# Check that the correct query was constructed
call_args = mock_session.scalar.call_args[0][0]
assert str(call_args) == expected_query
# Verify the actual parameter values
compiled = call_args.compile()
for key, value in conditions.items():
# Note that SQLAlchemy appends the _1 to ensure unique template field names
assert compiled.params[f"{key}_1"] == value
@pytest.mark.parametrize(
("use_valid_conditions", "scalar_side_effect", "expected_error", "expected_message"),
[
pytest.param(
False,
mock.DEFAULT, # This will allow the call to pass through
AttributeError,
None,
id="invalid_attribute",
),
pytest.param(
True,
SQLAlchemyError("Database connection failed"),
SQLAlchemyError,
"Database connection failed",
id="database_error",
),
pytest.param(
True, lambda x: None, ValueError, "No matching record found in the database", id="no_results"
),
],
)
@mock.patch("sqlalchemy.orm.Session")
def test_fetch_from_db_error_cases(
self, mock_session, use_valid_conditions, scalar_side_effect, expected_error, expected_message
):
"""Test database access error handling."""
model_reference = DagRun.logical_date
conditions = {"dag_id": "test_dag"} if use_valid_conditions else {"non_existent_column": "some_value"}
# Configure mock session
mock_session.scalar.side_effect = scalar_side_effect
with pytest.raises(expected_error, match=expected_message):
_fetch_from_db(model_reference, session=mock_session, **conditions)
@pytest.mark.parametrize(
("reference", "expected_column"),
[
pytest.param(DeadlineReference.DAGRUN_LOGICAL_DATE, DagRun.logical_date, id="logical_date"),
pytest.param(DeadlineReference.DAGRUN_QUEUED_AT, DagRun.queued_at, id="queued_at"),
pytest.param(DeadlineReference.FIXED_DATETIME(DEFAULT_DATE), None, id="fixed_deadline"),
pytest.param(DeadlineReference.AVERAGE_RUNTIME(), None, id="average_runtime"),
],
)
def test_deadline_database_integration(self, reference, expected_column, session):
"""
Test database integration for all deadline types.
Verifies:
1. Calculated deadlines call _fetch_from_db with correct column.
2. Fixed deadlines do not interact with database.
3. Intervals are added to reference times.
"""
conditions = {"dag_id": DAG_ID, "run_id": "dagrun_1"}
interval = timedelta(hours=1)
with mock.patch("airflow.models.deadline._fetch_from_db") as mock_fetch:
mock_fetch.return_value = DEFAULT_DATE
if expected_column is not None:
result = reference.evaluate_with(session=session, interval=interval, **conditions)
mock_fetch.assert_called_once_with(expected_column, session=session, **conditions)
elif reference == DeadlineReference.AVERAGE_RUNTIME():
with mock.patch("airflow._shared.timezones.timezone.utcnow") as mock_utcnow:
mock_utcnow.return_value = DEFAULT_DATE
# No DAG runs exist, so it should use 24-hour default
result = reference.evaluate_with(session=session, interval=interval, dag_id=DAG_ID)
mock_fetch.assert_not_called()
# Should return None when no DAG runs exist
assert result is None
else:
result = reference.evaluate_with(session=session, interval=interval)
mock_fetch.assert_not_called()
assert result == DEFAULT_DATE + interval
def test_average_runtime_with_sufficient_history(self, session, dag_maker):
"""Test AverageRuntimeDeadline when enough historical data exists."""
with dag_maker(DAG_ID):
EmptyOperator(task_id="test_task")
# Create 10 completed DAG runs with known durations
base_time = DEFAULT_DATE
durations = [3600, 7200, 1800, 5400, 2700, 4500, 3300, 6000, 2400, 4200]
for i, duration in enumerate(durations):
logical_date = base_time + timedelta(days=i)
start_time = logical_date + timedelta(minutes=5)
end_time = start_time + timedelta(seconds=duration)
dagrun = dag_maker.create_dagrun(
logical_date=logical_date, run_id=f"test_run_{i}", state=DagRunState.SUCCESS
)
# Manually set start and end times
dagrun.start_date = start_time
dagrun.end_date = end_time
session.commit()
# Test with default max_runs (10)
reference = DeadlineReference.AVERAGE_RUNTIME()
interval = timedelta(hours=1)
with mock.patch("airflow._shared.timezones.timezone.utcnow") as mock_utcnow:
mock_utcnow.return_value = DEFAULT_DATE
result = reference.evaluate_with(session=session, interval=interval, dag_id=DAG_ID)
# Calculate expected average: sum(durations) / len(durations)
expected_avg_seconds = sum(durations) / len(durations)
expected = DEFAULT_DATE + timedelta(seconds=expected_avg_seconds) + interval
# Compare only up to minutes to avoid sub-second timing issues in CI
assert result.replace(second=0, microsecond=0) == expected.replace(second=0, microsecond=0)
def test_average_runtime_with_insufficient_history(self, session, dag_maker):
"""Test AverageRuntimeDeadline when insufficient historical data exists."""
with dag_maker(DAG_ID):
EmptyOperator(task_id="test_task")
# Create only 5 completed DAG runs (less than default max_runs of 10)
base_time = DEFAULT_DATE
durations = [3600, 7200, 1800, 5400, 2700]
for i, duration in enumerate(durations):
logical_date = base_time + timedelta(days=i)
start_time = logical_date + timedelta(minutes=5)
end_time = start_time + timedelta(seconds=duration)
dagrun = dag_maker.create_dagrun(
logical_date=logical_date, run_id=f"insufficient_run_{i}", state=DagRunState.SUCCESS
)
# Manually set start and end times
dagrun.start_date = start_time
dagrun.end_date = end_time
session.commit()
reference = DeadlineReference.AVERAGE_RUNTIME()
interval = timedelta(hours=1)
with mock.patch("airflow._shared.timezones.timezone.utcnow") as mock_utcnow:
mock_utcnow.return_value = DEFAULT_DATE
result = reference.evaluate_with(session=session, interval=interval, dag_id=DAG_ID)
# Should return None since insufficient runs
assert result is None
def test_average_runtime_with_min_runs(self, session, dag_maker):
"""Test AverageRuntimeDeadline with min_runs parameter allowing calculation with fewer runs."""
with dag_maker(DAG_ID):
EmptyOperator(task_id="test_task")
# Create only 3 completed DAG runs
base_time = DEFAULT_DATE
durations = [3600, 7200, 1800] # 1h, 2h, 30min
for i, duration in enumerate(durations):
logical_date = base_time + timedelta(days=i)
start_time = logical_date + timedelta(minutes=5)
end_time = start_time + timedelta(seconds=duration)
dagrun = dag_maker.create_dagrun(
logical_date=logical_date, run_id=f"min_runs_test_{i}", state=DagRunState.SUCCESS
)
# Manually set start and end times
dagrun.start_date = start_time
dagrun.end_date = end_time
session.commit()
# Test with min_runs=2, should work with 3 runs
reference = DeadlineReference.AVERAGE_RUNTIME(max_runs=10, min_runs=2)
interval = timedelta(hours=1)
with mock.patch("airflow._shared.timezones.timezone.utcnow") as mock_utcnow:
mock_utcnow.return_value = DEFAULT_DATE
result = reference.evaluate_with(session=session, interval=interval, dag_id=DAG_ID)
# Should calculate average from 3 runs
expected_avg_seconds = sum(durations) / len(durations) # 4200 seconds
expected = DEFAULT_DATE + timedelta(seconds=expected_avg_seconds) + interval
# Compare only up to minutes to avoid sub-second timing issues in CI
assert result.replace(second=0, microsecond=0) == expected.replace(second=0, microsecond=0)
# Test with min_runs=5, should return None with only 3 runs
reference = DeadlineReference.AVERAGE_RUNTIME(max_runs=10, min_runs=5)
with mock.patch("airflow._shared.timezones.timezone.utcnow") as mock_utcnow:
mock_utcnow.return_value = DEFAULT_DATE
result = reference.evaluate_with(session=session, interval=interval, dag_id=DAG_ID)
assert result is None
def test_average_runtime_min_runs_validation(self):
"""Test that min_runs must be at least 1."""
with pytest.raises(ValueError, match="min_runs must be at least 1"):
DeadlineReference.AVERAGE_RUNTIME(max_runs=10, min_runs=0)
with pytest.raises(ValueError, match="min_runs must be at least 1"):
DeadlineReference.AVERAGE_RUNTIME(max_runs=10, min_runs=-1)
|
TestCalculatedDeadlineDatabaseCalls
|
python
|
gevent__gevent
|
src/gevent/testing/flaky.py
|
{
"start": 1931,
"end": 4104
}
|
class ____(FlakyTest):
"""
Use this when the test sometimes crashes.
"""
def reraiseFlakyTestRaceCondition():
six.reraise(FlakyAssertionError,
FlakyAssertionError(sys.exc_info()[1]),
sys.exc_info()[2])
reraiseFlakyTestTimeout = reraiseFlakyTestRaceCondition
reraiseFlakyTestRaceConditionLibuv = reraiseFlakyTestRaceCondition
reraiseFlakyTestTimeoutLibuv = reraiseFlakyTestRaceCondition
if sysinfo.RUNNING_ON_CI or (sysinfo.PYPY and sysinfo.WIN):
# pylint: disable=function-redefined
def reraiseFlakyTestRaceCondition():
# Getting stack traces is incredibly expensive
# in pypy on win, at least in test virtual machines.
# It can take minutes. The traceback consistently looks like
# the following when interrupted:
# dump_stacks -> traceback.format_stack
# -> traceback.extract_stack -> linecache.checkcache
# -> os.stat -> _structseq.structseq_new
# Moreover, without overriding __repr__ or __str__,
# the msg doesn't get printed like we would want (its basically
# unreadable, all printed on one line). So skip that.
#msg = '\n'.join(dump_stacks())
msg = str(sys.exc_info()[1])
six.reraise(FlakyTestRaceCondition,
FlakyTestRaceCondition(msg),
sys.exc_info()[2])
def reraiseFlakyTestTimeout():
msg = str(sys.exc_info()[1])
six.reraise(FlakyTestTimeout,
FlakyTestTimeout(msg),
sys.exc_info()[2])
if sysinfo.LIBUV:
reraiseFlakyTestRaceConditionLibuv = reraiseFlakyTestRaceCondition
reraiseFlakyTestTimeoutLibuv = reraiseFlakyTestTimeout
def reraises_flaky_timeout(exc_kind=AssertionError, _func=reraiseFlakyTestTimeout):
def wrapper(f):
@functools.wraps(f)
def m(*args):
try:
f(*args)
except exc_kind:
_func()
return m
return wrapper
def reraises_flaky_race_condition(exc_kind=AssertionError):
return reraises_flaky_timeout(exc_kind, _func=reraiseFlakyTestRaceCondition)
|
FlakyTestCrashes
|
python
|
crytic__slither
|
slither/detectors/functions/permit_domain_signature_collision.py
|
{
"start": 489,
"end": 3483
}
|
class ____(AbstractDetector):
"""
Domain separator collision
"""
ARGUMENT = "domain-separator-collision"
HELP = "Detects ERC20 tokens that have a function whose signature collides with EIP-2612's DOMAIN_SEPARATOR()"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.HIGH
WIKI = (
"https://github.com/crytic/slither/wiki/Detector-Documentation#domain-separator-collision"
)
WIKI_TITLE = "Domain separator collision"
WIKI_DESCRIPTION = "An ERC20 token has a function whose signature collides with EIP-2612's DOMAIN_SEPARATOR(), causing unanticipated behavior for contracts using `permit` functionality."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Contract{
function some_collisions() external() {}
}
```
`some_collision` clashes with EIP-2612's DOMAIN_SEPARATOR() and will interfere with contract's using `permit`."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Remove or rename the function that collides with DOMAIN_SEPARATOR()."
def _detect(self) -> List[Output]:
domain_sig = get_function_id("DOMAIN_SEPARATOR()")
for contract in self.compilation_unit.contracts_derived:
if contract.is_erc20():
funcs_and_vars: List[Union[Function, StateVariable]] = contract.functions_entry_points + contract.state_variables_entry_points # type: ignore
for func_or_var in funcs_and_vars:
# External/ public function names should not collide with DOMAIN_SEPARATOR()
hash_collision = (
func_or_var.solidity_signature != "DOMAIN_SEPARATOR()"
and get_function_id(func_or_var.solidity_signature) == domain_sig
)
# DOMAIN_SEPARATOR() should return bytes32
incorrect_return_type = func_or_var.solidity_signature == "DOMAIN_SEPARATOR()"
if incorrect_return_type:
if isinstance(func_or_var, Function):
incorrect_return_type = (
not func_or_var.return_type
or func_or_var.return_type[0] != ElementaryType("bytes32")
)
else:
assert isinstance(func_or_var, StateVariable)
incorrect_return_type = func_or_var.type != ElementaryType("bytes32")
if hash_collision or incorrect_return_type:
info: DETECTOR_INFO = [
"The function signature of ",
func_or_var,
" collides with DOMAIN_SEPARATOR and should be renamed or removed.\n",
]
res = self.generate_result(info)
return [res]
return []
|
DomainSeparatorCollision
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1010252,
"end": 1010845
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of UnlinkRepositoryFromProject"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project", "repository")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project = sgqlc.types.Field("Project", graphql_name="project")
"""The linked Project."""
repository = sgqlc.types.Field("Repository", graphql_name="repository")
"""The linked Repository."""
|
UnlinkRepositoryFromProjectPayload
|
python
|
scipy__scipy
|
scipy/optimize/tests/test_lsq_linear.py
|
{
"start": 9507,
"end": 11000
}
|
class ____:
def test_option_lsmr_tol(self):
# Should work with a positive float, string equal to 'auto', or None
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1e-2)
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='auto')
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=None)
# Should raise error with negative float, strings
# other than 'auto', and integers
err_message = "`lsmr_tol` must be None, 'auto', or positive float."
with pytest.raises(ValueError, match=err_message):
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=-0.1)
with pytest.raises(ValueError, match=err_message):
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='foo')
with pytest.raises(ValueError, match=err_message):
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1)
def test_option_lsmr_maxiter(self):
# Should work with positive integers or None
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=1)
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=None)
# Should raise error with 0 or negative max iter
err_message = "`lsmr_maxiter` must be None or positive integer."
with pytest.raises(ValueError, match=err_message):
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=0)
with pytest.raises(ValueError, match=err_message):
_ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=-1)
|
TestErrorChecking
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/postgresql/asyncpg.py
|
{
"start": 15666,
"end": 16388
}
|
class ____(PGExecutionContext):
def handle_dbapi_exception(self, e):
if isinstance(
e,
(
self.dialect.dbapi.InvalidCachedStatementError,
self.dialect.dbapi.InternalServerError,
),
):
self.dialect._invalidate_schema_cache()
def pre_exec(self):
if self.isddl:
self.dialect._invalidate_schema_cache()
self.cursor._invalidate_schema_cache_asof = (
self.dialect._invalidate_schema_cache_asof
)
if not self.compiled:
return
def create_server_side_cursor(self):
return self._dbapi_connection.cursor(server_side=True)
|
PGExecutionContext_asyncpg
|
python
|
gevent__gevent
|
src/greentest/3.10/test_context.py
|
{
"start": 12429,
"end": 12534
}
|
class ____(Exception):
pass
@unittest.skipIf(hamt is None, '_testcapi lacks "hamt()" function')
|
EqError
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 40894,
"end": 41120
}
|
class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("ERROR", "EXPECTED", "FAILURE", "PENDING", "SUCCESS")
String = sgqlc.types.String
|
StatusState
|
python
|
gevent__gevent
|
src/gevent/testing/patched_tests_setup.py
|
{
"start": 22012,
"end": 69384
}
|
class ____(object):
def __init__(self, test_fqn):
self._patcher = wrapped_tests[test_fqn]
def __call__(self, orig_test_fn):
@functools.wraps(orig_test_fn)
def test(*args, **kwargs):
with self._patcher():
return orig_test_fn(*args, **kwargs)
return test
if OSX:
disabled_tests += [
'test_subprocess.POSIXProcessTestCase.test_run_abort',
# causes Mac OS X to show "Python crashes" dialog box which is annoying
]
if WIN:
disabled_tests += [
# Issue with Unix vs DOS newlines in the file vs from the server
'test_ssl.ThreadedTests.test_socketserver',
# This sometimes hangs (only on appveyor)
'test_ssl.ThreadedTests.test_asyncore_server',
# On appveyor, this sometimes produces 'A non-blocking socket
# operation could not be completed immediately', followed by
# 'No connection could be made because the target machine
# actively refused it'
'test_socket.NonBlockingTCPTests.testAccept',
# On appveyor, this test has been seen to fail on 3.9 and 3.8
]
if sys.version_info[:2] <= (3, 9):
disabled_tests += [
'test_context.HamtTest.test_hamt_collision_3',
# Sometimes fails::
#
# self.assertIn('got more than ', str(cm.exception))
# AssertionError: 'got more than ' not found in
# 'Remote end closed connection without response'
#
'test_httplib.BasicTest.test_overflowing_header_limit_after_100',
]
# These are a problem on 3.5; on 3.6+ they wind up getting (accidentally) disabled.
wrapped_tests.update({
'test_socket.SendfileUsingSendTest.testWithTimeout': _flaky_socket_timeout,
'test_socket.SendfileUsingSendTest.testOffset': _flaky_socket_timeout,
'test_socket.SendfileUsingSendTest.testRegularFile': _flaky_socket_timeout,
'test_socket.SendfileUsingSendTest.testCount': _flaky_socket_timeout,
})
if PYPY:
disabled_tests += [
# Does not exist in the CPython test suite, tests for a specific bug
# in PyPy's forking. Only runs on linux and is specific to the PyPy
# implementation of subprocess (possibly explains the extra parameter to
# _execut_child)
'test_subprocess.ProcessTestCase.test_failed_child_execute_fd_leak',
# On some platforms, this returns "zlib_compression", but the test is looking for
# "ZLIB"
'test_ssl.ThreadedTests.test_compression',
# These are flaxy, apparently a race condition? Began with PyPy 2.7-7 and 3.6-7
'test_asyncore.TestAPI_UsePoll.test_handle_error',
'test_asyncore.TestAPI_UsePoll.test_handle_read',
]
if WIN:
disabled_tests += [
# Starting in 7.3.1 on Windows, this stopped raising ValueError; it appears to
# be a bug in PyPy.
'test_signal.WakeupFDTests.test_invalid_fd',
# Likewise for 7.3.1. See the comments for PY35
'test_socket.GeneralModuleTests.test_sock_ioctl',
]
disabled_tests += [
# These are flaky, beginning in 3.6-alpha 7.0, not finding some flag
# set, apparently a race condition
'test_asyncore.TestAPI_UveIPv6Poll.test_handle_accept',
'test_asyncore.TestAPI_UveIPv6Poll.test_handle_accepted',
'test_asyncore.TestAPI_UveIPv6Poll.test_handle_close',
'test_asyncore.TestAPI_UveIPv6Poll.test_handle_write',
'test_asyncore.TestAPI_UseIPV6Select.test_handle_read',
# These are reporting 'ssl has no attribute ...'
# This could just be an OSX thing
'test_ssl.ContextTests.test__create_stdlib_context',
'test_ssl.ContextTests.test_create_default_context',
'test_ssl.ContextTests.test_get_ciphers',
'test_ssl.ContextTests.test_options',
'test_ssl.ContextTests.test_constants',
# These tend to hang for some reason, probably not properly
# closed sockets.
'test_socketserver.SocketServerTest.test_write',
# This uses ctypes to do funky things including using ptrace,
# it hangs
'test_subprocess.ProcessTestcase.test_child_terminated_in_stopped_state',
# Certificate errors; need updated test
'test_urllib2_localnet.TestUrlopen.test_https',
]
# Generic Python 3
disabled_tests += [
# Triggers the crash reporter
'test_threading.SubinterpThreadingTests.test_daemon_threads_fatal_error',
# Relies on an implementation detail, Thread._tstate_lock
'test_threading.ThreadTests.test_tstate_lock',
# Relies on an implementation detail (reprs); we have our own version
'test_threading.ThreadTests.test_various_ops',
'test_threading.ThreadTests.test_various_ops_large_stack',
'test_threading.ThreadTests.test_various_ops_small_stack',
# Relies on Event having a _cond and an _reset_internal_locks()
# XXX: These are commented out in the source code of test_threading because
# this doesn't work.
# 'lock_tests.EventTests.test_reset_internal_locks',
# Python bug 13502. We may or may not suffer from this as its
# basically a timing race condition.
# XXX Same as above
# 'lock_tests.EventTests.test_set_and_clear',
# These tests want to assert on the type of the class that implements
# `Popen.stdin`; we use a FileObject, but they expect different subclasses
# from the `io` module
'test_subprocess.ProcessTestCase.test_io_buffered_by_default',
'test_subprocess.ProcessTestCase.test_io_unbuffered_works',
# 3.3 exposed the `endtime` argument to wait accidentally.
# It is documented as deprecated and not to be used since 3.4
# This test in 3.6.3 wants to use it though, and we don't have it.
'test_subprocess.ProcessTestCase.test_wait_endtime',
# These all want to inspect the string value of an exception raised
# by the exec() call in the child. The _posixsubprocess module arranges
# for better exception handling and printing than we do.
'test_subprocess.POSIXProcessTestCase.test_exception_bad_args_0',
'test_subprocess.POSIXProcessTestCase.test_exception_bad_executable',
'test_subprocess.POSIXProcessTestCase.test_exception_cwd',
# Relies on a 'fork_exec' attribute that we don't provide
'test_subprocess.POSIXProcessTestCase.test_exception_errpipe_bad_data',
'test_subprocess.POSIXProcessTestCase.test_exception_errpipe_normal',
# Python 3 fixed a bug if the stdio file descriptors were closed;
# we still have that bug
'test_subprocess.POSIXProcessTestCase.test_small_errpipe_write_fd',
# Relies on implementation details (some of these tests were added in 3.4,
# but PyPy3 is also shipping them.)
'test_socket.GeneralModuleTests.test_SocketType_is_socketobject',
'test_socket.GeneralModuleTests.test_dealloc_warn',
'test_socket.GeneralModuleTests.test_repr',
'test_socket.GeneralModuleTests.test_str_for_enums',
'test_socket.GeneralModuleTests.testGetaddrinfo',
]
if TRAVIS:
disabled_tests += [
# test_cwd_with_relative_executable tends to fail
# on Travis...it looks like the test processes are stepping
# on each other and messing up their temp directories. We tend to get things like
# saved_dir = os.getcwd()
# FileNotFoundError: [Errno 2] No such file or directory
'test_subprocess.ProcessTestCase.test_cwd_with_relative_arg',
'test_subprocess.ProcessTestCaseNoPoll.test_cwd_with_relative_arg',
'test_subprocess.ProcessTestCase.test_cwd_with_relative_executable',
# In 3.7 and 3.8 on Travis CI, this appears to take the full 3 seconds.
# Can't reproduce it locally. We have our own copy of this that takes
# timing on CI into account.
'test_subprocess.RunFuncTestCase.test_run_with_shell_timeout_and_capture_output',
]
disabled_tests += [
# XXX: BUG: We simply don't handle this correctly. On CPython,
# we wind up raising a BlockingIOError and then
# BrokenPipeError and then some random TypeErrors, all on the
# server. CPython 3.5 goes directly to socket.send() (via
# socket.makefile), whereas CPython 3.6 uses socket.sendall().
# On PyPy, the behaviour is much worse: we hang indefinitely, perhaps exposing a problem
# with our signal handling.
# In actuality, though, this test doesn't fully test the EINTR it expects
# to under gevent (because if its EWOULDBLOCK retry behaviour.)
# Instead, the failures were all due to `pthread_kill` trying to send a signal
# to a greenlet instead of a real thread. The solution is to deliver the signal
# to the real thread by letting it get the correct ID, and we previously
# used make_run_with_original to make it do that.
#
# But now that we have disabled our wrappers around Thread.join() in favor
# of the original implementation, that causes problems:
# background.join() thinks that it is the current thread, and won't let it
# be joined.
'test_wsgiref.IntegrationTests.test_interrupted_write',
]
# PyPy3 3.5.5 v5.8-beta
if PYPY3:
disabled_tests += [
# This raises 'RuntimeError: reentrant call' when exiting the
# process tries to close the stdout stream; no other platform does this.
# Seen in both 3.3 and 3.5 (5.7 and 5.8)
'test_signal.SiginterruptTest.test_siginterrupt_off',
]
disabled_tests += [
# This fails to close all the FDs, at least on CI. On OS X, many of the
# POSIXProcessTestCase fd tests have issues.
'test_subprocess.POSIXProcessTestCase.test_close_fds_when_max_fd_is_lowered',
# This has the wrong constants in 5.8 (but worked in 5.7), at least on
# OS X. It finds "zlib compression" but expects "ZLIB".
'test_ssl.ThreadedTests.test_compression',
# The below are new with 5.10.1
# This gets an EOF in violation of protocol; again, even without gevent
# (at least on OS X; it's less consistent about that on travis)
'test_ssl.NetworkedBIOTests.test_handshake',
# This passes various "invalid" strings and expects a ValueError. not sure why
# we don't see errors on CPython.
'test_subprocess.ProcessTestCase.test_invalid_env',
]
if OSX:
disabled_tests += [
# These all fail with "invalid_literal for int() with base 10: b''"
'test_subprocess.POSIXProcessTestCase.test_close_fds',
'test_subprocess.POSIXProcessTestCase.test_close_fds_after_preexec',
'test_subprocess.POSIXProcessTestCase.test_pass_fds',
'test_subprocess.POSIXProcessTestCase.test_pass_fds_inheritable',
'test_subprocess.POSIXProcessTestCase.test_pipe_cloexec',
# The below are new with 5.10.1
# These fail with 'OSError: received malformed or improperly truncated ancillary data'
'test_socket.RecvmsgSCMRightsStreamTest.testCmsgTruncLen0',
'test_socket.RecvmsgSCMRightsStreamTest.testCmsgTruncLen0Plus1',
'test_socket.RecvmsgSCMRightsStreamTest.testCmsgTruncLen1',
'test_socket.RecvmsgSCMRightsStreamTest.testCmsgTruncLen2Minus1',
# Using the provided High Sierra binary, these fail with
# 'ValueError: invalid protocol version _SSLMethod.PROTOCOL_SSLv3'.
# gevent code isn't involved and running them unpatched has the same issue.
'test_ssl.ContextTests.test_constructor',
'test_ssl.ContextTests.test_protocol',
'test_ssl.ContextTests.test_session_stats',
'test_ssl.ThreadedTests.test_echo',
'test_ssl.ThreadedTests.test_protocol_sslv23',
'test_ssl.ThreadedTests.test_protocol_sslv3',
'test_ssl.ThreadedTests.test_protocol_tlsv1',
'test_ssl.ThreadedTests.test_protocol_tlsv1_1',
# Similar, they fail without monkey-patching.
'test_ssl.TestPostHandshakeAuth.test_pha_no_pha_client',
'test_ssl.TestPostHandshakeAuth.test_pha_optional',
'test_ssl.TestPostHandshakeAuth.test_pha_required',
# This gets None instead of http1.1, even without gevent
'test_ssl.ThreadedTests.test_npn_protocols',
# This fails to decode a filename even without gevent,
# at least on High Sierra. Newer versions of the tests actually skip this.
'test_httpservers.SimpleHTTPServerTestCase.test_undecodable_filename',
]
disabled_tests += [
# This seems to be a buffering issue? Something isn't
# getting flushed. (The output is wrong). Under PyPy3 5.7,
# I couldn't reproduce locally in Ubuntu 16 in a VM
# or a laptop with OS X. Under 5.8.0, I can reproduce it, but only
# when run by the testrunner, not when run manually on the command line,
# so something is changing in stdout buffering in those situations.
'test_threading.ThreadJoinOnShutdown.test_2_join_in_forked_process',
'test_threading.ThreadJoinOnShutdown.test_1_join_in_forked_process',
]
if TRAVIS:
disabled_tests += [
# Likewise, but I haven't produced it locally.
'test_threading.ThreadJoinOnShutdown.test_1_join_on_shutdown',
]
if PYPY:
wrapped_tests.update({
# XXX: gevent: The error that was raised by that last call
# left a socket open on the server or client. The server gets
# to http/server.py(390)handle_one_request and blocks on
# self.rfile.readline which apparently is where the SSL
# handshake is done. That results in the exception being
# raised on the client above, but apparently *not* on the
# server. Consequently it sits trying to read from that
# socket. On CPython, when the client socket goes out of scope
# it is closed and the server raises an exception, closing the
# socket. On PyPy, we need a GC cycle for that to happen.
# Without the socket being closed and exception being raised,
# the server cannot be stopped (it runs each request in the
# same thread that would notice it had been stopped), and so
# the cleanup method added by start_https_server to stop the
# server blocks "forever".
# This is an important test, so rather than skip it in patched_tests_setup,
# we do the gc before we return.
'test_urllib2_localnet.TestUrlopen.test_https_with_cafile': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_command': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_handler': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_head_keep_alive': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_head_via_send_error': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_header_close': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_internal_key_error': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_request_line_trimming': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_return_custom_status': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_return_header_keep_alive': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_send_blank': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_send_error': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_bogus': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_digits': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_invalid': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_none': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_version_none_get': _gc_at_end,
'test_httpservers.BaseHTTPServerTestCase.test_get': _gc_at_end,
'test_httpservers.SimpleHTTPServerTestCase.test_get': _gc_at_end,
'test_httpservers.SimpleHTTPServerTestCase.test_head': _gc_at_end,
'test_httpservers.SimpleHTTPServerTestCase.test_invalid_requests': _gc_at_end,
'test_httpservers.SimpleHTTPServerTestCase.test_path_without_leading_slash': _gc_at_end,
'test_httpservers.CGIHTTPServerTestCase.test_invaliduri': _gc_at_end,
'test_httpservers.CGIHTTPServerTestCase.test_issue19435': _gc_at_end,
'test_httplib.TunnelTests.test_connect': _gc_at_end,
'test_httplib.SourceAddressTest.testHTTPConnectionSourceAddress': _gc_at_end,
# Unclear
'test_urllib2_localnet.ProxyAuthTests.test_proxy_with_bad_password_raises_httperror': _gc_at_end,
'test_urllib2_localnet.ProxyAuthTests.test_proxy_with_no_password_raises_httperror': _gc_at_end,
})
disabled_tests += [
'test_subprocess.ProcessTestCase.test_threadsafe_wait',
# XXX: It seems that threading.Timer is not being greened properly, possibly
# due to a similar issue to what gevent.threading documents for normal threads.
# In any event, this test hangs forever
'test_subprocess.POSIXProcessTestCase.test_preexec_errpipe_does_not_double_close_pipes',
# Subclasses Popen, and overrides _execute_child. Expects things to be done
# in a particular order in an exception case, but we don't follow that
# exact order
'test_selectors.PollSelectorTestCase.test_above_fd_setsize',
# This test attempts to open many many file descriptors and
# poll on them, expecting them all to be ready at once. But
# libev limits the number of events it will return at once. Specifically,
# on linux with epoll, it returns a max of 64 (ev_epoll.c).
# XXX: Hangs (Linux only)
'test_socket.NonBlockingTCPTests.testInitNonBlocking',
# We don't handle the Linux-only SOCK_NONBLOCK option
'test_socket.NonblockConstantTest.test_SOCK_NONBLOCK',
# Tries to use multiprocessing which doesn't quite work in
# monkey_test module (Windows only)
'test_socket.TestSocketSharing.testShare',
# Windows-only: Sockets have a 'ioctl' method in Python 3
# implemented in the C code. This test tries to check
# for the presence of the method in the class, which we don't
# have because we don't inherit the C implementation. But
# it should be found at runtime.
'test_socket.GeneralModuleTests.test_sock_ioctl',
# XXX This fails for an unknown reason
'test_httplib.HeaderTests.test_parse_all_octets',
]
if OSX:
disabled_tests += [
# These raise "OSError: 12 Cannot allocate memory" on both
# patched and unpatched runs
'test_socket.RecvmsgSCMRightsStreamTest.testFDPassEmpty',
]
if TRAVIS:
# This has been seen to produce "Inconsistency detected by
# ld.so: dl-open.c: 231: dl_open_worker: Assertion
# `_dl_debug_initialize (0, args->nsid)->r_state ==
# RT_CONSISTENT' failed!" and fail.
disabled_tests += [
'test_threading.ThreadTests.test_is_alive_after_fork',
# This has timing constraints that are strict and do not always
# hold.
'test_selectors.PollSelectorTestCase.test_timeout',
]
if TRAVIS:
disabled_tests += [
'test_subprocess.ProcessTestCase.test_double_close_on_error',
# This test is racy or OS-dependent. It passes locally (sufficiently fast machine)
# but fails under Travis
]
disabled_tests += [
# XXX: Hangs
'test_ssl.ThreadedTests.test_nonblocking_send',
'test_ssl.ThreadedTests.test_socketserver',
# Uses direct sendfile, doesn't properly check for it being enabled
'test_socket.GeneralModuleTests.test__sendfile_use_sendfile',
# Relies on the regex of the repr having the locked state (TODO: it'd be nice if
# we did that).
# XXX: These are commented out in the source code of test_threading because
# this doesn't work.
# 'lock_tests.LockTests.lest_locked_repr',
# 'lock_tests.LockTests.lest_repr',
# This test opens a socket, creates a new socket with the same fileno,
# closes the original socket (and hence fileno) and then
# expects that the calling setblocking() on the duplicate socket
# will raise an error. Our implementation doesn't work that way because
# setblocking() doesn't actually touch the file descriptor.
# That's probably OK because this was a GIL state error in CPython
# see https://github.com/python/cpython/commit/fa22b29960b4e683f4e5d7e308f674df2620473c
'test_socket.TestExceptions.test_setblocking_invalidfd',
]
if ARES:
disabled_tests += [
# These raise different errors or can't resolve
# the IP address correctly
'test_socket.GeneralModuleTests.test_host_resolution',
'test_socket.GeneralModuleTests.test_getnameinfo',
]
disabled_tests += [
'test_threading.MiscTestCase.test__all__',
]
# We don't actually implement socket._sendfile_use_sendfile,
# so these tests, which think they're using that and os.sendfile,
# fail.
disabled_tests += [
'test_socket.SendfileUsingSendfileTest.testCount',
'test_socket.SendfileUsingSendfileTest.testCountSmall',
'test_socket.SendfileUsingSendfileTest.testCountWithOffset',
'test_socket.SendfileUsingSendfileTest.testOffset',
'test_socket.SendfileUsingSendfileTest.testRegularFile',
'test_socket.SendfileUsingSendfileTest.testWithTimeout',
'test_socket.SendfileUsingSendfileTest.testEmptyFileSend',
'test_socket.SendfileUsingSendfileTest.testNonBlocking',
'test_socket.SendfileUsingSendfileTest.test_errors',
]
# Ditto
disabled_tests += [
'test_socket.GeneralModuleTests.test__sendfile_use_sendfile',
]
disabled_tests += [
# This test requires Linux >= 4.3. When we were running 'dist:
# trusty' on the 4.4 kernel, it passed (~July 2017). But when
# trusty became the default dist in September 2017 and updated
# the kernel to 4.11.6, it begain failing. It fails on `res =
# op.recv(assoclen + len(plain) + taglen)` (where 'op' is the
# client socket) with 'OSError: [Errno 22] Invalid argument'
# for unknown reasons. This is *after* having successfully
# called `op.sendmsg_afalg`. Post 3.6.0, what we test with,
# the test was changed to require Linux 4.9 and the data was changed,
# so this is not our fault. We should eventually update this when we
# update our 3.6 version.
# See https://bugs.python.org/issue29324
'test_socket.LinuxKernelCryptoAPI.test_aead_aes_gcm',
]
disabled_tests += [
# These want to use the private '_communicate' method, which
# our Popen doesn't have.
'test_subprocess.MiscTests.test_call_keyboardinterrupt_no_kill',
'test_subprocess.MiscTests.test_context_manager_keyboardinterrupt_no_kill',
'test_subprocess.MiscTests.test_run_keyboardinterrupt_no_kill',
# This wants to check that the underlying fileno is blocking,
# but it isn't.
'test_socket.NonBlockingTCPTests.testSetBlocking',
# 3.7b2 made it impossible to instantiate SSLSocket objects
# directly, and this tests for that, but we don't follow that change.
'test_ssl.BasicSocketTests.test_private_init',
# 3.7b2 made a change to this test that on the surface looks incorrect,
# but it passes when they run it and fails when we do. It's not
# clear why.
'test_ssl.ThreadedTests.test_check_hostname_idn',
# These appear to hang, haven't investigated why
'test_ssl.SimpleBackgroundTests.test_get_server_certificate',
# Probably the same as NetworkConnectionNoServer.test_create_connection_timeout
'test_socket.NetworkConnectionNoServer.test_create_connection',
# Internals of the threading module that change.
'test_threading.ThreadTests.test_finalization_shutdown',
'test_threading.ThreadTests.test_shutdown_locks',
# Expects a deprecation warning we don't raise
'test_threading.ThreadTests.test_old_threading_api',
# This tries to use threading.interrupt_main() from a new Thread;
# but of course that's actually the same thread and things don't
# work as expected.
'test_threading.InterruptMainTests.test_interrupt_main_subthread',
'test_threading.InterruptMainTests.test_interrupt_main_noerror',
# TLS1.3 seems flaky
'test_ssl.ThreadedTests.test_wrong_cert_tls13',
]
if APPVEYOR:
disabled_tests += [
# This sometimes produces ``self.assertEqual(1, len(s.select(0))): 1 != 0``.
# Probably needs to spin the loop once.
'test_selectors.BaseSelectorTestCase.test_timeout',
# AssertionError: OSError not raised
'test_socket.PurePythonSocketPairTest.test_injected_authentication_failure',
# subprocess.TimeoutExpired: Command '('C:\\Program Files\\Git\\usr\\bin\\true.EXE',)'
# timed out after -1 seconds
# Seen on 3.12.5 and 3.13.0rc1
# Perhaps they changed handling of negative timeouts? Doesn't
# affect any other platforms though.
'test_subprocess.ProcessTestCase.test_wait_negative_timeout',
]
disabled_tests += [
# This one seems very strict: doesn't want a pathlike
# first argument when shell is true.
'test_subprocess.RunFuncTestCase.test_run_with_pathlike_path',
# This tests for a warning we don't raise.
'test_subprocess.RunFuncTestCase.test_bufsize_equal_one_binary_mode',
# This compares the output of threading.excepthook with
# data constructed in Python. But excepthook is implemented in C
# and can't see the patched threading.get_ident() we use, so the
# output doesn't match.
'test_threading.ExceptHookTests.test_excepthook_thread_None',
]
if sys.version_info[:3] < (3, 8, 1):
disabled_tests += [
# Earlier versions parse differently so the newer test breaks
'test_ssl.BasicSocketTests.test_parse_all_sans',
'test_ssl.BasicSocketTests.test_parse_cert_CVE_2013_4238',
]
if sys.version_info[:3] < (3, 8, 10):
disabled_tests += [
# These were added for fixes sometime between 3.8.1 and 3.8.10
'test_ftplib.TestFTPClass.test_makepasv_issue43285_security_disabled',
'test_ftplib.TestFTPClass.test_makepasv_issue43285_security_enabled_default',
'test_httplib.BasicTest.test_dir_with_added_behavior_on_status',
'test_httplib.TunnelTests.test_tunnel_connect_single_send_connection_setup',
'test_ssl.TestSSLDebug.test_msg_callback_deadlock_bpo43577',
# This one fails with the updated certs
'test_ssl.ContextTests.test_load_verify_cadata',
# This one times out on 3.7.1 on Appveyor
'test_ftplib.TestTLS_FTPClassMixin.test_retrbinary_rest',
]
if RESOLVER_DNSPYTHON:
disabled_tests += [
# This does two things DNS python doesn't. First, it sends it
# capital letters and expects them to be returned lowercase.
# Second, it expects the symbolic scopeid to be stripped from the end.
'test_socket.GeneralModuleTests.test_getaddrinfo_ipv6_scopeid_symbolic',
]
# if 'signalfd' in os.environ.get('GEVENT_BACKEND', ''):
# # tests that don't interact well with signalfd
# disabled_tests.extend([
# 'test_signal.SiginterruptTest.test_siginterrupt_off',
# 'test_socketserver.SocketServerTest.test_ForkingTCPServer',
# 'test_socketserver.SocketServerTest.test_ForkingUDPServer',
# 'test_socketserver.SocketServerTest.test_ForkingUnixStreamServer'])
# LibreSSL reports OPENSSL_VERSION_INFO (2, 0, 0, 0, 0) regardless of its version,
# so this is known to fail on some distros. We don't want to detect this because we
# don't want to trigger the side-effects of importing ssl prematurely if we will
# be monkey-patching, so we skip this test everywhere. It doesn't do much for us
# anyway.
disabled_tests += [
'test_ssl.BasicSocketTests.test_openssl_version'
]
if OSX:
disabled_tests += [
# This sometimes produces OSError: Errno 40: Message too long
'test_socket.RecvmsgIntoTCPTest.testRecvmsgIntoGenerator',
# These sometime timeout. Cannot reproduce locally.
'test_ftp.TestTLS_FTPClassMixin.test_mlsd',
'test_ftp.TestTLS_FTPClassMixin.test_retrlines_too_long',
'test_ftp.TestTLS_FTPClassMixin.test_storlines',
'test_ftp.TestTLS_FTPClassMixin.test_retrbinary_rest',
]
if RESOLVER_ARES and PY38 and not RUNNING_ON_CI:
disabled_tests += [
# When updating to 1.16.0 this was seen locally, but not on CI.
# Tuples differ: ('ff02::1de:c0:face:8d', 1234, 0, 0)
# != ('ff02::1de:c0:face:8d', 1234, 0, 1)
'test_socket.GeneralModuleTests.test_getaddrinfo_ipv6_scopeid_symbolic',
]
if PY39:
disabled_tests += [
# Depends on exact details of the repr. Eww.
'test_subprocess.ProcessTestCase.test_repr',
# Tries to wait for the process without using Popen APIs, and expects the
# ``returncode`` attribute to stay None. But we have already hooked SIGCHLD, so
# we see and set the ``returncode``; there is no way to wait that doesn't do that.
'test_subprocess.POSIXProcessTestTest.test_send_signal_race',
]
if sys.version_info[:3] < (3, 9, 5):
disabled_tests += [
# These were added for fixes sometime between 3.9.1 and 3.9.5
'test_ftplib.TestFTPClass.test_makepasv_issue43285_security_disabled',
'test_ftplib.TestFTPClass.test_makepasv_issue43285_security_enabled_default',
'test_httplib.BasicTest.test_dir_with_added_behavior_on_status',
'test_httplib.TunnelTests.test_tunnel_connect_single_send_connection_setup',
'test_ssl.TestSSLDebug.test_msg_callback_deadlock_bpo43577',
# This one fails with the updated certs
'test_ssl.ContextTests.test_load_verify_cadata',
# These time out on 3.9.1 on Appveyor
'test_ftplib.TestTLS_FTPClassMixin.test_retrbinary_rest',
'test_ftplib.TestTLS_FTPClassMixin.test_retrlines_too_long',
]
if PY310:
disabled_tests += [
# They arbitrarily made some types so that they can't be created;
# that's an implementation detail we're not going to follow (
# it would require them to be factory functions).
'test_select.SelectTestCase.test_disallow_instantiation',
'test_threading.ThreadTests.test_disallow_instantiation',
# This wants two true threads to work, but a CPU bound loop
# in a greenlet can't be interrupted.
'test_threading.InterruptMainTests.test_can_interrupt_tight_loops',
# We don't currently implement pipesize.
'test_subprocess.ProcessTestCase.test_pipesize_default',
'test_subprocess.ProcessTestCase.test_pipesizes',
# Unknown
'test_signal.SiginterruptTest.test_siginterrupt_off',
]
if TRAVIS:
disabled_tests += [
# The mixing of subinterpreters (with threads) and gevent apparently
# leads to a segfault on Ubuntu/GitHubActions/3.10rc1. Not clear why.
# But that's not a great use case for gevent.
'test_threading.SubinterpThreadingTests.test_threads_join',
'test_threading.SubinterpThreadingTests.test_threads_join_2',
]
if (PY310_EXACTLY or PY39_EXACTLY) and APPVEYOR:
disabled_tests += [
# On 3.9.13, this has been seen to cause
#
# SystemError: <built-in function if_indextoname> returned
# NULL without setting an error
#
# But this isn't even our code! That's a CPython function. Must be flaky.
#
# Also observed on 3.10.11.
'test_socket.GeneralModuleTests.testInvalidInterfaceIndexToName',
]
if PY311:
disabled_tests += [
# CPython issue #27718: This wants to require all objects to
# have a __module__ of 'signal' because pydoc. Obviously our patches don't.
'test_signal.GenericTests.test_functions_module_attr',
# 3.11 added subprocess._USE_VFORK and subprocess._USE_POSIX_SPAWN.
# We don't support either of those (although USE_VFORK might be possible?)
'test_subprocess.ProcessTestCase.test__use_vfork',
# This test only runs if threading has not been imported already
# when the subprocess runs its script. But locally, and on CI,
# threading is already imported. It's not clear how, because getting the backtrace just
# shows all the internal frozen importlib modules, no actual calling code.
# The closest we get is this:
#
# <frozen site>(626)<module>()
# <frozen site>(609)main()
# <frozen site>(541)venv()
# <frozen site>(394)addsitepackages()
# <frozen site>(236)addsitedir()
# <frozen site>(195)addpackage()
# <string>(1)<module>()
#
# which makes it appear like it's one of the .pth files?
#
# The monkey_test.py runner adds some ``gevent.testing``
# imports to the top of ``test_threading.py``, and those do
# ultimately import threading, but that shouldn't affect the
# Python subprocess that runs the script being tested.
#
# Locally, I had a .pythonrc that was importing threading
# (``rich.traceback`` -> ``pygments.lexers`` ->
# ``pygments.plugin`` -> ``importlib.metadata`` -> ``zipfile``
# -> ``importlib.util`` -> ``threading``).
#
# That provided the clue we needed. ``importlib._bootstrap``
# imports ``importlib.util``, which imports threading. No idea
# how this test gets to pass on the CPython test suite.
'test_threading.ThreadTests.test_import_from_another_thread',
]
if sys.version_info[:3] < (3, 11, 8):
    # New tests in that version that won't pass on earlier versions.
    disabled_tests += [
        'test_threading.ThreadTests.test_main_thread_after_fork_from_dummy_thread',
        # BUG FIX: this entry used to read 'tets_ssl....' — a misspelling of
        # 'test_ssl'. Disabled tests are looked up by their leading
        # 'test_module' component (see _build_test_structure), so the typo'd
        # entry matched no file and the client-side pre-handshake test was
        # silently never disabled.
        'test_ssl.TestPreHandshakeClose.test_preauth_data_to_tls_client',
        'test_ssl.TestPreHandshakeClose.test_preauth_data_to_tls_server',
        'test_signal.PosixTests.test_no_repr_is_called_on_signal_handler',
        'test_socket.GeneralModuleTests.testInvalidInterfaceIndexToName',
    ]
if WIN:
disabled_tests += [
'test_subprocess.ProcessTestCase.test_win32_duplicate_envs',
'test_ssl.SimpleBackgroundTests.test_transport_eof',
'test_ssl.SimpleBackgroundTests.test_bio_read_write_data',
'test_ssl.SimpleBackgroundTests.test_bio_handshake',
'test_httplib.ExtendedReadTestContentLengthKnown.test_readline_without_limit',
'test_httplib.ExtendedReadTestContentLengthKnown.test_readline',
'test_httplib.ExtendedReadTestContentLengthKnown.test_read1_unbounded',
'test_httplib.ExtendedReadTestContentLengthKnown.test_read1_bounded',
'test_httplib.ExtendedReadTestContentLengthKnown.test_read1',
'test_httplib.HeaderTests.test_ipv6host_header',
]
if PY312:
disabled_tests += [
# This test is new in 3.12.1; it appears to essentially rely
# on blocking sockets to fully read data in one call, and our
# version delivers a short initial read.
'test_ssl.ThreadedTests.test_recv_into_buffer_protocol_len',
]
if sys.version_info[:3] < (3, 12, 1):
# Appveyor still has 3.12.0 when we added the 3.12.1 tests.
# Some of them depend on changes to the stdlib/interpreter.
disabled_tests += [
'test_httplib.HeaderTests.test_ipv6host_header',
'test_interpreters.TestInterpreterClose.test_subthreads_still_running',
'test_interpreters.TestInterpreterIsRunning.test_main',
'test_interpreters.TestInterpreterIsRunning.test_with_only_background_threads',
'test_interpreters.TestInterpreterRun.test_with_background_threads_still_running',
'test_interpreters.FinalizationTests.test_gh_109793',
'test_interpreters.StartupTests.test_sys_path_0',
'test_threading.SubinterpThreadingTests.test_threads_join_with_no_main',
'test_threading.MiscTestCase.test_gh112826_missing__thread__is_main_interpreter',
]
if sys.version_info[:3] < (3, 12, 11):
disabled_tests += [
# This new tests crashes the interpreter on older versions
'test_context.ContextTest.test_context_new_unhashable_str_subclass',
# This is a new test that fails.
'test_httplib.BasicTest.test_chunked',
]
if RUN_COVERAGE:
disabled_tests += [
# This test wants to look for installed tracing functions, and
# having a coverage tracer function installed breaks it.
'test_threading.ThreadTests.test_gettrace_all_threads',
]
if WIN:
disabled_tests += [
# These three are looking for an error string that matches,
# and ours differs very slightly
'test_socket.BasicHyperVTest.testCreateHyperVSocketAddrNotTupleFailure',
'test_socket.BasicHyperVTest.testCreateHyperVSocketAddrServiceIdNotValidUUIDFailure',
'test_socket.BasicHyperVTest.testCreateHyperVSocketAddrVmIdNotValidUUIDFailure',
]
# Like the case for 3.12.1, these need VM or stdlib updates.
if sys.version_info[:3] < (3, 12, 2):
disabled_tests += [
'test_socket.GeneralModuleTests.testInvalidInterfaceIndexToName',
'test_subprocess.ProcessTestCase.test_win32_duplicate_envs',
'test_httplib.ExtendedReadTestContentLengthKnown.test_readline_without_limit',
'test_httplib.ExtendedReadTestContentLengthKnown.test_readline',
'test_httplib.ExtendedReadTestContentLengthKnown.test_read1_unbounded',
'test_httplib.ExtendedReadTestContentLengthKnown.test_read1_bounded',
'test_httplib.ExtendedReadTestContentLengthKnown.test_read1',
]
if LINUX and RUNNING_ON_CI:
disabled_tests += [
# These two try to forcibly close a socket, preventing some data
# from reaching its destination. That works OK on some platforms, but
# in this set of circumstances, because of the event loop, gevent is
# able to send that data.
'test_ssl.TestPreHandshakeClose.test_preauth_data_to_tls_client',
'test_ssl.TestPreHandshakeClose.test_preauth_data_to_tls_server',
]
if ARES:
disabled_tests += [
# c-ares doesn't like the IPv6 syntax it uses here.
'test_socket.GeneralModuleTests.test_getaddrinfo_ipv6_scopeid_symbolic',
]
if PY313:
disabled_tests += [
# Creates a fork bomb because all the threads that have been created in the
# parent process continue running in each child process.
'test_threading.ThreadJoinOnShutdown.test_reinit_tls_after_fork',
]
if OSX and RUNNING_ON_CI:
disabled_tests += [
# Sometimes times out. Cannot reproduce locally.
'test_signal.ItimerTest.test_itimer_virtual',
]
if APPVEYOR:
disabled_tests += [
]
if sys.version_info[:3] < (3, 13, 3):
disabled_tests += [
# 3.13.3 changed the way linecache works when non-file strings are being
# evaluated (backport from 3.14), which means this test won't run on anything
# older. It gets 2 lines when it expects 4.
# https://github.com/python/cpython/pull/117500
'test_subprocess.RunFuncTestCase.test_encoding_warning',
# This was a fix for a crash in the interpreter.
# https://github.com/python/cpython/issues/132002
'test_context.ContextTest.test_context_new_unhashable_str_subclass',
# This was modified in 3.13.3 for apparently a bugfix
# and won't run on older versions.
# https://github.com/python/cpython/commit/bf6c256a643b7066e922fb3a7e44aa3e006a29f3
'test_httplib.BasicTest.test_chunked',
]
# NOTE(review): this is a duplicate of the empty APPVEYOR placeholder that
# appears a few lines above (both append an empty list, so both are no-ops).
# One of the two can be deleted, or entries added to only one of them.
if APPVEYOR:
    disabled_tests += [
    ]
if PY314:
disabled_tests += [
# Creates a pipe using ``os.pipe``, spawns a thread to write to it, and expects to
# be able to read from it using a standard file wrapped around the descriptor with
# ``open(fileno)``. The reading code gets run first because Reasons. The thread
# is monkey-patched to be a greenlet, but pipe and open are not patched, so
# we block forever attempting to read. This would be a potential problem for
# anything using the ``_running`` context manager.
'test__interpreters.DestroyTests.test_still_running',
# Same.
'test__interpreters.RunStringTests.test_already_running',
# Attempts to use ``signal.pthread_kill`` to send a signal to the
# main thread to interrupt it, but not kill it. Currently there is
# no way to emulate this behaviour with greenlets.
'test_socketserver.SocketWriterTest.test_write',
# Tries to use ``sys.unraisablehook`` to catch exceptions
# from a background thread. We have no way to intercept that.
'test_thread.ThreadRunningTests.test_unraisable_exception',
]
if ARES:
disabled_tests += [
# ends up in ``urlib.request._is_local_authority`` for a local
# file URL. That passes it to ``gethostbyname`` and expects it to
# raise a ``socket.gaierror`` on invalid names. But the ares resolver
# raises a ``socket.herror``. I can't fix this without breaking other tests
# (I *think*)
'test_urllib2.HandlerTests.test_file',
]
if TRAVIS:
disabled_tests += [
# These tests frequently break when we try to use newer Travis CI images,
# due to different versions of OpenSSL being available. See above for some
# specific examples. Usually the tests catch up, eventually (e.g., at this writing,
# the 3.9b1 tests are fine on Ubuntu Bionic, but all other versions fail).
'test_ssl.ContextTests.test_options',
'test_ssl.ThreadedTests.test_alpn_protocols',
'test_ssl.ThreadedTests.test_default_ecdh_curve',
'test_ssl.ThreadedTests.test_shared_ciphers',
]
if RUNNING_ON_MUSLLINUX:
disabled_tests += [
# This is supposed to *not* crash, but on the musllinux image, it
# does crash (exitcode -11, i.e., SIGSEGV)
'test_threading.ThreadingExceptionTests.test_recursion_limit',
]
# Now build up the data structure we'll use to actually find disabled tests
# to avoid a linear scan for every file (it seems the list could get quite large)
# (First, freeze the source list to make sure it isn't modified anywhere)
def _build_test_structure(sequence_of_tests):
_disabled_tests = frozenset(sequence_of_tests)
disabled_tests_by_file = collections.defaultdict(set)
for file_case_meth in _disabled_tests:
file_name, _rest = file_case_meth.split('.', 1)
by_file = disabled_tests_by_file[file_name]
by_file.add(file_case_meth)
return disabled_tests_by_file
# Precomputed lookup tables mapping a test-file module name (e.g.
# 'test_socket') to the set of fully-qualified disabled/wrapped test names
# in that file, so per-file queries avoid a linear scan of the full lists.
_disabled_tests_by_file = _build_test_structure(disabled_tests)
_wrapped_tests_by_file = _build_test_structure(wrapped_tests)
def disable_tests_in_source(source, filename):
    """Rewrite the text of a stdlib test file so that disabled tests are
    skipped and wrapped tests are decorated.

    *source* is the full text of the test file; *filename* is its name
    (e.g. ``./test_socket.py`` or ``test_socket``).  Returns the possibly
    modified source text.  The rewriting is purely regex based:

    - a ``test_module.TestClass`` entry renames the whole class so
      unittest discovery no longer sees a TestCase subclass;
    - a ``test_module.TestClass.test_method`` entry prepends a
      ``@_GEVENT_UTS.skip(...)`` decorator to every matching ``def``;
    - wrapped tests get a ``@_GEVENT_PTS._PatchedTest(...)`` decorator.
    """
    # Source and filename are both native strings.
    if filename.startswith('./'):
        # turn "./test_socket.py" (used for auto-complete) into "test_socket.py"
        filename = filename[2:]
    if filename.endswith('.py'):
        filename = filename[:-3]
    # XXX ignoring TestCase class name (just using function name).
    # Maybe we should do this with the AST, or even after the test is
    # imported.
    my_disabled_tests = _disabled_tests_by_file.get(filename, ())
    my_wrapped_tests = _wrapped_tests_by_file.get(filename, {})
    if my_disabled_tests or my_wrapped_tests:
        # Insert our imports early in the file.
        # If we do it on a def-by-def basis, we can break syntax
        # if the function is already decorated
        pattern = r'^import .*'
        replacement = r'from gevent.testing import patched_tests_setup as _GEVENT_PTS;'
        replacement += r'import unittest as _GEVENT_UTS;'
        replacement += r'\g<0>'
        # count=1: only the first ``import`` line gets the prefix.
        source, n = re.subn(pattern, replacement, source,
                            count=1, flags=re.MULTILINE)
        print("Added imports", n)
    # Test cases will always be indented some,
    # so use [ \t]+. Without indentation, test_main, commonly used as the
    # __main__ function at the top level, could get matched. \s matches
    # newlines even in MULTILINE mode so it would still match that.
    my_disabled_testcases = set()
    for test in my_disabled_tests:
        # test will be:
        #   test_module.TestClass.test_method
        # or
        #   test_module.TestClass
        if test.count('.') == 1:
            _module, class_name = test.split('.')
            # disabling a class.
            #
            #   class CLifoTest(BaseCLass):
            # ->
            #   class _GEVENT_DISABLE_ClifoTest:
            #
            # Unittest discover finds subclasses of TestCase,
            # so make that not be true. This will fail if the definition
            # crosses multiple lines...
            pattern = r"class " + class_name + r'.*\):'
            # "Test" is stripped from the new name as an extra guard
            # against unittest's name-based discovery.
            no_test_class_name = class_name.replace("Test", "")
            replacement = 'class _GEVENT_DISABLE_' + no_test_class_name + ":"
            source, n = re.subn(pattern, replacement, source,
                                flags=re.MULTILINE)
        else:
            testcase = test.split('.')[-1]
            my_disabled_testcases.add(testcase)
            # def foo_bar(self)
            # ->
            # @_GEVENT_UTS.skip('Removed by patched_tests_setup')
            # def foo_bar(self)
            pattern = r"^([ \t]+)def " + testcase
            replacement = r"\1@_GEVENT_UTS.skip('Removed by patched_tests_setup: %s')\n" % (test,)
            replacement += r"\g<0>"
            source, n = re.subn(pattern, replacement, source,
                                flags=re.MULTILINE)
        print('Skipped %s (%d)' % (test, n), file=sys.stderr)
    for test in my_wrapped_tests:
        testcase = test.split('.')[-1]
        # A skip decorator already removed this method; wrapping it too
        # would be pointless (and would stack decorators).
        if testcase in my_disabled_testcases:
            print("Not wrapping %s because it is skipped" % (test,))
            continue
        # def foo_bar(self)
        # ->
        # @_GEVENT_PTS._PatchedTest('file.Case.name')
        # def foo_bar(self)
        pattern = r"^([ \t]+)def " + testcase
        replacement = r"\1@_GEVENT_PTS._PatchedTest('%s')\n" % (test,)
        replacement += r"\g<0>"
        source, n = re.subn(pattern, replacement, source, 0, re.MULTILINE)
        print('Wrapped %s (%d)' % (testcase, n), file=sys.stderr)
    return source
|
_PatchedTest
|
python
|
sqlalchemy__sqlalchemy
|
test/engine/test_ddlevents.py
|
{
"start": 14779,
"end": 16821
}
|
class ____(DDLEventHarness):
requires_table_to_exist = True
def test_straight_create_drop(
self,
metadata,
connection,
produce_subject,
produce_table_integrated_subject,
produce_event_target,
):
subject = produce_subject
assert_subject = produce_event_target
if self.requires_table_to_exist:
metadata.create_all(connection, checkfirst=False)
subject.drop(connection)
canary = mock.Mock()
event.listen(subject, "before_create", canary.before_create)
event.listen(subject, "after_create", canary.after_create)
event.listen(subject, "before_drop", canary.before_drop)
event.listen(subject, "after_drop", canary.after_drop)
subject.create(connection)
eq_(
canary.mock_calls,
[
mock.call.before_create(
assert_subject,
connection,
_ddl_runner=mock.ANY,
),
mock.call.after_create(
assert_subject,
connection,
_ddl_runner=mock.ANY,
),
],
)
subject.drop(connection)
eq_(
canary.mock_calls,
[
mock.call.before_create(
assert_subject,
connection,
_ddl_runner=mock.ANY,
),
mock.call.after_create(
assert_subject,
connection,
_ddl_runner=mock.ANY,
),
mock.call.before_drop(
assert_subject,
connection,
_ddl_runner=mock.ANY,
),
mock.call.after_drop(
assert_subject,
connection,
_ddl_runner=mock.ANY,
),
],
)
|
DDLEventWCreateHarness
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/pool/base.py
|
{
"start": 39028,
"end": 40872
}
|
class ____(PoolProxiedConnection):
"""provides the :class:`.PoolProxiedConnection` interface for cases where
the DBAPI connection is not actually proxied.
This is used by the engine internals to pass a consistent
:class:`.PoolProxiedConnection` object to consuming dialects in response to
pool events that may not always have the :class:`._ConnectionFairy`
available.
"""
__slots__ = ("dbapi_connection", "_connection_record", "_is_valid")
dbapi_connection: DBAPIConnection
_connection_record: ConnectionPoolEntry
def __init__(
self,
dbapi_connection: DBAPIConnection,
connection_record: ConnectionPoolEntry,
):
self.dbapi_connection = dbapi_connection
self._connection_record = connection_record
self._is_valid = True
@property
def driver_connection(self) -> Any: # type: ignore[override] # mypy#4125
return self._connection_record.driver_connection
@property
def connection(self) -> DBAPIConnection:
return self.dbapi_connection
@property
def is_valid(self) -> bool:
"""Implement is_valid state attribute.
for the adhoc proxied connection it's assumed the connection is valid
as there is no "invalidate" routine.
"""
return self._is_valid
def invalidate(
self, e: Optional[BaseException] = None, soft: bool = False
) -> None:
self._is_valid = False
@util.ro_non_memoized_property
def record_info(self) -> Optional[_InfoType]:
return self._connection_record.record_info
def cursor(self, *args: Any, **kwargs: Any) -> DBAPICursor:
return self.dbapi_connection.cursor(*args, **kwargs)
def __getattr__(self, key: Any) -> Any:
return getattr(self.dbapi_connection, key)
|
_AdhocProxiedConnection
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-amazon-seller-partner/components.py
|
{
"start": 13302,
"end": 13951
}
|
class ____(TypeTransformer):
def __init__(self, *args, **kwargs):
config = TransformConfig.DefaultSchemaNormalization | TransformConfig.CustomSchemaNormalization
super().__init__(config)
self.registerCustomTransform(self.get_transform_function())
@staticmethod
def get_transform_function():
def transform_function(original_value: Any, field_schema: Dict[str, Any]) -> Any:
if original_value == "" and field_schema.get("format") == "date-time":
return None
return original_value
return transform_function
@dataclass
|
FlatFileSettlementV2ReportsTypeTransformer
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/callbackProtocol9.py
|
{
"start": 192,
"end": 222
}
|
class ____:
__call__ = A()
|
B
|
python
|
pytorch__pytorch
|
torch/export/pt2_archive/_package.py
|
{
"start": 2415,
"end": 6077
}
|
class ____:
"""
Context manager for writing a PT2 archive.
"""
def __init__(self, archive_path_or_buffer: FileLike):
if isinstance(archive_path_or_buffer, str):
archive_path_or_buffer = normalize_path_separator(archive_path_or_buffer)
self.archive_file = torch._C.PyTorchFileWriter(archive_path_or_buffer) # type: ignore[arg-type]
# NOTICE: version here is different from the archive_version
# this is the version of zip file format, which is used by PyTorchFileWriter, which write to /.data/version
# archive_version is the version of the PT2 archive spec, which write to /archive_version
self.archive_file.set_min_version(6)
def __enter__(self) -> "PT2ArchiveWriter":
return self
def __exit__(self, *args: Any) -> None:
if not self.has_record(ARCHIVE_FORMAT_PATH):
self.write_string(ARCHIVE_FORMAT_PATH, ARCHIVE_FORMAT_VALUE)
if not self.has_record(ARCHIVE_VERSION_PATH):
self.write_string(ARCHIVE_VERSION_PATH, ARCHIVE_VERSION_VALUE)
self.close()
def has_record(self, name: str) -> bool:
"""
Check if a record exists in the archive.
"""
return name in self.archive_file.get_all_written_records()
def count_prefix(self, prefix: str) -> int:
"""
Count the number of records that start with a given prefix.
"""
return sum(
1
for record in self.archive_file.get_all_written_records()
if record.startswith(prefix)
)
def write_bytes(self, name: str, data: bytes) -> None:
"""
Write a bytes object to the archive.
name: The destination file inside the archive.
data: The bytes object to write.
"""
assert isinstance(data, bytes), f"Expected bytes but got {type(data)}"
self.archive_file.write_record(name, data, len(data))
def write_string(self, name: str, data: str) -> None:
"""
Write a string object to the archive.
name: The destination file inside the archive.
data: The string object to write.
"""
assert isinstance(data, str), f"Expected string but got {type(data)}"
data_bytes = data.encode()
self.write_bytes(name, data_bytes)
def write_file(self, name: str, file_path: str) -> None:
"""
Copy a file into the archive.
name: The destination file inside the archive.
file_path: The source file on disk.
"""
assert os.path.isfile(file_path), f"{file_path} is not a valid file path"
with open(file_path, "rb") as f:
file_bytes = f.read()
self.write_bytes(name, file_bytes)
def write_folder(self, archive_dir: str, folder_dir: str) -> None:
"""
Copy a folder into the archive.
archive_dir: The destination folder inside the archive.
folder_dir: The source folder on disk.
"""
assert os.path.isdir(folder_dir), f"{folder_dir} is not a valid directory path"
file_paths = filter(
os.path.isfile, glob.glob(f"{folder_dir}/**", recursive=True)
)
for file_path in file_paths:
# pyrefly: ignore [no-matching-overload]
filename = os.path.relpath(file_path, folder_dir)
archive_path = os.path.join(archive_dir, filename)
# pyrefly: ignore [bad-argument-type]
self.write_file(archive_path, file_path)
def close(self) -> None:
"""
Close the archive.
"""
self.archive_file.write_end_of_file()
|
PT2ArchiveWriter
|
python
|
sqlalchemy__sqlalchemy
|
test/typing/plain_files/orm/dynamic_rel.py
|
{
"start": 471,
"end": 673
}
|
class ____(Base):
__tablename__ = "address"
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[int] = mapped_column(ForeignKey("user.id"))
email_address: Mapped[str]
|
Address
|
python
|
pytorch__pytorch
|
torch/distributed/elastic/multiprocessing/api.py
|
{
"start": 15102,
"end": 25451
}
|
class ____(abc.ABC):
"""
The base class that standardizes operations over a set of processes that are launched via different mechanisms.
The name ``PContext`` is intentional to disambiguate with ``torch.multiprocessing.ProcessContext``.
.. warning:: stdouts and stderrs should ALWAYS be a superset of
tee_stdouts and tee_stderrs (respectively) this is b/c
tee is implemented as a redirect + tail -f <stdout/stderr.log>
Args:
duplicate_stdout_filters:
If non-empty, duplicates stdouts specified in ``logs_specs``'s ``tee``
to a file containing only lines that match _any_ of the filter strings.
The log file is aggregated across all ranks selected by ``tee``.
duplicate_stderr_filters:
If non-empty, duplicates stderrs specified in ``logs_specs``'s ``tee``
to a file containing only lines that match _any_ of the filter strings.
The log file is aggregated across all ranks selected by ``tee``.
"""
def __init__(
self,
name: str,
entrypoint: Callable | str,
args: dict[int, tuple],
envs: dict[int, dict[str, str]],
logs_specs: LogsSpecs,
log_line_prefixes: dict[int, str] | None = None,
duplicate_stdout_filters: list[str] | None = None,
duplicate_stderr_filters: list[str] | None = None,
):
self.name = name
# validate that all mappings have the same number of keys and
# all local ranks are accounted for
nprocs = len(args)
# TODO log_line_prefixes can be expanded too
logs_dest = logs_specs.reify(envs)
_validate_full_rank(logs_dest.stdouts, nprocs, "stdouts")
_validate_full_rank(logs_dest.stderrs, nprocs, "stderrs")
self.entrypoint = entrypoint
self.args = args
self.envs = envs
self.stdouts = logs_dest.stdouts
self.stderrs = logs_dest.stderrs
self.error_files = logs_dest.error_files
self.nprocs = nprocs
self.filtered_stdout: TextIO | None = None
self.filtered_stderr: TextIO | None = None
self._tail_logs = [
TailLog(name, logs_dest.tee_stdouts, sys.stdout, log_line_prefixes),
TailLog(name, logs_dest.tee_stderrs, sys.stderr, log_line_prefixes),
]
if duplicate_stdout_filters:
self.filtered_stdout = open(
logs_dest.filtered_stdout, mode="w", errors="replace", buffering=1
)
self._tail_logs.append(
TailLog(
name,
logs_dest.tee_stdouts,
self.filtered_stdout,
log_line_prefixes,
log_line_filter=lambda line: any(
needle in line for needle in duplicate_stdout_filters
),
)
)
if duplicate_stderr_filters:
self.filtered_stderr = open(
logs_dest.filtered_stderr, mode="w", errors="replace", buffering=1
)
self._tail_logs.append(
TailLog(
name,
logs_dest.tee_stderrs,
self.filtered_stderr,
log_line_prefixes,
log_line_filter=lambda line: any(
needle in line for needle in duplicate_stderr_filters
),
)
)
def start(self) -> None:
"""Start processes using parameters defined in the constructor."""
if threading.current_thread() is threading.main_thread():
# Register signal handlers for the signals specified in the environment variable
signals_to_handle = os.environ.get(
"TORCHELASTIC_SIGNALS_TO_HANDLE", "SIGTERM,SIGINT,SIGHUP,SIGQUIT"
)
signal_list = signals_to_handle.split(",")
for sig_name in signal_list:
try:
sig = getattr(signal, sig_name.strip())
signal.signal(sig, _terminate_process_handler)
logger.info("Registered signal handler for %s", sig_name)
except (AttributeError, ValueError):
logger.warning(
"Failed to register signal handler for %s",
sig_name,
exc_info=True,
)
except RuntimeError:
if IS_WINDOWS and sig_name.strip() in [
"SIGHUP",
"SIGQUIT",
"SIGUSR1",
"SIGUSR2",
]:
logger.info(
"Signal %s is not supported on Windows, skipping", sig_name
)
else:
logger.warning(
"Failed to register signal handler for %s",
sig_name,
exc_info=True,
)
else:
logger.warning(
"Failed to register signal handlers since torchelastic is running on a child thread. "
"This could lead to orphaned worker processes if the torchrun is terminated."
)
self._start()
for tail_log in self._tail_logs:
tail_log.start()
@abc.abstractmethod
def _start(self) -> None:
"""Start processes using strategy defined in a particular context."""
raise NotImplementedError
@abc.abstractmethod
def _poll(self) -> RunProcsResult | None:
"""
Poll the run status of the processes running under this context.
This method follows an "all-or-nothing" policy and returns
a ``RunProcessResults`` object if either all processes complete
successfully or any process fails. Returns ``None`` if
all processes are still running.
"""
raise NotImplementedError
def wait(self, timeout: float = -1, period: float = 1) -> RunProcsResult | None:
"""
Wait for the specified ``timeout`` seconds, polling every ``period`` seconds
for the processes to be done. Returns ``None`` if the processes are still running
on timeout expiry. Negative timeout values are interpreted as "wait-forever".
A timeout value of zero simply queries the status of the processes (e.g. equivalent
to a poll).
.. note::
Multiprocessing library registers SIGTERM and SIGINT signal handlers that raise
``SignalException`` when the signals received. It is up to the consumer of the code
to properly handle the exception. It is important not to swallow the exception otherwise
the process would not terminate. Example of the typical workflow can be:
.. code-block:: python
pc = start_processes(...)
try:
pc.wait(1)
.. do some other work
except SignalException as e:
pc.shutdown(e.sigval, timeout=30)
If SIGTERM or SIGINT occurs, the code above will try to shutdown child processes by propagating
received signal. If child processes will not terminate in the timeout time, the process will send
the SIGKILL.
"""
if timeout == 0:
return self._poll()
if timeout < 0:
timeout = sys.maxsize
expiry = time.time() + timeout
while time.time() < expiry:
pr = self._poll()
if pr:
return pr
time.sleep(period)
return None
@abc.abstractmethod
def pids(self) -> dict[int, int]:
"""Return pids of processes mapped by their respective local_ranks."""
raise NotImplementedError
@abc.abstractmethod
def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None:
r"""
Terminates all processes managed by this context and cleans up any
meta resources (e.g. redirect, error_file files).
"""
raise NotImplementedError
def close(self, death_sig: signal.Signals | None = None, timeout: int = 30) -> None:
r"""
Terminates all processes managed by this context and cleans up any
meta resources (e.g. redirect, error_file files).
Args:
death_sig: Death signal to terminate processes.
timeout: Time to wait for processes to finish, if process is
still alive after this time, it will be terminated via SIGKILL.
"""
if not death_sig:
death_sig = _get_default_signal()
self._close(death_sig=death_sig, timeout=timeout)
for tail_log in self._tail_logs:
tail_log.stop()
if self.filtered_stdout:
self.filtered_stdout.close()
if self.filtered_stderr:
self.filtered_stderr.close()
def get_std_cm(std_rd: str, redirect_fn):
if IS_WINDOWS or IS_MACOS or not std_rd:
return nullcontext()
else:
return redirect_fn(std_rd)
def _wrap(
local_rank: int,
fn: Callable,
args: dict[int, tuple],
envs: dict[int, dict[str, str]],
stdout_redirects: dict[int, str], # redirect file for stdout (to console if None)
stderr_redirects: dict[int, str], # redirect file for stderr (to console if None)
ret_vals: dict[int, mp.SimpleQueue],
queue_finished_reading_event: synchronize.Event,
numa_options: NumaOptions | None,
) -> None:
# get the per-rank params up front so we fail fast if no mapping is found
args_ = args[local_rank]
env_ = envs[local_rank]
ret_val_ = ret_vals[local_rank]
stdout_rd = stdout_redirects[local_rank]
stderr_rd = stderr_redirects[local_rank]
stdout_cm = get_std_cm(stdout_rd, redirect_stdout)
stderr_cm = get_std_cm(stderr_rd, redirect_stderr)
for k, v in env_.items():
os.environ[k] = v
with stdout_cm, stderr_cm:
fn = maybe_wrap_with_numa_binding(
fn, gpu_index=local_rank, numa_options=numa_options
)
ret = record(fn)(*args_)
ret_val_.put(ret)
queue_finished_reading_event.wait()
|
PContext
|
python
|
run-llama__llama_index
|
llama-index-integrations/retrievers/llama-index-retrievers-duckdb-retriever/llama_index/retrievers/duckdb_retriever/base.py
|
{
"start": 1229,
"end": 4326
}
|
class ____(BaseRetriever):
def __init__(
self,
database_name: str = ":memory:",
table_name: str = "documents",
text_search_config: dict = {
"stemmer": "english",
"stopwords": "english",
"ignore": r"(\\.|[^a-z])+",
"strip_accents": True,
"lower": True,
"overwrite": True,
},
persist_dir: str = "./storage",
node_id_column: str = "node_id",
text_column: str = "text",
# TODO: Add more options for FTS index creation
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = False,
) -> None:
self._similarity_top_k = similarity_top_k
self._callback_manager = callback_manager
self._verbose = verbose
self._table_name = table_name
self._node_id_column = node_id_column
self._text_column = text_column
# TODO: Check if the vector store already has data
# Create an FTS index on the 'text' column if it doesn't already exist
if database_name == ":memory:":
self._database_path = ":memory:"
else:
self._database_path = os.path.join(persist_dir, database_name)
strip_accents = 1 if text_search_config["strip_accents"] else 0
lower = 1 if text_search_config["lower"] else 0
overwrite = 1 if text_search_config["overwrite"] else 0
ignore = text_search_config["ignore"]
sql = f"""
PRAGMA create_fts_index({self._table_name}, {self._node_id_column}, {self._text_column},
stemmer = '{text_search_config["stemmer"]}',
stopwords = '{text_search_config["stopwords"]}', ignore = '{ignore}',
strip_accents = {strip_accents}, lower = {lower}, overwrite = {overwrite})
"""
with DuckDBLocalContext(self._database_path) as conn:
conn.execute(sql)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
if self._verbose:
logger.info(f"Searching for: {query_bundle.query_str}")
query = query_bundle.query_str
sql = f"""
SELECT
fts_main_{self._table_name}.match_bm25({self._node_id_column}, ?) AS score,
{self._node_id_column}, {self._text_column}
FROM {self._table_name}
WHERE score IS NOT NULL
ORDER BY score DESC
LIMIT {self._similarity_top_k};
"""
with DuckDBLocalContext(self._database_path) as conn:
query_result = conn.execute(sql, [query]).fetchall()
# Convert query result to NodeWithScore objects
retrieve_nodes = []
for row in query_result:
score, node_id, text = row
node = TextNode(id=node_id, text=text)
retrieve_nodes.append(NodeWithScore(node=node, score=float(score)))
return retrieve_nodes
|
DuckDBRetriever
|
python
|
scrapy__scrapy
|
tests/test_downloadermiddleware_httpproxy.py
|
{
"start": 251,
"end": 18694
}
|
class ____:
failureException = AssertionError # type: ignore[assignment]
def setup_method(self):
self._oldenv = os.environ.copy()
def teardown_method(self):
os.environ = self._oldenv
def test_not_enabled(self):
crawler = get_crawler(Spider, {"HTTPPROXY_ENABLED": False})
with pytest.raises(NotConfigured):
HttpProxyMiddleware.from_crawler(crawler)
def test_no_environment_proxies(self):
os.environ = {"dummy_proxy": "reset_env_and_do_not_raise"}
mw = HttpProxyMiddleware()
for url in ("http://e.com", "https://e.com", "file:///tmp/a"):
req = Request(url)
assert mw.process_request(req) is None
assert req.url == url
assert req.meta == {}
def test_environment_proxies(self):
os.environ["http_proxy"] = http_proxy = "https://proxy.for.http:3128"
os.environ["https_proxy"] = https_proxy = "http://proxy.for.https:8080"
os.environ.pop("file_proxy", None)
mw = HttpProxyMiddleware()
for url, proxy in [
("http://e.com", http_proxy),
("https://e.com", https_proxy),
("file://tmp/a", None),
]:
req = Request(url)
assert mw.process_request(req) is None
assert req.url == url
assert req.meta.get("proxy") == proxy
def test_proxy_precedence_meta(self):
os.environ["http_proxy"] = "https://proxy.com"
mw = HttpProxyMiddleware()
req = Request("http://scrapytest.org", meta={"proxy": "https://new.proxy:3128"})
assert mw.process_request(req) is None
assert req.meta == {"proxy": "https://new.proxy:3128"}
def test_proxy_auth(self):
os.environ["http_proxy"] = "https://user:pass@proxy:3128"
mw = HttpProxyMiddleware()
req = Request("http://scrapytest.org")
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic dXNlcjpwYXNz"
# proxy from request.meta
req = Request(
"http://scrapytest.org",
meta={"proxy": "https://username:password@proxy:3128"},
)
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert (
req.headers.get("Proxy-Authorization") == b"Basic dXNlcm5hbWU6cGFzc3dvcmQ="
)
def test_proxy_auth_empty_passwd(self):
os.environ["http_proxy"] = "https://user:@proxy:3128"
mw = HttpProxyMiddleware()
req = Request("http://scrapytest.org")
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic dXNlcjo="
# proxy from request.meta
req = Request(
"http://scrapytest.org", meta={"proxy": "https://username:@proxy:3128"}
)
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic dXNlcm5hbWU6"
def test_proxy_auth_encoding(self):
# utf-8 encoding
os.environ["http_proxy"] = "https://m\u00e1n:pass@proxy:3128"
mw = HttpProxyMiddleware(auth_encoding="utf-8")
req = Request("http://scrapytest.org")
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic bcOhbjpwYXNz"
# proxy from request.meta
req = Request(
"http://scrapytest.org", meta={"proxy": "https://\u00fcser:pass@proxy:3128"}
)
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic w7xzZXI6cGFzcw=="
# default latin-1 encoding
mw = HttpProxyMiddleware(auth_encoding="latin-1")
req = Request("http://scrapytest.org")
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic beFuOnBhc3M="
# proxy from request.meta, latin-1 encoding
req = Request(
"http://scrapytest.org", meta={"proxy": "https://\u00fcser:pass@proxy:3128"}
)
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic /HNlcjpwYXNz"
def test_proxy_already_seted(self):
os.environ["http_proxy"] = "https://proxy.for.http:3128"
mw = HttpProxyMiddleware()
req = Request("http://noproxy.com", meta={"proxy": None})
assert mw.process_request(req) is None
assert "proxy" in req.meta
assert req.meta["proxy"] is None
def test_no_proxy(self):
os.environ["http_proxy"] = "https://proxy.for.http:3128"
mw = HttpProxyMiddleware()
os.environ["no_proxy"] = "*"
req = Request("http://noproxy.com")
assert mw.process_request(req) is None
assert "proxy" not in req.meta
os.environ["no_proxy"] = "other.com"
req = Request("http://noproxy.com")
assert mw.process_request(req) is None
assert "proxy" in req.meta
os.environ["no_proxy"] = "other.com,noproxy.com"
req = Request("http://noproxy.com")
assert mw.process_request(req) is None
assert "proxy" not in req.meta
# proxy from meta['proxy'] takes precedence
os.environ["no_proxy"] = "*"
req = Request("http://noproxy.com", meta={"proxy": "http://proxy.com"})
assert mw.process_request(req) is None
assert req.meta == {"proxy": "http://proxy.com"}
def test_no_proxy_invalid_values(self):
os.environ["no_proxy"] = "/var/run/docker.sock"
mw = HttpProxyMiddleware()
# '/var/run/docker.sock' may be used by the user for
# no_proxy value but is not parseable and should be skipped
assert "no" not in mw.proxies
def test_add_proxy_without_credentials(self):
middleware = HttpProxyMiddleware()
request = Request("https://example.com")
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert b"Proxy-Authorization" not in request.headers
def test_add_proxy_with_credentials(self):
middleware = HttpProxyMiddleware()
request = Request("https://example.com")
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user1:password1@example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_remove_proxy_without_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = None
assert middleware.process_request(request) is None
assert request.meta["proxy"] is None
assert b"Proxy-Authorization" not in request.headers
def test_remove_proxy_with_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = None
assert middleware.process_request(request) is None
assert request.meta["proxy"] is None
assert b"Proxy-Authorization" not in request.headers
def test_add_credentials(self):
"""If the proxy request meta switches to a proxy URL with the same
proxy and adds credentials (there were no credentials before), the new
credentials must be used."""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user1:password1@example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_change_credentials(self):
"""If the proxy request meta switches to a proxy URL with different
credentials, those new credentials must be used."""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user2:password2@example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials = middleware._basic_auth_header(
"user2",
"password2",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_remove_credentials(self):
"""If the proxy request meta switches to a proxy URL with the same
proxy but no credentials, the original credentials must be still
used.
To remove credentials while keeping the same proxy URL, users must
delete the Proxy-Authorization header.
"""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
request.meta["proxy"] = "https://example.com"
del request.headers[b"Proxy-Authorization"]
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert b"Proxy-Authorization" not in request.headers
def test_change_proxy_add_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user1:password1@example.org"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.org"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_change_proxy_keep_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user1:password1@example.org"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.org"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
# Make sure, indirectly, that _auth_proxy is updated.
request.meta["proxy"] = "https://example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert b"Proxy-Authorization" not in request.headers
def test_change_proxy_change_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user2:password2@example.org"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.org"
encoded_credentials = middleware._basic_auth_header(
"user2",
"password2",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_change_proxy_remove_credentials(self):
"""If the proxy request meta switches to a proxy URL with a different
proxy and no credentials, no credentials must be used."""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://example.org"
assert middleware.process_request(request) is None
assert request.meta == {"proxy": "https://example.org"}
assert b"Proxy-Authorization" not in request.headers
def test_change_proxy_remove_credentials_preremoved_header(self):
"""Corner case of proxy switch with credentials removal where the
credentials have been removed beforehand.
It ensures that our implementation does not assume that the credentials
header exists when trying to remove it.
"""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://example.org"
del request.headers[b"Proxy-Authorization"]
assert middleware.process_request(request) is None
assert request.meta == {"proxy": "https://example.org"}
assert b"Proxy-Authorization" not in request.headers
def test_proxy_authentication_header_undefined_proxy(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
headers={"Proxy-Authorization": "Basic foo"},
)
assert middleware.process_request(request) is None
assert "proxy" not in request.meta
assert b"Proxy-Authorization" not in request.headers
def test_proxy_authentication_header_disabled_proxy(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
headers={"Proxy-Authorization": "Basic foo"},
meta={"proxy": None},
)
assert middleware.process_request(request) is None
assert request.meta["proxy"] is None
assert b"Proxy-Authorization" not in request.headers
def test_proxy_authentication_header_proxy_without_credentials(self):
"""As long as the proxy URL in request metadata remains the same, the
Proxy-Authorization header is used and kept, and may even be
changed."""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
headers={"Proxy-Authorization": "Basic foo"},
meta={"proxy": "https://example.com"},
)
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert request.headers["Proxy-Authorization"] == b"Basic foo"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert request.headers["Proxy-Authorization"] == b"Basic foo"
request.headers["Proxy-Authorization"] = b"Basic bar"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert request.headers["Proxy-Authorization"] == b"Basic bar"
def test_proxy_authentication_header_proxy_with_same_credentials(self):
middleware = HttpProxyMiddleware()
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
request = Request(
"https://example.com",
headers={"Proxy-Authorization": b"Basic " + encoded_credentials},
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_proxy_authentication_header_proxy_with_different_credentials(self):
middleware = HttpProxyMiddleware()
encoded_credentials1 = middleware._basic_auth_header(
"user1",
"password1",
)
request = Request(
"https://example.com",
headers={"Proxy-Authorization": b"Basic " + encoded_credentials1},
meta={"proxy": "https://user2:password2@example.com"},
)
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials2 = middleware._basic_auth_header(
"user2",
"password2",
)
assert (
request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials2
)
|
TestHttpProxyMiddleware
|
python
|
google__jax
|
jax/_src/error_check.py
|
{
"start": 8774,
"end": 12960
}
|
class ____:
"""A class to store error information for AOT compilation.
This class is used internally by the wrapper functions `wrap_for_export` and
`unwrap_from_import` to encapsulate error-related data within an exported
function.
Attributes:
error_code (jax.Array): A JAX array representing the final error state of
the function to be exported. This value is local to the wrapper function.
error_list (list[tuple[str, str]]): A list of `(error_message, traceback)`
pairs containing error messages and corresponding stack traces. This error
list is local to the wrapper function, and does not contain pairs of error
information from other functions.
"""
error_code: Array
error_list: list[tuple[str, str]]
tree_util.register_dataclass(
_ErrorClass, data_fields=("error_code",), meta_fields=("error_list",)
)
_export.register_pytree_node_serialization(
_ErrorClass,
serialized_name=f"{_ErrorClass.__module__}.{_ErrorClass.__name__}",
serialize_auxdata=lambda x: json.dumps(x, ensure_ascii=False).encode(
"utf-8"
),
deserialize_auxdata=lambda x: json.loads(x.decode("utf-8")),
)
def _traceback_to_str(traceback: TracebackType) -> str:
"""Convert a traceback to a string for export."""
return "".join(tb_lib.format_list(tb_lib.extract_tb(traceback))).rstrip("\n")
def wrap_for_export(f):
"""Wrap a function with error checking to make it compatible with AOT mode.
Error checking relies on global state, which cannot be serialized across
processes. This wrapper ensures that the error state remains within the
function scope, making it possible to export the function and later import in
other processes.
When the function is later imported, it must be wrapped with
:func:`unwrap_from_import` to integrate the error checking mechanism of the
imported function into the global error checking mechanism of the current
process.
This function should only be applied once to a function; wrapping the same
function multiple times is unnecessary.
"""
def inner(*args, **kwargs):
global _error_list
# 1. Save the old state and initialize a new state.
with core.eval_context():
old_ref = _error_storage.ref
_initialize_error_code_ref()
with _error_list_lock:
old_error_list, _error_list = _error_list, []
# 2. Trace the function.
out = f(*args, **kwargs)
error_code = _error_storage.ref[...].min()
# 3. Restore the old state.
_error_list, new_error_list = old_error_list, _error_list
with core.eval_context():
_error_storage.ref = old_ref
new_error_list = [
(msg, _traceback_to_str(traceback)) for msg, traceback in new_error_list
]
return out, _ErrorClass(error_code, new_error_list)
return inner
def unwrap_from_import(f):
"""Unwrap a function after AOT import to restore error checking.
When an AOT-exported function is imported in a new process, its error state is
separate from the global error state of the current process. This wrapper
ensures that errors detected during execution are correctly integrated into
the global error checking mechanism of the current process.
This function should only be applied to functions that were previously wrapped
with :func:`wrap_for_export` before export.
"""
if _error_storage.ref is None:
with core.eval_context():
_initialize_error_code_ref()
assert _error_storage.ref is not None
def inner(*args, **kwargs):
out, error_class = f(*args, **kwargs)
new_error_code, error_list = error_class.error_code, error_class.error_list
# Update the global error list.
with _error_list_lock:
offset = len(_error_list)
_error_list.extend(error_list)
# Update the global error code array.
error_code = _error_storage.ref[...]
should_update = lax.bitwise_and(
error_code == np.uint32(_NO_ERROR),
new_error_code != np.uint32(_NO_ERROR),
)
error_code = lax.select(should_update, new_error_code + offset, error_code)
# TODO(ayx): support vmap and shard_map.
_error_storage.ref[...] = error_code
return out
return inner
|
_ErrorClass
|
python
|
bokeh__bokeh
|
src/bokeh/models/scales.py
|
{
"start": 3510,
"end": 4530
}
|
class ____(Scale):
''' Represent a composition of two scales, which useful for defining
sub-coordinate systems.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
source_scale = Required(Instance(Scale), help="""
The source scale.
""")
target_scale = Required(Instance(Scale), help="""
The target scale.
""")
# TODO assert source_scale != target_scale
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
CompositeScale
|
python
|
great-expectations__great_expectations
|
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_diff_less_than_or_equal_to_threshold.py
|
{
"start": 936,
"end": 5605
}
|
class ____(
DataProfilerProfileMetricProvider
):
metric_name = "data_profiler.profile_numeric_columns_diff_less_than_or_equal_to_threshold"
value_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas( # noqa: C901 - too complex
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
runtime_configuration: Dict,
):
profile_diff = metrics.get("data_profiler.profile_diff")
numeric_columns = metrics.get("data_profiler.profile_numeric_columns")
limit_check_report_keys = metric_value_kwargs["limit_check_report_keys"]
numerical_diff_statistics = metric_value_kwargs["numerical_diff_statistics"]
columns = list(profile_diff["global_stats"]["profile_schema"][1].keys())
data_stats = profile_diff["data_stats"]
requested_columns = {}
# Adds columns if generic column key is provided
# Note: Copy is required for all metric arguments to ensure metric_value_id is identified correctly
limit_check_report_keys_copy = copy.deepcopy(limit_check_report_keys)
limit_check_report_keys_copy = replace_generic_operator_in_report_keys(
limit_check_report_keys_copy, numeric_columns
)
for col, stats in limit_check_report_keys_copy.items():
if col not in numeric_columns: # Makes sure column requested is numeric
requested_columns[col] = "Column is Non-Numeric"
continue
# adds stats if generic stat key is provided
numerical_diff_statistics_copy = copy.deepcopy(numerical_diff_statistics)
stats = replace_generic_operator_in_report_keys(stats, numerical_diff_statistics_copy)
if col not in columns: # Makes sure column exists within profile schema
requested_columns[col] = "Column requested was not found."
continue
col_data_stats = {}
for data_stat in data_stats:
if data_stat["column_name"] == col:
col_data_stats = data_stat["statistics"]
break
requested_columns[col] = {}
for stat, threshold in stats.items():
if stat not in col_data_stats:
requested_columns[col][stat] = "Statistic requested was not found."
continue
diff_val = col_data_stats[stat]
if diff_val == "unchanged": # In the case there is no delta
diff_val = 0
is_less = is_value_less_than_or_equal_to_threshold(diff_val, threshold)
if not is_less:
requested_columns[col][stat] = {
"threshold": threshold,
"value_found": diff_val,
}
else:
requested_columns[col][stat] = True
return requested_columns
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""
Returns a dictionary of given metric names and their corresponding configuration, specifying
the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if (
metric.metric_name
== "data_profiler.profile_numeric_columns_diff_less_than_or_equal_to_threshold"
):
dependencies["data_profiler.profile_diff"] = MetricConfiguration(
metric_name="data_profiler.profile_diff",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
dependencies["data_profiler.profile_numeric_columns"] = MetricConfiguration(
metric_name="data_profiler.profile_numeric_columns",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
return dependencies
|
DataProfilerProfileNumericColumnsDiffLessThanOrEqualToThreshold
|
python
|
pypa__setuptools
|
setuptools/_scripts.py
|
{
"start": 547,
"end": 3075
}
|
class ____(list):
"""
A command spec for a #! header, specified as a list of arguments akin to
those passed to Popen.
"""
options: list[str] = []
split_args = _SplitArgs()
@classmethod
def best(cls):
"""
Choose the best CommandSpec class based on environmental conditions.
"""
return cls
@classmethod
def _sys_executable(cls):
_default = os.path.normpath(sys.executable)
return os.environ.get('__PYVENV_LAUNCHER__', _default)
@classmethod
def from_param(cls, param: Self | str | Iterable[str] | None) -> Self:
"""
Construct a CommandSpec from a parameter to build_scripts, which may
be None.
"""
if isinstance(param, cls):
return param
if isinstance(param, str):
return cls.from_string(param)
if isinstance(param, Iterable):
return cls(param)
if param is None:
return cls.from_environment()
raise TypeError(f"Argument has an unsupported type {type(param)}")
@classmethod
def from_environment(cls):
return cls([cls._sys_executable()])
@classmethod
def from_string(cls, string: str) -> Self:
"""
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
"""
items = shlex.split(string, **cls.split_args)
return cls(items)
def install_options(self, script_text: str):
self.options = shlex.split(self._extract_options(script_text))
cmdline = subprocess.list2cmdline(self)
if not isascii(cmdline):
self.options[:0] = ['-x']
@staticmethod
def _extract_options(orig_script):
"""
Extract any options from the first line of the script.
"""
first = (orig_script + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = match.group(1) or '' if match else ''
return options.strip()
def as_header(self):
return self._render(self + list(self.options))
@staticmethod
def _strip_quotes(item):
_QUOTES = '"\''
for q in _QUOTES:
if item.startswith(q) and item.endswith(q):
return item[1:-1]
return item
@staticmethod
def _render(items):
cmdline = subprocess.list2cmdline(
CommandSpec._strip_quotes(item.strip()) for item in items
)
return '#!' + cmdline + '\n'
|
CommandSpec
|
python
|
django-debug-toolbar__django-debug-toolbar
|
debug_toolbar/panels/sql/tracking.py
|
{
"start": 1007,
"end": 3773
}
|
class ____(Exception):
"""Thrown when template panel triggers a query"""
def wrap_cursor(connection):
# When running a SimpleTestCase, Django monkey patches some DatabaseWrapper
# methods, including .cursor() and .chunked_cursor(), to raise an exception
# if the test code tries to access the database, and then undoes the monkey
# patching when the test case is finished. If we monkey patch those methods
# also, Django's process of undoing those monkey patches will fail. To
# avoid this failure, and because database access is not allowed during a
# SimpleTestCase anyway, skip applying our instrumentation monkey patches if
# we detect that Django has already monkey patched DatabaseWrapper.cursor().
if isinstance(connection.cursor, django.test.testcases._DatabaseFailure):
return
if not hasattr(connection, "_djdt_cursor"):
connection._djdt_cursor = connection.cursor
connection._djdt_chunked_cursor = connection.chunked_cursor
connection._djdt_logger = None
def cursor(*args, **kwargs):
# Per the DB API cursor() does not accept any arguments. There's
# some code in the wild which does not follow that convention,
# so we pass on the arguments even though it's not clean.
# See:
# https://github.com/django-commons/django-debug-toolbar/pull/615
# https://github.com/django-commons/django-debug-toolbar/pull/896
logger = connection._djdt_logger
cursor = connection._djdt_cursor(*args, **kwargs)
if logger is None:
return cursor
mixin = NormalCursorMixin if allow_sql.get() else ExceptionCursorMixin
return patch_cursor_wrapper_with_mixin(cursor.__class__, mixin)(
cursor.cursor, connection, logger
)
def chunked_cursor(*args, **kwargs):
# prevent double wrapping
# solves https://github.com/django-commons/django-debug-toolbar/issues/1239
logger = connection._djdt_logger
cursor = connection._djdt_chunked_cursor(*args, **kwargs)
if logger is not None and not isinstance(cursor, DjDTCursorWrapperMixin):
mixin = NormalCursorMixin if allow_sql.get() else ExceptionCursorMixin
return patch_cursor_wrapper_with_mixin(cursor.__class__, mixin)(
cursor.cursor, connection, logger
)
return cursor
connection.cursor = cursor
connection.chunked_cursor = chunked_cursor
def patch_cursor_wrapper_with_mixin(base_wrapper, mixin):
class DjDTCursorWrapper(mixin, base_wrapper):
pass
return DjDTCursorWrapper
|
SQLQueryTriggered
|
python
|
jazzband__django-formtools
|
formtools/wizard/storage/session.py
|
{
"start": 32,
"end": 523
}
|
class ____(BaseStorage):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.prefix not in self.request.session:
self.init_data()
def _get_data(self):
self.request.session.modified = True
return self.request.session[self.prefix]
def _set_data(self, value):
self.request.session[self.prefix] = value
self.request.session.modified = True
data = property(_get_data, _set_data)
|
SessionStorage
|
python
|
django__django
|
django/contrib/gis/db/models/lookups.py
|
{
"start": 440,
"end": 3654
}
|
class ____(Lookup):
sql_template = None
transform_func = None
distance = False
band_rhs = None
band_lhs = None
def __init__(self, lhs, rhs):
rhs, *self.rhs_params = rhs if isinstance(rhs, (list, tuple)) else (rhs,)
super().__init__(lhs, rhs)
self.template_params = {}
self.process_rhs_params()
def process_rhs_params(self):
if self.rhs_params:
# Check if a band index was passed in the query argument.
if len(self.rhs_params) == (2 if self.lookup_name == "relate" else 1):
self.process_band_indices()
elif len(self.rhs_params) > 1:
raise ValueError("Tuple too long for lookup %s." % self.lookup_name)
elif isinstance(self.lhs, RasterBandTransform):
self.process_band_indices(only_lhs=True)
def process_band_indices(self, only_lhs=False):
"""
Extract the lhs band index from the band transform class and the rhs
band index from the input tuple.
"""
# PostGIS band indices are 1-based, so the band index needs to be
# increased to be consistent with the GDALRaster band indices.
if only_lhs:
self.band_rhs = 1
self.band_lhs = self.lhs.band_index + 1
return
if isinstance(self.lhs, RasterBandTransform):
self.band_lhs = self.lhs.band_index + 1
else:
self.band_lhs = 1
self.band_rhs, *self.rhs_params = self.rhs_params
def get_db_prep_lookup(self, value, connection):
# get_db_prep_lookup is called by process_rhs from super class
return ("%s", (connection.ops.Adapter(value),))
def process_rhs(self, compiler, connection):
if isinstance(self.rhs, Query):
# If rhs is some Query, don't touch it.
return super().process_rhs(compiler, connection)
if isinstance(self.rhs, Expression):
self.rhs = self.rhs.resolve_expression(compiler.query)
rhs, rhs_params = super().process_rhs(compiler, connection)
placeholder = connection.ops.get_geom_placeholder(
self.lhs.output_field, self.rhs, compiler
)
return placeholder % rhs, rhs_params
def get_rhs_op(self, connection, rhs):
# Unlike BuiltinLookup, the GIS get_rhs_op() implementation should
# return an object (SpatialOperator) with an as_sql() method to allow
# for more complex computations (where the lhs part can be mixed in).
return connection.ops.gis_operators[self.lookup_name]
def as_sql(self, compiler, connection):
lhs_sql, lhs_params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
sql_params = (*lhs_params, *rhs_params)
template_params = {
"lhs": lhs_sql,
"rhs": rhs_sql,
"value": "%s",
**self.template_params,
}
rhs_op = self.get_rhs_op(connection, rhs_sql)
return rhs_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
@BaseSpatialField.register_lookup
|
GISLookup
|
python
|
pytorch__pytorch
|
test/inductor/test_cache.py
|
{
"start": 12307,
"end": 21057
}
|
class ____(TestMixin, TestCase):
@parametrize("async_cache_type", TestMixin.async_cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
def test_get_async(
self: Self,
async_cache_type: type[icache.AsyncCache],
key_type: type[icache.Key],
value_type: type[icache.Value],
) -> None:
# Verifies that asynchronous get and insert operations work as expected:
# get_async returns None for missing keys, insert_async inserts values,
# and get_async returns the correct value after insertion.
if not self.cache_type_supports_key_and_value_types(
async_cache_type, key_type, value_type
):
return
async_cache: icache.AsyncCache = async_cache_type()
self.maybe_randomize_base_dir(async_cache)
key_1, key_2 = self.keys_not_in(async_cache, lambda: self.key(key_type), 2)
value_1, value_2 = self.values_unalike(lambda: self.value(value_type), 2)
executor = ThreadPoolExecutor()
get_1 = async_cache.get_async(key_1, executor)
get_2 = async_cache.get_async(key_2, executor)
self.assertIsNone(get_1.result())
self.assertIsNone(get_2.result())
insert_1 = async_cache.insert_async(key_1, value_1, executor)
insert_2 = async_cache.insert_async(key_2, value_2, executor)
self.assertTrue(insert_1.result())
self.assertTrue(insert_2.result())
get_1 = async_cache.get_async(key_1, executor)
get_2 = async_cache.get_async(key_2, executor)
self.assertEqual(get_1.result(), value_1)
self.assertEqual(get_2.result(), value_2)
executor.shutdown()
@parametrize("async_cache_type", TestMixin.async_cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
def test_insert_async(
self: Self,
async_cache_type: type[icache.AsyncCache],
key_type: type[icache.Key],
value_type: type[icache.Value],
) -> None:
# Ensures that only one of two concurrent insert_async calls for the same key succeeds,
# and the cache contains the value from the successful insert.
if not self.cache_type_supports_key_and_value_types(
async_cache_type, key_type, value_type
):
return
async_cache: icache.AsyncCache = async_cache_type()
self.maybe_randomize_base_dir(async_cache)
key = self.key_not_in(async_cache, lambda: self.key(key_type))
value_1, value_2 = self.values_unalike(lambda: self.value(value_type), 2)
executor = ThreadPoolExecutor()
get = async_cache.get_async(key, executor)
self.assertIsNone(get.result())
insert_1 = async_cache.insert_async(key, value_1, executor)
insert_2 = async_cache.insert_async(key, value_2, executor)
self.assertTrue(insert_1.result() ^ insert_2.result())
get = async_cache.get_async(key, executor)
if insert_1.result():
self.assertEqual(get.result(), value_1)
else:
self.assertEqual(get.result(), value_2)
executor.shutdown()
@parametrize("async_cache_type", TestMixin.async_cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
def test_get_async_concurrent(
self: Self,
async_cache_type: type[icache.AsyncCache],
key_type: type[icache.Key],
value_type: type[icache.Value],
) -> None:
# Ensures that concurrent asynchronous get operations return the correct values
# for all inserted keys.
if not self.cache_type_supports_key_and_value_types(
async_cache_type, key_type, value_type
):
return
executor, iters = ThreadPoolExecutor(), 100
async_cache: icache.AsyncCache = async_cache_type()
self.maybe_randomize_base_dir(async_cache)
keys = self.keys_not_in(async_cache, lambda: self.key(key_type), iters)
values = self.values_unalike(lambda: self.value(value_type), iters)
for key, value in zip(keys, values):
self.assertIsNone(async_cache.get(key))
self.assertTrue(async_cache.insert(key, value))
gets = executor.map(lambda key: async_cache.get_async(key, executor), keys)
for value, get in zip(values, gets):
self.assertEqual(get.result(), value)
executor.shutdown()
@parametrize("async_cache_type", TestMixin.async_cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
def test_insert_async_concurrent(
self: Self,
async_cache_type: type[icache.AsyncCache],
key_type: type[icache.Key],
value_type: type[icache.Value],
) -> None:
# Ensures that concurrent asynchronous insert operations only allow the first insert
# for each key to succeed, and the cache contains the correct value for each key.
if not self.cache_type_supports_key_and_value_types(
async_cache_type, key_type, value_type
):
return
executor, iters = ThreadPoolExecutor(), 50
async_cache: icache.AsyncCache = async_cache_type()
self.maybe_randomize_base_dir(async_cache)
keys = self.keys_not_in(async_cache, lambda: self.key(key_type), iters) * 2
values = self.values_unalike(lambda: self.value(value_type), iters * 2)
for key in keys:
self.assertIsNone(async_cache.get(key))
inserts = executor.map(
lambda key, value: async_cache.insert_async(key, value, executor),
keys,
values,
)
inserted = {}
for key, value, insert in zip(keys, values, inserts):
if insert.result():
self.assertEqual(async_cache.get(key), value)
self.assertTrue(key not in inserted)
inserted[key] = value
self.assertTrue(set(keys) == set(inserted.keys()))
for key, value in inserted.items():
self.assertTrue(async_cache.get(key), value)
executor.shutdown()
@parametrize("async_cache_type", TestMixin.async_cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
@parametrize("get_first", [True, False])
def test_combo_async_concurrent(
self: Self,
async_cache_type: type[icache.AsyncCache],
key_type: type[icache.Key],
value_type: type[icache.Value],
get_first: bool,
) -> None:
# Tests a mix of concurrent asynchronous get and insert operations, with the order
# of operations varied by the get_first parameter, to ensure correctness under
# interleaved async access.
if not self.cache_type_supports_key_and_value_types(
async_cache_type, key_type, value_type
):
return
executor, iters = ThreadPoolExecutor(), 50
async_cache: icache.AsyncCache = async_cache_type()
self.maybe_randomize_base_dir(async_cache)
keys = self.keys_not_in(async_cache, lambda: self.key(key_type), iters) * 2
values = self.values_unalike(lambda: self.value(value_type), iters * 2)
for key in keys:
self.assertIsNone(async_cache.get(key))
get_futures, insert_futures = [], []
for key, value in zip(keys, values):
if get_first:
get_futures.append(async_cache.get_async(key, executor))
insert_futures.append(async_cache.insert_async(key, value, executor))
else:
insert_futures.append(async_cache.insert_async(key, value, executor))
get_futures.append(async_cache.get_async(key, executor))
inserted = {}
for key, value, get_future, insert_future in zip(
keys, values, get_futures, insert_futures
):
if (get := get_future.result()) is not None:
if insert_future.result():
self.assertEqual(get, value)
self.assertTrue(key not in inserted)
inserted[key] = value
else:
if insert_future.result():
self.assertTrue(key not in inserted)
inserted[key] = value
self.assertTrue(set(keys) == set(inserted.keys()))
for key, value in inserted.items():
self.assertEqual(async_cache.get(key), value)
executor.shutdown()
@instantiate_parametrized_tests
|
AsyncCacheTest
|
python
|
django__django
|
django/db/backends/postgresql/compiler.py
|
{
"start": 336,
"end": 581
}
|
class ____(list):
"""
Sentinel value to signal DatabaseOperations.bulk_insert_sql() that the
UNNEST strategy should be used for the bulk insert.
"""
def __str__(self):
return "UNNEST(%s)" % ", ".join(self)
|
InsertUnnest
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagstermill/dagstermill/manager.py
|
{
"start": 2299,
"end": 2789
}
|
class ____(EventGenerationManager):
"""Utility class to explicitly manage setup/teardown of resource events. Overrides the default
`generate_teardown_events` method so that teardown is deferred until explicitly called by the
dagstermill Manager.
"""
def generate_teardown_events(self):
return iter(())
def teardown(self):
return [teardown_event for teardown_event in super().generate_teardown_events()]
@beta
|
DagstermillResourceEventGenerationManager
|
python
|
Textualize__textual
|
docs/examples/styles/link_color.py
|
{
"start": 64,
"end": 747
}
|
class ____(App):
CSS_PATH = "link_color.tcss"
def compose(self):
yield Label(
"Visit the [link='https://textualize.io']Textualize[/link] website.",
id="lbl1", # (1)!
)
yield Label(
"Click [@click=app.bell]here[/] for the bell sound.",
id="lbl2", # (2)!
)
yield Label(
"You can also click [@click=app.bell]here[/] for the bell sound.",
id="lbl3", # (3)!
)
yield Label(
"[@click=app.quit]Exit this application.[/]",
id="lbl4", # (4)!
)
if __name__ == "__main__":
app = LinkColorApp()
app.run()
|
LinkColorApp
|
python
|
scrapy__scrapy
|
tests/test_loader.py
|
{
"start": 5950,
"end": 6033
}
|
class ____(InitializationTestMixin):
item_class = dict
|
TestInitializationFromDict
|
python
|
huggingface__transformers
|
src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
|
{
"start": 66838,
"end": 71832
}
|
class ____(Wav2Vec2ConformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Sequence classification does not support the use of Wav2Vec2Conformer adapters (config.add_adapter=True)"
)
self.wav2vec2_conformer = Wav2Vec2ConformerModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.wav2vec2_conformer.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wav2vec2_conformer.parameters():
param.requires_grad = False
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2ConformerProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wav2vec2_conformer(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
|
Wav2Vec2ConformerForSequenceClassification
|
python
|
numba__numba
|
numba/core/caching.py
|
{
"start": 3732,
"end": 4671
}
|
class ____(object):
"""
A cache locator mixin for functions which are backed by a well-known
Python source file.
"""
def get_source_stamp(self):
if getattr(sys, 'frozen', False):
st = os.stat(sys.executable)
else:
st = os.stat(self._py_file)
# We use both timestamp and size as some filesystems only have second
# granularity.
return st.st_mtime, st.st_size
def get_disambiguator(self):
return str(self._lineno)
@classmethod
def from_function(cls, py_func, py_file):
if not os.path.exists(py_file):
# Perhaps a placeholder (e.g. "<ipython-XXX>")
return
self = cls(py_func, py_file)
try:
self.ensure_cache_path()
except OSError:
# Cannot ensure the cache directory exists or is writable
return
return self
|
_SourceFileBackedLocatorMixin
|
python
|
django__django
|
tests/delete/models.py
|
{
"start": 5587,
"end": 5658
}
|
class ____(models.Model):
m2m = models.ManyToManyField(M2MTo)
|
M2MFrom
|
python
|
walkccc__LeetCode
|
solutions/1002. Find Common Characters/1002.py
|
{
"start": 0,
"end": 192
}
|
class ____:
def commonChars(self, words: list[str]) -> list[str]:
return functools.reduce(lambda a, b: a & b,
map(collections.Counter, words)).elements()
|
Solution
|
python
|
google__pytype
|
pytype/tools/xref/indexer.py
|
{
"start": 39234,
"end": 41268
}
|
class ____(source.AbstractTrace):
def __repr__(self):
types_repr = tuple(
t and [node_utils.typename(x) for x in t]
for t in self.types)
return f"{super().__repr__()} {types_repr}"
def process_file(options, source_text=None, generate_callgraphs=False,
preserve_pytype_vm=False):
"""Process a single file and return cross references.
Args:
options: A dictionary of pytype options.
source_text: Optional text of the file; will be read from the file pointed
to by options.input if not supplied.
generate_callgraphs: Collect call graph information
preserve_pytype_vm: Preserve the pytype vm in the indexer
Returns:
The Indexer object used for indexing.
Raises:
PytypeError if pytype fails.
"""
with config.verbosity_from(options):
loader = load_pytd.create_loader(options)
src = source_text or io.read_source_file(options.input)
with io.wrap_pytype_exceptions(PytypeError, filename=options.input):
ret = analyze.infer_types(
src=src,
options=options,
loader=loader)
pytd_module = ret.ast
# pylint: disable=unexpected-keyword-arg
ast_root_node = astlib.parse(src, options.input,
feature_version=options.python_version[1])
# pylint: enable=unexpected-keyword-arg
# TODO(mdemello): Get from args
module_name = "module"
src_code = source.Code(
src, ret.context.vm.opcode_traces, VmTrace, filename=options.input)
ix = Indexer(
ast=astlib,
src=src_code,
loader=loader,
module_name=module_name,
pytd_module=pytd_module)
ix.index(ast_root_node)
ix.finalize()
# Make the vm available via indexer.vm for post-finalize() functions.
ix.vm = ret.context.vm
# Use the indexer as a single object to hold data for calling processes.
if generate_callgraphs:
ix.function_map = callgraph.collect_function_map(ix)
# Release the vm before returning
if not preserve_pytype_vm:
ix.vm = None
return ix
|
VmTrace
|
python
|
cherrypy__cherrypy
|
cherrypy/process/win32.py
|
{
"start": 2464,
"end": 4380
}
|
class ____(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event
objects.
"""
def __init__(self):
"""Initialize a Win32 bus implementation."""
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(
None,
0,
0,
'WSPBus %s Event (pid=%r)' % (state.name, os.getpid()),
)
self.events[state] = event
return event
@property
def state(self):
"""The bus state."""
return self._state
@state.setter
def state(self, value):
"""Set the bus state."""
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(
events,
0,
win32event.INFINITE,
)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
|
Win32Bus
|
python
|
django__django
|
tests/forms_tests/field_tests/test_jsonfield.py
|
{
"start": 244,
"end": 4844
}
|
class ____(SimpleTestCase):
def test_valid(self):
field = JSONField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {"a": "b"})
def test_valid_empty(self):
field = JSONField(required=False)
self.assertIsNone(field.clean(""))
self.assertIsNone(field.clean(None))
def test_invalid(self):
field = JSONField()
with self.assertRaisesMessage(ValidationError, "Enter a valid JSON."):
field.clean("{some badly formed: json}")
def test_prepare_value(self):
field = JSONField()
self.assertEqual(field.prepare_value({"a": "b"}), '{"a": "b"}')
self.assertEqual(field.prepare_value(None), "null")
self.assertEqual(field.prepare_value("foo"), '"foo"')
self.assertEqual(field.prepare_value("你好,世界"), '"你好,世界"')
self.assertEqual(field.prepare_value({"a": "😀🐱"}), '{"a": "😀🐱"}')
self.assertEqual(
field.prepare_value(["你好,世界", "jaźń"]),
'["你好,世界", "jaźń"]',
)
def test_widget(self):
field = JSONField()
self.assertIsInstance(field.widget, Textarea)
def test_custom_widget_kwarg(self):
field = JSONField(widget=TextInput)
self.assertIsInstance(field.widget, TextInput)
def test_custom_widget_attribute(self):
"""The widget can be overridden with an attribute."""
class CustomJSONField(JSONField):
widget = TextInput
field = CustomJSONField()
self.assertIsInstance(field.widget, TextInput)
def test_converted_value(self):
field = JSONField(required=False)
tests = [
'["a", "b", "c"]',
'{"a": 1, "b": 2}',
"1",
"1.5",
'"foo"',
"true",
"false",
"null",
]
for json_string in tests:
with self.subTest(json_string=json_string):
val = field.clean(json_string)
self.assertEqual(field.clean(val), val)
def test_has_changed(self):
field = JSONField()
self.assertIs(field.has_changed({"a": True}, '{"a": 1}'), True)
self.assertIs(field.has_changed({"a": 1, "b": 2}, '{"b": 2, "a": 1}'), False)
def test_custom_encoder_decoder(self):
class CustomDecoder(json.JSONDecoder):
def __init__(self, object_hook=None, *args, **kwargs):
return super().__init__(object_hook=self.as_uuid, *args, **kwargs)
def as_uuid(self, dct):
if "uuid" in dct:
dct["uuid"] = uuid.UUID(dct["uuid"])
return dct
value = {"uuid": uuid.UUID("{c141e152-6550-4172-a784-05448d98204b}")}
encoded_value = '{"uuid": "c141e152-6550-4172-a784-05448d98204b"}'
field = JSONField(encoder=DjangoJSONEncoder, decoder=CustomDecoder)
self.assertEqual(field.prepare_value(value), encoded_value)
self.assertEqual(field.clean(encoded_value), value)
def test_formfield_disabled(self):
class JSONForm(Form):
json_field = JSONField(disabled=True)
form = JSONForm({"json_field": '["bar"]'}, initial={"json_field": ["foo"]})
self.assertIn("["foo"]</textarea>", form.as_p())
def test_redisplay_none_input(self):
class JSONForm(Form):
json_field = JSONField(required=True)
tests = [
{},
{"json_field": None},
]
for data in tests:
with self.subTest(data=data):
form = JSONForm(data)
self.assertEqual(form["json_field"].value(), "null")
self.assertIn("null</textarea>", form.as_p())
self.assertEqual(form.errors["json_field"], ["This field is required."])
def test_redisplay_wrong_input(self):
"""
Displaying a bound form (typically due to invalid input). The form
should not overquote JSONField inputs.
"""
class JSONForm(Form):
name = CharField(max_length=2)
json_field = JSONField()
# JSONField input is valid, name is too long.
form = JSONForm({"name": "xyz", "json_field": '["foo"]'})
self.assertNotIn("json_field", form.errors)
self.assertIn("["foo"]</textarea>", form.as_p())
# Invalid JSONField.
form = JSONForm({"name": "xy", "json_field": '{"foo"}'})
self.assertEqual(form.errors["json_field"], ["Enter a valid JSON."])
self.assertIn("{"foo"}</textarea>", form.as_p())
|
JSONFieldTest
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/base/llms/types.py
|
{
"start": 732,
"end": 973
}
|
class ____(str, Enum):
"""Message role."""
SYSTEM = "system"
DEVELOPER = "developer"
USER = "user"
ASSISTANT = "assistant"
FUNCTION = "function"
TOOL = "tool"
CHATBOT = "chatbot"
MODEL = "model"
|
MessageRole
|
python
|
doocs__leetcode
|
solution/3700-3799/3751.Total Waviness of Numbers in Range I/Solution.py
|
{
"start": 0,
"end": 607
}
|
class ____:
def totalWaviness(self, num1: int, num2: int) -> int:
def f(x: int) -> int:
nums = []
while x:
nums.append(x % 10)
x //= 10
m = len(nums)
if m < 3:
return 0
s = 0
for i in range(1, m - 1):
if nums[i] > nums[i - 1] and nums[i] > nums[i + 1]:
s += 1
elif nums[i] < nums[i - 1] and nums[i] < nums[i + 1]:
s += 1
return s
return sum(f(x) for x in range(num1, num2 + 1))
|
Solution
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.