language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 5824,
"end": 5959
} | class ____(AbstractInheritanceTestModelParent):
class Meta:
app_label = "django_extensions"
| AbstractInheritanceTestModelChild |
python | getsentry__sentry | src/sentry/models/release.py | {
"start": 6512,
"end": 36698
} | class ____(Model):
"""
A release is generally created when a new version is pushed into a
production state.
A commit is generally a git commit. See also releasecommit.py
"""
__relocation_scope__ = RelocationScope.Excluded
organization = FlexibleForeignKey("sentry.Organization")
projects = models.ManyToManyField(
"sentry.Project", related_name="releases", through=ReleaseProject
)
status = BoundedPositiveIntegerField(
default=ReleaseStatus.OPEN,
null=True,
choices=(
(ReleaseStatus.OPEN, _("Open")),
(ReleaseStatus.ARCHIVED, _("Archived")),
),
)
version = models.CharField(max_length=DB_VERSION_LENGTH)
# ref might be the branch name being released
ref = models.CharField(max_length=DB_VERSION_LENGTH, null=True, blank=True)
url = models.URLField(null=True, blank=True)
date_added = models.DateTimeField(default=timezone.now, db_index=True)
# DEPRECATED - not available in UI or editable from API
date_started = models.DateTimeField(null=True, blank=True)
date_released = models.DateTimeField(null=True, blank=True)
# arbitrary data recorded with the release
data = LegacyTextJSONField(default=dict)
# generally the release manager, or the person initiating the process
owner_id = HybridCloudForeignKey("sentry.User", on_delete="SET_NULL", null=True, blank=True)
# materialized stats
commit_count = BoundedPositiveIntegerField(null=True, default=0)
last_commit_id = BoundedBigIntegerField(null=True)
authors = ArrayField(models.TextField(), default=list, null=True)
total_deploys = BoundedPositiveIntegerField(null=True, default=0)
last_deploy_id = BoundedPositiveIntegerField(null=True)
# Denormalized semver columns. These will be filled if `version` matches at least
# part of our more permissive model of semver:
# `<package>@<major>.<minor>.<patch>.<revision>-<prerelease>+<build_code>
package = models.TextField(null=True)
major = models.BigIntegerField(null=True)
minor = models.BigIntegerField(null=True)
patch = models.BigIntegerField(null=True)
revision = models.BigIntegerField(null=True)
prerelease = models.TextField(null=True)
build_code = models.TextField(null=True)
# If `build_code` can be parsed as a 64 bit int we'll store it here as well for
# sorting/comparison purposes
build_number = models.BigIntegerField(null=True)
# HACK HACK HACK
# As a transitional step we permit release rows to exist multiple times
# where they are "specialized" for a specific project. The goal is to
# later split up releases by project again. This is for instance used
# by the org release listing.
_for_project_id: int | None = None
# the user agent that set the release
user_agent = models.TextField(null=True)
# Custom Model Manager required to override create method
objects: ClassVar[ReleaseModelManager] = ReleaseModelManager()
class Meta:
app_label = "sentry"
db_table = "sentry_release"
unique_together = (("organization", "version"),)
indexes = [
models.Index(
fields=["organization", "version"],
opclasses=["", "text_pattern_ops"],
name="sentry_release_version_btree",
),
# We also use a functional index to order `prerelease` according to semver rules,
IndexWithPostgresNameLimits(
"organization",
"package",
F("major").desc(),
F("minor").desc(),
F("patch").desc(),
F("revision").desc(),
Case(When(prerelease="", then=1), default=0).desc(),
F("prerelease").desc(),
name="sentry_release_semver_by_package_idx",
),
models.Index(
"organization",
F("major").desc(),
F("minor").desc(),
F("patch").desc(),
F("revision").desc(),
Case(When(prerelease="", then=1), default=0).desc(),
F("prerelease").desc(),
name="sentry_release_semver_idx",
),
models.Index(fields=("organization", "build_code")),
models.Index(fields=("organization", "build_number")),
models.Index(fields=("organization", "date_added")),
models.Index(fields=("organization", "status")),
]
__repr__ = sane_repr("organization_id", "version")
SEMVER_COLS = ["major", "minor", "patch", "revision", "prerelease_case", "prerelease"]
def __eq__(self, other: object) -> bool:
"""Make sure that specialized releases are only comparable to the same
other specialized release. This for instance lets us treat them
separately for serialization purposes.
"""
return (
# don't treat `NotImplemented` as truthy
Model.__eq__(self, other) is True
and isinstance(other, Release)
and self._for_project_id == other._for_project_id
)
def __hash__(self):
# https://code.djangoproject.com/ticket/30333
return super().__hash__()
@staticmethod
def is_valid_version(value):
if value is None:
return False
if any(c in value for c in BAD_RELEASE_CHARS):
return False
value_stripped = str(value).strip()
return not (
not value_stripped
or value_stripped in (".", "..")
or value_stripped.lower() == "latest"
)
@property
def is_semver_release(self):
return self.package is not None
def get_previous_release(self, project):
"""Get the release prior to this one. None if none exists"""
return (
ReleaseProject.objects.filter(project=project, release__date_added__lt=self.date_added)
.order_by("-release__date_added")
.first()
)
@staticmethod
def is_semver_version(version):
"""
Method that checks if a version follows semantic versioning
"""
# If version is not a valid release version, or it has no package then we return False
if not Release.is_valid_version(version) or "@" not in version:
return False
try:
version_info = parse_release(version, json_loads=orjson.loads)
version_parsed = version_info.get("version_parsed")
return version_parsed is not None and all(
validate_bigint(version_parsed[field])
for field in ("major", "minor", "patch", "revision")
)
except RelayError:
# This can happen on invalid legacy releases
return False
@staticmethod
def is_release_newer_or_equal(org_id, release, other_release):
if release is None:
return False
if other_release is None:
return True
if release == other_release:
return True
releases = {
release.version: float(release.date_added.timestamp())
for release in Release.objects.filter(
organization_id=org_id, version__in=[release, other_release]
)
}
release_date = releases.get(release)
other_release_date = releases.get(other_release)
if release_date is not None and other_release_date is not None:
return release_date > other_release_date
return False
@property
def semver_tuple(self) -> SemverVersion:
return SemverVersion(
self.major,
self.minor,
self.patch,
self.revision,
1 if self.prerelease == "" else 0,
self.prerelease,
)
@classmethod
def get_cache_key(cls, organization_id, version) -> str:
return f"release:3:{organization_id}:{md5_text(version).hexdigest()}"
@classmethod
def get_lock_key(cls, organization_id, release_id) -> str:
return f"releasecommits:{organization_id}:{release_id}"
@classmethod
def get(cls, project, version):
cache_key = cls.get_cache_key(project.organization_id, version)
release = cache.get(cache_key)
if release is None:
try:
release = cls.objects.get(
organization_id=project.organization_id, projects=project, version=version
)
except cls.DoesNotExist:
release = -1
cache.set(cache_key, release, 300)
if release == -1:
return
return release
@classmethod
def get_or_create(cls, project, version, date_added=None):
with metrics.timer("models.release.get_or_create") as metric_tags:
return cls._get_or_create_impl(project, version, date_added, metric_tags)
@classmethod
def _get_or_create_impl(cls, project, version, date_added, metric_tags):
from sentry.models.project import Project
if date_added is None:
date_added = timezone.now()
cache_key = cls.get_cache_key(project.organization_id, version)
release = cache.get(cache_key)
if release in (None, -1):
# TODO(dcramer): if the cache result is -1 we could attempt a
# default create here instead of default get
project_version = (f"{project.slug}-{version}")[:DB_VERSION_LENGTH]
releases = list(
cls.objects.filter(
organization_id=project.organization_id,
version__in=[version, project_version],
projects=project,
)
)
if releases:
try:
release = [r for r in releases if r.version == project_version][0]
except IndexError:
release = releases[0]
metric_tags["created"] = "false"
else:
try:
with atomic_transaction(using=router.db_for_write(cls)):
release = cls.objects.create(
organization_id=project.organization_id,
version=version,
date_added=date_added,
total_deploys=0,
)
metric_tags["created"] = "true"
except IntegrityError:
metric_tags["created"] = "false"
release = cls.objects.get(
organization_id=project.organization_id, version=version
)
# NOTE: `add_project` creates a ReleaseProject instance
release.add_project(project)
if not project.flags.has_releases:
project.flags.has_releases = True
project.update(flags=F("flags").bitor(Project.flags.has_releases))
# TODO(dcramer): upon creating a new release, check if it should be
# the new "latest release" for this project
cache.set(cache_key, release, 3600)
metric_tags["cache_hit"] = "false"
else:
metric_tags["cache_hit"] = "true"
return release
@cached_property
def version_info(self):
try:
return parse_release(self.version, json_loads=orjson.loads)
except RelayError:
# This can happen on invalid legacy releases
return None
@classmethod
def merge(cls, to_release, from_releases):
# The following models reference release:
# ReleaseCommit.release
# ReleaseEnvironment.release_id
# ReleaseProject.release
# GroupRelease.release_id
# GroupResolution.release
# Group.first_release
# ReleaseFile.release
from sentry.models.group import Group
from sentry.models.grouprelease import GroupRelease
from sentry.models.groupresolution import GroupResolution
from sentry.models.releasecommit import ReleaseCommit
from sentry.models.releaseenvironment import ReleaseEnvironment
from sentry.models.releasefile import ReleaseFile
from sentry.models.releaseprojectenvironment import ReleaseProjectEnvironment
from sentry.models.releases.release_project import ReleaseProject
model_list = (
ReleaseCommit,
ReleaseEnvironment,
ReleaseFile,
ReleaseProject,
ReleaseProjectEnvironment,
GroupRelease,
GroupResolution,
)
for release in from_releases:
for model in model_list:
if hasattr(model, "release"):
update_kwargs = {"release": to_release}
else:
update_kwargs = {"release_id": to_release.id}
try:
with atomic_transaction(using=router.db_for_write(model)):
model.objects.filter(release_id=release.id).update(**update_kwargs)
except IntegrityError:
for item in model.objects.filter(release_id=release.id):
try:
with atomic_transaction(using=router.db_for_write(model)):
model.objects.filter(id=item.id).update(**update_kwargs)
except IntegrityError:
item.delete()
Group.objects.filter(first_release=release).update(first_release=to_release)
release.delete()
def add_dist(self, name, date_added=None):
from sentry.models.distribution import Distribution
if date_added is None:
date_added = timezone.now()
return Distribution.objects.get_or_create(
release=self,
name=name,
defaults={"date_added": date_added, "organization_id": self.organization_id},
)[0]
def add_project(self, project):
"""
Add a project to this release.
Returns True if the project was added and did not already exist.
"""
from sentry.models.project import Project
try:
with atomic_transaction(using=router.db_for_write(ReleaseProject)):
obj, created = ReleaseProject.objects.get_or_create(project=project, release=self)
if not project.flags.has_releases:
project.flags.has_releases = True
project.update(flags=F("flags").bitor(Project.flags.has_releases))
except IntegrityError:
obj = None
created = False
return obj, created
def handle_commit_ranges(self, refs):
"""
Takes commit refs of the form:
[
{
'previousCommit': None,
'commit': 'previous_commit..commit',
}
]
Note: Overwrites 'previousCommit' and 'commit'
"""
for ref in refs:
if COMMIT_RANGE_DELIMITER in ref["commit"]:
ref["previousCommit"], ref["commit"] = ref["commit"].split(COMMIT_RANGE_DELIMITER)
def set_refs(self, refs, user_id, fetch=False):
with sentry_sdk.start_span(op="set_refs"):
from sentry.api.exceptions import InvalidRepository
from sentry.models.releaseheadcommit import ReleaseHeadCommit
from sentry.models.repository import Repository
from sentry.tasks.commits import fetch_commits
names = {r["repository"] for r in refs}
repos = list(
Repository.objects.filter(organization_id=self.organization_id, name__in=names)
)
repos_by_name = {r.name: r for r in repos}
invalid_repos = names - set(repos_by_name.keys())
if invalid_repos:
raise InvalidRepository(f"Invalid repository names: {','.join(invalid_repos)}")
self.handle_commit_ranges(refs)
for ref in refs:
repo = repos_by_name[ref["repository"]]
commit = Commit.objects.get_or_create(
organization_id=self.organization_id, repository_id=repo.id, key=ref["commit"]
)[0]
# update head commit for repo/release if exists
ReleaseHeadCommit.objects.create_or_update(
organization_id=self.organization_id,
repository_id=repo.id,
release=self,
values={"commit": commit},
)
if fetch:
prev_release = get_previous_release(self)
fetch_commits.apply_async(
kwargs={
"release_id": self.id,
"user_id": user_id,
"refs": refs,
"prev_release_id": prev_release and prev_release.id,
}
)
@sentry_sdk.trace
def set_commits(self, commit_list):
"""
Bind a list of commits to this release.
This will clear any existing commit log and replace it with the given
commits.
"""
set_span_attribute("release.set_commits", len(commit_list))
from sentry.models.releases.set_commits import set_commits
set_commits(self, commit_list)
def safe_delete(self):
"""Deletes a release if possible or raises a `UnsafeReleaseDeletion`
exception.
"""
from sentry import release_health
from sentry.models.group import Group
from sentry.models.releasefile import ReleaseFile
# we don't want to remove the first_release metadata on the Group, and
# while people might want to kill a release (maybe to remove files),
# removing the release is prevented
if Group.objects.filter(first_release=self).exists():
raise UnsafeReleaseDeletion(ERR_RELEASE_REFERENCED)
# We do not allow releases with health data to be deleted because
# the upserting from snuba data would create the release again.
# We would need to be able to delete this data from snuba which we
# can't do yet.
project_ids = list(self.projects.values_list("id").all())
if release_health.backend.check_has_health_data(
[(p[0], self.version) for p in project_ids]
):
raise UnsafeReleaseDeletion(ERR_RELEASE_HEALTH_DATA)
# TODO(dcramer): this needs to happen in the queue as it could be a long
# and expensive operation
file_list = ReleaseFile.objects.filter(release_id=self.id).select_related("file")
for releasefile in file_list:
releasefile.file.delete()
releasefile.delete()
self.delete()
def count_artifacts(self):
"""Sum the artifact_counts of all release files.
An artifact count of NULL is interpreted as 1.
"""
counts = get_artifact_counts([self.id])
return counts.get(self.id, 0)
def count_artifacts_in_artifact_bundles(self, project_ids: Sequence[int]):
"""
Counts the number of artifacts in the artifact bundles associated with this release and a set of projects.
"""
qs = (
ArtifactBundle.objects.filter(
organization_id=self.organization.id,
releaseartifactbundle__release_name=self.version,
projectartifactbundle__project_id__in=project_ids,
)
.annotate(count=Sum(Func(F("artifact_count"), 1, function="COALESCE")))
.values_list("releaseartifactbundle__release_name", "count")
)
qs.query.group_by = ["releaseartifactbundle__release_name"]
if len(qs) == 0:
return None
return qs[0]
def clear_commits(self):
"""
Delete all release-specific commit data associated to this release. We will not delete the Commit model values because other releases may use these commits.
"""
with sentry_sdk.start_span(op="clear_commits"):
from sentry.models.releasecommit import ReleaseCommit
from sentry.models.releaseheadcommit import ReleaseHeadCommit
ReleaseHeadCommit.objects.filter(
organization_id=self.organization_id, release=self
).delete()
ReleaseCommit.objects.filter(
organization_id=self.organization_id, release=self
).delete()
self.authors = []
self.commit_count = 0
self.last_commit_id = None
self.save()
@classmethod
def get_unused_filter(cls, cutoff_date: datetime) -> Q:
"""
Returns a Q object that filters for unused releases.
This is the inverse of what makes a release "in use".
Note: This filter does NOT check for health data since that requires
external API calls. Health data check should be done separately.
"""
from django.db.models import Exists, OuterRef
from sentry.models.deploy import Deploy
from sentry.models.distribution import Distribution
from sentry.models.group import Group
from sentry.models.groupenvironment import GroupEnvironment
from sentry.models.grouphistory import GroupHistory
from sentry.models.grouprelease import GroupRelease
from sentry.models.groupresolution import GroupResolution
from sentry.models.latestreporeleaseenvironment import LatestRepoReleaseEnvironment
from sentry.models.releaseprojectenvironment import ReleaseProjectEnvironment
# Subquery for checking if any Group has this release as first_release
group_first_release_exists = Exists(Group.objects.filter(first_release=OuterRef("id")))
# Subquery for checking if LatestRepoReleaseEnvironment exists
latest_repo_exists = Exists(
LatestRepoReleaseEnvironment.objects.filter(release_id=OuterRef("id"))
)
# Subquery for checking if ReleaseProjectEnvironment has recent activity
recent_activity_exists = Exists(
ReleaseProjectEnvironment.objects.filter(
release_id=OuterRef("id"), last_seen__gte=cutoff_date
)
)
# Subquery for checking if there are recent deploys (within 90 days)
recent_deploys_exist = Exists(
Deploy.objects.filter(release_id=OuterRef("id"), date_finished__gte=cutoff_date)
)
# Subquery for checking if there are recent distributions (within 90 days)
recent_distributions_exist = Exists(
Distribution.objects.filter(release_id=OuterRef("id"), date_added__gte=cutoff_date)
)
# Subquery for checking if there are recent group releases (within 90 days)
recent_group_releases_exist = Exists(
GroupRelease.objects.filter(release_id=OuterRef("id"), last_seen__gte=cutoff_date)
)
# Subquery for checking if there are recent group resolutions (within 90 days)
recent_group_resolutions_exist = Exists(
GroupResolution.objects.filter(release_id=OuterRef("id"), datetime__gte=cutoff_date)
)
# Subquery for checking if GroupEnvironment has this release as first_release
group_environment_first_release_exists = Exists(
GroupEnvironment.objects.filter(first_release_id=OuterRef("id"))
)
# Subquery for checking if GroupHistory references this release
group_history_exists = Exists(GroupHistory.objects.filter(release_id=OuterRef("id")))
# Define what makes a release "in use" (should be kept)
keep_conditions = (
# Recently added releases
Q(date_added__gte=cutoff_date)
# Releases referenced as first_release by groups
| group_first_release_exists
# Releases referenced as first_release by group environments
| group_environment_first_release_exists
# Releases referenced by group history
| group_history_exists
# Releases with recent group resolutions (only recent ones, old ones can be cleaned up)
| recent_group_resolutions_exist
# Releases with recent distributions (only recent ones, old ones can be cleaned up)
| recent_distributions_exist
# Releases with recent deploys (only recent ones, old ones can be cleaned up)
| recent_deploys_exist
# Releases with recent group releases (only recent ones, old ones can be cleaned up)
| recent_group_releases_exist
# Releases with LatestRepoReleaseEnvironment
| latest_repo_exists
# Releases with recent activity
| recent_activity_exists
)
# Return the inverse - we want releases that DON'T meet any keep conditions
return cast(Q, ~keep_conditions)
def get_artifact_counts(release_ids: list[int]) -> Mapping[int, int]:
"""Get artifact count grouped by IDs"""
from sentry.models.releasefile import ReleaseFile
qs = (
ReleaseFile.objects.filter(release_id__in=release_ids)
.annotate(count=Sum(Func(F("artifact_count"), 1, function="COALESCE")))
.values_list("release_id", "count")
)
qs.query.group_by = ["release_id"]
return dict(qs)
def follows_semver_versioning_scheme(org_id, project_id, release_version=None):
"""
Checks if we should follow semantic versioning scheme for ordering based on
1. Latest ten releases of the project_id passed in all follow semver
2. provided release version argument is a valid semver version
Inputs:
* org_id
* project_id
* release_version
Returns:
Boolean that indicates if we should follow semantic version or not
"""
# TODO(ahmed): Move this function else where to be easily accessible for re-use
# TODO: this method could be moved to the Release model manager
cache_key = "follows_semver:1:%s" % hash_values([org_id, project_id])
follows_semver = cache.get(cache_key)
if follows_semver is None:
# Check if the latest ten releases are semver compliant
releases_list = list(
Release.objects.filter(
organization_id=org_id, projects__id__in=[project_id], status=ReleaseStatus.OPEN
)
.using_replica()
.order_by("-date_added")[:10]
)
if not releases_list:
cache.set(cache_key, False, 3600)
return False
# TODO(ahmed): re-visit/replace these conditions once we enable project wide `semver` setting
# A project is said to be following semver versioning schemes if it satisfies the following
# conditions:-
# 1: At least one semver compliant in the most recent 3 releases
# 2: At least 3 semver compliant releases in the most recent 10 releases
if len(releases_list) <= 2:
# Most recent release is considered to decide if project follows semver
follows_semver = releases_list[0].is_semver_release
elif len(releases_list) < 10:
# We forego condition 2 and it is enough if condition 1 is satisfied to consider this
# project to have semver compliant releases
follows_semver = any(release.is_semver_release for release in releases_list[0:3])
else:
# Count number of semver releases in the last ten
semver_matches = sum(map(lambda release: release.is_semver_release, releases_list))
at_least_three_in_last_ten = semver_matches >= 3
at_least_one_in_last_three = any(
release.is_semver_release for release in releases_list[0:3]
)
follows_semver = at_least_one_in_last_three and at_least_three_in_last_ten
cache.set(cache_key, follows_semver, 3600)
# Check release_version that is passed is semver compliant
if release_version:
follows_semver = follows_semver and Release.is_semver_version(release_version)
return follows_semver
def get_previous_release(release: Release) -> Release | None:
# NOTE: Keeping the below todo. Just optimizing the query.
#
# TODO: this does the wrong thing unless you are on the most
# recent release. Add a timestamp compare?
return (
Release.objects.filter(organization_id=release.organization_id)
.filter(
Exists(
ReleaseProject.objects.filter(
release=OuterRef("pk"),
project_id__in=ReleaseProject.objects.filter(release=release).values_list(
"project_id", flat=True
),
)
)
)
.extra(select={"sort": "COALESCE(date_released, date_added)"})
.exclude(version=release.version)
.order_by("-sort")
.first()
)
def filter_releases_by_projects(queryset: Any, project_ids: list[int]):
"""Return releases belonging to a project."""
if not project_ids:
return queryset
return queryset.filter(
Exists(
ReleaseProject.objects.filter(
release=OuterRef("pk"),
project_id__in=project_ids,
)
)
)
def filter_releases_by_environments(
queryset: Any,
project_ids: list[int],
environment_ids: list[int],
):
"""Return a release queryset filtered by environments."""
from sentry.models.releaseprojectenvironment import ReleaseProjectEnvironment
if not environment_ids:
return queryset
return queryset.filter(
Exists(
ReleaseProjectEnvironment.objects.filter(
release=OuterRef("pk"),
environment_id__in=environment_ids,
project_id__in=project_ids,
)
)
)
| Release |
python | pappasam__jedi-language-server | jedi_language_server/initialization_options.py | {
"start": 2513,
"end": 2691
} | class ____:
ignore_folders: List[str] = field(
default_factory=lambda: [".nox", ".tox", ".venv", "__pycache__"]
)
max_symbols: int = 20
@light_dataclass
| Symbols |
python | django__django | tests/admin_changelist/tests.py | {
"start": 2514,
"end": 79306
} | class ____(TestCase):
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", email="a@b.com", password="xxx"
)
def _create_superuser(self, username):
return User.objects.create_superuser(
username=username, email="a@b.com", password="xxx"
)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_repr(self):
m = ChildAdmin(Child, custom_site)
request = self.factory.get("/child/")
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(repr(cl), "<ChangeList: model=Child model_admin=ChildAdmin>")
def test_specified_ordering_by_f_expression(self):
class OrderedByFBandAdmin(admin.ModelAdmin):
list_display = ["name", "genres", "nr_of_members"]
ordering = (
F("nr_of_members").desc(nulls_last=True),
Upper(F("name")).asc(),
F("genres").asc(),
)
m = OrderedByFBandAdmin(Band, custom_site)
request = self.factory.get("/band/")
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.get_ordering_field_columns(), {3: "desc", 2: "asc"})
def test_specified_ordering_by_f_expression_without_asc_desc(self):
class OrderedByFBandAdmin(admin.ModelAdmin):
list_display = ["name", "genres", "nr_of_members"]
ordering = (F("nr_of_members"), Upper("name"), F("genres"))
m = OrderedByFBandAdmin(Band, custom_site)
request = self.factory.get("/band/")
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.get_ordering_field_columns(), {3: "asc", 2: "asc"})
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by
ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get("/child/")
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {"parent": {}})
def test_select_related_preserved_when_multi_valued_in_search_fields(self):
parent = Parent.objects.create(name="Mary")
Child.objects.create(parent=parent, name="Danielle")
Child.objects.create(parent=parent, name="Daniel")
m = ParentAdmin(Parent, custom_site)
request = self.factory.get("/parent/", data={SEARCH_VAR: "daniel"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 1)
# select_related is preserved.
self.assertEqual(cl.queryset.query.select_related, {"child": {}})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get("/invitation/")
request.user = self.superuser
cl = ia.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {"player": {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get("/invitation/")
request.user = self.superuser
cl = ia.get_changelist_instance(request)
self.assertIs(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ("band", "player")
def get_list_select_related(self, request):
return ("band", "player")
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get("/invitation/")
request.user = self.superuser
cl = ia.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {"player": {}, "band": {}})
def test_many_search_terms(self):
parent = Parent.objects.create(name="Mary")
Child.objects.create(parent=parent, name="Danielle")
Child.objects.create(parent=parent, name="Daniel")
m = ParentAdmin(Parent, custom_site)
request = self.factory.get("/parent/", data={SEARCH_VAR: "daniel " * 80})
request.user = self.superuser
cl = m.get_changelist_instance(request)
with CaptureQueriesContext(connection) as context:
object_count = cl.queryset.count()
self.assertEqual(object_count, 1)
self.assertEqual(context.captured_queries[0]["sql"].count("JOIN"), 1)
def test_related_field_multiple_search_terms(self):
"""
Searches over multi-valued relationships return rows from related
models only when all searched fields match that row.
"""
parent = Parent.objects.create(name="Mary")
Child.objects.create(parent=parent, name="Danielle", age=18)
Child.objects.create(parent=parent, name="Daniel", age=19)
m = ParentAdminTwoSearchFields(Parent, custom_site)
request = self.factory.get("/parent/", data={SEARCH_VAR: "danielle 19"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 0)
request = self.factory.get("/parent/", data={SEARCH_VAR: "daniel 19"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 1)
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name="name", parent=None)
request = self.factory.get("/child/")
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
link = reverse("admin:admin_changelist_child_change", args=(new_child.id,))
row_html = build_tbody_html(
new_child, link, "name", '<td class="field-parent nowrap">-</td>'
)
self.assertNotEqual(
table_output.find(row_html),
-1,
"Failed to find expected row element: %s" % table_output,
)
def test_result_list_empty_changelist_value_blank_string(self):
new_child = Child.objects.create(name="", parent=None)
request = self.factory.get("/child/")
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
link = reverse("admin:admin_changelist_child_change", args=(new_child.id,))
row_html = build_tbody_html(
new_child, link, "-", '<td class="field-parent nowrap">-</td>'
)
self.assertInHTML(row_html, table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
"""
Empty value display can be set on AdminSite.
"""
new_child = Child.objects.create(name="name", parent=None)
request = self.factory.get("/child/")
request.user = self.superuser
# Set a new empty display value on AdminSite.
admin.site.empty_value_display = "???"
m = ChildAdmin(Child, admin.site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
link = reverse("admin:admin_changelist_child_change", args=(new_child.id,))
row_html = build_tbody_html(
new_child, link, "name", '<td class="field-parent nowrap">???</td>'
)
self.assertNotEqual(
table_output.find(row_html),
-1,
"Failed to find expected row element: %s" % table_output,
)
    def test_result_list_set_empty_value_display_in_model_admin(self):
        """
        Empty value display can be set in ModelAdmin or individual fields.
        """
        new_child = Child.objects.create(name="name", parent=None)
        request = self.factory.get("/child/")
        request.user = self.superuser
        m = EmptyValueChildAdmin(Child, admin.site)
        cl = m.get_changelist_instance(request)
        cl.formset = None
        template = Template(
            "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
        )
        context = Context({"cl": cl, "opts": Child._meta})
        table_output = template.render(context)
        link = reverse("admin:admin_changelist_child_change", args=(new_child.id,))
        # The two age cells use different empty markers -- presumably one
        # configured per-field and one on the ModelAdmin; see
        # EmptyValueChildAdmin for the configuration.
        row_html = build_tbody_html(
            new_child,
            link,
            "name",
            '<td class="field-age_display">&dagger;</td>'
            '<td class="field-age">-empty-</td>',
        )
        self.assertNotEqual(
            table_output.find(row_html),
            -1,
            "Failed to find expected row element: %s" % table_output,
        )
    def test_result_list_html(self):
        """
        Inclusion tag result_list generates a table when with default
        ModelAdmin settings.
        """
        new_parent = Parent.objects.create(name="parent")
        new_child = Child.objects.create(name="name", parent=new_parent)
        request = self.factory.get("/child/")
        request.user = self.superuser
        m = ChildAdmin(Child, custom_site)
        cl = m.get_changelist_instance(request)
        cl.formset = None
        template = Template(
            "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
        )
        context = Context({"cl": cl, "opts": Child._meta})
        table_output = template.render(context)
        link = reverse("admin:admin_changelist_child_change", args=(new_child.id,))
        row_html = build_tbody_html(
            new_child,
            link,
            "name",
            '<td class="field-parent nowrap">%s</td>' % new_parent,
        )
        self.assertNotEqual(
            table_output.find(row_html),
            -1,
            "Failed to find expected row element: %s" % table_output,
        )
        # The select-all action checkbox is rendered with an accessible label.
        self.assertInHTML(
            '<input type="checkbox" id="action-toggle" '
            'aria-label="Select all objects on this page for an action">',
            table_output,
        )
    def test_action_checkbox_for_model_with_dunder_html(self):
        """
        The result list renders correctly for GrandChild (a model presumably
        defining __html__, per the test name -- see the model definition);
        related display fields fall back to "-" when the parents are unset.
        """
        grandchild = GrandChild.objects.create(name="name")
        request = self._mocked_authenticated_request("/grandchild/", self.superuser)
        m = GrandChildAdmin(GrandChild, custom_site)
        cl = m.get_changelist_instance(request)
        cl.formset = None
        template = Template(
            "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
        )
        context = Context({"cl": cl, "opts": GrandChild._meta})
        table_output = template.render(context)
        link = reverse(
            "admin:admin_changelist_grandchild_change", args=(grandchild.id,)
        )
        row_html = build_tbody_html(
            grandchild,
            link,
            "name",
            '<td class="field-parent__name">-</td>'
            '<td class="field-parent__parent__name">-</td>',
        )
        self.assertNotEqual(
            table_output.find(row_html),
            -1,
            "Failed to find expected row element: %s" % table_output,
        )
    def test_result_list_editable_html(self):
        """
        Regression tests for #11791: Inclusion tag result_list generates a
        table and this checks that the items are nested within the table
        element tags.
        Also a regression test for #13599, verifies that hidden fields
        when list_editable is enabled are rendered in a div outside the
        table.
        """
        new_parent = Parent.objects.create(name="parent")
        new_child = Child.objects.create(name="name", parent=new_parent)
        request = self.factory.get("/child/")
        request.user = self.superuser
        m = ChildAdmin(Child, custom_site)
        # Test with list_editable fields
        m.list_display = ["id", "name", "parent"]
        m.list_display_links = ["id"]
        m.list_editable = ["name"]
        cl = m.get_changelist_instance(request)
        # Bind a formset so the list_editable input widgets are rendered.
        FormSet = m.get_changelist_formset(request)
        cl.formset = FormSet(queryset=cl.result_list)
        template = Template(
            "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
        )
        context = Context({"cl": cl, "opts": Child._meta})
        table_output = template.render(context)
        # make sure that hidden fields are in the correct place
        hiddenfields_div = (
            '<div class="hiddenfields">'
            '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
            "</div>"
        ) % new_child.id
        self.assertInHTML(
            hiddenfields_div, table_output, msg_prefix="Failed to find hidden fields"
        )
        # make sure that list editable fields are rendered in divs correctly
        editable_name_field = (
            '<input name="form-0-name" value="name" class="vTextField" '
            'maxlength="30" type="text" id="id_form-0-name">'
        )
        self.assertInHTML(
            '<td class="field-name">%s</td>' % editable_name_field,
            table_output,
            msg_prefix='Failed to find "name" list_editable field',
        )
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name="parent")
for i in range(1, 201):
Child.objects.create(name="name %s" % i, parent=new_parent)
request = self.factory.get("/child/", data={"p": -1}) # Anything outside range
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ["id", "name", "parent"]
m.list_display_links = ["id"]
m.list_editable = ["name"]
with self.assertRaises(IncorrectLookupParameters):
m.get_changelist_instance(request)
@skipUnlessDBFeature("supports_transactions")
def test_list_editable_atomicity(self):
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
b = Swallow.objects.create(origin="Swallow B", load=2, speed=2)
self.client.force_login(self.superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-uuid": str(a.pk),
"form-1-uuid": str(b.pk),
"form-0-load": "9.0",
"form-0-speed": "3.0",
"form-1-load": "5.0",
"form-1-speed": "1.0",
"_save": "Save",
}
with mock.patch(
"django.contrib.admin.ModelAdmin.log_change", side_effect=DatabaseError
):
with self.assertRaises(DatabaseError):
self.client.post(changelist_url, data)
# Original values are preserved.
a.refresh_from_db()
self.assertEqual(a.load, 4)
self.assertEqual(a.speed, 1)
b.refresh_from_db()
self.assertEqual(b.load, 2)
self.assertEqual(b.speed, 2)
with mock.patch(
"django.contrib.admin.ModelAdmin.log_change",
side_effect=[None, DatabaseError],
):
with self.assertRaises(DatabaseError):
self.client.post(changelist_url, data)
# Original values are preserved.
a.refresh_from_db()
self.assertEqual(a.load, 4)
self.assertEqual(a.speed, 1)
b.refresh_from_db()
self.assertEqual(b.load, 2)
self.assertEqual(b.speed, 2)
def test_custom_paginator(self):
new_parent = Parent.objects.create(name="parent")
for i in range(1, 201):
Child.objects.create(name="name %s" % i, parent=new_parent)
request = self.factory.get("/child/")
request.user = self.superuser
m = CustomPaginationAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
    def test_distinct_for_m2m_in_list_filter(self):
        """
        Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. Basic ManyToMany.
        """
        blues = Genre.objects.create(name="Blues")
        band = Band.objects.create(name="B.B. King Review", nr_of_members=11)
        band.genres.add(blues)
        band.genres.add(blues)
        # Filter the changelist on the M2M field.
        m = BandAdmin(Band, custom_site)
        request = self.factory.get("/band/", data={"genres": blues.pk})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        cl.get_results(request)
        # There's only one Group instance
        self.assertEqual(cl.result_count, 1)
        # Queryset must be deletable.
        cl.queryset.delete()
        self.assertEqual(cl.queryset.count(), 0)
    def test_distinct_for_through_m2m_in_list_filter(self):
        """
        Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. With an intermediate model.
        """
        lead = Musician.objects.create(name="Vox")
        band = Group.objects.create(name="The Hype")
        # Two through-model rows for the same musician/group pair.
        Membership.objects.create(group=band, music=lead, role="lead voice")
        Membership.objects.create(group=band, music=lead, role="bass player")
        m = GroupAdmin(Group, custom_site)
        request = self.factory.get("/group/", data={"members": lead.pk})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        cl.get_results(request)
        # There's only one Group instance
        self.assertEqual(cl.result_count, 1)
        # Queryset must be deletable.
        cl.queryset.delete()
        self.assertEqual(cl.queryset.count(), 0)
    def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
        """
        When using a ManyToMany in list_filter at the second level behind a
        ForeignKey, distinct() must be called and results shouldn't appear more
        than once.
        """
        lead = Musician.objects.create(name="Vox")
        band = Group.objects.create(name="The Hype")
        Concert.objects.create(name="Woodstock", group=band)
        # Two memberships for the same musician would duplicate the join.
        Membership.objects.create(group=band, music=lead, role="lead voice")
        Membership.objects.create(group=band, music=lead, role="bass player")
        m = ConcertAdmin(Concert, custom_site)
        request = self.factory.get("/concert/", data={"group__members": lead.pk})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        cl.get_results(request)
        # There's only one Concert instance
        self.assertEqual(cl.result_count, 1)
        # Queryset must be deletable.
        cl.queryset.delete()
        self.assertEqual(cl.queryset.count(), 0)
    def test_distinct_for_inherited_m2m_in_list_filter(self):
        """
        Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. Model managed in the
        admin inherits from the one that defines the relationship.
        """
        lead = Musician.objects.create(name="John")
        four = Quartet.objects.create(name="The Beatles")
        # Two memberships for the same musician in the same quartet.
        Membership.objects.create(group=four, music=lead, role="lead voice")
        Membership.objects.create(group=four, music=lead, role="guitar player")
        m = QuartetAdmin(Quartet, custom_site)
        request = self.factory.get("/quartet/", data={"members": lead.pk})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        cl.get_results(request)
        # There's only one Quartet instance
        self.assertEqual(cl.result_count, 1)
        # Queryset must be deletable.
        cl.queryset.delete()
        self.assertEqual(cl.queryset.count(), 0)
    def test_distinct_for_m2m_to_inherited_in_list_filter(self):
        """
        Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. Target of the relationship
        inherits from another.
        """
        lead = ChordsMusician.objects.create(name="Player A")
        three = ChordsBand.objects.create(name="The Chords Trio")
        # Two invitations for the same player in the same band.
        Invitation.objects.create(band=three, player=lead, instrument="guitar")
        Invitation.objects.create(band=three, player=lead, instrument="bass")
        m = ChordsBandAdmin(ChordsBand, custom_site)
        request = self.factory.get("/chordsband/", data={"members": lead.pk})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        cl.get_results(request)
        # There's only one ChordsBand instance
        self.assertEqual(cl.result_count, 1)
    def test_distinct_for_non_unique_related_object_in_list_filter(self):
        """
        Regressions tests for #15819: If a field listed in list_filters
        is a non-unique related object, distinct() must be called.
        """
        parent = Parent.objects.create(name="Mary")
        # Two children with the same name
        Child.objects.create(parent=parent, name="Daniel")
        Child.objects.create(parent=parent, name="Daniel")
        m = ParentAdmin(Parent, custom_site)
        request = self.factory.get("/parent/", data={"child__name": "Daniel"})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        # Make sure distinct() was called: both children match, but the
        # parent is listed once.
        self.assertEqual(cl.queryset.count(), 1)
        # Queryset must be deletable.
        cl.queryset.delete()
        self.assertEqual(cl.queryset.count(), 0)
def test_changelist_search_form_validation(self):
m = ConcertAdmin(Concert, custom_site)
tests = [
({SEARCH_VAR: "\x00"}, "Null characters are not allowed."),
({SEARCH_VAR: "some\x00thing"}, "Null characters are not allowed."),
]
for case, error in tests:
with self.subTest(case=case):
request = self.factory.get("/concert/", case)
request.user = self.superuser
request._messages = CookieStorage(request)
m.get_changelist_instance(request)
messages = [m.message for m in request._messages]
self.assertEqual(1, len(messages))
self.assertEqual(error, messages[0])
    def test_distinct_for_non_unique_related_object_in_search_fields(self):
        """
        Regressions tests for #15819: If a field listed in search_fields
        is a non-unique related object, distinct() must be called.
        """
        parent = Parent.objects.create(name="Mary")
        # Both children match a "daniel" search, but the parent must be
        # listed only once.
        Child.objects.create(parent=parent, name="Danielle")
        Child.objects.create(parent=parent, name="Daniel")
        m = ParentAdmin(Parent, custom_site)
        request = self.factory.get("/parent/", data={SEARCH_VAR: "daniel"})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        # Make sure distinct() was called
        self.assertEqual(cl.queryset.count(), 1)
        # Queryset must be deletable.
        cl.queryset.delete()
        self.assertEqual(cl.queryset.count(), 0)
    def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
        """
        When using a ManyToMany in search_fields at the second level behind a
        ForeignKey, distinct() must be called and results shouldn't appear more
        than once.
        """
        lead = Musician.objects.create(name="Vox")
        band = Group.objects.create(name="The Hype")
        Concert.objects.create(name="Woodstock", group=band)
        # The musician matches via two memberships, but only one concert
        # should be returned.
        Membership.objects.create(group=band, music=lead, role="lead voice")
        Membership.objects.create(group=band, music=lead, role="bass player")
        m = ConcertAdmin(Concert, custom_site)
        request = self.factory.get("/concert/", data={SEARCH_VAR: "vox"})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        # There's only one Concert instance
        self.assertEqual(cl.queryset.count(), 1)
        # Queryset must be deletable.
        cl.queryset.delete()
        self.assertEqual(cl.queryset.count(), 0)
    def test_multiple_search_fields(self):
        """
        All rows containing each of the searched words are returned, where each
        word must be in one of search_fields.
        """
        # Fixtures: two groups; only band_duo has members, each group has
        # one concert.
        band_duo = Group.objects.create(name="Duo")
        band_hype = Group.objects.create(name="The Hype")
        mary = Musician.objects.create(name="Mary Halvorson")
        jonathan = Musician.objects.create(name="Jonathan Finlayson")
        band_duo.members.set([mary, jonathan])
        Concert.objects.create(name="Tiny desk concert", group=band_duo)
        Concert.objects.create(name="Woodstock concert", group=band_hype)
        # FK lookup.
        concert_model_admin = ConcertAdmin(Concert, custom_site)
        concert_model_admin.search_fields = ["group__name", "name"]
        # Reverse FK lookup.
        group_model_admin = GroupAdmin(Group, custom_site)
        group_model_admin.search_fields = ["name", "concert__name", "members__name"]
        for search_string, result_count in (
            ("Duo Concert", 1),
            ("Tiny Desk Concert", 1),
            ("Concert", 2),
            ("Other Concert", 0),
            ("Duo Woodstock", 0),
        ):
            with self.subTest(search_string=search_string):
                # FK lookup.
                request = self.factory.get(
                    "/concert/", data={SEARCH_VAR: search_string}
                )
                request.user = self.superuser
                concert_changelist = concert_model_admin.get_changelist_instance(
                    request
                )
                self.assertEqual(concert_changelist.queryset.count(), result_count)
                # Reverse FK lookup.
                request = self.factory.get("/group/", data={SEARCH_VAR: search_string})
                request.user = self.superuser
                group_changelist = group_model_admin.get_changelist_instance(request)
                self.assertEqual(group_changelist.queryset.count(), result_count)
        # Many-to-many lookup.
        for search_string, result_count in (
            ("Finlayson Duo Tiny", 1),
            ("Finlayson", 1),
            ("Finlayson Hype", 0),
            ("Jonathan Finlayson Duo", 1),
            ("Mary Jonathan Duo", 0),
            ("Oscar Finlayson Duo", 0),
        ):
            with self.subTest(search_string=search_string):
                request = self.factory.get("/group/", data={SEARCH_VAR: search_string})
                request.user = self.superuser
                group_changelist = group_model_admin.get_changelist_instance(request)
                self.assertEqual(group_changelist.queryset.count(), result_count)
def test_pk_in_search_fields(self):
band = Group.objects.create(name="The Hype")
Concert.objects.create(name="Woodstock", group=band)
m = ConcertAdmin(Concert, custom_site)
m.search_fields = ["group__pk"]
request = self.factory.get("/concert/", data={SEARCH_VAR: band.pk})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 1)
request = self.factory.get("/concert/", data={SEARCH_VAR: band.pk + 5})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 0)
def test_builtin_lookup_in_search_fields(self):
band = Group.objects.create(name="The Hype")
concert = Concert.objects.create(name="Woodstock", group=band)
m = ConcertAdmin(Concert, custom_site)
m.search_fields = ["name__iexact"]
request = self.factory.get("/", data={SEARCH_VAR: "woodstock"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [concert])
request = self.factory.get("/", data={SEARCH_VAR: "wood"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
    def test_custom_lookup_in_search_fields(self):
        """
        A custom lookup registered on a field can be used as a search_fields
        suffix (here "cc", registered as a Contains lookup).
        """
        band = Group.objects.create(name="The Hype")
        concert = Concert.objects.create(name="Woodstock", group=band)
        m = ConcertAdmin(Concert, custom_site)
        m.search_fields = ["group__name__cc"]
        with register_lookup(Field, Contains, lookup_name="cc"):
            # "Hype" is contained in the group name; match.
            request = self.factory.get("/", data={SEARCH_VAR: "Hype"})
            request.user = self.superuser
            cl = m.get_changelist_instance(request)
            self.assertCountEqual(cl.queryset, [concert])
            # "Woodstock" is the concert name, not the group name; no match.
            request = self.factory.get("/", data={SEARCH_VAR: "Woodstock"})
            request.user = self.superuser
            cl = m.get_changelist_instance(request)
            self.assertCountEqual(cl.queryset, [])
    def test_spanning_relations_with_custom_lookup_in_search_fields(self):
        """
        search_fields can traverse several relations and terminate in a
        custom lookup.
        """
        hype = Group.objects.create(name="The Hype")
        concert = Concert.objects.create(name="Woodstock", group=hype)
        vox = Musician.objects.create(name="Vox", age=20)
        Membership.objects.create(music=vox, group=hype)
        # Register a custom lookup on IntegerField to ensure that field
        # traversing logic in ModelAdmin.get_search_results() works.
        with register_lookup(IntegerField, Exact, lookup_name="exactly"):
            m = ConcertAdmin(Concert, custom_site)
            m.search_fields = ["group__members__age__exactly"]
            request = self.factory.get("/", data={SEARCH_VAR: "20"})
            request.user = self.superuser
            cl = m.get_changelist_instance(request)
            self.assertCountEqual(cl.queryset, [concert])
            request = self.factory.get("/", data={SEARCH_VAR: "21"})
            request.user = self.superuser
            cl = m.get_changelist_instance(request)
            self.assertCountEqual(cl.queryset, [])
def test_custom_lookup_with_pk_shortcut(self):
self.assertEqual(CharPK._meta.pk.name, "char_pk") # Not equal to 'pk'.
m = admin.ModelAdmin(CustomIdUser, custom_site)
abc = CharPK.objects.create(char_pk="abc")
abcd = CharPK.objects.create(char_pk="abcd")
m = admin.ModelAdmin(CharPK, custom_site)
m.search_fields = ["pk__exact"]
request = self.factory.get("/", data={SEARCH_VAR: "abc"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [abc])
request = self.factory.get("/", data={SEARCH_VAR: "abcd"})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [abcd])
    def test_search_with_exact_lookup_for_non_string_field(self):
        """
        Searching with an exact lookup on a non-string field matches whole
        values only ("11" matches age 11; the prefix "1" does not), with a
        fixed number of queries per search.
        """
        child = Child.objects.create(name="Asher", age=11)
        model_admin = ChildAdmin(Child, custom_site)
        for search_term, expected_result in [
            ("11", [child]),
            ("Asher", [child]),
            ("1", []),
            ("A", []),
            ("random", []),
        ]:
            request = self.factory.get("/", data={SEARCH_VAR: search_term})
            request.user = self.superuser
            with self.subTest(search_term=search_term):
                # 1 query for filtered result, 1 for filtered count, 1 for
                # total count.
                with self.assertNumQueries(3):
                    cl = model_admin.get_changelist_instance(request)
                    self.assertCountEqual(cl.queryset, expected_result)
    def test_search_with_exact_lookup_relationship_field(self):
        """
        Exact-lookup search across a relationship: quoted phrases and
        related non-string values match whole values only.
        """
        child = Child.objects.create(name="I am a child", age=11)
        grandchild = GrandChild.objects.create(name="I am a grandchild", parent=child)
        model_admin = GrandChildAdmin(GrandChild, custom_site)
        # A quoted phrase matching the parent's full name finds the row.
        request = self.factory.get("/", data={SEARCH_VAR: "'I am a child'"})
        request.user = self.superuser
        cl = model_admin.get_changelist_instance(request)
        self.assertCountEqual(cl.queryset, [grandchild])
        for search_term, expected_result in [
            ("11", [grandchild]),
            ("'I am a child'", [grandchild]),
            ("1", []),
            ("A", []),
            ("random", []),
        ]:
            request = self.factory.get("/", data={SEARCH_VAR: search_term})
            request.user = self.superuser
            with self.subTest(search_term=search_term):
                cl = model_admin.get_changelist_instance(request)
                self.assertCountEqual(cl.queryset, expected_result)
    def test_no_distinct_for_m2m_in_list_filter_without_params(self):
        """
        If a ManyToManyField is in list_filter but isn't in any lookup params,
        the changelist's query shouldn't have distinct.
        """
        m = BandAdmin(Band, custom_site)
        # Neither an empty query string nor a non-M2M filter triggers
        # distinct.
        for lookup_params in ({}, {"name": "test"}):
            request = self.factory.get("/band/", lookup_params)
            request.user = self.superuser
            cl = m.get_changelist_instance(request)
            self.assertIs(cl.queryset.query.distinct, False)
        # A ManyToManyField in params does have distinct applied.
        request = self.factory.get("/band/", {"genres": "0"})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        self.assertIs(cl.queryset.query.distinct, True)
    def test_pagination(self):
        """
        Regression tests for #12893: Pagination in admins changelist doesn't
        use queryset set by modeladmin.
        """
        parent = Parent.objects.create(name="anything")
        # 60 objects total: 30 "name N" plus 30 "filtered N" children.
        for i in range(1, 31):
            Child.objects.create(name="name %s" % i, parent=parent)
            Child.objects.create(name="filtered %s" % i, parent=parent)
        request = self.factory.get("/child/")
        request.user = self.superuser
        # Test default queryset
        m = ChildAdmin(Child, custom_site)
        cl = m.get_changelist_instance(request)
        self.assertEqual(cl.queryset.count(), 60)
        self.assertEqual(cl.paginator.count, 60)
        self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
        # Test custom queryset
        m = FilteredChildAdmin(Child, custom_site)
        cl = m.get_changelist_instance(request)
        self.assertEqual(cl.queryset.count(), 30)
        self.assertEqual(cl.paginator.count, 30)
        self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
    def test_pagination_render(self):
        """
        The pagination template renders an accessible <nav> landmark with a
        heading, page links, and exactly one current-page marker.
        """
        objs = [Swallow(origin=f"Swallow {i}", load=i, speed=i) for i in range(1, 5)]
        Swallow.objects.bulk_create(objs)
        request = self.factory.get("/child/")
        request.user = self.superuser
        admin = SwallowAdmin(Swallow, custom_site)
        cl = admin.get_changelist_instance(request)
        template = Template(
            "{% load admin_list %}{% spaceless %}{% pagination cl %}{% endspaceless %}"
        )
        context = Context({"cl": cl, "opts": cl.opts})
        pagination_output = template.render(context)
        self.assertTrue(
            pagination_output.startswith(
                '<nav class="paginator" aria-labelledby="pagination">'
            )
        )
        self.assertInHTML(
            '<h2 id="pagination" class="visually-hidden">Pagination swallows</h2>',
            pagination_output,
        )
        self.assertTrue(pagination_output.endswith("</nav>"))
        self.assertInHTML(
            '<li><a role="button" href="" aria-current="page">1</a></li>',
            pagination_output,
        )
        self.assertInHTML(
            '<li><a role="button" href="?p=2">2</a></li>',
            pagination_output,
        )
        # Only the current page carries aria-current and an empty href.
        self.assertEqual(pagination_output.count('aria-current="page"'), 1)
        self.assertEqual(pagination_output.count('href=""'), 1)
    def test_computed_list_display_localization(self):
        """
        Regression test for #13196: output of functions should be localized
        in the changelist.
        """
        self.client.force_login(self.superuser)
        event = Event.objects.create(date=datetime.date.today())
        response = self.client.get(reverse("admin:admin_changelist_event_changelist"))
        # The localized date appears; the raw str() form must not.
        self.assertContains(response, formats.localize(event.date))
        self.assertNotContains(response, str(event.date))
    def test_dynamic_list_display(self):
        """
        Regression tests for #14206: dynamic list_display support.
        """
        parent = Parent.objects.create(name="parent")
        for i in range(10):
            Child.objects.create(name="child %s" % i, parent=parent)
        user_noparents = self._create_superuser("noparents")
        user_parents = self._create_superuser("parents")
        # Test with user 'noparents'
        # custom_site's registered admin for Child -- presumably
        # DynamicListDisplayChildAdmin; see the module-level registrations.
        m = custom_site.get_model_admin(Child)
        request = self._mocked_authenticated_request("/child/", user_noparents)
        response = m.changelist_view(request)
        self.assertNotContains(response, "Parent object")
        list_display = m.get_list_display(request)
        list_display_links = m.get_list_display_links(request, list_display)
        self.assertEqual(list_display, ["name", "age"])
        self.assertEqual(list_display_links, ["name"])
        # Test with user 'parents'
        m = DynamicListDisplayChildAdmin(Child, custom_site)
        request = self._mocked_authenticated_request("/child/", user_parents)
        response = m.changelist_view(request)
        self.assertContains(response, "Parent object")
        # Unregister so the default ChildAdmin can be registered below.
        custom_site.unregister(Child)
        list_display = m.get_list_display(request)
        list_display_links = m.get_list_display_links(request, list_display)
        self.assertEqual(list_display, ("parent", "name", "age"))
        self.assertEqual(list_display_links, ["parent"])
        # Test default implementation
        custom_site.register(Child, ChildAdmin)
        m = custom_site.get_model_admin(Child)
        request = self._mocked_authenticated_request("/child/", user_noparents)
        response = m.changelist_view(request)
        self.assertContains(response, "Parent object")
    def test_show_all(self):
        """
        The "show all" parameter (ALL_VAR) lists every object when the total
        is at most list_max_show_all; otherwise the changelist falls back to
        paginated results.
        """
        parent = Parent.objects.create(name="anything")
        for i in range(1, 31):
            Child.objects.create(name="name %s" % i, parent=parent)
            Child.objects.create(name="filtered %s" % i, parent=parent)
        # Add "show all" parameter to request
        request = self.factory.get("/child/", data={ALL_VAR: ""})
        request.user = self.superuser
        # Test valid "show all" request (number of total objects is under max)
        m = ChildAdmin(Child, custom_site)
        m.list_max_show_all = 200
        # 200 is the max we'll pass to ChangeList
        cl = m.get_changelist_instance(request)
        cl.get_results(request)
        self.assertEqual(len(cl.result_list), 60)
        # Test invalid "show all" request (number of total objects over max)
        # falls back to paginated pages
        m = ChildAdmin(Child, custom_site)
        m.list_max_show_all = 30
        # 30 is the max we'll pass to ChangeList for this test
        cl = m.get_changelist_instance(request)
        cl.get_results(request)
        self.assertEqual(len(cl.result_list), 10)
    def test_dynamic_list_display_links(self):
        """
        Regression tests for #16257: dynamic list_display_links support.
        """
        parent = Parent.objects.create(name="parent")
        for i in range(1, 10):
            Child.objects.create(id=i, name="child %s" % i, parent=parent, age=i)
        m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
        superuser = self._create_superuser("superuser")
        request = self._mocked_authenticated_request("/child/", superuser)
        response = m.changelist_view(request)
        # Each age value (== the row id here) renders as the change link,
        # matching list_display_links == ["age"] below.
        for i in range(1, 10):
            link = reverse("admin:admin_changelist_child_change", args=(i,))
            self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
        list_display = m.get_list_display(request)
        list_display_links = m.get_list_display_links(request, list_display)
        self.assertEqual(list_display, ("parent", "name", "age"))
        self.assertEqual(list_display_links, ["age"])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name="parent")
m = NoListDisplayLinksParentAdmin(Parent, custom_site)
superuser = self._create_superuser("superuser")
request = self._mocked_authenticated_request("/parent/", superuser)
response = m.changelist_view(request)
link = reverse("admin:admin_changelist_parent_change", args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
    def test_link_field_display_links(self):
        """
        File and URL field values used as display links render inside anchors
        pointing at the row's change form.
        """
        self.client.force_login(self.superuser)
        g = Genre.objects.create(
            name="Blues",
            file="documents/blues_history.txt",
            url="http://blues_history.com",
        )
        response = self.client.get(reverse("admin:admin_changelist_genre_changelist"))
        self.assertContains(
            response,
            '<a href="/admin/admin_changelist/genre/%s/change/">'
            "documents/blues_history.txt</a>" % g.pk,
        )
        self.assertContains(
            response,
            '<a href="/admin/admin_changelist/genre/%s/change/">'
            "http://blues_history.com</a>" % g.pk,
        )
def test_blank_str_display_links(self):
self.client.force_login(self.superuser)
gc = GrandChild.objects.create(name=" ")
response = self.client.get(
reverse("admin:admin_changelist_grandchild_changelist")
)
self.assertContains(
response,
'<a href="/admin/admin_changelist/grandchild/%s/change/">-</a>' % gc.pk,
)
    def test_clear_all_filters_link(self):
        """
        The "Clear all filters" link appears only while a filter is active,
        and its href keeps non-filter parameters (search term, popup flag)
        while dropping the filter parameters.
        """
        self.client.force_login(self.superuser)
        url = reverse("admin:auth_user_changelist")
        response = self.client.get(url)
        self.assertNotContains(response, "✖ Clear all filters")
        link = '<a href="%s">✖ Clear all filters</a>'
        for data, href in (
            ({"is_staff__exact": "0"}, "?"),
            (
                {"is_staff__exact": "0", "username__startswith": "test"},
                "?username__startswith=test",
            ),
            (
                {"is_staff__exact": "0", SEARCH_VAR: "test"},
                "?%s=test" % SEARCH_VAR,
            ),
            (
                {"is_staff__exact": "0", IS_POPUP_VAR: "id"},
                "?%s=id" % IS_POPUP_VAR,
            ),
        ):
            with self.subTest(data=data):
                response = self.client.get(url, data=data)
                self.assertContains(response, link % href)
    def test_clear_all_filters_link_callable_filter(self):
        """
        "Clear all filters" also works for a callable/custom list filter
        (nr_of_members_partition), preserving non-filter parameters.
        """
        self.client.force_login(self.superuser)
        url = reverse("admin:admin_changelist_band_changelist")
        response = self.client.get(url)
        self.assertNotContains(response, "✖ Clear all filters")
        link = '<a href="%s">✖ Clear all filters</a>'
        for data, href in (
            ({"nr_of_members_partition": "5"}, "?"),
            (
                {"nr_of_members_partition": "more", "name__startswith": "test"},
                "?name__startswith=test",
            ),
            (
                {"nr_of_members_partition": "5", IS_POPUP_VAR: "id"},
                "?%s=id" % IS_POPUP_VAR,
            ),
        ):
            with self.subTest(data=data):
                response = self.client.get(url, data=data)
                self.assertContains(response, link % href)
    def test_no_clear_all_filters_link(self):
        """
        No "Clear all filters" link appears when only non-filter parameters
        (search, ordering, to-field, page, popup, facets, or a bare lookup)
        are present.
        """
        self.client.force_login(self.superuser)
        url = reverse("admin:auth_user_changelist")
        link = ">✖ Clear all filters</a>"
        for data in (
            {SEARCH_VAR: "test"},
            {ORDER_VAR: "-1"},
            {TO_FIELD_VAR: "id"},
            {PAGE_VAR: "1"},
            {IS_POPUP_VAR: "1"},
            {IS_FACETS_VAR: ""},
            {"username__startswith": "test"},
        ):
            with self.subTest(data=data):
                response = self.client.get(url, data=data)
                self.assertNotContains(response, link)
    def test_tuple_list_display(self):
        """
        The changelist renders the Swallow admin's display fields and reverse
        one-to-one relations, both present and absent.
        """
        swallow = Swallow.objects.create(origin="Africa", load="12.34", speed="22.2")
        swallow2 = Swallow.objects.create(origin="Africa", load="12.34", speed="22.2")
        swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
        model_admin = SwallowAdmin(Swallow, custom_site)
        superuser = self._create_superuser("superuser")
        request = self._mocked_authenticated_request("/swallow/", superuser)
        response = model_admin.changelist_view(request)
        # just want to ensure it doesn't blow up during rendering
        self.assertContains(response, str(swallow.origin))
        self.assertContains(response, str(swallow.load))
        self.assertContains(response, str(swallow.speed))
        # Reverse one-to-one relations should work.
        self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
        self.assertContains(
            response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o
        )
    def test_multiuser_edit(self):
        """
        Simultaneous edits of list_editable fields on the changelist by
        different users must not result in one user's edits creating a new
        object instead of modifying the correct existing object (#11313).
        """
        # To replicate this issue, simulate the following steps:
        # 1. User1 opens an admin changelist with list_editable fields.
        # 2. User2 edits object "Foo" such that it moves to another page in
        #    the pagination order and saves.
        # 3. User1 edits object "Foo" and saves.
        # 4. The edit made by User1 does not get applied to object "Foo" but
        #    instead is used to create a new object (bug).
        # For this test, order the changelist by the 'speed' attribute and
        # display 3 objects per page (SwallowAdmin.list_per_page = 3).
        # Setup the test to reflect the DB state after step 2 where User2 has
        # edited the first swallow object's speed from '4' to '1'.
        a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
        b = Swallow.objects.create(origin="Swallow B", load=2, speed=2)
        c = Swallow.objects.create(origin="Swallow C", load=5, speed=5)
        d = Swallow.objects.create(origin="Swallow D", load=9, speed=9)
        superuser = self._create_superuser("superuser")
        self.client.force_login(superuser)
        changelist_url = reverse("admin:admin_changelist_swallow_changelist")
        # Send the POST from User1 for step 3. It's still using the changelist
        # ordering from before User2's edits in step 2.
        data = {
            "form-TOTAL_FORMS": "3",
            "form-INITIAL_FORMS": "3",
            "form-MIN_NUM_FORMS": "0",
            "form-MAX_NUM_FORMS": "1000",
            "form-0-uuid": str(d.pk),
            "form-1-uuid": str(c.pk),
            "form-2-uuid": str(a.pk),
            "form-0-load": "9.0",
            "form-0-speed": "9.0",
            "form-1-load": "5.0",
            "form-1-speed": "5.0",
            "form-2-load": "5.0",
            "form-2-speed": "4.0",
            "_save": "Save",
        }
        # NOTE(review): `extra={"o": "-2"}` is passed through **extra into the
        # WSGI environ, not the query string -- confirm the ordering parameter
        # is actually applied to the request.
        response = self.client.post(
            changelist_url, data, follow=True, extra={"o": "-2"}
        )
        # The object User1 edited in step 3 is displayed on the changelist and
        # has the correct edits applied.
        self.assertContains(response, "1 swallow was changed successfully.")
        self.assertContains(response, a.origin)
        a.refresh_from_db()
        self.assertEqual(a.load, float(data["form-2-load"]))
        self.assertEqual(a.speed, float(data["form-2-speed"]))
        b.refresh_from_db()
        self.assertEqual(b.load, 2)
        self.assertEqual(b.speed, 2)
        c.refresh_from_db()
        self.assertEqual(c.load, float(data["form-1-load"]))
        self.assertEqual(c.speed, float(data["form-1-speed"]))
        d.refresh_from_db()
        self.assertEqual(d.load, float(data["form-0-load"]))
        self.assertEqual(d.speed, float(data["form-0-speed"]))
        # No new swallows were created.
        self.assertEqual(len(Swallow.objects.all()), 4)
def test_get_edited_object_ids(self):
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
b = Swallow.objects.create(origin="Swallow B", load=2, speed=2)
c = Swallow.objects.create(origin="Swallow C", load=5, speed=5)
superuser = self._create_superuser("superuser")
self.client.force_login(superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
m = SwallowAdmin(Swallow, custom_site)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-uuid": str(a.pk),
"form-1-uuid": str(b.pk),
"form-2-uuid": str(c.pk),
"form-0-load": "9.0",
"form-0-speed": "9.0",
"form-1-load": "5.0",
"form-1-speed": "5.0",
"form-2-load": "5.0",
"form-2-speed": "4.0",
"_save": "Save",
}
request = self.factory.post(changelist_url, data=data)
pks = m._get_edited_object_pks(request, prefix="form")
self.assertEqual(sorted(pks), sorted([str(a.pk), str(b.pk), str(c.pk)]))
def test_get_list_editable_queryset(self):
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
Swallow.objects.create(origin="Swallow B", load=2, speed=2)
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-uuid": str(a.pk),
"form-0-load": "10",
"_save": "Save",
}
superuser = self._create_superuser("superuser")
self.client.force_login(superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
m = SwallowAdmin(Swallow, custom_site)
request = self.factory.post(changelist_url, data=data)
queryset = m._get_list_editable_queryset(request, prefix="form")
self.assertEqual(queryset.count(), 1)
data["form-0-uuid"] = "INVALD_PRIMARY_KEY"
# The unfiltered queryset is returned if there's invalid data.
request = self.factory.post(changelist_url, data=data)
queryset = m._get_list_editable_queryset(request, prefix="form")
self.assertEqual(queryset.count(), 2)
def test_get_list_editable_queryset_with_regex_chars_in_prefix(self):
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
Swallow.objects.create(origin="Swallow B", load=2, speed=2)
data = {
"form$-TOTAL_FORMS": "2",
"form$-INITIAL_FORMS": "2",
"form$-MIN_NUM_FORMS": "0",
"form$-MAX_NUM_FORMS": "1000",
"form$-0-uuid": str(a.pk),
"form$-0-load": "10",
"_save": "Save",
}
superuser = self._create_superuser("superuser")
self.client.force_login(superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
m = SwallowAdmin(Swallow, custom_site)
request = self.factory.post(changelist_url, data=data)
queryset = m._get_list_editable_queryset(request, prefix="form$")
self.assertEqual(queryset.count(), 1)
    def test_changelist_view_list_editable_changed_objects_uses_filter(self):
        """
        list_editable edits use a filtered queryset to limit memory usage.

        Saving a list_editable formset should issue a query restricted to the
        posted pks (``WHERE ... IN (...)``) rather than loading every row.
        """
        a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
        Swallow.objects.create(origin="Swallow B", load=2, speed=2)
        # Only object ``a`` is included in the posted formset.
        data = {
            "form-TOTAL_FORMS": "2",
            "form-INITIAL_FORMS": "2",
            "form-MIN_NUM_FORMS": "0",
            "form-MAX_NUM_FORMS": "1000",
            "form-0-uuid": str(a.pk),
            "form-0-load": "10",
            "_save": "Save",
        }
        superuser = self._create_superuser("superuser")
        self.client.force_login(superuser)
        changelist_url = reverse("admin:admin_changelist_swallow_changelist")
        with CaptureQueriesContext(connection) as context:
            response = self.client.post(changelist_url, data=data)
            self.assertEqual(response.status_code, 200)
            # The 5th query issued by the POST is the one that loads the
            # edited objects; it must be pk-filtered. NOTE(review): the index
            # is position-dependent on the view's query sequence — confirm if
            # the view's queries change.
            self.assertIn("WHERE", context.captured_queries[4]["sql"])
            self.assertIn("IN", context.captured_queries[4]["sql"])
            # Check only the first few characters since the UUID may have
            # dashes.
            self.assertIn(str(a.pk)[:8], context.captured_queries[4]["sql"])
def test_list_editable_error_title(self):
a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
Swallow.objects.create(origin="Swallow B", load=2, speed=2)
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-uuid": str(a.pk),
"form-0-load": "invalid",
"_save": "Save",
}
superuser = self._create_superuser("superuser")
self.client.force_login(superuser)
changelist_url = reverse("admin:admin_changelist_swallow_changelist")
response = self.client.post(changelist_url, data=data)
self.assertContains(response, "Error: Select swallow to change")
    def test_deterministic_order_for_unordered_model(self):
        """
        The primary key is used in the ordering of the changelist's results to
        guarantee a deterministic order, even when the model doesn't have any
        default ordering defined (#17198).
        """
        superuser = self._create_superuser("superuser")
        # 50 objects sharing the same ``bool`` value, so ordering on that
        # field alone cannot distinguish rows.
        for counter in range(1, 51):
            UnorderedObject.objects.create(id=counter, bool=True)
        class UnorderedObjectAdmin(admin.ModelAdmin):
            # 5 pages of 10 results each.
            list_per_page = 10
        def check_results_order(ascending=False):
            # Register the admin, walk all 5 pages, and verify the ids run
            # 1..50 (ascending) or 50..1 (descending); unregister afterwards
            # so the next call can re-register with a modified ``ordering``.
            custom_site.register(UnorderedObject, UnorderedObjectAdmin)
            model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
            counter = 0 if ascending else 51
            for page in range(1, 6):
                request = self._mocked_authenticated_request(
                    "/unorderedobject/?p=%s" % page, superuser
                )
                response = model_admin.changelist_view(request)
                for result in response.context_data["cl"].result_list:
                    counter += 1 if ascending else -1
                    self.assertEqual(result.id, counter)
            custom_site.unregister(UnorderedObject)
        # When no order is defined at all, everything is ordered by '-pk'.
        check_results_order()
        # When an order field is defined but multiple records have the same
        # value for that field, make sure everything gets ordered by -pk as
        # well.
        UnorderedObjectAdmin.ordering = ["bool"]
        check_results_order()
        # When order fields are defined, including the pk itself, use them.
        UnorderedObjectAdmin.ordering = ["bool", "-pk"]
        check_results_order()
        UnorderedObjectAdmin.ordering = ["bool", "pk"]
        check_results_order(ascending=True)
        UnorderedObjectAdmin.ordering = ["-id", "bool"]
        check_results_order()
        UnorderedObjectAdmin.ordering = ["id", "bool"]
        check_results_order(ascending=True)
def test_ordering_from_model_meta(self):
Swallow.objects.create(origin="Swallow A", load=4, speed=2)
Swallow.objects.create(origin="Swallow B", load=2, speed=1)
Swallow.objects.create(origin="Swallow C", load=5, speed=1)
m = SwallowAdmin(Swallow, custom_site)
request = self._mocked_authenticated_request("/swallow/?o=", self.superuser)
changelist = m.get_changelist_instance(request)
queryset = changelist.get_queryset(request)
self.assertQuerySetEqual(
queryset,
[(1.0, 2.0), (1.0, 5.0), (2.0, 4.0)],
lambda s: (s.speed, s.load),
)
    def test_deterministic_order_for_model_ordered_by_its_manager(self):
        """
        The primary key is used in the ordering of the changelist's results to
        guarantee a deterministic order, even when the model has a manager that
        defines a default ordering (#17198).
        """
        superuser = self._create_superuser("superuser")
        # ``number`` mirrors the pk, so the manager's default ordering is
        # observable via the ids in the result list.
        for counter in range(1, 51):
            OrderedObject.objects.create(id=counter, bool=True, number=counter)
        class OrderedObjectAdmin(admin.ModelAdmin):
            # 5 pages of 10 results each.
            list_per_page = 10
        def check_results_order(ascending=False):
            # Register the admin, walk all 5 pages, and verify the ids run
            # 1..50 (ascending) or 50..1 (descending); unregister afterwards
            # so the next call can re-register with a modified ``ordering``.
            custom_site.register(OrderedObject, OrderedObjectAdmin)
            model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
            counter = 0 if ascending else 51
            for page in range(1, 6):
                request = self._mocked_authenticated_request(
                    "/orderedobject/?p=%s" % page, superuser
                )
                response = model_admin.changelist_view(request)
                for result in response.context_data["cl"].result_list:
                    counter += 1 if ascending else -1
                    self.assertEqual(result.id, counter)
            custom_site.unregister(OrderedObject)
        # When no order is defined at all, use the model's default ordering
        # (i.e. 'number').
        check_results_order(ascending=True)
        # When an order field is defined but multiple records have the same
        # value for that field, make sure everything gets ordered by -pk as
        # well.
        OrderedObjectAdmin.ordering = ["bool"]
        check_results_order()
        # When order fields are defined, including the pk itself, use them.
        OrderedObjectAdmin.ordering = ["bool", "-pk"]
        check_results_order()
        OrderedObjectAdmin.ordering = ["bool", "pk"]
        check_results_order(ascending=True)
        OrderedObjectAdmin.ordering = ["-id", "bool"]
        check_results_order()
        OrderedObjectAdmin.ordering = ["id", "bool"]
        check_results_order(ascending=True)
    @isolate_apps("admin_changelist")
    def test_total_ordering_optimization(self):
        """
        _get_deterministic_ordering() keeps an ordering unchanged when it is
        already total (it includes a non-nullable unique field, or every field
        of a non-nullable unique_together set), and appends '-pk' otherwise.
        """
        class Related(models.Model):
            unique_field = models.BooleanField(unique=True)
            class Meta:
                ordering = ("unique_field",)
        class Model(models.Model):
            unique_field = models.BooleanField(unique=True)
            unique_nullable_field = models.BooleanField(unique=True, null=True)
            related = models.ForeignKey(Related, models.CASCADE)
            other_related = models.ForeignKey(Related, models.CASCADE)
            related_unique = models.OneToOneField(Related, models.CASCADE)
            field = models.BooleanField()
            other_field = models.BooleanField()
            null_field = models.BooleanField(null=True)
            class Meta:
                unique_together = {
                    ("field", "other_field"),
                    ("field", "null_field"),
                    ("related", "other_related_id"),
                }
        class ModelAdmin(admin.ModelAdmin):
            def get_queryset(self, request):
                # No rows needed; only the ordering computation is under test.
                return Model.objects.none()
        request = self._mocked_authenticated_request("/", self.superuser)
        site = admin.AdminSite(name="admin")
        model_admin = ModelAdmin(Model, site)
        change_list = model_admin.get_changelist_instance(request)
        # (input ordering, expected deterministic ordering) pairs.
        tests = (
            ([], ["-pk"]),
            # Unique non-nullable field.
            (["unique_field"], ["unique_field"]),
            (["-unique_field"], ["-unique_field"]),
            # Unique nullable field.
            (["unique_nullable_field"], ["unique_nullable_field", "-pk"]),
            # Field.
            (["field"], ["field", "-pk"]),
            # Related field introspection is not implemented.
            (["related__unique_field"], ["related__unique_field", "-pk"]),
            # Related attname unique.
            (["related_unique_id"], ["related_unique_id"]),
            # Related ordering introspection is not implemented.
            (["related_unique"], ["related_unique", "-pk"]),
            # Composite unique.
            (["field", "-other_field"], ["field", "-other_field"]),
            # Composite unique nullable.
            (["-field", "null_field"], ["-field", "null_field", "-pk"]),
            # Composite unique and nullable.
            (
                ["-field", "null_field", "other_field"],
                ["-field", "null_field", "other_field"],
            ),
            # Composite unique attnames.
            (["related_id", "-other_related_id"], ["related_id", "-other_related_id"]),
            # Composite unique names.
            (["related", "-other_related_id"], ["related", "-other_related_id", "-pk"]),
        )
        # F() objects composite unique.
        total_ordering = [F("field"), F("other_field").desc(nulls_last=True)]
        # F() objects composite unique nullable.
        non_total_ordering = [F("field"), F("null_field").desc(nulls_last=True)]
        tests += (
            (total_ordering, total_ordering),
            (non_total_ordering, non_total_ordering + ["-pk"]),
        )
        for ordering, expected in tests:
            with self.subTest(ordering=ordering):
                self.assertEqual(
                    change_list._get_deterministic_ordering(ordering), expected
                )
    @isolate_apps("admin_changelist")
    def test_total_ordering_optimization_meta_constraints(self):
        """
        Like test_total_ordering_optimization, but with uniqueness declared via
        Meta.constraints: unconditional UniqueConstraints make an ordering
        total, while conditional (partial) ones are ignored.
        """
        class Related(models.Model):
            unique_field = models.BooleanField(unique=True)
            class Meta:
                ordering = ("unique_field",)
        class Model(models.Model):
            field_1 = models.BooleanField()
            field_2 = models.BooleanField()
            field_3 = models.BooleanField()
            field_4 = models.BooleanField()
            field_5 = models.BooleanField()
            field_6 = models.BooleanField()
            nullable_1 = models.BooleanField(null=True)
            nullable_2 = models.BooleanField(null=True)
            related_1 = models.ForeignKey(Related, models.CASCADE)
            related_2 = models.ForeignKey(Related, models.CASCADE)
            related_3 = models.ForeignKey(Related, models.CASCADE)
            related_4 = models.ForeignKey(Related, models.CASCADE)
            class Meta:
                constraints = [
                    # Unconditional single- and two-field unique constraints.
                    *[
                        models.UniqueConstraint(fields=fields, name="".join(fields))
                        for fields in (
                            ["field_1"],
                            ["nullable_1"],
                            ["related_1"],
                            ["related_2_id"],
                            ["field_2", "field_3"],
                            ["field_2", "nullable_2"],
                            ["field_2", "related_3"],
                            ["field_3", "related_4_id"],
                        )
                    ],
                    # Non-unique constraint: must not affect the ordering.
                    models.CheckConstraint(condition=models.Q(id__gt=0), name="foo"),
                    # Partial (conditional) unique constraint: ignored.
                    models.UniqueConstraint(
                        fields=["field_5"],
                        condition=models.Q(id__gt=10),
                        name="total_ordering_1",
                    ),
                    # Empty condition: treated as unconditional.
                    models.UniqueConstraint(
                        fields=["field_6"],
                        condition=models.Q(),
                        name="total_ordering",
                    ),
                ]
        class ModelAdmin(admin.ModelAdmin):
            def get_queryset(self, request):
                # No rows needed; only the ordering computation is under test.
                return Model.objects.none()
        request = self._mocked_authenticated_request("/", self.superuser)
        site = admin.AdminSite(name="admin")
        model_admin = ModelAdmin(Model, site)
        change_list = model_admin.get_changelist_instance(request)
        # (input ordering, expected deterministic ordering) pairs.
        tests = (
            # Unique non-nullable field.
            (["field_1"], ["field_1"]),
            # Unique nullable field.
            (["nullable_1"], ["nullable_1", "-pk"]),
            # Related attname unique.
            (["related_1_id"], ["related_1_id"]),
            (["related_2_id"], ["related_2_id"]),
            # Related ordering introspection is not implemented.
            (["related_1"], ["related_1", "-pk"]),
            # Composite unique.
            (["-field_2", "field_3"], ["-field_2", "field_3"]),
            # Composite unique nullable.
            (["field_2", "-nullable_2"], ["field_2", "-nullable_2", "-pk"]),
            # Composite unique and nullable.
            (
                ["field_2", "-nullable_2", "field_3"],
                ["field_2", "-nullable_2", "field_3"],
            ),
            # Composite field and related field name.
            (["field_2", "-related_3"], ["field_2", "-related_3", "-pk"]),
            (["field_3", "related_4"], ["field_3", "related_4", "-pk"]),
            # Composite field and related field attname.
            (["field_2", "related_3_id"], ["field_2", "related_3_id"]),
            (["field_3", "-related_4_id"], ["field_3", "-related_4_id"]),
            # Partial unique constraint is ignored.
            (["field_5"], ["field_5", "-pk"]),
            # Unique constraint with an empty condition.
            (["field_6"], ["field_6"]),
        )
        for ordering, expected in tests:
            with self.subTest(ordering=ordering):
                self.assertEqual(
                    change_list._get_deterministic_ordering(ordering), expected
                )
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name="parent")
for i in range(10):
Child.objects.create(name="child %s" % i, parent=parent)
user_noparents = self._create_superuser("noparents")
user_parents = self._create_superuser("parents")
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request("/child/", user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data["cl"].list_filter, ["name", "age"])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request("/child/", user_parents)
response = m.changelist_view(request)
self.assertEqual(
response.context_data["cl"].list_filter, ("parent", "name", "age")
)
def test_dynamic_search_fields(self):
child = self._create_superuser("child")
m = DynamicSearchFieldsChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request("/child/", child)
response = m.changelist_view(request)
self.assertEqual(response.context_data["cl"].search_fields, ("name", "age"))
    def test_pagination_page_range(self):
        """
        Regression tests for ticket #15653: ensure the number of pages
        generated for changelist views are correct.
        """
        # instantiating and setting up ChangeList object
        m = GroupAdmin(Group, custom_site)
        request = self.factory.get("/group/")
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        cl.list_per_page = 10
        ELLIPSIS = cl.paginator.ELLIPSIS
        # Each case: (current page number, total page count, expected
        # page_range, where ELLIPSIS marks an elided run of page numbers).
        for number, pages, expected in [
            (1, 1, []),
            (1, 2, [1, 2]),
            (6, 11, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
            (6, 12, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
            (6, 13, [1, 2, 3, 4, 5, 6, 7, 8, 9, ELLIPSIS, 12, 13]),
            (7, 12, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
            (7, 13, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),
            (7, 14, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ELLIPSIS, 13, 14]),
            (8, 13, [1, 2, ELLIPSIS, 5, 6, 7, 8, 9, 10, 11, 12, 13]),
            (8, 14, [1, 2, ELLIPSIS, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
            (8, 15, [1, 2, ELLIPSIS, 5, 6, 7, 8, 9, 10, 11, ELLIPSIS, 14, 15]),
        ]:
            with self.subTest(number=number, pages=pages):
                # assuming exactly `pages * cl.list_per_page` objects
                Group.objects.all().delete()
                for i in range(pages * cl.list_per_page):
                    Group.objects.create(name="test band")
                # setting page number and calculating page range
                cl.page_num = number
                cl.get_results(request)
                self.assertEqual(list(pagination(cl)["page_range"]), expected)
def test_object_tools_displayed_no_add_permission(self):
"""
When ModelAdmin.has_add_permission() returns False, the object-tools
block is still shown.
"""
superuser = self._create_superuser("superuser")
m = EventAdmin(Event, custom_site)
request = self._mocked_authenticated_request("/event/", superuser)
self.assertFalse(m.has_add_permission(request))
response = m.changelist_view(request)
self.assertIn('<ul class="object-tools">', response.rendered_content)
# The "Add" button inside the object-tools shouldn't appear.
self.assertNotIn("Add event", response.rendered_content)
def test_search_help_text(self):
superuser = self._create_superuser("superuser")
m = BandAdmin(Band, custom_site)
# search_fields without search_help_text.
m.search_fields = ["name"]
request = self._mocked_authenticated_request("/band/", superuser)
response = m.changelist_view(request)
self.assertIsNone(response.context_data["cl"].search_help_text)
self.assertNotContains(response, '<div class="help id="searchbar_helptext">')
# search_fields with search_help_text.
m.search_help_text = "Search help text"
request = self._mocked_authenticated_request("/band/", superuser)
response = m.changelist_view(request)
self.assertEqual(
response.context_data["cl"].search_help_text, "Search help text"
)
self.assertContains(
response, '<div class="help" id="searchbar_helptext">Search help text</div>'
)
self.assertContains(
response,
'<input type="text" size="40" name="q" value="" id="searchbar" '
'aria-describedby="searchbar_helptext">',
)
def test_search_role(self):
m = BandAdmin(Band, custom_site)
m.search_fields = ["name"]
request = self._mocked_authenticated_request("/band/", self.superuser)
response = m.changelist_view(request)
self.assertContains(
response,
'<h2 id="changelist-search-form" class="visually-hidden">Search bands</h2>',
)
self.assertContains(
response,
'<form id="changelist-search" method="get" role="search" '
'aria-labelledby="changelist-search-form">',
)
def test_search_bar_total_link_preserves_options(self):
self.client.force_login(self.superuser)
url = reverse("admin:auth_user_changelist")
for data, href in (
({"is_staff__exact": "0"}, "?"),
({"is_staff__exact": "0", IS_POPUP_VAR: "1"}, f"?{IS_POPUP_VAR}=1"),
({"is_staff__exact": "0", IS_FACETS_VAR: ""}, f"?{IS_FACETS_VAR}"),
(
{"is_staff__exact": "0", IS_POPUP_VAR: "1", IS_FACETS_VAR: ""},
f"?{IS_POPUP_VAR}=1&{IS_FACETS_VAR}",
),
):
with self.subTest(data=data):
response = self.client.get(url, data=data)
self.assertContains(
response, f'0 results (<a href="{href}">1 total</a>)'
)
def test_list_display_related_field(self):
parent = Parent.objects.create(name="I am your father")
child = Child.objects.create(name="I am your child", parent=parent)
GrandChild.objects.create(name="I am your grandchild", parent=child)
request = self._mocked_authenticated_request("/grandchild/", self.superuser)
m = GrandChildAdmin(GrandChild, custom_site)
response = m.changelist_view(request)
self.assertContains(response, parent.name)
self.assertContains(response, child.name)
def test_list_display_related_field_null(self):
GrandChild.objects.create(name="I am parentless", parent=None)
request = self._mocked_authenticated_request("/grandchild/", self.superuser)
m = GrandChildAdmin(GrandChild, custom_site)
response = m.changelist_view(request)
self.assertContains(response, '<td class="field-parent__name">-</td>')
self.assertContains(response, '<td class="field-parent__parent__name">-</td>')
def test_list_display_related_field_ordering(self):
parent_a = Parent.objects.create(name="Alice")
parent_z = Parent.objects.create(name="Zara")
Child.objects.create(name="Alice's child", parent=parent_a)
Child.objects.create(name="Zara's child", parent=parent_z)
class ChildAdmin(admin.ModelAdmin):
list_display = ["name", "parent__name"]
list_per_page = 1
m = ChildAdmin(Child, custom_site)
# Order ascending.
request = self._mocked_authenticated_request("/grandchild/?o=1", self.superuser)
response = m.changelist_view(request)
self.assertContains(response, parent_a.name)
self.assertNotContains(response, parent_z.name)
# Order descending.
request = self._mocked_authenticated_request(
"/grandchild/?o=-1", self.superuser
)
response = m.changelist_view(request)
self.assertNotContains(response, parent_a.name)
self.assertContains(response, parent_z.name)
def test_list_display_related_field_ordering_fields(self):
class ChildAdmin(admin.ModelAdmin):
list_display = ["name", "parent__name"]
ordering = ["parent__name"]
m = ChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request("/", self.superuser)
cl = m.get_changelist_instance(request)
self.assertEqual(cl.get_ordering_field_columns(), {2: "asc"})
| ChangeListTests |
python | py-pdf__pypdf | pypdf/_page.py | {
"start": 10322,
"end": 13591
} | class ____:
"""
Image within the PDF file. *This object is not designed to be built.*
This object should not be modified except using :func:`ImageFile.replace` to replace the image with a new one.
"""
name: str = ""
"""
Filename as identified within the PDF file.
"""
data: bytes = b""
"""
Data as bytes.
"""
image: Optional[Image] = None
"""
Data as PIL image.
"""
indirect_reference: Optional[IndirectObject] = None
"""
Reference to the object storing the stream.
"""
def replace(self, new_image: Image, **kwargs: Any) -> None:
"""
Replace the image with a new PIL image.
Args:
new_image (PIL.Image.Image): The new PIL image to replace the existing image.
**kwargs: Additional keyword arguments to pass to `Image.save()`.
Raises:
TypeError: If the image is inline or in a PdfReader.
TypeError: If the image does not belong to a PdfWriter.
TypeError: If `new_image` is not a PIL Image.
Note:
This method replaces the existing image with a new image.
It is not allowed for inline images or images within a PdfReader.
The `kwargs` parameter allows passing additional parameters
to `Image.save()`, such as quality.
"""
if pil_not_imported:
raise ImportError(
"pillow is required to do image extraction. "
"It can be installed via 'pip install pypdf[image]'"
)
from ._reader import PdfReader # noqa: PLC0415
# to prevent circular import
from .filters import _xobj_to_image # noqa: PLC0415
from .generic import DictionaryObject, PdfObject # noqa: PLC0415
if self.indirect_reference is None:
raise TypeError("Cannot update an inline image.")
if not hasattr(self.indirect_reference.pdf, "_id_translated"):
raise TypeError("Cannot update an image not belonging to a PdfWriter.")
if not isinstance(new_image, Image):
raise TypeError("new_image shall be a PIL Image")
b = BytesIO()
new_image.save(b, "PDF", **kwargs)
reader = PdfReader(b)
page_image = reader.pages[0].images[0]
assert page_image.indirect_reference is not None
self.indirect_reference.pdf._objects[self.indirect_reference.idnum - 1] = (
page_image.indirect_reference.get_object()
)
cast(
PdfObject, self.indirect_reference.get_object()
).indirect_reference = self.indirect_reference
# change the object attributes
extension, byte_stream, img = _xobj_to_image(
cast(DictionaryObject, self.indirect_reference.get_object()),
pillow_parameters=kwargs,
)
assert extension is not None
self.name = self.name[: self.name.rfind(".")] + extension
self.data = byte_stream
self.image = img
def __str__(self) -> str:
return f"{self.__class__.__name__}(name={self.name}, data: {_human_readable_bytes(len(self.data))})"
def __repr__(self) -> str:
return self.__str__()[:-1] + f", hash: {hash(self.data)})"
| ImageFile |
python | walkccc__LeetCode | solutions/1168. Optimize Water Distribution in a Village/1168.py | {
"start": 0,
"end": 779
} | class ____:
def minCostToSupplyWater(
self,
n: int,
wells: list[int],
pipes: list[list[int]],
) -> int:
ans = 0
graph = [[] for _ in range(n + 1)]
minHeap = [] # (d, u)
for u, v, w in pipes:
graph[u].append((v, w))
graph[v].append((u, w))
# Connect virtual 0 with nodes 1 to n.
for i, well in enumerate(wells):
graph[0].append((i + 1, well))
heapq.heappush(minHeap, (well, i + 1))
mst = {0}
while len(mst) < n + 1:
d, u = heapq.heappop(minHeap)
if u in mst:
continue
# Add the new vertex.
mst.add(u)
ans += d
# Expand if possible.
for v, w in graph[u]:
if v not in mst:
heapq.heappush(minHeap, (w, v))
return ans
| Solution |
python | pypa__hatch | backend/src/hatchling/metadata/core.py | {
"start": 55048,
"end": 58289
} | class ____(Generic[PluginManagerBound]):
def __init__(self, root: str, config: dict[str, Any], plugin_manager: PluginManagerBound) -> None:
self.root = root
self.config = config
self.plugin_manager = plugin_manager
self._cached: str | None = None
self._source_name: str | None = None
self._scheme_name: str | None = None
self._source: VersionSourceInterface | None = None
self._scheme: VersionSchemeInterface | None = None
@property
def cached(self) -> str:
if self._cached is None:
try:
self._cached = self.source.get_version_data()["version"]
except Exception as e: # noqa: BLE001
message = f"Error getting the version from source `{self.source.PLUGIN_NAME}`: {e}"
raise type(e)(message) from None
return self._cached
@property
def source_name(self) -> str:
if self._source_name is None:
source: str = self.config.get("source", "regex")
if not source:
message = "The `source` option under the `tool.hatch.version` table must not be empty if defined"
raise ValueError(message)
if not isinstance(source, str):
message = "Field `tool.hatch.version.source` must be a string"
raise TypeError(message)
self._source_name = source
return self._source_name
@property
def scheme_name(self) -> str:
if self._scheme_name is None:
scheme: str = self.config.get("scheme", "standard")
if not scheme:
message = "The `scheme` option under the `tool.hatch.version` table must not be empty if defined"
raise ValueError(message)
if not isinstance(scheme, str):
message = "Field `tool.hatch.version.scheme` must be a string"
raise TypeError(message)
self._scheme_name = scheme
return self._scheme_name
@property
def source(self) -> VersionSourceInterface:
if self._source is None:
from copy import deepcopy
source_name = self.source_name
version_source = self.plugin_manager.version_source.get(source_name)
if version_source is None:
from hatchling.plugin.exceptions import UnknownPluginError
message = f"Unknown version source: {source_name}"
raise UnknownPluginError(message)
self._source = version_source(self.root, deepcopy(self.config))
return self._source
@property
def scheme(self) -> VersionSchemeInterface:
if self._scheme is None:
from copy import deepcopy
scheme_name = self.scheme_name
version_scheme = self.plugin_manager.version_scheme.get(scheme_name)
if version_scheme is None:
from hatchling.plugin.exceptions import UnknownPluginError
message = f"Unknown version scheme: {scheme_name}"
raise UnknownPluginError(message)
self._scheme = version_scheme(self.root, deepcopy(self.config))
return self._scheme
| HatchVersionConfig |
python | ray-project__ray | python/ray/util/state/common.py | {
"start": 2343,
"end": 3743
} | class ____:
"""A class containing default methods to
convert units into a human readable string."""
def timestamp(x: float):
"""Converts milliseconds to a datetime object."""
return str(datetime.datetime.fromtimestamp(x / 1000))
def memory(x: int):
"""Converts raw bytes to a human readable memory size."""
if x >= 2**30:
return str(format(x / (2**30), ".3f")) + " GiB"
elif x >= 2**20:
return str(format(x / (2**20), ".3f")) + " MiB"
elif x >= 2**10:
return str(format(x / (2**10), ".3f")) + " KiB"
return str(format(x, ".3f")) + " B"
def duration(x: int):
"""Converts milliseconds to a human readable duration."""
return str(datetime.timedelta(milliseconds=x))
def events(events: List[dict]):
"""Converts a list of task events into a human readable format."""
for event in events:
if "created_ms" in event:
event["created_ms"] = Humanify.timestamp(event["created_ms"])
return events
def node_resources(resources: dict):
"""Converts a node's resources into a human readable format."""
for resource in resources:
if "memory" in resource:
resources[resource] = Humanify.memory(resources[resource])
return resources
@dataclass(init=not IS_PYDANTIC_2)
| Humanify |
python | PrefectHQ__prefect | src/prefect/server/events/services/triggers.py | {
"start": 738,
"end": 2184
} | class ____(RunInEphemeralServers, Service):
"""Evaluates reactive automation triggers"""
consumer_task: asyncio.Task[None] | None = None
@classmethod
def service_settings(cls) -> ServicesBaseSetting:
return get_current_settings().server.services.triggers
async def start(self) -> NoReturn:
assert self.consumer_task is None, "Reactive triggers already started"
consumer_name = generate_unique_consumer_name("reactive-triggers")
logger.info(
f"ReactiveTriggers starting with unique consumer name: {consumer_name}"
)
self.consumer: Consumer = create_consumer(
"events", group="reactive-triggers", name=consumer_name
)
async with triggers.consumer() as handler:
self.consumer_task = asyncio.create_task(self.consumer.run(handler))
logger.debug("Reactive triggers started")
try:
await self.consumer_task
except asyncio.CancelledError:
pass
async def stop(self) -> None:
assert self.consumer_task is not None, "Reactive triggers not started"
self.consumer_task.cancel()
try:
await self.consumer_task
except asyncio.CancelledError:
pass
finally:
await self.consumer.cleanup()
self.consumer_task = None
logger.debug("Reactive triggers stopped")
| ReactiveTriggers |
python | pytorch__pytorch | test/export/test_passes.py | {
"start": 2207,
"end": 2403
} | class ____(OperatorSupport):
def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
return node.op == "call_function" and node.target in {operator.add}
| _AddOperatorSupport |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 43950,
"end": 45745
} | class ____(VOWarning, ValueError):
r"""Invalid arraysize attribute.
From the VOTable 1.2 spec:
A table cell can contain an array of a given primitive type,
with a fixed or variable number of elements; the array may
even be multidimensional. For instance, the position of a
point in a 3D space can be defined by the following::
<FIELD ID="point_3D" datatype="double" arraysize="3"/>
and each cell corresponding to that definition must contain
exactly 3 numbers. An asterisk (\*) may be appended to
indicate a variable number of elements in the array, as in::
<FIELD ID="values" datatype="int" arraysize="100*"/>
where it is specified that each cell corresponding to that
definition contains 0 to 100 integer numbers. The number may
be omitted to specify an unbounded array (in practice up to
=~2×10⁹ elements).
A table cell can also contain a multidimensional array of a
given primitive type. This is specified by a sequence of
dimensions separated by the ``x`` character, with the first
dimension changing fastest; as in the case of a simple array,
the last dimension may be variable in length. As an example,
the following definition declares a table cell which may
contain a set of up to 10 images, each of 64×64 bytes::
<FIELD ID="thumbs" datatype="unsignedByte" arraysize="64×64×10*"/>
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:dim>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:dim>`__
"""
message_template = "Invalid arraysize attribute '{}'"
default_args = ("x",)
| E13 |
python | ray-project__ray | python/ray/data/tests/test_dataset_stats.py | {
"start": 14664,
"end": 16605
} | class ____:
"""Test suite for individual aggregator functions."""
def test_numerical_aggregators(self):
"""Test numerical_aggregators function."""
aggs = numerical_aggregators("test_column")
assert len(aggs) == 8
assert all(hasattr(agg, "get_target_column") for agg in aggs)
assert all(agg.get_target_column() == "test_column" for agg in aggs)
# Check aggregator types
agg_types = [type(agg) for agg in aggs]
assert Count in agg_types
assert Mean in agg_types
assert Min in agg_types
assert Max in agg_types
assert Std in agg_types
assert MissingValuePercentage in agg_types
assert ZeroPercentage in agg_types
assert ApproximateQuantile in agg_types
def test_categorical_aggregators(self):
"""Test categorical_aggregators function."""
aggs = categorical_aggregators("test_column")
assert len(aggs) == 3
assert all(hasattr(agg, "get_target_column") for agg in aggs)
assert all(agg.get_target_column() == "test_column" for agg in aggs)
# Check aggregator types
agg_types = [type(agg) for agg in aggs]
assert Count in agg_types
assert MissingValuePercentage in agg_types
assert ApproximateTopK in agg_types
def test_vector_aggregators(self):
"""Test vector_aggregators function."""
aggs = vector_aggregators("test_column")
assert len(aggs) == 2
assert all(hasattr(agg, "get_target_column") for agg in aggs)
assert all(agg.get_target_column() == "test_column" for agg in aggs)
# Check aggregator types
agg_types = [type(agg) for agg in aggs]
assert Count in agg_types
assert MissingValuePercentage in agg_types
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestIndividualAggregatorFunctions |
python | getsentry__sentry | src/sentry/api/bases/avatar.py | {
"start": 516,
"end": 1804
} | class ____(serializers.Serializer[dict[str, Any]]):
avatar_photo = AvatarField(required=False)
avatar_type = serializers.ChoiceField(
choices=(("upload", "upload"), ("gravatar", "gravatar"), ("letter_avatar", "letter_avatar"))
)
def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:
attrs = super().validate(attrs)
if attrs.get("avatar_type") == "upload":
model_type = self.context["type"]
kwargs_copy = self.context["kwargs"].copy()
if "user" in kwargs_copy:
user = kwargs_copy.pop("user")
kwargs_copy["user_id"] = user.id
if issubclass(model_type, ControlAvatarBase):
has_existing_file = model_type.objects.filter(
control_file_id__isnull=False, **kwargs_copy
).exists()
else:
has_existing_file = model_type.objects.filter(
file_id__isnull=False, **kwargs_copy
).exists()
if not has_existing_file and not attrs.get("avatar_photo"):
raise serializers.ValidationError(
{"avatar_type": "Cannot set avatar_type to upload without avatar_photo"}
)
return attrs
| AvatarSerializer |
python | dask__dask | dask/dataframe/dask_expr/io/io.py | {
"start": 7463,
"end": 9745
} | class ____(PartitionsFiltered, BlockwiseIO):
_parameters = [
"func",
"iterables",
"args",
"kwargs",
"user_meta",
"enforce_metadata",
"user_divisions",
"label",
"_partitions",
]
_defaults = {
"user_meta": no_default,
"enforce_metadata": False,
"user_divisions": None,
"label": None,
"_partitions": None,
}
_absorb_projections = False
@functools.cached_property
def _name(self):
if self.label is None:
return funcname(self.func).lower() + "-" + self.deterministic_token
else:
return self.label + "-" + self.deterministic_token
@functools.cached_property
def _meta(self):
if self.operand("user_meta") is not no_default:
meta = self.operand("user_meta")
return make_meta(meta)
else:
vals = [v[0] for v in self.iterables]
meta = delayed(self.func)(*vals, *self.args, **self.kwargs)
return delayed(make_meta)(meta).compute()
def _divisions(self):
if self.operand("user_divisions"):
return self.operand("user_divisions")
else:
npartitions = len(self.iterables[0])
return (None,) * (npartitions + 1)
@property
def apply_func(self):
if self.enforce_metadata:
return apply_and_enforce
return self.func
@functools.cached_property
def apply_kwargs(self):
kwargs = self.kwargs
if self.enforce_metadata:
kwargs = kwargs.copy()
kwargs.update(
{
"_func": self.func,
"_meta": self._meta,
}
)
return kwargs
def _filtered_task(self, name: Key, index: int) -> Task:
vals = [v[index] for v in self.iterables]
if self.enforce_metadata:
return Task(
name,
apply_and_enforce,
*vals,
*self.args,
**self.apply_kwargs,
_data_producer=True,
)
return Task(
name, self.func, *vals, *self.args, **self.apply_kwargs, _data_producer=True
)
| FromMap |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 661826,
"end": 662304
} | class ____(
sgqlc.types.Type
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "enterprise", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise")
message = sgqlc.types.Field(String, graphql_name="message")
| UpdateEnterpriseMembersCanChangeRepositoryVisibilitySettingPayload |
python | PyCQA__pyflakes | pyflakes/test/test_doctests.py | {
"start": 12357,
"end": 12469
} | class ____(_DoctestMixin, TestImports):
"""Run TestImports with each test wrapped in a doctest."""
| TestImports |
python | celery__celery | celery/canvas.py | {
"start": 35886,
"end": 52283
} | class ____(Signature):
tasks = getitem_property('kwargs.tasks', 'Tasks in chain.')
@classmethod
def from_dict(cls, d, app=None):
tasks = d['kwargs']['tasks']
if tasks:
if isinstance(tasks, tuple): # aaaargh
tasks = d['kwargs']['tasks'] = list(tasks)
tasks = [maybe_signature(task, app=app) for task in tasks]
return cls(tasks, app=app, **d['options'])
def __init__(self, *tasks, **options):
tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
else tasks)
super().__init__('celery.chain', (), {'tasks': tasks}, **options
)
self._use_link = options.pop('use_link', None)
self.subtask_type = 'chain'
self._frozen = None
def __call__(self, *args, **kwargs):
if self.tasks:
return self.apply_async(args, kwargs)
def __or__(self, other):
if isinstance(other, group):
# unroll group with one member
other = maybe_unroll_group(other)
if not isinstance(other, group):
return self.__or__(other)
# chain | group() -> chain
tasks = self.unchain_tasks()
if not tasks:
# If the chain is empty, return the group
return other
if isinstance(tasks[-1], chord):
# CHAIN [last item is chord] | GROUP -> chain with chord body.
tasks[-1].body = tasks[-1].body | other
return type(self)(tasks, app=self.app)
# use type(self) for _chain subclasses
return type(self)(seq_concat_item(
tasks, other), app=self._app)
elif isinstance(other, _chain):
# chain | chain -> chain
return reduce(operator.or_, other.unchain_tasks(), self)
elif isinstance(other, Signature):
if self.tasks and isinstance(self.tasks[-1], group):
# CHAIN [last item is group] | TASK -> chord
sig = self.clone()
sig.tasks[-1] = chord(
sig.tasks[-1], other, app=self._app)
# In the scenario where the second-to-last item in a chain is a chord,
# it leads to a situation where two consecutive chords are formed.
# In such cases, a further upgrade can be considered.
# This would involve chaining the body of the second-to-last chord with the last chord."
if len(sig.tasks) > 1 and isinstance(sig.tasks[-2], chord):
sig.tasks[-2].body = sig.tasks[-2].body | sig.tasks[-1]
sig.tasks = sig.tasks[:-1]
return sig
elif self.tasks and isinstance(self.tasks[-1], chord):
# CHAIN [last item is chord] -> chain with chord body.
sig = self.clone()
sig.tasks[-1].body = sig.tasks[-1].body | other
return sig
else:
# chain | task -> chain
# use type(self) for _chain subclasses
return type(self)(seq_concat_item(
self.unchain_tasks(), other), app=self._app)
else:
return NotImplemented
def clone(self, *args, **kwargs):
to_signature = maybe_signature
signature = super().clone(*args, **kwargs)
signature.kwargs['tasks'] = [
to_signature(sig, app=self._app, clone=True)
for sig in signature.kwargs['tasks']
]
return signature
def unchain_tasks(self):
"""Return a list of tasks in the chain.
The tasks list would be cloned from the chain's tasks.
All of the chain callbacks would be added to the last task in the (cloned) chain.
All of the tasks would be linked to the same error callback
as the chain itself, to ensure that the correct error callback is called
if any of the (cloned) tasks of the chain fail.
"""
# Clone chain's tasks assigning signatures from link_error
# to each task and adding the chain's links to the last task.
tasks = [t.clone() for t in self.tasks]
for sig in maybe_list(self.options.get('link')) or []:
tasks[-1].link(sig)
for sig in maybe_list(self.options.get('link_error')) or []:
for task in tasks:
task.link_error(sig)
return tasks
def apply_async(self, args=None, kwargs=None, **options):
# python is best at unpacking kwargs, so .run is here to do that.
args = args if args else ()
kwargs = kwargs if kwargs else []
app = self.app
if app.conf.task_always_eager:
with allow_join_result():
return self.apply(args, kwargs, **options)
return self.run(args, kwargs, app=app, **(
dict(self.options, **options) if options else self.options))
def run(self, args=None, kwargs=None, group_id=None, chord=None,
task_id=None, link=None, link_error=None, publisher=None,
producer=None, root_id=None, parent_id=None, app=None,
group_index=None, **options):
"""Executes the chain.
Responsible for executing the chain in the correct order.
In a case of a chain of a single task, the task is executed directly
and the result is returned for that task specifically.
"""
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
args = args if args else ()
kwargs = kwargs if kwargs else []
app = app or self.app
use_link = self._use_link
if use_link is None and app.conf.task_protocol == 1:
use_link = True
args = (tuple(args) + tuple(self.args)
if args and not self.immutable else self.args)
# Unpack nested chains/groups/chords
tasks, results_from_prepare = self.prepare_steps(
args, kwargs, self.tasks, root_id, parent_id, link_error, app,
task_id, group_id, chord, group_index=group_index,
)
# For a chain of single task, execute the task directly and return the result for that task
# For a chain of multiple tasks, execute all of the tasks and return the AsyncResult for the chain
if results_from_prepare:
if link:
tasks[0].extend_list_option('link', link)
first_task = tasks.pop()
options = _prepare_chain_from_options(options, tasks, use_link)
result_from_apply = first_task.apply_async(**options)
# If we only have a single task, it may be important that we pass
# the real result object rather than the one obtained via freezing.
# e.g. For `GroupResult`s, we need to pass back the result object
# which will actually have its promise fulfilled by the subtasks,
# something that will never occur for the frozen result.
if not tasks:
return result_from_apply
else:
return results_from_prepare[0]
# in order for a chain to be frozen, each of the members of the chain individually needs to be frozen
# TODO figure out why we are always cloning before freeze
def freeze(self, _id=None, group_id=None, chord=None,
root_id=None, parent_id=None, group_index=None):
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
_, results = self._frozen = self.prepare_steps(
self.args, self.kwargs, self.tasks, root_id, parent_id, None,
self.app, _id, group_id, chord, clone=False,
group_index=group_index,
)
return results[0]
def stamp(self, visitor=None, append_stamps=False, **headers):
visitor_headers = None
if visitor is not None:
visitor_headers = visitor.on_chain_start(self, **headers) or {}
headers = self._stamp_headers(visitor_headers, append_stamps, **headers)
self.stamp_links(visitor, **headers)
for task in self.tasks:
task.stamp(visitor, append_stamps, **headers)
if visitor is not None:
visitor.on_chain_end(self, **headers)
def prepare_steps(self, args, kwargs, tasks,
root_id=None, parent_id=None, link_error=None, app=None,
last_task_id=None, group_id=None, chord_body=None,
clone=True, from_dict=Signature.from_dict,
group_index=None):
"""Prepare the chain for execution.
To execute a chain, we first need to unpack it correctly.
During the unpacking, we might encounter other chains, groups, or chords
which we need to unpack as well.
For example:
chain(signature1, chain(signature2, signature3)) --> Upgrades to chain(signature1, signature2, signature3)
chain(group(signature1, signature2), signature3) --> Upgrades to chord([signature1, signature2], signature3)
The responsibility of this method is to ensure that the chain is
correctly unpacked, and then the correct callbacks are set up along the way.
Arguments:
args (Tuple): Partial args to be prepended to the existing args.
kwargs (Dict): Partial kwargs to be merged with existing kwargs.
tasks (List[Signature]): The tasks of the chain.
root_id (str): The id of the root task.
parent_id (str): The id of the parent task.
link_error (Union[List[Signature], Signature]): The error callback.
will be set for all tasks in the chain.
app (Celery): The Celery app instance.
last_task_id (str): The id of the last task in the chain.
group_id (str): The id of the group that the chain is a part of.
chord_body (Signature): The body of the chord, used to synchronize with the chain's
last task and the chord's body when used together.
clone (bool): Whether to clone the chain's tasks before modifying them.
from_dict (Callable): A function that takes a dict and returns a Signature.
Returns:
Tuple[List[Signature], List[AsyncResult]]: The frozen tasks of the chain, and the async results
"""
app = app or self.app
# use chain message field for protocol 2 and later.
# this avoids pickle blowing the stack on the recursion
# required by linking task together in a tree structure.
# (why is pickle using recursion? or better yet why cannot python
# do tail call optimization making recursion actually useful?)
use_link = self._use_link
if use_link is None and app.conf.task_protocol == 1:
use_link = True
steps = deque(tasks)
# optimization: now the pop func is a local variable
steps_pop = steps.pop
steps_extend = steps.extend
prev_task = None
prev_res = None
tasks, results = [], []
i = 0
# NOTE: We are doing this in reverse order.
# The result is a list of tasks in reverse order, that is
# passed as the ``chain`` message field.
# As it's reversed the worker can just do ``chain.pop()`` to
# get the next task in the chain.
while steps:
task = steps_pop()
# if steps is not empty, this is the first task - reverse order
# if i = 0, this is the last task - again, because we're reversed
is_first_task, is_last_task = not steps, not i
if not isinstance(task, abstract.CallableSignature):
task = from_dict(task, app=app)
if isinstance(task, group):
# when groups are nested, they are unrolled - all tasks within
# groups should be called in parallel
task = maybe_unroll_group(task)
# first task gets partial args from chain
if clone:
if is_first_task:
task = task.clone(args, kwargs)
else:
task = task.clone()
elif is_first_task:
task.args = tuple(args) + tuple(task.args)
if isinstance(task, _chain):
# splice (unroll) the chain
steps_extend(task.tasks)
continue
# TODO why isn't this asserting is_last_task == False?
if isinstance(task, group) and prev_task:
# automatically upgrade group(...) | s to chord(group, s)
# for chords we freeze by pretending it's a normal
# signature instead of a group.
tasks.pop()
results.pop()
try:
task = chord(
task, body=prev_task,
task_id=prev_res.task_id, root_id=root_id, app=app,
)
except AttributeError:
# A GroupResult does not have a task_id since it consists
# of multiple tasks.
# We therefore, have to construct the chord without it.
# Issues #5467, #3585.
task = chord(
task, body=prev_task,
root_id=root_id, app=app,
)
if tasks:
prev_task = tasks[-1]
prev_res = results[-1]
else:
prev_task = None
prev_res = None
if is_last_task:
# chain(task_id=id) means task id is set for the last task
# in the chain. If the chord is part of a chord/group
# then that chord/group must synchronize based on the
# last task in the chain, so we only set the group_id and
# chord callback for the last task.
res = task.freeze(
last_task_id,
root_id=root_id, group_id=group_id, chord=chord_body,
group_index=group_index,
)
else:
res = task.freeze(root_id=root_id)
i += 1
if prev_task:
if use_link:
# link previous task to this task.
task.link(prev_task)
if prev_res and not prev_res.parent:
prev_res.parent = res
if link_error:
for errback in maybe_list(link_error):
task.link_error(errback)
tasks.append(task)
results.append(res)
prev_task, prev_res = task, res
if isinstance(task, chord):
app.backend.ensure_chords_allowed()
# If the task is a chord, and the body is a chain
# the chain has already been prepared, and res is
# set to the last task in the callback chain.
# We need to change that so that it points to the
# group result object.
node = res
while node.parent:
node = node.parent
prev_res = node
self.id = last_task_id
return tasks, results
def apply(self, args=None, kwargs=None, **options):
args = args if args else ()
kwargs = kwargs if kwargs else {}
last, (fargs, fkwargs) = None, (args, kwargs)
for task in self.tasks:
res = task.clone(fargs, fkwargs).apply(
last and (last.get(),), **dict(self.options, **options))
res.parent, last, (fargs, fkwargs) = last, res, (None, None)
return last
@property
def app(self):
app = self._app
if app is None:
try:
app = self.tasks[0]._app
except LookupError:
pass
return app or current_app
def __repr__(self):
if not self.tasks:
return f'<{type(self).__name__}@{id(self):#x}: empty>'
return remove_repeating_from_task(
self.tasks[0]['task'],
' | '.join(repr(t) for t in self.tasks))
| _chain |
python | skorch-dev__skorch | skorch/llm/classifier.py | {
"start": 21564,
"end": 30553
} | class ____(_LlmBase):
"""Zero-shot classification using a Large Language Model (LLM).
This class allows you to use an LLM from Hugging Face transformers for
zero-shot classification. There is no training during the ``fit`` call,
instead, the LLM will be prompted to predict the labels for each sample.
Parameters
----------
model_name : str or None (default=None)
The name of the model to use. This is the same name as used on Hugging
Face Hub. For example, to use GPT2, pass ``'gpt2'``, to use the small
flan-t5 model, pass ``'google/flan-t5-small'``. If the ``model_name``
parameter is passed, don't pass ``model`` or ``tokenizer`` parameters.
model : torch.nn.Module or None (default=None)
The model to use. This should be a PyTorch text generation model from
Hugging Face Hub or a model with the same API. Most notably, the model
should have a ``generate`` method. If you pass the ``model``, you should
also pass the ``tokenizer``, but you shall not pass the ``model_name``.
Passing the model explicitly instead of the ``model_name`` can have a few
advantages. Most notably, this allows you to modify the model, e.g.
changing its config or how the model is loaded. For instance, some models
can only be loaded with the option ``trust_remote_code=True``. If using
the ``model_name`` argument, the default settings will be used instead.
Passing the model explicitly also allows you to use custom models that are
not uploaded to Hugging Face Hub.
tokenizer (default=None)
A tokenizer that is compatible with the model. Typically, this is loaded
using the ``AutoTokenizer.from_pretrained`` method provided by Hugging
Face transformers. If you pass the ``tokenizer``, you should also pass the
``model``, but you should not pass the ``model_name``.
prompt : str or None (default=None)
The prompt to use. This is the text that will be passed to the model to
generate the prediction. If no prompt is passed, a default prompt will be
used. The prompt should be a Python string with two placeholders, one
called ``text`` and one called ``labels``. The ``text`` placeholder will
replaced by the contents from ``X`` and the ``labels`` placeholder will be
replaced by the unique labels taken from ``y``.
An example prompt could be something like this:
"Classify this text: {text}. Possible labels are {labels}". Your
response: "
All general tips for good prompt crafting apply here as well. Be aware
that if the prompt is too long, it will exceed the context size of the
model.
probas_sum_to_1 : bool (default=True)
If ``True``, then the probabilities for each sample will be normalized to
sum to 1. If ``False``, the probabilities will not be normalized.
In general, without normalization, the probabilities will not sum to 1
because the LLM can generate any token, not just the labels. Since the
model is restricted to only generate the available labels, there will be
some probability mass that is unaccounted for. You could consider the
missing probability mass to be an implicit 'other' class.
In general, you should set this parameter to ``True`` because the default
assumption is that probabilities sum to 1. However, setting this to
``False`` can be useful for debugging purposes, as it allows you to see
how much probability the LLM assigns to different tokens. If the total
probabilities are very low, it could be a sign that the LLM is not
powerful enough or that the prompt is not well crafted.
device : str or torch.device (default='cpu')
The device to use. In general, using a GPU or other accelerated hardware
is advised if runtime performance is critical.
Note that if the ``model`` parameter is passed explicitly, the device of
that model takes precedence over the value of ``device``.
error_low_prob : {'ignore', 'warn', 'raise', 'return_none'} (default='ignore')
Controls what should happen if the sum of the probabilities for a sample
is below a given threshold. When encountering low probabilities, the
options are to do one of the following:
- ``'ignore'``: do nothing
- ``'warn'``: issue a warning
- ``'raise'``: raise an error
- ``'return_none'``: return ``None`` as the prediction when calling
``.predict``
The threshold is controlled by the ``threshold_low_prob`` parameter.
threshold_low_prob : float (default=0.0)
The threshold for the sum of the probabilities below which they are
considered to be too low. The consequences of low probabilities are
controlled by the ``error_low_prob`` parameter.
use_caching : bool (default=True)
If ``True``, the predictions for each sample will be cached, as well as
the intermediate result for each generated token. This can speed up
predictions when some samples are duplicated, or when labels have a long
common prefix. An example of the latter would be if a label is called
"intent.support.email" and another label is called "intent.support.phone",
then the tokens for the common prefix "intent.support." are reused for
both labels, as their probabilities are identical.
Note that caching is currently not supported for encoder-decoder
architectures such as flan-t5. If you want to use such an architecture,
turn caching off.
If you see any issues you might suspect are caused by caching, turn this
option off, see if it helps, and report the issue on the skorch GitHub
page.
Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier. This attribute can be used
to identify which column in the probabilties returned by ``predict_proba``
corresponds to which class.
"""
def __init__(
self,
model_name=None,
*,
model=None,
tokenizer=None,
prompt=None,
probas_sum_to_1=True,
device='cpu',
error_low_prob='ignore',
threshold_low_prob=0.0,
use_caching=True,
):
self.model_name = model_name
self.model = model
self.tokenizer = tokenizer
self.prompt = prompt
self.probas_sum_to_1 = probas_sum_to_1
self.device = device
self.error_low_prob = error_low_prob
self.threshold_low_prob = threshold_low_prob
self.use_caching = use_caching
def check_prompt(self, prompt):
"""Check if the prompt is well formed.
If no prompt is provided, return the default prompt.
Raises
------
ValueError
When the prompt is not well formed.
"""
if prompt is None:
prompt = DEFAULT_PROMPT_ZERO_SHOT
kwargs = {
'text': "some text",
'labels': ["foo", "bar"],
}
_check_format_string(prompt, kwargs)
return prompt
def get_prompt(self, text):
"""Return the prompt for the given sample."""
self.check_is_fitted()
return self.prompt_.format(text=text, labels=self.classes_.tolist())
def check_X_y(self, X, y, **fit_params):
"""Check that input data is well-behaved."""
# X can be None but not y
if y is None:
raise ValueError(
"y cannot be None, as it is used to infer the existing classes"
)
if not isinstance(y[0], str):
# don't raise an error, as, hypothetically, the LLM could also
# predict encoded targets, but it's not advisable
warnings.warn(
"y should contain the name of the labels as strings, e.g. "
"'positive' and 'negative', don't pass label-encoded targets"
)
def fit(self, X, y, **fit_params):
"""Prepare everything to enable predictions.
There is no actual fitting going on here, as the LLM is used as is.
Parameters
----------
X : array-like of shape (n_samples,)
The input data. For zero-shot classification, this can be ``None``.
y : array-like of shape (n_samples,)
The target classes. Ensure that each class that the LLM should be able
to predict is present at least once. Classes that are not present
during the ``fit`` call will never be predicted.
**fit_params : dict
Additional fitting parameters. This is mostly a placeholder for
sklearn-compatibility, as there is no actual fitting process.
Returns
-------
self
The fitted estimator.
"""
return self._fit(X, y, **fit_params)
| ZeroShotClassifier |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1377332,
"end": 1381845
} | class ____(sgqlc.types.Type, Node):
"""A threaded list of comments for a given pull request."""
__schema__ = github_schema
__field_names__ = (
"comments",
"diff_side",
"is_collapsed",
"is_outdated",
"is_resolved",
"line",
"original_line",
"original_start_line",
"path",
"pull_request",
"repository",
"resolved_by",
"start_diff_side",
"start_line",
"subject_type",
"viewer_can_reply",
"viewer_can_resolve",
"viewer_can_unresolve",
)
comments = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestReviewCommentConnection),
graphql_name="comments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("skip", sgqlc.types.Arg(Int, graphql_name="skip", default=None)),
)
),
)
"""A list of pull request comments associated with the thread.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `skip` (`Int`): Skips the first _n_ elements in the list.
"""
diff_side = sgqlc.types.Field(sgqlc.types.non_null(DiffSide), graphql_name="diffSide")
"""The side of the diff on which this thread was placed."""
is_collapsed = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isCollapsed")
"""Whether or not the thread has been collapsed (resolved)"""
is_outdated = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isOutdated")
"""Indicates whether this thread was outdated by newer changes."""
is_resolved = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isResolved")
"""Whether this thread has been resolved"""
line = sgqlc.types.Field(Int, graphql_name="line")
"""The line in the file to which this thread refers"""
original_line = sgqlc.types.Field(Int, graphql_name="originalLine")
"""The original line in the file to which this thread refers."""
original_start_line = sgqlc.types.Field(Int, graphql_name="originalStartLine")
"""The original start line in the file to which this thread refers
(multi-line only).
"""
path = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="path")
"""Identifies the file path of this thread."""
pull_request = sgqlc.types.Field(sgqlc.types.non_null(PullRequest), graphql_name="pullRequest")
"""Identifies the pull request associated with this thread."""
repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository")
"""Identifies the repository associated with this thread."""
resolved_by = sgqlc.types.Field("User", graphql_name="resolvedBy")
"""The user who resolved this thread"""
start_diff_side = sgqlc.types.Field(DiffSide, graphql_name="startDiffSide")
"""The side of the diff that the first line of the thread starts on
(multi-line only)
"""
start_line = sgqlc.types.Field(Int, graphql_name="startLine")
"""The start line in the file to which this thread refers (multi-line
only)
"""
subject_type = sgqlc.types.Field(sgqlc.types.non_null(PullRequestReviewThreadSubjectType), graphql_name="subjectType")
"""The level at which the comments in the corresponding thread are
targeted, can be a diff line or a file
"""
viewer_can_reply = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanReply")
"""Indicates whether the current viewer can reply to this thread."""
viewer_can_resolve = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanResolve")
"""Whether or not the viewer can resolve this thread"""
viewer_can_unresolve = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanUnresolve")
"""Whether or not the viewer can unresolve this thread"""
| PullRequestReviewThread |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 91762,
"end": 94149
} | class ____(SingleContinuousDistribution):
_argnames=('alpha','a','b')
@property
def set(self):
return Interval(self.a, self.b)
@staticmethod
def check(alpha, a, b):
_value_check(a.is_real, "Continuous Boundary parameter should be real.")
_value_check(b.is_real, "Continuous Boundary parameter should be real.")
_value_check(a < b, " 'a' the left Boundary must be smaller than 'b' the right Boundary." )
_value_check(alpha.is_positive, "Continuous Shape parameter should be positive.")
def pdf(self, x):
alpha, a, b = self.alpha, self.a, self.b
num = alpha*(x - a)**(alpha - 1)
den = (b - a)**alpha
return num/den
def PowerFunction(name, alpha, a, b):
r"""
Creates a continuous random variable with a Power Function Distribution.
Explanation
===========
The density of PowerFunction distribution is given by
.. math::
f(x) := \frac{{\alpha}(x - a)^{\alpha - 1}}{(b - a)^{\alpha}}
with :math:`x \in [a,b]`.
Parameters
==========
alpha : Positive number, `0 < \alpha`, the shape parameter
a : Real number, :math:`-\infty < a`, the left boundary
b : Real number, :math:`a < b < \infty`, the right boundary
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import PowerFunction, density, cdf, E, variance
>>> from sympy import Symbol
>>> alpha = Symbol("alpha", positive=True)
>>> a = Symbol("a", real=True)
>>> b = Symbol("b", real=True)
>>> z = Symbol("z")
>>> X = PowerFunction("X", 2, a, b)
>>> density(X)(z)
(-2*a + 2*z)/(-a + b)**2
>>> cdf(X)(z)
Piecewise((a**2/(a**2 - 2*a*b + b**2) - 2*a*z/(a**2 - 2*a*b + b**2) +
z**2/(a**2 - 2*a*b + b**2), a <= z), (0, True))
>>> alpha = 2
>>> a = 0
>>> b = 1
>>> Y = PowerFunction("Y", alpha, a, b)
>>> E(Y)
2/3
>>> variance(Y)
1/18
References
==========
.. [1] https://web.archive.org/web/20200204081320/http://www.mathwave.com/help/easyfit/html/analyses/distributions/power_func.html
"""
return rv(name, PowerFunctionDistribution, (alpha, a, b))
#-------------------------------------------------------------------------------
# QuadraticU distribution ------------------------------------------------------
| PowerFunctionDistribution |
python | google__jax | tests/pmap_test.py | {
"start": 3353,
"end": 83480
} | class ____(jtu.JaxTestCase):
@property
def pmap(self):
return src_api.pmap
def testDeviceBufferToArray(self):
# NOTE(dsuo): Under `pmap_shmap_merge=True`, the resulting array is sharded,
# whereas under `pmap_shmap_merge=False`, the resulting array is
# "SingleDeviceSharded". The attribute `unsafe_buffer_pointer` is
# unavailable for sharded arrays.
if config.pmap_shmap_merge.value:
self.skipTest("Test fails because pmap is jit(shmap).")
sda = self.pmap(lambda x: x)(jnp.ones((jax.device_count(), 2)))
# Changed in https://github.com/jax-ml/jax/pull/10584 not to access
# sda.device_buffers, which isn't supported, and instead ensure fast slices
# of the arrays returned by pmap are set up correctly.
# buf = sda.device_buffers[-1]
buf = sda[-1]
view = jnp.array(buf, copy=False)
self.assertArraysEqual(sda[-1], view)
self.assertSetEqual(buf.devices(), view.devices())
self.assertEqual(buf.unsafe_buffer_pointer(), view.unsafe_buffer_pointer())
copy = jnp.array(buf, copy=True)
self.assertArraysEqual(sda[-1], copy)
self.assertSetEqual(buf.devices(), copy.devices())
self.assertNotEqual(buf.unsafe_buffer_pointer(), copy.unsafe_buffer_pointer())
def _getMeshShape(self, device_mesh_shape):
device_count = jax.device_count()
if any(size == -1 for size in device_mesh_shape):
try:
return np.arange(device_count).reshape(device_mesh_shape).shape
except ValueError as err:
msg = "device mesh shape {} not compatible with device count {}"
raise SkipTest(msg.format(device_mesh_shape, device_count)) from err
else:
if device_count % math.prod(device_mesh_shape):
msg = "device mesh size {} does not divide available device count {}"
raise SkipTest(msg.format(math.prod(device_mesh_shape), device_count))
else:
return device_mesh_shape
  def testBasic(self):
    """pmap of a simple psum collective matches the numpy reference."""
    f = self.pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i')
    shape = (jax.device_count(), 4)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    # psum over the mapped axis equals a numpy sum over axis 0, broadcast
    # back across that axis.
    expected = x - np.sum(x, 0)
    ans = f(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testDefaultDeviceOrdering(self):
    # Users rely on the fact that the default order of jax.devices() matches
    # the default order of pmap for single-host jobs.
    device_order = jax.devices()
    pmap_sharding = pmap(lambda x: x)(np.arange(jax.device_count())).sharding
    # The two pmap implementations expose the device order through different
    # sharding attributes, so branch on the config flag.
    if config.pmap_shmap_merge.value:
      self.assertListEqual(device_order, list(pmap_sharding._device_assignment))
    else:
      self.assertListEqual(device_order, pmap_sharding.devices.tolist())
def testLowerCompile(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = f(x)
lowered = f.lower(x)
compiled = lowered.compile()
ans = compiled(x)
self.assertAllClose(ans, expected)
# It's a pair of: (positional args, as a tuple of their structures, kwargs).
for obj in [lowered, compiled]:
self.assertFalse(obj._no_kwargs)
self.assertEqual(obj.in_tree, jax.tree.flatten(((0,), {}))[1])
self.assertEqual(obj.in_avals, ((core.ShapedArray(x.shape, x.dtype),), {}))
def testLowerCompileInTreeMismatch(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f_exe = f.lower(x).compile()
self.assertRaisesRegex(
TypeError,
'Function compiled with input pytree does not match the input pytree it'
' was called with',
lambda: f_exe([x]))
def testLowerCompileTrivial(self):
f = self.pmap(lambda x: x, axis_name='i')
x = np.arange(jax.device_count(), dtype=np.float32)
expected = f(x)
f_exe = f.lower(x).compile()
ans = f_exe(x)
self.assertAllClose(ans, expected)
def testLowerCompileTrivialInTreeMismatch(self):
f = self.pmap(lambda x: x, axis_name='i')
x = np.arange(jax.device_count(), dtype=np.float32)
f_exe = f.lower(x).compile()
self.assertRaisesRegex(
TypeError,
'Function compiled with input pytree does not match the input pytree it'
' was called with',
lambda: f_exe([x]))
def testLowerCompileArgTypeMismatch(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=int).reshape(shape)
x_f32 = x.astype(jnp.float32)
x_i32 = x.astype(jnp.int32)
f_exe = f.lower(x_f32).compile()
if config.pmap_shmap_merge.value:
expected_regex = r"Argument types differ .*"
r"The mismatches are:\n"
r"Argument 'args[0]' compiled with.*float32.*and called with.*int32.*"
else:
expected_regex = r"Argument types differ .*"
r"The mismatches are:\n"
r"Argument 'x' compiled with.*float32.*and called with.*int32.*"
self.assertRaisesRegex(
TypeError,
expected_regex,
lambda: f_exe(x_i32))
def testLowerCompileMultiArg(self):
f = self.pmap(lambda x, y: x - lax.pmean(y, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = y = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = f(x, y)
f_exe = f.lower(x, y).compile()
ans = f_exe(x, y)
self.assertAllClose(ans, expected)
def testLowerCompileTrivialMultiArg(self):
f = self.pmap(lambda x, y: (x, y), axis_name='i')
x = y = np.arange(jax.device_count(), dtype=np.float32)
expected = f(x, y)
f_exe = f.lower(x, y).compile()
ans = f_exe(x, y)
self.assertAllClose(ans, expected)
def testLowerAsText(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f = f.lower(x)
self.assertIsInstance(f.as_text(), str)
self.assertIsInstance(f.as_text(dialect='hlo'), str)
self.assertIsInstance(f.as_text(dialect='stablehlo'), str)
def testLowerCompilerIR(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f = f.lower(x)
self.assertIsNotNone(f.compiler_ir())
self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
self.assertIsNotNone(f.compiler_ir(dialect='stablehlo'))
def testLowerCompileCompilerIR(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f = f.lower(x).compile()
self.assertIsNotNone(f.runtime_executable())
def testLowerCompileAsText(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f = f.lower(x).compile()
self.assertIsInstance(f.as_text(), (str, type(None)))
def testLowerCostAnalysis(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f = f.lower(x)
f.cost_analysis() # doesn't raise
def testLowerCompileCostAnalysis(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f = f.lower(x).compile()
f.cost_analysis() # doesn't raise
def testLowerCompileMemoryAnalysis(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f = f.lower(x).compile()
f.memory_analysis() # doesn't raise
def testLowerCompileExecutable(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f = f.lower(x).compile()
self.assertIsNotNone(f.runtime_executable())
def test_jit_lower_compile_with_compiler_options(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
lowered = f.lower(x)
lowered.compile( # doesn't crash
compiler_options={"xla_embed_ir_in_executable": True})
def test_jit_lower_compile_with_compiler_options_invalid(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
lowered = f.lower(x)
self.assertRaisesRegex(
jax.errors.JaxRuntimeError, "No such compile option: 'invalid_key'",
lambda: lowered.compile(
compiler_options={"invalid_key": "invalid_value"}))
self.assertRaisesRegex(
jax.errors.JaxRuntimeError, "is not a valid bool value.",
lambda: lowered.compile(
compiler_options={"xla_embed_ir_in_executable": "invalid_value"}))
def test_pmap_replicated_copy(self):
# https://github.com/jax-ml/jax/issues/17690
inp = jnp.arange(jax.device_count())
x = jax.pmap(lambda x: x, in_axes=0, out_axes=None)(inp)
out = jnp.copy(x)
if config.pmap_shmap_merge.value:
self.assertIsInstance(out.sharding, jax.sharding.NamedSharding)
else:
self.assertIsInstance(out.sharding, jax.sharding.SingleDeviceSharding)
self.assertArraysEqual(out, inp[0])
def test_jit_lower_compile_with_compiler_options_multiple(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
lowered = f.lower(x)
l1 = lowered.compile()
l2 = lowered.compile(
compiler_options={"xla_embed_ir_in_executable": True})
l3 = lowered.compile(
compiler_options={"xla_embed_ir_in_executable": False})
# Ideally we could test that these objects are different only in
# that they respect the different options. Object identity is a
# heuristic proxy for that.
self.assertTrue(l1 is not l2)
self.assertTrue(l1 is not l3)
self.assertTrue(l2 is not l3)
# We should still error on invalid options after some valid compiles
self.assertRaisesRegex(
jax.errors.JaxRuntimeError, "No such compile option: 'invalid_key'",
lambda: lowered.compile(
compiler_options={"invalid_key": "invalid_value"}))
def testLowerShapedArray(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
x_shape = core.ShapedArray(x.shape, x.dtype)
ans = f.lower(x_shape).compile()(x)
self.assertAllClose(ans, f(x))
def testLowerHasReplicaAttributes(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
num_devices = jax.device_count()
shape = (num_devices, 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
lowered = f.lower(x)
hlo = lowered.as_text("stablehlo")
if config.pmap_shmap_merge.value:
self.assertIn(f"mhlo.num_partitions = {num_devices}", hlo)
self.assertIn("mhlo.num_replicas = 1", hlo)
else:
self.assertIn(f"mhlo.num_replicas = {num_devices}", hlo)
self.assertIn("mhlo.num_partitions = 1", hlo)
  def testMean(self):
    """pmap of a pmean collective matches the numpy mean over the mapped axis."""
    f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
    shape = (jax.device_count(), 4)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    # pmean over the mapped axis equals a numpy mean over axis 0, broadcast
    # back to the full shape.
    expected = x - np.broadcast_to(np.mean(x, 0), x.shape)
    ans = f(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
def testGather(self):
f = self.pmap(lambda x: lax.all_gather(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = np.array([x] * jax.device_count())
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testGatherBool(self):
f = self.pmap(lambda x: lax.all_gather(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
x = (x % 2).astype(np.bool_)
expected = np.array([x] * jax.device_count())
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testGatherNegativeAxis(self):
f = self.pmap(lambda x: lax.all_gather(x, 'i', axis=-1), axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = np.array([x.T] * jax.device_count())
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testGatherTiled(self):
f = self.pmap(lambda x: lax.all_gather(x, 'i', tiled=True), axis_name='i')
device_count = jax.device_count()
shape = (device_count, 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = np.array([x] * device_count).reshape(device_count, -1)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testGatherTiledNegativeAxis(self):
f = self.pmap(lambda x: lax.all_gather(x, 'i', tiled=True, axis=-1),
axis_name='i')
device_count = jax.device_count()
shape = (device_count, 4, 3)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = np.array([x.transpose(1, 0, 2).reshape(4, -1)] * device_count)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters([
('Gather', lax.all_gather),
('ReduceScatter', lax.psum_scatter)
])
def testVmapOf(self, prim):
f = self.pmap(partial(prim, axis_name='i'), axis_name='i')
device_count = jax.device_count()
shape = (4, device_count, device_count)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
self.assertAllClose(vmap(f)(x), jnp.stack([f(xs) for xs in x], axis=0))
def testReduceScatter(self):
f = self.pmap(lambda x: lax.psum_scatter(x, 'i'), axis_name='i')
device_count = jax.device_count()
shape = (device_count, device_count)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = np.sum(x, axis=0)
ans = f(x)
for i, actual in enumerate(ans):
self.assertAllClose(actual, expected[i])
def testReduceScatterTiled(self):
f = self.pmap(lambda x: lax.psum_scatter(x, 'i', tiled=True), axis_name='i')
device_count = jax.device_count()
shape = (device_count, 4 * device_count)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = np.sum(x, axis=0)
ans = f(x)
scatter_len = len(expected) // device_count
for i, actual in enumerate(ans):
self.assertAllClose(actual,
expected[i * scatter_len:(i + 1) * scatter_len])
def testReduceScatterReplicaGroupsTiled(self):
replicas = jax.device_count()
if replicas % 2 != 0:
raise SkipTest
axis_index_groups = [[i for i in range(jax.device_count()) if i % 2 == 0],
[i for i in range(jax.device_count()) if i % 2 != 0]]
f = lambda x: lax.psum_scatter(
x, 'i', axis_index_groups=axis_index_groups, tiled=True)
f = self.pmap(f, axis_name='i')
shape = (replicas, 4 * replicas)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = f(x)
group_1_result = np.sum(x[0::2,:], axis=0)
group_2_result = np.sum(x[1::2,:], axis=0)
# the result is scattered over (replicas // 2) devices
scatter_len = len(group_1_result) * 2 // replicas
for i, actual in enumerate(ans):
expected = group_1_result if i % 2 == 0 else group_2_result
self.assertAllClose(
actual, expected[i // 2 * scatter_len:(i // 2 + 1) * scatter_len])
def testTrees(self):
ptranspose = lambda x, axis_name: lax.all_to_all(x, axis_name, 0, 0)
def protate(x, axis_name):
n = lax.axis_size(axis_name)
return lax.ppermute(x, axis_name, [(i, (i + 1) % n) for i in range(n)])
tree_f = lambda f: partial(jax.tree.map, f)
jax_f = lambda p: self.pmap(lambda x: p(x, 'i'), 'i')
np_f = lambda p: tree_f(lambda x: np.broadcast_to(p(x, 0), x.shape))
np_transpose = tree_f(np.transpose)
np_rotate = tree_f(lambda x: np.concatenate([x[-1:], x[:-1]]))
n = jax.device_count()
x = {'a': np.arange(1 * n * n, 2 * n * n).reshape([n, n]),
'b': np.arange(2 * n * n, 3 * n * n).reshape([n, n]),
'c': np.arange(4 * n * n, 5 * n * n).reshape([n, n])}
assert_allclose = partial(jax.tree.map,
partial(self.assertAllClose, check_dtypes=False))
assert_allclose(jax_f(lax.pmax)(x), np_f(np.max)(x))
assert_allclose(jax_f(lax.pmin)(x), np_f(np.min)(x))
assert_allclose(jax_f(lax.psum)(x), np_f(np.sum)(x))
assert_allclose(jax_f(lax.pmean)(x), np_f(np.mean)(x))
assert_allclose(jax_f(ptranspose)(x), np_transpose(x))
assert_allclose(jax_f(protate)(x), np_rotate(x))
def testCollectivesWithTreesOfDifferentDtypes(self):
n = len(jax.devices())
x = {'a': np.arange(1 * n * n, 2 * n * n, dtype=np.float32).reshape([n, n]),
'b': np.arange(2 * n * n, 3 * n * n, dtype=np.int32).reshape([n, n]),
'c': np.arange(4 * n * n, 5 * n * n, dtype=np.float32).reshape([n, n]),
'd': np.arange(6 * n * n, 7 * n * n, dtype=np.int32).reshape([n, n])}
tree_f = lambda f: partial(jax.tree.map, f)
jax_f = lambda p: self.pmap(lambda x: p(x, 'i'), 'i')
np_f = lambda p: tree_f(lambda x: np.broadcast_to(p(x, 0), x.shape))
assert_allclose = partial(jax.tree.map,
partial(self.assertAllClose, check_dtypes=False))
assert_allclose(jax_f(lax.pmax)(x), np_f(np.max)(x))
assert_allclose(jax_f(lax.pmin)(x), np_f(np.min)(x))
assert_allclose(jax_f(lax.psum)(x), np_f(np.sum)(x))
assert_allclose(jax_f(lax.pmean)(x), np_f(np.mean)(x))
def testComplexPsum(self):
f = self.pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i')
shape = (jax.device_count(), 4 * 2)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape).view(np.complex64)
expected = x - np.sum(x, 0)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.sample_product(
split_axis=list(range(2)),
concat_axis=list(range(2)),
dtype=lax_test_util.all_dtypes,
)
def testAllToAll(self, split_axis, concat_axis, dtype):
pmap_in_axis = 0
shape = (jax.device_count(),) * 3
rng = jtu.rand_default(self.rng())
x = rng(shape, dtype)
@partial(self.pmap, axis_name='i')
def f(x):
return lax.all_to_all(x, 'i', split_axis, concat_axis)
y = f(x)
if pmap_in_axis <= split_axis:
split_axis += 1
ref = jnp.moveaxis(x, (pmap_in_axis, split_axis),
(concat_axis + 1, 0))
self.assertAllClose(y, ref)
@parameterized.named_parameters(
{"testcase_name": f"_split={split_axis}_concat={concat_axis}",
"split_axis": split_axis, "concat_axis": concat_axis}
for split_axis, concat_axis in it.product(range(2), range(2)))
def testAllToAllSplitAxis(self, split_axis, concat_axis):
if jax.device_count() < 4:
raise SkipTest("test requires at least four devices")
if jtu.device_under_test() == "gpu":
raise SkipTest("TODO(b/456133538): Disable on GPUs until we figure out.")
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
pmap_in_axis = 0
shape = (4, 4, 4)
x = np.arange(math.prod(shape)).reshape(shape)
@partial(self.pmap, axis_name='i')
@partial(self.pmap, axis_name='j')
def f(x):
return lax.all_to_all(x, ('i', 'j'), split_axis, concat_axis)
unroll_shape = (2, 2, *shape[1:])
x_unroll = x.reshape(unroll_shape)
y_unroll = f(x_unroll)
y = y_unroll.reshape(shape)
if pmap_in_axis <= split_axis:
split_axis += 1
ref = jnp.moveaxis(x, (pmap_in_axis, split_axis),
(concat_axis + 1, 0))
self.assertAllClose(y, ref)
def testNestedPmapAxisSwap(self):
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
# Regression test for https://github.com/jax-ml/jax/issues/5757
if jax.device_count() < 8:
raise SkipTest("test requires at least 8 devices")
f = jax.pmap(jax.pmap(lambda x: x, in_axes=1, out_axes=0), in_axes=0,
out_axes=0)
A = jnp.ones((2, 4, 3))
self.assertAllClose(A.transpose((0, 2, 1)), f(A))
def testNestedBasic(self):
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
f = lambda x: lax.psum(lax.psum(x, 'i'), 'j')
f = self.pmap(self.pmap(f, 'i'), 'j')
def sum_and_broadcast(x, axis):
return np.repeat(np.sum(x, axis, keepdims=True), x.shape[axis], axis)
shape = (jax.device_count(), 1, 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = f(x)
expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1)
self.assertAllClose(ans, expected, check_dtypes=False)
def testMismatchedAxisSizes(self):
n = jax.device_count()
f = self.pmap(lambda x, y: x + y)
self.assertRaisesRegex(
ValueError,
"pmap got inconsistent sizes for array axes to be mapped",
lambda: f(self.rng().randn(n), self.rng().randn(n - 1)))
def testInAxesPyTreePrefixMismatchError(self):
x = jnp.array([3.14])
f = self.pmap(lambda x, y: x, in_axes=((0, 0, 0), 0))
with self.assertRaisesRegex(ValueError, re.escape("pmap in_axes[0][0]")):
f((x, x), x)
def testInAxesPyTreePrefixMismatchErrorKwargs(self):
x = jnp.array([3.14])
f = self.pmap(lambda x, y: x, in_axes=((0, 0), 0))
with self.assertRaisesRegex(
ValueError, re.escape("each argument passed by keyword is mapped")):
f(x=(x, x), y=x)
def testOutAxesPyTreePrefixMismatchError(self):
x = jnp.array([3.14])
f = jax.pmap(lambda x, y: ((x, x), x), out_axes=((0, 0, 0), 0))
with self.assertRaisesRegex(ValueError, re.escape("pmap out_axes[0]")):
f(x, x)
@parameterized.named_parameters(
{"testcase_name": f"_mesh={device_mesh_shape}".replace(" ", ""),
"device_mesh_shape": device_mesh_shape}
for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testNestedShardingAndStacking(self, device_mesh_shape):
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
mesh_shape = self._getMeshShape(device_mesh_shape)
f = lambda x: x
f = self.pmap(self.pmap(f, 'i'), 'j')
shape = mesh_shape + (4,)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = f(x)
expected = x
self.assertEqual(ans.shape, expected.shape)
self.assertAllClose(ans, expected, check_dtypes=False)
def testPartiallyMapped(self):
f = self.pmap(lambda x, y: x, in_axes=(None, 0))
g = self.pmap(lambda x, y: x - lax.psum(y, 'i'), axis_name='i', in_axes=(None, 0))
mesh_shape = (jax.device_count(),)
shape = mesh_shape + (4,)
x = np.array(3., dtype=np.float32)
y = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
f_expected = np.broadcast_to(x, mesh_shape)
f_ans = f(x, y)
self.assertAllClose(f_ans, f_expected)
self.assertIsInstance(f_ans, array.ArrayImpl)
if config.pmap_shmap_merge.value:
if jax.device_count() == 1:
self.assertEmpty(f_ans.sharding.spec)
else:
self.assertLen(f_ans.sharding.spec, 1)
axis = f_ans.sharding.spec[0]
self.assertEqual(axis, f_ans.sharding.mesh.axis_names[0])
else:
sharding_spec = f_ans.sharding.sharding_spec
# the output is actually replicated (has the same values in each device
# buffer) but out_axes is implicitly 0, so we shouldn't have replication
# in the sharding spec.
self.assertEmpty([a for a in sharding_spec.mesh_mapping
if isinstance(a, pxla.Replicated)])
g_expected = np.broadcast_to(x - np.sum(y, 0, keepdims=True), shape)
g_ans = g(x, y)
self.assertAllClose(g_ans, g_expected)
self.assertIsInstance(g_ans, array.ArrayImpl)
if config.pmap_shmap_merge.value:
if jax.device_count() == 1:
self.assertEmpty(g_ans.sharding.spec)
else:
self.assertLen(g_ans.sharding.spec, 1)
axis = g_ans.sharding.spec[0]
self.assertEqual(axis, g_ans.sharding.mesh.axis_names[0])
else:
sharding_spec = g_ans.sharding.sharding_spec
self.assertEmpty([a for a in sharding_spec.mesh_mapping
if isinstance(a, pxla.Replicated)])
@parameterized.named_parameters(
{"testcase_name": f"_mesh={device_mesh_shape}".replace(" ", ""),
"device_mesh_shape": device_mesh_shape}
for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testPartiallyMappedNested(self, device_mesh_shape):
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
mesh_shape = self._getMeshShape(device_mesh_shape)
f = self.pmap(lambda x, y: x - lax.psum(y, 'i'), axis_name='i', in_axes=(None, 0))
f = self.pmap(f, axis_name='j', in_axes=(None, 0))
x = 3.
y = np.arange(math.prod(mesh_shape), dtype=np.float32).reshape(mesh_shape)
expected = np.broadcast_to(x - np.sum(y, 1, keepdims=True), mesh_shape)
ans = f(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
def testJvpAndPartialEval(self):
@partial(self.pmap, axis_name='i')
def f(x):
return jnp.sin(x)
def splitjvp(x):
_, jvp = linearize(f, x)
return jvp(jnp.ones_like(x))
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = np.cos(x)
ans = splitjvp(x)
self.assertAllClose(ans, expected, check_dtypes=False)
make_jaxpr(splitjvp)(x) # doesn't crash
def testGradBasic(self):
@partial(self.pmap, axis_name='i')
def f(x):
return jnp.sin(x)
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = grad(lambda x: jnp.sum(jnp.sin(x)))(x)
expected = grad(lambda x: jnp.sum(f(x)))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testGradOfPsum(self):
@partial(self.pmap, axis_name='i')
def f(x):
return lax.psum(x, axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
jtu.check_grads(f, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, eps=1.)
def testGradOfJvp(self):
@partial(self.pmap, axis_name='i')
def f(x):
return jnp.sin(x)
def splitjvp(x):
_, jvp = linearize(f, x)
return jvp(jnp.ones_like(x))
fun = lambda x: jnp.sum(jvp(jnp.sin, (x,), (jnp.ones_like(x),))[1])
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = grad(lambda x: jnp.sum(splitjvp(x)))(x)
expected = grad(fun)(x)
self.assertAllClose(ans, expected)
  def testTwoArgsGrad(self):
    # Gradient through a two-argument pmapped function that uses a psum
    # collective, compared against an unmapped reference.
    def f(x, y):
      return lax.psum(5. * jnp.cos(x) * jnp.sin(y), 'i')
    f = self.pmap(f, 'i')
    def g(x, y):
      tot = jnp.sum(5. * jnp.cos(x) * jnp.sin(y))
      return tot * jnp.ones_like(x)  # broadcast to map like pjit does
    shape = (jax.device_count(), 4)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    y = 4 + x
    # NOTE(review): both `ans` and `expected` differentiate through the
    # reference `g`, so this comparison is trivially true and the pmapped
    # `f` is never exercised.  Presumably `ans` was meant to use `f`;
    # confirm the intended scaling through psum's transpose before changing.
    ans = grad(lambda x, y: jnp.sum(g(x, y)))(x, y)
    expected = grad(lambda x, y: jnp.sum(g(x, y)))(x, y)
    self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_mesh={device_mesh_shape}".replace(" ", ""),
"device_mesh_shape": device_mesh_shape}
for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testNestedWithClosure(self, device_mesh_shape):
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
mesh_shape = self._getMeshShape(device_mesh_shape)
@partial(self.pmap, axis_name='i')
def test_fun(x):
y = jnp.sum(jnp.sin(x))
@partial(self.pmap, axis_name='j')
def g(z):
return 3. * jnp.exp(jnp.sin(x).sum() * jnp.cos(y) * jnp.tan(z))
return grad(lambda w: jnp.sum(g(w)))(x)
@vmap
def baseline_fun(x):
y = jnp.sum(jnp.sin(x))
@vmap
def g(z):
return 3. * jnp.exp(jnp.sin(x).sum() * jnp.cos(y) * jnp.tan(z))
return grad(lambda w: jnp.sum(g(w)))(x)
shape = mesh_shape + (4,)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = grad(lambda x: jnp.sum(test_fun(x)))(x)
expected = grad(lambda x: jnp.sum(baseline_fun(x)))(x)
self.assertAllClose(ans, expected, atol=1e-3, rtol=1e-3)
@jtu.ignore_warning(category=DeprecationWarning)
def testArrays(self):
inner_f = lambda x: 2 * x
f = self.pmap(inner_f, axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
# test that we can pass in and out Arrays
y = f(x)
self.assertIsInstance(y, jax.Array)
self.assertIsInstance(y, array.ArrayImpl)
self.assertNotIsInstance(y, np.ndarray)
self.assertAllClose(y, 2 * x, check_dtypes=False)
z = f(y)
self.assertIsInstance(z, array.ArrayImpl)
self.assertNotIsInstance(z, np.ndarray)
self.assertAllClose(z, 2 * 2 * x, check_dtypes=False)
# test that we can pass in a regular Array
y = f(device_put(x))
self.assertIsInstance(y, array.ArrayImpl)
self.assertAllClose(y, 2 * x, check_dtypes=False)
# test that we can pass an Array to a regular jit computation
z = y + y
self.assertAllClose(z, 2 * 2 * x, check_dtypes=False)
# test that we can handle device movement on dispatch
bufs = y._arrays[::-1]
devices = [list(b.devices())[0] for b in bufs]
if config.pmap_shmap_merge.value:
mesh = jax.sharding.Mesh(devices, 'i')
sharding = jax.sharding.NamedSharding(mesh, y.sharding.spec)
# NOTE(dsuo): Need to redefine pmap with the updated devices.
f = self.pmap(inner_f, axis_name='i', devices=devices)
else:
sharding = jax.sharding.PmapSharding(devices, y.sharding.sharding_spec)
y = jax.make_array_from_single_device_arrays(y.shape, sharding, bufs)
z = f(y)
self.assertAllClose(z, 2 * 2 * x[::-1], check_dtypes=False)
# test that the repr doesn't crash
repr(z)
# test that we can lexically capture a sda as a constant.
g = jit(lambda z: z + y)
self.assertAllClose(g(7), y + 7)
# Tests edge cases in lax._reshape_sharded_device_array
@parameterized.named_parameters(
{"testcase_name": f"_in={in_shape}_out={out_shape}"
.replace(" ", ""),
"in_shape": in_shape, "out_shape": out_shape}
for in_shape, out_shape in [
[(1,1), (1,)], [(1,), (1,1)], [(1,), ()], [(4,7), (2,2,7)]
])
def testArrayReshape(self, in_shape, out_shape):
if jax.device_count() < max(in_shape[:1] + out_shape[:1]):
raise SkipTest("not enough devices")
x = np.arange(math.prod(in_shape)).reshape(in_shape)
sharded_x = self.pmap(lambda x: x)(x)
self.assertAllClose(sharded_x.reshape(out_shape), x.reshape(out_shape),
check_dtypes=False)
def testPsumMultiple(self):
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
f = lambda x: lax.psum(x, ('i', 'j'))
f = self.pmap(self.pmap(f, 'i'), 'j')
def sum_and_broadcast(x, axis):
return np.repeat(np.sum(x, axis, keepdims=True), x.shape[axis], axis)
device_count = jax.device_count()
num_pairs, ragged = divmod(device_count, 2)
if num_pairs > 1 and not ragged:
shape = (num_pairs, 2, 4)
else:
shape = (device_count, 1, 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = f(x)
expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1)
self.assertAllClose(ans, expected, check_dtypes=False)
def testPsumConstantReplicaGroups(self):
replicas = jax.device_count()
if replicas % 2 != 0:
raise SkipTest
axis_index_groups = np.arange(replicas).reshape(
2, replicas // 2).tolist()
f = lambda x: x - lax.psum(2., 'i', axis_index_groups=axis_index_groups)
f = self.pmap(f, 'i')
shape = (replicas, 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected_psum = 2. * replicas // 2
expected = x - expected_psum
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("tpu")
def testPsumUnevenReplicaGroups(self):
replicas = jax.device_count()
if replicas <= 2:
raise SkipTest("Test expected devices greater than 2.")
axis_index_groups = [[0,1], np.arange(2,replicas)]
f = lambda x: x - lax.psum(x, 'i', axis_index_groups=axis_index_groups)
f = self.pmap(f, 'i')
shape = (replicas, 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
def sum_helper(a):
return np.broadcast_to(a.sum(0, keepdims=True),
(len(a), x.shape[1]))
expected_psum_1 = sum_helper(x[0:2])
expected_psum_2 = sum_helper(x[2:])
expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 0)
expected = x - expected_psum
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testPsumReplicaGroups(self):
replicas = jax.device_count()
if replicas % 2 != 0:
raise SkipTest
axis_index_groups = np.arange(replicas).reshape(
2, replicas // 2).tolist()
f = lambda x: x - lax.psum(x, 'i', axis_index_groups=axis_index_groups)
f = self.pmap(f, 'i')
shape = (replicas, 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
def sum_helper(a):
return np.broadcast_to(a.sum(0, keepdims=True),
(replicas // 2, x.shape[1]))
expected_psum_1 = sum_helper(x[:replicas // 2])
expected_psum_2 = sum_helper(x[replicas // 2:])
expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 0)
expected = x - expected_psum
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testGatherReplicaGroups(self):
replicas = jax.device_count()
if replicas % 2 != 0:
raise SkipTest("Test expected an even number of devices greater than 1.")
axis_index_groups = np.arange(replicas, dtype=np.int32)
axis_index_groups = axis_index_groups.reshape((replicas // 2, 2)).T
axis_index_groups = axis_index_groups.tolist()
f = lambda x: lax.all_gather(x, 'i', axis_index_groups=axis_index_groups)
f = self.pmap(f, 'i')
shape = (replicas, 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = f(x)
group_1_result = x[0::2]
group_2_result = x[1::2]
expected = np.empty((replicas, replicas // 2, x.shape[1]))
expected[0::2] = group_1_result
expected[1::2] = group_2_result
self.assertAllClose(ans, expected, check_dtypes=False)
def testGatherReplicaGroupsInterleaved(self):
replicas = jax.device_count()
if replicas % 2 != 0:
raise SkipTest("Test expected an even number of devices greater than 1.")
indexes = np.arange(replicas)
indexes = np.concatenate([indexes[::2], indexes[1::2]])
axis_index_groups = indexes.reshape(2, replicas // 2).tolist()
f = lambda x: lax.all_gather(x, 'i', axis_index_groups=axis_index_groups)
f = self.pmap(f, 'i')
shape = (replicas, 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = f(x)
expected = np.zeros((replicas, replicas // 2, x.shape[1]))
expected[::2] = x[::2]
expected[1::2] = x[1::2]
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(it.chain.from_iterable([
(name, prim, False, False),
(name + 'Tiled', prim, True, False),
(name + 'IndexGroups', prim, False, True),
] for name, prim in
(('Gather', lax.all_gather), ('ReduceScatter', lax.psum_scatter))
))
def testGradOf(self, prim, tiled, use_axis_index_groups):
axis_index_groups = None
devices = jax.devices()
if use_axis_index_groups:
if len(devices) < 2:
raise SkipTest("Need at least two devices")
axis_index_groups = [(l.id, r.id)
for l, r in np.asarray(devices).reshape(-1, 2)]
@partial(self.pmap, axis_name='i')
def f(x):
return prim(x, axis_name='i', tiled=tiled,
axis_index_groups=axis_index_groups)
shape = (len(devices), 2 if axis_index_groups else jax.device_count())
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
jtu.check_grads(f, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, eps=1.)
  def testNestedPmapReplicaGroups(self):
    # psum with axis_index_groups inside nested pmaps, covering the inner axis
    # ('i' innermost), an "imperfectly nested" variant, and 'j' innermost.
    if config.pmap_shmap_merge.value:
      raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
    replicas = jax.device_count()
    if replicas % 4 != 0:
      raise SkipTest
    # Split the inner axis into two halves that reduce independently.
    axis_index_groups = np.arange(replicas // 2).reshape(
        2, replicas // 4).tolist()
    f = lambda x: x - lax.psum(x, 'i', axis_index_groups=axis_index_groups)
    f1 = self.pmap(self.pmap(f, 'i'), 'j')
    f2 = self.pmap(lambda x: self.pmap(f, 'i')(x) + 1., 'j')  # "imperfectly nested" case
    f3 = self.pmap(self.pmap(f, 'j'), 'i')
    shape = (2, replicas // 2, 4)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    def sum_helper_f1(a):
      return np.broadcast_to(a.sum(1, keepdims=True),
                             (shape[0], shape[1] // 2, shape[2]))
    expected_psum_1 = sum_helper_f1(x[:, :replicas // 4])
    expected_psum_2 = sum_helper_f1(x[:, replicas // 4:])
    expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 1)
    expected = x - expected_psum
    ans = f1(x)
    self.assertAllClose(ans, expected)
    expected = x - expected_psum + 1.
    ans = f2(x)
    self.assertAllClose(ans, expected)
    # Swap nesting order: 'j' is now the inner axis, so reduce over axis 0.
    shape = (replicas // 2, 2, 4)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    def sum_helper_f3(a):
      return np.broadcast_to(a.sum(0, keepdims=True),
                             (shape[0] // 2, shape[1], shape[2]))
    expected_psum_1 = sum_helper_f3(x[:replicas // 4])
    expected_psum_2 = sum_helper_f3(x[replicas // 4:])
    expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 0)
    expected = x - expected_psum
    ans = f3(x)
    self.assertAllClose(ans, expected)
  def testAxisGroups(self):
    # Unit test for pxla.axis_groups over a (4, 2) 'i' x 'j' axis env:
    # groups along one axis enumerate replicas that share the other axes.
    axis_env = sharding_impls.AxisEnv(8, ('i', 'j'), (4, 2))
    groups = pxla.axis_groups(axis_env, 'i')
    self.assertEqual(groups, ((0, 2, 4, 6), (1, 3, 5, 7)))
    groups = pxla.axis_groups(axis_env, 'j')
    self.assertEqual(groups, ((0, 1), (2, 3), (4, 5), (6, 7)))
    groups = pxla.axis_groups(axis_env, ('i', 'j'))
    self.assertEqual(groups, ((0, 1, 2, 3, 4, 5, 6, 7,),))
    groups = pxla.axis_groups(axis_env, ('j', 'i'))
    self.assertEqual(len(groups), 1)
    self.assertEqual((tuple(sorted(groups[0])),),
                     ((0, 1, 2, 3, 4, 5, 6, 7,),))  # order doesn't matter
  @jtu.run_on_devices("gpu")
  def testCollectiveBroadcast(self):
    # pbroadcast from device 0: every device ends up with device 0's shard.
    device_count = jax.device_count()
    f = lambda x: lax.pbroadcast(x, source=0, axis_name='i')
    f = self.pmap(f, 'i')
    x = jnp.arange(4 * device_count).reshape((device_count, 4))
    ans = f(x)
    expected = np.take(x, [0] * device_count, axis=0)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @jtu.run_on_devices("gpu")
  def testCollectiveBroadcastVmap(self):
    # pbroadcast composed with vmap under pmap still replicates shard 0.
    device_count = jax.device_count()
    f = lambda x: lax.pbroadcast(x, source=0, axis_name='i')
    x = np.arange(device_count * 16, dtype=np.float32)
    x = x.reshape((device_count, 4, 4))
    ans = self.pmap(vmap(f), 'i')(x)
    expected = jnp.broadcast_to(x[0:1], x.shape)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @jtu.run_on_devices("gpu")
  def testCollectiveBroadcastGrad(self):
    # Gradient of pbroadcast: cotangents from all devices sum into the source
    # (device 0); all other entries get zero.
    device_count = jax.device_count()
    f = lambda x: lax.pbroadcast(x, source=0, axis_name='i')
    x = np.arange(device_count, dtype=np.float32)
    ans = self.pmap(grad(f), 'i')(x)
    expected = np.zeros_like(x)
    expected[0] = device_count
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testCollectivePermute(self):
    # Cyclic ppermute (i -> i+1 mod n) is equivalent to np.roll along axis 0.
    device_count = jax.device_count()
    rotation = [(i, (i + 1) % device_count) for i in range(device_count)]
    f = lambda x: lax.ppermute(x, perm=rotation, axis_name='i')
    f = self.pmap(f, 'i')
    x = jnp.arange(4 * device_count).reshape((device_count, 4))
    ans = f(x)
    expected = np.roll(x, shift=1, axis=0)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @jtu.skip_on_devices("cpu")
  def testCollectivePermuteGrad(self):
    # Gradient of a non-cyclic shift-right: cotangents flow backwards, the
    # last device (which sends to no one) gets a zero gradient.
    device_count = jax.device_count()
    shift_right = [(i, (i + 1)) for i in range(device_count - 1)]
    f = lambda x: lax.ppermute(x, perm=shift_right, axis_name='i')
    y = np.pi + np.arange(device_count, dtype=np.float32)
    g = lambda x: jnp.sum(y * self.pmap(f, 'i')(x))
    x = np.arange(device_count, dtype=np.float32)
    ans = grad(g)(x)
    expected = np.concatenate([np.pi + np.arange(1, device_count), [0]])
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testCollectivePermuteCyclicGrad(self):
    # Gradient of a cyclic shift-right is the inverse permutation (roll by -1);
    # also checked numerically to second order.
    device_count = jax.device_count()
    shift_right = [(i, (i + 1) % device_count) for i in range(device_count)]
    f = lambda x: lax.ppermute(x, perm=shift_right, axis_name='i')
    y = np.pi + np.arange(device_count, dtype=np.float32)
    g = lambda x: jnp.sum(y * self.pmap(f, 'i')(x))
    x = np.arange(device_count, dtype=np.float32)
    ans = grad(g)(x)
    expected = np.roll(np.pi + np.arange(device_count), -1)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(g, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2)
  def testCollectivePermuteCyclicWithPShuffle(self):
    # pshuffle takes source indices (device d reads from perm[d]); a cyclic
    # shift expressed this way matches np.roll by +1.
    device_count = jax.device_count()
    values = np.arange(device_count)
    shift_right = [(i - 1) % device_count for i in range(device_count)]
    f = lambda x: lax.pshuffle(x, perm=shift_right, axis_name='i')
    expected = np.roll(values, 1)
    ans = np.asarray(self.pmap(f, "i")(values))
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testPShuffleWithBadPerm(self):
    # pshuffle must reject a perm that is not a permutation (duplicate source).
    device_count = jax.device_count()
    bad_perm = list(range(device_count))
    bad_perm[0] = 1  # index 1 now appears twice
    f = lambda x: lax.pshuffle(x, perm=bad_perm, axis_name='i')
    g = lambda: self.pmap(f, "i")(np.arange(device_count))
    self.assertRaisesRegex(
      ValueError,
      "`perm` does not represent a permutation: \\[1.*\\]", g)
  def testPpermuteWithZipObject(self):
    # ppermute should accept a zip object (not just a list of pairs) as perm.
    # https://github.com/jax-ml/jax/issues/1703
    num_devices = jax.device_count()
    perm = [num_devices - 1] + list(range(num_devices - 1))
    f = self.pmap(lambda x: lax.ppermute(x, "i", zip(perm, range(num_devices))), "i")
    result = f(jnp.arange(num_devices, dtype=jnp.float32))
    expected = jnp.asarray(perm, dtype=jnp.float32)
    self.assertAllClose(result, expected)
  def testRule30(self):
    # This is a test of collective_permute implementing a simple halo exchange
    # to run a rule 30 simulation: https://en.wikipedia.org/wiki/Rule_30
    # Halo exchange should be useful in spatially-sharded convolutions and in
    # other simulations.
    device_count = jax.device_count()
    def send_right(x, axis_name):
      # Cyclic permutation i -> i+1.
      left_perm = [(i, (i + 1) % device_count) for i in range(device_count)]
      return lax.ppermute(x, perm=left_perm, axis_name=axis_name)
    def send_left(x, axis_name):
      # Cyclic permutation i+1 -> i.
      left_perm = [((i + 1) % device_count, i) for i in range(device_count)]
      return lax.ppermute(x, perm=left_perm, axis_name=axis_name)
    def update_board(board):
      # Rule 30 update on the interior cells: left XOR (center OR right).
      left = board[:-2]
      right = board[2:]
      center = board[1:-1]
      return lax.bitwise_xor(left, lax.bitwise_or(center, right))
    @partial(self.pmap, axis_name='i')
    def step(board_slice):
      # Exchange one-cell halos with the neighboring devices, then update.
      left, right = board_slice[:1], board_slice[-1:]
      right, left = send_left(left, 'i'), send_right(right, 'i')
      enlarged_board_slice = jnp.concatenate([left, board_slice, right])
      return update_board(enlarged_board_slice)
    board = np.zeros(40, dtype=bool)
    board[board.shape[0] // 2] = True  # single live cell in the middle
    reshaped_board = board.reshape((device_count, -1))
    boards = []
    def print_board(board):
      boards.append(''.join('*' if x else ' ' for x in board.ravel()))
    print_board(reshaped_board)
    for _ in range(9):
      reshaped_board = step(reshaped_board)
      print_board(reshaped_board)
    ans = '\n'.join(boards)
    expected = '\n'.join((
        ' * ',
        ' *** ',
        ' ** * ',
        ' ** **** ',
        ' ** * * ',
        ' ** **** *** ',
        ' ** * * * ',
        ' ** **** ****** ',
        ' ** * *** * ',
        ' ** **** ** * *** ',
    ))
    print(ans)
    self.assertEqual(ans, expected)
  def testReduceMax(self):
    # pmax across the mapped axis matches np.max over axis 0.
    f = self.pmap(lambda x: x - lax.pmax(x, 'i'), axis_name='i')
    shape = (jax.device_count(), 4)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    expected = x - np.max(x, 0)
    ans = f(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testReduceMin(self):
    # pmin across the mapped axis matches np.min over axis 0.
    f = self.pmap(lambda x: x - lax.pmin(x, 'i'), axis_name='i')
    shape = (jax.device_count(), 4)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    expected = x - np.min(x, 0)
    ans = f(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testDeviceCountError(self):
    # pmap over a leading axis larger than the device count must fail,
    # for 1-D inputs, 2-D inputs, and the nested-pmap case.
    device_count = jax.device_count()
    # NOTE(dsuo): The error message is different depending on the version of
    # this test.
    if config.pmap_shmap_merge.value:
      expected_regex = r".*"
    else:
      expected_regex = r".*requires.*replicas"
    f = self.pmap(lambda x: 2 * x)
    x = jnp.arange(device_count + 1)
    self.assertRaisesRegex(ValueError, expected_regex, lambda: f(x))
    f = self.pmap(lambda x: 2 * x)
    x = np.ones((device_count + 1, 10))
    self.assertRaisesRegex(ValueError, expected_regex, lambda: f(x))
    f = self.pmap(lambda x: self.pmap(lambda x: 2 * x)(x))
    x = np.ones((device_count, 2, 10))
    self.assertRaisesRegex(ValueError, expected_regex, lambda: f(x))
  def testPmapConstant(self):
    # Closing over a constant array inside a pmapped function, and checking
    # that recompilation does not happen on repeated/equivalent calls.
    device_count = jax.device_count()
    const = jnp.arange(16, dtype=np.int32)  # distinctive shape
    f = self.pmap(lambda x: x + const[15])
    x = jnp.arange(device_count, dtype=np.int32)
    expected = x + np.int32(15)
    ans = f(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
    if not config.disable_jit.value:
      self.assertCacheMisses(lambda: f(x),
                             compilation_after_persistent_cache_miss=0)
    if not config.disable_jit.value:
      # A fresh pmap of an equivalent function should compile exactly once.
      f = self.pmap(lambda x: x + const[15])
      x = np.arange(device_count, dtype=np.int32)
      with jtu.assert_num_jit_and_pmap_compilations(1):
        ans = f(x)
      self.assertAllClose(ans, expected, check_dtypes=False)
  def testPmapConstantDevices(self):
    # pmap of a constant-returning function over an explicit (shuffled) device
    # subset, checking both the value and the replication across those devices.
    if jax.device_count() == 1:
      raise SkipTest("this test requires multiple devices")
    devices = jax.devices()[:-1]
    shuffle(devices)
    f = self.pmap(lambda x: 3, devices=devices)
    x = jnp.arange(len(devices))
    with jtu.count_jit_and_pmap_lowerings() as count:  # noqa: F841
      ans = f(x)
    # self.assertEqual(count(), 0)  # TODO(mattjj): don't compile for constants
    expected = np.repeat(3, len(devices))
    self.assertAllClose(ans, expected, check_dtypes=False)
    # Test that 'ans' was properly replicated across devices.
    ans_devices = ans.sharding._device_assignment
    # TODO(mattjj,sharadmv): fix physical layout with eager pmap, remove 'if'
    if not config.disable_jit.value:
      self.assertEqual(ans_devices, tuple(devices))
  def testPmapConstantError(self):
    # Oversized input to a constant-returning pmap must raise; the message
    # differs between the shmap-merge and classic implementations/backends.
    device_count = jax.device_count()
    f = self.pmap(lambda x: 3)
    x = jnp.arange(device_count + 1)
    if config.pmap_shmap_merge.value:
      expected_regex = [
          # NOTE(dsuo): We get different error messages depending on backend.
          r'shard_map applied.*axis sizes.*not evenly divisible.*mesh axis sizes.*',
          r'cannot select an axis to squeeze out which has size not equal to one.*',
          r'Sharding.*implies that array.*but the dimension size is.*',
      ]
      expected_regex = '|'.join(expected_regex)
    else:
      expected_regex = r'compiling computation that requires \d+ logical devices, but only \d+ XLA devices are available .*'
    self.assertRaisesRegex(ValueError, expected_regex, lambda: f(x))
    # TODO(mattjj): test error message with explicit devices
    # f = pmap(lambda x: 3, devices=[jax.devices()[0]])
    # x = jnp.arange(2)
    # self.assertRaisesRegex(
    #     ValueError, r"Cannot replicate across \d+ replicas because only \d+ "
    #     r"local devices are available.", lambda: f(x))
def testNestedPmapConstant(self):
if jax.device_count() == 1:
raise SkipTest("this test requires multiple devices")
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
f = self.pmap(self.pmap(lambda x: 3))
shape = (2, jax.device_count() // 2, 3)
x = jnp.arange(math.prod(shape)).reshape(shape)
with jtu.count_jit_and_pmap_lowerings() as count: # noqa: F841
ans = f(x)
# self.assertEqual(count(), 0) # TODO(mattjj): don't compile for constants
expected = 3 * np.ones(shape[:2])
self.assertAllClose(ans, expected, check_dtypes=False)
# Test that 'ans' was properly replicated across devices.
expected_sharded = self.pmap(self.pmap(lambda x: x))(expected)
self.assertTrue(ans.sharding._device_assignment,
expected_sharded.sharding._device_assignment)
f = self.pmap(self.pmap(lambda x: (x, 3)))
x_sharded, ans = f(x)
self.assertEqual(ans.sharding._device_assignment,
x_sharded.sharding._device_assignment)
  @unittest.skip("Nested pmaps with devices not yet implemented")
  def testNestedPmapConstantDevices(self):
    # Nested pmap of a constant with an explicit (shuffled) device subset.
    if jax.device_count() < 6:
      raise SkipTest("this test requires >= 6 devices")
    if config.pmap_shmap_merge.value:
      raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
    devices = jax.devices()[:-2]
    shuffle(devices)
    f = self.pmap(self.pmap(lambda x: 3), devices=devices)
    shape = (2, len(devices) // 2, 3)
    x = jnp.arange(math.prod(shape)).reshape(shape)
    with jtu.count_jit_and_pmap_lowerings() as count:  # noqa: F841
      ans = f(x)
    # self.assertEqual(count(), 0)  # TODO(mattjj): don't compile for constants
    expected = 3 * np.ones(shape[:2])
    self.assertAllClose(ans, expected, check_dtypes=False)
    # Test that 'ans' was properly replicated across devices.
    expected_sharded = self.pmap(self.pmap(lambda x: x), devices=devices)(expected)
    self.assertTrue(ans.sharding == expected_sharded.sharding)
  def testNestedPmapConstantError(self):
    # Nested pmap whose combined axis sizes exceed the device count must fail.
    if config.disable_jit.value:
      raise SkipTest("error test doesn't apply with disable_jit")
    if config.pmap_shmap_merge.value:
      raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
    f = self.pmap(self.pmap(lambda x: 3))
    shape = (2, jax.device_count() // 2 + 1, 3)
    x = jnp.arange(math.prod(shape)).reshape(shape)
    self.assertRaisesRegex(
        ValueError,
        (r"compiling computation that requires \d+ logical devices, "
         r"but only \d+ XLA devices are available .*"),
        lambda: f(x))
    # TODO(mattjj): check error message with explicit devices
    # if jax.device_count() > 1:
    #   f = pmap(pmap(lambda x: 3), devices=jax.devices()[:-1])
    #   shape = (2, jax.device_count() // 2, 3)
    #   x = jnp.arange(math.prod(shape)).reshape(shape)
    #   self.assertRaisesRegex(
    #       ValueError,
    #       (r"compiling computation that requires \d+ replicas, "
    #        r"but only \d+ XLA devices are available"),
    #       lambda: f(x))
  def testCollectiveConstant(self):
    # lax.axis_size inside pmap returns the mapped axis size on every device.
    device_count = jax.device_count()
    f = self.pmap(lambda x: lax.axis_size('i'), 'i')
    x = jnp.arange(device_count)
    ans = f(x)
    expected = np.repeat(device_count, device_count)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testCollectiveConstantNested(self):
    # lax.axis_size for the outer axis, inner axis, and the combined ('i','j')
    # axes inside nested pmaps.
    if config.pmap_shmap_merge.value:
      raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
    device_count = jax.device_count()
    @partial(self.pmap, axis_name='i')
    def f(x):
      @partial(self.pmap, axis_name='j')
      def g(y):
        a = lax.axis_size('i')
        b = lax.axis_size('j')
        c = lax.axis_size(('i', 'j'))
        return a, b, c
      return g(x)
    shape = (device_count, 1, 4)
    x = jnp.arange(math.prod(shape)).reshape(shape)
    a, b, c = f(x)
    self.assertEqual(a.shape, shape[:-1])
    self.assertEqual(b.shape, shape[:-1])
    self.assertEqual(c.shape, shape[:-1])
    self.assertEqual(a.ravel()[0], device_count)
    self.assertEqual(b.ravel()[0], 1)
    self.assertEqual(c.ravel()[0], device_count * 1)
  def testAxisIndex(self):
    # lax.axis_index inside pmap yields each device's position on the axis.
    device_count = jax.device_count()
    f = self.pmap(lambda x: x + lax.axis_index('i'), 'i')
    x = jnp.ones(device_count, dtype='int32')
    ans = f(x)
    expected = 1 + np.arange(device_count)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testAxisIndexNestedPmap(self):
    # axis_index resolves to the correct (inner vs outer) axis in nested pmaps.
    if config.pmap_shmap_merge.value:
      raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
    device_count = jax.device_count()
    if device_count < 4:
      raise SkipTest("test requires at least four devices")
    f = lambda axis: self.pmap(self.pmap(lambda x: x + lax.axis_index(axis), 'j'), 'i')
    x = jnp.ones((2, 2), dtype='int32')
    expected_j = np.broadcast_to(1 + np.arange(2), (2, 2))
    self.assertAllClose(f('j')(x), expected_j, check_dtypes=False)
    # Indexing the outer axis transposes the pattern.
    self.assertAllClose(f('i')(x), expected_j.T, check_dtypes=False)
  def testAxisIndexNd(self):
    # axis_index over a tuple of axes gives a linearized index whose ordering
    # depends on the axis-name order.
    if config.pmap_shmap_merge.value:
      raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
    device_count = jax.device_count()
    if device_count < 4:
      raise SkipTest("test requires at least four devices")
    f = lambda axes: self.pmap(self.pmap(lambda x: x + lax.axis_index(axes), 'j'), 'i')
    x = jnp.ones((2, 2), dtype='int32')
    expected = 1 + np.arange(4).reshape((2, 2))
    self.assertAllClose(f(('i', 'j'))(x), expected, check_dtypes=False)
    self.assertAllClose(f(('j', 'i'))(x), expected.T, check_dtypes=False)
  def testAxisIndexInInitialStyle(self):
    # axis_index works inside an initial-style primitive (lax.scan) under pmap.
    @partial(self.pmap, axis_name='i')
    def f(x):
      def body(carry, i):
        return carry + i + lax.axis_index('i'), None
      return lax.scan(body, 0, x)[0]
    device_count = jax.device_count()
    shape = (device_count, 10)
    self.assertAllClose(f(jnp.ones(shape, dtype='int32')),
                        (jnp.arange(device_count, dtype='int32') + 1) * 10)
  def testVmapOfPmap(self):
    # vmap over a pmapped identity is still the identity.
    device_count = jax.device_count()
    f0 = lambda x: x
    f1 = self.pmap(f0, axis_name='i')
    ax = self.rng().randn(2, device_count, 50, 60)
    bx = vmap(f1)(ax)
    self.assertAllClose(ax, bx, check_dtypes=False)
  def testVmapOfPmap2(self):
    # vmap of pmap where the vmapped function broadcasts PRNG keys to the
    # device axis inside; mainly a doesn't-crash / output-shape check.
    N_DEVICES = jax.device_count()
    keys = random.split(random.PRNGKey(1), 13)  # [13, 2]
    @self.pmap
    def g(key):
      _ = random.normal(key, ())
      return 0.
    @vmap
    def s(keys):
      keys = jax.tree.map(
          lambda x: jnp.broadcast_to(x, (N_DEVICES,) + x.shape),
          keys)
      return g(keys)
    ans = s(keys)  # doesn't crash
    self.assertEqual(ans.shape, (13, N_DEVICES))
  def testVmapOfPmap3(self):
    # Regression test: vmap of a function containing pmap(in_axes=(0, None))
    # must agree with the lax.map version.
    # https://github.com/jax-ml/jax/issues/3399
    device_count = jax.device_count()
    if device_count < 2:
      raise SkipTest("test requires at least two devices")
    def map_version(qs, pts):
      return jax.lax.map(lambda x: func(x, pts), qs)
    def vmap_version(qs, pts):
      return jax.vmap(func, in_axes=(0, None))(qs, pts)
    def func(q, pts):
      # Round-trip q through a pmap that broadcasts it alongside pts.
      q_from_pmap = self.pmap(lambda x, y: y, in_axes=(0, None))(pts, q)
      return q, q_from_pmap
    pts = jnp.ones(device_count)
    qs = jnp.asarray(((0,0), (3,3), (2,2)))
    with ignore_jit_of_pmap_warning():
      _, expected = map_version(qs, pts)
    _, ans = vmap_version(qs, pts)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testVmapOfPmapNonLeadingAxis(self):
    # vmap over a non-leading axis (in_axes=out_axes=2) of a pmapped identity.
    device_count = jax.device_count()
    f0 = lambda x: x
    f1 = self.pmap(f0, axis_name='i')
    ax = self.rng().randn(device_count, 2, 50, 60)
    bx = vmap(f1, in_axes=2, out_axes=2)(ax)
    self.assertAllClose(ax, bx, check_dtypes=False)
def testVmapOfPmapTuple(self):
device_count = jax.device_count()
f0 = lambda *x: x
f1 = self.pmap(f0, axis_name='i')
ax = self.rng().randn(device_count, 2, 50, 60)
ay = self.rng().randn(device_count, 30, 2)
az1 = self.rng().randn(device_count, 20)
az2 = self.rng().randn(2, device_count, 20)
bx, by, bz = vmap(f1, in_axes=(1, 2, (None, 0)), out_axes=(1, 2, 0))(ax, ay, (az1, az2))
self.assertAllClose(ax, bx, check_dtypes=False)
self.assertAllClose(ay, by, check_dtypes=False)
bz1, bz2 = bz
expected_bz1 = np.broadcast_to(az1, (2,) + az1.shape)
self.assertAllClose(expected_bz1, bz1, check_dtypes=False)
self.assertAllClose(bz2, bz2, check_dtypes=False)
  def testPswapaxes(self):
    # pswapaxes exchanges the mapped axis with a local axis, equivalent to
    # np.swapaxes(x, 0, 2) on the unmapped array.
    device_count = jax.device_count()
    shape = (device_count, 3, device_count, 5)
    x = np.arange(math.prod(shape)).reshape(shape)
    ans = self.pmap(lambda x: lax.pswapaxes(x, 'i', 1), axis_name='i')(x)
    expected = np.swapaxes(x, 0, 2)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testGradOfPswapaxes(self):
    # Gradient of a pswapaxes-based reduction: the cotangent is the weight
    # vector routed back through the inverse swap, i.e. w tiled per device.
    device_count = jax.device_count()
    shape = (device_count, 1, device_count)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    w = np.arange(device_count, dtype=np.float32)
    @partial(self.pmap, axis_name='i')
    def f(x, w):
      g = lambda x: jnp.sum(lax.pswapaxes(x, 'i', 1) * w)
      return grad(g)(x)
    ans = f(x, w)
    expected = np.tile(w, reps=device_count).reshape(shape)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testAllToAllReplicaGroups(self):
    # If num_devices = 4, these would be the inputs/outputs:
    # input = [[0, 1], [2, 3], [4, 5], [6, 7]]
    # axis_index_groups = [[0, 2], [1, 3]]
    # output = [[0, 4], [2, 6], [1, 5], [3, 7]]
    #
    # This is essentially like splitting the number of rows in the input in two
    # groups of rows, and swapping the two inner axes (axis=1 and axis=2), which
    # is exactly what the test case checks.
    device_count = jax.device_count()
    if device_count % 2 != 0:
      raise SkipTest('test requires an even number of devices')
    shape = (device_count, device_count // 2)
    x = np.arange(math.prod(shape)).reshape(shape)
    # Groups are the columns of a (device_count//2, 2) arrangement.
    axis_index_groups = np.arange(device_count, dtype=np.int32)
    axis_index_groups = axis_index_groups.reshape((device_count // 2, 2)).T
    axis_index_groups = axis_index_groups.tolist()
    @partial(self.pmap, axis_name='i')
    def fn(x):
      return lax.all_to_all(x, 'i', 0, 0, axis_index_groups=axis_index_groups)
    expected = np.swapaxes(
        x.reshape((device_count // 2, 2, device_count // 2)),
        0, 2).reshape(shape)
    self.assertAllClose(fn(x), expected, check_dtypes=False)
  def testGradOfAllToAllReplicaGroups(self):
    # Gradient of a grouped all_to_all-based reduction: cotangents are the
    # weights routed back through the inverse all_to_all within each group.
    device_count = jax.device_count()
    if device_count % 2 != 0:
      raise SkipTest('test requires an even number of devices')
    shape = (device_count, device_count // 2, 1)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    w = np.arange(device_count, dtype=np.float32)
    axis_index_groups = np.arange(device_count, dtype=np.int32)
    axis_index_groups = axis_index_groups.reshape((2, device_count // 2))
    axis_index_groups = axis_index_groups.tolist()
    @partial(self.pmap, axis_name='i')
    def fn(x, w):
      g = lambda x: jnp.sum(lax.all_to_all(x, 'i', 0, 1, axis_index_groups=axis_index_groups) * w)
      return grad(g)(x)
    expected = np.ones_like(x) * w[:, np.newaxis, np.newaxis]
    expected = np.swapaxes(
        expected.reshape((2, device_count // 2, device_count // 2)),
        1, 2).reshape(shape)
    self.assertAllClose(fn(x, w), expected, check_dtypes=False)
  def testArrayBlockUntilReady(self):
    # block_until_ready on a pmap output must not crash.
    x = np.arange(jax.device_count())
    x = self.pmap(lambda x: x)(x)
    x.block_until_ready()  # doesn't crash
  @ignore_jit_of_pmap_warning()
  def testJitPmapComposition(self):
    # jit(pmap(f)) and pmap(jit(f)) both agree with the reference computation.
    f = lambda x: x - lax.psum(x, 'i')
    shape = (jax.device_count(), 4)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    expected = x - np.sum(x, 0)
    ans = jit(self.pmap(f, 'i'))(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
    ans = self.pmap(jit(f), 'i')(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testCompositionWithJitTwice(self):
    # pmap closing over a jit-traced intermediate, nested under two jits;
    # doesn't-crash regression check.
    @jit
    def f(x):
      y = jnp.float32(2) * x
      @jit
      def g(z):
        return self.pmap(lambda x: x[jnp.newaxis] * y)(z)
      return g(x)
    f(np.arange(1., dtype='float32').reshape((1, 1)))  # doesn't crash
  @ignore_jit_of_pmap_warning()
  def testIssue1065(self):
    # pmapped-and-jitted function called inside lax.fori_loop; doesn't-crash
    # regression check.
    # from https://github.com/jax-ml/jax/issues/1065
    device_count = jax.device_count()
    def multi_step_pmap(state, count):
      @partial(self.pmap, axis_name='x')
      @jit
      def exchange_and_multi_step(state):
        return state
      @jit
      def time_evolution(state):
        return lax.fori_loop(0, count, lambda i, s: exchange_and_multi_step(s), state)
      return time_evolution(state)
    multi_step_pmap(jnp.zeros((device_count,)), count=1)
  @jtu.ignore_warning(category=DeprecationWarning)
  def test_typed_prng_key_sharded(self):
    # pmap over typed (new-style) PRNG keys, both sharded and replicated;
    # doesn't-crash check.
    devices = jax.local_devices()
    @partial(jax.pmap, in_axes=0, out_axes=0, axis_size=len(devices),
             axis_name='i', devices=devices)
    def fn(key):
      return jax.random.fold_in(key, 0)
    sharded_key = jax.random.split(jax.random.key(0), len(devices))
    replicated_key = jax.random.key(1)
    sharded_key = jax.device_put_sharded(jnp.unstack(sharded_key), devices)
    replicated_key = jax.device_put_replicated(replicated_key, devices)
    fn(sharded_key)
    fn(replicated_key)
  def testArrayGetItem(self):
    # Indexing into a pmap output array works and yields the expected shard.
    f = lambda x: 2 * x
    f = self.pmap(f, axis_name='i')
    shape = (jax.device_count(), 4)
    x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
    y = f(x)
    self.assertIsInstance(y, jax.Array)
    self.assertIsInstance(y, array.ArrayImpl)
    z = y[0]  # doesn't crash
    self.assertAllClose(z, 2 * x[0], check_dtypes=False)
  # TODO(mattjj): this fails with multiple devices (unless we add a jit)
  # because we assume eager ops (like scan here) can't require more than 1
  # replica.
  @unittest.skip("need eager multi-replica support")
  def testPostProcessMap(self):
    # Iterated pmapped matrix-vector product inside lax.scan, compared against
    # np.linalg.matrix_power; currently skipped (see TODO above).
    # test came from https://github.com/jax-ml/jax/issues/1369
    nrep = jax.device_count()
    def pmvm(a, b):
      a = a.reshape((nrep, -1, a.shape[1]))
      func = self.pmap(lambda z: jnp.dot(z, b))
      return func(a).reshape(b.shape)
    n = nrep * 2
    rng = self.rng()
    a = rng.randn(n, n)
    b = rng.randn(n)
    iters = jnp.arange(5)
    def body(carry, i):
      return pmvm(a, carry), i
    ans, _ = lax.scan(body, b, iters)
    expected = np.linalg.matrix_power(a, 5).dot(b)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testManyArgs(self):
    # pmap with a very large (500-element) pytree argument.
    @self.pmap
    def f(args_list):
      return sum(args_list)
    vals = list(range(500))
    ndevices = jax.device_count()
    self.assertAllClose(f([np.array([i] * ndevices) for i in range(500)]),
                        jnp.array([sum(vals)] * ndevices))
  @jax.default_matmul_precision("float32")
  def testPostProcessMap2(self):
    # vmap over a function that internally reshapes and pmaps a matrix-vector
    # product, compared against an einsum reference.
    # code from https://github.com/jax-ml/jax/issues/2787
    def vv(x, y):
      """Vector-vector multiply"""
      return jnp.dot(x, y)
    def distributed_matrix_vector(x, y):
      """Matrix vector multiply. First batch it and then row by row"""
      fv = lambda z: lax.map(lambda j: vv(j, y), z)
      res = self.pmap(fv)(x.reshape((jax.device_count(), -1) + tuple(x.shape[1:])))
      res = res.reshape(res.shape[0] * res.shape[1], *res.shape[2:])
      return res
    key = lambda: random.PRNGKey(1)
    x = random.normal(key(), (80, 50))
    batched_mvm = vmap(lambda b: distributed_matrix_vector(x, b), in_axes=0)
    y = random.normal(key(), (10, 50, 1))
    result = batched_mvm(y)
    expected = jnp.einsum('ij,njk->nik', x, y)
    self.assertAllClose(result, expected, check_dtypes=False, atol=1e-3,
                        rtol=1e-3)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', jax.remat),
          ('_new', new_checkpoint),
      ])
  def testAxisIndexRemat(self, remat):
    # axis_index must work inside a remat-wrapped function under pmap;
    # doesn't-crash check for both remat implementations.
    # https://github.com/jax-ml/jax/issues/2716
    n = len(jax.devices())
    def f(key):
      key = random.fold_in(key, jax.lax.axis_index('i'))
      return random.bernoulli(key, p=0.5)
    keys = random.split(random.PRNGKey(0), n)
    self.pmap(remat(f), axis_name='i')(keys)
  def testPmapMapVmapCombinations(self):
    # All compositions of {vmap, lax.map} with an optionally-pmapped
    # matrix-vector product agree with each other.
    # https://github.com/jax-ml/jax/issues/2822
    def vv(x, y):
      """Vector-vector multiply"""
      return jnp.dot(x, y)
    def matrix_vector(x, y, parallel=True):
      """Matrix vector multiply. First batch it and then row by row"""
      fv = lambda z: lax.map(lambda j: vv(j, y), z)
      if parallel:
        # split leading axis in two
        new_x = x.reshape((jax.device_count(), -1, *x.shape[1:]))
        # apply map
        new_res = self.pmap(fv)(new_x)
        # reshape back out
        res = new_res.reshape(x.shape[0], *new_res.shape[2:])
      else:
        res = fv(x)
      return res
    x = random.normal(random.PRNGKey(1), (40, 5))
    y = random.normal(random.PRNGKey(1), (5, 5))
    result1 = vmap(lambda b: matrix_vector(x, b, True))(y)       # vmap + pmap
    result2 = lax.map(lambda b: matrix_vector(x, b, False), y)   # map + map
    with ignore_jit_of_pmap_warning():
      result3 = lax.map(lambda b: matrix_vector(x, b, True), y)  # map + pmap
    result4 = jnp.stack([matrix_vector(x, b, False) for b in y]) # none + map
    self.assertAllClose(result1, result2, check_dtypes=False, atol=1e-3, rtol=1e-3)
    self.assertAllClose(result1, result3, check_dtypes=False, atol=1e-3, rtol=1e-3)
    self.assertAllClose(result1, result4, check_dtypes=False, atol=1e-3, rtol=1e-3)
  def testPmapAxisNameError(self):
    # psum over an axis name not bound by any enclosing pmap raises NameError.
    # https://github.com/jax-ml/jax/issues/3120
    a = np.arange(4)[np.newaxis,:]
    def test(x):
      return jax.lax.psum(x, axis_name='batch')
    with self.assertRaisesRegex(NameError, "unbound axis name: batch"):
      self.pmap(test)(a)
  def testPsumOnBooleanDtype(self):
    # psum/pmean on boolean inputs: booleans are promoted to numeric before
    # reducing rather than failing.
    # https://github.com/jax-ml/jax/issues/3123
    n = jax.device_count()
    if n > 1:
      x = jnp.array([True, False])
      out = self.pmap(lambda x: jax.lax.psum(x, 'i'), 'i')(x)
      self.assertEqual(list(out), [1, 1])
      out = self.pmap(lambda x: jax.lax.pmean(x, 'i'), 'i')(x)
      self.assertEqual(list(out), [1/2, 1/2])
    else:
      x = jnp.array([True])
      out = self.pmap(lambda x: jax.lax.psum(x, 'i'), 'i')(x)
      self.assertEqual(list(out), [1])
      out = self.pmap(lambda x: jax.lax.pmean(x, 'i'), 'i')(x)
      self.assertEqual(list(out), [1])
  def testPsumWithNoAxisDoesntLeakFunctions(self):
    # The pmap compilation cache must not keep the traced function alive
    # after the caller drops its references.
    if config.pmap_shmap_merge.value:
      raise SkipTest("shmap implementation holds an additional weakref.")
    x = jnp.ones((1, 1024), dtype=np.float32)
    f = lambda _: x
    w = weakref.ref(f)
    g = self.pmap(f)
    g(np.ones((1,), dtype=np.float32)).block_until_ready()
    del f, g
    gc.collect()
    # 'f' should not be alive at this point; in particular the pmap cache must
    # not keep it alive.
    self.assertIs(w(), None)
  def testJitOfPmapWarningMessage(self):
    # jit of pmap warns about inefficient data movement, naming the function.
    if config.pmap_shmap_merge.value:
      raise SkipTest("Test does not warn under `pmap_shmap_merge=True`.")
    device_count = jax.device_count()
    if device_count == 1 or config.disable_jit.value:
      raise SkipTest("test requires at least two devices")
    def foo(x): return x
    with self.assertWarnsRegex(UserWarning, "The function jit.foo. includes a pmap"):
      jit(self.pmap(foo))(jnp.arange(device_count))
  def testJitOfPmapOutputSharding(self):
    # Repeatedly feeding a jit-of-pmap output back in must not crash.
    device_count = jax.device_count()
    if device_count == 1 or config.disable_jit.value:
      raise SkipTest("test requires at least two devices")
    @jax.jit
    @jax.pmap
    def foo(x):
      return x + x
    x = np.ones((2,2,2), dtype=np.float32)
    for _ in range(10):
      # Does not crash.
      with jtu.ignore_warning(
          message=".*Using jit-of-pmap can lead to inefficient data movement"):
        x = foo(x)
  @jtu.ignore_warning(
      message=".*Using jit-of-pmap can lead to inefficient data movement")
  def testJitOfPmapLowerHasReplicaAttributes(self):
    # Lowered StableHLO carries the expected replica/partition attributes:
    # shmap-merge uses partitions, classic pmap uses replicas.
    device_count = jax.device_count()
    if device_count == 1 or config.disable_jit.value:
      raise SkipTest("test requires at least two devices")
    @jax.jit
    @jax.pmap
    def foo(x):
      return x + x
    x = np.ones((2,2,2), dtype=np.float32)
    hlo = foo.lower(x).as_text("stablehlo")
    if config.pmap_shmap_merge.value:
      self.assertIn("mhlo.num_replicas = 1", hlo)
      self.assertIn("mhlo.num_partitions = 2", hlo)
    else:
      self.assertIn(f"mhlo.num_replicas = {2}", hlo)
      self.assertIn("mhlo.num_partitions = 1", hlo)
  def testPsumZeroCotangents(self):
    # psum of a grad with symbolically-zero cotangent components must not
    # crash; gradients also checked numerically.
    # https://github.com/jax-ml/jax/issues/3651
    def loss(params, meta_params):
      (net, mpo) = params
      return meta_params * mpo * net
    def inner(meta_params, params):
      grads = jax.grad(loss)(params, meta_params)
      grads = lax.psum(grads, axis_name="i")
      net_grads, mpo_grads = grads
      net = params[0] + net_grads
      mpo = params[1]
      return mpo * net
    def outer(params):
      meta_params = jnp.array(4.0)
      return jax.grad(inner)(meta_params, params)
    params = (jnp.array([2.0]), jnp.array([3.0]))
    self.pmap(outer, axis_name='i')(params)  # doesn't crash
    f = self.pmap(outer, axis_name='i')
    jtu.check_grads(f, (params,), 2, ["fwd", "rev"], 1e-3, 1e-3)
  @ignore_jit_of_pmap_warning()
  def test_issue_1062(self):
    # code from https://github.com/jax-ml/jax/issues/1062 @shoyer
    # this tests, among other things, whether ShardedDeviceTuple constants work
    device_count = jax.device_count()
    @jit
    def multi_step(state, count):
      return lax.fori_loop(0, count, lambda i, s: s, state)
    @jit
    def multi_step_pmap(state, count=2):
      @partial(self.pmap, axis_name='x')
      def pmapped_multi_step(state):
        return multi_step(state, count)
      return pmapped_multi_step(state)
    u = np.ones((device_count, 100))
    multi_step_pmap(u)  # doesn't crash
  @jtu.skip_on_devices("cpu")
  def test_replicate_backend(self):
    # Nested pmap with an explicit backend='cpu'; doesn't-crash check.
    if config.pmap_shmap_merge.value:
      raise SkipTest("Ignore nested pmaps under `pmap_shmap_merge=True`.")
    # TODO(skye): fix backend caching so we always have multiple CPUs available
    if jax.device_count("cpu") < 4:
      self.skipTest("test requires 4 CPU device")
    # https://github.com/jax-ml/jax/issues/4223
    def fn(indices):
      return jnp.equal(indices, jnp.arange(3)).astype(jnp.float32)
    mapped_fn = self.pmap(fn, axis_name='i', backend='cpu')
    mapped_fn = self.pmap(mapped_fn, axis_name='j', backend='cpu')
    indices = np.array([[[2], [1]], [[0], [0]]])
    mapped_fn(indices)  # doesn't crash
  @parameterized.named_parameters(
      {"testcase_name": "_shape={}_axis={}_collective={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          axis, collective.__name__.replace(" ", "")),
       "shape": shape, "dtype": dtype, "axis": axis,
       "collective": collective, "bulk_op": bulk_op}
      for collective, bulk_op in [
          (parallel.pargmax, jnp.argmax),
          (parallel.pargmin, jnp.argmin)
      ]
      for dtype in [np.float32, np.int32]
      for shape in [(4,), (2, 2), (2, 4), (4, 2)]
      for axis in range(len(shape))
  )
  def testArgAllReduce(self, shape, dtype, axis, collective, bulk_op):
    # pargmax/pargmin across the mapped axis match jnp.argmax/argmin on the
    # corresponding local axis, for several shapes, dtypes, and axes.
    if jax.device_count() < shape[axis]:
      raise SkipTest(f"test requires at least {shape[axis]} devices")
    if (jtu.test_device_matches(['cpu']) and
        np.issubdtype(dtype, np.floating) and
        len(shape) > 1):
      raise SkipTest("skipped on cpu due to strange failures")  # TODO(mattjj)
    rng = jtu.rand_default(self.rng())
    x = rng(shape, dtype)
    ans = self.pmap(lambda x: collective(x, 'i'), in_axes=axis, out_axes=None,
                    axis_name='i')(x)
    expected = bulk_op(x, axis=axis)
    self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_dtype={}".format(
jtu.format_shape_dtype_string((), dtype)),
"dtype": dtype}
for dtype in [np.float32, np.int32]
)
def testPmapDtype(self, dtype):
# Regression test for https://github.com/jax-ml/jax/issues/6022
@partial(self.pmap, axis_name='i')
def func(_):
return jax.lax.psum(dtype(0), axis_name='i')
unused_arg = jnp.arange(jax.device_count())
out_dtype = func(unused_arg).dtype
self.assertEqual(out_dtype, dtype)
def test_num_replicas_with_switch(self):
# https://github.com/jax-ml/jax/issues/7411
def identity(x):
return x
def cond_of_pmap(x):
y = lax.cond(True, jax.pmap(identity), jax.pmap(identity), x)
return y
with ignore_jit_of_pmap_warning():
cond_of_pmap(jnp.zeros((jax.device_count(), 2)))
def test_static_argnum_on_method(self):
class A:
@partial(self.pmap, static_broadcasted_argnums=(0,))
def my_func_pmap(self, x):
return x + 2
A().my_func_pmap(jnp.asarray([3] * jax.device_count()))
def test_pmap_error_on_non_hashable_static_argument(self):
f = lambda x, y: x + 3
pmapped_f = self.pmap(f, static_broadcasted_argnums=(1,))
inputs = np.asarray([1] * jax.device_count())
with self.assertRaisesRegex(
ValueError, "Non-hashable static arguments are not supported.*"):
pmapped_f(inputs, np.asarray(1))
@parameterized.named_parameters(
{"testcase_name": f"_{axis_size=}", "axis_size": axis_size}
for axis_size in [1, 2])
def test_grad_of_pmap_compilation_caching(self, axis_size):
if len(jax.local_devices()) < axis_size:
raise SkipTest("too few devices for test")
if config.disable_jit.value:
raise SkipTest("caching doesn't apply with jit disabled")
@jax.pmap
def f(x):
return jnp.sin(x)
# warm-up the cache
x = jnp.ones(axis_size)
_, f_bwd = jax.vjp(f, x)
_ = f_bwd(x)
with jtu.count_jit_and_pmap_lowerings() as count: # noqa: F841
_, f_bwd2 = jax.vjp(f, x)
_ = f_bwd(x)
_ = f_bwd2(x)
self.assertEqual(count(), 0) # cache hits on fwd and bwd
def testSizeOverflow(self):
if config.disable_jit.value:
# TODO(sharadmv, mattjj): investigate and fix this issue
raise SkipTest("OOMs in eager mode")
x = jnp.arange(1)
x = self.pmap(lambda _: jnp.ones([8, 267736, 1024], dtype=jnp.int8))(x)
self.assertEqual(x.size, 8 * 267736 * 1024)
self.assertEqual(type(x.size), int)
def test_axis_env_length(self):
f = lambda x: jax.pmap(g)(jnp.array([x]))[0]
def g(x):
assert len(core.get_axis_env().axis_names()) == 1
return x
jax.grad(f)(3.) # doesn't fail
@parameterized.named_parameters(
{"testcase_name": f"{suffix}", "remat": remat}
for suffix, remat in [
('', jax.remat),
('_new', new_checkpoint),
])
def test_remat_of_pmap(self, remat):
f = remat(jax.pmap(lambda x: jnp.sin(jnp.sin(x))))
jtu.check_grads(f, (jnp.arange(1.),), order=2, modes=["rev"])
x = jnp.arange(1.)
jaxpr = jax.make_jaxpr(jax.linearize(f, x)[1])(x)
self.assertIn(' sin ', str(jaxpr))
self.assertIn(' cos ', str(jaxpr))
@parameterized.named_parameters(
{"testcase_name": f"{suffix}", "remat": remat}
for suffix, remat in [
('', jax.remat),
('_new', new_checkpoint),
])
def test_remat_of_pmap_policy(self, remat):
g = jax.pmap(lambda x: jnp.sin(jnp.sin(x)))
x = jnp.arange(1.)
save_cos = lambda prim, *_, **__: str(prim) == 'cos'
f = remat(g, policy=save_cos)
_, f_vjp = jax.vjp(f, x)
jaxpr = f_vjp.jaxpr
jaxpr_text = str(jaxpr)
self.assertEqual(jaxpr_text.count(' sin '), 0)
self.assertEqual(jaxpr_text.count(' cos '), 0)
save_sin = lambda prim, *_, **__: str(prim) == 'sin'
f = remat(g, policy=save_sin)
_, f_vjp = jax.vjp(f, x)
jaxpr = f_vjp.jaxpr
jaxpr_text = str(jaxpr)
self.assertEqual(jaxpr_text.count(' sin '), 0)
self.assertEqual(jaxpr_text.count(' cos '), 2)
save_nothing = lambda prim, *_, **__: False
f = remat(g, policy=save_nothing)
_, f_vjp = jax.vjp(f, x)
jaxpr = f_vjp.jaxpr
jaxpr_text = str(jaxpr)
self.assertEqual(jaxpr_text.count(' sin '), 1)
self.assertEqual(jaxpr_text.count(' cos '), 2)
def test_axis_name_shadowing_with_vmap(self):
# vmap-of-pmap with mismatched axis sizes
jax.vmap(jax.pmap(lambda x: 2 * x, axis_name='i'),
axis_name='i')(jax.numpy.ones((2, 1))) # don't crash
# vmap-of-pmap with matched axis sizes
jax.vmap(jax.pmap(lambda x: 2 * x, axis_name='i'),
axis_name='i')(jax.numpy.ones((1, 1))) # don't crash
# vmap-of-vmap with mismatched axis sizes
jax.vmap(jax.vmap(lambda x: 2 * x, axis_name='i'),
axis_name='i')(jax.numpy.ones((2, 1))) # don't crash
# vmap-of-vmap with matched axis sizes
jax.vmap(jax.vmap(lambda x: 2 * x, axis_name='i'),
axis_name='i')(jax.numpy.ones((1, 1))) # don't crash
@jtu.run_on_devices("cpu")
def test_pmap_stack_size(self):
# Regression test for https://github.com/jax-ml/jax/issues/20428
# pmap isn't particularly important here, but it guarantees that the CPU
# client runs the computation on a threadpool rather than inline.
if jax.device_count() < 2:
raise SkipTest("test requires at least two devices")
x = jnp.eye(200)
y = jax.pmap(jax.scipy.linalg.expm)(jnp.array([x, x]))
y.block_until_ready() # doesn't crash
def test_pmap_of_prng_key(self):
# Regression test for https://github.com/jax-ml/jax/issues/20392
keys = jax.random.split(jax.random.key(0), jax.device_count())
result1 = jax.pmap(jax.random.bits)(keys)
with jtu.ignore_warning(
category=UserWarning, message="The function jit.bits. includes a pmap"):
result2 = jax.jit(jax.pmap(jax.random.bits))(keys)
self.assertArraysEqual(result1, result2)
| PythonPmapTest |
python | OmkarPathak__pygorithm | pygorithm/data_structures/linked_list.py | {
"start": 679,
"end": 3400
} | class ____(object):
"""
Defining the head of the linked list
"""
def __init__(self):
"""
constructor
"""
self.head = None
def _search(self, node, data):
"""
searches the node, if valid returns the node else return false
"""
if node is None:
return False
if node.data == data:
return node
return self._search(node.next, data)
def get_data(self):
"""
prints the elements in the linked list
"""
temp = self.head
l_list = []
while temp:
l_list.append(temp.data)
temp = temp.next
return l_list
def insert_at_start(self, data):
"""
insert an item at the beginning of the linked list
"""
if self.head is None:
new_node = Node(data)
self.head = new_node
else:
new_node = Node(data)
new_node.next = self.head
self.head = new_node
def insert_after(self, next_node_data, data):
"""
insert an item after an element in the linked list
"""
new_node = Node(data)
current_node = self._search(self.head, next_node_data)
new_node.next = current_node.next
current_node.next = new_node
def insert_at_end(self, data):
"""
insert an item at the end of the linked list
"""
new_node = Node(data)
temp = self.head
# get last node
while temp.next is not None:
temp = temp.next
temp.next = new_node
def delete(self, data):
"""
to delete specified element from the linked list
"""
temp = self.head
# if data/key is found in head node itself
if temp is not None:
if temp.data == data:
self.head = temp.next
return
else:
# else search all the nodes
while temp.next is not None:
if temp.data == data:
break
# save current node as previous so that we can go on to next node
prev = temp
temp = temp.next
# node not found
if temp is None:
return
# TODO: local variable 'prev' might be referenced before assignment
# TODO: Fix this
prev.next = temp.next
return
@staticmethod
def get_code():
"""
return the code for the current class
"""
return inspect.getsource(SinglyLinkedList)
| SinglyLinkedList |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 16454,
"end": 17538
} | class ____(nn.Module):
def __init__(self, config, act_fn=None, dropout=None):
super().__init__()
dropout = dropout if dropout is not None else config.speech_encoder_dropout
act_fn = act_fn if act_fn is not None else config.speech_encoder_hidden_act
self.intermediate_dropout = nn.Dropout(dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.speech_encoder_intermediate_size)
self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn
self.output_dense = nn.Linear(config.speech_encoder_intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
| SeamlessM4TConformerFeedForward |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 627,
"end": 962
} | class ____:
params = [["int64", "datetime64[ns]", "category", "Int64"], [None, "foo"]]
param_names = ["dtype", "name"]
def setup(self, dtype, name):
arr = np.arange(10**5)
ser = Series(arr, dtype=dtype)
self.ser = ser
def time_to_frame(self, dtype, name):
self.ser.to_frame(name)
| ToFrame |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/sparse_batch_test.py | {
"start": 1164,
"end": 4314
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testBasic(self):
components = np.random.randint(12, size=(100,)).astype(np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.fill([x], x)).sparse_batch(4, [12])
get_next = self.getNext(dataset)
for start in range(0, len(components), 4):
results = self.evaluate(get_next())
self.assertAllEqual([[i, j]
for i, c in enumerate(components[start:start + 4])
for j in range(c)], results.indices)
self.assertAllEqual(
[c for c in components[start:start + 4] for _ in range(c)],
results.values)
self.assertAllEqual([min(4,
len(components) - start), 12],
results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testWithUnknownShape(self):
components = np.random.randint(5, size=(40,)).astype(np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.fill([x, x], x)).sparse_batch(4, [5, None])
get_next = self.getNext(dataset)
for start in range(0, len(components), 4):
results = self.evaluate(get_next())
self.assertAllEqual([[i, j, z]
for i, c in enumerate(components[start:start + 4])
for j in range(c)
for z in range(c)], results.indices)
self.assertAllEqual([
c for c in components[start:start + 4] for _ in range(c)
for _ in range(c)
], results.values)
self.assertAllEqual([
min(4,
len(components) - start), 5,
np.max(components[start:start + 4])
], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testWithInvalidShape(self):
input_tensor = array_ops.constant([[1]])
with self.assertRaisesRegex(ValueError, "Dimension -2 must be >= 0"):
dataset_ops.Dataset.from_tensors(input_tensor).sparse_batch(4, [-2])
@combinations.generate(test_base.default_test_combinations())
def testShapeErrors(self):
def dataset_fn(input_tensor):
return dataset_ops.Dataset.from_tensors(input_tensor).sparse_batch(
4, [12])
# Initialize with an input tensor of incompatible rank.
get_next = self.getNext(dataset_fn([[1]]))
with self.assertRaisesRegex(errors.InvalidArgumentError,
"incompatible with the row shape"):
self.evaluate(get_next())
# Initialize with an input tensor that is larger than `row_shape`.
get_next = self.getNext(dataset_fn(np.int32(range(13))))
with self.assertRaisesRegex(errors.DataLossError,
"larger than the row shape"):
self.evaluate(get_next())
| DenseToSparseBatchTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 685660,
"end": 686572
} | class ____(sgqlc.types.relay.Connection):
"""A list of languages associated with the parent."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count", "total_size")
edges = sgqlc.types.Field(sgqlc.types.list_of("LanguageEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Language"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
total_size = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalSize")
"""The total size in bytes of files written in that language."""
| LanguageConnection |
python | celery__celery | t/unit/backends/test_base.py | {
"start": 53850,
"end": 54216
} | class ____:
def setup_method(self):
self.b = BaseBackend(
app=self.app,
url='sch://uuuu:pwpw@hostname.dom'
)
def test_as_uri_include_password(self):
assert self.b.as_uri(True) == self.b.url
def test_as_uri_exclude_password(self):
assert self.b.as_uri() == 'sch://uuuu:**@hostname.dom/'
| test_as_uri |
python | sympy__sympy | sympy/functions/special/mathieu_functions.py | {
"start": 3499,
"end": 5060
} | class ____(MathieuBase):
r"""
The derivative $S^{\prime}(a,q,z)$ of the Mathieu Sine function.
Explanation
===========
This function is one solution of the Mathieu differential equation:
.. math ::
y(x)^{\prime\prime} + (a - 2 q \cos(2 x)) y(x) = 0
The other solution is the Mathieu Cosine function.
Examples
========
>>> from sympy import diff, mathieusprime
>>> from sympy.abc import a, q, z
>>> mathieusprime(a, q, z)
mathieusprime(a, q, z)
>>> mathieusprime(a, 0, z)
sqrt(a)*cos(sqrt(a)*z)
>>> diff(mathieusprime(a, q, z), z)
(-a + 2*q*cos(2*z))*mathieus(a, q, z)
See Also
========
mathieus: Mathieu sine function
mathieuc: Mathieu cosine function
mathieucprime: Derivative of Mathieu cosine function
References
==========
.. [1] https://en.wikipedia.org/wiki/Mathieu_function
.. [2] https://dlmf.nist.gov/28
.. [3] https://mathworld.wolfram.com/MathieuFunction.html
.. [4] https://functions.wolfram.com/MathieuandSpheroidalFunctions/MathieuSPrime/
"""
def fdiff(self, argindex=1):
if argindex == 3:
a, q, z = self.args
return (2*q*cos(2*z) - a)*mathieus(a, q, z)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, a, q, z):
if q.is_Number and q.is_zero:
return sqrt(a)*cos(sqrt(a)*z)
# Try to pull out factors of -1
if z.could_extract_minus_sign():
return cls(a, q, -z)
| mathieusprime |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_setitem.py | {
"start": 47171,
"end": 47871
} | class ____(CoercionTest):
# previously test_setitem_series_datetime64 in tests.indexing.test_coercion
@pytest.fixture
def obj(self):
return Series(date_range("2011-01-01", freq="D", periods=4, unit="ns"))
@pytest.fixture
def raises(self):
return False
@pytest.mark.parametrize(
"val,exp_dtype,raises",
[
(Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]", False),
# pre-2.0, a mis-matched tz would end up casting to object
(Timestamp("2012-01-01", tz="US/Pacific"), "datetime64[ns, US/Eastern]", False),
(Timestamp("2012-01-01"), object, True),
(1, object, True),
],
)
| TestCoercionDatetime64 |
python | getsentry__sentry | tests/sentry/api/endpoints/release_thresholds/health_checks/test_is_new_issue_count_healthy.py | {
"start": 480,
"end": 6948
} | class ____(TestCase):
def setUp(self) -> None:
self.project1 = self.create_project(name="foo", organization=self.organization)
self.release1 = Release.objects.create(version="v1", organization=self.organization)
def test_success(self) -> None:
now = timezone.now()
mock_threshold: EnrichedThreshold = {
"id": "1",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.NEW_ISSUE_COUNT,
"trigger_type": TriggerType.OVER_STR,
"value": 10,
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
"id": "1",
}
mock_new_issue_counts = {
"1": 0,
}
is_healthy, metric_value = is_new_issue_count_healthy(mock_threshold, mock_new_issue_counts)
assert is_healthy
assert metric_value == 0
def test_multiple_thresholds(self) -> None:
now = timezone.now()
threshold: EnrichedThreshold = {
"id": "1",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.NEW_ISSUE_COUNT,
"trigger_type": TriggerType.OVER_STR,
"value": 10,
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
"id": "1",
}
threshold2: EnrichedThreshold = {
"id": "2",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.NEW_ISSUE_COUNT,
"trigger_type": TriggerType.OVER_STR,
"value": 10,
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
"id": "2",
}
threshold3: EnrichedThreshold = {
"id": "3",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.NEW_ISSUE_COUNT,
"trigger_type": TriggerType.OVER_STR,
"value": 10,
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
"id": "3",
}
mock_new_issue_counts = {
"1": 0,
"2": 10,
"3": 100,
}
is_healthy, metric_value = is_new_issue_count_healthy(threshold, mock_new_issue_counts)
assert is_healthy
assert metric_value == 0
is_healthy, metric_value = is_new_issue_count_healthy(threshold2, mock_new_issue_counts)
assert is_healthy
assert metric_value == 10
is_healthy, metric_value = is_new_issue_count_healthy(threshold3, mock_new_issue_counts)
assert not is_healthy
assert metric_value == 100
def test_success_under(self) -> None:
now = timezone.now()
mock_threshold: EnrichedThreshold = {
"id": "1",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.NEW_ISSUE_COUNT,
"trigger_type": TriggerType.UNDER_STR,
"value": 10,
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
"id": "1",
}
mock_new_issue_counts = {
"1": 0,
}
is_healthy, metric_value = is_new_issue_count_healthy(mock_threshold, mock_new_issue_counts)
assert not is_healthy
assert metric_value == 0
def test_no_new_issues(self) -> None:
now = timezone.now()
mock_threshold: EnrichedThreshold = {
"id": "1",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.NEW_ISSUE_COUNT,
"trigger_type": TriggerType.OVER_STR,
"value": 10,
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
}
mock_new_issue_counts: dict[str, Any] = {}
is_healthy, metric_value = is_new_issue_count_healthy(mock_threshold, mock_new_issue_counts)
assert is_healthy
assert metric_value == 0
| NewIssueCountThresholdCheckTest |
python | getsentry__sentry | tests/sentry/preprod/test_models_size_metrics.py | {
"start": 278,
"end": 10280
} | class ____(TestCase):
"""Tests for PreprodArtifact size metrics related methods."""
def setUp(self):
super().setUp()
self.organization = self.create_organization(owner=self.user)
self.team = self.create_team(organization=self.organization)
self.project = self.create_project(
teams=[self.team], organization=self.organization, name="test_project"
)
def test_get_size_metrics_filtering(self):
"""Test the get_size_metrics method with various filters."""
artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.filtering",
)
main_metrics = Factories.create_preprod_artifact_size_metrics(
artifact,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
)
watch_metrics = Factories.create_preprod_artifact_size_metrics(
artifact,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
)
feature_metrics = Factories.create_preprod_artifact_size_metrics(
artifact,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.ANDROID_DYNAMIC_FEATURE,
identifier="test_feature",
)
# Test getting all metrics (no filters)
all_metrics = artifact.get_size_metrics()
assert all_metrics.count() == 3
# Test filtering by metrics type
main_only = artifact.get_size_metrics(
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT
)
assert main_only.count() == 1
main_first = main_only.first()
assert main_first is not None
assert main_first.id == main_metrics.id
watch_only = artifact.get_size_metrics(
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT
)
assert watch_only.count() == 1
watch_first = watch_only.first()
assert watch_first is not None
assert watch_first.id == watch_metrics.id
# Test filtering by identifier
feature_only = artifact.get_size_metrics(identifier="test_feature")
assert feature_only.count() == 1
feature_first = feature_only.first()
assert feature_first is not None
assert feature_first.id == feature_metrics.id
# Test filtering by both type and identifier
feature_typed = artifact.get_size_metrics(
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.ANDROID_DYNAMIC_FEATURE,
identifier="test_feature",
)
assert feature_typed.count() == 1
feature_typed_first = feature_typed.first()
assert feature_typed_first is not None
assert feature_typed_first.id == feature_metrics.id
# Test no matches
no_matches = artifact.get_size_metrics(identifier="nonexistent")
assert no_matches.count() == 0
def test_get_size_metrics_for_artifacts_bulk(self):
"""Test the bulk get_size_metrics_for_artifacts classmethod."""
artifact1 = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.bulk1",
)
artifact2 = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.bulk2",
)
artifact1_main = Factories.create_preprod_artifact_size_metrics(
artifact1,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
)
artifact1_watch = Factories.create_preprod_artifact_size_metrics(
artifact1,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
)
artifact2_main = Factories.create_preprod_artifact_size_metrics(
artifact2,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
)
# Test bulk retrieval with no filters (should get all metrics)
results = PreprodArtifact.get_size_metrics_for_artifacts([artifact1, artifact2])
assert artifact1.id in results
assert artifact2.id in results
assert results[artifact1.id].count() == 2 # main + watch
assert results[artifact2.id].count() == 1 # main only
# Test bulk retrieval with type filter (should get only main metrics)
main_results = PreprodArtifact.get_size_metrics_for_artifacts(
[artifact1, artifact2],
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
)
assert main_results[artifact1.id].count() == 1
assert main_results[artifact2.id].count() == 1
artifact1_main_first = main_results[artifact1.id].first()
assert artifact1_main_first is not None
assert artifact1_main_first.id == artifact1_main.id
artifact2_main_first = main_results[artifact2.id].first()
assert artifact2_main_first is not None
assert artifact2_main_first.id == artifact2_main.id
# Test bulk retrieval with identifier filter
watch_results = PreprodArtifact.get_size_metrics_for_artifacts(
[artifact1, artifact2],
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
)
assert watch_results[artifact1.id].count() == 1
assert watch_results[artifact2.id].count() == 0 # No watch metrics for artifact2
artifact1_watch_first = watch_results[artifact1.id].first()
assert artifact1_watch_first is not None
assert artifact1_watch_first.id == artifact1_watch.id
# Test with empty list
empty_results = PreprodArtifact.get_size_metrics_for_artifacts([])
assert empty_results == {}
def test_get_size_metrics_ignores_other_artifacts(self):
"""Test that get_size_metrics only returns metrics for the specific artifact."""
artifact1 = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app1",
)
artifact2 = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app2",
)
artifact1_main = Factories.create_preprod_artifact_size_metrics(
artifact1,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
)
artifact1_watch = Factories.create_preprod_artifact_size_metrics(
artifact1,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
)
artifact1_feature = Factories.create_preprod_artifact_size_metrics(
artifact1,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.ANDROID_DYNAMIC_FEATURE,
identifier="feature_a",
)
artifact2_main = Factories.create_preprod_artifact_size_metrics(
artifact2,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
)
artifact2_watch = Factories.create_preprod_artifact_size_metrics(
artifact2,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
)
artifact2_feature = Factories.create_preprod_artifact_size_metrics(
artifact2,
metrics_type=PreprodArtifactSizeMetrics.MetricsArtifactType.ANDROID_DYNAMIC_FEATURE,
identifier="feature_a", # Same identifier as artifact1 but different artifact
)
# Test artifact1's metrics - should only get artifact1 metrics, not artifact2
artifact1_metrics = artifact1.get_size_metrics()
assert artifact1_metrics.count() == 3
artifact1_ids = {m.id for m in artifact1_metrics}
expected_artifact1_ids = {artifact1_main.id, artifact1_watch.id, artifact1_feature.id}
assert artifact1_ids == expected_artifact1_ids
# Ensure none of artifact2's metrics are included
artifact2_ids = {artifact2_main.id, artifact2_watch.id, artifact2_feature.id}
assert artifact1_ids.isdisjoint(artifact2_ids)
# Test artifact2's metrics - should only get artifact2 metrics, not artifact1
artifact2_metrics = artifact2.get_size_metrics()
assert artifact2_metrics.count() == 3
artifact2_result_ids = {m.id for m in artifact2_metrics}
assert artifact2_result_ids == artifact2_ids
# Ensure none of artifact1's metrics are included
assert artifact2_result_ids.isdisjoint(expected_artifact1_ids)
# Test filtering by type - should still only return metrics for the specific artifact
artifact1_main_only = artifact1.get_size_metrics(
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT
)
assert artifact1_main_only.count() == 1
artifact1_main_only_first = artifact1_main_only.first()
assert artifact1_main_only_first is not None
assert artifact1_main_only_first.id == artifact1_main.id
assert artifact1_main_only_first.id != artifact2_main.id
# Test filtering by identifier - should still only return metrics for the specific artifact
artifact1_feature_only = artifact1.get_size_metrics(identifier="feature_a")
assert artifact1_feature_only.count() == 1
artifact1_feature_only_first = artifact1_feature_only.first()
assert artifact1_feature_only_first is not None
assert artifact1_feature_only_first.id == artifact1_feature.id
assert artifact1_feature_only_first.id != artifact2_feature.id
| PreprodArtifactSizeMetricsTest |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-yandexgpt/llama_index/embeddings/yandexgpt/base.py | {
"start": 661,
"end": 7705
} | class ____(BaseEmbedding):
"""
A class representation for generating embeddings using the Yandex Cloud API.
Args:
api_key (Optional[str]): An API key for Yandex Cloud.
model_name (str): The name of the model to be used for generating embeddings.
The class ensures that this model is supported. Defaults to "general:embedding".
embed_batch_size (int): The batch size for embedding. Defaults to DEFAULT_EMBED_BATCH_SIZE.
callback_manager (Optional[CallbackManager]): Callback manager for hooks.
Example:
. code-block:: python
from llama_index.embeddings.yandexgpt import YandexGPTEmbedding
embeddings = YandexGPTEmbedding(
api_key="your-api-key",
folder_id="your-folder-id",
)
"""
api_key: str = Field(description="The YandexGPT API key.")
folder_id: str = Field(description="The folder id for YandexGPT API.")
retries: int = 6
sleep_interval: float = 0.1
def __init__(
self,
api_key: Optional[str] = None,
folder_id: Optional[str] = None,
model_name: str = "general:embedding",
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
if not api_key:
raise ValueError(
"You must provide an API key or IAM token to use YandexGPT. "
"You can either pass it in as an argument or set it `YANDEXGPT_API_KEY`."
)
if not folder_id:
raise ValueError(
"You must provide catalog_id to use YandexGPT. "
"You can either pass it in as an argument or set it `YANDEXGPT_CATALOG_ID`."
)
api_key = get_from_param_or_env("api_key", api_key, "YANDEXGPT_KEY")
folder_id = get_from_param_or_env(
"folder_id", folder_id, "YANDEXGPT_CATALOG_ID"
)
super().__init__(
model_name=model_name,
api_key=api_key,
folder_id=folder_id,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
def _getModelUri(self, is_document: bool = False) -> str:
"""Construct the model URI based on whether the text is a document or a query."""
return f"emb://{self.folder_id}/text-search-{'doc' if is_document else 'query'}/latest"
@classmethod
def class_name(cls) -> str:
"""Return the class name."""
return "YandexGPTEmbedding"
def _embed(self, text: str, is_document: bool = False) -> List[float]:
"""
Embeds text using the YandexGPT Cloud API synchronously.
Args:
text: The text to embed.
is_document: Whether the text is a document (True) or a query (False).
Returns:
A list of floats representing the embedding.
Raises:
YException: If an error occurs during embedding.
"""
payload = {"modelUri": self._getModelUri(is_document), "text": text}
header = {
"Content-Type": "application/json",
"Authorization": f"Api-Key {self.api_key}",
"x-data-logging-enabled": "false",
}
try:
for attempt in Retrying(
stop=stop_after_attempt(self.retries),
wait=wait_fixed(self.sleep_interval),
):
with attempt:
response = requests.post(
DEFAULT_YANDEXGPT_API_BASE, json=payload, headers=header
)
response = response.json()
if "embedding" in response:
return response["embedding"]
raise YException(f"No embedding found, result returned: {response}")
except RetryError:
raise YException(
f"Error computing embeddings after {self.retries} retries. Result returned:\n{response}"
)
async def _aembed(self, text: str, is_document: bool = False) -> List[float]:
"""
Embeds text using the YandexGPT Cloud API asynchronously.
Args:
text: The text to embed.
is_document: Whether the text is a document (True) or a query (False).
Returns:
A list of floats representing the embedding.
Raises:
YException: If an error occurs during embedding.
"""
payload = {"modelUri": self._getModelUri(is_document), "text": text}
header = {
"Content-Type": "application/json",
"Authorization": f"Api-Key {self.api_key}",
"x-data-logging-enabled": "false",
}
try:
for attempt in Retrying(
stop=stop_after_attempt(self.retries),
wait=wait_fixed(self.sleep_interval),
):
with attempt:
async with aiohttp.ClientSession() as session:
async with session.post(
DEFAULT_YANDEXGPT_API_BASE, json=payload, headers=header
) as response:
result = await response.json()
if "embedding" in result:
return result["embedding"]
raise YException(
f"No embedding found, result returned: {result}"
)
except RetryError:
raise YException(
f"Error computing embeddings after {self.retries} retries. Result returned:\n{result}"
)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding sync."""
return self._embed(text, is_document=True)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get list of texts embeddings sync."""
embeddings = []
for text in texts:
embeddings.append(self._embed(text, is_document=True))
time.sleep(self.sleep_interval)
return embeddings
def _get_query_embedding(self, text: str) -> List[float]:
"""Get query embedding sync."""
return self._embed(text, is_document=False)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Get query text async."""
return await self._aembed(text, is_document=True)
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get list of texts embeddings async."""
embeddings = []
for text in texts:
embeddings.append(await self._aembed(text, is_document=True))
await asyncio.sleep(self.sleep_interval)
return embeddings
async def _aget_query_embedding(self, text: str) -> List[float]:
"""Get query embedding async."""
return await self._aembed(text, is_document=False)
| YandexGPTEmbedding |
python | readthedocs__readthedocs.org | readthedocs/projects/views/public.py | {
"start": 9383,
"end": 13672
} | class ____(CDNCacheControlMixin, CDNCacheTagsMixin, ServeDocsMixin, View):
# Use new-style URLs (same domain as docs) or old-style URLs (dashboard URL)
same_domain_url = False
def get(
self,
request,
project_slug=None,
type_=None,
version_slug=None,
lang_slug=None,
subproject_slug=None,
):
"""
Download a specific piece of media.
Perform an auth check if serving in private mode.
This view is used to download a file using old-style URLs (download from
the dashboard) and new-style URLs (download from the same domain as
docs). Basically, the parameters received by the GET view are different
(``project_slug`` does not come in the new-style URLs, for example) and
we need to take it from the request. Once we get the final ``version``
to be served, everything is the same for both paths.
.. warning:: This is linked directly from the HTML pages.
It should only care about the Version permissions,
not the actual Project permissions.
"""
if self.same_domain_url:
unresolved_domain = request.unresolved_domain
is_external = request.unresolved_domain.is_from_external_domain
manager = EXTERNAL if is_external else INTERNAL
# Additional protection to force all storage calls
# to use the external or internal versions storage.
# TODO: We already force the manager to match the type,
# so we could probably just remove this.
self.version_type = manager
# It uses the request to get the ``project``.
# The rest of arguments come from the URL.
project = unresolved_domain.project
# Use the project from the domain, or use the subproject slug.
if subproject_slug:
project = get_object_or_404(project.subprojects, alias=subproject_slug).child
# Redirect old language codes with underscores to new ones with dashes and lowercase.
normalized_language_code = lang_slug.lower().replace("_", "-")
if normalized_language_code != lang_slug:
if project.language != normalized_language_code:
project = get_object_or_404(
project.translations, language=normalized_language_code
)
return HttpResponseRedirect(
project.get_production_media_url(type_, version_slug=version_slug)
)
if project.language != lang_slug:
project = get_object_or_404(project.translations, language=lang_slug)
if is_external and unresolved_domain.external_version_slug != version_slug:
raise Http404
version = get_object_or_404(
project.versions(manager=manager),
slug=version_slug,
)
if not self.allowed_user(request, version):
return self.get_unauthed_response(request, project)
# All public versions can be cached.
self.cache_response = version.is_public
else:
# All the arguments come from the URL.
version = get_object_or_404(
Version.internal.public(user=request.user),
project__slug=project_slug,
slug=version_slug,
)
# TODO don't do this, it's a leftover of trying to use CDNCacheTagsMixin
# without class level variables. See proxito.views.serve for
# other instances of this pattern to update.
# See: https://github.com/readthedocs/readthedocs.org/pull/12495
self.project = version.project
self.version = version
return self._serve_dowload(
request=request,
project=version.project,
version=version,
type_=type_,
)
def _get_project(self):
"""Hack for CDNCacheTagsMixin, get project set in `get()`."""
return self.project
def _get_version(self):
"""Hack for CDNCacheTagsMixin, get version set in `get()`."""
return self.version
| ProjectDownloadMediaBase |
python | pandas-dev__pandas | pandas/tests/indexes/ranges/test_range.py | {
"start": 252,
"end": 29143
} | class ____:
@pytest.fixture
def simple_index(self):
return RangeIndex(start=0, stop=20, step=2)
def test_constructor_unwraps_index(self):
result = RangeIndex(1, 3)
expected = np.array([1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(result._data, expected)
def test_can_hold_identifiers(self, simple_index):
idx = simple_index
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_too_many_names(self, simple_index):
index = simple_index
with pytest.raises(ValueError, match="^Length"):
index.names = ["roger", "harold"]
@pytest.mark.parametrize(
"index, start, stop, step",
[
(RangeIndex(5), 0, 5, 1),
(RangeIndex(0, 5), 0, 5, 1),
(RangeIndex(5, step=2), 0, 5, 2),
(RangeIndex(1, 5, 2), 1, 5, 2),
],
)
def test_start_stop_step_attrs(self, index, start, stop, step):
# GH 25710
assert index.start == start
assert index.stop == stop
assert index.step == step
def test_copy(self):
i = RangeIndex(5, name="Foo")
i_copy = i.copy()
assert i_copy is not i
assert i_copy.identical(i)
assert i_copy._range == range(0, 5, 1)
assert i_copy.name == "Foo"
def test_repr(self):
i = RangeIndex(5, name="Foo")
result = repr(i)
expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
i = RangeIndex(5, 0, -1)
result = repr(i)
expected = "RangeIndex(start=5, stop=0, step=-1)"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
def test_insert(self):
idx = RangeIndex(5, name="Foo")
result = idx[1:4]
# test 0th element
tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]), exact="equiv")
# GH 18295 (test missing)
expected = Index([0, np.nan, 1, 2, 3, 4], dtype=np.float64)
for na in [np.nan, None, pd.NA]:
result = RangeIndex(5).insert(1, na)
tm.assert_index_equal(result, expected)
result = RangeIndex(5).insert(1, pd.NaT)
expected = Index([0, pd.NaT, 1, 2, 3, 4], dtype=object)
tm.assert_index_equal(result, expected)
def test_insert_edges_preserves_rangeindex(self):
idx = Index(range(4, 9, 2))
result = idx.insert(0, 2)
expected = Index(range(2, 9, 2))
tm.assert_index_equal(result, expected, exact=True)
result = idx.insert(3, 10)
expected = Index(range(4, 11, 2))
tm.assert_index_equal(result, expected, exact=True)
def test_insert_middle_preserves_rangeindex(self):
# insert in the middle
idx = Index(range(0, 3, 2))
result = idx.insert(1, 1)
expected = Index(range(3))
tm.assert_index_equal(result, expected, exact=True)
idx = idx * 2
result = idx.insert(1, 2)
expected = expected * 2
tm.assert_index_equal(result, expected, exact=True)
def test_delete(self):
idx = RangeIndex(5, name="Foo")
expected = idx[1:]
result = idx.delete(0)
tm.assert_index_equal(result, expected, exact=True)
assert result.name == expected.name
expected = idx[:-1]
result = idx.delete(-1)
tm.assert_index_equal(result, expected, exact=True)
assert result.name == expected.name
msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises((IndexError, ValueError), match=msg):
# either depending on numpy version
result = idx.delete(len(idx))
def test_delete_preserves_rangeindex(self):
idx = Index(range(2), name="foo")
result = idx.delete([1])
expected = Index(range(1), name="foo")
tm.assert_index_equal(result, expected, exact=True)
result = idx.delete(1)
tm.assert_index_equal(result, expected, exact=True)
def test_delete_preserves_rangeindex_middle(self):
idx = Index(range(3), name="foo")
result = idx.delete(1)
expected = idx[::2]
tm.assert_index_equal(result, expected, exact=True)
result = idx.delete(-2)
tm.assert_index_equal(result, expected, exact=True)
def test_delete_preserves_rangeindex_list_at_end(self):
idx = RangeIndex(0, 6, 1)
loc = [2, 3, 4, 5]
result = idx.delete(loc)
expected = idx[:2]
tm.assert_index_equal(result, expected, exact=True)
result = idx.delete(loc[::-1])
tm.assert_index_equal(result, expected, exact=True)
def test_delete_preserves_rangeindex_list_middle(self):
idx = RangeIndex(0, 6, 1)
loc = [1, 2, 3, 4]
result = idx.delete(loc)
expected = RangeIndex(0, 6, 5)
tm.assert_index_equal(result, expected, exact=True)
result = idx.delete(loc[::-1])
tm.assert_index_equal(result, expected, exact=True)
def test_delete_all_preserves_rangeindex(self):
idx = RangeIndex(0, 6, 1)
loc = [0, 1, 2, 3, 4, 5]
result = idx.delete(loc)
expected = idx[:0]
tm.assert_index_equal(result, expected, exact=True)
result = idx.delete(loc[::-1])
tm.assert_index_equal(result, expected, exact=True)
def test_delete_not_preserving_rangeindex(self):
idx = RangeIndex(0, 6, 1)
loc = [0, 3, 5]
result = idx.delete(loc)
expected = Index([1, 2, 4])
tm.assert_index_equal(result, expected, exact=True)
result = idx.delete(loc[::-1])
tm.assert_index_equal(result, expected, exact=True)
def test_view(self):
i = RangeIndex(0, name="Foo")
i_view = i.view()
assert i_view.name == "Foo"
i_view = i.view("i8")
tm.assert_numpy_array_equal(i.values, i_view)
def test_dtype(self, simple_index):
index = simple_index
assert index.dtype == np.int64
def test_cache(self):
# GH 26565, GH26617, GH35432, GH53387
# This test checks whether _cache has been set.
# Calling RangeIndex._cache["_data"] creates an int64 array of the same length
# as the RangeIndex and stores it in _cache.
idx = RangeIndex(0, 100, 10)
assert idx._cache == {}
repr(idx)
assert idx._cache == {}
str(idx)
assert idx._cache == {}
idx.get_loc(20)
assert idx._cache == {}
90 in idx # True
assert idx._cache == {}
91 in idx # False
assert idx._cache == {}
idx.all()
assert idx._cache == {}
idx.any()
assert idx._cache == {}
for _ in idx:
pass
assert idx._cache == {}
df = pd.DataFrame({"a": range(10)}, index=idx)
# df.__repr__ should not populate index cache
str(df)
assert idx._cache == {}
df.loc[50]
assert idx._cache == {}
with pytest.raises(KeyError, match="51"):
df.loc[51]
assert idx._cache == {}
df.loc[10:50]
assert idx._cache == {}
df.iloc[5:10]
assert idx._cache == {}
# after calling take, _cache may contain other keys, but not "_data"
idx.take([3, 0, 1])
assert "_data" not in idx._cache
df.loc[[50]]
assert "_data" not in idx._cache
df.iloc[[5, 6, 7, 8, 9]]
assert "_data" not in idx._cache
# idx._cache should contain a _data entry after call to idx._data
idx._data
assert isinstance(idx._data, np.ndarray)
assert idx._data is idx._data # check cached value is reused
assert "_data" in idx._cache
expected = np.arange(0, 100, 10, dtype="int64")
tm.assert_numpy_array_equal(idx._cache["_data"], expected)
def test_is_monotonic(self):
index = RangeIndex(0, 20, 2)
assert index.is_monotonic_increasing is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is False
index = RangeIndex(4, 0, -1)
assert index.is_monotonic_increasing is False
assert index._is_strictly_monotonic_increasing is False
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 2)
assert index.is_monotonic_increasing is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(2, 1)
assert index.is_monotonic_increasing is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 1)
assert index.is_monotonic_increasing is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
@pytest.mark.parametrize(
"left,right",
[
(RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),
(RangeIndex(0), RangeIndex(1, -1, 3)),
(RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),
(RangeIndex(0, -9, -2), RangeIndex(0, -10, -2)),
],
)
def test_equals_range(self, left, right):
assert left.equals(right)
assert right.equals(left)
def test_logical_compat(self, simple_index):
idx = simple_index
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_identical(self, simple_index):
index = simple_index
i = Index(index.copy())
assert i.identical(index)
# we don't allow object dtype for RangeIndex
if isinstance(index, RangeIndex):
return
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
i = index.copy(dtype=object)
i = i.rename("foo")
same_values = Index(i, dtype=object)
assert same_values.identical(index.copy(dtype=object))
assert not i.identical(index)
assert Index(same_values, name="foo", dtype=object).identical(i)
assert not index.copy(dtype=object).identical(index.copy(dtype="int64"))
def test_nbytes(self):
# memory savings vs int index
idx = RangeIndex(0, 1000)
assert idx.nbytes < Index(idx._values).nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
assert idx.nbytes == i2.nbytes
@pytest.mark.parametrize(
"start,stop,step",
[
# can't
("foo", "bar", "baz"),
# shouldn't
("0", "1", "2"),
],
)
def test_cant_or_shouldnt_cast(self, start, stop, step):
msg = f"Wrong type {type(start)} for value {start}"
with pytest.raises(TypeError, match=msg):
RangeIndex(start, stop, step)
def test_view_index(self, simple_index):
index = simple_index
msg = (
"Cannot change data-type for array of references.|"
"Cannot change data-type for object array.|"
)
with pytest.raises(TypeError, match=msg):
index.view(Index)
def test_prevent_casting(self, simple_index):
index = simple_index
result = index.astype("O")
assert result.dtype == np.object_
def test_repr_roundtrip(self, simple_index):
index = simple_index
tm.assert_index_equal(eval(repr(index)), index)
def test_slice_keep_name(self):
idx = RangeIndex(1, 2, name="asdf")
assert idx.name == idx[1:].name
@pytest.mark.parametrize(
"index",
[
RangeIndex(start=0, stop=20, step=2, name="foo"),
RangeIndex(start=18, stop=-1, step=-2, name="bar"),
],
ids=["index_inc", "index_dec"],
)
def test_has_duplicates(self, index):
assert index.is_unique
assert not index.has_duplicates
def test_extended_gcd(self, simple_index):
index = simple_index
result = index._extended_gcd(6, 10)
assert result[0] == result[1] * 6 + result[2] * 10
assert 2 == result[0]
result = index._extended_gcd(10, 6)
assert 2 == result[1] * 10 + result[2] * 6
assert 2 == result[0]
def test_min_fitting_element(self):
result = min_fitting_element(0, 2, 1)
assert 2 == result
result = min_fitting_element(1, 1, 1)
assert 1 == result
result = min_fitting_element(18, -2, 1)
assert 2 == result
result = min_fitting_element(5, -1, 1)
assert 1 == result
big_num = 500000000000000000000000
result = min_fitting_element(5, 1, big_num)
assert big_num == result
def test_slice_specialised(self, simple_index):
index = simple_index
index.name = "foo"
# scalar indexing
res = index[1]
expected = 2
assert res == expected
res = index[-1]
expected = 18
assert res == expected
# slicing
# slice value completion
index_slice = index[:]
expected = index
tm.assert_index_equal(index_slice, expected)
# positive slice values
index_slice = index[7:10:2]
expected = Index([14, 18], name="foo")
tm.assert_index_equal(index_slice, expected, exact="equiv")
# negative slice values
index_slice = index[-1:-5:-2]
expected = Index([18, 14], name="foo")
tm.assert_index_equal(index_slice, expected, exact="equiv")
# stop overshoot
index_slice = index[2:100:4]
expected = Index([4, 12], name="foo")
tm.assert_index_equal(index_slice, expected, exact="equiv")
# reverse
index_slice = index[::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected, exact="equiv")
index_slice = index[-8::-1]
expected = Index([4, 2, 0], name="foo")
tm.assert_index_equal(index_slice, expected, exact="equiv")
index_slice = index[-40::-1]
expected = Index(np.array([], dtype=np.int64), name="foo")
tm.assert_index_equal(index_slice, expected, exact="equiv")
index_slice = index[40::-1]
expected = Index(index.values[40::-1], name="foo")
tm.assert_index_equal(index_slice, expected, exact="equiv")
index_slice = index[10::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected, exact="equiv")
@pytest.mark.parametrize("step", set(range(-5, 6)) - {0})
def test_len_specialised(self, step):
# make sure that our len is the same as np.arange calc
start, stop = (0, 5) if step > 0 else (5, 0)
arr = np.arange(start, stop, step)
index = RangeIndex(start, stop, step)
assert len(index) == len(arr)
index = RangeIndex(stop, start, step)
assert len(index) == 0
@pytest.mark.parametrize(
"indices, expected",
[
([RangeIndex(1, 12, 5)], RangeIndex(1, 12, 5)),
([RangeIndex(0, 6, 4)], RangeIndex(0, 6, 4)),
([RangeIndex(1, 3), RangeIndex(3, 7)], RangeIndex(1, 7)),
([RangeIndex(1, 5, 2), RangeIndex(5, 6)], RangeIndex(1, 6, 2)),
([RangeIndex(1, 3, 2), RangeIndex(4, 7, 3)], RangeIndex(1, 7, 3)),
([RangeIndex(-4, 3, 2), RangeIndex(4, 7, 2)], RangeIndex(-4, 7, 2)),
([RangeIndex(-4, -8), RangeIndex(-8, -12)], RangeIndex(0, 0)),
([RangeIndex(-4, -8), RangeIndex(3, -4)], RangeIndex(0, 0)),
([RangeIndex(-4, -8), RangeIndex(3, 5)], RangeIndex(3, 5)),
([RangeIndex(-4, -2), RangeIndex(3, 5)], Index([-4, -3, 3, 4])),
([RangeIndex(-2), RangeIndex(3, 5)], RangeIndex(3, 5)),
([RangeIndex(2), RangeIndex(2)], Index([0, 1, 0, 1])),
([RangeIndex(2), RangeIndex(2, 5), RangeIndex(5, 8, 4)], RangeIndex(0, 6)),
(
[RangeIndex(2), RangeIndex(3, 5), RangeIndex(5, 8, 4)],
Index([0, 1, 3, 4, 5]),
),
(
[RangeIndex(-2, 2), RangeIndex(2, 5), RangeIndex(5, 8, 4)],
RangeIndex(-2, 6),
),
([RangeIndex(3), Index([-1, 3, 15])], Index([0, 1, 2, -1, 3, 15])),
([RangeIndex(3), Index([-1, 3.1, 15.0])], Index([0, 1, 2, -1, 3.1, 15.0])),
([RangeIndex(3), Index(["a", None, 14])], Index([0, 1, 2, "a", None, 14])),
([RangeIndex(3, 1), Index(["a", None, 14])], Index(["a", None, 14])),
],
)
def test_append(self, indices, expected):
# GH16212
result = indices[0].append(indices[1:])
tm.assert_index_equal(result, expected, exact=True)
if len(indices) == 2:
# Append single item rather than list
result2 = indices[0].append(indices[1])
tm.assert_index_equal(result2, expected, exact=True)
def test_engineless_lookup(self):
# GH 16685
# Standard lookup on RangeIndex should not require the engine to be
# created
idx = RangeIndex(2, 10, 3)
assert idx.get_loc(5) == 1
tm.assert_numpy_array_equal(
idx.get_indexer([2, 8]), ensure_platform_int(np.array([0, 2]))
)
with pytest.raises(KeyError, match="3"):
idx.get_loc(3)
assert "_engine" not in idx._cache
# Different types of scalars can be excluded immediately, no need to
# use the _engine
with pytest.raises(KeyError, match="'a'"):
idx.get_loc("a")
assert "_engine" not in idx._cache
@pytest.mark.parametrize(
"ri",
[
RangeIndex(0, -1, -1),
RangeIndex(0, 1, 1),
RangeIndex(1, 3, 2),
RangeIndex(0, -1, -2),
RangeIndex(-3, -5, -2),
],
)
def test_append_len_one(self, ri):
# GH39401
result = ri.append([])
tm.assert_index_equal(result, ri, exact=True)
@pytest.mark.parametrize("base", [RangeIndex(0, 2), Index([0, 1])])
def test_isin_range(self, base):
# GH#41151
values = RangeIndex(0, 1)
result = base.isin(values)
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
def test_sort_values_key(self):
# GH#43666, GH#52764
sort_order = {8: 2, 6: 0, 4: 8, 2: 10, 0: 12}
values = RangeIndex(0, 10, 2)
result = values.sort_values(key=lambda x: x.map(sort_order))
expected = Index([6, 8, 4, 2, 0], dtype="int64")
tm.assert_index_equal(result, expected, check_exact=True)
# check this matches the Series.sort_values behavior
ser = values.to_series()
result2 = ser.sort_values(key=lambda x: x.map(sort_order))
tm.assert_series_equal(result2, expected.to_series(), check_exact=True)
def test_range_index_rsub_by_const(self):
# GH#53255
result = 3 - RangeIndex(0, 4, 1)
expected = RangeIndex(3, -1, -1)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"rng, decimals",
[
[range(5), 0],
[range(5), 2],
[range(10, 30, 10), -1],
[range(30, 10, -10), -1],
],
)
def test_range_round_returns_rangeindex(rng, decimals):
ri = RangeIndex(rng)
expected = ri.copy()
result = ri.round(decimals=decimals)
tm.assert_index_equal(result, expected, exact=True)
@pytest.mark.parametrize(
"rng, decimals",
[
[range(10, 30, 1), -1],
[range(30, 10, -1), -1],
[range(11, 14), -10],
],
)
def test_range_round_returns_index(rng, decimals):
ri = RangeIndex(rng)
expected = Index(list(rng)).round(decimals=decimals)
result = ri.round(decimals=decimals)
tm.assert_index_equal(result, expected, exact=True)
def test_reindex_1_value_returns_rangeindex():
ri = RangeIndex(0, 10, 2, name="foo")
result, result_indexer = ri.reindex([2])
expected = RangeIndex(2, 4, 2, name="foo")
tm.assert_index_equal(result, expected, exact=True)
expected_indexer = np.array([1], dtype=np.intp)
tm.assert_numpy_array_equal(result_indexer, expected_indexer)
def test_reindex_empty_returns_rangeindex():
ri = RangeIndex(0, 10, 2, name="foo")
result, result_indexer = ri.reindex([])
expected = RangeIndex(0, 0, 2, name="foo")
tm.assert_index_equal(result, expected, exact=True)
expected_indexer = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result_indexer, expected_indexer)
def test_insert_empty_0_loc():
ri = RangeIndex(0, step=10, name="foo")
result = ri.insert(0, 5)
expected = RangeIndex(5, 15, 10, name="foo")
tm.assert_index_equal(result, expected, exact=True)
def test_append_non_rangeindex_return_rangeindex():
ri = RangeIndex(1)
result = ri.append(Index([1]))
expected = RangeIndex(2)
tm.assert_index_equal(result, expected, exact=True)
def test_append_non_rangeindex_return_index():
ri = RangeIndex(1)
result = ri.append(Index([1, 3, 4]))
expected = Index([0, 1, 3, 4])
tm.assert_index_equal(result, expected, exact=True)
def test_reindex_returns_rangeindex():
ri = RangeIndex(2, name="foo")
result, result_indexer = ri.reindex([1, 2, 3])
expected = RangeIndex(1, 4, name="foo")
tm.assert_index_equal(result, expected, exact=True)
expected_indexer = np.array([1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result_indexer, expected_indexer)
def test_reindex_returns_index():
ri = RangeIndex(4, name="foo")
result, result_indexer = ri.reindex([0, 1, 3])
expected = Index([0, 1, 3], name="foo")
tm.assert_index_equal(result, expected, exact=True)
expected_indexer = np.array([0, 1, 3], dtype=np.intp)
tm.assert_numpy_array_equal(result_indexer, expected_indexer)
def test_take_return_rangeindex():
ri = RangeIndex(5, name="foo")
result = ri.take([])
expected = RangeIndex(0, name="foo")
tm.assert_index_equal(result, expected, exact=True)
result = ri.take([3, 4])
expected = RangeIndex(3, 5, name="foo")
tm.assert_index_equal(result, expected, exact=True)
@pytest.mark.parametrize(
"rng, exp_rng",
[
[range(5), range(3, 4)],
[range(0, -10, -2), range(-6, -8, -2)],
[range(0, 10, 2), range(6, 8, 2)],
],
)
def test_take_1_value_returns_rangeindex(rng, exp_rng):
ri = RangeIndex(rng, name="foo")
result = ri.take([3])
expected = RangeIndex(exp_rng, name="foo")
tm.assert_index_equal(result, expected, exact=True)
def test_append_one_nonempty_preserve_step():
expected = RangeIndex(0, -1, -1)
result = RangeIndex(0).append([expected])
tm.assert_index_equal(result, expected, exact=True)
def test_getitem_boolmask_all_true():
ri = RangeIndex(3, name="foo")
expected = ri.copy()
result = ri[[True] * 3]
tm.assert_index_equal(result, expected, exact=True)
def test_getitem_boolmask_all_false():
ri = RangeIndex(3, name="foo")
result = ri[[False] * 3]
expected = RangeIndex(0, name="foo")
tm.assert_index_equal(result, expected, exact=True)
def test_getitem_boolmask_returns_rangeindex():
ri = RangeIndex(3, name="foo")
result = ri[[False, True, True]]
expected = RangeIndex(1, 3, name="foo")
tm.assert_index_equal(result, expected, exact=True)
result = ri[[True, False, True]]
expected = RangeIndex(0, 3, 2, name="foo")
tm.assert_index_equal(result, expected, exact=True)
def test_getitem_boolmask_returns_index():
ri = RangeIndex(4, name="foo")
result = ri[[True, True, False, True]]
expected = Index([0, 1, 3], name="foo")
tm.assert_index_equal(result, expected)
def test_getitem_boolmask_wrong_length():
ri = RangeIndex(4, name="foo")
with pytest.raises(IndexError, match="Boolean index has wrong length"):
ri[[True]]
def test_pos_returns_rangeindex():
ri = RangeIndex(2, name="foo")
expected = ri.copy()
result = +ri
tm.assert_index_equal(result, expected, exact=True)
def test_neg_returns_rangeindex():
ri = RangeIndex(2, name="foo")
result = -ri
expected = RangeIndex(0, -2, -1, name="foo")
tm.assert_index_equal(result, expected, exact=True)
ri = RangeIndex(-2, 2, name="foo")
result = -ri
expected = RangeIndex(2, -2, -1, name="foo")
tm.assert_index_equal(result, expected, exact=True)
@pytest.mark.parametrize(
"rng, exp_rng",
[
[range(0), range(0)],
[range(10), range(10)],
[range(-2, 1, 1), range(2, -1, -1)],
[range(0, -10, -1), range(0, 10, 1)],
],
)
def test_abs_returns_rangeindex(rng, exp_rng):
ri = RangeIndex(rng, name="foo")
expected = RangeIndex(exp_rng, name="foo")
result = abs(ri)
tm.assert_index_equal(result, expected, exact=True)
def test_abs_returns_index():
ri = RangeIndex(-2, 2, name="foo")
result = abs(ri)
expected = Index([2, 1, 0, 1], name="foo")
tm.assert_index_equal(result, expected, exact=True)
@pytest.mark.parametrize(
"rng",
[
range(0),
range(5),
range(0, -5, -1),
range(-2, 2, 1),
range(2, -2, -2),
range(0, 5, 2),
],
)
def test_invert_returns_rangeindex(rng):
ri = RangeIndex(rng, name="foo")
result = ~ri
assert isinstance(result, RangeIndex)
expected = ~Index(list(rng), name="foo")
tm.assert_index_equal(result, expected, exact=False)
@pytest.mark.parametrize(
"rng",
[
range(0, 5, 1),
range(0, 5, 2),
range(10, 15, 1),
range(10, 5, -1),
range(10, 5, -2),
range(5, 0, -1),
],
)
@pytest.mark.parametrize("meth", ["argmax", "argmin"])
def test_arg_min_max(rng, meth):
ri = RangeIndex(rng)
idx = Index(list(rng))
assert getattr(ri, meth)() == getattr(idx, meth)()
@pytest.mark.parametrize("meth", ["argmin", "argmax"])
def test_empty_argmin_argmax_raises(meth):
with pytest.raises(ValueError, match=f"attempt to get {meth} of an empty sequence"):
getattr(RangeIndex(0), meth)()
def test_getitem_integers_return_rangeindex():
result = RangeIndex(0, 10, 2, name="foo")[[0, -1]]
expected = RangeIndex(start=0, stop=16, step=8, name="foo")
tm.assert_index_equal(result, expected, exact=True)
result = RangeIndex(0, 10, 2, name="foo")[[3]]
expected = RangeIndex(start=6, stop=8, step=2, name="foo")
tm.assert_index_equal(result, expected, exact=True)
def test_getitem_empty_return_rangeindex():
result = RangeIndex(0, 10, 2, name="foo")[[]]
expected = RangeIndex(start=0, stop=0, step=1, name="foo")
tm.assert_index_equal(result, expected, exact=True)
def test_getitem_integers_return_index():
result = RangeIndex(0, 10, 2, name="foo")[[0, 1, -1]]
expected = Index([0, 2, 8], dtype="int64", name="foo")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize(
"rng",
[
range(3),
range(0),
range(0, 3, 2),
range(3, -3, -2),
],
)
def test_value_counts(sort, dropna, ascending, normalize, rng):
ri = RangeIndex(rng, name="A")
result = ri.value_counts(
normalize=normalize, sort=sort, ascending=ascending, dropna=dropna
)
expected = Index(list(rng), name="A").value_counts(
normalize=normalize, sort=sort, ascending=ascending, dropna=dropna
)
tm.assert_series_equal(result, expected, check_index_type=False)
@pytest.mark.parametrize("side", ["left", "right"])
@pytest.mark.parametrize("value", [0, -5, 5, -3, np.array([-5, -3, 0, 5])])
def test_searchsorted(side, value):
ri = RangeIndex(-3, 3, 2)
result = ri.searchsorted(value=value, side=side)
expected = Index(list(ri)).searchsorted(value=value, side=side)
if isinstance(value, int):
assert result == expected
else:
tm.assert_numpy_array_equal(result, expected)
| TestRangeIndex |
python | numba__numba | numba/tests/test_dyn_array.py | {
"start": 33526,
"end": 34401
} | class ____(BaseTest):
def check_identity(self, pyfunc):
self.check_outputs(pyfunc, [(3,)])
def test_identity(self):
def func(n):
return np.identity(n)
self.check_identity(func)
def test_identity_dtype(self):
for dtype in (np.complex64, np.int16, np.bool_, np.dtype('bool'),
'bool_'):
def func(n):
return np.identity(n, dtype)
self.check_identity(func)
def test_like_dtype_non_const_str_kwarg(self):
@njit
def func(n, dt):
return np.identity(n, dt)
with self.assertRaises(TypingError) as raises:
func(4, 'int32')
excstr = str(raises.exception)
msg = ("If np.identity dtype is a string it must be a "
"string constant.")
self.assertIn(msg, excstr)
| TestNdIdentity |
python | dask__distributed | distributed/comm/tcp.py | {
"start": 20232,
"end": 20408
} | class ____(BaseTCPConnector):
prefix = "tcp://"
comm_class = TCP
encrypted = False
def _get_connect_args(self, **connection_args):
return {}
| TCPConnector |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants.py | {
"start": 34880,
"end": 50582
} | class ____(_ConverterData):
"""Container for Session-based conversion data."""
def __init__(self,
session,
graph_def,
output_node_names,
variable_names_allowlist=None,
variable_names_denylist=None):
graph_def = graph_util.extract_sub_graph(graph_def, output_node_names)
super(_SessionConverterData, self).__init__(
graph_def,
variable_names_allowlist=variable_names_allowlist,
variable_names_denylist=variable_names_denylist)
nodes_to_convert = []
tensor_names_to_convert = []
for node in self.graph_def.node:
if node.op in ["Variable", "VariableV2", "VarHandleOp"]:
tensor_name = node.name
if not self._should_convert(tensor_name):
continue
if node.op == "VarHandleOp":
tensor_name = tensor_name + "/Read/ReadVariableOp"
nodes_to_convert.append(node)
tensor_names_to_convert.append(tensor_name + ":0")
if tensor_names_to_convert:
converted_tensors = session.run(tensor_names_to_convert)
for node, tensor_value in zip(nodes_to_convert, converted_tensors):
self._tensor_data[node.name] = _TensorData(
numpy=tensor_value, dtype=node.attr["dtype"].type, index=None)
def disable_lower_using_switch_merge(graph_def):
"""Set '_lower_using_switch_merge' attributes to False.
Sets the attribute to False in the NodeDefs in the main graph and the NodeDefs
in each function's graph.
Args:
graph_def: GraphDef proto.
Returns:
GraphDef
"""
output_graph_def = graph_pb2.GraphDef()
output_graph_def.CopyFrom(graph_def)
def disable_control_flow_lowering(node):
if node.op in _CONTROL_FLOW_OPS:
node.attr["_lower_using_switch_merge"].b = False
for node in output_graph_def.node:
disable_control_flow_lowering(node)
if output_graph_def.library:
for func in output_graph_def.library.function:
for node in func.node_def:
disable_control_flow_lowering(node)
return output_graph_def
def _run_inline_graph_optimization(func, lower_control_flow,
aggressive_inlining):
"""Apply function inline optimization to the graph.
Returns the GraphDef after Grappler's function inlining optimization is
applied. This optimization does not work on models with control flow.
Args:
func: ConcreteFunction.
lower_control_flow: Boolean indicating whether or not to lower control flow
ops such as If and While. (default True)
aggressive_inlining: Boolean indicating whether or not to do aggressive
function inlining (might be unsafe if function has stateful ops not
properly connected to control outputs).
Returns:
GraphDef
"""
graph_def = func.graph.as_graph_def()
if not lower_control_flow:
graph_def = disable_lower_using_switch_merge(graph_def)
# In some cases, a secondary implementation of the function (e.g. for GPU) is
# written to the "api_implements" attribute. (e.g. `tf.keras.layers.LSTM` in
# TF2 produces a CuDNN-based RNN for GPU).
# This function suppose to inline all functions calls, but "api_implements"
# prevents this from happening. Removing the attribute solves the problem.
# To learn more about "api_implements", see:
# tensorflow/core/grappler/optimizers/implementation_selector.h
for function in graph_def.library.function:
if "api_implements" in function.attr:
del function.attr["api_implements"]
meta_graph = export_meta_graph(graph_def=graph_def, graph=func.graph)
# Clear the initializer_name for the variables collections, since they are not
# needed after saved to saved_model.
for name in [
"variables", "model_variables", "trainable_variables", "local_variables"
]:
raw_list = []
for raw in meta_graph.collection_def["variables"].bytes_list.value:
variable = variable_pb2.VariableDef()
variable.ParseFromString(raw)
variable.ClearField("initializer_name")
raw_list.append(variable.SerializeToString())
meta_graph.collection_def[name].bytes_list.value[:] = raw_list
# Add a collection 'train_op' so that Grappler knows the outputs.
fetch_collection = meta_graph_pb2.CollectionDef()
for array in func.inputs + func.outputs:
fetch_collection.node_list.value.append(array.name)
meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
# Initialize RewriterConfig with everything disabled except function inlining.
config = config_pb2.ConfigProto()
rewrite_options = config.graph_options.rewrite_options
rewrite_options.min_graph_nodes = -1 # do not skip small graphs
rewrite_options.optimizers.append("function")
if aggressive_inlining:
rewrite_options.function_optimization =\
rewriter_config_pb2.RewriterConfig.AGGRESSIVE
return tf_optimizer.OptimizeGraph(config, meta_graph)
def _construct_concrete_function(func, output_graph_def,
converted_input_indices):
"""Constructs a concrete function from the `output_graph_def`.
Args:
func: ConcreteFunction
output_graph_def: GraphDef proto.
converted_input_indices: Set of integers of input indices that were
converted to constants.
Returns:
ConcreteFunction.
"""
# Create a ConcreteFunction from the new GraphDef.
input_tensors = func.graph.internal_captures
converted_inputs = object_identity.ObjectIdentitySet(
[input_tensors[index] for index in converted_input_indices])
not_converted_inputs = [
tensor for tensor in func.inputs if tensor not in converted_inputs
]
not_converted_inputs_map = {
tensor.name: tensor for tensor in not_converted_inputs
}
new_input_names = [tensor.name for tensor in not_converted_inputs]
new_output_names = [tensor.name for tensor in func.outputs]
# Remove old functions to use updated functions from graph def.
for f in output_graph_def.library.function:
if context.context().has_function(f.signature.name):
context.context().remove_function(f.signature.name)
new_func = wrap_function.function_from_graph_def(output_graph_def,
new_input_names,
new_output_names)
# Manually propagate shape for input tensors where the shape is not correctly
# propagated. Scalars shapes are lost when wrapping the function.
for input_tensor in new_func.inputs:
input_tensor.set_shape(not_converted_inputs_map[input_tensor.name].shape)
return new_func
def _replace_variables_by_constants(converter_data):
"""Replaces variables by constants on a given graph.
Given a _ConverterData instance with converted variables in its tensor_data
field, create a new graph where the respective variables are replaced with the
converted constants.
Args:
converter_data: A pre-populated _ConverterData instance.
Returns:
The converted graph.
"""
input_graph = _GraphDef(converter_data.graph_def)
for tensor_name, tensor_data in converter_data.tensor_data.items():
input_graph.nodes[tensor_name].convert_variable_to_constant(
None, tensor_data)
converted_graph = input_graph.converted_self().graph_def
converted_input_indices = {
t.index
for t in converter_data.tensor_data.values()
if t.index is not None
}
return converted_graph, converted_input_indices
def convert_variables_to_constants_v2(func,
lower_control_flow=True,
aggressive_inlining=False):
"""Replaces all the variables in a graph with constants of the same values.
TensorFlow 2.0 function for converting all Variable ops into Const ops holding
the same values. This makes it possible to describe the network fully with a
single GraphDef file, and allows the removal of a lot of ops related to
loading and saving the variables. This function runs Grappler's function
inlining optimization in order to return a single subgraph.
The current implementation only works for graphs that do not contain any
control flow or embedding related ops.
Args:
func: ConcreteFunction.
lower_control_flow: Boolean indicating whether or not to lower control flow
ops such as If and While. (default True)
aggressive_inlining: Boolean indicating whether or not to do aggressive
function inlining (might be unsafe if function has stateful ops, not
properly connected to control outputs). (default False)
Returns:
ConcreteFunction containing a simplified version of the original.
"""
converter_data = _FunctionConverterDataInEager(
func=func,
lower_control_flow=lower_control_flow,
aggressive_inlining=aggressive_inlining)
output_graph_def, converted_input_indices = _replace_variables_by_constants(
converter_data=converter_data)
return _construct_concrete_function(func, output_graph_def,
converted_input_indices)
def convert_var_to_const_function_in_v1(func,
lower_control_flow=True,
aggressive_inlining=False):
"""Replaces all the variables in a graph with constants of the same values.
This function works as same as convert_variables_to_constants_v2, but it
should be used in Graph mode. It is a temporary solution when users want to
integrate their models written in TF2 with infra that requires TF1 mode.
The current implementation only works for graphs that do not contain any
control flow or embedding related ops.
The function must be called in a Session context.
Args:
func: ConcreteFunction.
lower_control_flow: Boolean indicating whether or not to lower control flow
ops such as If and While. (default True)
aggressive_inlining: Boolean indicating whether or not to do aggressive
function inlining (might be unsafe if function has stateful ops, not
properly connected to control outputs). (default False)
Raises:
RuntimeError: If no Session context is present.
Returns:
ConcreteFunction containing a simplified version of the original.
"""
session = ops.get_default_session()
if session is None:
raise RuntimeError(
"The conversion must be carried out in a Session context.")
converter_data = _FunctionConverterDataInGraph(
func=func,
lower_control_flow=lower_control_flow,
aggressive_inlining=aggressive_inlining,
session=session)
output_graph_def, converted_input_indices = _replace_variables_by_constants(
converter_data=converter_data)
return _construct_concrete_function(func, output_graph_def,
converted_input_indices)
def convert_variables_to_constants_v2_as_graph(func,
lower_control_flow=True,
aggressive_inlining=False):
"""Replaces all the variables in a graph with constants of the same values.
This function works as same as convert_variables_to_constants_v2, but it
returns the intermediate `GraphDef` as well. This `GraphDef` contains all the
debug information after all the transformations in the frozen phase.
Args:
func: ConcreteFunction.
lower_control_flow: Boolean indicating whether or not to lower control flow
ops such as If and While. (default True)
aggressive_inlining: Boolean indicating whether or not to do aggressive
function inlining (might be unsafe if function has stateful ops, not
properly connected to control outputs).
Returns:
ConcreteFunction containing a simplified version of the original, and also
the intermediate GraphDef containing the node debug information for the
transformations in the frozen phase.
"""
converter_data = _FunctionConverterDataInEager(
func=func,
lower_control_flow=lower_control_flow,
aggressive_inlining=aggressive_inlining)
output_graph_def, converted_input_indices = _replace_variables_by_constants(
converter_data=converter_data)
frozen_func = _construct_concrete_function(func, output_graph_def,
converted_input_indices)
return frozen_func, output_graph_def
def convert_variables_to_constants_from_session_graph(
session,
graph_def,
output_node_names,
variable_names_allowlist=None,
variable_names_denylist=None):
"""Replaces all the variables in a graph with constants of the same values.
This function works similarly to convert_variables_to_constants_v2, but it
retrieves the constant values from a Session instead of from a
ConcreteFunction. This is useful when converting graphs generated from
TensorFlow V1, where ConcreteFunctions are not available. This also differs
from graph_util.convert_variables_to_constants in that it supports resource
variables when V2 control flow constructions are present.
Args:
session: Active TensorFlow session containing the variables.
graph_def: A GraphDef to convert.
output_node_names: List of name strings for the result nodes of the graph.
variable_names_allowlist: The set of variable names to convert (by default,
all variables are converted).
variable_names_denylist: The set of variable names to omit converting to
constants.
Returns:
An optimized GraphDef.
"""
graph_def, _ = _replace_variables_by_constants(
converter_data=_SessionConverterData(
session=session,
graph_def=graph_def,
output_node_names=output_node_names,
variable_names_allowlist=variable_names_allowlist,
variable_names_denylist=variable_names_denylist))
return graph_def
@deprecation.deprecated(
date=None,
instructions="This API was designed for TensorFlow v1. See "
"https://www.tensorflow.org/guide/migrate for instructions on how to "
"migrate your code to TensorFlow v2."
)
@tf_export(v1=["graph_util.convert_variables_to_constants"])
def convert_variables_to_constants(sess,
input_graph_def,
output_node_names,
variable_names_whitelist=None,
variable_names_blacklist=None):
"""Replaces all the variables in a graph with constants of the same values.
If you have a trained graph containing Variable ops, it can be convenient to
convert them all to Const ops holding the same values. This makes it possible
to describe the network fully with a single GraphDef file, and allows the
removal of a lot of ops related to loading and saving the variables.
Args:
sess: Active TensorFlow session containing the variables.
input_graph_def: GraphDef object holding the network.
output_node_names: List of name strings for the result nodes of the graph.
variable_names_whitelist: The set of variable names to convert (by default,
all variables are converted).
variable_names_blacklist: The set of variable names to omit converting to
constants.
Returns:
GraphDef containing a simplified version of the original.
Raises:
RuntimeError: if a DT_RESOURCE op is found whose ancestor Variables are both
denylisted AND whitelisted for freezing.
"""
ret = convert_variables_to_constants_from_session_graph(
session=sess,
graph_def=input_graph_def,
output_node_names=output_node_names,
variable_names_allowlist=variable_names_whitelist,
variable_names_denylist=variable_names_blacklist)
return ret
| _SessionConverterData |
python | psf__black | tests/data/cases/comments4.py | {
"start": 297,
"end": 3532
} | class ____:
@pytest.mark.parametrize(
("post_data", "message"),
[
# metadata_version errors.
(
{},
"None is an invalid value for Metadata-Version. Error: This field is"
" required. see"
" https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "-1"},
"'-1' is an invalid value for Metadata-Version. Error: Unknown Metadata"
" Version see"
" https://packaging.python.org/specifications/core-metadata",
),
# name errors.
(
{"metadata_version": "1.2"},
"'' is an invalid value for Name. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "1.2", "name": "foo-"},
"'foo-' is an invalid value for Name. Error: Must start and end with a"
" letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata",
),
# version errors.
(
{"metadata_version": "1.2", "name": "example"},
"'' is an invalid value for Version. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "1.2", "name": "example", "version": "dog"},
"'dog' is an invalid value for Version. Error: Must start and end with"
" a letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata",
),
],
)
def test_fails_invalid_post_data(
self, pyramid_config, db_request, post_data, message
):
pyramid_config.testing_securitypolicy(userid=1)
db_request.POST = MultiDict(post_data)
def foo(list_a, list_b):
results = (
User.query.filter(User.foo == "bar")
.filter( # Because foo.
db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
)
.filter(User.xyz.is_(None))
# Another comment about the filtering on is_quux goes here.
.filter(db.not_(User.is_pending.astext.cast(db.Boolean).is_(True)))
.order_by(User.created_at.desc())
.with_for_update(key_share=True)
.all()
)
return results
def foo2(list_a, list_b):
# Standalone comment reasonably placed.
return (
User.query.filter(User.foo == "bar")
.filter(
db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
)
.filter(User.xyz.is_(None))
)
def foo3(list_a, list_b):
return (
# Standalone comment but weirdly placed.
User.query.filter(User.foo == "bar")
.filter(
db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
)
.filter(User.xyz.is_(None))
)
| C |
python | crytic__slither | slither/vyper_parsing/declarations/function.py | {
"start": 1322,
"end": 23381
} | class ____: # pylint: disable=too-many-instance-attributes
def __init__(
self,
function: Function,
function_data: FunctionDef,
contract_parser: "ContractVyper",
) -> None:
self._function = function
self._function.name = function_data.name
self._function.id = function_data.node_id
self._functionNotParsed = function_data
self._decoratorNotParsed = None
self._local_variables_parser: List[LocalVariableVyper] = []
self._variables_renamed = []
self._contract_parser = contract_parser
self._node_to_NodeVyper: Dict[Node, NodeVyper] = {}
for decorator in function_data.decorators:
if isinstance(decorator, Call):
# TODO handle multiple
self._decoratorNotParsed = decorator
elif isinstance(decorator, Name):
if decorator.id in ["external", "public", "internal"]:
self._function.visibility = decorator.id
elif decorator.id == "view":
self._function.view = True
elif decorator.id == "pure":
self._function.pure = True
elif decorator.id == "payable":
self._function.payable = True
elif decorator.id == "nonpayable":
self._function.payable = False
else:
raise ValueError(f"Unknown decorator {decorator.id}")
# Interfaces do not have decorators and are external
if self._function._visibility is None:
self._function.visibility = "external"
self._params_was_analyzed = False
self._content_was_analyzed = False
self._counter_scope_local_variables = 0
if function_data.doc_string is not None:
function.has_documentation = True
self._analyze_function_type()
@property
def underlying_function(self) -> Function:
return self._function
@property
def compilation_unit(self) -> "SlitherCompilationUnit":
return self._function.compilation_unit
###################################################################################
###################################################################################
# region Variables
###################################################################################
###################################################################################
@property
def variables_renamed(
self,
) -> Dict[int, LocalVariableVyper]:
return self._variables_renamed
def _add_local_variable(self, local_var_parser: LocalVariableVyper) -> None:
# Ensure variables name are unique for SSA conversion
# This should not apply to actual Vyper variables currently
# but is necessary if we have nested loops where we've created artificial variables e.g. counter_var
if local_var_parser.underlying_variable.name:
known_variables = [v.name for v in self._function.variables]
while local_var_parser.underlying_variable.name in known_variables:
local_var_parser.underlying_variable.name += (
f"_scope_{self._counter_scope_local_variables}"
)
self._counter_scope_local_variables += 1
known_variables = [v.name for v in self._function.variables]
# TODO no reference ID
# if local_var_parser.reference_id is not None:
# self._variables_renamed[local_var_parser.reference_id] = local_var_parser
self._function.variables_as_dict[
local_var_parser.underlying_variable.name
] = local_var_parser.underlying_variable
self._local_variables_parser.append(local_var_parser)
# endregion
###################################################################################
###################################################################################
# region Analyses
###################################################################################
###################################################################################
@property
def function_not_parsed(self) -> Dict:
return self._functionNotParsed
def _analyze_function_type(self) -> None:
if self._function.name == "__init__":
self._function.function_type = FunctionType.CONSTRUCTOR
elif self._function.name == "__default__":
self._function.function_type = FunctionType.FALLBACK
else:
self._function.function_type = FunctionType.NORMAL
def analyze_params(self) -> None:
if self._params_was_analyzed:
return
self._params_was_analyzed = True
params = self._functionNotParsed.args
returns = self._functionNotParsed.returns
if params:
self._parse_params(params)
if returns:
self._parse_returns(returns)
def analyze_content(self) -> None:
if self._content_was_analyzed:
return
self._content_was_analyzed = True
body = self._functionNotParsed.body
if body and not isinstance(body[0], Pass):
self._function.is_implemented = True
self._function.is_empty = False
self._parse_cfg(body)
self._update_reachability(self._function.entry_point)
else:
self._function.is_implemented = False
self._function.is_empty = True
for local_var_parser in self._local_variables_parser:
local_var_parser.analyze(self._function)
for node_parser in self._node_to_NodeVyper.values():
node_parser.analyze_expressions(self._function)
self._analyze_decorator()
def _analyze_decorator(self) -> None:
if not self._decoratorNotParsed:
return
decorator = self._decoratorNotParsed
if decorator.args:
name = f"{decorator.func.id}({decorator.args[0].value})"
else:
name = decorator.func.id
contract = self._contract_parser.underlying_contract
compilation_unit = self._contract_parser.underlying_contract.compilation_unit
modifier = Modifier(compilation_unit)
modifier.name = name
modifier.set_offset(decorator.src, compilation_unit)
modifier.set_contract(contract)
modifier.set_contract_declarer(contract)
latest_entry_point = self._function.entry_point
self._function.add_modifier(
ModifierStatements(
modifier=modifier,
entry_point=latest_entry_point,
nodes=[latest_entry_point],
)
)
# endregion
###################################################################################
###################################################################################
# region Nodes
###################################################################################
###################################################################################
def _new_node(
self, node_type: NodeType, src: Union[str, Source], scope: Union[Scope, "Function"]
) -> NodeVyper:
node = self._function.new_node(node_type, src, scope)
node_parser = NodeVyper(node)
self._node_to_NodeVyper[node] = node_parser
return node_parser
# endregion
###################################################################################
###################################################################################
# region Parsing function
###################################################################################
###################################################################################
def _update_reachability(self, node: Node) -> None:
if node.is_reachable:
return
node.set_is_reachable(True)
for son in node.sons:
self._update_reachability(son)
# pylint: disable=too-many-branches,too-many-statements,protected-access,too-many-locals
def _parse_cfg(self, cfg: List[ASTNode]) -> None:
entry_node = self._new_node(NodeType.ENTRYPOINT, "-1:-1:-1", self.underlying_function)
self._function.entry_point = entry_node.underlying_node
scope = Scope(True, False, self.underlying_function)
def parse_statement(
curr_node: NodeVyper,
expr: ASTNode,
continue_destination=None,
break_destination=None,
) -> NodeVyper:
if isinstance(expr, AnnAssign):
local_var = LocalVariable()
local_var.set_function(self._function)
local_var.set_offset(expr.src, self._function.compilation_unit)
local_var_parser = LocalVariableVyper(local_var, expr)
self._add_local_variable(local_var_parser)
new_node = self._new_node(NodeType.VARIABLE, expr.src, scope)
if expr.value is not None:
local_var.initialized = True
new_node.add_unparsed_expression(expr.value)
new_node.underlying_node.add_variable_declaration(local_var)
link_underlying_nodes(curr_node, new_node)
curr_node = new_node
elif isinstance(expr, (AugAssign, Assign)):
new_node = self._new_node(NodeType.EXPRESSION, expr.src, scope)
new_node.add_unparsed_expression(expr)
link_underlying_nodes(curr_node, new_node)
curr_node = new_node
elif isinstance(expr, Expr):
# TODO This is a workaround to handle Vyper putting payable/view in the function body... https://github.com/vyperlang/vyper/issues/3578
if not isinstance(expr.value, Name):
new_node = self._new_node(NodeType.EXPRESSION, expr.src, scope)
new_node.add_unparsed_expression(expr.value)
link_underlying_nodes(curr_node, new_node)
curr_node = new_node
elif isinstance(expr, For):
node_startLoop = self._new_node(NodeType.STARTLOOP, expr.src, scope)
node_endLoop = self._new_node(NodeType.ENDLOOP, expr.src, scope)
link_underlying_nodes(curr_node, node_startLoop)
local_var = LocalVariable()
local_var.set_function(self._function)
local_var.set_offset(expr.src, self._function.compilation_unit)
counter_var = AnnAssign(
expr.target.src,
expr.target.node_id,
target=Name("-1:-1:-1", -1, "counter_var"),
annotation=Name("-1:-1:-1", -1, "uint256"),
value=Int("-1:-1:-1", -1, 0),
)
local_var_parser = LocalVariableVyper(local_var, counter_var)
self._add_local_variable(local_var_parser)
counter_node = self._new_node(NodeType.VARIABLE, expr.src, scope)
local_var.initialized = True
counter_node.add_unparsed_expression(counter_var.value)
counter_node.underlying_node.add_variable_declaration(local_var)
link_underlying_nodes(node_startLoop, counter_node)
node_condition = None
if isinstance(expr.iter, (Attribute, Name)):
# HACK
# The loop variable is not annotated so we infer its type by looking at the type of the iterator
if isinstance(expr.iter, Attribute): # state variable
iter_expr = expr.iter
loop_iterator = list(
filter(
lambda x: x._variable.name == iter_expr.attr,
self._contract_parser._variables_parser,
)
)[0]
else: # local variable
iter_expr = expr.iter
loop_iterator = list(
filter(
lambda x: x._variable.name == iter_expr.id,
self._local_variables_parser,
)
)[0]
# TODO use expr.src instead of -1:-1:1?
cond_expr = Compare(
"-1:-1:-1",
-1,
left=Name("-1:-1:-1", -1, "counter_var"),
op="<=",
right=Call(
"-1:-1:-1",
-1,
func=Name("-1:-1:-1", -1, "len"),
args=[iter_expr],
keywords=[],
keyword=None,
),
)
node_condition = self._new_node(NodeType.IFLOOP, expr.src, scope)
node_condition.add_unparsed_expression(cond_expr)
if loop_iterator._elem_to_parse.value.id == "DynArray":
loop_var_annotation = loop_iterator._elem_to_parse.slice.value.elements[0]
else:
loop_var_annotation = loop_iterator._elem_to_parse.value
value = Subscript(
"-1:-1:-1",
-1,
value=Name("-1:-1:-1", -1, loop_iterator._variable.name),
slice=Index("-1:-1:-1", -1, value=Name("-1:-1:-1", -1, "counter_var")),
)
loop_var = AnnAssign(
expr.target.src,
expr.target.node_id,
target=expr.target,
annotation=loop_var_annotation,
value=value,
)
elif isinstance(expr.iter, Call): # range
range_val = expr.iter.args[0]
cond_expr = Compare(
"-1:-1:-1",
-1,
left=Name("-1:-1:-1", -1, "counter_var"),
op="<=",
right=range_val,
)
node_condition = self._new_node(NodeType.IFLOOP, expr.src, scope)
node_condition.add_unparsed_expression(cond_expr)
loop_var = AnnAssign(
expr.target.src,
expr.target.node_id,
target=expr.target,
annotation=Name("-1:-1:-1", -1, "uint256"),
value=Name("-1:-1:-1", -1, "counter_var"),
)
else:
raise NotImplementedError
# After creating condition node, we link it declaration of the loop variable
link_underlying_nodes(counter_node, node_condition)
# Create an expression for the loop increment (counter_var += 1)
loop_increment = AugAssign(
"-1:-1:-1",
-1,
target=Name("-1:-1:-1", -1, "counter_var"),
op="+=",
value=Int("-1:-1:-1", -1, 1),
)
node_increment = self._new_node(NodeType.EXPRESSION, expr.src, scope)
node_increment.add_unparsed_expression(loop_increment)
link_underlying_nodes(node_increment, node_condition)
continue_destination = node_increment
break_destination = node_endLoop
# We assign the index variable or range variable in the loop body on each iteration
expr.body.insert(0, loop_var)
body_node = None
new_node = node_condition
for stmt in expr.body:
body_node = parse_statement(
new_node, stmt, continue_destination, break_destination
)
new_node = body_node
if body_node is not None:
link_underlying_nodes(body_node, node_increment)
link_underlying_nodes(node_condition, node_endLoop)
curr_node = node_endLoop
elif isinstance(expr, Continue):
new_node = self._new_node(NodeType.CONTINUE, expr.src, scope)
link_underlying_nodes(curr_node, new_node)
link_underlying_nodes(new_node, continue_destination)
elif isinstance(expr, Break):
new_node = self._new_node(NodeType.BREAK, expr.src, scope)
link_underlying_nodes(curr_node, new_node)
link_underlying_nodes(new_node, break_destination)
elif isinstance(expr, Return):
new_node = self._new_node(NodeType.RETURN, expr.src, scope)
if expr.value is not None:
new_node.add_unparsed_expression(expr.value)
link_underlying_nodes(curr_node, new_node)
curr_node = new_node
elif isinstance(expr, Assert):
new_node = self._new_node(NodeType.EXPRESSION, expr.src, scope)
new_node.add_unparsed_expression(expr)
link_underlying_nodes(curr_node, new_node)
curr_node = new_node
elif isinstance(expr, Log):
new_node = self._new_node(NodeType.EXPRESSION, expr.src, scope)
new_node.add_unparsed_expression(expr.value)
link_underlying_nodes(curr_node, new_node)
curr_node = new_node
elif isinstance(expr, If):
condition_node = self._new_node(NodeType.IF, expr.test.src, scope)
condition_node.add_unparsed_expression(expr.test)
endIf_node = self._new_node(NodeType.ENDIF, expr.src, scope)
true_node = None
new_node = condition_node
for stmt in expr.body:
true_node = parse_statement(
new_node, stmt, continue_destination, break_destination
)
new_node = true_node
link_underlying_nodes(true_node, endIf_node)
false_node = None
new_node = condition_node
for stmt in expr.orelse:
false_node = parse_statement(
new_node, stmt, continue_destination, break_destination
)
new_node = false_node
if false_node is not None:
link_underlying_nodes(false_node, endIf_node)
else:
link_underlying_nodes(condition_node, endIf_node)
link_underlying_nodes(curr_node, condition_node)
curr_node = endIf_node
elif isinstance(expr, Pass):
pass
elif isinstance(expr, Raise):
new_node = self._new_node(NodeType.EXPRESSION, expr.src, scope)
new_node.add_unparsed_expression(expr)
link_underlying_nodes(curr_node, new_node)
curr_node = new_node
else:
raise ParsingError(f"Statement not parsed {expr.__class__.__name__} {expr}")
return curr_node
curr_node = entry_node
for expr in cfg:
curr_node = parse_statement(curr_node, expr)
# endregion
###################################################################################
###################################################################################
def _add_param(self, param: Arg, initialized: bool = False) -> LocalVariableVyper:
local_var = LocalVariable()
local_var.set_function(self._function)
local_var.set_offset(param.src, self._function.compilation_unit)
local_var_parser = LocalVariableVyper(local_var, param)
if initialized:
local_var.initialized = True
if local_var.location == "default":
local_var.set_location("memory")
self._add_local_variable(local_var_parser)
return local_var_parser
def _parse_params(self, params: Arguments):
self._function.parameters_src().set_offset(params.src, self._function.compilation_unit)
if params.defaults:
self._function._default_args_as_expressions = params.defaults
for param in params.args:
local_var = self._add_param(param)
self._function.add_parameters(local_var.underlying_variable)
def _parse_returns(self, returns: Union[Name, TupleVyper, Subscript]):
self._function.returns_src().set_offset(returns.src, self._function.compilation_unit)
# Only the type of the arg is given, not a name. We create an `Arg` with an empty name
# so that the function has the correct return type in its signature but doesn't clash with
# other identifiers during name resolution (`find_variable`).
if isinstance(returns, (Name, Subscript)):
local_var = self._add_param(Arg(returns.src, returns.node_id, "", annotation=returns))
self._function.add_return(local_var.underlying_variable)
else:
assert isinstance(returns, TupleVyper)
for ret in returns.elements:
local_var = self._add_param(Arg(ret.src, ret.node_id, "", annotation=ret))
self._function.add_return(local_var.underlying_variable)
###################################################################################
###################################################################################
| FunctionVyper |
python | pypa__pipenv | pipenv/vendor/click/utils.py | {
"start": 5597,
"end": 16141
} | class ____:
def __init__(self, file: t.IO[t.Any]) -> None:
self._file: t.IO[t.Any] = file
def __getattr__(self, name: str) -> t.Any:
return getattr(self._file, name)
def __enter__(self) -> "KeepOpenFile":
return self
def __exit__(
self,
exc_type: t.Optional[t.Type[BaseException]],
exc_value: t.Optional[BaseException],
tb: t.Optional[TracebackType],
) -> None:
pass
def __repr__(self) -> str:
return repr(self._file)
def __iter__(self) -> t.Iterator[t.AnyStr]:
return iter(self._file)
def echo(
message: t.Optional[t.Any] = None,
file: t.Optional[t.IO[t.Any]] = None,
nl: bool = True,
err: bool = False,
color: t.Optional[bool] = None,
) -> None:
"""Print a message and newline to stdout or a file. This should be
used instead of :func:`print` because it provides better support
for different data, files, and environments.
Compared to :func:`print`, this does the following:
- Ensures that the output encoding is not misconfigured on Linux.
- Supports Unicode in the Windows console.
- Supports writing to binary outputs, and supports writing bytes
to text outputs.
- Supports colors and styles on Windows.
- Removes ANSI color and style codes if the output does not look
like an interactive terminal.
- Always flushes the output.
:param message: The string or bytes to output. Other objects are
converted to strings.
:param file: The file to write to. Defaults to ``stdout``.
:param err: Write to ``stderr`` instead of ``stdout``.
:param nl: Print a newline after the message. Enabled by default.
:param color: Force showing or hiding colors and other styles. By
default Click will remove color if the output does not look like
an interactive terminal.
.. versionchanged:: 6.0
Support Unicode output on the Windows console. Click does not
modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()``
will still not support Unicode.
.. versionchanged:: 4.0
Added the ``color`` parameter.
.. versionadded:: 3.0
Added the ``err`` parameter.
.. versionchanged:: 2.0
Support colors on Windows if colorama is installed.
"""
if file is None:
if err:
file = _default_text_stderr()
else:
file = _default_text_stdout()
# There are no standard streams attached to write to. For example,
# pythonw on Windows.
if file is None:
return
# Convert non bytes/text into the native string type.
if message is not None and not isinstance(message, (str, bytes, bytearray)):
out: t.Optional[t.Union[str, bytes]] = str(message)
else:
out = message
if nl:
out = out or ""
if isinstance(out, str):
out += "\n"
else:
out += b"\n"
if not out:
file.flush()
return
# If there is a message and the value looks like bytes, we manually
# need to find the binary stream and write the message in there.
# This is done separately so that most stream types will work as you
# would expect. Eg: you can write to StringIO for other cases.
if isinstance(out, (bytes, bytearray)):
binary_file = _find_binary_writer(file)
if binary_file is not None:
file.flush()
binary_file.write(out)
binary_file.flush()
return
# ANSI style code support. For no message or bytes, nothing happens.
# When outputting to a file instead of a terminal, strip codes.
else:
color = resolve_color_default(color)
if should_strip_ansi(file, color):
out = strip_ansi(out)
elif WIN:
if auto_wrap_for_ansi is not None:
file = auto_wrap_for_ansi(file) # type: ignore
elif not color:
out = strip_ansi(out)
file.write(out) # type: ignore
file.flush()
def get_binary_stream(name: "te.Literal['stdin', 'stdout', 'stderr']") -> t.BinaryIO:
"""Returns a system stream for byte processing.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
"""
opener = binary_streams.get(name)
if opener is None:
raise TypeError(f"Unknown standard stream '{name}'")
return opener()
def get_text_stream(
name: "te.Literal['stdin', 'stdout', 'stderr']",
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
) -> t.TextIO:
"""Returns a system stream for text processing. This usually returns
a wrapped stream around a binary stream returned from
:func:`get_binary_stream` but it also can take shortcuts for already
correctly configured streams.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
:param encoding: overrides the detected default encoding.
:param errors: overrides the default error mode.
"""
opener = text_streams.get(name)
if opener is None:
raise TypeError(f"Unknown standard stream '{name}'")
return opener(encoding, errors)
def open_file(
filename: str,
mode: str = "r",
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
lazy: bool = False,
atomic: bool = False,
) -> t.IO[t.Any]:
"""Open a file, with extra behavior to handle ``'-'`` to indicate
a standard stream, lazy open on write, and atomic write. Similar to
the behavior of the :class:`~click.File` param type.
If ``'-'`` is given to open ``stdout`` or ``stdin``, the stream is
wrapped so that using it in a context manager will not close it.
This makes it possible to use the function without accidentally
closing a standard stream:
.. code-block:: python
with open_file(filename) as f:
...
:param filename: The name of the file to open, or ``'-'`` for
``stdin``/``stdout``.
:param mode: The mode in which to open the file.
:param encoding: The encoding to decode or encode a file opened in
text mode.
:param errors: The error handling mode.
:param lazy: Wait to open the file until it is accessed. For read
mode, the file is temporarily opened to raise access errors
early, then closed until it is read again.
:param atomic: Write to a temporary file and replace the given file
on close.
.. versionadded:: 3.0
"""
if lazy:
return t.cast(
t.IO[t.Any], LazyFile(filename, mode, encoding, errors, atomic=atomic)
)
f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
if not should_close:
f = t.cast(t.IO[t.Any], KeepOpenFile(f))
return f
def format_filename(
filename: "t.Union[str, bytes, os.PathLike[str], os.PathLike[bytes]]",
shorten: bool = False,
) -> str:
"""Format a filename as a string for display. Ensures the filename can be
displayed by replacing any invalid bytes or surrogate escapes in the name
with the replacement character ``�``.
Invalid bytes or surrogate escapes will raise an error when written to a
stream with ``errors="strict". This will typically happen with ``stdout``
when the locale is something like ``en_GB.UTF-8``.
Many scenarios *are* safe to write surrogates though, due to PEP 538 and
PEP 540, including:
- Writing to ``stderr``, which uses ``errors="backslashreplace"``.
- The system has ``LANG=C.UTF-8``, ``C``, or ``POSIX``. Python opens
stdout and stderr with ``errors="surrogateescape"``.
- None of ``LANG/LC_*`` are set. Python assumes ``LANG=C.UTF-8``.
- Python is started in UTF-8 mode with ``PYTHONUTF8=1`` or ``-X utf8``.
Python opens stdout and stderr with ``errors="surrogateescape"``.
:param filename: formats a filename for UI display. This will also convert
the filename into unicode without failing.
:param shorten: this optionally shortens the filename to strip of the
path that leads up to it.
"""
if shorten:
filename = os.path.basename(filename)
else:
filename = os.fspath(filename)
if isinstance(filename, bytes):
filename = filename.decode(sys.getfilesystemencoding(), "replace")
else:
filename = filename.encode("utf-8", "surrogateescape").decode(
"utf-8", "replace"
)
return filename
def get_app_dir(app_name: str, roaming: bool = True, force_posix: bool = False) -> str:
r"""Returns the config folder for the application. The default behavior
is to return whatever is most appropriate for the operating system.
To give you an idea, for an app called ``"Foo Bar"``, something like
the following folders could be returned:
Mac OS X:
``~/Library/Application Support/Foo Bar``
Mac OS X (POSIX):
``~/.foo-bar``
Unix:
``~/.config/foo-bar``
Unix (POSIX):
``~/.foo-bar``
Windows (roaming):
``C:\Users\<user>\AppData\Roaming\Foo Bar``
Windows (not roaming):
``C:\Users\<user>\AppData\Local\Foo Bar``
.. versionadded:: 2.0
:param app_name: the application name. This should be properly capitalized
and can contain whitespace.
:param roaming: controls if the folder should be roaming or not on Windows.
Has no effect otherwise.
:param force_posix: if this is set to `True` then on any POSIX system the
folder will be stored in the home folder with a leading
dot instead of the XDG config home or darwin's
application support folder.
"""
if WIN:
key = "APPDATA" if roaming else "LOCALAPPDATA"
folder = os.environ.get(key)
if folder is None:
folder = os.path.expanduser("~")
return os.path.join(folder, app_name)
if force_posix:
return os.path.join(os.path.expanduser(f"~/.{_posixify(app_name)}"))
if sys.platform == "darwin":
return os.path.join(
os.path.expanduser("~/Library/Application Support"), app_name
)
return os.path.join(
os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")),
_posixify(app_name),
)
| KeepOpenFile |
python | plotly__plotly.py | plotly/graph_objs/contour/contours/_labelfont.py | {
"start": 233,
"end": 10060
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contour.contours"
_path_str = "contour.contours.labelfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Labelfont object
Sets the font used for labeling the contour levels. The default
color comes from the lines, if shown. The default family and
size come from `layout.font`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.contours.Labelfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Labelfont
"""
super().__init__("labelfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contour.contours.Labelfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.contours.Labelfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Labelfont |
python | falconry__falcon | tests/test_middleware.py | {
"start": 32126,
"end": 35473
} | class ____(TestMiddleware):
def setup_method(self, method):
super().setup_method(method)
def _make_client(self, asgi, util, independent_middleware=True):
mw = [
RequestTimeMiddleware(),
ResponseCacheMiddleware(),
TransactionIdMiddleware(),
]
app = util.create_app(
asgi, middleware=mw, independent_middleware=independent_middleware
)
app.add_route('/', MiddlewareClassResource())
app.add_route('/cached', MiddlewareClassResource())
app.add_route('/cached/resource', MiddlewareClassResource())
return testing.TestClient(app)
def test_process_request_not_cached(self, asgi, util):
response = self._make_client(asgi, util).simulate_get('/')
assert response.status == falcon.HTTP_200
assert response.json == _EXPECTED_BODY
assert 'transaction_id' in context
assert 'resource_transaction_id' in context
assert 'mid_time' in context
assert 'end_time' in context
@pytest.mark.parametrize('independent_middleware', [True, False])
def test_process_request_cached(self, asgi, util, independent_middleware):
response = self._make_client(asgi, util, independent_middleware).simulate_get(
'/cached'
)
assert response.status == falcon.HTTP_200
assert response.json == ResponseCacheMiddleware.PROCESS_REQUEST_CACHED_BODY
# NOTE(kgriffs): Since TransactionIdMiddleware was ordered after
# ResponseCacheMiddleware, the response short-circuiting should have
# skipped it.
assert 'transaction_id' not in context
assert 'resource_transaction_id' not in context
# NOTE(kgriffs): RequestTimeMiddleware only adds this in
# process_resource(), which should be skipped when
# ResponseCacheMiddleware sets resp.completed = True in
# process_request().
assert 'mid_time' not in context
# NOTE(kgriffs): Short-circuiting does not affect process_response()
assert 'end_time' in context
@pytest.mark.parametrize('independent_middleware', [True, False])
def test_process_resource_cached(self, asgi, util, independent_middleware):
response = self._make_client(asgi, util, independent_middleware).simulate_get(
'/cached/resource'
)
assert response.status == falcon.HTTP_200
assert response.json == ResponseCacheMiddleware.PROCESS_RESOURCE_CACHED_BODY
# NOTE(kgriffs): This should be present because it is added in
# process_request(), but the short-circuit does not occur until
# process_resource().
assert 'transaction_id' in context
# NOTE(kgriffs): Since TransactionIdMiddleware was ordered after
# ResponseCacheMiddleware, the response short-circuiting should have
# skipped it.
assert 'resource_transaction_id' not in context
# NOTE(kgriffs): RequestTimeMiddleware only adds this in
# process_resource(), which will not be skipped in this case because
# RequestTimeMiddleware is ordered before ResponseCacheMiddleware.
assert 'mid_time' in context
# NOTE(kgriffs): Short-circuiting does not affect process_response()
assert 'end_time' in context
| TestShortCircuiting |
python | doocs__leetcode | solution/1000-1099/1086.High Five/Solution.py | {
"start": 0,
"end": 389
} | class ____:
def highFive(self, items: List[List[int]]) -> List[List[int]]:
d = defaultdict(list)
m = 0
for i, x in items:
d[i].append(x)
m = max(m, i)
ans = []
for i in range(1, m + 1):
if xs := d[i]:
avg = sum(nlargest(5, xs)) // 5
ans.append([i, avg])
return ans
| Solution |
python | cython__cython | tests/run/dict_setdefault.py | {
"start": 250,
"end": 3137
} | class ____(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
@cython.test_fail_if_path_exists('//AttributeNode')
@cython.test_assert_path_exists('//PythonCapiCallNode')
@cython.locals(d=dict)
def setdefault1(d, key):
"""
>>> d = {}
>>> setdefault1(d, 1)
>>> len(d)
1
>>> setdefault1(d, 1)
>>> len(d)
1
>>> d[1]
>>> setdefault1(d, Unhashable()) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...I am not hashable...
>>> len(d)
1
>>> h1 = setdefault1(d, Hashable())
>>> len(d)
2
>>> h2 = setdefault1(d, Hashable())
>>> len(d)
2
>>> d[Hashable()]
# CPython's behaviour depends on version and py_debug setting, so just compare to it
>>> py_hashed1 = CountedHashable()
>>> y = {py_hashed1: 5}
>>> py_hashed2 = CountedHashable()
>>> y.setdefault(py_hashed2)
>>> cy_hashed1 = CountedHashable()
>>> y = {cy_hashed1: 5}
>>> cy_hashed2 = CountedHashable()
>>> setdefault1(y, cy_hashed2)
>>> py_hashed1.hash_count - cy_hashed1.hash_count
0
>>> py_hashed2.hash_count - cy_hashed2.hash_count
0
>>> (py_hashed1.eq_count + py_hashed2.eq_count) - (cy_hashed1.eq_count + cy_hashed2.eq_count)
0
"""
return d.setdefault(key)
@cython.test_fail_if_path_exists('//AttributeNode')
@cython.test_assert_path_exists('//PythonCapiCallNode')
@cython.locals(d=dict)
def setdefault2(d, key, value):
"""
>>> d = {}
>>> setdefault2(d, 1, 2)
2
>>> len(d)
1
>>> setdefault2(d, 1, 2)
2
>>> len(d)
1
>>> l = setdefault2(d, 2, [])
>>> len(d)
2
>>> l.append(1)
>>> setdefault2(d, 2, [])
[1]
>>> len(d)
2
>>> setdefault2(d, Unhashable(), 1) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...I am not hashable...
>>> h1 = setdefault2(d, Hashable(), 55)
>>> len(d)
3
>>> h2 = setdefault2(d, Hashable(), 66)
>>> len(d)
3
>>> d[Hashable()]
55
# CPython's behaviour depends on version and py_debug setting, so just compare to it
>>> py_hashed1 = CountedHashable()
>>> y = {py_hashed1: 5}
>>> py_hashed2 = CountedHashable()
>>> y.setdefault(py_hashed2, [])
[]
>>> cy_hashed1 = CountedHashable()
>>> y = {cy_hashed1: 5}
>>> cy_hashed2 = CountedHashable()
>>> setdefault2(y, cy_hashed2, [])
[]
>>> py_hashed1.hash_count - cy_hashed1.hash_count
0
>>> py_hashed2.hash_count - cy_hashed2.hash_count
0
>>> (py_hashed1.eq_count + py_hashed2.eq_count) - (cy_hashed1.eq_count + cy_hashed2.eq_count)
0
"""
return d.setdefault(key, value)
| CountedHashable |
python | zostera__django-bootstrap4 | tests/test_utils.py | {
"start": 143,
"end": 1224
} | class ____(TestCase):
def test_add_css_class(self):
css_classes = "one two"
css_class = "three four"
classes = add_css_class(css_classes, css_class)
self.assertEqual(classes, "one two three four")
classes = add_css_class(css_classes, css_class, prepend=True)
self.assertEqual(classes, "three four one two")
def test_text_value(self):
self.assertEqual(text_value(""), "")
self.assertEqual(text_value(" "), " ")
self.assertEqual(text_value(None), "")
self.assertEqual(text_value(1), "1")
def test_text_concat(self):
self.assertEqual(text_concat(1, 2), "12")
self.assertEqual(text_concat(1, 2, separator="="), "1=2")
self.assertEqual(text_concat(None, 2, separator="="), "2")
def test_render_tag(self):
self.assertEqual(render_tag("span"), "<span></span>")
self.assertEqual(render_tag("span", content="foo"), "<span>foo</span>")
self.assertEqual(render_tag("span", attrs={"bar": 123}, content="foo"), '<span bar="123">foo</span>')
| UtilsTest |
python | django-haystack__django-haystack | test_haystack/multipleindex/routers.py | {
"start": 42,
"end": 225
} | class ____(BaseRouter):
def for_write(self, instance=None, **hints):
if instance and instance._meta.app_label == "multipleindex":
return "solr"
| MultipleIndexRouter |
python | walkccc__LeetCode | solutions/54. Spiral Matrix/54.py | {
"start": 0,
"end": 810
} | class ____:
def spiralOrder(self, matrix: list[list[int]]) -> list[int]:
if not matrix:
return []
m = len(matrix)
n = len(matrix[0])
ans = []
r1 = 0
c1 = 0
r2 = m - 1
c2 = n - 1
# Repeatedly add matrix[r1..r2][c1..c2] to `ans`.
while len(ans) < m * n:
j = c1
while j <= c2 and len(ans) < m * n:
ans.append(matrix[r1][j])
j += 1
i = r1 + 1
while i <= r2 - 1 and len(ans) < m * n:
ans.append(matrix[i][c2])
i += 1
j = c2
while j >= c1 and len(ans) < m * n:
ans.append(matrix[r2][j])
j -= 1
i = r2 - 1
while i >= r1 + 1 and len(ans) < m * n:
ans.append(matrix[i][c1])
i -= 1
r1 += 1
c1 += 1
r2 -= 1
c2 -= 1
return ans
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/core/sensor/sensor_builder.py | {
"start": 2192,
"end": 2442
} | class ____:
"""A cursor that stores the last effective timestamp and the last polled dag id."""
end_date_gte: Optional[float] = None
end_date_lte: Optional[float] = None
dag_query_offset: Optional[int] = None
| AirflowPollingSensorCursor |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 32338,
"end": 35239
} | class ____(_Trigonometric1D):
"""
One dimensional Tangent model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Sine1D, Cosine1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p)
Note that the tangent function is undefined for inputs of the form
pi/2 + n*pi for all integers n. Thus thus the default bounding box
has been restricted to:
.. math:: [(-1/4 - p)/f, (1/4 - p)/f]
which is the smallest interval for the tangent function to be continuous
on.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Tangent1D
plt.figure()
s1 = Tangent1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Tangent model function."""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.tan(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Tangent model derivative."""
sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase)) ** 2
d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase)
d_frequency = TWOPI * x * amplitude * sec
d_phase = TWOPI * amplitude * sec
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Tangent."""
return ArcTangent1D(
amplitude=self.amplitude, frequency=self.frequency, phase=self.phase
)
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
"""
bbox = [
(-1 / 4 - self.phase) / self.frequency,
(1 / 4 - self.phase) / self.frequency,
]
if self.frequency.unit is not None:
bbox = bbox / self.frequency.unit
return bbox
| Tangent1D |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/gcs.py | {
"start": 9804,
"end": 13760
} | class ____(BaseSensorOperator):
"""
Checks for the existence of GCS objects at a given prefix, passing matches via XCom.
When files matching the given prefix are found, the poke method's criteria will be
fulfilled and the matching objects will be returned from the operator and passed
through XCom for downstream tasks.
:param bucket: The Google Cloud Storage bucket where the object is.
:param prefix: The name of the prefix to check in the Google cloud
storage bucket.
:param google_cloud_conn_id: The connection ID to use when
connecting to Google Cloud Storage.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run sensor in deferrable mode
"""
template_fields: Sequence[str] = (
"bucket",
"prefix",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
bucket: str,
prefix: str,
google_cloud_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
self.google_cloud_conn_id = google_cloud_conn_id
self._matches: list[str] = []
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
def poke(self, context: Context) -> bool:
self.log.info("Checking for existence of object: %s, %s", self.bucket, self.prefix)
hook = GCSHook(
gcp_conn_id=self.google_cloud_conn_id,
impersonation_chain=self.impersonation_chain,
)
self._matches = hook.list(self.bucket, prefix=self.prefix)
return bool(self._matches)
def execute(self, context: Context):
"""Overridden to allow matches to be passed."""
self.log.info("Checking for existence of object: %s, %s", self.bucket, self.prefix)
if not self.deferrable:
super().execute(context)
return self._matches
if not self.poke(context=context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=GCSPrefixBlobTrigger(
bucket=self.bucket,
prefix=self.prefix,
poke_interval=self.poke_interval,
google_cloud_conn_id=self.google_cloud_conn_id,
hook_params={
"impersonation_chain": self.impersonation_chain,
},
),
method_name="execute_complete",
)
else:
return self._matches
def execute_complete(self, context: dict[str, Any], event: dict[str, str | list[str]]) -> str | list[str]:
"""Return immediately and rely on trigger to throw a success event. Callback for the trigger."""
self.log.info("Resuming from trigger and checking status")
if event["status"] == "success":
return event["matches"]
raise AirflowException(event["message"])
def get_time():
"""Act as a wrapper of datetime.datetime.now to simplify mocking in the unittests."""
return datetime.now()
@poke_mode_only
| GCSObjectsWithPrefixExistenceSensor |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_test.py | {
"start": 1069,
"end": 5017
} | class ____(test.TestCase):
@classmethod
def setUpClass(cls): # pylint: disable=g-missing-super-call
cls._gpu_available = test_util.is_gpu_available()
@test_util.run_in_graph_and_eager_modes
def testConstructorFromSparseTensor(self):
if not self._gpu_available:
return
a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
a_values = [1.0, 5.0, -1.0, -2.0]
a_dense_shape = [5, 6]
a_st = sparse_tensor.SparseTensor(a_indices, a_values, a_dense_shape)
a_st = math_ops.cast(a_st, dtypes.float32)
a_sm = sparse_csr_matrix_ops.CSRSparseMatrix(a_st)
self.assertEqual(a_sm.shape, a_dense_shape)
a_st_rt = a_sm.to_sparse_tensor()
a_st_rt = self.evaluate(a_st_rt)
self.assertAllEqual(a_indices, a_st_rt.indices)
self.assertAllClose(a_values, a_st_rt.values)
self.assertAllEqual(a_dense_shape, a_st_rt.dense_shape)
@test_util.run_in_graph_and_eager_modes
def testConstructorFromDenseTensorNoIndices(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [5, 7, 13]
a_mats = sparsify(np.random.randn(*dense_shape)).astype(np.float32)
a_sm = sparse_csr_matrix_ops.CSRSparseMatrix(a_mats)
self.assertEqual(a_sm.shape, a_mats.shape)
a_sm_rt = a_sm.to_dense()
a_sm_nnz = a_sm.nnz()
a_sm_nnz, a_sm_rt = self.evaluate([a_sm_nnz, a_sm_rt])
# Count number of nonzero entries for each batch using bincount.
nz = np.bincount(a_mats.nonzero()[0], minlength=a_mats.shape[0])
self.assertAllEqual(nz, a_sm_nnz)
self.assertAllClose(a_mats, a_sm_rt)
@test_util.run_in_graph_and_eager_modes
def testConstructorFromDenseTensorWithIndices(self):
if not self._gpu_available:
return
dense_shape = [5, 7, 13]
a_mats = np.random.randn(*dense_shape).astype(np.float32)
indices = np.array([[0, 0, 0],
[1, 0, 0]], dtype=np.int64)
a_sm = sparse_csr_matrix_ops.CSRSparseMatrix(a_mats, indices=indices)
self.assertEqual(a_sm.shape, a_mats.shape)
a_sm_st = a_sm.to_sparse_tensor()
a_sm_st = self.evaluate(a_sm_st)
# Count number of nonzero entries for each batch using bincount.
self.assertAllEqual(indices, a_sm_st.indices)
self.assertAllEqual(dense_shape, a_sm.shape)
self.assertAllEqual(dense_shape, a_sm_st.dense_shape)
self.assertAllClose([a_mats[tuple(x)] for x in indices], a_sm_st.values)
@test_util.run_in_graph_and_eager_modes
def testConj(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m.real > 0)
dense_shape = [5, 7, 13]
a_mats = sparsify(
(np.random.randn(*dense_shape) + 1.j * np.random.randn(*dense_shape))
.astype(np.complex64))
a_sm = sparse_csr_matrix_ops.CSRSparseMatrix(a_mats)
a_sm_conj = a_sm.conj()
self.assertIsInstance(a_sm_conj, sparse_csr_matrix_ops.CSRSparseMatrix)
a_sm_conj_dense = a_sm_conj.to_dense()
a_sm_conj_dense = self.evaluate(a_sm_conj_dense)
self.assertAllClose(a_mats.conj(), a_sm_conj_dense)
@test_util.run_in_graph_and_eager_modes
def testTranspose(self):
if not self._gpu_available:
return
for conjugate in False, True:
sparsify = lambda m: m * (m > 0)
dense_shape = [5, 7, 13]
a_mats = sparsify((np.random.randn(*dense_shape) +
1.j * np.random.randn(*dense_shape))).astype(
np.complex64)
expected = np.transpose(a_mats, (0, 2, 1))
if conjugate:
expected = np.conj(expected)
a_sm = sparse_csr_matrix_ops.CSRSparseMatrix(a_mats)
if conjugate:
a_sm_t = a_sm.hermitian_transpose()
else:
a_sm_t = a_sm.transpose()
self.assertIsInstance(a_sm_t, sparse_csr_matrix_ops.CSRSparseMatrix)
a_sm_t_dense = a_sm_t.to_dense()
a_sm_t_dense = self.evaluate(a_sm_t_dense)
self.assertAllClose(expected, a_sm_t_dense)
| CSRSparseMatrixTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/eventbridge.py | {
"start": 3590,
"end": 6717
} | class ____(AwsBaseOperator[EventBridgeHook]):
"""
Create or update a specified EventBridge rule.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EventBridgePutRuleOperator`
:param name: name of the rule to create or update (required)
:param description: description of the rule
:param event_bus_name: name or ARN of the event bus to associate with this rule
:param event_pattern: pattern of events to be matched to this rule
:param role_arn: the Amazon Resource Name of the IAM role associated with the rule
:param schedule_expression: the scheduling expression (for example, a cron or rate expression)
:param state: indicates whether rule is set to be "ENABLED" or "DISABLED"
:param tags: list of key-value pairs to associate with the rule
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.htmlt
"""
aws_hook_class = EventBridgeHook
template_fields: Sequence[str] = aws_template_fields(
"name",
"description",
"event_bus_name",
"event_pattern",
"role_arn",
"schedule_expression",
"state",
"tags",
)
def __init__(
self,
*,
name: str,
description: str | None = None,
event_bus_name: str | None = None,
event_pattern: str | None = None,
role_arn: str | None = None,
schedule_expression: str | None = None,
state: str | None = None,
tags: list | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.name = name
self.description = description
self.event_bus_name = event_bus_name
self.event_pattern = event_pattern
self.role_arn = role_arn
self.schedule_expression = schedule_expression
self.state = state
self.tags = tags
def execute(self, context: Context):
self.log.info('Sending rule "%s" to EventBridge.', self.name)
return self.hook.put_rule(
name=self.name,
description=self.description,
event_bus_name=self.event_bus_name,
event_pattern=self.event_pattern,
role_arn=self.role_arn,
schedule_expression=self.schedule_expression,
state=self.state,
tags=self.tags,
)
| EventBridgePutRuleOperator |
python | scrapy__scrapy | tests/test_spidermiddleware_output_chain.py | {
"start": 4964,
"end": 5396
} | class ____(NotGeneratorCallbackSpider):
name = "NotGeneratorCallbackSpiderMiddlewareRightAfterSpider"
custom_settings = {
"SPIDER_MIDDLEWARES": {
LogExceptionMiddleware: 100000,
},
}
# ================================================================================
# (4) exceptions from a middleware process_spider_output method (generator)
| NotGeneratorCallbackSpiderMiddlewareRightAfterSpider |
python | RaRe-Technologies__gensim | gensim/test/test_coherencemodel.py | {
"start": 653,
"end": 13506
} | class ____(unittest.TestCase):
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = common_texts
dictionary = common_dictionary
corpus = common_corpus
def setUp(self):
# Suppose given below are the topics which two different LdaModels come up with.
# `topics1` is clearly better as it has a clear distinction between system-human
# interaction and graphs. Hence both the coherence measures for `topics1` should be
# greater.
self.topics1 = [
['human', 'computer', 'system', 'interface'],
['graph', 'minors', 'trees', 'eps']
]
self.topics2 = [
['user', 'graph', 'minors', 'system'],
['time', 'graph', 'survey', 'minors']
]
self.topics3 = [
['token', 'computer', 'system', 'interface'],
['graph', 'minors', 'trees', 'eps']
]
# using this list the model should be unable to interpret topic
# as either a list of tokens or a list of ids
self.topics4 = [
['not a token', 'not an id', 'tests using', "this list"],
['should raise', 'an error', 'to pass', 'correctly']
]
# list of topics with unseen words in the dictionary
self.topics5 = [
['aaaaa', 'bbbbb', 'ccccc', 'eeeee'],
['ddddd', 'fffff', 'ggggh', 'hhhhh']
]
self.topicIds1 = []
for topic in self.topics1:
self.topicIds1.append([self.dictionary.token2id[token] for token in topic])
self.ldamodel = LdaModel(
corpus=self.corpus, id2word=self.dictionary, num_topics=2,
passes=0, iterations=0
)
def check_coherence_measure(self, coherence):
"""Check provided topic coherence algorithm on given topics"""
if coherence in BOOLEAN_DOCUMENT_BASED:
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence=coherence)
else:
kwargs = dict(texts=self.texts, dictionary=self.dictionary, coherence=coherence)
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm2 = CoherenceModel(topics=self.topics2, **kwargs)
cm3 = CoherenceModel(topics=self.topics3, **kwargs)
cm4 = CoherenceModel(topics=self.topicIds1, **kwargs)
# check if the same topic always returns the same coherence value
cm5 = CoherenceModel(topics=[self.topics1[0]], **kwargs)
self.assertRaises(ValueError, lambda: CoherenceModel(topics=self.topics4, **kwargs))
self.assertRaises(ValueError, lambda: CoherenceModel(topics=self.topics5, **kwargs))
self.assertEqual(cm1.get_coherence(), cm4.get_coherence())
self.assertEqual(cm1.get_coherence_per_topic()[0], cm5.get_coherence())
self.assertIsInstance(cm3.get_coherence(), np.double)
self.assertGreater(cm1.get_coherence(), cm2.get_coherence())
def testUMass(self):
"""Test U_Mass topic coherence algorithm on given topics"""
self.check_coherence_measure('u_mass')
def testCv(self):
"""Test C_v topic coherence algorithm on given topics"""
self.check_coherence_measure('c_v')
def testCuci(self):
"""Test C_uci topic coherence algorithm on given topics"""
self.check_coherence_measure('c_uci')
def testCnpmi(self):
"""Test C_npmi topic coherence algorithm on given topics"""
self.check_coherence_measure('c_npmi')
def testUMassLdaModel(self):
"""Perform sanity check to see if u_mass coherence works with LDA Model"""
# Note that this is just a sanity check because LDA does not guarantee a better coherence
# value on the topics if iterations are increased. This can be seen here:
# https://gist.github.com/dsquareindia/60fd9ab65b673711c3fa00509287ddde
CoherenceModel(model=self.ldamodel, corpus=self.corpus, coherence='u_mass')
def testCvLdaModel(self):
"""Perform sanity check to see if c_v coherence works with LDA Model"""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_v')
def testCw2vLdaModel(self):
"""Perform sanity check to see if c_w2v coherence works with LDAModel."""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_w2v')
def testCuciLdaModel(self):
"""Perform sanity check to see if c_uci coherence works with LDA Model"""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_uci')
def testCnpmiLdaModel(self):
"""Perform sanity check to see if c_npmi coherence works with LDA Model"""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_npmi')
def testErrors(self):
"""Test if errors are raised on bad input"""
# not providing dictionary
self.assertRaises(
ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
coherence='u_mass'
)
# not providing texts for c_v and instead providing corpus
self.assertRaises(
ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
dictionary=self.dictionary, coherence='c_v'
)
# not providing corpus or texts for u_mass
self.assertRaises(
ValueError, CoherenceModel, topics=self.topics1, dictionary=self.dictionary,
coherence='u_mass'
)
def testProcesses(self):
get_model = partial(CoherenceModel,
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model, used_cpus = get_model(), mp.cpu_count() - 1
self.assertEqual(model.processes, used_cpus)
for p in range(-2, 1):
self.assertEqual(get_model(processes=p).processes, used_cpus)
for p in range(1, 4):
self.assertEqual(get_model(processes=p).processes, p)
def testPersistence(self):
fname = get_tmpfile('gensim_models_coherence.tst')
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testPersistenceCompressed(self):
fname = get_tmpfile('gensim_models_coherence.tst.gz')
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testPersistenceAfterProbabilityEstimationUsingCorpus(self):
fname = get_tmpfile('gensim_similarities.tst.pkl')
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model.estimate_probabilities()
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertIsNotNone(model2._accumulator)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testPersistenceAfterProbabilityEstimationUsingTexts(self):
fname = get_tmpfile('gensim_similarities.tst.pkl')
model = CoherenceModel(
topics=self.topics1, texts=self.texts, dictionary=self.dictionary, coherence='c_v'
)
model.estimate_probabilities()
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertIsNotNone(model2._accumulator)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testAccumulatorCachingSameSizeTopics(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
accumulator = cm1._accumulator
self.assertIsNotNone(accumulator)
cm1.topics = self.topics1
self.assertEqual(accumulator, cm1._accumulator)
cm1.topics = self.topics2
self.assertEqual(None, cm1._accumulator)
def testAccumulatorCachingTopicSubsets(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
accumulator = cm1._accumulator
self.assertIsNotNone(accumulator)
cm1.topics = [t[:2] for t in self.topics1]
self.assertEqual(accumulator, cm1._accumulator)
cm1.topics = self.topics1
self.assertEqual(accumulator, cm1._accumulator)
def testAccumulatorCachingWithModelSetting(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
self.assertIsNotNone(cm1._accumulator)
cm1.model = self.ldamodel
topics = []
for topic in self.ldamodel.state.get_lambda():
bestn = argsort(topic, topn=cm1.topn, reverse=True)
topics.append(bestn)
self.assertTrue(np.array_equal(topics, cm1.topics))
self.assertIsNone(cm1._accumulator)
def testAccumulatorCachingWithTopnSettingGivenTopics(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, topn=5, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
self.assertIsNotNone(cm1._accumulator)
accumulator = cm1._accumulator
topics_before = cm1._topics
cm1.topn = 3
self.assertEqual(accumulator, cm1._accumulator)
self.assertEqual(3, len(cm1.topics[0]))
self.assertEqual(topics_before, cm1._topics)
# Topics should not have been truncated, so topn settings below 5 should work
cm1.topn = 4
self.assertEqual(accumulator, cm1._accumulator)
self.assertEqual(4, len(cm1.topics[0]))
self.assertEqual(topics_before, cm1._topics)
with self.assertRaises(ValueError):
cm1.topn = 6 # can't expand topics any further without model
def testAccumulatorCachingWithTopnSettingGivenModel(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, topn=5, coherence='u_mass')
cm1 = CoherenceModel(model=self.ldamodel, **kwargs)
cm1.estimate_probabilities()
self.assertIsNotNone(cm1._accumulator)
accumulator = cm1._accumulator
topics_before = cm1._topics
cm1.topn = 3
self.assertEqual(accumulator, cm1._accumulator)
self.assertEqual(3, len(cm1.topics[0]))
self.assertEqual(topics_before, cm1._topics)
cm1.topn = 6 # should be able to expand given the model
self.assertEqual(6, len(cm1.topics[0]))
def testCompareCoherenceForTopics(self):
topics = [self.topics1, self.topics2]
cm = CoherenceModel.for_topics(
topics, dictionary=self.dictionary, texts=self.texts, coherence='c_v')
self.assertIsNotNone(cm._accumulator)
# Accumulator should have all relevant IDs.
for topic_list in topics:
cm.topics = topic_list
self.assertIsNotNone(cm._accumulator)
(coherence_topics1, coherence1), (coherence_topics2, coherence2) = \
cm.compare_model_topics(topics)
self.assertAlmostEqual(np.mean(coherence_topics1), coherence1, 4)
self.assertAlmostEqual(np.mean(coherence_topics2), coherence2, 4)
self.assertGreater(coherence1, coherence2)
def testCompareCoherenceForModels(self):
models = [self.ldamodel, self.ldamodel]
cm = CoherenceModel.for_models(
models, dictionary=self.dictionary, texts=self.texts, coherence='c_v')
self.assertIsNotNone(cm._accumulator)
# Accumulator should have all relevant IDs.
for model in models:
cm.model = model
self.assertIsNotNone(cm._accumulator)
(coherence_topics1, coherence1), (coherence_topics2, coherence2) = \
cm.compare_models(models)
self.assertAlmostEqual(np.mean(coherence_topics1), coherence1, 4)
self.assertAlmostEqual(np.mean(coherence_topics2), coherence2, 4)
self.assertAlmostEqual(coherence1, coherence2, places=4)
def testEmptyList(self):
"""Test if CoherenceModel works with document without tokens"""
texts = self.texts + [[]]
cm = CoherenceModel(model=self.ldamodel, texts=texts, coherence="c_v", processes=1)
cm.get_coherence()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| TestCoherenceModel |
python | openai__openai-python | src/openai/types/beta/threads/run.py | {
"start": 1378,
"end": 1640
} | class ____(BaseModel):
submit_tool_outputs: RequiredActionSubmitToolOutputs
"""Details on the tool outputs needed for this run to continue."""
type: Literal["submit_tool_outputs"]
"""For now, this is always `submit_tool_outputs`."""
| RequiredAction |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 45361,
"end": 50698
} | class ____(
ValueChannelMixin, core.ValueDefWithConditionMarkPropFieldOrDatumDefnumber
):
"""
AngleValue schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : dict, float, :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "angle"
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> AngleValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> AngleValue: ...
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
empty: Optional[bool] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> AngleValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
empty: Optional[bool] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> AngleValue: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> AngleValue: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> AngleValue: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberExprRef], /
) -> AngleValue: ...
def __init__(
self,
value,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
**kwds,
):
super().__init__(value=value, condition=condition, **kwds)
@with_property_setters
| AngleValue |
python | pypa__pip | src/pip/_internal/req/pep723.py | {
"start": 161,
"end": 1219
} | class ____(ValueError):
"""Raised to indicate a problem when parsing PEP 723 metadata from a script"""
def __init__(self, msg: str) -> None:
self.msg = msg
def pep723_metadata(scriptfile: str) -> dict[str, Any]:
with open(scriptfile) as f:
script = f.read()
name = "script"
matches = list(
filter(lambda m: m.group("type") == name, re.finditer(REGEX, script))
)
if len(matches) > 1:
raise PEP723Exception(f"Multiple {name!r} blocks found in {scriptfile!r}")
elif len(matches) == 1:
content = "".join(
line[2:] if line.startswith("# ") else line[1:]
for line in matches[0].group("content").splitlines(keepends=True)
)
try:
metadata = tomllib.loads(content)
except Exception as exc:
raise PEP723Exception(f"Failed to parse TOML in {scriptfile!r}") from exc
else:
raise PEP723Exception(
f"File does not contain {name!r} metadata: {scriptfile!r}"
)
return metadata
| PEP723Exception |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/composition.py | {
"start": 28786,
"end": 41500
} | class ____:
"""The return value for a dynamic output when invoking a node in a composition function.
Must be unwrapped by invoking map or collect.
"""
def __init__(self, node_name: str, output_name: str, node_type: str):
self.node_name = check.str_param(node_name, "node_name")
self.output_name = check.str_param(output_name, "output_name")
self.node_type = check.str_param(node_type, "node_type")
def describe_node(self) -> str:
return f"{self.node_type} '{self.node_name}'"
def map(
self, fn: Callable
) -> Union[
"InvokedNodeDynamicOutputWrapper", tuple["InvokedNodeDynamicOutputWrapper", ...], None
]:
check.is_callable(fn)
result = fn(InvokedNodeOutputHandle(self.node_name, self.output_name, self.node_type))
if isinstance(result, InvokedNodeOutputHandle):
return InvokedNodeDynamicOutputWrapper(
result.node_name, result.output_name, result.node_type
)
elif isinstance(result, tuple) and all(
map(lambda item: isinstance(item, InvokedNodeOutputHandle), result)
):
return tuple(
map(
lambda item: InvokedNodeDynamicOutputWrapper(
item.node_name, item.output_name, item.node_type
),
result,
)
)
elif result is None:
return None
elif isinstance(result, InvokedNodeDynamicOutputWrapper):
return result
else:
check.failed(
"Could not handle output from map function invoked on "
f"{self.node_name}:{self.output_name}, received {result}"
)
def collect(self) -> DynamicFanIn:
return DynamicFanIn(self.node_name, self.output_name)
def unwrap_for_composite_mapping(self) -> InvokedNodeOutputHandle:
return InvokedNodeOutputHandle(self.node_name, self.output_name, self.node_type)
def __iter__(self) -> NoReturn:
raise DagsterInvariantViolationError(
f'Attempted to iterate over an {self.__class__.__name__}. This object represents the dynamic output "{self.output_name}" '
f'from the {self.describe_node()}. Use the "map" method on this object to create '
"downstream dependencies that will be cloned for each DynamicOut "
"that is resolved at runtime."
)
def __getitem__(self, idx) -> NoReturn:
raise DagsterInvariantViolationError(
f'Attempted to index in to an {self.__class__.__name__}. This object represents the dynamic out "{self.output_name}" '
f'from the {self.describe_node()}. Use the "map" method on this object to create '
"downstream dependencies that will be cloned for each DynamicOut "
"that is resolved at runtime."
)
def alias(self, _) -> NoReturn:
raise DagsterInvariantViolationError(
f"In {current_context().source} {current_context().name}, attempted to call alias method for {self.__class__.__name__}. This object represents"
f' the dynamic out "{self.output_name}" from the already invoked {self.describe_node()}. Consider checking'
" the location of parentheses."
)
def with_hooks(self, _) -> NoReturn:
raise DagsterInvariantViolationError(
f"In {current_context().source} {current_context().name}, attempted to call hook method for {self.__class__.__name__}. This object represents"
f' the dynamic out "{self.output_name}" from the already invoked {self.describe_node()}. Consider checking'
" the location of parentheses."
)
def composite_mapping_from_output(
output: Any,
output_defs: Sequence[OutputDefinition],
node_name: str,
decorator_name: str,
) -> Optional[Mapping[str, OutputMapping]]:
# single output
if isinstance(output, InvokedNodeOutputHandle):
if len(output_defs) == 1:
defn = output_defs[0]
return {defn.name: defn.mapping_from(output.node_name, output.output_name)}
else:
raise DagsterInvalidDefinitionError(
f"Returned a single output ({output.node_name}.{output.output_name}) in "
f"{decorator_name} '{node_name}' but {len(output_defs)} outputs are defined. "
"Return a dict to map defined outputs."
)
elif isinstance(output, InvokedNodeDynamicOutputWrapper):
if len(output_defs) == 1:
defn = output_defs[0]
return {
defn.name: defn.mapping_from(
output.node_name, output.output_name, from_dynamic_mapping=True
)
}
else:
raise DagsterInvalidDefinitionError(
f"Returned a single output ({output.node_name}.{output.output_name}) in "
f"{decorator_name} '{node_name}' but {len(output_defs)} outputs are defined. "
"Return a dict to map defined outputs."
)
output_mapping_dict = {}
output_def_dict = {output_def.name: output_def for output_def in output_defs}
# tuple returned directly
if isinstance(output, tuple) and all(
map(lambda item: isinstance(item, InvokedNodeOutputHandle), output)
):
for i, output_name in enumerate(output_def_dict.keys()):
handle = output[i]
# map output defined on graph to the actual output defined on the op
output_mapping_dict[output_name] = output_def_dict[output_name].mapping_from(
handle.node_name, handle.output_name
)
return output_mapping_dict
# mapping dict
if isinstance(output, dict):
for name, handle in output.items():
if name not in output_def_dict:
raise DagsterInvalidDefinitionError(
f"{decorator_name} '{node_name}' referenced key {name} which does not match any"
f" defined outputs. Valid options are: {list(output_def_dict.keys())}"
)
if isinstance(handle, InvokedNodeOutputHandle):
output_mapping_dict[name] = output_def_dict[name].mapping_from(
handle.node_name, handle.output_name
)
elif isinstance(handle, InvokedNodeDynamicOutputWrapper):
output_mapping_dict[name] = output_def_dict[name].mapping_from(
handle.node_name, handle.output_name, from_dynamic_mapping=True
)
else:
raise DagsterInvalidDefinitionError(
f"{decorator_name} '{node_name}' returned problematic dict entry under "
f"key {name} of type {type(handle)}. Dict values must be outputs of "
"invoked nodes"
)
return output_mapping_dict
# error
if output is not None:
raise DagsterInvalidDefinitionError(
f"{decorator_name} '{node_name}' returned problematic value "
f"of type {type(output)}. Expected return value from invoked node or dict mapping "
"output name to return values from invoked nodes"
)
return None
def do_composition(
decorator_name: str,
graph_name: str,
fn: Callable[..., Any],
provided_input_defs: Sequence[InputDefinition],
provided_output_defs: Optional[Sequence[OutputDefinition]],
config_mapping: Optional[ConfigMapping],
ignore_output_from_composition_fn: bool,
) -> tuple[
Sequence[InputMapping],
Sequence[OutputMapping],
DependencyMapping[NodeInvocation],
Sequence[NodeDefinition],
Optional[ConfigMapping],
Sequence[str],
Mapping[str, Mapping[str, "AssetsDefinition"]],
]:
"""This a function used by both @job and @graph to implement their composition
function which is our DSL for constructing a dependency graph.
Args:
decorator_name (str): Name of the calling decorator. e.g. "@graph" or "@job"
graph_name (str): User-defined name of the definition being constructed
fn (Callable): The composition function to be called.
provided_input_defs(List[InputDefinition]): List of input definitions
explicitly provided to the decorator by the user.
provided_output_defs(List[OutputDefinition]): List of output definitions
explicitly provided to the decorator by the user.
config_mapping (Any): Config mapping provided to decorator by user. In
job/graph case, this would have been constructed from a user-provided
config_schema and config_fn.
ignore_output_from_composite_fn(Bool): Because of backwards compatibility
issues, jobs ignore the return value out of the mapping if
the user has not explicitly provided the output definitions.
This should be removed in 0.11.0.
"""
from dagster._core.definitions.decorators.op_decorator import (
NoContextDecoratedOpFunction,
resolve_checked_op_fn_inputs,
)
actual_output_defs: Sequence[OutputDefinition]
if provided_output_defs is None:
outputs_are_explicit = False
actual_output_defs = [OutputDefinition.create_from_inferred(infer_output_props(fn))]
elif len(provided_output_defs) == 1:
outputs_are_explicit = True
actual_output_defs = [provided_output_defs[0].combine_with_inferred(infer_output_props(fn))]
else:
outputs_are_explicit = True
actual_output_defs = provided_output_defs
compute_fn = NoContextDecoratedOpFunction(fn)
actual_input_defs = resolve_checked_op_fn_inputs(
decorator_name=decorator_name,
fn_name=graph_name,
compute_fn=compute_fn,
explicit_input_defs=provided_input_defs,
exclude_nothing=False,
)
kwargs = {input_def.name: InputMappingNode(input_def) for input_def in actual_input_defs}
output = None
returned_mapping = None
enter_composition(graph_name, decorator_name)
try:
output = fn(**kwargs)
if ignore_output_from_composition_fn:
output = None
returned_mapping = composite_mapping_from_output(
output, actual_output_defs, graph_name, decorator_name
)
finally:
context = exit_composition(returned_mapping)
check.invariant(
context.name == graph_name,
"Composition context stack desync: received context for "
f'"{context.name}" expected "{graph_name}"',
)
# line up mappings in definition order
input_mappings = []
for defn in actual_input_defs:
mappings = [
mapping for mapping in context.input_mappings if mapping.graph_input_name == defn.name
]
if len(mappings) == 0:
raise DagsterInvalidDefinitionError(
f"{decorator_name} '{graph_name}' has unmapped input '{defn.name}'. "
"Remove it or pass it to the appropriate op/graph invocation."
)
input_mappings += mappings
output_mappings = []
for defn in actual_output_defs:
mapping = context.output_mapping_dict.get(defn.name)
if mapping is None:
# if we inferred output_defs we will be flexible and either take a mapping or not
if not outputs_are_explicit:
continue
# if we are ignoring the output, disregard this unsatisfied mapping
if ignore_output_from_composition_fn:
continue
raise DagsterInvalidDefinitionError(
f"{decorator_name} '{graph_name}' has unmapped output '{defn.name}'. "
"Remove it or return a value from the appropriate op/graph invocation."
)
output_mappings.append(mapping)
return (
input_mappings,
output_mappings,
context.dependencies,
context.node_defs,
config_mapping,
compute_fn.positional_inputs(),
context.node_input_assets,
)
def get_validated_config_mapping(
name: str,
config_schema: Any,
config_fn: Optional[Callable[[Any], Any]],
decorator_name: str,
) -> Optional[ConfigMapping]:
if config_fn is None and config_schema is None:
return None
elif config_fn is not None:
return ConfigMapping(config_fn=config_fn, config_schema=config_schema)
else:
raise DagsterInvalidDefinitionError(
f"{decorator_name} '{name}' defines a configuration schema but does not "
"define a configuration function."
)
| InvokedNodeDynamicOutputWrapper |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 141412,
"end": 149544
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
email_notifications: Optional[JobEmailNotifications] = Field(
None,
description=(
"An optional set of email addresses that is notified when runs of this job"
" begin or complete as well as when this job is deleted. The default"
" behavior is to not send any emails."
),
)
format: Optional[Literal["SINGLE_TASK", "MULTI_TASK"]] = Field(
default=None,
description=(
"Used to tell what is the format of the job. This field is ignored in"
" Create/Update/Reset calls. When using the Jobs API 2.1 this value is"
' always set to `"MULTI_TASK"`.'
),
examples=["MULTI_TASK"],
)
git_source: Optional[GitSource1] = Field(
default=None,
description=(
"This functionality is in Public Preview.\n\nAn optional specification for"
" a remote repository containing the notebooks used by this job's notebook"
" tasks."
),
examples=[
{
"git_branch": "main",
"git_provider": "gitHub",
"git_url": "https://github.com/databricks/databricks-cli",
}
],
)
job_clusters: Optional[List[JobCluster]] = Field(
default=None,
description=(
"A list of job cluster specifications that can be shared and reused by"
" tasks of this job. Libraries cannot be declared in a shared job cluster."
" You must declare dependent libraries in task settings."
),
examples=[
[
{
"job_cluster_key": "auto_scaling_cluster",
"new_cluster": {
"autoscale": {"max_workers": 16, "min_workers": 2},
"aws_attributes": {
"availability": "SPOT",
"zone_id": "us-west-2a",
},
"node_type_id": "i3.xlarge",
"spark_conf": {"spark.speculation": True},
"spark_version": "7.3.x-scala2.12",
},
}
]
],
max_length=100,
)
max_concurrent_runs: Optional[int] = Field(
None,
description=(
"An optional maximum allowed number of concurrent runs of the job.\n\nSet"
" this value if you want to be able to execute multiple runs of the same"
" job concurrently. This is useful for example if you trigger your job on a"
" frequent schedule and want to allow consecutive runs to overlap with each"
" other, or if you want to trigger multiple runs which differ by their"
" input parameters.\n\nThis setting affects only new runs. For example,"
" suppose the job’s concurrency is 4 and there are 4 concurrent active"
" runs. Then setting the concurrency to 3 won’t kill any of the active"
" runs. However, from then on, new runs are skipped unless there are fewer"
" than 3 active runs.\n\nThis value cannot exceed 1000\\. Setting this"
" value to 0 causes all new runs to be skipped. The default behavior is to"
" allow only 1 concurrent run."
),
examples=[10],
)
name: Optional[str] = Field(
"Untitled",
description="An optional name for the job.",
examples=["A multitask job"],
)
schedule: Optional[CronSchedule] = Field(
None,
description=(
"An optional periodic schedule for this job. The default behavior is that"
" the job only runs when triggered by clicking “Run Now” in the Jobs UI or"
" sending an API request to `runNow`."
),
)
tags: Optional[Dict[str, Any]] = Field(
"{}",
description=(
"A map of tags associated with the job. These are forwarded to the cluster"
" as cluster tags for jobs clusters, and are subject to the same"
" limitations as cluster tags. A maximum of 25 tags can be added to the"
" job."
),
examples=[{"cost-center": "engineering", "team": "jobs"}],
)
tasks: Optional[List[JobTaskSettings]] = Field(
default=None,
description="A list of task specifications to be executed by this job.",
examples=[
[
{
"depends_on": [],
"description": "Extracts session data from events",
"existing_cluster_id": "0923-164208-meows279",
"libraries": [{"jar": "dbfs:/mnt/databricks/Sessionize.jar"}],
"max_retries": 3,
"min_retry_interval_millis": 2000,
"retry_on_timeout": False,
"spark_jar_task": {
"main_class_name": "com.databricks.Sessionize",
"parameters": ["--data", "dbfs:/path/to/data.json"],
},
"task_key": "Sessionize",
"timeout_seconds": 86400,
},
{
"depends_on": [],
"description": "Ingests order data",
"job_cluster_key": "auto_scaling_cluster",
"libraries": [{"jar": "dbfs:/mnt/databricks/OrderIngest.jar"}],
"max_retries": 3,
"min_retry_interval_millis": 2000,
"retry_on_timeout": False,
"spark_jar_task": {
"main_class_name": "com.databricks.OrdersIngest",
"parameters": ["--data", "dbfs:/path/to/order-data.json"],
},
"task_key": "Orders_Ingest",
"timeout_seconds": 86400,
},
{
"depends_on": [
{"task_key": "Orders_Ingest"},
{"task_key": "Sessionize"},
],
"description": "Matches orders with user sessions",
"max_retries": 3,
"min_retry_interval_millis": 2000,
"new_cluster": {
"autoscale": {"max_workers": 16, "min_workers": 2},
"aws_attributes": {
"availability": "SPOT",
"zone_id": "us-west-2a",
},
"node_type_id": "i3.xlarge",
"spark_conf": {"spark.speculation": True},
"spark_version": "7.3.x-scala2.12",
},
"notebook_task": {
"base_parameters": {"age": "35", "name": "John Doe"},
"notebook_path": "/Users/user.name@databricks.com/Match",
"source": "WORKSPACE",
},
"retry_on_timeout": False,
"task_key": "Match",
"timeout_seconds": 86400,
},
]
],
max_length=100,
)
timeout_seconds: Optional[int] = Field(
default=None,
description=(
"An optional timeout applied to each run of this job. The default behavior"
" is to have no timeout."
),
examples=[86400],
)
webhook_notifications: Optional[WebhookNotifications] = Field(
None,
description=(
"A collection of system notification IDs to notify when runs of this job"
" begin or complete. The default behavior is to not send any system"
" notifications."
),
)
parameters: Optional[List[JobParameter]] = Field(
None,
description=("Job-level parameter definitions."),
)
| JobSettings |
python | pydantic__pydantic | tests/benchmarks/shared.py | {
"start": 1542,
"end": 1617
} | class ____(Enum):
RED = 'red'
GREEN = 'green'
BLUE = 'blue'
| Color |
python | pypa__pip | src/pip/_vendor/resolvelib/structs.py | {
"start": 3243,
"end": 4556
} | class ____(Mapping[KT, Iterator[CT]], Generic[RT, CT, KT]):
def __init__(
self,
mapping: Mapping[KT, RT],
accessor: Callable[[RT], Iterable[CT]],
appends: Mapping[KT, Iterable[CT]] | None = None,
) -> None:
self._mapping = mapping
self._accessor = accessor
self._appends: Mapping[KT, Iterable[CT]] = appends or {}
def __repr__(self) -> str:
return "IteratorMapping({!r}, {!r}, {!r})".format(
self._mapping,
self._accessor,
self._appends,
)
def __bool__(self) -> bool:
return bool(self._mapping or self._appends)
def __contains__(self, key: object) -> bool:
return key in self._mapping or key in self._appends
def __getitem__(self, k: KT) -> Iterator[CT]:
try:
v = self._mapping[k]
except KeyError:
return iter(self._appends[k])
return itertools.chain(self._accessor(v), self._appends.get(k, ()))
def __iter__(self) -> Iterator[KT]:
more = (k for k in self._appends if k not in self._mapping)
return itertools.chain(self._mapping, more)
def __len__(self) -> int:
more = sum(1 for k in self._appends if k not in self._mapping)
return len(self._mapping) + more
| IteratorMapping |
python | keras-team__keras | keras/src/layers/regularization/activity_regularization_test.py | {
"start": 105,
"end": 974
} | class ____(test_case.TestCase):
def test_correctness(self):
layer = layers.ActivityRegularization(l1=0.2, l2=0.3)
layer(2 * np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 4 * 0.3 + 2 * 0.2)
@pytest.mark.requires_trainable_backend
def test_activity_regularization_basics(self):
self.run_layer_test(
layers.ActivityRegularization,
{"l1": 0.1, "l2": 0.2},
input_shape=(2, 3),
input_dtype="float32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=True,
assert_built_after_instantiation=True,
)
| ActivityRegularizationTest |
python | zarr-developers__zarr-python | src/zarr/storage/_obstore.py | {
"start": 9516,
"end": 9899
} | class ____(TypedDict):
"""Range request with a known start and end byte.
These requests can be multiplexed natively on the Rust side with
`obstore.get_ranges_async`.
"""
original_request_index: int
"""The positional index in the original key_ranges input"""
start: int
"""Start byte offset."""
end: int
"""End byte offset."""
| _BoundedRequest |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_workers.py | {
"start": 38533,
"end": 40952
} | class ____:
async def test_create_work_queue(self, client, work_pool):
response = await client.post(
f"/work_pools/{work_pool.name}/queues",
json=dict(name="test-queue", description="test queue"),
)
assert response.status_code == status.HTTP_201_CREATED, response.text
result = parse_obj_as(WorkQueue, response.json())
assert result.name == "test-queue"
assert result.description == "test queue"
assert result.work_pool_name == work_pool.name
async def test_create_work_queue_with_priority(
self,
client,
session,
work_pool,
):
data = dict(name="my-wpq", priority=99)
response = await client.post(
f"work_pools/{work_pool.name}/queues",
json=data,
)
assert response.status_code == 201, response.text
assert response.json()["priority"] == 99
work_queue_id = response.json()["id"]
work_queue = await models.workers.read_work_queue(
session=session, work_queue_id=work_queue_id
)
assert work_queue.priority == 99
async def test_create_work_queue_with_no_priority_when_low_priority_set(
self,
client,
work_pool,
):
response = await client.post(
f"work_pools/{work_pool.name}/queues", json=dict(name="wpq-1")
)
# priority 2 because the default queue exists
assert response.json()["priority"] == 2
response2 = await client.post(
f"work_pools/{work_pool.name}/queues", json=dict(name="wpq-2")
)
assert response2.json()["priority"] == 3
async def test_create_work_queue_with_no_priority_when_high_priority_set(
self,
client,
session,
work_pool,
):
response = await client.post(
f"work_pools/{work_pool.name}/queues", json=dict(name="wpq-1", priority=99)
)
assert response.json()["priority"] == 99
work_queue_id = response.json()["id"]
response2 = await client.post(
f"work_pools/{work_pool.name}/queues", json=dict(name="wpq-2")
)
assert response2.json()["priority"] == 2
work_queue = await models.workers.read_work_queue(
session=session, work_queue_id=work_queue_id
)
assert work_queue.priority == 99
| TestCreateWorkQueue |
python | OmkarPathak__pygorithm | pygorithm/data_structures/graph.py | {
"start": 3625,
"end": 7566
} | class ____(object):
"""WeightedUndirectedGraph object
A graph with a numerical value (weight) on edges, which
is the same for both directions in an undirected graph.
"""
def __init__(self):
self.graph = {}
self.weights = {}
def add_edge(self, u, v, weight):
"""
Adds the specified edge to this graph. If the edge already exists,
this will only modify the weight (not create duplicates).
:param u: from vertex
:param v: to vertex
:param weight: weight of the edge - type : numeric
"""
changing_weight = (u, v) in self.weights.keys()
self.weights[(u, v)] = weight
self.weights[(v, u)] = weight
if changing_weight:
return
if u in self.graph.keys():
self.graph[u].append(v)
else:
self.graph[u] = [v]
if v in self.graph.keys():
self.graph[v].append(u)
else:
self.graph[v] = [u]
def get_edge_weight(self, u, v):
"""
Gets the weight between u and v if such an edge
exists, or None if it does not.
:param u: one edge
:param v: the other edge
:return: numeric or None
"""
return self.weights.get((u, v), None)
def remove_edge(self, edge, other_edge_or_none=None):
"""
Removes the specified edge from the grid entirely or,
if specified, the connection with one other edge.
Behavior is undefined if the connection does not
exist.
:param edge: the edge to remove
:param other_edge_or_none: an edge connected to edge or none
"""
if other_edge_or_none is not None:
del self.weights[(edge, other_edge_or_none)]
del self.weights[(other_edge_or_none, edge)]
edge_list = self.graph[edge]
other_edge_list = self.graph[other_edge_or_none]
if len(edge_list) == 1:
del self.graph[edge]
else:
self.graph[edge].remove(other_edge_or_none)
if len(other_edge_list) == 1:
del self.graph[other_edge_or_none]
else:
self.graph[other_edge_or_none].remove(edge)
else:
edge_list = self.graph[edge]
del self.graph[edge]
for other_edge in edge_list:
del self.weights[(edge, other_edge)]
del self.weights[(other_edge, edge)]
other_edge_list = self.graph[other_edge]
if len(other_edge_list) == 1:
del self.graph[other_edge]
else:
other_edge_list.remove(edge)
def gridify(self, size, weight):
"""
Constructs connections from a square grid starting at (0, 0)
until (size-1, size-1) with connections between adjacent and
diagonal nodes. Diagonal nodes have a weight of weight*sqrt(2)
:param size: the size of the square grid to construct - type : integer
:param weight: the weight between orthogonal nodes. - type: numeric
:return: None
"""
rt2 = math.sqrt(2)
acceptable_offsets = [
(-1, -1, rt2), (-1, 0, 1), (-1, 1, rt2),
(0, -1, 1), (0, 1, 1),
(1, -1, rt2), (1, 0, 1), (1, 1, rt2)
]
for x in range(0, size):
for y in range(0, size):
for offset in acceptable_offsets:
nx = x + offset[0]
ny = y + offset[1]
if nx >= 0 and ny >= 0 and nx < size and ny < size:
self.add_edge((x, y), (nx, ny), weight * offset[2])
| WeightedUndirectedGraph |
python | prabhupant__python-ds | data_structures/graphs/count_sink_nodes.py | {
"start": 262,
"end": 603
} | class ____:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.vertices = vertices
def add_edge(self, u, v):
self.graph[u].append(v)
def count_sink_nodes(self):
return self.vertices - len(self.graph)
g = Graph(3)
g.add_edge(0, 1)
g.add_edge(0, 2)
print(g.count_sink_nodes()) | Graph |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 39545,
"end": 41117
} | class ____(_ConfusionMatrixConditionCount):
"""Calculates the number of true negatives.
If `sample_weight` is given, calculates the sum of the weights of
true negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of true negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to 0.5. A float value or a python
list/tuple of float threshold values in [0, 1]. A threshold is compared
with prediction values to determine the truth value of predictions
(i.e., above the threshold is `true`, below is `false`). One metric
value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.TrueNegatives()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
>>> m.result().numpy()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result().numpy()
1.0
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.TrueNegatives()])
```
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super(TrueNegatives, self).__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype)
| TrueNegatives |
python | scrapy__scrapy | tests/test_downloadermiddleware.py | {
"start": 7965,
"end": 8824
} | class ____(TestManagerBase):
"""Middlewares using Deferreds should work"""
@deferred_f_from_coro_f
async def test_deferred(self):
req = Request("http://example.com/index.html")
resp = Response("http://example.com/index.html")
download_func = mock.MagicMock()
class DeferredMiddleware:
def cb(self, result):
return result
def process_request(self, request):
d = Deferred()
d.addCallback(self.cb)
d.callback(resp)
return d
async with self.get_mwman() as mwman:
mwman._add_middleware(DeferredMiddleware())
result = await maybe_deferred_to_future(mwman.download(download_func, req))
assert result is resp
assert not download_func.called
| TestMiddlewareUsingDeferreds |
python | PyCQA__pylint | tests/functional/d/dataclass/dataclass_with_default_factory.py | {
"start": 1283,
"end": 1452
} | class ____:
"""Absurd example to test a potential crash found during development."""
attribute: int = lambda this: cast(int, this)(field(default_factory=dict))
| TEST4 |
python | huggingface__transformers | tests/models/d_fine/test_modeling_d_fine.py | {
"start": 27966,
"end": 30457
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return RTDetrImageProcessor.from_pretrained(CHECKPOINT) if is_vision_available() else None
def test_inference_object_detection_head(self):
model = DFineForObjectDetection.from_pretrained(CHECKPOINT).to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
expected_shape_logits = torch.Size((1, 300, model.config.num_labels))
self.assertEqual(outputs.logits.shape, expected_shape_logits)
expected_logits = torch.tensor(
[
[-3.8098, -4.7725, -5.9945],
[-5.2975, -9.4991, -6.1654],
[-5.3502, -3.9532, -6.3631],
]
).to(torch_device)
expected_boxes = torch.tensor(
[
[0.7678, 0.4148, 0.4644],
[0.1691, 0.1987, 0.2124],
[0.2582, 0.5482, 0.4751],
]
).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, atol=2e-4, rtol=2e-4)
expected_shape_boxes = torch.Size((1, 300, 4))
self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=2e-4, rtol=2e-4)
# verify postprocessing
results = image_processor.post_process_object_detection(
outputs, threshold=0.0, target_sizes=[image.size[::-1]]
)[0]
expected_scores = torch.tensor([0.9642, 0.9542, 0.9536, 0.8548], device=torch_device)
expected_labels = [15, 65, 15, 57]
expected_slice_boxes = torch.tensor(
[
[1.3186e01, 5.4130e01, 3.1727e02, 4.7212e02],
[4.0275e01, 7.2975e01, 1.7620e02, 1.1777e02],
[3.4276e02, 2.3428e01, 6.3998e02, 3.7477e02],
[5.8418e-01, 1.1794e00, 6.3933e02, 4.7486e02],
],
device=torch_device,
)
torch.testing.assert_close(results["scores"][:4], expected_scores, atol=1e-3, rtol=1e-4)
self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels)
torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes[:4], atol=1e-3, rtol=1e-4)
| DFineModelIntegrationTest |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_contextlib.py | {
"start": 39929,
"end": 40137
} | class ____(_TestBaseExitStack, __TestCase):
exit_stack = ExitStack
callback_error_internal_frames = [
('__exit__', 'raise exc'),
('__exit__', 'if cb(*exc_details):'),
]
| TestExitStack |
python | pallets__werkzeug | src/werkzeug/exceptions.py | {
"start": 6196,
"end": 7170
} | class ____(BadRequest, KeyError):
"""An exception that is used to signal both a :exc:`KeyError` and a
:exc:`BadRequest`. Used by many of the datastructures.
"""
_description = BadRequest.description
#: Show the KeyError along with the HTTP error message in the
#: response. This should be disabled in production, but can be
#: useful in a debug mode.
show_exception = False
def __init__(self, arg: object | None = None, *args: t.Any, **kwargs: t.Any):
super().__init__(*args, **kwargs)
if arg is None:
KeyError.__init__(self)
else:
KeyError.__init__(self, arg)
@property
def description(self) -> str:
if self.show_exception:
return f"{self._description}\n{KeyError.__name__}: {KeyError.__str__(self)}"
return self._description
@description.setter
def description(self, value: str) -> None:
self._description = value
| BadRequestKeyError |
python | walkccc__LeetCode | solutions/514. Freedom Trail/514.py | {
"start": 0,
"end": 780
} | class ____:
def findRotateSteps(self, ring: str, key: str) -> int:
@functools.lru_cache(None)
def dfs(ring: str, index: int) -> int:
"""Returns the number of rotates of ring to match key[index..n)."""
if index == len(key):
return 0
ans = math.inf
# For each ring[i] == key[index], we rotate the ring to match the ring[i]
# with the key[index], then recursively match the newRing with the
# key[index + 1..n).
for i, r in enumerate(ring):
if r == key[index]:
minRotates = min(i, len(ring) - i)
newRing = ring[i:] + ring[:i]
remainingRotates = dfs(newRing, index + 1)
ans = min(ans, minRotates + remainingRotates)
return ans
return dfs(ring, 0) + len(key)
| Solution |
python | apache__airflow | providers/google/tests/unit/google/cloud/links/test_vertex_ai.py | {
"start": 2626,
"end": 3534
} | class ____:
def test_class_attributes(self):
assert VertexAIRayClusterListLink.key == EXPECTED_VERTEX_AI_RAY_CLUSTER_LIST_LINK_KEY
assert VertexAIRayClusterListLink.name == EXPECTED_VERTEX_AI_RAY_CLUSTER_LIST_LINK_NAME
assert VertexAIRayClusterListLink.format_str == EXPECTED_VERTEX_AI_RAY_CLUSTER_LIST_LINK_FORMAT_STR
def test_persist(self):
mock_context = mock.MagicMock()
mock_context["ti"] = mock.MagicMock(project_id=TEST_PROJECT_ID)
mock_context["task"] = mock.MagicMock()
VertexAIRayClusterListLink.persist(
context=mock_context,
project_id=TEST_PROJECT_ID,
)
mock_context["ti"].xcom_push.assert_called_once_with(
key=EXPECTED_VERTEX_AI_RAY_CLUSTER_LIST_LINK_KEY,
value={
"project_id": TEST_PROJECT_ID,
},
)
| TestVertexAIRayClusterListLink |
python | ray-project__ray | rllib/examples/envs/classes/multi_agent/rock_paper_scissors.py | {
"start": 108,
"end": 4473
} | class ____(MultiAgentEnv):
"""Two-player environment for the famous rock paper scissors game.
# __sphinx_doc_1_end__
Optionally, the "Sheldon Cooper extension" can be activated by passing
`sheldon_cooper_mode=True` into the constructor, in which case two more moves
are allowed: Spock and Lizard. Spock is poisoned by Lizard, disproven by Paper, but
crushes Rock and smashes Scissors. Lizard poisons Spock and eats Paper, but is
decapitated by Scissors and crushed by Rock.
# __sphinx_doc_2_begin__
Both players always move simultaneously over a course of 10 timesteps in total.
The winner of each timestep receives reward of +1, the losing player -1.0.
The observation of each player is the last opponent action.
"""
ROCK = 0
PAPER = 1
SCISSORS = 2
LIZARD = 3
SPOCK = 4
WIN_MATRIX = {
(ROCK, ROCK): (0, 0),
(ROCK, PAPER): (-1, 1),
(ROCK, SCISSORS): (1, -1),
(PAPER, ROCK): (1, -1),
(PAPER, PAPER): (0, 0),
(PAPER, SCISSORS): (-1, 1),
(SCISSORS, ROCK): (-1, 1),
(SCISSORS, PAPER): (1, -1),
(SCISSORS, SCISSORS): (0, 0),
}
# __sphinx_doc_2_end__
WIN_MATRIX.update(
{
# Sheldon Cooper mode:
(LIZARD, LIZARD): (0, 0),
(LIZARD, SPOCK): (1, -1), # Lizard poisons Spock
(LIZARD, ROCK): (-1, 1), # Rock crushes lizard
(LIZARD, PAPER): (1, -1), # Lizard eats paper
(LIZARD, SCISSORS): (-1, 1), # Scissors decapitate lizard
(ROCK, LIZARD): (1, -1), # Rock crushes lizard
(PAPER, LIZARD): (-1, 1), # Lizard eats paper
(SCISSORS, LIZARD): (1, -1), # Scissors decapitate lizard
(SPOCK, SPOCK): (0, 0),
(SPOCK, LIZARD): (-1, 1), # Lizard poisons Spock
(SPOCK, ROCK): (1, -1), # Spock vaporizes rock
(SPOCK, PAPER): (-1, 1), # Paper disproves Spock
(SPOCK, SCISSORS): (1, -1), # Spock smashes scissors
(ROCK, SPOCK): (-1, 1), # Spock vaporizes rock
(PAPER, SPOCK): (1, -1), # Paper disproves Spock
(SCISSORS, SPOCK): (-1, 1), # Spock smashes scissors
}
)
# __sphinx_doc_3_begin__
def __init__(self, config=None):
super().__init__()
self.agents = self.possible_agents = ["player1", "player2"]
# The observations are always the last taken actions. Hence observation- and
# action spaces are identical.
self.observation_spaces = self.action_spaces = {
"player1": gym.spaces.Discrete(3),
"player2": gym.spaces.Discrete(3),
}
self.last_move = None
self.num_moves = 0
# __sphinx_doc_3_end__
self.sheldon_cooper_mode = False
if config.get("sheldon_cooper_mode"):
self.sheldon_cooper_mode = True
self.action_spaces = self.observation_spaces = {
"player1": gym.spaces.Discrete(5),
"player2": gym.spaces.Discrete(5),
}
# __sphinx_doc_4_begin__
def reset(self, *, seed=None, options=None):
self.num_moves = 0
# The first observation should not matter (none of the agents has moved yet).
# Set them to 0.
return {
"player1": 0,
"player2": 0,
}, {} # <- empty infos dict
# __sphinx_doc_4_end__
# __sphinx_doc_5_begin__
def step(self, action_dict):
self.num_moves += 1
move1 = action_dict["player1"]
move2 = action_dict["player2"]
# Set the next observations (simply use the other player's action).
# Note that because we are publishing both players in the observations dict,
# we expect both players to act in the next `step()` (simultaneous stepping).
observations = {"player1": move2, "player2": move1}
# Compute rewards for each player based on the win-matrix.
r1, r2 = self.WIN_MATRIX[move1, move2]
rewards = {"player1": r1, "player2": r2}
# Terminate the entire episode (for all agents) once 10 moves have been made.
terminateds = {"__all__": self.num_moves >= 10}
# Leave truncateds and infos empty.
return observations, rewards, terminateds, {}, {}
# __sphinx_doc_5_end__
| RockPaperScissors |
python | spyder-ide__spyder | spyder/plugins/outlineexplorer/main_widget.py | {
"start": 708,
"end": 1146
} | class ____:
GoToCursor = 'go_to_cursor'
ShowFullPath = 'show_fullpath'
ShowAllFiles = 'show_all_files'
ShowSpecialComments = 'show_comments'
GroupCodeCells = 'group_code_cells'
DisplayVariables = 'display_variables'
FollowCursor = 'follow_cursor'
SortFiles = 'sort_files_alphabetically'
# ---- Main widget
# -----------------------------------------------------------------------------
| OutlineExplorerActions |
python | optuna__optuna | optuna/storages/_grpc/client.py | {
"start": 1524,
"end": 14246
} | class ____(BaseStorage):
"""gRPC client for :func:`~optuna.storages.run_grpc_proxy_server`.
Example:
This is a simple example of using :class:`~optuna.storages.GrpcStorageProxy` with
:func:`~optuna.storages.run_grpc_proxy_server`.
.. code::
import optuna
from optuna.storages import GrpcStorageProxy
storage = GrpcStorageProxy(host="localhost", port=13000)
study = optuna.create_study(storage=storage)
Please refer to the example in :func:`~optuna.storages.run_grpc_proxy_server` for the
server side code.
Args:
host: The hostname of the gRPC server.
port: The port of the gRPC server.
.. warning::
Currently, gRPC storage proxy in combination with an SQLite3 database may cause unexpected
behaviors when calling :func:`optuna.delete_study` due to non-invalidated cache.
"""
def __init__(self, *, host: str = "localhost", port: int = 13000) -> None:
self._host = host
self._port = port
self._setup()
def _setup(self) -> None:
"""Set up the gRPC channel and stub."""
self._channel = create_insecure_channel(self._host, self._port)
self._stub = api_pb2_grpc.StorageServiceStub(self._channel)
self._cache = GrpcClientCache(self._stub)
def wait_server_ready(self, timeout: float | None = None) -> None:
"""Wait until the gRPC server is ready.
Args:
timeout: The maximum time to wait in seconds. If :obj:`None`, wait indefinitely.
"""
try:
with create_insecure_channel(self._host, self._port) as channel:
grpc.channel_ready_future(channel).result(timeout=timeout)
except grpc.FutureTimeoutError as e:
raise ConnectionError("GRPC connection timeout") from e
def close(self) -> None:
"""Close the gRPC channel."""
self._channel.close()
def __getstate__(self) -> dict[Any, Any]:
state = self.__dict__.copy()
del state["_channel"]
del state["_stub"]
del state["_cache"]
return state
def __setstate__(self, state: dict[Any, Any]) -> None:
self.__dict__.update(state)
self._setup()
def create_new_study(
self, directions: Sequence[StudyDirection], study_name: str | None = None
) -> int:
request = api_pb2.CreateNewStudyRequest(
directions=[
api_pb2.MINIMIZE if d == StudyDirection.MINIMIZE else api_pb2.MAXIMIZE
for d in directions
],
study_name=study_name or DEFAULT_STUDY_NAME_PREFIX + str(uuid.uuid4()),
)
try:
response = self._stub.CreateNewStudy(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.ALREADY_EXISTS:
raise DuplicatedStudyError from e
raise
return response.study_id
def delete_study(self, study_id: int) -> None:
request = api_pb2.DeleteStudyRequest(study_id=study_id)
try:
self._stub.DeleteStudy(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
# TODO(c-bata): Fix a cache invalidation issue when using SQLite3
# Please see https://github.com/optuna/optuna/pull/5872/files#r1893708995 for details.
self._cache.delete_study_cache(study_id)
def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None:
request = api_pb2.SetStudyUserAttributeRequest(
study_id=study_id, key=key, value=json.dumps(value)
)
try:
self._stub.SetStudyUserAttribute(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
def set_study_system_attr(self, study_id: int, key: str, value: Any) -> None:
request = api_pb2.SetStudySystemAttributeRequest(
study_id=study_id, key=key, value=json.dumps(value)
)
try:
self._stub.SetStudySystemAttribute(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
def get_study_id_from_name(self, study_name: str) -> int:
request = api_pb2.GetStudyIdFromNameRequest(study_name=study_name)
try:
response = self._stub.GetStudyIdFromName(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
return response.study_id
def get_study_name_from_id(self, study_id: int) -> str:
request = api_pb2.GetStudyNameFromIdRequest(study_id=study_id)
try:
response = self._stub.GetStudyNameFromId(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
return response.study_name
def get_study_directions(self, study_id: int) -> list[StudyDirection]:
request = api_pb2.GetStudyDirectionsRequest(study_id=study_id)
try:
response = self._stub.GetStudyDirections(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
return [
StudyDirection.MINIMIZE if d == api_pb2.MINIMIZE else StudyDirection.MAXIMIZE
for d in response.directions
]
def get_study_user_attrs(self, study_id: int) -> dict[str, Any]:
request = api_pb2.GetStudyUserAttributesRequest(study_id=study_id)
try:
response = self._stub.GetStudyUserAttributes(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
return {key: json.loads(value) for key, value in response.user_attributes.items()}
def get_study_system_attrs(self, study_id: int) -> dict[str, Any]:
request = api_pb2.GetStudySystemAttributesRequest(study_id=study_id)
try:
response = self._stub.GetStudySystemAttributes(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
return {key: json.loads(value) for key, value in response.system_attributes.items()}
def get_all_studies(self) -> list[FrozenStudy]:
request = api_pb2.GetAllStudiesRequest()
response = self._stub.GetAllStudies(request)
return [
FrozenStudy(
study_id=study.study_id,
study_name=study.study_name,
direction=None,
directions=[
StudyDirection.MINIMIZE if d == api_pb2.MINIMIZE else StudyDirection.MAXIMIZE
for d in study.directions
],
user_attrs={
key: json.loads(value) for key, value in study.user_attributes.items()
},
system_attrs={
key: json.loads(value) for key, value in study.system_attributes.items()
},
)
for study in response.studies
]
def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int:
if template_trial is None:
request = api_pb2.CreateNewTrialRequest(study_id=study_id, template_trial_is_none=True)
else:
request = api_pb2.CreateNewTrialRequest(
study_id=study_id,
template_trial=grpc_servicer._to_proto_trial(template_trial),
template_trial_is_none=False,
)
try:
response = self._stub.CreateNewTrial(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
return response.trial_id
def set_trial_param(
self,
trial_id: int,
param_name: str,
param_value_internal: float,
distribution: BaseDistribution,
) -> None:
request = api_pb2.SetTrialParameterRequest(
trial_id=trial_id,
param_name=param_name,
param_value_internal=param_value_internal,
distribution=distribution_to_json(distribution),
)
try:
self._stub.SetTrialParameter(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
elif e.code() == grpc.StatusCode.FAILED_PRECONDITION:
raise UpdateFinishedTrialError from e
elif e.code() == grpc.StatusCode.INVALID_ARGUMENT:
raise ValueError from e
else:
raise
def set_trial_state_values(
self, trial_id: int, state: TrialState, values: Sequence[float] | None = None
) -> bool:
request = api_pb2.SetTrialStateValuesRequest(
trial_id=trial_id,
state=grpc_servicer._to_proto_trial_state(state),
values=values,
)
try:
response = self._stub.SetTrialStateValues(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
elif e.code() == grpc.StatusCode.FAILED_PRECONDITION:
raise UpdateFinishedTrialError from e
else:
raise
return response.trial_updated
def set_trial_intermediate_value(
self, trial_id: int, step: int, intermediate_value: float
) -> None:
request = api_pb2.SetTrialIntermediateValueRequest(
trial_id=trial_id, step=step, intermediate_value=intermediate_value
)
try:
self._stub.SetTrialIntermediateValue(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
elif e.code() == grpc.StatusCode.FAILED_PRECONDITION:
raise UpdateFinishedTrialError from e
else:
raise
def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None:
request = api_pb2.SetTrialUserAttributeRequest(
trial_id=trial_id, key=key, value=json.dumps(value)
)
try:
self._stub.SetTrialUserAttribute(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
elif e.code() == grpc.StatusCode.FAILED_PRECONDITION:
raise UpdateFinishedTrialError from e
else:
raise
def set_trial_system_attr(self, trial_id: int, key: str, value: Any) -> None:
request = api_pb2.SetTrialSystemAttributeRequest(
trial_id=trial_id, key=key, value=json.dumps(value)
)
try:
self._stub.SetTrialSystemAttribute(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
elif e.code() == grpc.StatusCode.FAILED_PRECONDITION:
raise UpdateFinishedTrialError from e
else:
raise
def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int:
request = api_pb2.GetTrialIdFromStudyIdTrialNumberRequest(
study_id=study_id, trial_number=trial_number
)
try:
response = self._stub.GetTrialIdFromStudyIdTrialNumber(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
return response.trial_id
def get_trial(self, trial_id: int) -> FrozenTrial:
request = api_pb2.GetTrialRequest(trial_id=trial_id)
try:
response = self._stub.GetTrial(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.NOT_FOUND:
raise KeyError from e
raise
return grpc_servicer._from_proto_trial(response.trial)
def get_all_trials(
self,
study_id: int,
deepcopy: bool = True,
states: Container[TrialState] | None = None,
) -> list[FrozenTrial]:
trials = self._cache.get_all_trials(study_id, states)
return copy.deepcopy(trials) if deepcopy else trials
| GrpcStorageProxy |
python | doocs__leetcode | solution/3100-3199/3133.Minimum Array End/Solution.py | {
"start": 0,
"end": 253
} | class ____:
def minEnd(self, n: int, x: int) -> int:
n -= 1
ans = x
for i in range(31):
if x >> i & 1 ^ 1:
ans |= (n & 1) << i
n >>= 1
ans |= n << 31
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/multi_device_iterator_test.py | {
"start": 2021,
"end": 5249
} | class ____(test_base.DatasetTestBase,
parameterized.TestCase):
"""Tests that are common to MultiDeviceIterator and OwnedMultiDeviceIterator."""
def setUp(self):
super().setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(), cls_combination))
def testCancelGetNextWithDevice(self, cls):
ping = data_flow_ops.FIFOQueue(capacity=2, dtypes=dtypes.int64)
pong = data_flow_ops.FIFOQueue(capacity=2, dtypes=dtypes.int64)
@def_function.function
def map_fn(v):
ball = ping.dequeue()
with ops.control_dependencies([pong.enqueue(ball)]):
return v + ping.dequeue()
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.map(map_fn)
# We need to set prefetch_buffer_size=0 so that we can cancel the
# MultiDeviceIteratorGetNextFromShardOp from eager. If
# prefetch_buffer_size>0, that op runs in the background threads of the
# prefetch and can only be cancelled by deleting the iterator.
multi_device_iterator = cls(
dataset, [self._devices[1], self._devices[2]], prefetch_buffer_size=0)
@def_function.function
def get_next_device1():
return multi_device_iterator.get_next(self._devices[1])
async_executor = executor.new_executor(enable_async=True)
with context.executor_scope(async_executor):
cancel_mgr = cancellation.CancellationManager()
cancel_mgr.get_cancelable_function(
get_next_device1.get_concrete_function())()
# Make sure we cancel in the middle of get_next.
ping.enqueue(0)
pong.dequeue()
cancel_mgr.start_cancel()
with self.assertRaises(errors.CancelledError):
async_executor.wait()
# Note that fetching from upstream iterator is not cancelled with the
# cancellation of get_next.
ping.enqueue(0)
# Cancelling a get_next on one device shouldn't cancel the
# multi_device_iterator and iterators on other devices.
ping.enqueue(0)
ping.enqueue(0)
self.assertEqual(1,
multi_device_iterator.get_next(self._devices[2]).numpy())
# FIXME(b/209534797): Workaround an asan error caused by this test.
# Remove the dangling reference from tf.function to ensure queue objects
# are not freed before they are flushed.
import gc # pylint: disable=g-import-not-at-top
del get_next_device1
gc.collect()
@combinations.generate(
combinations.times(test_base.eager_only_combinations(), cls_combination))
def testEmptyDataset(self, cls):
dataset = dataset_ops.Dataset.range(0)
multi_device_iterator = cls(
dataset, devices=[self._devices[1], self._devices[2]])
with self.assertRaises(errors.OutOfRangeError):
multi_device_iterator.get_next()
@combinations.generate(
combinations.times(test_base.eager_only_combinations(), cls_combination))
def testEmptyDeviceList(self, cls):
dataset = dataset_ops.Dataset.range(10)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Length for attr 'devices' of 0 must be at least minimum 1"):
cls(dataset, devices=[])
| MultiDeviceIteratorCommonTest |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 97072,
"end": 99108
} | class ____(rv_continuous):
r"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is:
.. math::
f(x, a, b, c) = (a + b (1 - \exp(-c x)))
\exp(-a x - b x + \frac{b}{c} (1-\exp(-c x)))
for :math:`x \ge 0`, :math:`a, b, c > 0`.
`genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, Asit P. Basu (editors), *The Exponential Distribution:
Theory, Methods and Applications*, Gordon and Breach, 1995.
ISBN 10: 2884491929
%(example)s
"""
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
return [ia, ib, ic]
def _pdf(self, x, a, b, c):
# genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
# exp(-a*x - b*x + b/c * (1-exp(-c*x)))
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _ppf(self, p, a, b, c):
s = a + b
t = (b - c*np.log1p(-p))/s
return (t + sc.lambertw(-b/s * np.exp(-t)).real)/c
def _sf(self, x, a, b, c):
return np.exp((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _isf(self, p, a, b, c):
s = a + b
t = (b - c*np.log(p))/s
return (t + sc.lambertw(-b/s * np.exp(-t)).real)/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
| genexpon_gen |
python | django-extensions__django-extensions | django_extensions/management/jobs.py | {
"start": 519,
"end": 567
} | class ____(BaseJob):
when = "hourly"
| HourlyJob |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 78349,
"end": 95672
} | class ____:
"""Tests relationship tracking between tasks"""
@pytest.fixture
def flow_with_upstream_downstream(self):
@task
def upstream(result):
return result
@task
def downstream(value):
return value
@flow
def upstream_downstream_flow(result):
upstream_state = upstream(result, return_state=True)
# TODO: Running with the new engine causes the result call to return a coroutine
# because it runs on the main thread with an active event loop. We need to update
# result retrieval to be sync.
result = upstream_state.result()
if asyncio.iscoroutine(result):
result = run_coro_as_sync(result)
downstream_state = downstream(result, return_state=True)
return upstream_state, downstream_state
return upstream_downstream_flow
async def test_task_inputs_populated_with_no_upstreams(
self, prefect_client, events_pipeline
):
@task
def foo(x):
return x
@flow
def test_flow():
return foo.submit(1)
flow_state = test_flow(return_state=True)
x = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(x.state_details.task_run_id)
assert task_run.task_inputs == dict(x=[])
async def test_task_inputs_populated_with_no_upstreams_and_multiple_parameters(
self, prefect_client, events_pipeline
):
@task
def foo(x, *a, **k):
return x
@flow
def test_flow():
return foo.submit(1)
flow_state = test_flow(return_state=True)
x = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(x.state_details.task_run_id)
assert task_run.task_inputs == dict(x=[], a=[], k=[])
async def test_task_inputs_populated_with_one_upstream_positional_future(
self, prefect_client, events_pipeline
):
@task
def foo(x):
return x
@task
def bar(x, y):
return x + y
@flow
def test_flow():
a = foo.submit(1)
b = foo.submit(2)
c = bar(a, 1, return_state=True)
return a, b, c
flow_state = test_flow(return_state=True)
a, b, c = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(c.state_details.task_run_id)
assert task_run.task_inputs == dict(
x=[TaskRunResult(id=a.state_details.task_run_id)],
y=[],
)
async def test_task_inputs_populated_with_one_upstream_keyword_future(
self, prefect_client, events_pipeline
):
@task
def foo(x):
return x
@task
def bar(x, y):
return x + y
@flow
def test_flow():
a = foo.submit(1)
b = foo.submit(2)
c = bar(x=a, y=1, return_state=True)
return a, b, c
flow_state = test_flow(return_state=True)
a, b, c = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(c.state_details.task_run_id)
assert task_run.task_inputs == dict(
x=[TaskRunResult(id=a.state_details.task_run_id)],
y=[],
)
async def test_task_inputs_populated_with_two_upstream_futures(
self, prefect_client, events_pipeline
):
@task
def foo(x):
return x
@task
def bar(x, y):
return x + y
@flow
def test_flow():
a = foo.submit(1)
b = foo.submit(2)
c = bar(a, b, return_state=True)
return a, b, c
flow_state = test_flow(return_state=True)
a, b, c = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(c.state_details.task_run_id)
assert task_run.task_inputs == dict(
x=[TaskRunResult(id=a.state_details.task_run_id)],
y=[TaskRunResult(id=b.state_details.task_run_id)],
)
async def test_task_inputs_populated_with_two_upstream_futures_from_same_task(
self, prefect_client, events_pipeline
):
@task
def foo(x):
return x
@task
def bar(x, y):
return x + y
@flow
def test_flow():
a = foo.submit(1)
c = bar(a, a, return_state=True)
return a, c
flow_state = test_flow(return_state=True)
a, c = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(c.state_details.task_run_id)
assert task_run.task_inputs == dict(
x=[TaskRunResult(id=a.state_details.task_run_id)],
y=[TaskRunResult(id=a.state_details.task_run_id)],
)
async def test_task_inputs_populated_with_nested_upstream_futures(
self, prefect_client, events_pipeline
):
@task
def foo(x):
return x
@task
def bar(x, y):
return x, y
@flow
def test_flow():
a = foo.submit(1)
b = foo.submit(2)
c = foo.submit(3)
d = bar([a, a, b], {3: b, 4: {5: {c, 4}}}, return_state=True)
return a, b, c, d
flow_state = test_flow(return_state=True)
a, b, c, d = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(d.state_details.task_run_id)
assert comparable_inputs(task_run.task_inputs) == dict(
x={
TaskRunResult(id=a.state_details.task_run_id),
TaskRunResult(id=b.state_details.task_run_id),
},
y={
TaskRunResult(id=b.state_details.task_run_id),
TaskRunResult(id=c.state_details.task_run_id),
},
)
async def test_task_inputs_populated_with_subflow_upstream(
self, prefect_client, events_pipeline
):
@task
def foo(x):
return x
@flow
def child(x):
return x
@flow
def parent():
child_state = child(1, return_state=True)
return child_state, foo.submit(child_state)
parent_state = parent(return_state=True)
child_state, task_state = await parent_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
task_state.state_details.task_run_id
)
assert task_run.task_inputs == dict(
x=[TaskRunResult(id=child_state.state_details.task_run_id)],
)
async def test_task_inputs_populated_with_result_upstream(
self, sync_prefect_client, events_pipeline
):
@task
def name():
return "Fred"
@task
def say_hi(name):
return f"Hi {name}"
@flow
def test_flow():
my_name = name(return_state=True)
hi = say_hi(my_name.result(), return_state=True)
return my_name, hi
flow_state = test_flow(return_state=True)
name_state, hi_state = await flow_state.result()
await events_pipeline.process_events()
task_run = sync_prefect_client.read_task_run(hi_state.state_details.task_run_id)
assert task_run.task_inputs == dict(
name=[TaskRunResult(id=name_state.state_details.task_run_id)],
)
async def test_task_inputs_populated_with_result_upstream_from_future(
self, prefect_client, events_pipeline
):
@task
def upstream(x):
return x
@task
def downstream(x):
return x
@flow
def test_flow():
upstream_future = upstream.submit(257)
downstream_state = downstream(upstream_future, return_state=True)
upstream_future.wait()
upstream_state = upstream_future.state
return upstream_state, downstream_state
upstream_state, downstream_state = test_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
downstream_state.state_details.task_run_id
)
assert task_run.task_inputs == dict(
x=[TaskRunResult(id=upstream_state.state_details.task_run_id)],
)
async def test_task_inputs_populated_with_result_upstream_from_state(
self, prefect_client, events_pipeline
):
@task
def upstream(x):
return x
@task
def downstream(x):
return x
@flow
def test_flow():
upstream_state = upstream(1, return_state=True)
upstream_result = upstream_state.result()
downstream_state = downstream(upstream_result, return_state=True)
return upstream_state, downstream_state
upstream_state, downstream_state = test_flow()
await events_pipeline.process_events()
await prefect_client.read_task_run(downstream_state.state_details.task_run_id)
async def test_task_inputs_populated_with_state_upstream(
self, prefect_client, events_pipeline
):
@task
def upstream(x):
return x
@task
def downstream(x):
return x
@flow
def test_flow():
upstream_state = upstream(1, return_state=True)
downstream_state = downstream(upstream_state, return_state=True)
return upstream_state, downstream_state
upstream_state, downstream_state = test_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
downstream_state.state_details.task_run_id
)
assert task_run.task_inputs == dict(
x=[TaskRunResult(id=upstream_state.state_details.task_run_id)],
)
async def test_task_inputs_populated_with_state_upstream_wrapped_with_allow_failure(
self, prefect_client, events_pipeline
):
@task
def upstream(x):
return x
@task
def downstream(x):
return x
@flow
def test_flow():
upstream_state = upstream(1, return_state=True)
downstream_state = downstream(
allow_failure(upstream_state), return_state=True
)
return upstream_state, downstream_state
upstream_state, downstream_state = test_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
downstream_state.state_details.task_run_id
)
assert task_run.task_inputs == dict(
x=[TaskRunResult(id=upstream_state.state_details.task_run_id)],
)
@pytest.mark.parametrize("result", [["Fred"], {"one": 1}, {1, 2, 2}, (1, 2)])
async def test_task_inputs_populated_with_collection_result_upstream(
self, result, prefect_client, flow_with_upstream_downstream, events_pipeline
):
flow_state = flow_with_upstream_downstream(result, return_state=True)
upstream_state, downstream_state = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
downstream_state.state_details.task_run_id
)
assert task_run.task_inputs == dict(
value=[TaskRunResult(id=upstream_state.state_details.task_run_id)],
)
@pytest.mark.parametrize("result", ["Fred", 5.1])
async def test_task_inputs_populated_with_basic_result_types_upstream(
self, result, prefect_client, flow_with_upstream_downstream, events_pipeline
):
flow_state = flow_with_upstream_downstream(result, return_state=True)
upstream_state, downstream_state = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
downstream_state.state_details.task_run_id
)
assert task_run.task_inputs == dict(
value=[TaskRunResult(id=upstream_state.state_details.task_run_id)],
)
@pytest.mark.parametrize("result", [True, False, None, ..., NotImplemented])
async def test_task_inputs_not_populated_with_singleton_results_upstream(
self, result, prefect_client, flow_with_upstream_downstream, events_pipeline
):
flow_state = flow_with_upstream_downstream(result, return_state=True)
_, downstream_state = await flow_state.result()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
downstream_state.state_details.task_run_id
)
assert task_run.task_inputs == dict(value=[])
async def test_task_inputs_populated_with_result_upstream_from_state_with_unpacking_trackables(
self, prefect_client, events_pipeline
):
@task
def task_1():
task_3_in = [1, 2, 3]
task_2_in = "Woof!"
return task_2_in, task_3_in
@task
def task_2(task_2_input):
return (task_2_input + " Bark!",)
@task
def task_3(task_3_input):
task_3_input.append(4)
return task_3_input
@flow
def unpacking_flow():
t1_state = task_1(return_state=True)
t1_res_1, t1_res_2 = t1_state.result()
t2_state = task_2(t1_res_1, return_state=True)
t3_state = task_3(t1_res_2, return_state=True)
return t1_state, t2_state, t3_state
t1_state, t2_state, t3_state = unpacking_flow()
await events_pipeline.process_events()
task_3_run = await prefect_client.read_task_run(
t3_state.state_details.task_run_id
)
assert task_3_run.task_inputs == dict(
task_3_input=[TaskRunResult(id=t1_state.state_details.task_run_id)],
)
task_2_run = await prefect_client.read_task_run(
t2_state.state_details.task_run_id
)
assert task_2_run.task_inputs == dict(
task_2_input=[TaskRunResult(id=t1_state.state_details.task_run_id)],
)
async def test_task_inputs_populated_with_result_upstream_from_state_with_unpacking_mixed_untrackable_types(
self, prefect_client, events_pipeline
):
@task
def task_1():
task_3_in = [1, 2, 3]
task_2_in = 2
return task_2_in, task_3_in
@task
def task_2(task_2_input):
return task_2_input + 1
@task
def task_3(task_3_input):
task_3_input.append(4)
return task_3_input
@flow
def unpacking_flow():
t1_state = task_1(return_state=True)
t1_res_1, t1_res_2 = t1_state.result()
t2_state = task_2(t1_res_1, return_state=True)
t3_state = task_3(t1_res_2, return_state=True)
return t1_state, t2_state, t3_state
t1_state, t2_state, t3_state = unpacking_flow()
await events_pipeline.process_events()
task_3_run = await prefect_client.read_task_run(
t3_state.state_details.task_run_id
)
assert task_3_run.task_inputs == dict(
task_3_input=[TaskRunResult(id=t1_state.state_details.task_run_id)],
)
task_2_run = await prefect_client.read_task_run(
t2_state.state_details.task_run_id
)
assert task_2_run.task_inputs == dict(
task_2_input=[],
)
async def test_task_inputs_populated_with_result_upstream_from_state_with_unpacking_no_trackable_types(
self, prefect_client, events_pipeline
):
@task
def task_1():
task_3_in = True
task_2_in = 2
return task_2_in, task_3_in
@task
def task_2(task_2_input):
return task_2_input + 1
@task
def task_3(task_3_input):
return task_3_input
@flow
def unpacking_flow():
t1_state = task_1(return_state=True)
t1_res_1, t1_res_2 = t1_state.result()
t2_state = task_2(t1_res_1, return_state=True)
t3_state = task_3(t1_res_2, return_state=True)
return t1_state, t2_state, t3_state
t1_state, t2_state, t3_state = unpacking_flow()
await events_pipeline.process_events()
task_3_run = await prefect_client.read_task_run(
t3_state.state_details.task_run_id
)
assert task_3_run.task_inputs == dict(
task_3_input=[],
)
task_2_run = await prefect_client.read_task_run(
t2_state.state_details.task_run_id
)
assert task_2_run.task_inputs == dict(
task_2_input=[],
)
| TestTaskInputs |
python | spyder-ide__spyder | spyder/plugins/completion/api.py | {
"start": 3189,
"end": 13197
} | class ____:
"""LSP workspace modification error codes."""
# Applying the workspace change is simply aborted if one
# of the changes provided fails. All operations executed before, stay.
ABORT = 'abort'
# All the operations succeed or no changes at all.
TRANSACTIONAL = 'transactional'
# The client tries to undo the applied operations, best effort strategy.
UNDO = 'undo'
# The textual changes are applied transactionally, whereas
# creation/deletion/renaming operations are aborted.
TEXT_ONLY_TRANSACTIONAL = 'textOnlyTransactional'
# -------------------- CLIENT CONFIGURATION SETTINGS --------------------------
# WorkspaceClientCapabilities define capabilities the
# editor / tool provides on the workspace
WORKSPACE_CAPABILITIES = {
# The client supports applying batch edits to the workspace.
# Request: An array of `TextDocumentEdit`s to express changes
# to n different text documents
"applyEdit": True,
# Workspace edition settings
"workspaceEdit": {
# The client supports versioned document changes.
"documentChanges": True,
# The resource operations that the client supports
"resourceOperations": [ResourceOperationKind.CREATE,
ResourceOperationKind.RENAME,
ResourceOperationKind.DELETE],
# Failure handling strategy applied by the client.
"failureHandling": FailureHandlingKind.TRANSACTIONAL
},
# Did change configuration notification supports dynamic registration.
"didChangeConfiguration": {
# Reload server settings dynamically
"dynamicRegistration": True
},
# The watched files notification is sent from the client to the server
# when the client detects changes to files watched by
# the language client.
"didChangeWatchedFiles": {
# Can be turned on/off dynamically
"dynamicRegistration": True
},
# The workspace symbol request is sent from the client to the server to
# list project-wide symbols matching the query string.
"symbol": {
# Can be turned on/off dynamically
"dynamicRegistration": True
},
# The workspace/executeCommand request is sent from the client to the
# server to trigger command execution on the server. In most cases the
# server creates a WorkspaceEdit structure and applies the changes to
# the workspace using the request workspace/applyEdit which is sent from
# the server to the client.
"executeCommand": {
# Can be turned on/off dynamically
"dynamicRegistration": True,
# Specific capabilities for the `SymbolKind` in the `workspace/symbol`
# request.
"symbolKind": {
# The symbol kind values the client supports.
"valueSet": [value for value in SymbolKind.__dict__.values()
if isinstance(value, int)]
}
},
# The client has support for workspace folders.
"workspaceFolders": True,
# The client supports `workspace/configuration` requests.
"configuration": True
}
# TextDocumentClientCapabilities define capabilities the editor / tool
# provides on text documents.
TEXT_EDITOR_CAPABILITES = {
# Editor supports file watching and synchronization (Required)
"synchronization": {
# File synchronization can be turned on/off.
"dynamicRegistration": True,
# The client (Spyder) will send a willSave notification
# to the server when a file is about to be saved.
"willSave": True,
# The client (Spyder) supports sending a will save request and
# waits for a response providing text edits which will
# be applied to the document before it is saved.
"willSaveWaitUntil": True,
# The client (Spyder) supports did save notifications.
# The document save notification is sent from the client to
# the server when the document was saved in the client.
"didSave": True
},
# Editor supports code completion operations.
# The Completion request is sent from the client to the server to
# compute completion items at a given cursor position.
"completion": {
# Code completion can be turned on/off dynamically.
"dynamicRegistration": True,
"completionItem": {
# Client (Spyder) supports snippets as insert text.
# A snippet can define tab stops and placeholders with `$1`, `$2`
# and `${3:foo}`. `$0` defines the final tab stop, it defaults to
# the end of the snippet. Placeholders with equal identifiers are
# linked, that is typing in one will update others too.
"snippetSupport": True,
# Completion item docs can only be handled in plain text
"documentationFormat": ['plaintext'],
}
},
# The hover request is sent from the client to the server to request
# hover information at a given text document position.
"hover": {
# Hover introspection can be turned on/off dynamically.
"dynamicRegistration": True,
# Hover contents can only be handled in plain text by Spyder
"contentFormat": ['plaintext'],
},
# The signature help request is sent from the client to the server to
# request signature information at a given cursor position.
"signatureHelp": {
# Function/Class/Method signature hinting can be turned on/off
# dynamically.
"dynamicRegistration": True,
# Signature docs can only be handled in plain text by Spyder
"signatureInformation": {
"documentationFormat": ['plaintext'],
}
},
# Editor allows to find references.
# The references request is sent from the client to the server to resolve
# project-wide references for the symbol denoted by the given text
# document position.
"references": {
# Find references can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor allows to highlight different text sections at the same time.
# The document highlight request is sent from the client to the server to
# resolve a document highlights for a given text document position
"documentHighlight": {
# Code highlighting can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor supports finding symbols on a document.
# The document symbol request is sent from the client to the server to list
# all symbols found in a given text document.
"documentSymbol": {
# Find symbols on document can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor allows to autoformat all the document.
# The document formatting request is sent from the server to the client to
# format a whole document.
"formatting": {
# Document formatting can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor can autoformat only a selected region on a document.
# The document range formatting request is sent from the client to the
# server to format a given range in a document.
"rangeFormatting": {
# Partial document formatting can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor allows to format a document while an edit is taking place.
# The document on type formatting request is sent from the client to the
# server to format parts of the document during typing.
"onTypeFormatting": {
# On-Type formatting can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor has an option to go to a function/class/method definition.
# The goto definition request is sent from the client to the server to
# resolve the definition location of a symbol at a given text document
# position.
"definition": {
# Go-to-definition can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor can give/highlight refactor tips/solutions.
# The code action request is sent from the client to the server to compute
# commands for a given text document and range. These commands are
# typically code fixes to either fix problems or to beautify/refactor code.
"codeAction": {
# Code hints can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor can display additional commands/statistics per each line.
# The code lens request is sent from the client to the server to compute
# code lenses for a given text document.
# A code lens represents a command that should be shown along with
# source text, like the number of references, a way to run tests, etc.
"codeLens": {
# Code lens can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor allows to find cross-document link references.
# The document links request is sent from the client to the server to
# request the location of links in a document.
# A document link is a range in a text document that links to an internal
# or external resource, like another text document or a web site.
"documentLink": {
# Finding document cross-references can be turned on/off dynamically.
"dynamicRegistration": True
},
# Editor allows to rename a variable/function/reference globally
# on a document.
# The rename request is sent from the client to the server to perform
# a workspace-wide rename of a symbol.
"rename": {
"dynamicRegistration": True
}
}
# Spyder editor and workspace capabilities
CLIENT_CAPABILITES = {
"workspace": WORKSPACE_CAPABILITIES,
"textDocument": TEXT_EDITOR_CAPABILITES
}
# -------------------- SERVER CONFIGURATION SETTINGS --------------------------
# Text document synchronization mode constants
| FailureHandlingKind |
python | readthedocs__readthedocs.org | readthedocs/organizations/managers.py | {
"start": 205,
"end": 1242
} | class ____(models.Manager):
"""Manager to control team's access."""
def teams_for_user(self, user, organization, admin, member):
teams = self.get_queryset().none()
if admin:
# Project Team Admin
teams |= user.teams.filter(access=ADMIN_ACCESS)
# Org Admin
for org in user.owner_organizations.all():
teams |= org.teams.all()
if member:
# Project Team Member
teams |= user.teams.filter(access=READ_ONLY_ACCESS)
if organization:
teams = teams.filter(organization=organization)
return teams.distinct()
def admin(self, user, organization=None):
return self.teams_for_user(
user,
organization,
admin=True,
member=False,
)
def member(self, user, organization=None):
return self.teams_for_user(
user,
organization,
admin=True,
member=True,
)
| TeamManagerBase |
python | getsentry__sentry | src/sentry/seer/endpoints/trace_explorer_ai_query.py | {
"start": 1522,
"end": 4811
} | class ____(OrganizationEndpoint):
"""
This endpoint is called when a user visits the trace explorer with the correct flags enabled.
"""
publish_status = {
"POST": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ML_AI
permission_classes = (OrganizationTraceExplorerAIPermission,)
@staticmethod
def post(request: Request, organization: Organization) -> Response:
"""
Request to translate a natural language query into a sentry EQS query.
"""
if not request.user.is_authenticated:
return Response(
{"detail": "User is not authenticated"},
status=status.HTTP_400_BAD_REQUEST,
)
project_ids = [int(x) for x in request.data.get("project_ids", [])]
natural_language_query = request.data.get("natural_language_query")
limit = request.data.get("limit", 1)
if len(project_ids) == 0 or not natural_language_query:
return Response(
{
"detail": "Missing one or more required parameters: project_ids, natural_language_query"
},
status=status.HTTP_400_BAD_REQUEST,
)
if organization.get_option("sentry:hide_ai_features", False):
return Response(
{"detail": "AI features are disabled for this organization."},
status=status.HTTP_403_FORBIDDEN,
)
if not features.has(
"organizations:gen-ai-explore-traces", organization=organization, actor=request.user
) or not features.has(
"organizations:gen-ai-features", organization=organization, actor=request.user
):
return Response(
{"detail": "Organization does not have access to this feature"},
status=status.HTTP_403_FORBIDDEN,
)
if not settings.SEER_AUTOFIX_URL:
return Response(
{"detail": "Seer is not properly configured."},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
data = send_translate_request(
organization.id, organization.slug, project_ids, natural_language_query
)
responses = data.get("responses", [])[:limit]
unsupported_reason = data.get("unsupported_reason")
if len(responses) == 0 and not unsupported_reason:
logger.info("No results found for query")
return Response(
{"detail": "No results found for query"},
status=status.HTTP_404_NOT_FOUND,
)
return Response(
{
"status": "ok",
"queries": [
{
"query": query["query"],
"stats_period": query["stats_period"],
"group_by": list(query.get("group_by", [])),
"visualization": list(query.get("visualization", [])),
"sort": query["sort"],
"mode": query.get("mode", "spans"),
}
for query in responses
],
"unsupported_reason": unsupported_reason,
}
)
| TraceExplorerAIQuery |
python | giampaolo__psutil | tests/test_linux.py | {
"start": 41535,
"end": 42802
} | class ____(PsutilTestCase):
@mock.patch('psutil._pslinux.socket.inet_ntop', side_effect=ValueError)
@mock.patch('psutil._pslinux.supports_ipv6', return_value=False)
def test_emulate_ipv6_unsupported(self, supports_ipv6, inet_ntop):
# see: https://github.com/giampaolo/psutil/issues/623
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
try:
s.bind(("::1", 0))
except OSError:
pass
psutil.net_connections(kind='inet6')
def test_emulate_unix(self):
content = textwrap.dedent("""\
0: 00000003 000 000 0001 03 462170 @/tmp/dbus-Qw2hMPIU3n
0: 00000003 000 000 0001 03 35010 @/tmp/dbus-tB2X8h69BQ
0: 00000003 000 000 0001 03 34424 @/tmp/dbus-cHy80Y8O
000000000000000000000000000000000000000000000000000000
""")
with mock_open_content({"/proc/net/unix": content}) as m:
psutil.net_connections(kind='unix')
assert m.called
# =====================================================================
# --- system disks
# =====================================================================
@pytest.mark.skipif(not LINUX, reason="LINUX only")
| TestSystemNetConnections |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-microsoft-dataverse/source_microsoft_dataverse/dataverse.py | {
"start": 733,
"end": 1043
} | class ____(Enum):
String = {"type": ["null", "string"]}
Boolean = {"type": ["null", "boolean"]}
Timestamp = {"type": ["null", "string"], "format": "date-time", "airbyte_type": "timestamp_with_timezone"}
Integer = {"type": ["null", "integer"]}
Number = {"type": ["null", "number"]}
| AirbyteType |
python | PrefectHQ__prefect | tests/_internal/pydantic/test_validated_func.py | {
"start": 2758,
"end": 4267
} | class ____:
"""Test *args and **kwargs handling."""
def test_var_positional(self):
def sum_all(*numbers: int):
return sum(numbers)
vf = ValidatedFunction(sum_all)
result = vf.validate_call_args((1, 2, 3, 4, 5), {})
assert result == {"numbers": [1, 2, 3, 4, 5]}
def test_var_keyword(self):
def print_kwargs(**kwargs):
return kwargs
vf = ValidatedFunction(print_kwargs)
result = vf.validate_call_args((), {"a": 1, "b": 2, "c": 3})
assert result == {"kwargs": {"a": 1, "b": 2, "c": 3}}
def test_mixed_with_var_positional(self):
def func(a: int, b: int, *rest):
return (a, b, rest)
vf = ValidatedFunction(func)
result = vf.validate_call_args((1, 2, 3, 4, 5), {})
assert result == {"a": 1, "b": 2, "rest": [3, 4, 5]}
def test_mixed_with_var_keyword(self):
def func(a: int, b: int = 0, **kwargs):
return (a, b, kwargs)
vf = ValidatedFunction(func)
result = vf.validate_call_args((1,), {"b": 2, "c": 3, "d": 4})
assert result == {"a": 1, "b": 2, "kwargs": {"c": 3, "d": 4}}
def test_both_var_args_and_kwargs(self):
def func(a: int, *args, **kwargs):
return (a, args, kwargs)
vf = ValidatedFunction(func)
result = vf.validate_call_args((1, 2, 3), {"x": 10, "y": 20})
assert result == {"a": 1, "args": [2, 3], "kwargs": {"x": 10, "y": 20}}
| TestVariadicArguments |
python | nedbat__coveragepy | tests/test_process.py | {
"start": 47332,
"end": 47679
} | class ____(CoverageTest):
"""Test that nothing to report results in an error exit status."""
def test_report(self) -> None:
self.make_file(".coveragerc", "[report]\nfail_under = 99\n")
st, out = self.run_command_status("coverage report")
assert "No data to report." in out
assert st == 1
| FailUnderNoFilesTest |
python | spack__spack | lib/spack/spack/vendor/jinja2/environment.py | {
"start": 58085,
"end": 61100
} | class ____:
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen: t.Iterator[str]) -> None:
self._gen = gen
self.disable_buffering()
def dump(
self,
fp: t.Union[str, t.IO],
encoding: t.Optional[str] = None,
errors: t.Optional[str] = "strict",
) -> None:
"""Dump the complete stream into a file or file-like object.
Per default strings are written, if you want to encode
before writing specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, str):
if encoding is None:
encoding = "utf-8"
fp = open(fp, "wb")
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self) # type: ignore
else:
iterable = self # type: ignore
if hasattr(fp, "writelines"):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self) -> None:
"""Disable the output buffering."""
self._next = partial(next, self._gen)
self.buffered = False
def _buffered_generator(self, size: int) -> t.Iterator[str]:
buf: t.List[str] = []
c_size = 0
push = buf.append
while True:
try:
while c_size < size:
c = next(self._gen)
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
def enable_buffering(self, size: int = 5) -> None:
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError("buffer size too small")
self.buffered = True
self._next = partial(next, self._buffered_generator(size))
def __iter__(self) -> "TemplateStream":
return self
def __next__(self) -> str:
return self._next() # type: ignore
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
| TemplateStream |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ranges.py | {
"start": 30840,
"end": 30969
} | class ____(AbstractSingleRange[datetime]):
"""Represent the PostgreSQL TSRANGE type."""
__visit_name__ = "TSRANGE"
| TSRANGE |
python | getsentry__sentry | tests/sentry/issues/test_grouptype.py | {
"start": 1367,
"end": 4856
} | class ____(BaseGroupTypeTest):
def test_get_types_by_category(self) -> None:
@dataclass(frozen=True)
class TestGroupType(GroupType):
type_id = 1
slug = "test"
description = "Test"
category = GroupCategory.ERROR.value
category_v2 = GroupCategory.ERROR.value
ignore_limit = 0
@dataclass(frozen=True)
class TestGroupType2(GroupType):
type_id = 2
slug = "hellboy"
description = "Hellboy"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.DB_QUERY.value
@dataclass(frozen=True)
class TestGroupType3(GroupType):
type_id = 3
slug = "angelgirl"
description = "AngelGirl"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.DB_QUERY.value
assert get_group_types_by_category(GroupCategory.PERFORMANCE.value) == {2, 3}
assert get_group_types_by_category(GroupCategory.ERROR.value) == {1}
def test_get_group_type_by_slug(self) -> None:
@dataclass(frozen=True)
class TestGroupType(GroupType):
type_id = 1
slug = "test"
description = "Test"
category = GroupCategory.ERROR.value
category_v2 = GroupCategory.ERROR.value
ignore_limit = 0
assert get_group_type_by_slug(TestGroupType.slug) == TestGroupType
assert get_group_type_by_slug("meow") is None
def test_category_validation(self) -> None:
@dataclass(frozen=True)
class TestGroupType(GroupType):
type_id = 1
slug = "error"
description = "Error"
category = 22
category_v2 = 22
with self.assertRaisesMessage(
ValueError,
f"Category must be one of {[category.value for category in GroupCategory]} from GroupCategory",
):
TestGroupType(1, "error", "Error", 22, 22)
def test_default_noise_config(self) -> None:
@dataclass(frozen=True)
class TestGroupType(GroupType):
type_id = 1
slug = "test"
description = "Test"
category = GroupCategory.ERROR.value
category_v2 = GroupCategory.ERROR.value
@dataclass(frozen=True)
class TestGroupType2(GroupType):
type_id = 2
slug = "hellboy"
description = "Hellboy"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.DB_QUERY.value
noise_config = NoiseConfig()
assert TestGroupType.noise_config is None
assert TestGroupType2.noise_config == NoiseConfig()
assert TestGroupType2.noise_config.ignore_limit == DEFAULT_IGNORE_LIMIT
assert TestGroupType2.noise_config.expiry_time == DEFAULT_EXPIRY_TIME
def test_noise_config(self) -> None:
@dataclass(frozen=True)
class TestGroupType(GroupType):
type_id = 2
slug = "hellboy"
description = "Hellboy"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.DB_QUERY.value
noise_config = NoiseConfig(ignore_limit=100, expiry_time=timedelta(hours=12))
assert TestGroupType.noise_config.ignore_limit == 100
assert TestGroupType.noise_config.expiry_time == timedelta(hours=12)
| GroupTypeTest |
python | facebook__pyre-check | tools/incremental_test/specification.py | {
"start": 4969,
"end": 5245
} | class ____(RepositoryUpdate):
@abstractmethod
def update(self, environment: Environment, working_directory: Path) -> None:
raise NotImplementedError()
def update_steps(self) -> List["SingleUpdate"]:
return [self]
@dataclass(frozen=True)
| SingleUpdate |
python | getsentry__sentry | src/sentry/api/serializers/models/organization_member/response.py | {
"start": 564,
"end": 615
} | class ____(TypedDict):
resourceType: str
| SCIMMeta |
python | coleifer__peewee | tests/reflection.py | {
"start": 1645,
"end": 1835
} | class ____(ModelTestCase):
def setUp(self):
super(BaseReflectionTestCase, self).setUp()
self.introspector = Introspector.from_database(self.database)
| BaseReflectionTestCase |
python | huggingface__transformers | src/transformers/models/rt_detr/modeling_rt_detr.py | {
"start": 42853,
"end": 47681
} | class ____(nn.Module):
def __init__(self, config: RTDetrConfig):
super().__init__()
# self-attention
self.self_attn = RTDetrMultiheadAttention(
embed_dim=config.d_model,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.decoder_activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
# cross-attention
self.encoder_attn = RTDetrMultiscaleDeformableAttention(
config,
num_heads=config.decoder_attention_heads,
n_points=config.decoder_n_points,
)
self.encoder_attn_layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
# feedforward neural networks
self.fc1 = nn.Linear(config.d_model, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, config.d_model)
self.final_layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
):
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(seq_len, batch, embed_dim)`.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings that are added to the queries and keys in the self-attention layer.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=encoder_attention_mask,
position_embeddings=position_embeddings,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
second_residual = hidden_states
# Cross-Attention
cross_attn_weights = None
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
encoder_hidden_states=encoder_hidden_states,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = second_residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
| RTDetrDecoderLayer |
python | tensorflow__tensorflow | tensorflow/python/ops/op_selector_test.py | {
"start": 1014,
"end": 6765
} | class ____(test.TestCase):
def setUp(self):
self.graph = ops_lib.Graph()
with self.graph.as_default():
self.a = constant_op.constant([1., 1.], shape=[2], name="a")
with ops_lib.name_scope("foo"):
self.b = constant_op.constant([2., 2.], shape=[2], name="b")
self.c = math_ops.add(self.a, self.b, name="c")
self.d = constant_op.constant([3., 3.], shape=[2], name="d")
with ops_lib.name_scope("bar"):
self.e = math_ops.add(self.c, self.d, name="e")
self.f = math_ops.add(self.c, self.d, name="f")
self.g = math_ops.add(self.c, self.a, name="g")
with ops_lib.control_dependencies([self.c.op]):
self.h = math_ops.add(self.f, self.g, name="h")
def test_is_iterable(self):
"""Test for is_iterable."""
self.assertTrue(op_selector.is_iterable([0, 1, 2]))
self.assertFalse(op_selector.is_iterable(3))
def test_unique_graph(self):
"""Test for check_graphs and get_unique_graph."""
g0 = ops_lib.Graph()
with g0.as_default():
a0 = constant_op.constant(1)
b0 = constant_op.constant(2)
g1 = ops_lib.Graph()
with g1.as_default():
a1 = constant_op.constant(1)
b1 = constant_op.constant(2)
# Same graph, should be fine.
self.assertIsNone(op_selector.check_graphs(a0, b0))
# Two different graphs, should assert.
with self.assertRaises(ValueError):
op_selector.check_graphs(a0, b0, a1, b1)
# a0 and b0 belongs to the same graph, should be fine.
self.assertEqual(op_selector.get_unique_graph([a0, b0]), g0)
# Different graph, should raise an error.
with self.assertRaises(ValueError):
op_selector.get_unique_graph([a0, b0, a1, b1])
def test_unique_graph_func_graph(self):
"""Test for get_unique_graph with FuncGraph."""
outer = ops_lib.Graph()
with outer.as_default():
k1 = constant_op.constant(1)
inner = func_graph.FuncGraph("inner")
inner._graph_key = outer._graph_key
with inner.as_default():
k2 = constant_op.constant(2)
unique_graph = op_selector.get_unique_graph([k1, k2])
self.assertEqual(unique_graph._graph_key, inner._graph_key)
def test_make_list_of_op(self):
"""Test for make_list_of_op."""
g0 = ops_lib.Graph()
with g0.as_default():
a0 = constant_op.constant(1)
b0 = constant_op.constant(2)
# Should extract the ops from the graph.
self.assertEqual(len(op_selector.make_list_of_op(g0)), 2)
# Should extract the ops from the tuple.
self.assertEqual(len(op_selector.make_list_of_op((a0.op, b0.op))), 2)
def test_make_list_of_t(self):
"""Test for make_list_of_t."""
g0 = ops_lib.Graph()
with g0.as_default():
a0 = constant_op.constant(1)
b0 = constant_op.constant(2)
c0 = math_ops.add(a0, b0) # pylint: disable=unused-variable
# Should extract the tensors from the graph.
self.assertEqual(len(op_selector.make_list_of_t(g0)), 3)
# Should extract the tensors from the tuple
self.assertEqual(len(op_selector.make_list_of_t((a0, b0))), 2)
# Should extract the tensors and ignore the ops.
self.assertEqual(
len(op_selector.make_list_of_t(
(a0, a0.op, b0), ignore_ops=True)), 2)
def test_get_generating_consuming(self):
"""Test for get_generating_ops and get_consuming_ops."""
g0 = ops_lib.Graph()
with g0.as_default():
a0 = constant_op.constant(1)
b0 = constant_op.constant(2)
c0 = math_ops.add(a0, b0)
self.assertEqual(len(op_selector.get_generating_ops([a0, b0])), 2)
self.assertEqual(len(op_selector.get_consuming_ops([a0, b0])), 1)
self.assertEqual(len(op_selector.get_generating_ops([c0])), 1)
self.assertEqual(op_selector.get_consuming_ops([c0]), [])
def test_backward_walk_ops(self):
seed_ops = [self.h.op]
# Include all ops except for self.g.op
within_ops = [
x.op for x in [self.a, self.b, self.c, self.d, self.e, self.f, self.h]
]
# For the fn, exclude self.c.op.
within_ops_fn = lambda op: op not in (self.c.op,)
stop_at_ts = (self.f,)
with self.graph.as_default():
# Backward walk only includes h since we stop at f and g is not within.
ops = op_selector.get_backward_walk_ops(
seed_ops,
inclusive=True,
within_ops=within_ops,
within_ops_fn=within_ops_fn,
stop_at_ts=stop_at_ts)
self.assertEqual(set(ops), set([self.h.op]))
# If we do inclusive=False, the result is empty.
ops = op_selector.get_backward_walk_ops(
seed_ops,
inclusive=False,
within_ops=within_ops,
within_ops_fn=within_ops_fn,
stop_at_ts=stop_at_ts)
self.assertEqual(set(ops), set())
# Removing stop_at_fs adds f.op, d.op.
ops = op_selector.get_backward_walk_ops(
seed_ops,
inclusive=True,
within_ops=within_ops,
within_ops_fn=within_ops_fn)
self.assertEqual(set(ops), set([self.d.op, self.f.op, self.h.op]))
# Not using within_ops_fn adds back ops for a, b, c.
ops = op_selector.get_backward_walk_ops(
seed_ops, inclusive=True, within_ops=within_ops)
self.assertEqual(
set(ops),
set([
self.a.op, self.b.op, self.c.op, self.d.op, self.f.op, self.h.op
]))
# Vanially backward search via self.h.op includes everything except e.op.
ops = op_selector.get_backward_walk_ops(seed_ops, inclusive=True)
self.assertEqual(
set(ops),
set([
self.a.op, self.b.op, self.c.op, self.d.op, self.f.op, self.g.op,
self.h.op
]))
if __name__ == "__main__":
test.main()
| SelectTest |
python | getsentry__sentry | src/sentry/api/endpoints/timeseries.py | {
"start": 840,
"end": 1005
} | class ____(TypedDict):
meta: NotRequired[StatsMeta]
timeSeries: list[TimeSeries]
EMPTY_STATS_RESPONSE: dict[str, Any] = {
"timeSeries": [],
}
| StatsResponse |
python | astropy__astropy | astropy/io/fits/tests/test_image.py | {
"start": 347,
"end": 46372
} | class ____(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ""
assert "EXTNAME" not in hdu.header
hdu.name = "FOO"
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# Passing name to constructor
hdu = fits.ImageHDU(name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
# And overriding a header with a different extname
hdr = fits.Header()
hdr["EXTNAME"] = "EVENTS"
hdu = fits.ImageHDU(header=hdr, name="FOO")
assert hdu.name == "FOO"
assert hdu.header["EXTNAME"] == "FOO"
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
__tracebackhide__ = True
assert hdu.ver == reference_ver
assert hdu.header["EXTVER"] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert "EXTVER" not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
# Passing name to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr["EXTVER"] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr["EXTVER"] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr["FILENAME"] = "labq01i3q_rawtag.fits"
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header["FILENAME"] = "labq01i3q_flt.fits"
# Original header should be unchanged
assert phdr["FILENAME"] == "labq01i3q_rawtag.fits"
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data("test0.fits")) # readonly
# data parts are latent instantiation, so if we close the HDUList
# without touching data, data can not be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == (
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
def test_open_2(self):
r = fits.open(self.data("test0.fits"))
info = [(0, "PRIMARY", 1, "PrimaryHDU", 138, (), "", "")] + [
(x, "SCI", x, "ImageHDU", 61, (40, 40), "int16", "") for x in range(1, 5)
]
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data("test0.fits"))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == (
"HDU not found, possibly because the index "
"is out of range, or because the file was "
"closed before all HDUs were read"
)
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data("test0.fits"), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == "list index out of range"
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data("test0.fits"))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3, 7)
b = np.asfortranarray(a)
afits = self.temp("a_str.fits")
bfits = self.temp("b_str.fits")
# writing to str specified files
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
# writing to fileobjs
aafits = self.temp("a_fileobj.fits")
bbfits = self.temp("b_fileobj.fits")
with open(aafits, mode="wb") as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode="wb") as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
a = np.arange(105).reshape(3, 5, 7)
b = np.asfortranarray(a)
# writing to str specified files
afits = self.temp("a_str_slice.fits")
bfits = self.temp("b_str_slice.fits")
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
# writing to fileobjs
aafits = self.temp("a_fileobj_slice.fits")
bbfits = self.temp("b_fileobj_slice.fits")
with open(aafits, mode="wb") as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode="wb") as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([("EXTNAME", "XPRIMARY"), ("EXTVER", 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert "EXTNAME" in hdul[0].header
assert hdul[0].name == "XPRIMARY"
assert hdul[0].name == hdul[0].header["EXTNAME"]
info = [(0, "XPRIMARY", 1, "PrimaryHDU", 5, (), "", "")]
assert hdul.info(output=False) == info
assert hdul["PRIMARY"] is hdul["XPRIMARY"]
assert hdul["PRIMARY"] is hdul[("XPRIMARY", 1)]
hdul[0].name = "XPRIMARY2"
assert hdul[0].header["EXTNAME"] == "XPRIMARY2"
hdul.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[0].name == "XPRIMARY2"
@pytest.mark.filterwarnings(
"ignore:Memory map object was closed but appears to still be referenced:UserWarning"
)
def test_io_manipulation(self):
# Get a keyword value. An extension can be referred by name or by
# number. Both extension and keyword names are case insensitive.
with fits.open(self.data("test0.fits")) as r:
assert r["primary"].header["naxis"] == 0
assert r[0].header["naxis"] == 0
# If there are more than one extension with the same EXTNAME value,
# the EXTVER can be used (as the second argument) to distinguish
# the extension.
assert r["sci", 1].header["detector"] == 1
# append (using "update()") a new card
r[0].header["xxx"] = 1.234e56
assert (
"\n".join(str(x) for x in r[0].header.cards[-3:])
== "EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 "
)
# rename a keyword
r[0].header.rename_keyword("filename", "fname")
pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "history")
pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "simple")
r[0].header.rename_keyword("fname", "filename")
# get a subsection of data
assert np.array_equal(
r[2].data[:3, :3],
np.array(
[[349, 349, 348], [349, 349, 347], [347, 350, 349]], dtype=np.int16
),
)
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp("test_new.fits"), mode="append") as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
# to the newly created file on disk. The HDUList is still open
# and can be further operated.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
# When the file is closed, the most recent additions of
# extension(s) since last flush() will be appended, but any HDU
# already existed at the last flush will not be modified
del n
# If an existing file is opened with "append" mode, like the
# readonly mode, the HDU's will be read into the HDUList which can
# be modified in memory but can not be written back to the original
# file. A file opened with append mode can only add new HDU's.
os.rename(self.temp("test_new.fits"), self.temp("test_append.fits"))
with fits.open(self.temp("test_append.fits"), mode="append") as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp("test_append.fits"), self.temp("test_update.fits"))
with fits.open(self.temp("test_update.fits"), mode="update") as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header["rootname"] == "U2EQ0201T"
u[0].header["rootname"] = "abc"
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
# If the changes affect the size structure, e.g. adding or
# deleting HDU(s), header was expanded or reduced beyond
# existing number of blocks (2880 bytes in each block), or
# change the data size, the HDUList is written to a temporary
# file, the original file is deleted, and the temporary file is
# renamed to the original file name and reopened in the update
# mode. To a user, these two kinds of updating writeback seem
# to be the same, unless the optional argument in flush or
# close is set to 1.
del u[2]
u.flush()
# The write method in HDUList class writes the current HDUList,
# with all changes made up to now, to a new file. This method
# works the same disregard the mode the HDUList was opened
# with.
u.append(r[3])
u.writeto(self.temp("test_new.fits"))
del u
# Another useful new HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data("test0.fits")) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name="SCI")
assert np.array_equal(
hdu.data,
np.array(
[
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
],
dtype=np.float32,
),
)
# create an HDU with header and data
# notice that the header has the right NAXIS's since it is constructed
# with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2], dtype="int32"))
assert (
"\n".join(str(x) for x in hdu2.header.cards[1:5])
== "BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters "
)
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data("test0.fits"), memmap=1)
f1.close()
    def test_verification_on_output(self):
        """Verifying or writing an HDUList whose 0th element is not a
        PrimaryHDU emits VerifyWarnings, and ``writeto(..., "fix")`` repairs
        the structure by inserting a PrimaryHDU at index 0."""
        # verification on output
        # make a defect HDUList first
        x = fits.ImageHDU()
        hdu = fits.HDUList(x)  # HDUList can take a list or one single HDU

        with pytest.warns(fits.verify.VerifyWarning) as w:
            hdu.verify()
        # Three warnings are expected; the second one names the actual defect.
        assert len(w) == 3
        assert "HDUList's 0th element is not a primary HDU" in str(w[1].message)

        with pytest.warns(fits.verify.VerifyWarning) as w:
            hdu.writeto(self.temp("test_new2.fits"), "fix")
        assert len(w) == 3
        assert "Fixed by inserting one as 0th HDU" in str(w[1].message)
    def test_section(self):
        """Exercise ``ImageHDU.section`` with many index forms (scalars,
        slices, strides, negative indices, fancy integer and boolean index
        arrays, and Ellipsis), comparing each result against equivalent
        indexing of the fully loaded ``.data`` array."""
        # section testing
        fs = fits.open(self.data("arange.fits"))
        # The section accessor must report the on-disk dtype.
        assert fs[0].section.dtype == "int32"
        # A handful of cases with the expected values spelled out explicitly:
        assert np.array_equal(fs[0].section[3, 2, 5], 357)
        assert np.array_equal(
            fs[0].section[3, 2, :],
            np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]),
        )
        assert np.array_equal(
            fs[0].section[3, 2, 4:], np.array([356, 357, 358, 359, 360, 361, 362])
        )
        assert np.array_equal(
            fs[0].section[3, 2, :8], np.array([352, 353, 354, 355, 356, 357, 358, 359])
        )
        assert np.array_equal(
            fs[0].section[3, 2, -8:8], np.array([355, 356, 357, 358, 359])
        )
        assert np.array_equal(
            fs[0].section[3, 2:5, :],
            np.array(
                [
                    [352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
                    [363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
                    [374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384],
                ]
            ),
        )
        assert np.array_equal(
            fs[0].section[3, :, :][:3, :3],
            np.array([[330, 331, 332], [341, 342, 343], [352, 353, 354]]),
        )
        # The remaining cases compare directly against full-data indexing.
        dat = fs[0].data
        assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
        assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
        assert np.array_equal(
            fs[0].section[3:6, :, :][:3, :3, :3],
            np.array(
                [
                    [[330, 331, 332], [341, 342, 343], [352, 353, 354]],
                    [[440, 441, 442], [451, 452, 453], [462, 463, 464]],
                    [[550, 551, 552], [561, 562, 563], [572, 573, 574]],
                ]
            ),
        )
        assert np.array_equal(
            fs[0].section[:, :, :][:3, :2, :2],
            np.array(
                [[[0, 1], [11, 12]], [[110, 111], [121, 122]], [[220, 221], [231, 232]]]
            ),
        )
        assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
        assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
        assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
        assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
        # Strided slices and fancy (integer-array) indexing.
        assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
        assert np.array_equal(fs[0].section[:, [1, 2, 4], 3], dat[:, [1, 2, 4], 3])
        # Boolean-mask indexing along one axis.
        bool_index = np.array(
            [True, False, True, True, False, False, True, True, False, True]
        )
        assert np.array_equal(fs[0].section[:, bool_index, :], dat[:, bool_index, :])
        # Ellipsis handling, alone and combined with other index types.
        assert np.array_equal(fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
        assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
        assert np.array_equal(fs[0].section[..., [1, 2, 4], 3], dat[..., [1, 2, 4], 3])
        # Can we use negative indices?
        assert np.array_equal(fs[0].section[-1], dat[-1])
        assert np.array_equal(fs[0].section[-9:-7], dat[-9:-7])
        assert np.array_equal(fs[0].section[-4, -6:-3, -1], dat[-4, -6:-3, -1])
        fs.close()
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
hdul.close()
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
    def test_section_data_cube(self):
        """Section indexing on a 3-D (2x3x3) cube matches indexing the fully
        loaded data: exhaustively for scalar/``:`` combinations, and for a
        broad sample of slice-only combinations."""
        a = np.arange(18).reshape(2, 3, 3)
        hdu = fits.PrimaryHDU(a)
        hdu.writeto(self.temp("test_new.fits"))
        hdul = fits.open(self.temp("test_new.fits"))
        d = hdul[0]
        dat = hdul[0].data
        # Whole-array access via partial index tuples.
        assert (d.section[:] == dat[:]).all()
        assert (d.section[:, :] == dat[:, :]).all()
        # Test that various combinations of indexing on the section are equal to
        # indexing the data.
        # Testing all combinations of scalar-index and [:] for each dimension.
        for idx1 in [slice(None), 0, 1]:
            for idx2 in [slice(None), 0, 1, 2]:
                for idx3 in [slice(None), 0, 1, 2]:
                    nd_idx = (idx1, idx2, idx3)
                    assert (d.section[nd_idx] == dat[nd_idx]).all()
        # Test all ways to slice the last dimension but keeping the first two.
        for idx3 in [
            slice(0, 1),
            slice(0, 2),
            slice(0, 3),
            slice(1, 2),
            slice(1, 3),
            slice(2, 3),
        ]:
            nd_idx = (slice(None), slice(None), idx3)
            assert (d.section[nd_idx] == dat[nd_idx]).all()
        # Test various combinations (not exhaustive) to slice all dimensions.
        for idx1 in [slice(0, 1), slice(1, 2)]:
            for idx2 in [
                slice(0, 1),
                slice(0, 2),
                slice(0, 3),
                slice(1, 2),
                slice(1, 3),
            ]:
                for idx3 in [
                    slice(0, 1),
                    slice(0, 2),
                    slice(0, 3),
                    slice(1, 2),
                    slice(1, 3),
                    slice(2, 3),
                ]:
                    nd_idx = (idx1, idx2, idx3)
                    assert (d.section[nd_idx] == dat[nd_idx]).all()
        hdul.close()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp("test_new.fits"))
hdul = fits.open(self.temp("test_new.fits"))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
hdul.close()
    @pytest.mark.parametrize(
        "file, expected_dtype",
        [("scale.fits", "float32"), ("fixed-1890.fits", "uint16")],
    )
    def test_section_data_scaled(self, file, expected_dtype):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143

        This is like test_section_data_square but uses a file containing scaled
        image data, to test that sections can work correctly with scaled data.
        """
        # First pass: full data is loaded before section access.
        hdul = fits.open(self.data(file))
        d = hdul[0]
        dat = hdul[0].data
        assert d.section.dtype == expected_dtype
        assert (d.section[:, :] == dat[:, :]).all()
        assert (d.section[0, :] == dat[0, :]).all()
        assert (d.section[1, :] == dat[1, :]).all()
        assert (d.section[:, 0] == dat[:, 0]).all()
        assert (d.section[:, 1] == dat[:, 1]).all()
        assert (d.section[0, 0] == dat[0, 0]).all()
        assert (d.section[0, 1] == dat[0, 1]).all()
        assert (d.section[1, 0] == dat[1, 0]).all()
        assert (d.section[1, 1] == dat[1, 1]).all()
        assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
        assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
        assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
        assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
        hdul.close()
        # Test without having accessed the full data first
        hdul = fits.open(self.data(file))
        d = hdul[0]
        assert d.section.dtype == expected_dtype
        assert (d.section[:, :] == dat[:, :]).all()
        assert (d.section[0, :] == dat[0, :]).all()
        assert (d.section[1, :] == dat[1, :]).all()
        assert (d.section[:, 0] == dat[:, 0]).all()
        assert (d.section[:, 1] == dat[:, 1]).all()
        assert (d.section[0, 0] == dat[0, 0]).all()
        assert (d.section[0, 1] == dat[0, 1]).all()
        assert (d.section[1, 0] == dat[1, 0]).all()
        assert (d.section[1, 1] == dat[1, 1]).all()
        assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
        assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
        assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
        assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
        # Section access alone must not have forced a full data load.
        assert not d._data_loaded
        hdul.close()
def test_do_not_scale_image_data(self):
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
assert hdul[0].data.dtype == np.dtype(">i2")
with fits.open(self.data("scale.fits")) as hdul:
assert hdul[0].data.dtype == np.dtype("float32")
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp("test_new.fits"), data=np.array([], dtype="uint8"))
d = np.zeros([100, 100]).astype("uint16")
fits.append(self.temp("test_new.fits"), data=d)
with fits.open(self.temp("test_new.fits"), uint=True) as f:
assert f[1].data.dtype == "uint16"
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu2 = fits.ImageHDU(np.random.rand(100, 100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type="uint8", bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2**int_size) - 1
if int_size == 64:
max_uint = np.uint64(int_size)
dtype = f"uint{int_size}"
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == f"uint{int_size}"
assert "BZERO" in uint_hdu.header
assert uint_hdu.header["BZERO"] == (2 ** (int_size - 1))
filename = f"uint{int_size}.fits"
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == f"uint{int_size}"
assert "BZERO" in new_uint_hdu.header
assert new_uint_hdu.header["BZERO"] == (2 ** (int_size - 1))
    @pytest.mark.parametrize(("from_file"), (False, True))
    @pytest.mark.parametrize(("do_not_scale"), (False,))
    def test_uint_header_keywords_removed_after_bitpix_change(
        self, from_file, do_not_scale
    ):
        """
        Regression test for https://github.com/astropy/astropy/issues/4974

        BZERO/BSCALE should be removed if data is converted to a floating
        point type.

        Currently excluding the case where do_not_scale_image_data=True
        because it is not clear what the expectation should be.
        """
        arr = np.zeros(100, dtype="uint16")

        if from_file:
            # To generate the proper input file we always want to scale the
            # data before writing it...otherwise when we open it will be
            # regular (signed) int data.
            tmp_uint = fits.PrimaryHDU(arr)
            filename = "unsigned_int.fits"
            tmp_uint.writeto(self.temp(filename))
            with fits.open(
                self.temp(filename), do_not_scale_image_data=do_not_scale
            ) as f:
                uint_hdu = f[0]
                # Force a read before we close.
                _ = uint_hdu.data
        else:
            uint_hdu = fits.PrimaryHDU(arr, do_not_scale_image_data=do_not_scale)

        # Make sure appropriate keywords are in the header. See
        # https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
        # for discussion.
        assert "BSCALE" in uint_hdu.header
        assert "BZERO" in uint_hdu.header
        assert uint_hdu.header["BSCALE"] == 1
        assert uint_hdu.header["BZERO"] == 32768

        # Convert data to floating point...
        uint_hdu.data = uint_hdu.data * 1.0

        # ...bitpix should be negative.
        assert uint_hdu.header["BITPIX"] < 0

        # BSCALE and BZERO should NOT be in header any more.
        assert "BSCALE" not in uint_hdu.header
        assert "BZERO" not in uint_hdu.header

        # This is the main test...the data values should round trip
        # as zero.
        filename = "test_uint_to_float.fits"
        uint_hdu.writeto(self.temp(filename))
        with fits.open(self.temp(filename)) as hdul:
            assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
NaNs in the data array.
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header["BLANK"] = 999
hdu.writeto(self.temp("test_new.fits"))
with fits.open(self.temp("test_new.fits")) as hdul:
assert np.isnan(hdul[1].data[1]).all()
    def test_invalid_blanks(self):
        """
        Test that invalid use of the BLANK keyword leads to an appropriate
        warning, and that the BLANK keyword is ignored when returning the
        HDU data.

        Regression test for https://github.com/astropy/astropy/issues/3865
        """
        # BLANK is only valid for integer data; float data makes it invalid.
        arr = np.arange(5, dtype=np.float64)
        hdu = fits.PrimaryHDU(data=arr)
        hdu.header["BLANK"] = 2

        with pytest.warns(
            AstropyUserWarning, match="Invalid 'BLANK' keyword in header"
        ) as w:
            hdu.writeto(self.temp("test_new.fits"))
        # Allow the HDU to be written, but there should be a warning
        # when writing a header with BLANK when then data is not
        # int
        assert len(w) == 1

        # Should also get a warning when opening the file, and the BLANK
        # value should not be applied
        with pytest.warns(
            AstropyUserWarning, match="Invalid 'BLANK' keyword in header"
        ) as w:
            with fits.open(self.temp("test_new.fits")) as h:
                # Data must come back untouched (BLANK ignored).
                assert np.all(arr == h[0].data)
        assert len(w) == 1
    @pytest.mark.filterwarnings("ignore:Invalid 'BLANK' keyword in header")
    def test_scale_back_with_blanks(self):
        """
        Test that when auto-rescaling integer data with "blank" values (where
        the blanks are replaced by NaN in the float data), that the "BLANK"
        keyword is removed from the header.

        Further, test that when using the ``scale_back=True`` option the blank
        values are restored properly.

        Regression test for https://github.com/astropy/astropy/issues/3865
        """
        # Make the sample file
        arr = np.arange(5, dtype=np.int32)
        hdu = fits.PrimaryHDU(data=arr)
        hdu.scale("int16", bscale=1.23)

        # Creating data that uses BLANK is currently kludgy--a separate issue
        # TODO: Rewrite this test when scaling with blank support is better
        # supported

        # Let's just add a value to the data that should be converted to NaN
        # when it is read back in:
        filename = self.temp("test.fits")
        hdu.data[0] = 9999
        hdu.header["BLANK"] = 9999
        hdu.writeto(filename)

        with fits.open(filename) as hdul:
            data = hdul[0].data
            # The BLANK-valued pixel must come back as NaN.
            assert np.isnan(data[0])
            with pytest.warns(
                fits.verify.VerifyWarning, match=r"Invalid 'BLANK' keyword in header"
            ):
                hdul.writeto(self.temp("test2.fits"))

        # Now reopen the newly written file.  It should not have a 'BLANK'
        # keyword
        with fits.open(self.temp("test2.fits")) as hdul2:
            assert "BLANK" not in hdul2[0].header
            data = hdul2[0].data
            assert np.isnan(data[0])

        # Finally, test that scale_back keeps the BLANKs correctly
        with fits.open(filename, scale_back=True, mode="update") as hdul3:
            data = hdul3[0].data
            # This emits warning that pytest cannot catch properly, so we
            # catch it with pytest.mark.filterwarnings above.
            assert np.isnan(data[0])

        # On disk the original BLANK sentinel and scaling must be preserved.
        with fits.open(filename, do_not_scale_image_data=True) as hdul4:
            assert hdul4[0].header["BLANK"] == 9999
            assert hdul4[0].header["BSCALE"] == 1.23
            assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header["BZERO"] = 1.0
hdu.writeto(self.temp("test_new.fits"))
with fits.open(self.temp("test_new.fits")) as hdul:
arr += 1
assert (hdul[1].data == arr).all()
    def test_rewriting_large_scaled_image(self):
        """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
        https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101

        Rewriting a scaled image file must preserve its data, whether or not
        the data was loaded before writing, and across update-mode cycles.
        """
        # Case 1: data is touched before writing back out.
        hdul = fits.open(self.data("fixed-1890.fits"))
        orig_data = hdul[0].data
        hdul.writeto(self.temp("test_new.fits"), overwrite=True)
        hdul.close()
        hdul = fits.open(self.temp("test_new.fits"))
        assert (hdul[0].data == orig_data).all()
        hdul.close()

        # Just as before, but this time don't touch hdul[0].data before writing
        # back out--this is the case that failed in
        # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
        hdul = fits.open(self.data("fixed-1890.fits"))
        hdul.writeto(self.temp("test_new.fits"), overwrite=True)
        hdul.close()
        hdul = fits.open(self.temp("test_new.fits"))
        assert (hdul[0].data == orig_data).all()
        hdul.close()

        # Test opening/closing/reopening a scaled file in update mode
        hdul = fits.open(self.data("fixed-1890.fits"), do_not_scale_image_data=True)
        hdul.writeto(
            self.temp("test_new.fits"), overwrite=True, output_verify="silentfix"
        )
        hdul.close()
        hdul = fits.open(self.temp("test_new.fits"))
        orig_data = hdul[0].data
        hdul.close()
        # Open in update mode without modifying anything, then verify the
        # data survived the update-mode round trip.
        hdul = fits.open(self.temp("test_new.fits"), mode="update")
        hdul.close()
        hdul = fits.open(self.temp("test_new.fits"))
        assert (hdul[0].data == orig_data).all()
        hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
Replacing the original header to an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
testfile = self.copy_file("test0.fits")
with fits.open(testfile, mode="update") as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy["NAXIS*"]
hdul[1].header = hdr_copy
with fits.open(testfile) as hdul:
assert (orig_data == hdul[1].data).all()
    def test_open_scaled_in_update_mode(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
        (Don't update scaled image data if the data is not read)

        This ensures that merely opening and closing a file containing scaled
        image data does not cause any change to the data (or the header).
        Changes should only occur if the data is accessed.
        """
        # Copy the original file before making any possible changes to it
        testfile = self.copy_file("scale.fits")
        mtime = os.stat(testfile).st_mtime

        time.sleep(1)

        fits.open(testfile, mode="update").close()

        # Ensure that no changes were made to the file merely by immediately
        # opening and closing it.
        assert mtime == os.stat(testfile).st_mtime

        # Insert a slight delay to ensure the mtime does change when the file
        # is changed
        time.sleep(1)

        # Reading the data in update mode forces a rescale-on-close.
        hdul = fits.open(testfile, "update")
        orig_data = hdul[0].data
        hdul.close()

        # Now the file should be updated with the rescaled data
        assert mtime != os.stat(testfile).st_mtime
        hdul = fits.open(testfile, mode="update")
        # After the rescale the file stores raw float32 with no scaling cards.
        assert hdul[0].data.dtype == np.dtype(">f4")
        assert hdul[0].header["BITPIX"] == -32
        assert "BZERO" not in hdul[0].header
        assert "BSCALE" not in hdul[0].header
        assert (orig_data == hdul[0].data).all()

        # Try reshaping the data, then closing and reopening the file; let's
        # see if all the changes are preserved properly
        hdul[0].data.shape = (42, 10)
        hdul.close()

        hdul = fits.open(testfile)
        assert hdul[0].shape == (42, 10)
        assert hdul[0].data.dtype == np.dtype(">f4")
        assert hdul[0].header["BITPIX"] == -32
        assert "BZERO" not in hdul[0].header
        assert "BSCALE" not in hdul[0].header
        hdul.close()
    def test_scale_back(self):
        """A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120

        The scale_back feature for image HDUs.
        """
        testfile = self.copy_file("scale.fits")
        with fits.open(testfile, mode="update", scale_back=True) as hdul:
            # Remember the on-disk scaling parameters before modifying data.
            orig_bitpix = hdul[0].header["BITPIX"]
            orig_bzero = hdul[0].header["BZERO"]
            orig_bscale = hdul[0].header["BSCALE"]
            orig_data = hdul[0].data.copy()
            hdul[0].data[0] = 0

        # With scale_back the original BITPIX/BZERO/BSCALE must survive the
        # write, and the modified float 0 must be stored as the scaled value.
        with fits.open(testfile, do_not_scale_image_data=True) as hdul:
            assert hdul[0].header["BITPIX"] == orig_bitpix
            assert hdul[0].header["BZERO"] == orig_bzero
            assert hdul[0].header["BSCALE"] == orig_bscale

            zero_point = math.floor(-orig_bzero / orig_bscale)
            assert (hdul[0].data[0] == zero_point).all()

        # The untouched rows must round-trip unchanged.
        with fits.open(testfile) as hdul:
            assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data("test0.fits")) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as h:
assert h[1].data is None
assert h[1].header["NAXIS"] == 0
assert "NAXIS1" not in h[1].header
assert "NAXIS2" not in h[1].header
    def test_invalid_blank(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/2711

        If the BLANK keyword contains an invalid value it should be ignored for
        any calculations (though a warning should be issued).
        """
        data = np.arange(100, dtype=np.float64)
        hdu = fits.PrimaryHDU(data)
        # 'nan' is not a valid integer BLANK value.
        hdu.header["BLANK"] = "nan"

        with pytest.warns(fits.verify.VerifyWarning) as w:
            hdu.writeto(self.temp("test.fits"))
        assert "Invalid value for 'BLANK' keyword in header: 'nan'" in str(w[0].message)

        # On read, two warnings are expected (in this order) and the data
        # must come back untouched.
        with pytest.warns(AstropyUserWarning) as w:
            with fits.open(self.temp("test.fits")) as hdul:
                assert np.all(hdul[0].data == data)

        assert len(w) == 2
        msg = "Invalid value for 'BLANK' keyword in header"
        assert msg in str(w[0].message)
        msg = "Invalid 'BLANK' keyword"
        assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp("test.fits"))
with open(self.temp("test.fits"), "rb") as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale("int16", bzero=99.0)
hdu2.scale("int16", bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), mode="update", scale_back=True) as (
hdu,
):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
def test_hdu_creation_with_scalar(self):
msg = r"data object array\(1\) should have at least one dimension"
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=1)
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=1)
# Regression test for https://github.com/astropy/astropy/issues/14527
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=np.array(1))
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=np.array(1))
def test_scale_implicit_casting():
    """Regression test: ``scale()`` with a float bzero must not trip Numpy's
    ban on implicit type casting during in-place operations."""
    data = np.array([1], dtype=np.int32)
    hdu = fits.ImageHDU(data)
    hdu.scale(bzero=1.3)
def test_scale_floats():
    """``scale('float32')`` converts the dtype while preserving values."""
    values = np.arange(10) / 10
    hdu = fits.ImageHDU(values)
    hdu.scale("float32")
    np.testing.assert_array_equal(hdu.data, values.astype("float32"))
def test_bzero_implicit_casting_compressed():
    """Reading a compressed image whose BZERO is stored as a float must not
    trip Numpy's ban on implicit casting in in-place operations."""
    # Regression test for an issue that occurred because Numpy now does not
    # allow implicit type casting during inplace operations. Astropy is
    # actually not able to produce a file that triggers the failure - the
    # issue occurs when using unsigned integer types in the FITS file, in which
    # case BZERO should be 32768. But if the keyword is stored as 32768.0, then
    # it was possible to trigger the implicit casting error.
    filename = get_pkg_data_filename("data/compressed_float_bzero.fits")
    with fits.open(filename) as hdul:
        hdu = hdul[1]
        # Merely accessing the data used to raise; no assertion needed.
        hdu.data
def test_bzero_mishandled_info(tmp_path):
    """
    Regression test for #5507: HDUList.info() used to raise AttributeError on
    a dataset applying a BZERO zeropoint that does not force a dtype change.
    """
    path = tmp_path / "floatimg_with_bzero.fits"
    hdu = fits.ImageHDU(np.zeros((10, 10)))
    hdu.header["BZERO"] = 10
    hdu.writeto(path, overwrite=True)
    with fits.open(path) as hdul:
        hdul.info()
def test_image_write_readonly(tmp_path):
    """Regression test: read-only arrays must be writable to FITS (#5512)."""
    readonly = np.array([1, 2, 3])
    readonly.setflags(write=False)
    hdu = fits.ImageHDU(data=readonly)
    hdu.add_datasum()
    target = tmp_path / "test.fits"
    hdu.writeto(target)
    with fits.open(target) as hdulist:
        assert_equal(hdulist[1].data, [1, 2, 3])
def test_int8(tmp_path):
    """Test for int8 support, https://github.com/astropy/astropy/issues/11995"""
    img = np.arange(-50, 50, dtype=np.int8).reshape(10, 10)
    path = tmp_path / "int8.fits"
    fits.PrimaryHDU(img).writeto(path)
    with fits.open(path) as hdul:
        header = hdul[0].header
        # int8 is stored as unsigned BITPIX=8 with a BZERO=-128 offset.
        assert header["BITPIX"] == 8
        assert header["BZERO"] == -128
        assert header["BSCALE"] == 1.0
        assert_equal(hdul[0].data, img)
        assert hdul[0].data.dtype == img.dtype
| TestImageFunctions |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-intercom/components.py | {
"start": 9003,
"end": 9548
class ____:
    """
    Singleton class that manages a reset signal for Intercom's companies stream.
    """

    _instance = None

    def __new__(cls):
        # Lazily create the single shared instance on first construction.
        if cls._instance is None:
            instance = super().__new__(cls)
            instance.reset_signal = False
            cls._instance = instance
        return cls._instance

    def is_reset_triggered(self) -> bool:
        """Return True when a reset has been requested and not yet cleared."""
        return self.reset_signal

    def trigger_reset(self) -> None:
        """Raise the reset flag."""
        self.reset_signal = True

    def clear_reset(self) -> None:
        """Lower the reset flag."""
        self.reset_signal = False
| ResetCursorSignal |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.