language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/sam3_tracker/test_modeling_sam3_tracker.py | {
"start": 2571,
"end": 4127
} | class ____:
def __init__(
self,
hidden_size=32,
hidden_act="relu",
mlp_dim=64,
num_hidden_layers=2,
num_attention_heads=4,
attention_downsample_rate=2,
num_multimask_outputs=3,
iou_head_depth=3,
iou_head_hidden_dim=32,
):
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.mlp_dim = mlp_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.attention_downsample_rate = attention_downsample_rate
self.num_multimask_outputs = num_multimask_outputs
self.iou_head_depth = iou_head_depth
self.iou_head_hidden_dim = iou_head_hidden_dim
def get_config(self):
return Sam3TrackerMaskDecoderConfig(
hidden_size=self.hidden_size,
hidden_act=self.hidden_act,
mlp_dim=self.mlp_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
attention_downsample_rate=self.attention_downsample_rate,
num_multimask_outputs=self.num_multimask_outputs,
iou_head_depth=self.iou_head_depth,
iou_head_hidden_dim=self.iou_head_hidden_dim,
)
def prepare_config_and_inputs(self):
config = self.get_config()
dummy_inputs = {
"image_embedding": floats_tensor([self.batch_size, self.hidden_size]),
}
return config, dummy_inputs
| Sam3TrackerMaskDecoderTester |
python | getsentry__sentry | src/sentry/integrations/jira/integration.py | {
"start": 5015,
"end": 47020
} | class ____(IssueSyncIntegration):
outbound_status_key = "sync_status_forward"
inbound_status_key = "sync_status_reverse"
outbound_assignee_key = "sync_forward_assignment"
inbound_assignee_key = "sync_reverse_assignment"
issues_ignored_fields_key = "issues_ignored_fields"
resolution_strategy_key = "resolution_strategy"
comment_key = "sync_comments"
@classproperty
def use_email_scope(cls):
return settings.JIRA_USE_EMAIL_SCOPE
def get_organization_config(self) -> list[dict[str, Any]]:
configuration: list[dict[str, Any]] = self._get_organization_config_default_values()
client = self.get_client()
try:
projects: list[JiraProjectMapping] = [
JiraProjectMapping(value=p["id"], label=p["name"])
for p in client.get_projects_list()
]
self._set_status_choices_in_organization_config(configuration, projects)
configuration[0]["addDropdown"]["items"] = projects
except ApiError:
configuration[0]["disabled"] = True
configuration[0]["disabledReason"] = _(
"Unable to communicate with the Jira instance. You may need to reinstall the addon."
)
context = organization_service.get_organization_by_id(
id=self.organization_id, include_projects=False, include_teams=False
)
assert context, "organizationcontext must exist to get org"
organization = context.organization
has_issue_sync = features.has("organizations:integrations-issue-sync", organization)
if not has_issue_sync:
for field in configuration:
field["disabled"] = True
field["disabledReason"] = _(
"Your organization does not have access to this feature"
)
return configuration
def _set_status_choices_in_organization_config(
self, configuration: list[dict[str, Any]], jira_projects: list[JiraProjectMapping]
) -> list[dict[str, Any]]:
"""
Set the status choices in the provided organization config.
This will mutate the provided config object and replace the existing
mappedSelectors field with the status choices. We will set the status choices per-project for the organization=
"""
client = self.get_client()
if len(jira_projects) <= MAX_PER_PROJECT_QUERIES:
# If we have less projects than the max query limit, and the feature
# flag is enabled for the organization, we can query the statuses
# for each project. This ensures we don't display statuses that are
# not applicable to each project when it won't result in us hitting
# Atlassian rate limits.
try:
for project in jira_projects:
project_id = project["value"]
project_statuses = client.get_project_statuses(project_id).get("values", [])
statuses = [(c["id"], c["name"]) for c in project_statuses]
configuration[0]["mappedSelectors"][project_id] = {
"on_resolve": {"choices": statuses},
"on_unresolve": {"choices": statuses},
}
configuration[0]["perItemMapping"] = True
return configuration
except ApiError as e:
if isinstance(e, ApiRateLimitedError):
logger.info(
"jira.get-project-statuses.rate-limited",
extra={
"org_id": self.organization_id,
"integration_id": self.model.id,
"project_count": len(jira_projects),
},
)
raise
# Fallback logic to the global statuses per project. This may occur if
# there are too many projects we need to fetch.
logger.info(
"jira.get-project-statuses.fallback",
extra={
"org_id": self.organization_id,
"integration_id": self.model.id,
"project_count": len(jira_projects),
},
)
statuses = [(c["id"], c["name"]) for c in client.get_valid_statuses()]
configuration[0]["mappedSelectors"] = {
"on_resolve": {"choices": statuses},
"on_unresolve": {"choices": statuses},
}
return configuration
def _get_organization_config_default_values(self) -> list[dict[str, Any]]:
return [
{
"name": self.outbound_status_key,
"type": "choice_mapper",
"label": _("Sync Sentry Status to Jira"),
"help": _(
"When a Sentry issue changes status, change the status of the linked ticket in Jira."
),
"addButtonText": _("Add Jira Project"),
"addDropdown": {
"emptyMessage": _("All projects configured"),
"noResultsMessage": _("Could not find Jira project"),
"items": [], # Populated with projects
},
"mappedSelectors": {},
"columnLabels": {
"on_resolve": _("When resolved"),
"on_unresolve": _("When unresolved"),
},
"mappedColumnLabel": _("Jira Project"),
"formatMessageValue": False,
},
{
"name": self.outbound_assignee_key,
"type": "boolean",
"label": _("Sync Sentry Assignment to Jira"),
"help": _(
"When an issue is assigned in Sentry, assign its linked Jira ticket to the same user."
),
},
{
"name": self.comment_key,
"type": "boolean",
"label": _("Sync Sentry Comments to Jira"),
"help": _("Post comments from Sentry issues to linked Jira tickets"),
},
{
"name": self.inbound_status_key,
"type": "boolean",
"label": _("Sync Jira Status to Sentry"),
"help": _(
"When a Jira ticket is marked done, resolve its linked issue in Sentry. "
"When a Jira ticket is removed from being done, unresolve its linked Sentry issue."
),
},
{
"name": self.inbound_assignee_key,
"type": "boolean",
"label": _("Sync Jira Assignment to Sentry"),
"help": _(
"When a ticket is assigned in Jira, assign its linked Sentry issue to the same user."
),
},
{
"name": self.resolution_strategy_key,
"label": "Resolve",
"type": "select",
"placeholder": "Resolve",
"choices": [
("resolve", "Resolve"),
("resolve_current_release", "Resolve in Current Release"),
("resolve_next_release", "Resolve in Next Release"),
],
"help": _(
"Select what action to take on Sentry Issue when Jira ticket is marked Done."
),
},
{
"name": self.issues_ignored_fields_key,
"label": "Ignored Fields",
"type": "textarea",
"placeholder": _("components, security, customfield_10006"),
"help": _("Comma-separated Jira field IDs that you want to hide."),
},
]
def update_organization_config(self, data):
"""
Update the configuration field for an organization integration.
"""
config = self.org_integration.config
if "sync_status_forward" in data:
project_mappings = data.pop("sync_status_forward")
if any(
not mapping["on_unresolve"] or not mapping["on_resolve"]
for mapping in project_mappings.values()
):
raise IntegrationError("Resolve and unresolve status are required.")
data["sync_status_forward"] = bool(project_mappings)
IntegrationExternalProject.objects.filter(
organization_integration_id=self.org_integration.id
).delete()
for project_id, statuses in project_mappings.items():
IntegrationExternalProject.objects.create(
organization_integration_id=self.org_integration.id,
external_id=project_id,
resolved_status=statuses["on_resolve"],
unresolved_status=statuses["on_unresolve"],
)
if self.issues_ignored_fields_key in data:
ignored_fields_text = data.pop(self.issues_ignored_fields_key)
# While we describe the config as a "comma-separated list", users are likely to
# accidentally use newlines, so we explicitly handle that case. On page
# refresh, they will see how it got interpreted as `get_config_data` will
# re-serialize the config as a comma-separated list.
ignored_fields_list = list(
filter(
None, [field.strip() for field in re.split(r"[,\n\r]+", ignored_fields_text)]
)
)
data[self.issues_ignored_fields_key] = ignored_fields_list
config.update(data)
org_integration = integration_service.update_organization_integration(
org_integration_id=self.org_integration.id,
config=config,
)
if org_integration is not None:
self.org_integration = org_integration
def _filter_active_projects(self, project_mappings: QuerySet[IntegrationExternalProject]):
project_ids_set = {p["id"] for p in self.get_client().get_projects_list()}
return [pm for pm in project_mappings if pm.external_id in project_ids_set]
def get_config_data(self):
config = self.org_integration.config
project_mappings = IntegrationExternalProject.objects.filter(
organization_integration_id=self.org_integration.id
)
sync_status_forward = {}
project_mappings = self._filter_active_projects(project_mappings)
for pm in project_mappings:
sync_status_forward[pm.external_id] = {
"on_unresolve": pm.unresolved_status,
"on_resolve": pm.resolved_status,
}
config["sync_status_forward"] = sync_status_forward
config[self.issues_ignored_fields_key] = ", ".join(
config.get(self.issues_ignored_fields_key, "")
)
return config
def sync_metadata(self):
client = self.get_client()
server_info = {}
projects = []
try:
server_info = client.get_server_info()
projects = client.get_projects_list()
except ApiError as e:
raise IntegrationError(self.message_from_error(e))
metadata = self.model.metadata.copy()
name = server_info["serverTitle"]
# There is no Jira instance icon (there is a favicon, but it doesn't seem
# possible to query that with the API). So instead we just use the first
# project Icon.
if len(projects) > 0:
avatar = projects[0]["avatarUrls"]["48x48"]
metadata.update({"icon": avatar})
integration_service.update_integration(
integration_id=self.model.id, name=name, metadata=metadata
)
def get_link_issue_config(self, group, **kwargs):
fields = super().get_link_issue_config(group, **kwargs)
org = group.organization
autocomplete_url = reverse("sentry-extensions-jira-search", args=[org.slug, self.model.id])
for field in fields:
if field["name"] == "externalIssue":
field["url"] = autocomplete_url
field["type"] = "select"
return fields
def get_issue_url(self, key: str) -> str:
return "{}/browse/{}".format(self.model.metadata["base_url"], key)
def get_persisted_default_config_fields(self) -> Sequence[str]:
return ["project", "issuetype", "priority", "labels"]
def get_persisted_user_default_config_fields(self):
return ["reporter"]
def get_persisted_ignored_fields(self):
return self.org_integration.config.get(self.issues_ignored_fields_key, [])
def get_feedback_issue_body(self, occurrence: IssueOccurrence) -> str:
messages = [
evidence for evidence in occurrence.evidence_display if evidence.name == "message"
]
others = [
evidence for evidence in occurrence.evidence_display if evidence.name != "message"
]
body = ""
for message in messages:
body += message.value
body += "\n\n"
for evidence in sorted(others, key=attrgetter("important"), reverse=True):
body += f"| *{evidence.name}* | {evidence.value} |\n"
return body.rstrip("\n") # remove the last new line
def get_generic_issue_body(self, event):
body = ""
important = event.occurrence.important_evidence_display
if important:
body = f"| *{important.name}* | {truncatechars(important.value, MAX_CHAR)} |\n"
for evidence in event.occurrence.evidence_display:
if evidence.important is False:
body += f"| *{evidence.name}* | {truncatechars(evidence.value, MAX_CHAR)} |\n"
return body[:-2] # chop off final newline
def get_group_description(self, group, event, **kwargs):
output = []
if group.issue_category == GroupCategory.FEEDBACK:
output = [
"Sentry Feedback: [{}|{}]\n".format(
group.qualified_short_id,
group.get_absolute_url(params={"referrer": "jira_integration"}),
)
]
else:
output = [
"Sentry Issue: [{}|{}]".format(
group.qualified_short_id,
group.get_absolute_url(params={"referrer": "jira_integration"}),
)
]
if isinstance(event, GroupEvent) and event.occurrence is not None:
body = ""
if group.issue_category == GroupCategory.FEEDBACK:
body = self.get_feedback_issue_body(event.occurrence)
else:
body = self.get_generic_issue_body(event)
output.extend([body])
else:
body = self.get_group_body(group, event)
if body:
output.extend(["", "{code}", body, "{code}"])
return "\n".join(output)
def get_client(self):
logging_context = {"org_id": self.organization_id}
if self.organization_id is not None:
logging_context["integration_id"] = attrgetter("org_integration.integration_id")(self)
logging_context["org_integration_id"] = attrgetter("org_integration.id")(self)
return JiraCloudClient(
integration=self.model,
verify_ssl=True,
logging_context=logging_context,
)
def get_issue(self, issue_id, **kwargs):
"""
Jira installation's implementation of IssueSyncIntegration's `get_issue`.
"""
client = self.get_client()
issue = client.get_issue(issue_id)
fields = issue.get("fields", {})
return {
"key": issue_id,
"title": fields.get("summary"),
"description": fields.get("description"),
}
def create_comment(self, issue_id, user_id, group_note):
# https://jira.atlassian.com/secure/WikiRendererHelpAction.jspa?section=texteffects
comment = group_note.data["text"]
quoted_comment = self.create_comment_attribution(user_id, comment)
try:
return self.get_client().create_comment(issue_id, quoted_comment)
except ApiUnauthorized as e:
raise IntegrationConfigurationError(
"Insufficient permissions to create a comment on the Jira issue."
) from e
except ApiError as e:
raise IntegrationError(
"There was an error creating a comment on the Jira issue."
) from e
def create_comment_attribution(self, user_id, comment_text):
user = user_service.get_user(user_id=user_id)
username = "Unknown User" if user is None else user.name
attribution = f"{username} wrote:\n\n"
return f"{attribution}{{quote}}{comment_text}{{quote}}"
def update_comment(self, issue_id, user_id, group_note):
quoted_comment = self.create_comment_attribution(user_id, group_note.data["text"])
try:
return self.get_client().update_comment(
issue_id, group_note.data["external_id"], quoted_comment
)
except ApiUnauthorized as e:
raise IntegrationConfigurationError(
"Insufficient permissions to update a comment on the Jira issue."
) from e
except ApiError as e:
raise IntegrationError(
"There was an error updating a comment on the Jira issue."
) from e
def search_issues(self, query: str | None, **kwargs) -> dict[str, Any]:
try:
resp = self.get_client().search_issues(query)
assert isinstance(resp, dict)
return resp
except ApiError as e:
self.raise_error(e)
def make_choices(self, values):
if not values:
return []
results = []
for item in values:
key = item.get("id", None)
if "name" in item:
value = item["name"]
elif "value" in item:
# Value based options prefer the value on submit.
key = item["value"]
value = item["value"]
elif "label" in item:
# Label based options prefer the value on submit.
key = item["label"]
value = item["label"]
else:
continue
results.append((key, value))
return results
def error_message_from_json(self, data):
message = ""
if data.get("errorMessages"):
message = " ".join(data["errorMessages"])
if data.get("errors"):
if message:
message += " "
message += " ".join(f"{k}: {v}" for k, v in data.get("errors").items())
return message
def error_fields_from_json(self, data):
errors = data.get("errors")
error_messages = data.get("errorMessages")
if not errors and not error_messages:
return None
error_data = {}
if error_messages:
# These may or may not contain field specific errors, so we manually
# map them
for message in error_messages:
for error_regex, key in CUSTOM_ERROR_MESSAGE_MATCHERS:
if error_regex.match(message):
error_data[key] = [message]
if errors:
for key, error in data.get("errors").items():
error_data[key] = [error]
if not error_data:
return None
return error_data
def search_url(self, org_slug):
"""
Hook method that varies in Jira Server
"""
return reverse("sentry-extensions-jira-search", args=[org_slug, self.model.id])
def build_dynamic_field(self, field_meta, group=None):
"""
Builds a field based on Jira's meta field information
"""
schema = field_meta["schema"]
# set up some defaults for form fields
fieldtype = "text"
fkwargs = {"label": field_meta["name"], "required": field_meta["required"]}
# override defaults based on field configuration
if (
schema["type"] in ["securitylevel", "priority"]
or schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["select"]
):
fieldtype = "select"
fkwargs["choices"] = self.make_choices(field_meta.get("allowedValues"))
elif (
# Assignee and reporter fields
field_meta.get("autoCompleteUrl")
and (
schema.get("items") == "user"
or schema["type"] == "user"
or schema["type"] == "team"
or schema.get("items") == "team"
)
# Sprint and "Epic Link" fields
or schema.get("custom")
in (JIRA_CUSTOM_FIELD_TYPES["sprint"], JIRA_CUSTOM_FIELD_TYPES["epic"])
# Parent field
or schema["type"] == "issuelink"
):
fieldtype = "select"
fkwargs["url"] = self.search_url(self.organization.slug)
fkwargs["choices"] = []
elif schema["type"] in ["timetracking"]:
# TODO: Implement timetracking (currently unsupported altogether)
return None
elif schema.get("items") in ["worklog", "attachment"]:
# TODO: Implement worklogs and attachments someday
return None
elif schema["type"] == "array" and schema["items"] != "string":
fieldtype = "select"
fkwargs.update(
{
"multiple": True,
"choices": self.make_choices(field_meta.get("allowedValues")),
"default": "",
}
)
elif schema["type"] == "option" and len(field_meta.get("allowedValues", [])):
fieldtype = "select"
fkwargs.update(
{"choices": self.make_choices(field_meta.get("allowedValues")), "default": ""}
)
# break this out, since multiple field types could additionally
# be configured to use a custom property instead of a default.
if schema.get("custom"):
if schema["custom"] == JIRA_CUSTOM_FIELD_TYPES["textarea"]:
fieldtype = "textarea"
fkwargs["type"] = fieldtype
return fkwargs
def get_issue_type_meta(self, issue_type, meta):
self.parse_jira_issue_metadata(meta)
issue_types = meta["issuetypes"]
issue_type_meta = None
if issue_type:
matching_type = [t for t in issue_types if t["id"] == issue_type]
issue_type_meta = matching_type[0] if len(matching_type) > 0 else None
# still no issue type? just use the first one.
if not issue_type_meta:
issue_type_meta = issue_types[0]
return issue_type_meta
def get_issue_create_meta(self, client, project_id, jira_projects):
meta = None
if project_id:
meta = self.fetch_issue_create_meta(client, project_id)
if meta is not None:
return meta
# If we don't have a jira projectid (or we couldn't fetch the metadata from the given project_id),
# iterate all projects and find the first project that has metadata.
# We only want one project as getting all project metadata is expensive and wasteful.
# In the first run experience, the user won't have a 'last used' project id
# so we need to iterate available projects until we find one that we can get metadata for.
attempts = 0
if len(jira_projects):
for fallback in jira_projects:
attempts += 1
meta = self.fetch_issue_create_meta(client, fallback["id"])
if meta:
logger.info(
"jira.get-issue-create-meta.attempts",
extra={"organization_id": self.organization_id, "attempts": attempts},
)
return meta
jira_project_ids = "no projects"
if len(jira_projects):
jira_project_ids = ",".join(project["key"] for project in jira_projects)
logger.info(
"jira.get-issue-create-meta.no-metadata",
extra={
"organization_id": self.organization_id,
"attempts": attempts,
"jira_projects": jira_project_ids,
},
)
raise IntegrationError(
"Could not get issue create metadata for any Jira projects. "
"Ensure that your project permissions are correct."
)
def fetch_issue_create_meta(self, client, project_id):
try:
meta = client.get_create_meta_for_project(project_id)
except ApiUnauthorized:
logger.info(
"jira.fetch-issue-create-meta.unauthorized",
extra={"organization_id": self.organization_id, "jira_project": project_id},
)
raise IntegrationError(
"Jira returned: Unauthorized. " "Please check your configuration settings."
)
except ApiError as e:
logger.info(
"jira.fetch-issue-create-meta.error",
extra={
"integration_id": self.model.id,
"organization_id": self.organization_id,
"jira_project": project_id,
"error": str(e),
},
)
raise IntegrationError(
"There was an error communicating with the Jira API. "
"Please try again or contact support."
)
return meta
@all_silo_function
def get_create_issue_config(self, group: Group | None, user: RpcUser | User, **kwargs):
"""
We use the `group` to get three things: organization_slug, project
defaults, and default title and description. In the case where we're
getting `createIssueConfig` from Jira for Ticket Rules, we don't know
the issue group beforehand.
:param group: (Optional) Group model.
:param user: User model. TODO Make this the first parameter.
:param kwargs: (Optional) Object
* params: (Optional) Object
* params.project: (Optional) Sentry Project object
* params.issuetype: (Optional) String. The Jira issue type. For
example: "Bug", "Epic", "Story".
:return:
"""
kwargs = kwargs or {}
kwargs["link_referrer"] = "jira_integration"
params = kwargs.get("params", {})
fields = []
defaults = {}
if group:
fields = super().get_create_issue_config(group, user, **kwargs)
defaults = self.get_defaults(group.project, user)
project_id = params.get("project", defaults.get("project"))
client = self.get_client()
try:
jira_projects = client.get_projects_paginated({"maxResults": MAX_PER_PROJECT_QUERIES})[
"values"
]
except ApiError as e:
logger.info(
"jira.get-create-issue-config.no-projects",
extra={
"integration_id": self.model.id,
"organization_id": self.organization_id,
"error": str(e),
},
)
raise IntegrationError(
"Could not fetch project list from Jira. Ensure that Jira is"
" available and your account is still active."
)
meta = self.get_issue_create_meta(client, project_id, jira_projects)
if not meta:
raise IntegrationError(
"Could not fetch issue create metadata from Jira. Ensure that"
" the integration user has access to the requested project."
)
# check if the issuetype was passed as a parameter
issue_type = params.get("issuetype", defaults.get("issuetype"))
issue_type_meta = self.get_issue_type_meta(issue_type, meta)
issue_type_choices = self.make_choices(meta["issuetypes"])
# make sure default issue type is actually
# one that is allowed for project
if issue_type:
if not any(c for c in issue_type_choices if c[0] == issue_type):
issue_type = issue_type_meta["id"]
projects_form_field = {
"name": "project",
"label": "Jira Project",
"choices": [(p["id"], f"{p["key"]} - {p["name"]}") for p in jira_projects],
"default": meta["id"],
"type": "select",
"updatesForm": True,
"required": True,
}
paginated_projects_url = reverse(
"sentry-extensions-jira-search", args=[self.organization.slug, self.model.id]
)
projects_form_field["url"] = paginated_projects_url
fields = [
projects_form_field,
*fields,
{
"name": "issuetype",
"label": "Issue Type",
"default": issue_type or issue_type_meta["id"],
"type": "select",
"choices": issue_type_choices,
"updatesForm": True,
"required": bool(issue_type_choices), # required if we have any type choices
},
]
# title is renamed to summary before sending to Jira
standard_fields = [f["name"] for f in fields] + ["summary"]
ignored_fields = set()
ignored_fields.update(HIDDEN_ISSUE_FIELDS)
ignored_fields.update(self.get_persisted_ignored_fields())
# apply ordering to fields based on some known built-in Jira fields.
# otherwise weird ordering occurs.
anti_gravity = {
"priority": (-150, ""),
"fixVersions": (-125, ""),
"components": (-100, ""),
"security": (-50, ""),
}
dynamic_fields = list(issue_type_meta["fields"].keys())
# Sort based on priority, then field name
dynamic_fields.sort(key=lambda f: anti_gravity.get(f, (0, f)))
# Build up some dynamic fields based on what is required.
for field in dynamic_fields:
if field in standard_fields or field in [x.strip() for x in ignored_fields]:
# don't overwrite the fixed fields for the form.
continue
mb_field = self.build_dynamic_field(issue_type_meta["fields"][field], group)
if mb_field:
if mb_field["label"] in params.get("ignored", []):
continue
mb_field["name"] = field
fields.append(mb_field)
for field in fields:
if field["name"] == "priority":
# whenever priorities are available, put the available ones in the list.
# allowedValues for some reason doesn't pass enough info.
field["choices"] = self.make_choices(client.get_priorities())
field["default"] = defaults.get("priority", "")
elif field["name"] == "fixVersions":
field["choices"] = self.make_choices(client.get_versions(meta["key"]))
elif field["name"] == "labels":
field["default"] = defaults.get("labels", "")
elif field["name"] == "reporter":
reporter_id = defaults.get("reporter", "")
if not reporter_id:
continue
try:
reporter_info = client.get_user(reporter_id)
except ApiError as e:
logger.info(
"jira.get-create-issue-config.no-matching-reporter",
extra={
"integration_id": self.model.id,
"organization_id": self.organization_id,
"persisted_reporter_id": reporter_id,
"error": str(e),
},
)
continue
reporter_tuple = build_user_choice(reporter_info, client.user_id_field())
if not reporter_tuple:
continue
reporter_id, reporter_label = reporter_tuple
field["default"] = reporter_id
field["choices"] = [(reporter_id, reporter_label)]
return fields
def _clean_and_transform_issue_data(
self, issue_metadata: JiraIssueTypeMetadata, data: dict[str, Any]
) -> Any:
client = self.get_client()
transformed_data = transform_fields(
client.user_id_field(), issue_metadata.fields.values(), **data
)
return transformed_data
def create_issue(self, data, **kwargs):
client = self.get_client()
# protect against mis-configured integration submitting a form without an
# issuetype assigned.
if not data.get("issuetype"):
raise IntegrationFormError({"issuetype": ["Issue type is required."]})
jira_project = data.get("project")
if not jira_project:
raise IntegrationFormError({"project": ["Jira project is required"]})
try:
meta = client.get_create_meta_for_project(jira_project)
except ApiError as e:
self.raise_error(e)
if not meta:
raise IntegrationConfigurationError(
"Could not fetch issue create configuration from Jira."
)
issue_type_meta = self.get_issue_type_meta(data["issuetype"], meta)
cleaned_data = self._clean_and_transform_issue_data(
JiraIssueTypeMetadata.from_dict(issue_type_meta), data
)
try:
response = client.create_issue(cleaned_data)
except Exception as e:
self.raise_error(e)
issue_key = response.get("key")
if not issue_key:
raise IntegrationError("There was an error creating the issue.")
# Immediately fetch and return the created issue.
return self.get_issue(issue_key)
def raise_error(self, exc: Exception, identity: Identity | None = None) -> NoReturn:
"""
Overrides the base `raise_error` method to treat ApiInvalidRequestErrors
as configuration errors when we don't have error field handling for the
response.
This is because the majority of Jira errors we receive are external
configuration problems, like required fields missing.
"""
logging_context = {
"exception_type": type(exc).__name__,
"request_body": str(exc.json) if isinstance(exc, ApiError) else None,
}
if isinstance(exc, ApiError) and not exc.json:
logger.warning("sentry.jira.raise_error.non_json_error_response", extra=logging_context)
raise IntegrationConfigurationError(
"Something went wrong while communicating with Jira"
) from exc
if isinstance(exc, ApiInvalidRequestError):
error_fields = self.error_fields_from_json(exc.json)
if error_fields is not None:
raise IntegrationFormError(error_fields).with_traceback(sys.exc_info()[2])
logger.warning(
"sentry.jira.raise_error.generic_api_invalid_error", extra=logging_context
)
raise IntegrationConfigurationError(exc.text) from exc
super().raise_error(exc, identity=identity)
def sync_assignee_outbound(
self,
external_issue: ExternalIssue,
user: RpcUser | None,
assign: bool = True,
**kwargs: Any,
) -> None:
"""
Propagate a sentry issue's assignee to a jira issue's assignee
"""
client = self.get_client()
jira_user = None
if user and assign:
for ue in user.emails:
try:
possible_users = client.search_users_for_issue(external_issue.key, ue)
except (ApiUnauthorized, ApiError):
continue
for possible_user in possible_users:
email = possible_user.get("emailAddress")
# pull email from API if we can use it
if not email and self.use_email_scope:
account_id = possible_user.get("accountId")
email = client.get_email(account_id)
# match on lowercase email
if email and email.lower() == ue.lower():
jira_user = possible_user
break
if jira_user is None:
# TODO(jess): do we want to email people about these types of failures?
logger.info(
"jira.assignee-not-found",
extra={
"integration_id": external_issue.integration_id,
"user_id": user.id,
"user_emails": user.emails,
"issue_key": external_issue.key,
"organization_id": external_issue.organization_id,
},
)
if not user.emails:
raise IntegrationSyncTargetNotFound(
{
"email": "User must have a verified email on Sentry to sync assignee in Jira",
"help": "https://sentry.io/settings/account/emails",
}
)
raise IntegrationSyncTargetNotFound("No matching Jira user found.")
try:
id_field = client.user_id_field()
client.assign_issue(external_issue.key, jira_user and jira_user.get(id_field))
except ApiUnauthorized as e:
raise IntegrationConfigurationError(
"Insufficient permissions to assign user to the Jira issue."
) from e
except ApiError as e:
# TODO(jess): do we want to email people about these types of failures?
logger.info(
"jira.failed-to-assign",
exc_info=e,
extra={
"organization_id": external_issue.organization_id,
"integration_id": external_issue.integration_id,
"user_id": user.id if user else None,
"issue_key": external_issue.key,
},
)
raise IntegrationError("There was an error assigning the issue.") from e
def sync_status_outbound(
self, external_issue: ExternalIssue, is_resolved: bool, project_id: int
) -> None:
"""
Propagate a sentry issue's status to a linked issue's status.
"""
client = self.get_client()
jira_issue = client.get_issue(external_issue.key)
jira_project = jira_issue["fields"]["project"]
external_project = integration_service.get_integration_external_project(
organization_id=external_issue.organization_id,
integration_id=external_issue.integration_id,
external_id=jira_project["id"],
)
log_context = {
"integration_id": external_issue.integration_id,
"is_resolved": is_resolved,
"issue_key": external_issue.key,
}
if not external_project:
logger.info("jira.external-project-not-found", extra=log_context)
return
jira_status = (
external_project.resolved_status if is_resolved else external_project.unresolved_status
)
# don't bother updating if it's already the status we'd change it to
if jira_issue["fields"]["status"]["id"] == jira_status:
logger.info("jira.sync_status_outbound.unchanged", extra=log_context)
return
try:
transitions = client.get_transitions(external_issue.key)
except ApiHostError:
raise IntegrationError("Could not reach host to get transitions.")
try:
transition = [t for t in transitions if t.get("to", {}).get("id") == jira_status][0]
except IndexError:
# TODO(jess): Email for failure
logger.warning("jira.status-sync-fail", extra=log_context)
return
try:
client.transition_issue(external_issue.key, transition["id"])
except ApiInvalidRequestError as e:
self.raise_error(e)
def _get_done_statuses(self):
client = self.get_client()
statuses = client.get_valid_statuses()
return {s["id"] for s in statuses if s["statusCategory"]["key"] == "done"}
def get_resolve_sync_action(self, data: Mapping[str, Any]) -> ResolveSyncAction:
done_statuses = self._get_done_statuses()
c_from = data["changelog"]["from"]
c_to = data["changelog"]["to"]
return ResolveSyncAction.from_resolve_unresolve(
should_resolve=c_to in done_statuses and c_from not in done_statuses,
should_unresolve=c_from in done_statuses and c_to not in done_statuses,
)
def migrate_issues(self):
migrate_issues.apply_async(
kwargs={
"integration_id": self.model.id,
"organization_id": self.organization_id,
}
)
def parse_jira_issue_metadata(
self, meta: dict[str, Any]
) -> dict[str, JiraIssueTypeMetadata] | None:
try:
return JiraIssueTypeMetadata.from_jira_meta_config(meta)
except Exception as e:
sentry_sdk.capture_exception(e)
return None
| JiraIntegration |
python | numba__numba | numba/cuda/tests/doc_examples/test_cpu_gpu_compat.py | {
"start": 220,
"end": 2305
} | class ____(CUDATestCase):
"""
Test compatibility of CPU and GPU functions
"""
def setUp(self):
# Prevent output from this test showing up when running the test suite
self._captured_stdout = captured_stdout()
self._captured_stdout.__enter__()
super().setUp()
def tearDown(self):
# No exception type, value, or traceback
self._captured_stdout.__exit__(None, None, None)
super().tearDown()
def test_ex_cpu_gpu_compat(self):
# ex_cpu_gpu_compat.import.begin
from math import pi
import numba
from numba import cuda
# ex_cpu_gpu_compat.import.end
# ex_cpu_gpu_compat.allocate.begin
X = cuda.to_device([1, 10, 234])
Y = cuda.to_device([2, 2, 4014])
Z = cuda.to_device([3, 14, 2211])
results = cuda.to_device([0.0, 0.0, 0.0])
# ex_cpu_gpu_compat.allocate.end
# ex_cpu_gpu_compat.define.begin
@numba.jit
def business_logic(x, y, z):
return 4 * z * (2 * x - (4 * y) / 2 * pi)
# ex_cpu_gpu_compat.define.end
# ex_cpu_gpu_compat.cpurun.begin
print(business_logic(1, 2, 3)) # -126.79644737231007
# ex_cpu_gpu_compat.cpurun.end
# ex_cpu_gpu_compat.usegpu.begin
@cuda.jit
def f(res, xarr, yarr, zarr):
tid = cuda.grid(1)
if tid < len(xarr):
# The function decorated with numba.jit may be directly reused
res[tid] = business_logic(xarr[tid], yarr[tid], zarr[tid])
# ex_cpu_gpu_compat.usegpu.end
# ex_cpu_gpu_compat.launch.begin
f.forall(len(X))(results, X, Y, Z)
print(results)
# [-126.79644737231007, 416.28324559588634, -218912930.2987788]
# ex_cpu_gpu_compat.launch.end
expect = [
business_logic(x, y, z) for x, y, z in zip(X, Y, Z)
]
np.testing.assert_equal(
expect,
results.copy_to_host()
)
if __name__ == "__main__":
unittest.main()
| TestCpuGpuCompat |
python | getsentry__sentry | src/sentry/web/frontend/organization_avatar.py | {
"start": 164,
"end": 247
} | class ____(AvatarPhotoView):
model = OrganizationAvatar
| OrganizationAvatarPhotoView |
python | getsentry__sentry | tests/sentry/api/endpoints/test_assistant.py | {
"start": 285,
"end": 1514
} | class ____(APITestCase):
endpoint = "sentry-api-0-assistant"
@cached_property
def guides(self):
return manager.all()
def setUp(self) -> None:
super().setUp()
self.create_organization(owner=self.user)
self.login_as(user=self.user)
def test_simple(self) -> None:
resp = self.get_response()
assert resp.status_code == 200
assert len(resp.data) == len(manager.all())
for guide in resp.data:
assert guide["seen"] is False
def test_dismissed(self) -> None:
guide = "issue_stream"
AssistantActivity.objects.create(
user=self.user, guide_id=self.guides[guide], dismissed_ts=timezone.now()
)
resp = self.get_response()
assert resp.status_code == 200
assert {"guide": guide, "seen": True} in resp.data
def test_viewed(self) -> None:
guide = "issue_stream"
AssistantActivity.objects.create(
user=self.user, guide_id=self.guides[guide], viewed_ts=timezone.now()
)
resp = self.get_response()
assert resp.status_code == 200
assert {"guide": guide, "seen": True} in resp.data
@control_silo_test
| AssistantActivityTest |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 50763,
"end": 51747
} | class ____(TaskRunOrchestrationRule):
"""
Tracks the flow run attempt a task run state is associated with.
"""
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = {StateType.RUNNING}
async def after_transition(
self,
initial_state: states.State[Any] | None,
validated_state: states.State[Any] | None,
context: OrchestrationContext[orm_models.TaskRun, core.TaskRunPolicy],
) -> None:
if context.run.flow_run_id is not None:
self.flow_run: orm_models.FlowRun | None = await context.flow_run()
if self.flow_run:
context.run.flow_run_run_count = self.flow_run.run_count
else:
raise ObjectNotFoundError(
(
"Unable to read flow run associated with task run:"
f" {context.run.id}, this flow run might have been deleted"
),
)
| UpdateFlowRunTrackerOnTasks |
python | scipy__scipy | benchmarks/benchmarks/signal.py | {
"start": 3203,
"end": 4810
} | class ____(Benchmark):
param_names = ['mode']
params = [
['full', 'valid', 'same']
]
def setup(self, mode):
rng = np.random.default_rng(1234)
# sample a bunch of pairs of 2d arrays
pairs = {'1d': [], '2d': []}
for ma, nb in product((1, 2, 8, 13, 30, 36, 50, 75), repeat=2):
a = rng.standard_normal(ma)
b = rng.standard_normal(nb)
pairs['1d'].append((a, b))
for n_image in [256, 512, 1024]:
for n_kernel in [3, 5, 7]:
x = rng.standard_normal((n_image, n_image))
h = rng.standard_normal((n_kernel, n_kernel))
pairs['2d'].append((x, h))
self.pairs = pairs
def time_convolve(self, mode):
for a, b in self.pairs['1d']:
if b.shape[0] > a.shape[0]:
continue
signal.convolve(a, b, mode=mode)
def time_convolve2d(self, mode):
for a, b in self.pairs['2d']:
if mode == 'valid':
if b.shape[0] > a.shape[0] or b.shape[1] > a.shape[1]:
continue
signal.convolve(a, b, mode=mode)
def time_correlate(self, mode):
for a, b in self.pairs['1d']:
if b.shape[0] > a.shape[0]:
continue
signal.correlate(a, b, mode=mode)
def time_correlate2d(self, mode):
for a, b in self.pairs['2d']:
if mode == 'valid':
if b.shape[0] > a.shape[0] or b.shape[1] > a.shape[1]:
continue
signal.correlate(a, b, mode=mode)
| Convolve |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/helpers/execution/run_steps.py | {
"start": 3933,
"end": 12029
} | class ____:
"""
A class to wrap a Step with its id and args.
Used to coordinate the execution of multiple steps inside a pipeline.
"""
id: CONNECTOR_TEST_STEP_ID
step: Step
args: ARGS_TYPE = field(default_factory=dict)
depends_on: List[str] = field(default_factory=list)
STEP_TREE = List[StepToRun | List[StepToRun]]
async def evaluate_run_args(args: ARGS_TYPE, results: RESULTS_DICT) -> Dict:
"""
Evaluate the args of a StepToRun using the results of previous steps.
"""
if inspect.iscoroutinefunction(args):
return await args(results)
elif callable(args):
return args(results)
elif isinstance(args, dict):
return args
raise TypeError(f"Unexpected args type: {type(args)}")
def _skip_remaining_steps(remaining_steps: STEP_TREE) -> RESULTS_DICT:
"""
Skip all remaining steps.
"""
skipped_results: Dict[str, StepResult] = {}
for runnable_step in remaining_steps:
if isinstance(runnable_step, StepToRun):
skipped_results[runnable_step.id] = runnable_step.step.skip()
elif isinstance(runnable_step, list):
nested_skipped_results = _skip_remaining_steps(list(runnable_step))
skipped_results = {**skipped_results, **nested_skipped_results}
else:
raise Exception(f"Unexpected step type: {type(runnable_step)}")
return skipped_results
def _step_dependencies_succeeded(step_to_eval: StepToRun, results: RESULTS_DICT) -> bool:
"""
Check if all dependencies of a step have succeeded.
"""
main_logger.info(f"Checking if dependencies {step_to_eval.depends_on} have succeeded")
# Check if all depends_on keys are in the results dict
# If not, that means a step has not been run yet
# Implying that the order of the steps are not correct
for step_id in step_to_eval.depends_on:
if step_id not in results:
raise InvalidStepConfiguration(
f"Step {step_to_eval.id} depends on {step_id} which has not been run yet. This implies that the order of the steps is not correct. Please check that the steps are in the correct order."
)
return all(
results[step_id] and (results[step_id].status is StepStatus.SUCCESS or not results[step_id].consider_in_overall_status)
for step_id in step_to_eval.depends_on
)
def _filter_skipped_steps(steps_to_evaluate: STEP_TREE, skip_steps: List[str], results: RESULTS_DICT) -> Tuple[STEP_TREE, RESULTS_DICT]:
"""
Filter out steps that should be skipped.
Either because they are in the skip list or because one of their dependencies failed.
"""
steps_to_run: STEP_TREE = []
for step_to_eval in steps_to_evaluate:
# ignore nested steps
if isinstance(step_to_eval, list):
steps_to_run.append(step_to_eval)
continue
# skip step if its id is in the skip list
if step_to_eval.id in skip_steps:
main_logger.info(f"Skipping step {step_to_eval.id}")
results[step_to_eval.id] = step_to_eval.step.skip("Skipped by user")
# skip step if a dependency failed
elif not _step_dependencies_succeeded(step_to_eval, results):
main_logger.info(
f"Skipping step {step_to_eval.id} because one of the dependencies have not been met: {step_to_eval.depends_on}"
)
results[step_to_eval.id] = step_to_eval.step.skip("Skipped because a dependency was not met")
else:
steps_to_run.append(step_to_eval)
return steps_to_run, results
def _get_next_step_group(steps: STEP_TREE) -> Tuple[STEP_TREE, STEP_TREE]:
"""
Get the next group of steps to run concurrently.
"""
if not steps:
return [], []
if isinstance(steps[0], list):
return list(steps[0]), list(steps[1:])
else:
# Termination case: if the next step is not a list that means we have reached the max depth
return steps, []
def _log_step_tree(step_tree: STEP_TREE, options: RunStepOptions, depth: int = 0) -> None:
"""
Log the step tree to the console.
e.g.
Step tree
- step1
- step2
- step3
- step4 (skip)
- step5
- step6
"""
indent = " "
for steps in step_tree:
if isinstance(steps, list):
_log_step_tree(list(steps), options, depth + 1)
else:
if steps.id in options.skip_steps:
main_logger.info(f"{indent * depth}- {steps.id} (skip)")
else:
main_logger.info(f"{indent * depth}- {steps.id}")
async def run_steps(
runnables: STEP_TREE,
results: RESULTS_DICT = {},
options: RunStepOptions = RunStepOptions(),
) -> RESULTS_DICT:
"""Run multiple steps sequentially, or in parallel if steps are wrapped into a sublist.
Examples
--------
>>> from pipelines.models.steps import Step, StepResult, StepStatus
>>> class TestStep(Step):
... async def _run(self) -> StepResult:
... return StepResult(step=self, status=StepStatus.SUCCESS)
>>> steps = [
... StepToRun(id="step1", step=TestStep()),
... [
... StepToRun(id="step2", step=TestStep()),
... StepToRun(id="step3", step=TestStep()),
... ],
... StepToRun(id="step4", step=TestStep()),
... ]
>>> results = await run_steps(steps)
>>> results["step1"].status
<StepStatus.SUCCESS: 1>
>>> results["step2"].status
<StepStatus.SUCCESS: 1>
>>> results["step3"].status
<StepStatus.SUCCESS: 1>
>>> results["step4"].status
<StepStatus.SUCCESS: 1>
Args:
runnables (List[StepToRun]): List of steps to run.
results (RESULTS_DICT, optional): Dictionary of step results, used for recursion.
Returns:
RESULTS_DICT: Dictionary of step results.
"""
# If there are no steps to run, return the results
if not runnables:
return results
step_ids_to_skip = options.get_step_ids_to_skip(runnables)
# Log the step tree
if options.log_step_tree:
main_logger.info(f"STEP TREE: {runnables}")
_log_step_tree(runnables, options)
options.log_step_tree = False
# If any of the previous steps failed, skip the remaining steps
if options.fail_fast and any(result.status is StepStatus.FAILURE and result.consider_in_overall_status for result in results.values()):
skipped_results = _skip_remaining_steps(runnables)
return {**results, **skipped_results}
# Pop the next step to run
steps_to_evaluate, remaining_steps = _get_next_step_group(runnables)
# Remove any skipped steps
steps_to_run, results = _filter_skipped_steps(steps_to_evaluate, step_ids_to_skip, results)
# Run all steps in list concurrently
semaphore = anyio.Semaphore(options.concurrency)
async with semaphore:
async with asyncer.create_task_group() as task_group:
tasks = []
for step_to_run in steps_to_run:
# if the step to run is a list, run it in parallel
if isinstance(step_to_run, list):
tasks.append(task_group.soonify(run_steps)(list(step_to_run), results, options))
else:
step_args = await evaluate_run_args(step_to_run.args, results)
step_to_run.step.extra_params = options.step_params.get(step_to_run.id, {})
main_logger.info(f"QUEUING STEP {step_to_run.id}")
tasks.append(task_group.soonify(step_to_run.step.run)(**step_args))
# Apply new results
new_results: Dict[str, Any] = {}
for i, task in enumerate(tasks):
step_to_run = steps_to_run[i]
if isinstance(step_to_run, list):
new_results = {**new_results, **task.value}
else:
new_results[step_to_run.id] = task.value
return await run_steps(
runnables=remaining_steps,
results={**results, **new_results},
options=options,
)
| StepToRun |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/organization_coding_agents.py | {
"start": 1174,
"end": 1711
} | class ____(serializers.Serializer[dict[str, object]]):
integration_id = serializers.IntegerField(required=True)
run_id = serializers.IntegerField(required=True, min_value=1)
trigger_source = serializers.ChoiceField(
choices=[AutofixTriggerSource.ROOT_CAUSE, AutofixTriggerSource.SOLUTION],
default=AutofixTriggerSource.SOLUTION,
required=False,
)
instruction = serializers.CharField(required=False, allow_blank=True, max_length=4096)
@region_silo_endpoint
| OrganizationCodingAgentLaunchSerializer |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F722_1.py | {
"start": 60,
"end": 311
} | class ____:
def f(self, arg: "this isn't python") -> "this isn't python either":
x: "this also isn't python" = 1
# No errors
@no_type_check
def f(arg: "this isn't python") -> "this isn't python either":
x: "this also isn't python" = 0
| C |
python | pallets__itsdangerous | src/itsdangerous/timed.py | {
"start": 533,
"end": 5955
} | class ____(Signer):
"""Works like the regular :class:`.Signer` but also records the time
of the signing and can be used to expire signatures. The
:meth:`unsign` method can raise :exc:`.SignatureExpired` if the
unsigning failed because the signature is expired.
"""
def get_timestamp(self) -> int:
"""Returns the current timestamp. The function must return an
integer.
"""
return int(time.time())
def timestamp_to_datetime(self, ts: int) -> datetime:
"""Convert the timestamp from :meth:`get_timestamp` into an
aware :class`datetime.datetime` in UTC.
.. versionchanged:: 2.0
The timestamp is returned as a timezone-aware ``datetime``
in UTC rather than a naive ``datetime`` assumed to be UTC.
"""
return datetime.fromtimestamp(ts, tz=timezone.utc)
def sign(self, value: str | bytes) -> bytes:
"""Signs the given string and also attaches time information."""
value = want_bytes(value)
timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
sep = want_bytes(self.sep)
value = value + sep + timestamp
return value + sep + self.get_signature(value)
# Ignore overlapping signatures check, return_timestamp is the only
# parameter that affects the return type.
@t.overload
def unsign( # pyright: ignore
self,
signed_value: str | bytes,
max_age: int | None = None,
return_timestamp: t.Literal[False] = False,
) -> bytes: ...
@t.overload
def unsign(
self,
signed_value: str | bytes,
max_age: int | None = None,
return_timestamp: t.Literal[True] = True,
) -> tuple[bytes, datetime]: ...
def unsign(
self,
signed_value: str | bytes,
max_age: int | None = None,
return_timestamp: bool = False,
) -> tuple[bytes, datetime] | bytes:
"""Works like the regular :meth:`.Signer.unsign` but can also
validate the time. See the base docstring of the class for
the general behavior. If ``return_timestamp`` is ``True`` the
timestamp of the signature will be returned as an aware
:class:`datetime.datetime` object in UTC.
.. versionchanged:: 2.0
The timestamp is returned as a timezone-aware ``datetime``
in UTC rather than a naive ``datetime`` assumed to be UTC.
"""
try:
result = super().unsign(signed_value)
sig_error = None
except BadSignature as e:
sig_error = e
result = e.payload or b""
sep = want_bytes(self.sep)
# If there is no timestamp in the result there is something
# seriously wrong. In case there was a signature error, we raise
# that one directly, otherwise we have a weird situation in
# which we shouldn't have come except someone uses a time-based
# serializer on non-timestamp data, so catch that.
if sep not in result:
if sig_error:
raise sig_error
raise BadTimeSignature("timestamp missing", payload=result)
value, ts_bytes = result.rsplit(sep, 1)
ts_int: int | None = None
ts_dt: datetime | None = None
try:
ts_int = bytes_to_int(base64_decode(ts_bytes))
except Exception:
pass
# Signature is *not* okay. Raise a proper error now that we have
# split the value and the timestamp.
if sig_error is not None:
if ts_int is not None:
try:
ts_dt = self.timestamp_to_datetime(ts_int)
except (ValueError, OSError, OverflowError) as exc:
# Windows raises OSError
# 32-bit raises OverflowError
raise BadTimeSignature(
"Malformed timestamp", payload=value
) from exc
raise BadTimeSignature(str(sig_error), payload=value, date_signed=ts_dt)
# Signature was okay but the timestamp is actually not there or
# malformed. Should not happen, but we handle it anyway.
if ts_int is None:
raise BadTimeSignature("Malformed timestamp", payload=value)
# Check timestamp is not older than max_age
if max_age is not None:
age = self.get_timestamp() - ts_int
if age > max_age:
raise SignatureExpired(
f"Signature age {age} > {max_age} seconds",
payload=value,
date_signed=self.timestamp_to_datetime(ts_int),
)
if age < 0:
raise SignatureExpired(
f"Signature age {age} < 0 seconds",
payload=value,
date_signed=self.timestamp_to_datetime(ts_int),
)
if return_timestamp:
return value, self.timestamp_to_datetime(ts_int)
return value
def validate(self, signed_value: str | bytes, max_age: int | None = None) -> bool:
"""Only validates the given signed value. Returns ``True`` if
the signature exists and is valid."""
try:
self.unsign(signed_value, max_age=max_age)
return True
except BadSignature:
return False
| TimestampSigner |
python | psf__black | src/blib2to3/pytree.py | {
"start": 19863,
"end": 22503
} | class ____(BasePattern):
wildcards: bool = False
def __init__(
self,
type: int | None = None,
content: Iterable[str] | None = None,
name: str | None = None,
) -> None:
"""
Initializer. Takes optional type, content, and name.
The type, if given, must be a symbol type (>= 256). If the
type is None this matches *any* single node (leaf or not),
except if content is not None, in which it only matches
non-leaf nodes that also match the content pattern.
The content, if not None, must be a sequence of Patterns that
must match the node's children exactly. If the content is
given, the type must not be None.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert type >= 256, type
if content is not None:
assert not isinstance(content, str), repr(content)
newcontent = list(content)
for i, item in enumerate(newcontent):
assert isinstance(item, BasePattern), (i, item)
# I don't even think this code is used anywhere, but it does cause
# unreachable errors from mypy. This function's signature does look
# odd though *shrug*.
if isinstance(item, WildcardPattern): # type: ignore[unreachable]
self.wildcards = True # type: ignore[unreachable]
self.type = type
self.content = newcontent # TODO: this is unbound when content is None
self.name = name
def _submatch(self, node, results=None) -> bool:
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
if self.wildcards:
for c, r in generate_matches(self.content, node.children):
if c == len(node.children):
if results is not None:
results.update(r)
return True
return False
if len(self.content) != len(node.children):
return False
for subpattern, child in zip(self.content, node.children):
if not subpattern.match(child, results):
return False
return True
| NodePattern |
python | python-visualization__folium | folium/plugins/minimap.py | {
"start": 204,
"end": 4838
} | class ____(JSCSSMixin, MacroElement):
"""Add a minimap (locator) to an existing map.
Uses the Leaflet plugin by Norkart under BSD 2-Clause "Simplified" License.
https://github.com/Norkart/Leaflet-MiniMap
Parameters
----------
tile_layer : folium TileLayer object or str, default None
Provide a folium TileLayer object or the wanted tiles as string.
If not provided it will use the default of 'TileLayer', currently
OpenStreetMap.
position : str, default 'bottomright'
The standard Control position parameter for the widget.
width : int, default 150
The width of the minimap in pixels.
height : int, default 150
The height of the minimap in pixels.
collapsed_width : int, default 25
The width of the toggle marker and the minimap when collapsed in pixels.
collapsed_height : int, default 25
The height of the toggle marker and the minimap when collapsed
zoom_level_offset : int, default -5
The offset applied to the zoom in the minimap compared to the zoom of
the main map. Can be positive or negative.
zoom_level_fixed : int, default None
Overrides the offset to apply a fixed zoom level to the minimap
regardless of the main map zoom.
Set it to any valid zoom level, if unset zoom_level_offset is used
instead.
center_fixed : bool, default False
Applies a fixed position to the minimap regardless of the main map's
view / position. Prevents panning the minimap, but does allow zooming
(both in the minimap and the main map).
If the minimap is zoomed, it will always zoom around the centerFixed
point. You can pass in a LatLng-equivalent object.
zoom_animation : bool, default False
Sets whether the minimap should have an animated zoom.
(Will cause it to lag a bit after the movement of the main map.)
toggle_display : bool, default False
Sets whether the minimap should have a button to minimise it.
auto_toggle_display : bool, default False
Sets whether the minimap should hide automatically
if the parent map bounds does not fit within the minimap bounds.
Especially useful when 'zoomLevelFixed' is set.
minimized : bool, default False
Sets whether the minimap should start in a minimized position.
Examples
--------
>>> MiniMap(position="bottomleft")
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.tile_layer.get_name() }} = L.tileLayer(
{{ this.tile_layer.tiles|tojson }},
{{ this.tile_layer.options|tojson }}
);
var {{ this.get_name() }} = new L.Control.MiniMap(
{{ this.tile_layer.get_name() }},
{{ this.options|tojavascript }}
);
{{ this._parent.get_name() }}.addControl({{ this.get_name() }});
{% endmacro %}
"""
) # noqa
default_js = [
(
"Control_MiniMap_js",
"https://cdnjs.cloudflare.com/ajax/libs/leaflet-minimap/3.6.1/Control.MiniMap.js",
)
]
default_css = [
(
"Control_MiniMap_css",
"https://cdnjs.cloudflare.com/ajax/libs/leaflet-minimap/3.6.1/Control.MiniMap.css",
),
]
def __init__(
self,
tile_layer=None,
position="bottomright",
width=150,
height=150,
collapsed_width=25,
collapsed_height=25,
zoom_level_offset=-5,
zoom_level_fixed=None,
center_fixed=False,
zoom_animation=False,
toggle_display=False,
auto_toggle_display=False,
minimized=False,
**kwargs
):
super().__init__()
self._name = "MiniMap"
if tile_layer is None:
self.tile_layer = TileLayer()
elif isinstance(tile_layer, TileLayer):
self.tile_layer = tile_layer
else:
self.tile_layer = TileLayer(tile_layer)
self.options = remove_empty(
position=position,
width=width,
height=height,
collapsed_width=collapsed_width,
collapsed_height=collapsed_height,
zoom_level_offset=zoom_level_offset,
zoom_level_fixed=zoom_level_fixed,
center_fixed=center_fixed,
zoom_animation=zoom_animation,
toggle_display=toggle_display,
auto_toggle_display=auto_toggle_display,
minimized=minimized,
**kwargs
)
| MiniMap |
python | readthedocs__readthedocs.org | readthedocs/core/migrations/0017_remove_unused_indexes.py | {
"start": 150,
"end": 1190
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("core", "0016_update_dj_simple_history"),
]
operations = [
migrations.AlterField(
model_name="historicaluser",
name="extra_history_user_id",
field=models.IntegerField(blank=True, null=True, verbose_name="ID"),
),
migrations.AlterField(
model_name="historicaluser",
name="extra_history_user_username",
field=models.CharField(max_length=150, null=True, verbose_name="username"),
),
migrations.AlterField(
model_name="historicaluserprofile",
name="extra_history_user_id",
field=models.IntegerField(blank=True, null=True, verbose_name="ID"),
),
migrations.AlterField(
model_name="historicaluserprofile",
name="extra_history_user_username",
field=models.CharField(max_length=150, null=True, verbose_name="username"),
),
]
| Migration |
python | Textualize__textual | docs/examples/widgets/directory_tree_filtered.py | {
"start": 329,
"end": 512
} | class ____(App):
def compose(self) -> ComposeResult:
yield FilteredDirectoryTree("./")
if __name__ == "__main__":
app = DirectoryTreeApp()
app.run()
| DirectoryTreeApp |
python | ray-project__ray | release/train_tests/benchmark/image_classification/factory.py | {
"start": 9597,
"end": 11696
} | class ____(BaseDataLoaderFactory):
"""Factory for creating mock dataloaders for testing.
Provides mock implementations of training and validation dataloaders
that generate random image and label tensors.
"""
def get_train_dataloader(
self,
) -> Generator[Tuple[torch.Tensor, torch.Tensor], None, None]:
"""Get mock training dataloader.
Returns:
Generator yielding (image_tensor, label_tensor) batches
"""
dataloader_config = self.get_dataloader_config()
return mock_dataloader(
num_batches=1024, batch_size=dataloader_config.train_batch_size
)
def get_val_dataloader(
self,
) -> Generator[Tuple[torch.Tensor, torch.Tensor], None, None]:
"""Get mock validation dataloader.
Returns:
Generator yielding (image_tensor, label_tensor) batches
"""
dataloader_config = self.get_dataloader_config()
return mock_dataloader(
num_batches=512, batch_size=dataloader_config.validation_batch_size
)
def get_imagenet_data_dirs(task_config: ImageClassificationConfig) -> Dict[str, str]:
"""Returns a dict with the root imagenet dataset directories for train/val/test,
corresponding to the data format and local/s3 dataset location."""
from image_classification.imagenet import IMAGENET_LOCALFS_SPLIT_DIRS
from image_classification.jpeg.imagenet import (
IMAGENET_JPEG_SPLIT_S3_DIRS,
)
from image_classification.parquet.imagenet import (
IMAGENET_PARQUET_SPLIT_S3_DIRS,
)
data_format = task_config.image_classification_data_format
if task_config.image_classification_local_dataset:
return IMAGENET_LOCALFS_SPLIT_DIRS
if data_format == ImageClassificationConfig.ImageFormat.JPEG:
return IMAGENET_JPEG_SPLIT_S3_DIRS
elif data_format == ImageClassificationConfig.ImageFormat.PARQUET:
return IMAGENET_PARQUET_SPLIT_S3_DIRS
else:
raise ValueError(f"Unknown data format: {data_format}")
| ImageClassificationMockDataLoaderFactory |
python | PrefectHQ__prefect | src/integrations/prefect-aws/prefect_aws/settings.py | {
"start": 1086,
"end": 1367
} | class ____(PrefectBaseSettings):
model_config = build_settings_config(("integrations", "aws", "ecs"))
observer: EcsObserverSettings = Field(
description="Settings for controlling ECS observer behavior.",
default_factory=EcsObserverSettings,
)
| EcsSettings |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/core.py | {
"start": 39310,
"end": 39458
} | class ____:
def __init__(self, seed):
self.seed = seed
def __repr__(self):
return f"RandomSeeder({self.seed!r})"
| RandomSeeder |
python | pytorch__pytorch | torch/testing/_internal/common_modules.py | {
"start": 3065,
"end": 6713
} | class ____(_TestParametrizer):
""" PROTOTYPE: Decorator for specifying a list of modules over which to run a test. """
def __init__(self, module_info_iterable, allowed_dtypes=None,
train_eval_mode=TrainEvalMode.train_and_eval, skip_if_dynamo=True):
self.module_info_list = list(module_info_iterable)
self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None
self.train_eval_mode = train_eval_mode
self.skip_if_dynamo = skip_if_dynamo
def _get_training_flags(self, module_info):
training_flags = []
if (self.train_eval_mode == TrainEvalMode.train_only or
self.train_eval_mode == TrainEvalMode.train_and_eval):
training_flags.append(True)
if (self.train_eval_mode == TrainEvalMode.eval_only or
self.train_eval_mode == TrainEvalMode.train_and_eval):
training_flags.append(False)
# If train and eval modes don't differ for the module, don't bother using more than one.
if not module_info.train_and_eval_differ:
training_flags = training_flags[:1]
return training_flags
def _parametrize_test(self, test, generic_cls, device_cls):
if device_cls is None:
raise RuntimeError('The @modules decorator is only intended to be used in a device-specific '
'context; use it with instantiate_device_type_tests() instead of '
'instantiate_parametrized_tests()')
for module_info in self.module_info_list:
dtypes = set(module_info.supported_dtypes(device_cls.device_type))
if self.allowed_dtypes is not None:
dtypes = dtypes.intersection(self.allowed_dtypes)
training_flags = self._get_training_flags(module_info)
for (training, dtype) in product(training_flags, dtypes):
# Construct the test name; device / dtype parts are handled outside.
# See [Note: device and dtype suffix placement]
test_name = module_info.formatted_name
if len(training_flags) > 1:
test_name += f"_{'train_mode' if training else 'eval_mode'}"
# Construct parameter kwargs to pass to the test.
param_kwargs = {'module_info': module_info}
_update_param_kwargs(param_kwargs, 'dtype', dtype)
_update_param_kwargs(param_kwargs, 'training', training)
try:
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
if self.skip_if_dynamo and not torch.testing._internal.common_utils.TEST_WITH_TORCHINDUCTOR:
test_wrapper = skipIfTorchDynamo("Policy: we don't run ModuleInfo tests w/ Dynamo")(test_wrapper)
decorator_fn = partial(module_info.get_decorators, generic_cls.__name__,
test.__name__, device_cls.device_type, dtype)
yield (test_wrapper, test_name, param_kwargs, decorator_fn)
except Exception as ex:
# Provides an error message for debugging before rethrowing the exception
print(f"Failed to instantiate {test_name} for module {module_info.name}!")
raise ex
def get_module_common_name(module_cls):
if module_cls in MODULE_CLASS_NAMES:
# Example: "nn.Linear"
return MODULE_CLASS_NAMES[module_cls]
else:
return module_cls.__name__
| modules |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_checkbox06.py | {
"start": 315,
"end": 1921
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    # Fixture data shared by both test cases: column values and the
    # checked-state of each checkbox, in worksheet row order (rows 1..4).
    _COL1_VALUES = (1, 2, 3, 4)
    _CHECK_STATES = (True, False, False, True)

    def setUp(self):
        self.set_filename("checkbox06.xlsx")

    def test_create_file_with_insert_checkbox(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.write(0, 0, "Col1")
        for row, value in enumerate(self._COL1_VALUES, start=1):
            worksheet.write(row, 0, value)

        worksheet.write(0, 1, "Col2")
        # Checkboxes inserted via the dedicated worksheet API.
        for row, checked in enumerate(self._CHECK_STATES, start=1):
            worksheet.insert_checkbox(row, 1, checked)

        workbook.close()
        self.assertExcelEqual()

    def test_create_file_with_boolean_and_format(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        cell_format = workbook.add_format({"checkbox": True})

        worksheet.write(0, 0, "Col1")
        for row, value in enumerate(self._COL1_VALUES, start=1):
            worksheet.write(row, 0, value)

        worksheet.write(0, 1, "Col2")
        # Same visual result produced by writing booleans with a
        # checkbox-enabled cell format instead of insert_checkbox().
        for row, checked in enumerate(self._CHECK_STATES, start=1):
            worksheet.write(row, 1, checked, cell_format)

        workbook.close()
        self.assertExcelEqual()
| TestCompareXLSXFiles |
python | bokeh__bokeh | tests/unit/bokeh/core/test_serialization.py | {
"start": 2614,
"end": 2843
class ____:
    """Serialization test fixture mixing required, defaulted, and
    NotRequired fields (presumably declared as a dataclass elsewhere in
    the file — the decorator lies outside this span; confirm)."""

    f0: int  # required
    f1: Sequence[int]  # required
    f2: SomeDataClass | None = None  # self-referential, optional
    # NotRequired fields default to the Unspecified sentinel, which is
    # distinct from an explicit None.
    f3: NotRequired[bool | None] = Unspecified
    f4: NotRequired[SomeProps] = Unspecified
    f5: NotRequired[SomeModel] = Unspecified
| SomeDataClass |
python | rq__rq | rq/executions.py | {
"start": 4291,
"end": 7079
class ____(BaseRegistry):
    """Class to represent a registry of job executions.

    Each job has its own execution registry: a Redis sorted set whose
    members are execution ids and whose scores are expiry timestamps.
    """

    key_template = 'rq:executions:{0}'

    def __init__(self, job_id: str, connection: Redis):
        self.connection = connection
        self.job_id = job_id
        # e.g. "rq:executions:<job_id>" — one sorted set per job.
        self.key = self.key_template.format(job_id)

    def cleanup(self, timestamp: Optional[float] = None, exception_handlers: Optional[list] = None):
        """Remove expired executions from the registry.

        Removes entries with an expiry time earlier than ``timestamp``,
        specified as seconds since the Unix epoch. ``timestamp`` defaults to
        call time if unspecified. ``exception_handlers`` is accepted for
        interface compatibility but is not used here.
        """
        score = timestamp if timestamp is not None else current_timestamp()
        self.connection.zremrangebyscore(self.key, 0, score)

    def add(self, execution: Execution, ttl: int, pipeline: 'Pipeline') -> Any:  # type: ignore
        """Queue registration of an execution with expiry now + ttl + 60s.

        NOTE(review): an earlier docstring claimed a ttl of -1 maps to +inf,
        but no such special case exists below — a ttl of -1 simply produces
        a near-immediate expiry. Confirm the intended behavior.

        Args:
            execution (Execution): The Execution to add
            ttl (int): The time to live, in seconds.
            pipeline (Pipeline): The Redis pipeline the commands are queued on.

        Returns:
            None (commands are only queued; nothing executes until the
            pipeline runs).
        """
        score = current_timestamp() + ttl
        # 60-second grace so an execution is not reaped exactly at its deadline.
        pipeline.zadd(self.key, {execution.id: score + 60})
        # Still unsure how to handle registry TTL, but it should be the same as job TTL
        pipeline.expire(self.key, ttl + 60)
        return

    def remove(self, execution: Execution, pipeline: 'Pipeline') -> Any:  # type: ignore
        """Queue removal of an execution from the registry."""
        return pipeline.zrem(self.key, execution.id)

    def get_execution_ids(self, start: int = 0, end: int = -1) -> list[str]:
        """Return all execution IDs in the registry, pruning expired ones first."""
        self.cleanup()
        return [as_text(job_id) for job_id in self.connection.zrange(self.key, start, end)]

    def get_executions(self, start: int = 0, end: int = -1) -> list[Execution]:
        """Return an Execution object for every id currently in the registry."""
        execution_ids = self.get_execution_ids(start, end)
        executions = []
        # TODO: This operation should be pipelined, preferably using Execution.fetch_many()
        for execution_id in execution_ids:
            executions.append(Execution.fetch(id=execution_id, job_id=self.job_id, connection=self.connection))
        return executions

    def delete(self, job: Job, pipeline: 'Pipeline'):
        """Delete the registry."""
        # Delete each execution first, then drop the sorted set itself.
        executions = self.get_executions()
        for execution in executions:
            execution.delete(pipeline=pipeline, job=job)
        pipeline.delete(self.key)
| ExecutionRegistry |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/batch_matmul_op_test.py | {
"start": 1716,
"end": 6058
class ____(test.TestCase):
  """Checks tf matmul's batched/adjoint behavior against np.matmul."""

  # Uses numpy to compute batch_matmul(x, y, adjoint_a, adjoint_b).
  def _npBatchMatmul(self, x, y, adjoint_a, adjoint_b):
    # output's shape depends on adj[0] and adj[1]
    if adjoint_a:
      x = np.conjugate(np.swapaxes(x, -1, -2))
    if adjoint_b:
      y = np.conjugate(np.swapaxes(y, -1, -2))
    return np.matmul(x, y)

  # Compares TensorFlow BatchMatmul with NumPy's matmul.
  def _compare(self, x_in, y_in, adjoint_a, adjoint_b, static_shape):
    # Pre-transpose the trailing two dims when an adjoint flag is set, so the
    # op's adjoint recovers the intended operand.
    x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
    y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
    x = x_in if not adjoint_a else x_in.reshape(x_t_shape)
    y = y_in if not adjoint_b else y_in.reshape(y_t_shape)
    is_floating = x.dtype != np.int32
    # np.finfo doesn't support bfloat16. So, we manually compute the eps which
    # defines the difference between 1.0 and the next smallest representable
    # float larger than 1.0. For bfloat16, the difference is 1/128.
    if x.dtype == dtypes.bfloat16.as_numpy_dtype:
      epsilon = 0.0078125
    elif is_floating:
      epsilon = np.finfo(x.dtype).eps
    # Integer results must match exactly; float tolerance scales with eps.
    tol = 100 * epsilon if is_floating else 0
    with self.cached_session(use_gpu=is_floating) as sess:
      if static_shape:
        z0 = math_ops.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
        z0_val = self.evaluate(z0)
      else:
        # Placeholders created without shapes, so the op cannot rely on
        # statically known dimensions.
        x_ph = array_ops.placeholder(x.dtype)
        y_ph = array_ops.placeholder(y.dtype)
        z0 = math_ops.matmul(
            x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
        z0_val = sess.run(z0, feed_dict={x_ph: x, y_ph: y})
      z1 = self._npBatchMatmul(x, y, adjoint_a, adjoint_b)
      self.assertAllClose(z0_val, z1, rtol=tol, atol=tol)

  def _testNonEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):

    # Shapes are (batch dims..., rows, cols).
    def CompareNonEmpty(self, a_shape, b_shape):
      self._compare(
          GetRandomNormalInput(a_shape, dtype),
          GetRandomNormalInput(b_shape, dtype),
          adjoint_a,
          adjoint_b,
          static_shape=use_static_shape)

    CompareNonEmpty(self, [1, 2, 3], [1, 3, 5])
    CompareNonEmpty(self, [1, 2, 3], [1, 3, 1])
    CompareNonEmpty(self, [1, 1, 3], [1, 3, 5])
    CompareNonEmpty(self, [1, 2, 3], [1, 3, 5])
    CompareNonEmpty(self, [7, 1, 3], [7, 3, 5])
    CompareNonEmpty(self, [7, 2, 3], [7, 3, 1])
    CompareNonEmpty(self, [7, 2, 3], [7, 3, 5])
    CompareNonEmpty(self, [10, 64, 75], [10, 75, 30])
    CompareNonEmpty(self, [5, 7, 2, 3], [5, 7, 3, 5])

  def _testBroadcasting(self, dtype, adjoint_a, adjoint_b, use_static_shape):

    # Batch dimensions deliberately differ between operands to exercise
    # broadcasting.
    def CompareNonEmpty(self, a_shape, b_shape):
      self._compare(
          GetRandomNormalInput(a_shape, dtype),
          GetRandomNormalInput(b_shape, dtype),
          adjoint_a,
          adjoint_b,
          static_shape=use_static_shape)

    CompareNonEmpty(self, [2, 3], [1, 3, 5])
    CompareNonEmpty(self, [1, 2, 3], [3, 5])
    CompareNonEmpty(self, [5, 1, 2, 3], [1, 7, 3, 5])
    CompareNonEmpty(self, [5, 2, 2, 3], [3, 5])
    CompareNonEmpty(self, [2, 3], [5, 2, 3, 5])
    CompareNonEmpty(self, [4, 5, 1, 2, 3], [1, 1, 3, 5])
    CompareNonEmpty(self, [1, 2, 1, 4, 2, 1, 3, 4], [3, 2, 1, 1, 1, 2, 4, 2])

  def _testEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):

    # Zero-sized dimensions in every position of the shapes.
    def CompareEmpty(self, a_shape, b_shape):
      self._compare(
          np.zeros(a_shape).astype(dtype),
          np.zeros(b_shape).astype(dtype),
          adjoint_a,
          adjoint_b,
          static_shape=use_static_shape)

    CompareEmpty(self, [0, 3, 2], [0, 2, 4])
    CompareEmpty(self, [3, 0, 2], [3, 2, 5])
    CompareEmpty(self, [3, 3, 2], [3, 2, 0])


def _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape):
  """Builds a test method covering non-empty and empty inputs for one
  (dtype, adjoint flags, shape-mode) combination."""

  @test_util.run_without_tensor_float_32("Tests batch matmul")
  def Test(self):
    np.random.seed(42)
    self._testNonEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
    self._testEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)

  return Test


def _GetBatchMatmulOpBroadcastingTest(dtype, adjoint_a, adjoint_b,
                                      use_static_shape):
  """Builds a test method covering broadcast batch dims for one combination."""

  @test_util.run_without_tensor_float_32("Tests batch matmul")
  def Test(self):
    np.random.seed(42)
    self._testBroadcasting(dtype, adjoint_a, adjoint_b, use_static_shape)

  return Test
| BatchMatmulOpTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 2669,
"end": 2749
class ____:
    # Type-checker test sample: a class whose method accepts an instance of
    # this same class, referenced via a forward-reference string annotation.
    def m(self, x: "Impl_ContraRecurs") -> None: ...
| Impl_ContraRecurs |
python | django__django | tests/middleware_exceptions/middleware.py | {
"start": 611,
"end": 787
class ____(BaseMiddleware):
    def process_exception(self, request, exception):
        # Convert any exception raised by the view into a plain response;
        # the original exception object is intentionally discarded.
        return HttpResponse("Exception caught")
@async_only_middleware
| ProcessExceptionMiddleware |
python | django__django | django/contrib/auth/middleware.py | {
"start": 910,
"end": 1550
class ____(MiddlewareMixin):
    def process_request(self, request):
        # Session middleware must have run first: the authentication state
        # is derived from request.session, as the error text explains.
        if not hasattr(request, "session"):
            raise ImproperlyConfigured(
                "The Django authentication middleware requires session "
                "middleware to be installed. Edit your MIDDLEWARE setting to "
                "insert "
                "'django.contrib.sessions.middleware.SessionMiddleware' before "
                "'django.contrib.auth.middleware.AuthenticationMiddleware'."
            )
        # Lazy: get_user() only runs when request.user is first accessed.
        request.user = SimpleLazyObject(lambda: get_user(request))
        # Async counterpart: an awaitable accessor bound to this request.
        request.auser = partial(auser, request)
| AuthenticationMiddleware |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/utils.py | {
"start": 144,
"end": 592
class ____:
    """Pluggable hook for handling unexpected daemon exceptions.

    Callers invoke the ``process_exception`` class attribute; reassigning it
    swaps the global handling behavior.
    """

    @staticmethod
    def default_process_exception(
        exc_info: ExceptionInfo,
        logger: logging.Logger,
        log_message: str,
    ) -> SerializableErrorInfo:
        # Snapshot the exception into a serializable form, log it with
        # traceback, and hand the snapshot back for the caller to record.
        error_info = serializable_error_info_from_exc_info(exc_info)
        logger.exception(log_message)
        return error_info

    # global behavior for how to handle unexpected exceptions
    process_exception = default_process_exception
| DaemonErrorCapture |
python | huggingface__transformers | src/transformers/models/lightglue/modular_lightglue.py | {
"start": 14443,
"end": 15034
class ____(CLIPMLP):
    def __init__(self, config: LightGlueConfig):
        super().__init__(config)
        # Override the base class's first projection so both of fc1's
        # dimensions are intermediate_size, and insert a LayerNorm
        # (presumably the CLIPMLP base sizes fc1 differently — confirm
        # against the base class definition).
        self.fc1 = nn.Linear(config.intermediate_size, config.intermediate_size)
        self.layer_norm = nn.LayerNorm(config.intermediate_size, elementwise_affine=True)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Apply fc1 -> LayerNorm -> activation -> fc2 to the features."""
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
| LightGlueMLP |
python | python__mypy | mypy/constraints.py | {
"start": 27886,
"end": 80066
} | class ____(TypeVisitor[list[Constraint]]):
"""Visitor class for inferring type constraints."""
# The type that is compared against a template
# TODO: The value may be None. Is that actually correct?
actual: ProperType
def __init__(self, actual: ProperType, direction: int, skip_neg_op: bool) -> None:
# Direction must be SUBTYPE_OF or SUPERTYPE_OF.
self.actual = actual
self.direction = direction
# Whether to skip polymorphic inference (involves inference in opposite direction)
# this is used to prevent infinite recursion when both template and actual are
# generic callables.
self.skip_neg_op = skip_neg_op
# Trivial leaf types
def visit_unbound_type(self, template: UnboundType) -> list[Constraint]:
return []
def visit_any(self, template: AnyType) -> list[Constraint]:
return []
def visit_none_type(self, template: NoneType) -> list[Constraint]:
return []
def visit_uninhabited_type(self, template: UninhabitedType) -> list[Constraint]:
return []
def visit_erased_type(self, template: ErasedType) -> list[Constraint]:
return []
def visit_deleted_type(self, template: DeletedType) -> list[Constraint]:
return []
def visit_literal_type(self, template: LiteralType) -> list[Constraint]:
return []
# Errors
def visit_partial_type(self, template: PartialType) -> list[Constraint]:
# We can't do anything useful with a partial type here.
assert False, "Internal error"
# Non-trivial leaf type
def visit_type_var(self, template: TypeVarType) -> list[Constraint]:
assert False, (
"Unexpected TypeVarType in ConstraintBuilderVisitor"
" (should have been handled in infer_constraints)"
)
def visit_param_spec(self, template: ParamSpecType) -> list[Constraint]:
# Can't infer ParamSpecs from component values (only via Callable[P, T]).
return []
def visit_type_var_tuple(self, template: TypeVarTupleType) -> list[Constraint]:
raise NotImplementedError
def visit_unpack_type(self, template: UnpackType) -> list[Constraint]:
raise RuntimeError("Mypy bug: unpack should be handled at a higher level.")
def visit_parameters(self, template: Parameters) -> list[Constraint]:
# Constraining Any against C[P] turns into infer_against_any([P], Any)
if isinstance(self.actual, AnyType):
return self.infer_against_any(template.arg_types, self.actual)
if type_state.infer_polymorphic and isinstance(self.actual, Parameters):
# For polymorphic inference we need to be able to infer secondary constraints
# in situations like [x: T] <: P <: [x: int].
return infer_callable_arguments_constraints(template, self.actual, self.direction)
if type_state.infer_polymorphic and isinstance(self.actual, ParamSpecType):
# Similar for [x: T] <: Q <: Concatenate[int, P].
return infer_callable_arguments_constraints(
template, self.actual.prefix, self.direction
)
# There also may be unpatched types after a user error, simply ignore them.
return []
# Non-leaf types
def visit_instance(self, template: Instance) -> list[Constraint]:
original_actual = actual = self.actual
res: list[Constraint] = []
if isinstance(actual, (CallableType, Overloaded)) and template.type.is_protocol:
if "__call__" in template.type.protocol_members:
# Special case: a generic callback protocol
if not any(template == t for t in template.type.inferring):
template.type.inferring.append(template)
call = mypy.subtypes.find_member(
"__call__", template, actual, is_operator=True
)
assert call is not None
if (
self.direction == SUPERTYPE_OF
and mypy.subtypes.is_subtype(actual, erase_typevars(call))
or self.direction == SUBTYPE_OF
and mypy.subtypes.is_subtype(erase_typevars(call), actual)
):
res.extend(infer_constraints(call, actual, self.direction))
template.type.inferring.pop()
if isinstance(actual, CallableType) and actual.fallback is not None:
if (
actual.is_type_obj()
and template.type.is_protocol
and self.direction == SUPERTYPE_OF
):
ret_type = get_proper_type(actual.ret_type)
if isinstance(ret_type, TupleType):
ret_type = mypy.typeops.tuple_fallback(ret_type)
if isinstance(ret_type, Instance):
res.extend(
self.infer_constraints_from_protocol_members(
ret_type, template, ret_type, template, class_obj=True
)
)
actual = actual.fallback
if isinstance(actual, TypeType) and template.type.is_protocol:
if self.direction == SUPERTYPE_OF:
a_item = actual.item
if isinstance(a_item, Instance):
res.extend(
self.infer_constraints_from_protocol_members(
a_item, template, a_item, template, class_obj=True
)
)
# Infer constraints for Type[T] via metaclass of T when it makes sense.
if isinstance(a_item, TypeVarType):
a_item = get_proper_type(a_item.upper_bound)
if isinstance(a_item, Instance) and a_item.type.metaclass_type:
res.extend(
self.infer_constraints_from_protocol_members(
a_item.type.metaclass_type, template, actual, template
)
)
if isinstance(actual, Overloaded) and actual.fallback is not None:
actual = actual.fallback
if isinstance(actual, TypedDictType):
actual = actual.as_anonymous().fallback
if isinstance(actual, LiteralType):
actual = actual.fallback
if isinstance(actual, Instance):
instance = actual
erased = erase_typevars(template)
assert isinstance(erased, Instance) # type: ignore[misc]
# We always try nominal inference if possible,
# it is much faster than the structural one.
if self.direction == SUBTYPE_OF and template.type.has_base(instance.type.fullname):
mapped = map_instance_to_supertype(template, instance.type)
tvars = mapped.type.defn.type_vars
if instance.type.has_type_var_tuple_type:
# Variadic types need special handling to map each type argument to
# the correct corresponding type variable.
assert instance.type.type_var_tuple_prefix is not None
assert instance.type.type_var_tuple_suffix is not None
prefix_len = instance.type.type_var_tuple_prefix
suffix_len = instance.type.type_var_tuple_suffix
tvt = instance.type.defn.type_vars[prefix_len]
assert isinstance(tvt, TypeVarTupleType)
fallback = tvt.tuple_fallback
i_prefix, i_middle, i_suffix = split_with_prefix_and_suffix(
instance.args, prefix_len, suffix_len
)
m_prefix, m_middle, m_suffix = split_with_prefix_and_suffix(
mapped.args, prefix_len, suffix_len
)
instance_args = i_prefix + (TupleType(list(i_middle), fallback),) + i_suffix
mapped_args = m_prefix + (TupleType(list(m_middle), fallback),) + m_suffix
else:
mapped_args = mapped.args
instance_args = instance.args
# N.B: We use zip instead of indexing because the lengths might have
# mismatches during daemon reprocessing.
for tvar, mapped_arg, instance_arg in zip(tvars, mapped_args, instance_args):
if isinstance(tvar, TypeVarType):
# The constraints for generic type parameters depend on variance.
# Include constraints from both directions if invariant.
if tvar.variance != CONTRAVARIANT:
res.extend(infer_constraints(mapped_arg, instance_arg, self.direction))
if tvar.variance != COVARIANT:
res.extend(
infer_constraints(mapped_arg, instance_arg, neg_op(self.direction))
)
elif isinstance(tvar, ParamSpecType) and isinstance(mapped_arg, ParamSpecType):
prefix = mapped_arg.prefix
if isinstance(instance_arg, Parameters):
# No such thing as variance for ParamSpecs, consider them invariant
# TODO: constraints between prefixes using
# infer_callable_arguments_constraints()
suffix: Type = instance_arg.copy_modified(
instance_arg.arg_types[len(prefix.arg_types) :],
instance_arg.arg_kinds[len(prefix.arg_kinds) :],
instance_arg.arg_names[len(prefix.arg_names) :],
)
res.append(Constraint(mapped_arg, SUBTYPE_OF, suffix))
res.append(Constraint(mapped_arg, SUPERTYPE_OF, suffix))
elif isinstance(instance_arg, ParamSpecType):
suffix = instance_arg.copy_modified(
prefix=Parameters(
instance_arg.prefix.arg_types[len(prefix.arg_types) :],
instance_arg.prefix.arg_kinds[len(prefix.arg_kinds) :],
instance_arg.prefix.arg_names[len(prefix.arg_names) :],
)
)
res.append(Constraint(mapped_arg, SUBTYPE_OF, suffix))
res.append(Constraint(mapped_arg, SUPERTYPE_OF, suffix))
elif isinstance(tvar, TypeVarTupleType):
# Handle variadic type variables covariantly for consistency.
res.extend(infer_constraints(mapped_arg, instance_arg, self.direction))
return res
elif self.direction == SUPERTYPE_OF and instance.type.has_base(template.type.fullname):
mapped = map_instance_to_supertype(instance, template.type)
tvars = template.type.defn.type_vars
if template.type.has_type_var_tuple_type:
# Variadic types need special handling to map each type argument to
# the correct corresponding type variable.
assert template.type.type_var_tuple_prefix is not None
assert template.type.type_var_tuple_suffix is not None
prefix_len = template.type.type_var_tuple_prefix
suffix_len = template.type.type_var_tuple_suffix
tvt = template.type.defn.type_vars[prefix_len]
assert isinstance(tvt, TypeVarTupleType)
fallback = tvt.tuple_fallback
t_prefix, t_middle, t_suffix = split_with_prefix_and_suffix(
template.args, prefix_len, suffix_len
)
m_prefix, m_middle, m_suffix = split_with_prefix_and_suffix(
mapped.args, prefix_len, suffix_len
)
template_args = t_prefix + (TupleType(list(t_middle), fallback),) + t_suffix
mapped_args = m_prefix + (TupleType(list(m_middle), fallback),) + m_suffix
else:
mapped_args = mapped.args
template_args = template.args
# N.B: We use zip instead of indexing because the lengths might have
# mismatches during daemon reprocessing.
for tvar, mapped_arg, template_arg in zip(tvars, mapped_args, template_args):
if isinstance(tvar, TypeVarType):
# The constraints for generic type parameters depend on variance.
# Include constraints from both directions if invariant.
if tvar.variance != CONTRAVARIANT:
res.extend(infer_constraints(template_arg, mapped_arg, self.direction))
if tvar.variance != COVARIANT:
res.extend(
infer_constraints(template_arg, mapped_arg, neg_op(self.direction))
)
elif isinstance(tvar, ParamSpecType) and isinstance(
template_arg, ParamSpecType
):
prefix = template_arg.prefix
if isinstance(mapped_arg, Parameters):
# No such thing as variance for ParamSpecs, consider them invariant
# TODO: constraints between prefixes using
# infer_callable_arguments_constraints()
suffix = mapped_arg.copy_modified(
mapped_arg.arg_types[len(prefix.arg_types) :],
mapped_arg.arg_kinds[len(prefix.arg_kinds) :],
mapped_arg.arg_names[len(prefix.arg_names) :],
)
res.append(Constraint(template_arg, SUBTYPE_OF, suffix))
res.append(Constraint(template_arg, SUPERTYPE_OF, suffix))
elif isinstance(mapped_arg, ParamSpecType):
suffix = mapped_arg.copy_modified(
prefix=Parameters(
mapped_arg.prefix.arg_types[len(prefix.arg_types) :],
mapped_arg.prefix.arg_kinds[len(prefix.arg_kinds) :],
mapped_arg.prefix.arg_names[len(prefix.arg_names) :],
)
)
res.append(Constraint(template_arg, SUBTYPE_OF, suffix))
res.append(Constraint(template_arg, SUPERTYPE_OF, suffix))
elif isinstance(tvar, TypeVarTupleType):
# Consider variadic type variables to be invariant.
res.extend(infer_constraints(template_arg, mapped_arg, SUBTYPE_OF))
res.extend(infer_constraints(template_arg, mapped_arg, SUPERTYPE_OF))
return res
if (
template.type.is_protocol
and self.direction == SUPERTYPE_OF
and
# We avoid infinite recursion for structural subtypes by checking
# whether this type already appeared in the inference chain.
# This is a conservative way to break the inference cycles.
# It never produces any "false" constraints but gives up soon
# on purely structural inference cycles, see #3829.
# Note that we use is_protocol_implementation instead of is_subtype
# because some type may be considered a subtype of a protocol
# due to _promote, but still not implement the protocol.
not any(template == t for t in reversed(template.type.inferring))
and mypy.subtypes.is_protocol_implementation(instance, erased, skip=["__call__"])
):
template.type.inferring.append(template)
res.extend(
self.infer_constraints_from_protocol_members(
instance, template, original_actual, template
)
)
template.type.inferring.pop()
return res
elif (
instance.type.is_protocol
and self.direction == SUBTYPE_OF
and
# We avoid infinite recursion for structural subtypes also here.
not any(instance == i for i in reversed(instance.type.inferring))
and mypy.subtypes.is_protocol_implementation(erased, instance, skip=["__call__"])
):
instance.type.inferring.append(instance)
res.extend(
self.infer_constraints_from_protocol_members(
instance, template, template, instance
)
)
instance.type.inferring.pop()
return res
if res:
return res
if isinstance(actual, AnyType):
return self.infer_against_any(template.args, actual)
if (
isinstance(actual, TupleType)
and is_named_instance(template, TUPLE_LIKE_INSTANCE_NAMES)
and self.direction == SUPERTYPE_OF
):
for item in actual.items:
if isinstance(item, UnpackType):
unpacked = get_proper_type(item.type)
if isinstance(unpacked, TypeVarTupleType):
# Cannot infer anything for T from [T, ...] <: *Ts
continue
assert (
isinstance(unpacked, Instance)
and unpacked.type.fullname == "builtins.tuple"
)
item = unpacked.args[0]
cb = infer_constraints(template.args[0], item, SUPERTYPE_OF)
res.extend(cb)
return res
elif isinstance(actual, TupleType) and self.direction == SUPERTYPE_OF:
return infer_constraints(template, mypy.typeops.tuple_fallback(actual), self.direction)
elif isinstance(actual, TypeVarType):
if not actual.values and not actual.id.is_meta_var():
return infer_constraints(template, actual.upper_bound, self.direction)
return []
elif isinstance(actual, ParamSpecType):
return infer_constraints(template, actual.upper_bound, self.direction)
elif isinstance(actual, TypeVarTupleType):
raise NotImplementedError
else:
return []
def infer_constraints_from_protocol_members(
self,
instance: Instance,
template: Instance,
subtype: Type,
protocol: Instance,
class_obj: bool = False,
) -> list[Constraint]:
"""Infer constraints for situations where either 'template' or 'instance' is a protocol.
The 'protocol' is the one of two that is an instance of protocol type, 'subtype'
is the type used to bind self during inference. Currently, we just infer constrains for
every protocol member type (both ways for settable members).
"""
res = []
for member in protocol.type.protocol_members:
inst = mypy.subtypes.find_member(member, instance, subtype, class_obj=class_obj)
temp = mypy.subtypes.find_member(member, template, subtype)
if inst is None or temp is None:
if member == "__call__":
continue
return [] # See #11020
# The above is safe since at this point we know that 'instance' is a subtype
# of (erased) 'template', therefore it defines all protocol members
if class_obj:
# For class objects we must only infer constraints if possible, otherwise it
# can lead to confusion between class and instance, for example StrEnum is
# Iterable[str] for an instance, but Iterable[StrEnum] for a class object.
if not mypy.subtypes.is_subtype(
inst, erase_typevars(temp), ignore_pos_arg_names=True
):
continue
# This exception matches the one in typeops.py, see PR #14121 for context.
if member == "__call__" and instance.type.is_metaclass(precise=True):
continue
res.extend(infer_constraints(temp, inst, self.direction))
if mypy.subtypes.IS_SETTABLE in mypy.subtypes.get_member_flags(member, protocol):
# Settable members are invariant, add opposite constraints
res.extend(infer_constraints(temp, inst, neg_op(self.direction)))
return res
def visit_callable_type(self, template: CallableType) -> list[Constraint]:
# Normalize callables before matching against each other.
# Note that non-normalized callables can be created in annotations
# using e.g. callback protocols.
# TODO: check that callables match? Ideally we should not infer constraints
# callables that can never be subtypes of one another in given direction.
template = template.with_unpacked_kwargs().with_normalized_var_args()
extra_tvars = False
if isinstance(self.actual, CallableType):
res: list[Constraint] = []
cactual = self.actual.with_unpacked_kwargs().with_normalized_var_args()
param_spec = template.param_spec()
template_ret_type, cactual_ret_type = template.ret_type, cactual.ret_type
if template.type_guard is not None and cactual.type_guard is not None:
template_ret_type = template.type_guard
cactual_ret_type = cactual.type_guard
if template.type_is is not None and cactual.type_is is not None:
template_ret_type = template.type_is
cactual_ret_type = cactual.type_is
res.extend(infer_constraints(template_ret_type, cactual_ret_type, self.direction))
if param_spec is None:
# TODO: Erase template variables if it is generic?
if (
type_state.infer_polymorphic
and cactual.variables
and not self.skip_neg_op
# Technically, the correct inferred type for application of e.g.
# Callable[..., T] -> Callable[..., T] (with literal ellipsis), to a generic
# like U -> U, should be Callable[..., Any], but if U is a self-type, we can
# allow it to leak, to be later bound to self. A bunch of existing code
# depends on this old behaviour.
and not (
any(tv.id.is_self() for tv in cactual.variables)
and template.is_ellipsis_args
)
):
# If the actual callable is generic, infer constraints in the opposite
# direction, and indicate to the solver there are extra type variables
# to solve for (see more details in mypy/solve.py).
res.extend(
infer_constraints(
cactual, template, neg_op(self.direction), skip_neg_op=True
)
)
extra_tvars = True
# We can't infer constraints from arguments if the template is Callable[..., T]
# (with literal '...').
if not template.is_ellipsis_args:
unpack_present = find_unpack_in_list(template.arg_types)
# When both ParamSpec and TypeVarTuple are present, things become messy
# quickly. For now, we only allow ParamSpec to "capture" TypeVarTuple,
# but not vice versa.
# TODO: infer more from prefixes when possible.
if unpack_present is not None and not cactual.param_spec():
# We need to re-normalize args to the form they appear in tuples,
# for callables we always pack the suffix inside another tuple.
unpack = template.arg_types[unpack_present]
assert isinstance(unpack, UnpackType)
tuple_type = get_tuple_fallback_from_unpack(unpack)
template_types = repack_callable_args(template, tuple_type)
actual_types = repack_callable_args(cactual, tuple_type)
# Now we can use the same general helper as for tuple types.
unpack_constraints = build_constraints_for_simple_unpack(
template_types, actual_types, neg_op(self.direction)
)
res.extend(unpack_constraints)
else:
# TODO: do we need some special-casing when unpack is present in actual
# callable but not in template callable?
res.extend(
infer_callable_arguments_constraints(template, cactual, self.direction)
)
else:
prefix = param_spec.prefix
prefix_len = len(prefix.arg_types)
cactual_ps = cactual.param_spec()
if type_state.infer_polymorphic and cactual.variables and not self.skip_neg_op:
# Similar logic to the branch above.
res.extend(
infer_constraints(
cactual, template, neg_op(self.direction), skip_neg_op=True
)
)
extra_tvars = True
# Compare prefixes as well
cactual_prefix = cactual.copy_modified(
arg_types=cactual.arg_types[:prefix_len],
arg_kinds=cactual.arg_kinds[:prefix_len],
arg_names=cactual.arg_names[:prefix_len],
)
res.extend(
infer_callable_arguments_constraints(prefix, cactual_prefix, self.direction)
)
param_spec_target: Type | None = None
if not cactual_ps:
max_prefix_len = len([k for k in cactual.arg_kinds if k in (ARG_POS, ARG_OPT)])
prefix_len = min(prefix_len, max_prefix_len)
param_spec_target = Parameters(
arg_types=cactual.arg_types[prefix_len:],
arg_kinds=cactual.arg_kinds[prefix_len:],
arg_names=cactual.arg_names[prefix_len:],
variables=cactual.variables if not type_state.infer_polymorphic else [],
imprecise_arg_kinds=cactual.imprecise_arg_kinds,
)
else:
if len(param_spec.prefix.arg_types) <= len(cactual_ps.prefix.arg_types):
param_spec_target = cactual_ps.copy_modified(
prefix=Parameters(
arg_types=cactual_ps.prefix.arg_types[prefix_len:],
arg_kinds=cactual_ps.prefix.arg_kinds[prefix_len:],
arg_names=cactual_ps.prefix.arg_names[prefix_len:],
imprecise_arg_kinds=cactual_ps.prefix.imprecise_arg_kinds,
)
)
if param_spec_target is not None:
res.append(Constraint(param_spec, self.direction, param_spec_target))
if extra_tvars:
for c in res:
c.extra_tvars += cactual.variables
return res
elif isinstance(self.actual, AnyType):
param_spec = template.param_spec()
any_type = AnyType(TypeOfAny.from_another_any, source_any=self.actual)
if param_spec is None:
# FIX what if generic
res = self.infer_against_any(template.arg_types, self.actual)
else:
res = [
Constraint(
param_spec,
SUBTYPE_OF,
Parameters([any_type, any_type], [ARG_STAR, ARG_STAR2], [None, None]),
)
]
res.extend(infer_constraints(template.ret_type, any_type, self.direction))
return res
elif isinstance(self.actual, Overloaded):
return self.infer_against_overloaded(self.actual, template)
elif isinstance(self.actual, TypeType):
return infer_constraints(template.ret_type, self.actual.item, self.direction)
elif isinstance(self.actual, Instance):
# Instances with __call__ method defined are considered structural
# subtypes of Callable with a compatible signature.
call = mypy.subtypes.find_member(
"__call__", self.actual, self.actual, is_operator=True
)
if call:
return infer_constraints(template, call, self.direction)
else:
return []
else:
return []
def infer_against_overloaded(
self, overloaded: Overloaded, template: CallableType
) -> list[Constraint]:
# Create constraints by matching an overloaded type against a template.
# This is tricky to do in general. We cheat by only matching against
# the first overload item that is callable compatible. This
# seems to work somewhat well, but we should really use a more
# reliable technique.
item = find_matching_overload_item(overloaded, template)
return infer_constraints(template, item, self.direction)
    def visit_tuple_type(self, template: TupleType) -> list[Constraint]:
        """Infer constraints for a (possibly variadic) tuple template.

        Handles three shapes of the actual type: a fixed-length TupleType, a
        homogeneous variadic tuple (builtins.tuple[X, ...]), and Any. An
        Unpack[...] item in either the template or the actual tuple requires
        splitting the items into prefix/middle/suffix before matching.
        """
        actual = self.actual
        unpack_index = find_unpack_in_list(template.items)
        # True when the actual type is a homogeneous tuple[X, ...] instance.
        is_varlength_tuple = (
            isinstance(actual, Instance) and actual.type.fullname == "builtins.tuple"
        )
        if isinstance(actual, TupleType) or is_varlength_tuple:
            res: list[Constraint] = []
            if unpack_index is not None:
                if is_varlength_tuple:
                    # Variadic tuple can be only a supertype of a tuple type, but even if
                    # direction is opposite, inferring something may give better error messages.
                    unpack_type = template.items[unpack_index]
                    assert isinstance(unpack_type, UnpackType)
                    unpacked_type = get_proper_type(unpack_type.type)
                    if isinstance(unpacked_type, TypeVarTupleType):
                        res = [
                            Constraint(type_var=unpacked_type, op=self.direction, target=actual)
                        ]
                    else:
                        assert (
                            isinstance(unpacked_type, Instance)
                            and unpacked_type.type.fullname == "builtins.tuple"
                        )
                        res = infer_constraints(unpacked_type, actual, self.direction)
                    assert isinstance(actual, Instance)  # ensured by is_varlength_tuple == True
                    for i, ti in enumerate(template.items):
                        if i == unpack_index:
                            # This one we just handled above.
                            continue
                        # For Tuple[T, *Ts, S] <: tuple[X, ...] infer also T <: X and S <: X.
                        res.extend(infer_constraints(ti, actual.args[0], self.direction))
                    return res
                else:
                    # Template has an unpack and actual is a fixed-length tuple:
                    # all item-wise matching is delegated to the helper, so the
                    # pairwise loop below is given empty item sequences.
                    assert isinstance(actual, TupleType)
                    unpack_constraints = build_constraints_for_simple_unpack(
                        template.items, actual.items, self.direction
                    )
                    actual_items: tuple[Type, ...] = ()
                    template_items: tuple[Type, ...] = ()
                    res.extend(unpack_constraints)
            elif isinstance(actual, TupleType):
                a_unpack_index = find_unpack_in_list(actual.items)
                if a_unpack_index is not None:
                    # The case where template tuple doesn't have an unpack, but actual tuple
                    # has an unpack. We can infer something if actual unpack is a variadic tuple.
                    # Tuple[T, S, U] <: tuple[X, *tuple[Y, ...], Z] => T <: X, S <: Y, U <: Z.
                    a_unpack = actual.items[a_unpack_index]
                    assert isinstance(a_unpack, UnpackType)
                    a_unpacked = get_proper_type(a_unpack.type)
                    if len(actual.items) + 1 <= len(template.items):
                        a_prefix_len = a_unpack_index
                        a_suffix_len = len(actual.items) - a_unpack_index - 1
                        t_prefix, t_middle, t_suffix = split_with_prefix_and_suffix(
                            tuple(template.items), a_prefix_len, a_suffix_len
                        )
                        actual_items = tuple(actual.items[:a_prefix_len])
                        if a_suffix_len:
                            actual_items += tuple(actual.items[-a_suffix_len:])
                        template_items = t_prefix + t_suffix
                        if isinstance(a_unpacked, Instance):
                            assert a_unpacked.type.fullname == "builtins.tuple"
                            # Each middle template item matches the variadic
                            # item type of the actual tuple.
                            for tm in t_middle:
                                res.extend(
                                    infer_constraints(tm, a_unpacked.args[0], self.direction)
                                )
                    else:
                        # Template is too short to line up around the actual
                        # unpack: no reliable item-wise matching is possible.
                        actual_items = ()
                        template_items = ()
                else:
                    # Neither side is variadic: plain pairwise matching below.
                    actual_items = tuple(actual.items)
                    template_items = tuple(template.items)
            else:
                return res
            # Cases above will return if actual wasn't a TupleType.
            assert isinstance(actual, TupleType)
            if len(actual_items) == len(template_items):
                if (
                    actual.partial_fallback.type.is_named_tuple
                    and template.partial_fallback.type.is_named_tuple
                ):
                    # For named tuples using just the fallbacks usually gives better results.
                    return res + infer_constraints(
                        template.partial_fallback, actual.partial_fallback, self.direction
                    )
                for i in range(len(template_items)):
                    res.extend(
                        infer_constraints(template_items[i], actual_items[i], self.direction)
                    )
            res.extend(
                infer_constraints(
                    template.partial_fallback, actual.partial_fallback, self.direction
                )
            )
            return res
        elif isinstance(actual, AnyType):
            return self.infer_against_any(template.items, actual)
        else:
            return []
def visit_typeddict_type(self, template: TypedDictType) -> list[Constraint]:
actual = self.actual
if isinstance(actual, TypedDictType):
res: list[Constraint] = []
# NOTE: Non-matching keys are ignored. Compatibility is checked
# elsewhere so this shouldn't be unsafe.
for item_name, template_item_type, actual_item_type in template.zip(actual):
res.extend(infer_constraints(template_item_type, actual_item_type, self.direction))
return res
elif isinstance(actual, AnyType):
return self.infer_against_any(template.items.values(), actual)
else:
return []
    def visit_union_type(self, template: UnionType) -> list[Constraint]:
        # Unreachable: infer_constraints() decomposes union templates before
        # dispatching to this visitor, so hitting this method is a bug.
        assert False, (
            "Unexpected UnionType in ConstraintBuilderVisitor"
            " (should have been handled in infer_constraints)"
        )
    def visit_type_alias_type(self, template: TypeAliasType) -> list[Constraint]:
        # Unreachable: alias types are expected to be expanded before
        # constraint inference runs.
        assert False, f"This should be never called, got {template}"
def infer_against_any(self, types: Iterable[Type], any_type: AnyType) -> list[Constraint]:
res: list[Constraint] = []
# Some items may be things like `*Tuple[*Ts, T]` for example from callable types with
# suffix after *arg, so flatten them.
for t in flatten_nested_tuples(types):
if isinstance(t, UnpackType):
if isinstance(t.type, TypeVarTupleType):
res.append(Constraint(t.type, self.direction, any_type))
else:
unpacked = get_proper_type(t.type)
assert isinstance(unpacked, Instance)
res.extend(infer_constraints(unpacked, any_type, self.direction))
else:
# Note that we ignore variance and simply always use the
# original direction. This is because for Any targets direction is
# irrelevant in most cases, see e.g. is_same_constraint().
res.extend(infer_constraints(t, any_type, self.direction))
return res
def visit_overloaded(self, template: Overloaded) -> list[Constraint]:
if isinstance(self.actual, CallableType):
items = find_matching_overload_items(template, self.actual)
else:
items = template.items
res: list[Constraint] = []
for t in items:
res.extend(infer_constraints(t, self.actual, self.direction))
return res
def visit_type_type(self, template: TypeType) -> list[Constraint]:
if isinstance(self.actual, CallableType):
return infer_constraints(template.item, self.actual.ret_type, self.direction)
elif isinstance(self.actual, Overloaded):
return infer_constraints(template.item, self.actual.items[0].ret_type, self.direction)
elif isinstance(self.actual, TypeType):
return infer_constraints(template.item, self.actual.item, self.direction)
elif isinstance(self.actual, AnyType):
return infer_constraints(template.item, self.actual, self.direction)
else:
return []
def neg_op(op: int) -> int:
    """Map SubtypeOf to SupertypeOf and vice versa.

    Raises ValueError for anything that is not one of the two directions.
    """
    if op == SUBTYPE_OF:
        return SUPERTYPE_OF
    if op == SUPERTYPE_OF:
        return SUBTYPE_OF
    raise ValueError(f"Invalid operator {op}")
def find_matching_overload_item(overloaded: Overloaded, template: CallableType) -> CallableType:
    """Disambiguate overload item against a template.

    Returns the first overload item that is callable-compatible with the
    template. The return type may be indeterminate in the template, so it is
    ignored during the subtype check. When nothing matches we fall back to the
    first item -- this is totally arbitrary; maybe we should just bail out.
    """
    items = overloaded.items
    matching = (
        item
        for item in items
        if mypy.subtypes.is_callable_compatible(
            item,
            template,
            is_compat=mypy.subtypes.is_subtype,
            is_proper_subtype=False,
            ignore_return=True,
        )
    )
    return next(matching, items[0])
def find_matching_overload_items(
    overloaded: Overloaded, template: CallableType
) -> list[CallableType]:
    """Like find_matching_overload_item, but return all matches, not just the first."""
    # Return type may be indeterminate in the template, so ignore it when
    # performing the subtype checks.
    matching = [
        item
        for item in overloaded.items
        if mypy.subtypes.is_callable_compatible(
            item,
            template,
            is_compat=mypy.subtypes.is_subtype,
            is_proper_subtype=False,
            ignore_return=True,
        )
    ]
    if matching:
        return matching
    # Falling back to all items if we can't find a match is pretty arbitrary,
    # but it maintains backward compatibility.
    return overloaded.items.copy()
def get_tuple_fallback_from_unpack(unpack: UnpackType) -> TypeInfo:
    """Get builtins.tuple type from available types to construct homogeneous tuples."""
    proper = get_proper_type(unpack.type)
    if isinstance(proper, Instance) and proper.type.fullname == "builtins.tuple":
        # Unpack of a homogeneous tuple: the Instance carries the TypeInfo.
        return proper.type
    if isinstance(proper, TypeVarTupleType):
        # A TypeVarTuple records its own tuple fallback.
        return proper.tuple_fallback.type
    if isinstance(proper, TupleType):
        # Fixed-length tuple: walk the fallback's MRO to find builtins.tuple.
        for base in proper.partial_fallback.type.mro:
            if base.fullname == "builtins.tuple":
                return base
    assert False, "Invalid unpack type"
def repack_callable_args(callable: CallableType, tuple_type: TypeInfo) -> list[Type]:
    """Present callable with star unpack in a normalized form.

    Since positional arguments cannot follow star argument, they are packed in a suffix,
    while prefix is represented as individual positional args. We want to put all in a single
    list with unpack in the middle, and prefix/suffix on the sides (as they would appear
    in e.g. a TupleType).
    """
    if ARG_STAR not in callable.arg_kinds:
        # No *args at all: the argument list is already in normal form.
        return callable.arg_types
    star_index = callable.arg_kinds.index(ARG_STAR)
    prefix = callable.arg_types[:star_index]
    star_type = callable.arg_types[star_index]
    suffix: list[Type] = []
    if isinstance(star_type, UnpackType):
        unpacked = get_proper_type(star_type.type)
        if isinstance(unpacked, TupleType):
            # *args: *Tuple[X, *Ts, Y] -- hoist the inner unpack out and turn
            # the trailing items into an explicit suffix.
            assert isinstance(unpacked.items[0], UnpackType)
            star_type = unpacked.items[0]
            suffix = unpacked.items[1:]
    else:
        # Re-normalize *args: X -> *args: *tuple[X, ...]
        star_type = UnpackType(Instance(tuple_type, [star_type]))
    return prefix + [star_type] + suffix
def build_constraints_for_simple_unpack(
    template_args: list[Type], actual_args: list[Type], direction: int
) -> list[Constraint]:
    """Infer constraints between two lists of types with variadic items.
    This function is only supposed to be called when a variadic item is present in templates.
    If there is no variadic item the actuals, we simply use split_with_prefix_and_suffix()
    and infer prefix <: prefix, suffix <: suffix, variadic <: middle. If there is a variadic
    item in the actuals we need to be more careful, only common prefix/suffix can generate
    constraints, also we can only infer constraints for variadic template item, if template
    prefix/suffix are shorter that actual ones, otherwise there may be partial overlap
    between variadic items, for example if template prefix is longer:
        templates: T1, T2, Ts, Ts, Ts, ...
        actuals:   A1, As, As, As, ...
    Note: this function can only be called for builtin variadic constructors: Tuple and Callable.
    For instances, you should first find correct type argument mapping.
    """
    template_unpack = find_unpack_in_list(template_args)
    assert template_unpack is not None
    # The unpack's position fixes the template's fixed prefix/suffix lengths.
    template_prefix = template_unpack
    template_suffix = len(template_args) - template_prefix - 1
    # t_unpack stays None when no constraint may be inferred for the template's
    # variadic item (possible partial overlap, see the docstring).
    t_unpack = None
    res = []
    actual_unpack = find_unpack_in_list(actual_args)
    if actual_unpack is None:
        t_unpack = template_args[template_unpack]
        if template_prefix + template_suffix > len(actual_args):
            # These can't be subtypes of each-other, return fast.
            assert isinstance(t_unpack, UnpackType)
            if isinstance(t_unpack.type, TypeVarTupleType):
                # Set TypeVarTuple to empty to improve error messages.
                return [
                    Constraint(
                        t_unpack.type, direction, TupleType([], t_unpack.type.tuple_fallback)
                    )
                ]
            else:
                return []
        common_prefix = template_prefix
        common_suffix = template_suffix
    else:
        actual_prefix = actual_unpack
        actual_suffix = len(actual_args) - actual_prefix - 1
        # Only items in the shared prefix/suffix may generate constraints.
        common_prefix = min(template_prefix, actual_prefix)
        common_suffix = min(template_suffix, actual_suffix)
        if actual_prefix >= template_prefix and actual_suffix >= template_suffix:
            # This is the only case where we can guarantee there will be no partial overlap
            # (note however partial overlap is OK for variadic tuples, it is handled below).
            t_unpack = template_args[template_unpack]
    # Handle constraints from prefixes/suffixes first.
    start, middle, end = split_with_prefix_and_suffix(
        tuple(actual_args), common_prefix, common_suffix
    )
    for t, a in zip(template_args[:common_prefix], start):
        res.extend(infer_constraints(t, a, direction))
    if common_suffix:
        for t, a in zip(template_args[-common_suffix:], end):
            res.extend(infer_constraints(t, a, direction))
    if t_unpack is not None:
        # Add constraint(s) for variadic item when possible.
        assert isinstance(t_unpack, UnpackType)
        tp = get_proper_type(t_unpack.type)
        if isinstance(tp, Instance) and tp.type.fullname == "builtins.tuple":
            # Homogeneous case *tuple[T, ...] <: [X, Y, Z, ...].
            for a in middle:
                # TODO: should we use union instead of join here?
                if not isinstance(a, UnpackType):
                    res.extend(infer_constraints(tp.args[0], a, direction))
                else:
                    a_tp = get_proper_type(a.type)
                    # This is the case *tuple[T, ...] <: *tuple[A, ...].
                    if isinstance(a_tp, Instance) and a_tp.type.fullname == "builtins.tuple":
                        res.extend(infer_constraints(tp.args[0], a_tp.args[0], direction))
        elif isinstance(tp, TypeVarTupleType):
            # Match the TypeVarTuple against everything between the common
            # prefix and suffix of the actuals.
            res.append(Constraint(tp, direction, TupleType(list(middle), tp.tuple_fallback)))
    elif actual_unpack is not None:
        # A special case for a variadic tuple unpack, we simply infer T <: X from
        # Tuple[..., *tuple[T, ...], ...] <: Tuple[..., *tuple[X, ...], ...].
        actual_unpack_type = actual_args[actual_unpack]
        assert isinstance(actual_unpack_type, UnpackType)
        a_unpacked = get_proper_type(actual_unpack_type.type)
        if isinstance(a_unpacked, Instance) and a_unpacked.type.fullname == "builtins.tuple":
            t_unpack = template_args[template_unpack]
            assert isinstance(t_unpack, UnpackType)
            tp = get_proper_type(t_unpack.type)
            if isinstance(tp, Instance) and tp.type.fullname == "builtins.tuple":
                res.extend(infer_constraints(tp.args[0], a_unpacked.args[0], direction))
    return res
def infer_directed_arg_constraints(left: Type, right: Type, direction: int) -> list[Constraint]:
    """Infer constraints between two arguments using direction between original callables."""
    special = (ParamSpecType, UnpackType)
    if isinstance(left, special) or isinstance(right, special):
        # This avoids bogus constraints like T <: P.args
        # TODO: can we infer something useful for *T vs P?
        return []
    # Arguments are contravariant, so the direction is always inverted; for the
    # supertype direction the operands are swapped as well.
    template, actual = (left, right) if direction == SUBTYPE_OF else (right, left)
    return infer_constraints(template, actual, neg_op(direction))
def infer_callable_arguments_constraints(
    template: NormalizedCallableType | Parameters,
    actual: NormalizedCallableType | Parameters,
    direction: int,
) -> list[Constraint]:
    """Infer constraints between argument types of two callables.
    This function essentially extracts four steps from are_parameters_compatible() in
    subtypes.py that involve subtype checks between argument types. We keep the argument
    matching logic, but ignore various strictness flags present there, and checks that
    do not involve subtyping. Then in place of every subtype check we put an infer_constraints()
    call for the same types.
    """
    res = []
    # Orient the pair so that "left" plays the subtype role, mirroring the
    # left/right naming used by are_parameters_compatible().
    if direction == SUBTYPE_OF:
        left, right = template, actual
    else:
        left, right = actual, template
    left_star = left.var_arg()
    left_star2 = left.kw_arg()
    right_star = right.var_arg()
    right_star2 = right.kw_arg()
    # Numbering of steps below matches the one in are_parameters_compatible() for convenience.
    # Phase 1a: compare star vs star arguments.
    if left_star is not None and right_star is not None:
        res.extend(infer_directed_arg_constraints(left_star.typ, right_star.typ, direction))
    if left_star2 is not None and right_star2 is not None:
        res.extend(infer_directed_arg_constraints(left_star2.typ, right_star2.typ, direction))
    # Phase 1b: compare left args with corresponding non-star right arguments.
    for right_arg in right.formal_arguments():
        left_arg = mypy.typeops.callable_corresponding_argument(left, right_arg)
        if left_arg is None:
            # No matching left argument; compatibility is checked elsewhere.
            continue
        res.extend(infer_directed_arg_constraints(left_arg.typ, right_arg.typ, direction))
    # Phase 1c: compare left args with right *args.
    if right_star is not None:
        # Synthesize a positional argument from the right *args item and match
        # every left positional argument from that position onwards against it.
        right_by_position = right.try_synthesizing_arg_from_vararg(None)
        assert right_by_position is not None
        i = right_star.pos
        assert i is not None
        while i < len(left.arg_kinds) and left.arg_kinds[i].is_positional():
            left_by_position = left.argument_by_position(i)
            assert left_by_position is not None
            res.extend(
                infer_directed_arg_constraints(
                    left_by_position.typ, right_by_position.typ, direction
                )
            )
            i += 1
    # Phase 1d: compare left args with right **kwargs.
    if right_star2 is not None:
        # Named left arguments with no named counterpart on the right are
        # matched against an argument synthesized from the right **kwargs.
        right_names = {name for name in right.arg_names if name is not None}
        left_only_names = set()
        for name, kind in zip(left.arg_names, left.arg_kinds):
            if name is None or kind.is_star() or name in right_names:
                continue
            left_only_names.add(name)
        right_by_name = right.try_synthesizing_arg_from_kwarg(None)
        assert right_by_name is not None
        for name in left_only_names:
            left_by_name = left.argument_by_name(name)
            assert left_by_name is not None
            res.extend(
                infer_directed_arg_constraints(left_by_name.typ, right_by_name.typ, direction)
            )
    return res
def filter_imprecise_kinds(cs: list[Constraint]) -> list[Constraint]:
    """For each ParamSpec remove all imprecise constraints, if at least one precise available.

    A constraint on a ParamSpec is "precise" when its target is another
    ParamSpecType or a Parameters without imprecise argument kinds. All other
    constraints (non-ParamSpec origins, or ParamSpecs with no precise
    alternative) are kept unconditionally.
    """
    # First pass: collect the ParamSpecs that have at least one precise target.
    have_precise = set()
    for c in cs:
        if not isinstance(c.origin_type_var, ParamSpecType):
            continue
        if (
            isinstance(c.target, ParamSpecType)
            or isinstance(c.target, Parameters)
            and not c.target.imprecise_arg_kinds
        ):
            have_precise.add(c.type_var)
    # Second pass: drop imprecise constraints for those ParamSpecs.
    new_cs = []
    for c in cs:
        if not isinstance(c.origin_type_var, ParamSpecType) or c.type_var not in have_precise:
            new_cs.append(c)
            # BUGFIX: without this `continue` a constraint passing the check
            # above was appended a second time by the check below, so the
            # result contained duplicates instead of being a filtered list.
            continue
        if not isinstance(c.target, Parameters) or not c.target.imprecise_arg_kinds:
            new_cs.append(c)
    return new_cs
| ConstraintBuilderVisitor |
python | scipy__scipy | scipy/special/tests/test_hyp2f1.py | {
"start": 1740,
"end": 91399
} | class ____:
"""Tests for hyp2f1 for complex values.
Expected values for test cases were computed using mpmath. See
`scipy.special._precompute.hyp2f1_data`. The verbose style of specifying
test cases is used for readability and to make it easier to mark individual
cases as expected to fail. Expected failures are used to highlight cases
where improvements are needed. See
`scipy.special._precompute.hyp2f1_data.make_hyp2f1_test_cases` for a
function to generate the boilerplate for the test cases.
Assertions have been added to each test to ensure that the test cases match
the situations that are intended. A final test `test_test_hyp2f1` checks
that the expected values in the test cases actually match what is computed
by mpmath. This test is marked slow even though it isn't particularly slow
so that it won't run by default on continuous integration builds.
"""
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=-10,
z=0.2 + 0.2j,
expected=np.inf + 0j,
rtol=0
)
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=-10,
z=0 + 0j,
expected=1 + 0j,
rtol=0
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0,
c=-10,
z=0.2 + 0.2j,
expected=1 + 0j,
rtol=0
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0,
c=0,
z=0.2 + 0.2j,
expected=1 + 0j,
rtol=0,
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=0,
z=0.2 + 0.2j,
expected=np.inf + 0j,
rtol=0,
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=0,
z=0 + 0j,
expected=np.nan + 0j,
rtol=0,
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=-5,
c=-10,
z=0.2 + 0.2j,
expected=(1.0495404166666666+0.05708208333333334j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=-10,
c=-10,
z=0.2 + 0.2j,
expected=(1.092966013125+0.13455014673750001j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-10,
b=-20,
c=-10,
z=0.2 + 0.2j,
expected=(-0.07712512000000005+0.12752814080000005j),
rtol=1e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1,
b=3.2,
c=-1,
z=0.2 + 0.2j,
expected=(1.6400000000000001+0.6400000000000001j),
rtol=1e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-2,
b=1.2,
c=-4,
z=1 + 0j,
expected=1.8200000000000001 + 0j,
rtol=1e-15,
),
),
]
)
def test_c_non_positive_int(self, hyp2f1_test_case):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=1.5,
z=1 + 0j,
expected=1.1496439092239847 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=12.3,
b=8.0,
c=20.31,
z=1 + 0j,
expected=69280986.75273195 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=290.2,
b=321.5,
c=700.1,
z=1 + 0j,
expected=1.3396562400934e117 + 0j,
rtol=1e-12,
),
),
# Note that here even mpmath produces different results for
# results that should be equivalent.
pytest.param(
Hyp2f1TestCase(
a=9.2,
b=621.5,
c=700.1,
z=(1+0j),
expected=(952726652.4158565+0j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=621.5,
b=9.2,
c=700.1,
z=(1+0j),
expected=(952726652.4160284+0j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-101.2,
b=-400.4,
c=-172.1,
z=(1+0j),
expected=(2.2253618341394838e+37+0j),
rtol=1e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-400.4,
b=-101.2,
c=-172.1,
z=(1+0j),
expected=(2.2253618341394838e+37+0j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=172.5,
b=-201.3,
c=151.2,
z=(1+0j),
expected=(7.072266653650905e-135+0j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-201.3,
b=172.5,
c=151.2,
z=(1+0j),
expected=(7.072266653650905e-135+0j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-102.1,
b=-20.3,
c=1.3,
z=1 + 0j,
expected=2.7899070752746906e22 + 0j,
rtol=3e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-202.6,
b=60.3,
c=1.5,
z=1 + 0j,
expected=-1.3113641413099326e-56 + 0j,
rtol=1e-12,
),
),
],
)
def test_unital_argument(self, hyp2f1_test_case):
"""Tests for case z = 1, c - a - b > 0.
Expected answers computed using mpmath.
"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert z == 1 and c - a - b > 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=1.3,
z=-1 + 0j,
expected=0.9428846409614143 + 0j,
rtol=1e-15),
),
pytest.param(
Hyp2f1TestCase(
a=12.3,
b=8.0,
c=5.300000000000001,
z=-1 + 0j,
expected=-4.845809986595704e-06 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=221.5,
b=90.2,
c=132.3,
z=-1 + 0j,
expected=2.0490488728377282e-42 + 0j,
rtol=1e-7,
),
),
pytest.param(
Hyp2f1TestCase(
a=-102.1,
b=-20.3,
c=-80.8,
z=-1 + 0j,
expected=45143784.46783885 + 0j,
rtol=1e-7,
),
marks=pytest.mark.xfail(
condition=sys.maxsize < 2**32,
reason="Fails on 32 bit.",
)
),
],
)
def test_special_case_z_near_minus_1(self, hyp2f1_test_case):
"""Tests for case z ~ -1, c ~ 1 + a - b
Expected answers computed using mpmath.
"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert abs(1 + a - b - c) < 1e-15 and abs(z + 1) < 1e-15
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=-4,
b=2.02764642551431,
c=1.0561196186065624,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.0031961077109535375-0.0011313924606557173j),
rtol=1e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-8,
b=-7.937789122896016,
c=-15.964218273004214,
z=(2-0.10526315789473695j),
expected=(0.005543763196412503-0.0025948879065698306j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-8,
b=8.095813935368371,
c=4.0013768449590685,
z=(0.9473684210526314-0.10526315789473695j),
expected=(-0.0003054674127221263-9.261359291755414e-05j),
rtol=1e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=-4,
b=-3.956227226099288,
c=-3.9316537064827854,
z=(1.1578947368421053-0.3157894736842106j),
expected=(-0.0020809502580892937-0.0041877333232365095j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=-4,
c=2.050308316530781,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.0011282435590058734+0.0002027062303465851j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-8,
c=-15.964218273004214,
z=(1.3684210526315788+0.10526315789473673j),
expected=(-9.134907719238265e-05-0.00040219233987390723j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-4,
c=4.0013768449590685,
z=(0.9473684210526314-0.10526315789473695j),
expected=(-0.000519013062087489-0.0005855883076830948j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-10000,
b=2.2,
c=93459345.3,
z=(2+2j),
expected=(0.9995292071559088-0.00047047067522659253j),
rtol=1e-12,
),
),
]
)
def test_a_b_negative_int(self, hyp2f1_test_case):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert a == int(a) and a < 0 or b == int(b) and b < 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-0.9629749245209605,
c=-15.5,
z=(1.1578947368421053-1.1578947368421053j),
expected=(0.9778506962676361+0.044083801141231616j),
rtol=3e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-3.9316537064827854,
c=1.5,
z=(0.9473684210526314-0.10526315789473695j),
expected=(4.0793167523167675-10.11694246310966j),
rtol=6e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-0.9629749245209605,
c=2.5,
z=(1.1578947368421053-0.10526315789473695j),
expected=(-2.9692999501916915+0.6394599899845594j),
rtol=1e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-0.9629749245209605,
c=-15.5,
z=(1.5789473684210522-1.1578947368421053j),
expected=(0.9493076367106102-0.04316852977183447j),
rtol=1e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.5,
c=-15.5,
z=(0.5263157894736841+0.10526315789473673j),
expected=(0.9844377175631795-0.003120587561483841j),
rtol=1e-10,
),
),
],
)
def test_a_b_neg_int_after_euler_hypergeometric_transformation(
self, hyp2f1_test_case
):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert ( # Tests the test
(abs(c - a - int(c - a)) < 1e-15 and c - a < 0) or
(abs(c - b - int(c - b)) < 1e-15 and c - b < 0)
)
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.9629749245209605,
c=-15.963511401609862,
z=(0.10526315789473673-0.3157894736842106j),
expected=(0.9941449585778349+0.01756335047931358j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-0.9629749245209605,
c=-15.963511401609862,
z=(0.5263157894736841+0.5263157894736841j),
expected=(1.0388722293372104-0.09549450380041416j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=1.0561196186065624,
c=-7.93846038215665,
z=(0.10526315789473673+0.7368421052631575j),
expected=(2.1948378809826434+24.934157235172222j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=16.088264119063613,
c=8.031683612216888,
z=(0.3157894736842106-0.736842105263158j),
expected=(-0.4075277891264672-0.06819344579666956j),
rtol=2e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=2.050308316530781,
c=8.031683612216888,
z=(0.7368421052631575-0.10526315789473695j),
expected=(2.833535530740603-0.6925373701408158j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=2.050308316530781,
c=4.078873014294075,
z=(0.10526315789473673-0.3157894736842106j),
expected=(1.005347176329683-0.3580736009337313j),
rtol=5e-16,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.9629749245209605,
c=-15.963511401609862,
z=(0.3157894736842106-0.5263157894736843j),
expected=(0.9824353641135369+0.029271018868990268j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.9629749245209605,
c=-159.63511401609862,
z=(0.3157894736842106-0.5263157894736843j),
expected=(0.9982436200365834+0.002927268199671111j),
rtol=1e-7,
),
marks=pytest.mark.xfail(reason="Poor convergence.")
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=16.088264119063613,
c=8.031683612216888,
z=(0.5263157894736841-0.5263157894736843j),
expected=(-0.6906825165778091+0.8176575137504892j),
rtol=5e-13,
),
),
]
)
def test_region1(self, hyp2f1_test_case):
"""|z| < 0.9 and real(z) >= 0."""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert abs(z) < 0.9 and z.real >= 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=1.0561196186065624,
c=4.078873014294075,
z=(-0.3157894736842106+0.7368421052631575j),
expected=(0.7751915029081136+0.24068493258607315j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=16.088264119063613,
c=2.0397202577726152,
z=(-0.9473684210526316-0.3157894736842106j),
expected=(6.564549348474962e-07+1.6761570598334562e-06j),
rtol=5e-09,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=2.050308316530781,
c=16.056809865262608,
z=(-0.10526315789473695-0.10526315789473695j),
expected=(0.9862043298997204-0.013293151372712681j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=8.077282662161238,
c=16.056809865262608,
z=(-0.3157894736842106-0.736842105263158j),
expected=(0.16163826638754716-0.41378530376373734j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=2.050308316530781,
c=-0.906685989801748,
z=(-0.5263157894736843+0.3157894736842106j),
expected=(-6.256871535165936+0.13824973858225484j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=8.077282662161238,
c=-3.9924618758357022,
z=(-0.9473684210526316-0.3157894736842106j),
expected=(75.54672526086316+50.56157041797548j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=8.077282662161238,
c=-1.9631175993998025,
z=(-0.5263157894736843+0.5263157894736841j),
expected=(282.0602536306534-82.31597306936214j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-3.9316537064827854,
c=8.031683612216888,
z=(-0.5263157894736843-0.10526315789473695j),
expected=(5.179603735575851+1.4445374002099813j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-7.949900487447654,
c=1.0651378143226575,
z=(-0.3157894736842106-0.9473684210526316j),
expected=(2317.623517606141-269.51476321010324j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=-1.92872979730171,
c=2.0397202577726152,
z=(-0.736842105263158-0.3157894736842106j),
expected=(29.179154096175836+22.126690357535043j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-3.9316537064827854,
c=-15.963511401609862,
z=(-0.736842105263158-0.10526315789473695j),
expected=(0.20820247892032057-0.04763956711248794j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-15.964218273004214,
c=-1.9631175993998025,
z=(-0.3157894736842106-0.5263157894736843j),
expected=(-157471.63920142158+991294.0587828817j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-7.949900487447654,
c=-7.93846038215665,
z=(-0.10526315789473695-0.10526315789473695j),
expected=(0.30765349653210194-0.2979706363594157j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=1.0561196186065624,
c=8.031683612216888,
z=(-0.9473684210526316-0.10526315789473695j),
expected=(1.6787607400597109+0.10056620134616838j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=16.088264119063613,
c=4.078873014294075,
z=(-0.5263157894736843-0.736842105263158j),
expected=(7062.07842506049-12768.77955655703j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=16.088264119063613,
c=2.0397202577726152,
z=(-0.3157894736842106+0.7368421052631575j),
expected=(54749.216391029935-23078.144720887536j),
rtol=2e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=1.0561196186065624,
c=-0.906685989801748,
z=(-0.10526315789473695-0.10526315789473695j),
expected=(1.21521766411428-4.449385173946672j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=4.0013768449590685,
c=-1.9631175993998025,
z=(-0.736842105263158+0.5263157894736841j),
expected=(19234693144.196907+1617913967.7294445j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=1.0561196186065624,
c=-15.963511401609862,
z=(-0.5263157894736843+0.3157894736842106j),
expected=(0.9345201094534371+0.03745712558992195j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-0.9629749245209605,
c=2.0397202577726152,
z=(-0.10526315789473695+0.10526315789473673j),
expected=(0.605732446296829+0.398171533680972j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=-15.964218273004214,
c=2.0397202577726152,
z=(-0.10526315789473695-0.5263157894736843j),
expected=(-9.753761888305416-4.590126012666959j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-1.92872979730171,
c=2.0397202577726152,
z=(-0.10526315789473695+0.3157894736842106j),
expected=(0.45587226291120714+1.0694545265819797j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-7.949900487447654,
c=-0.906685989801748,
z=(-0.736842105263158+0.3157894736842106j),
expected=(12.334808243233418-76.26089051819054j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-7.949900487447654,
c=-15.963511401609862,
z=(-0.5263157894736843+0.10526315789473673j),
expected=(1.2396019687632678-0.047507973161146286j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=-0.9629749245209605,
c=-0.906685989801748,
z=(-0.3157894736842106-0.5263157894736843j),
expected=(97.7889554372208-18.999754543400016j),
rtol=5e-13,
),
),
]
)
def test_region2(self, hyp2f1_test_case):
"""|z| < 1 and real(z) < 0."""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert abs(z) < 1 and z.real < 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=-3.75,
c=-3.5,
z=(0.5263157894736841+0.7368421052631575j),
expected=(-1279.4894322256655-2302.914821389276j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.75,
b=8.25,
c=-1.5,
z=(0.9473684210526314+0.3157894736842106j),
expected=(-8889.452798586273-11961.162305065242j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.25,
b=2.25,
c=-1.5,
z=(0.5263157894736841-0.736842105263158j),
expected=(-236.58971357952055-238.5228224781136j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.75,
b=-7.75,
c=-15.5,
z=(0.5263157894736841+0.7368421052631575j),
expected=(0.8116076584352279-0.29360565398246036j),
rtol=5e-16,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.25,
b=4.25,
c=-0.5,
z=(0.5263157894736841-0.736842105263158j),
expected=(-28.119407485189985+98.89858821348005j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.75,
b=2.25,
c=1.5,
z=(0.5263157894736841+0.7368421052631575j),
expected=(0.5311049067450484-0.9434347326448517j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.25,
b=-15.75,
c=-7.5,
z=(0.9473684210526314+0.10526315789473673j),
expected=(1262084.378141873+1775569.6338380123j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.75,
b=-7.75,
c=-15.5,
z=(0.5263157894736841-0.736842105263158j),
expected=(-0.009810480794804165+0.3648997569257999j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.25,
b=2.25,
c=-3.5,
z=(0.5263157894736841-0.736842105263158j),
expected=(585660.8815535795-33646.68398590896j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-3.9316537064827854,
c=-15.963511401609862,
z=(0.9473684210526314-0.10526315789473695j),
expected=(181899621848365.2-173207123998705.7j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.75,
b=8.25,
c=-0.5,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.04271686244952705-0.14087902824639406j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-1.92872979730171,
c=-0.906685989801748,
z=(0.9473684210526314-0.3157894736842106j),
expected=(-449.5119088817207+320.1423128036188j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=1.0561196186065624,
c=8.031683612216888,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.6361479738012501+0.028575620091205088j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.25,
b=16.25,
c=16.5,
z=(0.5263157894736841+0.7368421052631575j),
expected=(-0.9038811840552261-1.5356250756164884j),
rtol=1e-8,
),
marks=pytest.mark.xfail(
reason="Unhandled parameters."
)
),
pytest.param(
Hyp2f1TestCase(
a=8.25,
b=-1.75,
c=-1.5,
z=(0.9473684210526314+0.3157894736842106j),
expected=(653.0109150415394-4554.162605155542j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.75,
b=-3.75,
c=4.5,
z=(0.9473684210526314-0.10526315789473695j),
expected=(118.7009859241035-34.18713648654642j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.25,
b=-15.75,
c=-3.5,
z=(0.5263157894736841+0.7368421052631575j),
expected=(-540204.4774526551+4970059.109251281j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.25,
b=-15.75,
c=-0.5,
z=(0.5263157894736841-0.736842105263158j),
expected=(2253490.972258385+3318620.683390017j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=-7.75,
c=-7.5,
z=(0.9473684210526314+0.3157894736842106j),
expected=(-46159826.46716958-17880663.82218242j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-7.949900487447654,
c=-7.93846038215665,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.07116833581404514+0.11823358038036977j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=4.0013768449590685,
c=-7.93846038215665,
z=(0.7368421052631575+0.5263157894736841j),
expected=(4.7724909620664006e+17-6.039064078946702e+16j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.25,
b=-7.75,
c=1.5,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.0188022179759303+0.002921737281641378j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=1.0561196186065624,
c=-7.93846038215665,
z=(0.7368421052631575-0.5263157894736843j),
expected=(-9203.462928334846+12390.110518017136j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.75,
b=-15.75,
c=8.5,
z=(0.7368421052631575+0.5263157894736841j),
expected=(6.468457061368628+24.190040684917374j),
rtol=6e-16,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=16.088264119063613,
c=2.0397202577726152,
z=(0.7368421052631575+0.5263157894736841j),
expected=(2408.3451340186543-4275.257316636014j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.75,
b=-7.75,
c=8.5,
z=(0.7368421052631575-0.5263157894736843j),
expected=(4.1379984626381345-5.183654781039423j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.25,
b=-7.75,
c=-0.5,
z=(0.5263157894736841+0.7368421052631575j),
expected=(-81177.775295738+56079.73286548954j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=2.050308316530781,
c=-0.906685989801748,
z=(0.9473684210526314+0.3157894736842106j),
expected=(1192868.5068926765+3624210.8182139914j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-1.92872979730171,
c=8.031683612216888,
z=(0.5263157894736841+0.7368421052631575j),
expected=(1.8286341846195202+1.9295255682312178j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=1.0561196186065624,
c=16.056809865262608,
z=(0.7368421052631575-0.5263157894736843j),
expected=(1.0514645669696452-0.0430834059440128j),
rtol=5e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=-15.964218273004214,
c=2.0397202577726152,
z=(0.5263157894736841+0.7368421052631575j),
expected=(541983.236432269+288200.2043029435j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.25,
b=8.25,
c=1.5,
z=(0.5263157894736841-0.736842105263158j),
expected=(-10.931988086039945+1.9136272843579096j),
rtol=1e-15,
),
),
]
)
def test_region3(self, hyp2f1_test_case):
"""0.9 <= |z| <= 1 and |1 - z| < 0.9."""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert 0.9 <= abs(z) <= 1 and abs(1 - z) < 0.9 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=4.25,
c=2.5,
z=(0.4931034482758623-0.7965517241379311j),
expected=(38.41207903409937-30.510151276075792j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.0,
b=16.087593263474208,
c=16.088264119063613,
z=(0.5689655172413794-0.7965517241379311j),
expected=(-0.6667857912761286-1.0206224321443573j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.0,
b=1.0272592605282642,
c=-7.949900487447654,
z=(0.4931034482758623-0.7965517241379311j),
expected=(1679024.1647997478-2748129.775857212j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=16.0,
c=-7.949900487447654,
z=(0.4931034482758623-0.7965517241379311j),
expected=(424747226301.16986-1245539049327.2856j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=-15.964218273004214,
c=4.0,
z=(0.4931034482758623-0.7965517241379311j),
expected=(-0.0057826199201757595+0.026359861999025885j),
rtol=5e-06,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=-0.9629749245209605,
c=2.0397202577726152,
z=(0.5689655172413794-0.7965517241379311j),
expected=(0.4671901063492606+0.7769632229834897j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.0,
b=-3.956227226099288,
c=-7.949900487447654,
z=(0.4931034482758623+0.7965517241379312j),
expected=(0.9422283708145973+1.3476905754773343j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0,
b=-15.980848054962111,
c=-15.964218273004214,
z=(0.4931034482758623-0.7965517241379311j),
expected=(0.4168719497319604-0.9770953555235625j),
rtol=5e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=16.088264119063613,
c=2.5,
z=(0.5689655172413794+0.7965517241379312j),
expected=(1.279096377550619-2.173827694297929j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=4.0013768449590685,
c=2.0397202577726152,
z=(0.4931034482758623+0.7965517241379312j),
expected=(-2.071520656161738-0.7846098268395909j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=8.0,
c=-0.9629749245209605,
z=(0.5689655172413794-0.7965517241379311j),
expected=(-7.740015495862889+3.386766435696699j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=16.088264119063613,
c=-7.93846038215665,
z=(0.4931034482758623+0.7965517241379312j),
expected=(-6318.553685853241-7133.416085202879j),
rtol=5e-9,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=-3.9316537064827854,
c=16.056809865262608,
z=(0.5689655172413794+0.7965517241379312j),
expected=(-0.8854577905547399+8.135089099967278j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=-0.9629749245209605,
c=4.078873014294075,
z=(0.4931034482758623+0.7965517241379312j),
expected=(1.224291301521487+0.36014711766402485j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.75,
b=-0.75,
c=-1.5,
z=(0.4931034482758623+0.7965517241379312j),
expected=(-1.5765685855028473-3.9399766961046323j),
rtol=1e-3,
),
marks=pytest.mark.xfail(
reason="Unhandled parameters."
)
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=-1.92872979730171,
c=-7.93846038215665,
z=(0.5689655172413794-0.7965517241379311j),
expected=(56.794588688231194+4.556286783533971j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=4.5,
c=2.050308316530781,
z=(0.5689655172413794+0.7965517241379312j),
expected=(-4.251456563455306+6.737837111569671j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=8.5,
c=-1.92872979730171,
z=(0.4931034482758623-0.7965517241379311j),
expected=(2177143.9156599627-3313617.2748088865j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.5,
b=-1.5,
c=4.0013768449590685,
z=(0.4931034482758623-0.7965517241379311j),
expected=(0.45563554481603946+0.6212000158060831j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-7.5,
c=-15.964218273004214,
z=(0.4931034482758623+0.7965517241379312j),
expected=(61.03201617828073-37.185626416756214j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=16.5,
c=4.0013768449590685,
z=(0.4931034482758623+0.7965517241379312j),
expected=(-33143.425963520735+20790.608514722644j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=4.5,
c=-0.9629749245209605,
z=(0.5689655172413794+0.7965517241379312j),
expected=(30.778600270824423-26.65160354466787j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-3.5,
c=16.088264119063613,
z=(0.5689655172413794-0.7965517241379311j),
expected=(1.0629792615560487-0.08308454486044772j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.5,
b=-7.5,
c=-0.9629749245209605,
z=(0.4931034482758623-0.7965517241379311j),
expected=(17431.571802591767+3553.7129767034507j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.25,
b=8.25,
c=16.5,
z=(0.11379310344827598+0.9482758620689657j),
expected=(0.4468600750211926+0.7313214934036885j),
rtol=1e-3,
),
marks=pytest.mark.xfail(
reason="Unhandled parameters."
)
),
pytest.param(
Hyp2f1TestCase(
a=8.25,
b=16.25,
c=4.5,
z=(0.3413793103448277+0.8724137931034486j),
expected=(-3.905704438293991+3.693347860329299j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.25,
b=4.25,
c=-0.5,
z=(0.11379310344827598-0.9482758620689655j),
expected=(-40.31777941834244-89.89852492432011j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=8.0,
c=-15.964218273004214,
z=(0.11379310344827598-0.9482758620689655j),
expected=(52584.347773055284-109197.86244309516j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-15.964218273004214,
c=16.056809865262608,
z=(0.03793103448275881+0.9482758620689657j),
expected=(-1.187733570412592-1.5147865053584582j),
rtol=5e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-3.9316537064827854,
c=1.0651378143226575,
z=(0.26551724137931054+0.9482758620689657j),
expected=(13.077494677898947+35.071599628224966j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-3.5,
c=-3.5,
z=(0.26551724137931054+0.8724137931034486j),
expected=(-0.5359656237994614-0.2344483936591811j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.25,
b=-3.75,
c=-1.5,
z=(0.26551724137931054+0.9482758620689657j),
expected=(1204.8114871663133+64.41022826840198j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=16.0,
c=4.0013768449590685,
z=(0.03793103448275881-0.9482758620689655j),
expected=(-9.85268872413994+7.011107558429154j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=16.0,
c=4.0013768449590685,
z=(0.3413793103448277-0.8724137931034484j),
expected=(528.5522951158454-1412.21630264791j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=1.0561196186065624,
c=-7.5,
z=(0.4172413793103451+0.8724137931034486j),
expected=(133306.45260685298+256510.7045225382j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=8.077282662161238,
c=-15.963511401609862,
z=(0.3413793103448277-0.8724137931034484j),
expected=(-0.998555715276967+2.774198742229889j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.75,
b=-0.75,
c=1.5,
z=(0.11379310344827598-0.9482758620689655j),
expected=(2.072445019723025-2.9793504811373515j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=-1.92872979730171,
c=1.5,
z=(0.11379310344827598-0.9482758620689655j),
expected=(-41.87581944176649-32.52980303527139j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.75,
b=-15.75,
c=-0.5,
z=(0.11379310344827598-0.9482758620689655j),
expected=(-3729.6214864209774-30627.510509112635j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-15.964218273004214,
c=-0.906685989801748,
z=(0.03793103448275881+0.9482758620689657j),
expected=(-131615.07820609974+145596.13384245415j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=16.5,
c=16.088264119063613,
z=(0.26551724137931054+0.8724137931034486j),
expected=(0.18981844071070744+0.7855036242583742j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.5,
b=8.5,
c=-3.9316537064827854,
z=(0.11379310344827598-0.9482758620689655j),
expected=(110224529.2376068+128287212.04290268j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.5,
b=-7.5,
c=4.0013768449590685,
z=(0.3413793103448277-0.8724137931034484j),
expected=(0.2722302180888523-0.21790187837266162j),
rtol=1.2e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-7.5,
c=-15.964218273004214,
z=(0.11379310344827598-0.9482758620689655j),
expected=(-2.8252338010989035+2.430661949756161j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.5,
b=16.5,
c=4.0013768449590685,
z=(0.03793103448275881+0.9482758620689657j),
expected=(-20.604894257647945+74.5109432558078j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=8.5,
c=-0.9629749245209605,
z=(0.3413793103448277+0.8724137931034486j),
expected=(-2764422.521269463-3965966.9965808876j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.5,
b=-0.5,
c=1.0561196186065624,
z=(0.26551724137931054+0.9482758620689657j),
expected=(1.2262338560994905+0.6545051266925549j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-15.5,
c=-7.949900487447654,
z=(0.4172413793103451-0.8724137931034484j),
expected=(-2258.1590330318213+8860.193389158803j),
rtol=1.4e-10,
),
),
]
)
    def test_region4(self, hyp2f1_test_case):
        """0.9 <= |z| <= 1 and |1 - z| >= 0.9.
        This region is unhandled by any of the standard transformations and
        needs special care.
        """
        a, b, c, z, expected, rtol = hyp2f1_test_case
        assert 0.9 <= abs(z) <= 1 and abs(1 - z) >= 0.9  # Tests the test
        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=16.088264119063613,
c=8.5,
z=(0.6448275862068968+0.8724137931034486j),
expected=(0.018601324701770394-0.07618420586062377j),
rtol=5e-08,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.25,
b=4.25,
c=4.5,
z=(0.6448275862068968-0.8724137931034484j),
expected=(-1.391549471425551-0.118036604903893j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=2.050308316530781,
c=-1.9631175993998025,
z=(0.6448275862068968+0.8724137931034486j),
expected=(-2309.178768155151-1932.7247727595172j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=1.0,
c=-15.964218273004214,
z=(0.6448275862068968+0.8724137931034486j),
expected=(85592537010.05054-8061416766688.324j),
rtol=2e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-0.5,
c=1.5,
z=(0.6448275862068968+0.8724137931034486j),
expected=(1.2334498208515172-2.1639498536219732j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=-15.964218273004214,
c=4.0,
z=(0.6448275862068968+0.8724137931034486j),
expected=(102266.35398605966-44976.97828737755j),
rtol=1e-3,
),
marks=pytest.mark.xfail(
reason="Unhandled parameters."
)
),
pytest.param(
Hyp2f1TestCase(
a=4.0,
b=-3.956227226099288,
c=-15.964218273004214,
z=(0.6448275862068968-0.8724137931034484j),
expected=(-2.9590030930007236-4.190770764773225j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-15.5,
c=-7.5,
z=(0.5689655172413794-0.8724137931034484j),
expected=(-112554838.92074208+174941462.9202412j),
rtol=5e-05,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=2.050308316530781,
c=1.0,
z=(0.6448275862068968-0.8724137931034484j),
expected=(3.7519882374080145+7.360753798667486j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=2.050308316530781,
c=4.0,
z=(0.6448275862068968-0.8724137931034484j),
expected=(0.000181132943964693+0.07742903103815582j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=4.0013768449590685,
c=-1.9631175993998025,
z=(0.5689655172413794+0.8724137931034486j),
expected=(386338.760913596-386166.51762171905j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=8.0,
c=-1.92872979730171,
z=(0.6448275862068968+0.8724137931034486j),
expected=(1348667126.3444858-2375132427.158893j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.5,
b=-0.9629749245209605,
c=4.5,
z=(0.5689655172413794+0.8724137931034486j),
expected=(1.428353429538678+0.6472718120804372j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-0.9629749245209605,
c=2.0397202577726152,
z=(0.5689655172413794-0.8724137931034484j),
expected=(3.1439267526119643-3.145305240375117j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=-15.964218273004214,
c=-7.93846038215665,
z=(0.6448275862068968-0.8724137931034484j),
expected=(75.27467675681773+144.0946946292215j),
rtol=1e-07,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.75,
b=-7.75,
c=-7.5,
z=(0.5689655172413794+0.8724137931034486j),
expected=(-0.3699450626264222+0.8732812475910993j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=16.5,
c=1.0561196186065624,
z=(0.5689655172413794-0.8724137931034484j),
expected=(5.5361025821300665-2.4709693474656285j),
rtol=5e-09,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=8.5,
c=-3.9316537064827854,
z=(0.6448275862068968-0.8724137931034484j),
expected=(-782805.6699207705-537192.581278909j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.5,
b=-15.5,
c=1.0561196186065624,
z=(0.6448275862068968+0.8724137931034486j),
expected=(12.345113400639693-14.993248992902007j),
rtol=0.0005,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=-0.5,
c=-15.964218273004214,
z=(0.6448275862068968+0.8724137931034486j),
expected=(23.698109392667842+97.15002033534108j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=16.5,
c=4.0013768449590685,
z=(0.6448275862068968-0.8724137931034484j),
expected=(1115.2978631811834+915.9212658718577j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=16.5,
c=-0.9629749245209605,
z=(0.6448275862068968+0.8724137931034486j),
expected=(642077722221.6489+535274495398.21027j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=-3.5,
c=4.0013768449590685,
z=(0.5689655172413794+0.8724137931034486j),
expected=(-5.689219222945697+16.877463062787143j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=-1.5,
c=-0.9629749245209605,
z=(0.5689655172413794-0.8724137931034484j),
expected=(-44.32070290703576+1026.9127058617403j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=2.25,
c=4.5,
z=(0.11379310344827598-1.024137931034483j),
expected=(-0.021965227124574663+0.009908300237809064j),
rtol=1e-3,
),
marks=pytest.mark.xfail(
reason="Unhandled parameters."
)
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=1.5,
c=16.5,
z=(0.26551724137931054+1.024137931034483j),
expected=(1.0046072901244183+0.19945500134119992j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=1.0,
c=-3.9316537064827854,
z=(0.3413793103448277+0.9482758620689657j),
expected=(21022.30133421465+49175.98317370489j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=16.088264119063613,
c=-1.9631175993998025,
z=(0.4172413793103451-0.9482758620689655j),
expected=(-7024239.358547302+2481375.02681063j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=-15.75,
c=1.5,
z=(0.18965517241379315+1.024137931034483j),
expected=(92371704.94848-403546832.548352j),
rtol=5e-06,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-7.949900487447654,
c=8.5,
z=(0.26551724137931054-1.024137931034483j),
expected=(1.9335109845308265+5.986542524829654j),
rtol=5e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-1.92872979730171,
c=-7.93846038215665,
z=(0.4931034482758623+0.8724137931034486j),
expected=(-122.52639696039328-59.72428067512221j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=-1.75,
c=-1.5,
z=(0.4931034482758623+0.9482758620689657j),
expected=(-90.40642053579428+50.50649180047921j),
rtol=5e-08,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.5,
b=8.077282662161238,
c=16.5,
z=(0.4931034482758623+0.9482758620689657j),
expected=(-0.2155745818150323-0.564628986876639j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=1.0561196186065624,
c=8.031683612216888,
z=(0.4172413793103451-0.9482758620689655j),
expected=(0.9503140488280465+0.11574960074292677j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.75,
b=2.25,
c=-15.5,
z=(0.4172413793103451+0.9482758620689657j),
expected=(0.9285862488442175+0.8203699266719692j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.75,
b=4.25,
c=-15.5,
z=(0.3413793103448277-0.9482758620689655j),
expected=(-1.0509834850116921-1.1145522325486075j),
rtol=1.1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-0.9629749245209605,
c=2.0397202577726152,
z=(0.4931034482758623-0.9482758620689655j),
expected=(2.88119116536769-3.4249933450696806j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=-15.964218273004214,
c=16.5,
z=(0.18965517241379315+1.024137931034483j),
expected=(199.65868451496038+347.79384207302877j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.75,
b=-15.75,
c=-3.5,
z=(0.4931034482758623-0.8724137931034484j),
expected=(-208138312553.07013+58631611809.026955j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-15.5,
c=-7.5,
z=(0.3413793103448277+0.9482758620689657j),
expected=(-23032.90519856288-18256.94050457296j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=1.5,
c=1.0561196186065624,
z=(0.4931034482758623-0.8724137931034484j),
expected=(1.507342459587056+1.2332023580148403j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.5,
b=4.5,
c=-3.9316537064827854,
z=(0.4172413793103451+0.9482758620689657j),
expected=(7044.766127108853-40210.365567285575j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=-1.5,
c=1.0561196186065624,
z=(0.03793103448275881+1.024137931034483j),
expected=(0.2725347741628333-2.247314875514784j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=-1.5,
c=-7.949900487447654,
z=(0.26551724137931054+1.024137931034483j),
expected=(-11.250200011017546+12.597393659160472j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=8.5,
c=16.088264119063613,
z=(0.26551724137931054+1.024137931034483j),
expected=(-0.18515160890991517+0.7959014164484782j),
rtol=2e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=16.5,
c=-3.9316537064827854,
z=(0.3413793103448277-1.024137931034483j),
expected=(998246378.8556538+1112032928.103645j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.5,
b=-3.5,
c=2.050308316530781,
z=(0.03793103448275881+1.024137931034483j),
expected=(0.5527670397711952+2.697662715303637j),
rtol=1.2e-15, # rtol bumped from 1e-15 in gh18414
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=-1.5,
c=-0.9629749245209605,
z=(0.4931034482758623-0.8724137931034484j),
expected=(55.396931662136886+968.467463806326j),
rtol=5e-14,
),
),
]
)
def test_region5(self, hyp2f1_test_case):
"""1 < |z| < 1.1 and |1 - z| >= 0.9 and real(z) >= 0"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert 1 < abs(z) < 1.1 and abs(1 - z) >= 0.9 and z.real >= 0
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=4.0013768449590685,
c=4.078873014294075,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(-0.0018093573941378783+0.003481887377423739j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=2.050308316530781,
c=1.0651378143226575,
z=(-0.736842105263158-0.736842105263158j),
expected=(-0.00023401243818780545-1.7983496305603562e-05j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=8.077282662161238,
c=4.078873014294075,
z=(-0.5263157894736843-0.9473684210526316j),
expected=(0.22359773002226846-0.24092487123993353j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=2.050308316530781,
c=-15.963511401609862,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(1.191573745740011+0.14347394589721466j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=4.0013768449590685,
c=-15.963511401609862,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(31.822620756901784-66.09094396747611j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=8.077282662161238,
c=-7.93846038215665,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(207.16750179245952+34.80478274924269j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-7.949900487447654,
c=8.031683612216888,
z=(-0.736842105263158+0.7368421052631575j),
expected=(-159.62429364277145+9.154224290644898j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-1.92872979730171,
c=16.056809865262608,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(1.121122351247184-0.07170260470126685j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=-0.9629749245209605,
c=16.056809865262608,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(1.9040596681316053-0.4951799449960107j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-1.92872979730171,
c=-0.906685989801748,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(-14.496623497780739-21.897524523299875j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-3.9316537064827854,
c=-3.9924618758357022,
z=(-0.5263157894736843-0.9473684210526316j),
expected=(36.33473466026878+253.88728442029577j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-15.964218273004214,
c=-0.906685989801748,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(1505052.5653144997-50820766.81043443j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=4.0013768449590685,
c=1.0651378143226575,
z=(-0.5263157894736843+0.9473684210526314j),
expected=(-127.79407519260877-28.69899444941112j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=8.077282662161238,
c=16.056809865262608,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(2.0623331933754976+0.741234463565458j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=8.077282662161238,
c=2.0397202577726152,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(30.729193458862525-292.5700835046965j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=1.0561196186065624,
c=-1.9631175993998025,
z=(-0.5263157894736843-0.9473684210526316j),
expected=(1.1285917906203495-0.735264575450189j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=1.0561196186065624,
c=-3.9924618758357022,
z=(-0.736842105263158+0.7368421052631575j),
expected=(0.6356474446678052-0.02429663008952248j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=16.088264119063613,
c=-7.93846038215665,
z=(-0.736842105263158+0.7368421052631575j),
expected=(0.4718880510273174+0.655083067736377j),
rtol=1e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-3.9316537064827854,
c=16.056809865262608,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(-0.14681550942352714+0.16092206364265146j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-15.964218273004214,
c=1.0651378143226575,
z=(-0.5263157894736843+0.9473684210526314j),
expected=(-6.436835190526225+22.883156700606182j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-7.949900487447654,
c=4.078873014294075,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(-0.7505682955068583-1.1026583264249945j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-3.9316537064827854,
c=-7.93846038215665,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(3.6247814989198166+2.596041360148318j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-15.964218273004214,
c=-1.9631175993998025,
z=(-0.5263157894736843-0.9473684210526316j),
expected=(-59537.65287927933-669074.4342539902j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-15.964218273004214,
c=-1.9631175993998025,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(-433084.9970266166+431088.393918521j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1,
b=1,
c=4,
z=(3 + 4j),
expected=(0.49234384000963544+0.6051340616612397j),
rtol=5e-14,
),
),
]
)
def test_region6(self, hyp2f1_test_case):
"""|z| > 1 but not in region 5."""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert (
abs(z) > 1 and
not (1 < abs(z) < 1.1 and abs(1 - z) >= 0.9 and z.real >= 0)
)
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
# Broke when fixing gamma pole behavior in gh-21827
pytest.param(
Hyp2f1TestCase(
a=1.3,
b=-0.2,
c=0.3,
z=-2.1,
expected=1.8202169687521206,
rtol=5e-15,
),
),
]
)
def test_miscellaneous(self, hyp2f1_test_case ):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.slow
@check_version(mpmath, "1.0.0")
def test_test_hyp2f1(self):
"""Test that expected values match what is computed by mpmath.
This gathers the parameters for the test cases out of the pytest marks.
The parameters are a, b, c, z, expected, rtol, where expected should
be the value of hyp2f1(a, b, c, z) computed with mpmath. The test
recomputes hyp2f1(a, b, c, z) using mpmath and verifies that expected
actually is the correct value. This allows the data for the tests to
live within the test code instead of an external datafile, while
avoiding having to compute the results with mpmath during the test,
except for when slow tests are being run.
"""
test_methods = [
test_method for test_method in dir(self)
if test_method.startswith('test') and
# Filter properties and attributes (futureproofing).
callable(getattr(self, test_method)) and
# Filter out this test
test_method != 'test_test_hyp2f1'
]
for test_method in test_methods:
params = self._get_test_parameters(getattr(self, test_method))
for a, b, c, z, expected, _ in params:
assert_allclose(mp_hyp2f1(a, b, c, z), expected, rtol=2.25e-16)
def _get_test_parameters(self, test_method):
"""Get pytest.mark parameters for a test in this class."""
return [
case.values[0] for mark in test_method.pytestmark
if mark.name == 'parametrize'
for case in mark.args[1]
]
| TestHyp2f1 |
python | conda__conda | conda/plugins/reporter_backends/json.py | {
"start": 1806,
"end": 2855
} | class ____(ReporterRendererBase):
"""
Default implementation for JSON reporting in conda
"""
def render(self, data: Any, **kwargs) -> str:
return json.dumps(data)
def detail_view(self, data: dict[str, str | int | bool], **kwargs) -> str:
return json.dumps(data)
def envs_list(
self, data: list[str] | dict[str, dict[str, str | bool | None]], **kwargs
) -> str:
if isinstance(data, (list, tuple)):
return json.dumps({"envs": data})
return json.dumps(data)
def progress_bar(
self,
description: str,
**kwargs,
) -> ProgressBarBase:
return JSONProgressBar(description, **kwargs)
def spinner(self, message: str, fail_message: str = "failed\n") -> SpinnerBase:
return JSONSpinner(message, fail_message)
def prompt(
self, message: str = "Proceed", choices=("yes", "no"), default: str = "yes"
) -> str:
"""
For this class, we want this method to do nothing
"""
| JSONReporterRenderer |
python | PrefectHQ__prefect | tests/blocks/test_notifications.py | {
"start": 5708,
"end": 9663
} | class ____:
async def test_notify_async(self):
with patch("apprise.Apprise", autospec=True) as AppriseMock:
apprise_instance_mock = AppriseMock.return_value
apprise_instance_mock.async_notify = AsyncMock()
mm_block = MattermostWebhook(
hostname="example.com",
token="token",
include_image=True,
)
await mm_block.notify("test")
AppriseMock.assert_called_once()
apprise_instance_mock.add.assert_called_once_with(
f"mmost://{mm_block.hostname}:8065/{mm_block.token.get_secret_value()}/"
"?image=yes&format=text&overflow=upstream"
)
apprise_instance_mock.async_notify.assert_awaited_once_with(
body="test", title="", notify_type=PREFECT_NOTIFY_TYPE_DEFAULT
)
def test_notify_secure(self):
with patch("apprise.Apprise", autospec=True) as AppriseMock:
apprise_instance_mock = AppriseMock.return_value
apprise_instance_mock.async_notify = AsyncMock()
mm_block = MattermostWebhook(
hostname="example.com", token="token", secure=True, port=443
)
@flow
def test_flow():
mm_block.notify("test")
test_flow()
AppriseMock.assert_called_once()
apprise_instance_mock.add.assert_called_once_with(
f"mmosts://{mm_block.hostname}/{mm_block.token.get_secret_value()}/"
"?image=no&format=text&overflow=upstream"
)
apprise_instance_mock.async_notify.assert_called_once_with(
body="test", title="", notify_type=PREFECT_NOTIFY_TYPE_DEFAULT
)
def test_notify_sync(self):
with patch("apprise.Apprise", autospec=True) as AppriseMock:
apprise_instance_mock = AppriseMock.return_value
apprise_instance_mock.async_notify = AsyncMock()
mm_block = MattermostWebhook(hostname="example.com", token="token")
@flow
def test_flow():
mm_block.notify("test")
test_flow()
AppriseMock.assert_called_once()
apprise_instance_mock.add.assert_called_once_with(
f"mmost://{mm_block.hostname}:8065/{mm_block.token.get_secret_value()}/"
"?image=no&format=text&overflow=upstream"
)
apprise_instance_mock.async_notify.assert_called_once_with(
body="test", title="", notify_type=PREFECT_NOTIFY_TYPE_DEFAULT
)
def test_notify_with_multiple_channels(self):
with patch("apprise.Apprise", autospec=True) as AppriseMock:
apprise_instance_mock = AppriseMock.return_value
apprise_instance_mock.async_notify = AsyncMock()
mm_block = MattermostWebhook(
hostname="example.com",
token="token",
channels=["general", "death-metal-anonymous"],
)
@flow
def test_flow():
mm_block.notify("test")
test_flow()
AppriseMock.assert_called_once()
apprise_instance_mock.add.assert_called_once_with(
f"mmost://{mm_block.hostname}:8065/{mm_block.token.get_secret_value()}/"
"?image=no&format=text&overflow=upstream"
"&channel=death-metal-anonymous%2Cgeneral"
)
apprise_instance_mock.async_notify.assert_called_once_with(
body="test", title="", notify_type=PREFECT_NOTIFY_TYPE_DEFAULT
)
def test_is_picklable(self):
block = MattermostWebhook(token="token", hostname="example.com")
pickled = cloudpickle.dumps(block)
unpickled = cloudpickle.loads(pickled)
assert isinstance(unpickled, MattermostWebhook)
| TestMattermostWebhook |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_math_ops_test.py | {
"start": 14547,
"end": 17259
} | class ____(test_util.TensorFlowTestCase):
def _generateRandomWeakTensor(self, dtype, shape):
if dtype.is_integer:
array = np.random.default_rng().integers(
low=dtype.min, high=dtype.max, size=shape, endpoint=True)
return _get_weak_tensor(array, dtype=dtype)
else:
array = np.random.default_rng().uniform(low=-1.0, high=1.0, size=shape)
return _get_weak_tensor(array, dtype=dtype)
def _getValidDtypes(self):
return (dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64)
def testArgMax(self):
shape = (24, 8)
for dtype in self._getValidDtypes():
tf_values = self._generateRandomWeakTensor(dtype, shape)
np_values = self.evaluate(tf_values)
for axis in range(0, len(shape)):
np_max = np.argmax(np_values, axis=axis)
tf_max = math_ops.argmax(tf_values, axis=axis)
self.assertAllEqual(tf_max, np_max)
def testArgMaxReturnsFirstOccurence(self):
for dtype in self._getValidDtypes():
values = _get_weak_tensor(
[[10, 11, 15, 15, 10], [12, 12, 10, 10, 12]], dtype=dtype
)
self.assertAllEqual(
math_ops.argmax(values, axis=1),
np.argmax(self.evaluate(values), axis=1))
# Long tensor to ensure works with multithreading/GPU
values = array_ops.zeros(shape=(193681,), dtype=dtype)
self.assertAllEqual(math_ops.argmax(values), 0)
def testArgMaxUint16(self):
shape = (24, 8)
for dtype in self._getValidDtypes():
tf_values = self._generateRandomWeakTensor(dtype, shape)
np_values = self.evaluate(tf_values)
for axis in range(0, len(shape)):
np_max = np.argmax(np_values, axis=axis)
tf_max = math_ops.argmax(
tf_values, axis=axis, output_type=dtypes.uint16)
self.assertAllEqual(tf_max, np_max)
def testArgMin(self):
shape = (24, 8)
for dtype in self._getValidDtypes():
tf_values = self._generateRandomWeakTensor(dtype, shape)
np_values = self.evaluate(tf_values)
for axis in range(0, len(shape)):
np_min = np.argmin(np_values, axis=axis)
tf_min = math_ops.argmin(tf_values, axis=axis)
self.assertAllEqual(tf_min, np_min)
def testArgMinReturnsFirstOccurence(self):
for dtype in self._getValidDtypes():
values = _get_weak_tensor(
[[10, 11, 15, 15, 10], [12, 12, 10, 10, 12]], dtype=dtype
)
self.assertAllEqual(
math_ops.argmin(values, axis=1),
np.argmin(self.evaluate(values), axis=1))
# Long tensor to ensure works with multithreading/GPU
values = array_ops.zeros(shape=(193681,), dtype=dtype)
self.assertAllEqual(math_ops.argmin(values), 0)
| ArgMaxMinTest |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 40040,
"end": 40914
} | class ____(test_util.TensorFlowTestCase):
def testBasic(self):
for dtype in [np.float32, np.float64]:
values = [0, 1, np.nan, np.inf, -np.inf]
x = constant_op.constant(values, dtype=dtype)
zeros = constant_op.constant(np.zeros((5,)), dtype=dtype)
ones = constant_op.constant(np.ones((5,)), dtype=dtype)
with test_util.use_gpu():
tf_result_zeros = math_ops.multiply_no_nan(x, zeros)
self.assertAllEqual(tf_result_zeros, zeros)
tf_result_ones = math_ops.multiply_no_nan(x, ones)
self.assertAllEqual(tf_result_ones, x)
# Normal floating point arithmetic if nonfinite values are in the
# second argument.
tf_result_reverseargs = math_ops.multiply_no_nan(zeros, x)
self.assertAllEqual(zeros * x, tf_result_reverseargs)
@test_util.run_all_in_graph_and_eager_modes
| MultiplyNoNanTest |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 11022,
"end": 11375
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.l1 = torch.nn.Linear(10, 10)
self.l2 = torch.nn.ReLU()
self.l3 = torch.nn.Linear(10, 10)
self.l4 = torch.nn.ReLU()
def forward(self, x):
for block in self.children():
x = block(x)
return x
| Children |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 9151,
"end": 9296
} | class ____(EllipticCurve):
name = "secp192r1"
key_size = 192
group_order = 0xFFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831
| SECP192R1 |
python | pyqtgraph__pyqtgraph | pyqtgraph/console/exception_widget.py | {
"start": 201,
"end": 9786
} | class ____(QtWidgets.QGroupBox):
sigStackItemClicked = QtCore.Signal(object, object) # self, item
sigStackItemDblClicked = QtCore.Signal(object, object) # self, item
_threadException = QtCore.Signal(object)
def __init__(self, parent=None):
super().__init__(parent)
self._setupUi()
self.filterString = ''
self._inSystrace = False
# send exceptions raised in non-gui threads back to the main thread by signal.
self._threadException.connect(self._threadExceptionHandler)
def _setupUi(self):
self.setTitle("Exception Handling")
self.layout = QtWidgets.QGridLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setHorizontalSpacing(2)
self.layout.setVerticalSpacing(0)
self.clearExceptionBtn = QtWidgets.QPushButton("Clear Stack", self)
self.clearExceptionBtn.setEnabled(False)
self.layout.addWidget(self.clearExceptionBtn, 0, 6, 1, 1)
self.catchAllExceptionsBtn = QtWidgets.QPushButton("Show All Exceptions", self)
self.catchAllExceptionsBtn.setCheckable(True)
self.layout.addWidget(self.catchAllExceptionsBtn, 0, 1, 1, 1)
self.catchNextExceptionBtn = QtWidgets.QPushButton("Show Next Exception", self)
self.catchNextExceptionBtn.setCheckable(True)
self.layout.addWidget(self.catchNextExceptionBtn, 0, 0, 1, 1)
self.onlyUncaughtCheck = QtWidgets.QCheckBox("Only Uncaught Exceptions", self)
self.onlyUncaughtCheck.setChecked(True)
self.layout.addWidget(self.onlyUncaughtCheck, 0, 4, 1, 1)
self.stackTree = StackWidget(self)
self.layout.addWidget(self.stackTree, 2, 0, 1, 7)
self.runSelectedFrameCheck = QtWidgets.QCheckBox("Run commands in selected stack frame", self)
self.runSelectedFrameCheck.setChecked(True)
self.layout.addWidget(self.runSelectedFrameCheck, 3, 0, 1, 7)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.layout.addItem(spacerItem, 0, 5, 1, 1)
self.filterLabel = QtWidgets.QLabel("Filter (regex):", self)
self.layout.addWidget(self.filterLabel, 0, 2, 1, 1)
self.filterText = QtWidgets.QLineEdit(self)
self.layout.addWidget(self.filterText, 0, 3, 1, 1)
self.catchAllExceptionsBtn.toggled.connect(self.catchAllExceptions)
self.catchNextExceptionBtn.toggled.connect(self.catchNextException)
self.clearExceptionBtn.clicked.connect(self.clearExceptionClicked)
self.stackTree.itemClicked.connect(self.stackItemClicked)
self.stackTree.itemDoubleClicked.connect(self.stackItemDblClicked)
self.onlyUncaughtCheck.toggled.connect(self.updateSysTrace)
self.filterText.textChanged.connect(self._filterTextChanged)
def setStack(self, frame=None):
self.clearExceptionBtn.setEnabled(True)
self.stackTree.setStack(frame)
def setException(self, exc=None, lastFrame=None):
self.clearExceptionBtn.setEnabled(True)
self.stackTree.setException(exc, lastFrame=lastFrame)
def selectedFrame(self):
return self.stackTree.selectedFrame()
def catchAllExceptions(self, catch=True):
"""
If True, the console will catch all unhandled exceptions and display the stack
trace. Each exception caught clears the last.
"""
with SignalBlock(self.catchAllExceptionsBtn.toggled, self.catchAllExceptions):
self.catchAllExceptionsBtn.setChecked(catch)
if catch:
with SignalBlock(self.catchNextExceptionBtn.toggled, self.catchNextException):
self.catchNextExceptionBtn.setChecked(False)
self.enableExceptionHandling()
else:
self.disableExceptionHandling()
def catchNextException(self, catch=True):
"""
If True, the console will catch the next unhandled exception and display the stack
trace.
"""
with SignalBlock(self.catchNextExceptionBtn.toggled, self.catchNextException):
self.catchNextExceptionBtn.setChecked(catch)
if catch:
with SignalBlock(self.catchAllExceptionsBtn.toggled, self.catchAllExceptions):
self.catchAllExceptionsBtn.setChecked(False)
self.enableExceptionHandling()
else:
self.disableExceptionHandling()
def enableExceptionHandling(self):
exceptionHandling.registerCallback(self.exceptionHandler)
self.updateSysTrace()
def disableExceptionHandling(self):
exceptionHandling.unregisterCallback(self.exceptionHandler)
self.updateSysTrace()
def clearExceptionClicked(self):
self.stackTree.clear()
self.clearExceptionBtn.setEnabled(False)
def updateSysTrace(self):
## Install or uninstall sys.settrace handler
if not self.catchNextExceptionBtn.isChecked() and not self.catchAllExceptionsBtn.isChecked():
if sys.gettrace() == self.systrace:
self._disableSysTrace()
return
if self.onlyUncaughtCheck.isChecked():
if sys.gettrace() == self.systrace:
self._disableSysTrace()
else:
if sys.gettrace() not in (None, self.systrace):
self.onlyUncaughtCheck.setChecked(False)
raise Exception("sys.settrace is in use (are you using another debugger?); cannot monitor for caught exceptions.")
else:
self._enableSysTrace()
def _enableSysTrace(self):
# set global trace function
# note: this has no effect on pre-existing frames or threads
# until settrace_all_threads arrives in python 3.12.
sys.settrace(self.systrace) # affects current thread only
threading.settrace(self.systrace) # affects new threads only
if hasattr(threading, 'settrace_all_threads'):
threading.settrace_all_threads(self.systrace)
def _disableSysTrace(self):
sys.settrace(None)
threading.settrace(None)
if hasattr(threading, 'settrace_all_threads'):
threading.settrace_all_threads(None)
def exceptionHandler(self, excInfo, lastFrame=None):
if isinstance(excInfo, Exception):
exc = excInfo
else:
exc = excInfo.exc_value
# exceptions raised in non-gui threads must be sent to the gui thread by signal
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if not isGuiThread:
# note: we are giving the user the ability to modify a frame owned by another thread..
# expect trouble :)
self._threadException.emit((excInfo, lastFrame))
return
if self.catchNextExceptionBtn.isChecked():
self.catchNextExceptionBtn.setChecked(False)
elif not self.catchAllExceptionsBtn.isChecked():
return
self.setException(exc, lastFrame=lastFrame)
def _threadExceptionHandler(self, args):
self.exceptionHandler(*args)
def systrace(self, frame, event, arg):
if event != 'exception':
return self.systrace
if self._inSystrace:
# prevent recursve calling
return self.systrace
self._inSystrace = True
try:
if self.checkException(*arg):
# note: the exception has no __traceback__ at this point!
self.exceptionHandler(arg[1], lastFrame=frame)
except Exception as exc:
print("Exception in systrace:")
traceback.print_exc()
finally:
self._inSystrace = False
return self.systrace
def checkException(self, excType, exc, tb):
## Return True if the exception is interesting; False if it should be ignored.
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
filterStr = self.filterString
if filterStr != '':
if isinstance(exc, Exception):
msg = traceback.format_exception_only(type(exc), exc)
elif isinstance(exc, str):
msg = exc
else:
msg = repr(exc)
match = re.search(filterStr, "%s:%s:%s" % (filename, function, msg))
return match is not None
## Go through a list of common exception points we like to ignore:
if excType is GeneratorExit or excType is StopIteration:
return False
if excType is AttributeError:
if filename.endswith('numpy/core/fromnumeric.py') and function in ('all', '_wrapit', 'transpose', 'sum'):
return False
if filename.endswith('numpy/core/arrayprint.py') and function in ('_array2string'):
return False
if filename.endswith('flowchart/eq.py'):
return False
if excType is TypeError:
if filename.endswith('numpy/lib/function_base.py') and function == 'iterable':
return False
return True
def stackItemClicked(self, item):
self.sigStackItemClicked.emit(self, item)
def stackItemDblClicked(self, item):
self.sigStackItemDblClicked.emit(self, item)
def _filterTextChanged(self, value):
self.filterString = str(value)
| ExceptionHandlerWidget |
python | ray-project__ray | doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/model.py | {
"start": 2584,
"end": 5580
} | class ____(nn.Module):
"""
Decomposition-Linear (DLinear) model.
"""
def __init__(self, configs: Dict[str, Any]):
super().__init__()
self.seq_len: int = configs["seq_len"]
self.pred_len: int = configs["pred_len"]
self.decompsition = series_decomp(kernel_size=KERNEL_SIZE)
self.individual: bool = configs["individual"]
self.channels: int = configs["enc_in"]
if self.individual:
self.Linear_Seasonal = nn.ModuleList()
self.Linear_Trend = nn.ModuleList()
for _ in range(self.channels):
self.Linear_Seasonal.append(nn.Linear(self.seq_len, self.pred_len))
self.Linear_Trend.append(nn.Linear(self.seq_len, self.pred_len))
else:
self.Linear_Seasonal = nn.Linear(self.seq_len, self.pred_len)
self.Linear_Trend = nn.Linear(self.seq_len, self.pred_len)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward pass for the DLinear model.
Args:
x (torch.Tensor): Input tensor. Can be 2D [Batch, SeqLen] (interpreted as 1 channel)
or 3D [Batch, SeqLen, Channels].
Returns:
torch.Tensor: Output tensor of shape [Batch, PredLen, Channels].
"""
# DLinear model (and many time series models) expect input of shape:
# (batch_size, sequence_length, num_input_features).
# seasonal_init, trend_init shapes: [Batch, SeqLen, Channel].
seasonal_init, trend_init = self.decompsition(x)
# Permute to [Batch, Channel, SeqLen] for Linear layers.
seasonal_init = seasonal_init.permute(0, 2, 1)
trend_init = trend_init.permute(0, 2, 1)
if self.individual:
seasonal_output = torch.zeros(
[seasonal_init.size(0), seasonal_init.size(1), self.pred_len],
dtype=seasonal_init.dtype,
).to(seasonal_init.device)
trend_output = torch.zeros(
[trend_init.size(0), trend_init.size(1), self.pred_len],
dtype=trend_init.dtype,
).to(trend_init.device)
for i in range(self.channels):
seasonal_output[:, i, :] = self.Linear_Seasonal[i](
seasonal_init[:, i, :]
)
trend_output[:, i, :] = self.Linear_Trend[i](trend_init[:, i, :])
else:
# seasonal_init shape: [Batch, Channel, SeqLen].
# Linear layer applies to the last dim (SeqLen).
seasonal_output = self.Linear_Seasonal(
seasonal_init
) # Output: [Batch, Channel, PredLen].
trend_output = self.Linear_Trend(
trend_init
) # Output: [Batch, Channel, PredLen].
output_x = seasonal_output + trend_output # Shape: [Batch, Channel, PredLen].
return output_x.permute(0, 2, 1) # Transform to [Batch, PredLen, Channel].
| DLinear |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/config/jinja_template_loader.py | {
"start": 121,
"end": 1441
} | class ____:
"""Reads and render a file with jinja2 templating support and provide additional templating functionality.
Ref: https://jinja.palletsprojects.com/en/stable/
In addition to the built-in global functions and filters, this class provides the following:
Custom jinja2 global function(s):
- get_env(name: str, default: str = "") -> str
Get environment variable if present or return a default value.
Custom jinja2 filter(s):
- to_yaml(input: Any, **kwargs) -> str
Renders a value into its yaml representation.
"""
@staticmethod
def to_yaml(value: Any, **kwargs) -> str:
return yaml.dump(value, **kwargs)
@staticmethod
def get_env(name: str, default: str = "") -> str:
return getenv(name, default)
def render(self, filepath: str, context: Mapping[str, Any]) -> str:
# defer for import performance
from jinja2 import Environment, FileSystemLoader
path = Path(filepath)
template_path = path.resolve().parent
filename = path.name
env = Environment(loader=FileSystemLoader(template_path))
env.filters["to_yaml"] = self.to_yaml
env.globals["get_env"] = self.get_env
template = env.get_template(filename)
return template.render(context)
| JinjaTemplateLoader |
python | getsentry__sentry | fixtures/safe_migrations_apps/good_flow_delete_simple_app/migrations/0003_delete.py | {
"start": 190,
"end": 456
} | class ____(CheckedMigration):
dependencies = [
("good_flow_delete_simple_app", "0002_set_pending"),
]
operations = [
SafeDeleteModel(
name="TestTable",
deletion_action=DeletionAction.DELETE,
),
]
| Migration |
python | apache__thrift | lib/py/src/protocol/TJSONProtocol.py | {
"start": 11907,
"end": 16582
} | class ____(TJSONProtocolBase):
def readMessageBegin(self):
self.resetReadContext()
self.readJSONArrayStart()
if self.readJSONInteger() != VERSION:
raise TProtocolException(TProtocolException.BAD_VERSION,
"Message contained bad version.")
name = self.readJSONString(False)
typen = self.readJSONInteger()
seqid = self.readJSONInteger()
return (name, typen, seqid)
def readMessageEnd(self):
self.readJSONArrayEnd()
def readStructBegin(self):
self.readJSONObjectStart()
def readStructEnd(self):
self.readJSONObjectEnd()
def readFieldBegin(self):
character = self.reader.peek()
ttype = 0
id = 0
if character == RBRACE:
ttype = TType.STOP
else:
id = self.readJSONInteger()
self.readJSONObjectStart()
ttype = JTYPES[self.readJSONString(False)]
return (None, ttype, id)
def readFieldEnd(self):
self.readJSONObjectEnd()
def readMapBegin(self):
self.readJSONArrayStart()
keyType = JTYPES[self.readJSONString(False)]
valueType = JTYPES[self.readJSONString(False)]
size = self.readJSONInteger()
self.readJSONObjectStart()
return (keyType, valueType, size)
def readMapEnd(self):
self.readJSONObjectEnd()
self.readJSONArrayEnd()
def readCollectionBegin(self):
self.readJSONArrayStart()
elemType = JTYPES[self.readJSONString(False)]
size = self.readJSONInteger()
return (elemType, size)
readListBegin = readCollectionBegin
readSetBegin = readCollectionBegin
def readCollectionEnd(self):
self.readJSONArrayEnd()
readSetEnd = readCollectionEnd
readListEnd = readCollectionEnd
def readBool(self):
return (False if self.readJSONInteger() == 0 else True)
def readNumber(self):
return self.readJSONInteger()
readByte = readNumber
readI16 = readNumber
readI32 = readNumber
readI64 = readNumber
def readDouble(self):
return self.readJSONDouble()
def readString(self):
return self.readJSONString(False)
def readBinary(self):
return self.readJSONBase64()
def writeMessageBegin(self, name, request_type, seqid):
self.resetWriteContext()
self.writeJSONArrayStart()
self.writeJSONNumber(VERSION)
self.writeJSONString(name)
self.writeJSONNumber(request_type)
self.writeJSONNumber(seqid)
def writeMessageEnd(self):
self.writeJSONArrayEnd()
def writeStructBegin(self, name):
self.writeJSONObjectStart()
def writeStructEnd(self):
self.writeJSONObjectEnd()
def writeFieldBegin(self, name, ttype, id):
self.writeJSONNumber(id)
self.writeJSONObjectStart()
self.writeJSONString(CTYPES[ttype])
def writeFieldEnd(self):
self.writeJSONObjectEnd()
def writeFieldStop(self):
pass
def writeMapBegin(self, ktype, vtype, size):
self.writeJSONArrayStart()
self.writeJSONString(CTYPES[ktype])
self.writeJSONString(CTYPES[vtype])
self.writeJSONNumber(size)
self.writeJSONObjectStart()
def writeMapEnd(self):
self.writeJSONObjectEnd()
self.writeJSONArrayEnd()
def writeListBegin(self, etype, size):
self.writeJSONArrayStart()
self.writeJSONString(CTYPES[etype])
self.writeJSONNumber(size)
def writeListEnd(self):
self.writeJSONArrayEnd()
def writeSetBegin(self, etype, size):
self.writeJSONArrayStart()
self.writeJSONString(CTYPES[etype])
self.writeJSONNumber(size)
def writeSetEnd(self):
self.writeJSONArrayEnd()
def writeBool(self, boolean):
self.writeJSONNumber(1 if boolean is True else 0)
def writeByte(self, byte):
checkIntegerLimits(byte, 8)
self.writeJSONNumber(byte)
def writeI16(self, i16):
checkIntegerLimits(i16, 16)
self.writeJSONNumber(i16)
def writeI32(self, i32):
checkIntegerLimits(i32, 32)
self.writeJSONNumber(i32)
def writeI64(self, i64):
checkIntegerLimits(i64, 64)
self.writeJSONNumber(i64)
def writeDouble(self, dbl):
# 17 significant digits should be just enough for any double precision
# value.
self.writeJSONNumber(dbl, '{0:.17g}')
def writeString(self, string):
self.writeJSONString(string)
def writeBinary(self, binary):
self.writeJSONBase64(binary)
| TJSONProtocol |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/events.py | {
"start": 2423,
"end": 2690
} | class ____(Event):
__slots__ = ('anchor',)
def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
# type: (Any, Any, Any, Any) -> None
Event.__init__(self, start_mark, end_mark, comment)
self.anchor = anchor
| NodeEvent |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_hash_returned.py | {
"start": 404,
"end": 507
} | class ____(type):
def __hash__(cls):
return 1
@six.add_metaclass(HashMetaclass)
| HashMetaclass |
python | django__django | tests/sessions_tests/tests.py | {
"start": 29272,
"end": 29345
} | class ____(DatabaseSessionTests):
pass
| DatabaseSessionWithTimeZoneTests |
python | openai__openai-python | src/openai/lib/streaming/_assistants.py | {
"start": 33977,
"end": 40692
} | class ____(Generic[AsyncAssistantEventHandlerT]):
"""Wrapper over AsyncAssistantStreamEventHandler that is returned by `.stream()`
so that an async context manager can be used without `await`ing the
original client call.
```py
async with client.threads.create_and_run_stream(...) as stream:
async for event in stream:
...
```
"""
def __init__(
self,
api_request: Awaitable[AsyncStream[AssistantStreamEvent]],
*,
event_handler: AsyncAssistantEventHandlerT,
) -> None:
self.__stream: AsyncStream[AssistantStreamEvent] | None = None
self.__event_handler = event_handler
self.__api_request = api_request
async def __aenter__(self) -> AsyncAssistantEventHandlerT:
self.__stream = await self.__api_request
self.__event_handler._init(self.__stream)
return self.__event_handler
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if self.__stream is not None:
await self.__stream.close()
def accumulate_run_step(
*,
event: AssistantStreamEvent,
run_step_snapshots: dict[str, RunStep],
) -> None:
if event.event == "thread.run.step.created":
run_step_snapshots[event.data.id] = event.data
return
if event.event == "thread.run.step.delta":
data = event.data
snapshot = run_step_snapshots[data.id]
if data.delta:
merged = accumulate_delta(
cast(
"dict[object, object]",
model_dump(snapshot, exclude_unset=True, warnings=False),
),
cast(
"dict[object, object]",
model_dump(data.delta, exclude_unset=True, warnings=False),
),
)
run_step_snapshots[snapshot.id] = cast(RunStep, construct_type(type_=RunStep, value=merged))
return None
def accumulate_event(
*,
event: AssistantStreamEvent,
current_message_snapshot: Message | None,
) -> tuple[Message | None, list[MessageContentDelta]]:
"""Returns a tuple of message snapshot and newly created text message deltas"""
if event.event == "thread.message.created":
return event.data, []
new_content: list[MessageContentDelta] = []
if event.event != "thread.message.delta":
return current_message_snapshot, []
if not current_message_snapshot:
raise RuntimeError("Encountered a message delta with no previous snapshot")
data = event.data
if data.delta.content:
for content_delta in data.delta.content:
try:
block = current_message_snapshot.content[content_delta.index]
except IndexError:
current_message_snapshot.content.insert(
content_delta.index,
cast(
MessageContent,
construct_type(
# mypy doesn't allow Content for some reason
type_=cast(Any, MessageContent),
value=model_dump(content_delta, exclude_unset=True, warnings=False),
),
),
)
new_content.append(content_delta)
else:
merged = accumulate_delta(
cast(
"dict[object, object]",
model_dump(block, exclude_unset=True, warnings=False),
),
cast(
"dict[object, object]",
model_dump(content_delta, exclude_unset=True, warnings=False),
),
)
current_message_snapshot.content[content_delta.index] = cast(
MessageContent,
construct_type(
# mypy doesn't allow Content for some reason
type_=cast(Any, MessageContent),
value=merged,
),
)
return current_message_snapshot, new_content
def accumulate_delta(acc: dict[object, object], delta: dict[object, object]) -> dict[object, object]:
for key, delta_value in delta.items():
if key not in acc:
acc[key] = delta_value
continue
acc_value = acc[key]
if acc_value is None:
acc[key] = delta_value
continue
# the `index` property is used in arrays of objects so it should
# not be accumulated like other values e.g.
# [{'foo': 'bar', 'index': 0}]
#
# the same applies to `type` properties as they're used for
# discriminated unions
if key == "index" or key == "type":
acc[key] = delta_value
continue
if isinstance(acc_value, str) and isinstance(delta_value, str):
acc_value += delta_value
elif isinstance(acc_value, (int, float)) and isinstance(delta_value, (int, float)):
acc_value += delta_value
elif is_dict(acc_value) and is_dict(delta_value):
acc_value = accumulate_delta(acc_value, delta_value)
elif is_list(acc_value) and is_list(delta_value):
# for lists of non-dictionary items we'll only ever get new entries
# in the array, existing entries will never be changed
if all(isinstance(x, (str, int, float)) for x in acc_value):
acc_value.extend(delta_value)
continue
for delta_entry in delta_value:
if not is_dict(delta_entry):
raise TypeError(f"Unexpected list delta entry is not a dictionary: {delta_entry}")
try:
index = delta_entry["index"]
except KeyError as exc:
raise RuntimeError(f"Expected list delta entry to have an `index` key; {delta_entry}") from exc
if not isinstance(index, int):
raise TypeError(f"Unexpected, list delta entry `index` value is not an integer; {index}")
try:
acc_entry = acc_value[index]
except IndexError:
acc_value.insert(index, delta_entry)
else:
if not is_dict(acc_entry):
raise TypeError("not handled yet")
acc_value[index] = accumulate_delta(acc_entry, delta_entry)
acc[key] = acc_value
return acc
| AsyncAssistantStreamManager |
python | miyuchina__mistletoe | mistletoe/base_renderer.py | {
"start": 93,
"end": 7370
} | class ____(object):
"""
Base class for renderers.
All renderers should ...
* ... define all render functions specified in self.render_map;
* ... be a context manager (by inheriting __enter__ and __exit__);
Custom renderers could ...
* ... add additional tokens into the parsing process by passing custom
tokens to super().__init__();
* ... add additional render functions by appending to self.render_map;
Usage:
Suppose SomeRenderer inherits BaseRenderer, and fin is the input file.
The syntax looks something like this:
>>> from mistletoe import Document
>>> from some_renderer import SomeRenderer
>>> with SomeRenderer() as renderer:
... rendered = renderer.render(Document(fin))
See mistletoe.html_renderer for an implementation example.
Naming conventions:
* The keys of self.render_map should exactly match the class
name of tokens;
* Render function names should be of form: "render_" + the
"snake-case" form of token's class name.
Attributes:
render_map (dict): maps tokens to their corresponding render functions.
_extras (list): a list of custom tokens to be added to the
parsing process.
"""
_parse_name = re.compile(r"([A-Z][a-z]+|[A-Z]+(?![a-z]))")
def __init__(self, *extras, **kwargs):
self.render_map = {
'Strong': self.render_strong,
'Emphasis': self.render_emphasis,
'InlineCode': self.render_inline_code,
'RawText': self.render_raw_text,
'Strikethrough': self.render_strikethrough,
'Image': self.render_image,
'Link': self.render_link,
'AutoLink': self.render_auto_link,
'EscapeSequence': self.render_escape_sequence,
'Heading': self.render_heading,
'SetextHeading': self.render_heading,
'Quote': self.render_quote,
'Paragraph': self.render_paragraph,
'CodeFence': self.render_block_code,
'BlockCode': self.render_block_code,
'List': self.render_list,
'ListItem': self.render_list_item,
'Table': self.render_table,
'TableRow': self.render_table_row,
'TableCell': self.render_table_cell,
'ThematicBreak': self.render_thematic_break,
'LineBreak': self.render_line_break,
'Document': self.render_document,
}
self._extras = extras
for token in extras:
if issubclass(token, span_token.SpanToken):
token_module = span_token
else:
token_module = block_token
token_module.add_token(token)
render_func = getattr(self, self._cls_to_func(token.__name__))
self.render_map[token.__name__] = render_func
self.footnotes = {}
def render(self, token):
    """Dispatch *token* to the render function registered for its class.

    Looks up ``token``'s class name in ``self.render_map`` and invokes
    the matching renderer (a simple form of polymorphic dispatch).

    Arguments:
        token: whose ``__class__.__name__`` is a key of ``self.render_map``.
    """
    handler = self.render_map[type(token).__name__]
    return handler(token)
def render_inner(self, token) -> str:
    """Recursively render all children of *token* and concatenate the results.

    No separator is inserted between child renderings; if whitespace is
    needed between tokens, add it in the respective render templates or
    override this method in the renderer subclass, so that whitespace
    never appears "magically" to a reader of the program.

    Arguments:
        token: a branch node exposing a ``children`` attribute.
    """
    rendered_children = (self.render(child) for child in token.children)
    return ''.join(rendered_children)
def __enter__(self):
    """Enter the context manager; returns the renderer itself."""
    return self

def __exit__(self, exception_type, exception_val, traceback):
    """Exit the context manager.

    Resets the registered token types (``block_token._token_types`` and
    ``span_token._token_types``) so custom tokens added in ``__init__``
    do not leak into later parses.
    """
    block_token.reset_tokens()
    span_token.reset_tokens()
@classmethod
def _cls_to_func(cls, cls_name):
    """Convert a token class name to its render-function name.

    E.g. ``"InlineCode"`` -> ``"render_inline_code"``.
    """
    # _parse_name splits CamelCase into words (a run of consecutive
    # capitals is kept as one word); lowercase and join with underscores.
    snake = '_'.join(map(str.lower, cls._parse_name.findall(cls_name)))
    return 'render_{}'.format(snake)
@staticmethod
def _tokens_from_module(module):
    """Return all token classes listed in *module*'s ``__all__``.

    Helper for the common case where custom tokens are defined in a
    separate module: pass that module and get back the token classes to
    feed into the renderer's ``*extras``.
    """
    return [getattr(module, name) for name in module.__all__]
def render_raw_text(self, token) -> str:
    """Default renderer for RawText: return the token's content verbatim."""
    text = token.content
    return text
# Default pass-through renderers: each simply renders the token's children,
# so renderer subclasses only need to override the tokens they care about.
# (SetextHeading shares render_heading, and CodeFence shares
# render_block_code, via the render_map built in __init__.)
def render_strong(self, token: span_token.Strong) -> str:
    return self.render_inner(token)

def render_emphasis(self, token: span_token.Emphasis) -> str:
    return self.render_inner(token)

def render_inline_code(self, token: span_token.InlineCode) -> str:
    return self.render_inner(token)

def render_strikethrough(self, token: span_token.Strikethrough) -> str:
    return self.render_inner(token)

def render_image(self, token: span_token.Image) -> str:
    return self.render_inner(token)

def render_link(self, token: span_token.Link) -> str:
    return self.render_inner(token)

def render_auto_link(self, token: span_token.AutoLink) -> str:
    return self.render_inner(token)

def render_escape_sequence(self, token: span_token.EscapeSequence) -> str:
    return self.render_inner(token)

def render_line_break(self, token: span_token.LineBreak) -> str:
    return self.render_inner(token)

def render_heading(self, token: block_token.Heading) -> str:
    return self.render_inner(token)

def render_quote(self, token: block_token.Quote) -> str:
    return self.render_inner(token)

def render_paragraph(self, token: block_token.Paragraph) -> str:
    return self.render_inner(token)

def render_block_code(self, token: block_token.BlockCode) -> str:
    return self.render_inner(token)

def render_list(self, token: block_token.List) -> str:
    return self.render_inner(token)

def render_list_item(self, token: block_token.ListItem) -> str:
    return self.render_inner(token)

def render_table(self, token: block_token.Table) -> str:
    return self.render_inner(token)

def render_table_cell(self, token: block_token.TableCell) -> str:
    return self.render_inner(token)

def render_table_row(self, token: block_token.TableRow) -> str:
    return self.render_inner(token)

def render_thematic_break(self, token: block_token.ThematicBreak) -> str:
    return self.render_inner(token)

def render_document(self, token: block_token.Document) -> str:
    return self.render_inner(token)
| BaseRenderer |
python | ray-project__ray | rllib/utils/replay_buffers/multi_agent_episode_buffer.py | {
"start": 724,
"end": 54164
} | class ____(EpisodeReplayBuffer):
"""Multi-agent episode replay buffer that stores episodes by their IDs.
This class implements a replay buffer as used in "playing Atari with Deep
Reinforcement Learning" (Mnih et al., 2013) for multi-agent reinforcement
learning,
Each "row" (a slot in a deque) in the buffer is occupied by one episode. If an
incomplete episode is added to the buffer and then another chunk of that episode is
added at a later time, the buffer will automatically concatenate the new fragment to
the original episode. This way, episodes can be completed via subsequent `add`
calls.
Sampling returns a size `B` episode list (number of 'rows'), where each episode
holds a tuple tuple of the form
`(o_t, a_t, sum(r_t+1:t+n), o_t+n)`
where `o_t` is the observation in `t`, `a_t` the action chosen at observation `o_t`,
`o_t+n` is the observation `n` timesteps later and `sum(r_t+1:t+n)` is the sum of
all rewards collected over the time steps between `t+1` and `t+n`. The `n`-step can
be chosen freely when sampling and defaults to `1`. If `n_step` is a tuple it is
sampled uniformly across the interval defined by the tuple (for each row in the
batch).
Each episode contains - in addition to the data tuples presented above - two further
elements in its `extra_model_outputs`, namely `n_steps` and `weights`. The former
holds the `n_step` used for the sampled timesteps in the episode and the latter the
corresponding (importance sampling) weight for the transition.
.. testcode::
import gymnasium as gym
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
from ray.rllib.examples.envs.classes.multi_agent import MultiAgentCartPole
from ray.rllib.utils.replay_buffers import MultiAgentEpisodeReplayBuffer
# Create the environment.
env = MultiAgentCartPole({"num_agents": 2})
# Set up the loop variables
agent_ids = env.agents
agent_ids.append("__all__")
terminateds = {aid: False for aid in agent_ids}
truncateds = {aid: False for aid in agent_ids}
num_timesteps = 10000
episodes = []
# Initialize the first episode entries.
eps = MultiAgentEpisode()
obs, infos = env.reset()
eps.add_env_reset(observations=obs, infos=infos)
# Sample 10,000 env timesteps.
for i in range(num_timesteps):
# If terminated we create a new episode.
if eps.is_done:
episodes.append(eps.to_numpy())
eps = MultiAgentEpisode()
terminateds = {aid: False for aid in agent_ids}
truncateds = {aid: False for aid in agent_ids}
obs, infos = env.reset()
eps.add_env_reset(observations=obs, infos=infos)
# Sample a random action for all agents that should step in the episode
# next.
actions = {
aid: env.get_action_space(aid).sample()
for aid in eps.get_agents_to_act()
}
obs, rewards, terminateds, truncateds, infos = env.step(actions)
eps.add_env_step(
obs,
actions,
rewards,
infos,
terminateds=terminateds,
truncateds=truncateds
)
# Add the last (truncated) episode to the list of episodes.
if not eps.is_done:
episodes.append(eps)
# Create the buffer.
buffer = MultiAgentEpisodeReplayBuffer()
# Add the list of episodes sampled.
buffer.add(episodes)
# Pull a sample from the buffer using an `n-step` of 3.
sample = buffer.sample(num_items=256, gamma=0.95, n_step=3)
"""
def __init__(
    self,
    capacity: int = 10000,
    *,
    batch_size_B: int = 16,
    batch_length_T: int = 1,
    metrics_num_episodes_for_smoothing: int = 100,
    **kwargs,
):
    """Initializes a multi-agent episode replay buffer.

    Args:
        capacity: The total number of timesteps to be storable in this buffer.
            Will start ejecting old episodes once this limit is reached.
        batch_size_B: The number of episodes returned from `sample()`.
        batch_length_T: The length of each episode in the episode list returned
            from `sample()`.
        metrics_num_episodes_for_smoothing: Window size (in episodes) used to
            smooth this buffer's metrics.
        **kwargs: Forwarded to the base `EpisodeReplayBuffer`.
    """
    # Initialize the base episode replay buffer.
    super().__init__(
        capacity=capacity,
        batch_size_B=batch_size_B,
        batch_length_T=batch_length_T,
        metrics_num_episodes_for_smoothing=metrics_num_episodes_for_smoothing,
        **kwargs,
    )

    # Stores indices of module (single-agent) timesteps. Each index is a tuple
    # of the form:
    #   `(ma_episode_idx, agent_id, timestep)`.
    # This information is stored for each timestep of an episode and is used in
    # the `"independent"` sampling process. The multi-agent episode index and
    # the agent ID are used to retrieve the single-agent episode. The timestep
    # is then needed to retrieve the corresponding timestep data from that
    # single-agent episode.
    self._module_to_indices: Dict[
        ModuleID, List[Tuple[int, AgentID, int]]
    ] = defaultdict(list)
    # Stores the number of single-agent timesteps in the buffer.
    self._num_agent_timesteps: int = 0
    # Stores the number of single-agent timesteps per module.
    self._num_module_timesteps: Dict[ModuleID, int] = defaultdict(int)
    # Stores the number of added single-agent timesteps over the
    # lifetime of the buffer.
    self._num_agent_timesteps_added: int = 0
    # Stores the number of added single-agent timesteps per module
    # over the lifetime of the buffer.
    self._num_module_timesteps_added: Dict[ModuleID, int] = defaultdict(int)
    # Stores the number of episodes per module currently in the buffer.
    self._num_module_episodes: Dict[ModuleID, int] = defaultdict(int)
    # Stores the number of module episodes evicted. Note, this is
    # important for indexing.
    self._num_module_episodes_evicted: Dict[ModuleID, int] = defaultdict(int)
    # Stores the number of module timesteps sampled.
    self.sampled_timesteps_per_module: Dict[ModuleID, int] = defaultdict(int)
@override(EpisodeReplayBuffer)
def add(
    self,
    episodes: Union[List["MultiAgentEpisode"], "MultiAgentEpisode"],
) -> None:
    """Adds episodes to the replay buffer.

    Note, if the incoming episodes' time steps cause the buffer to overflow,
    older episodes are evicted. Because episodes usually come in chunks and
    not complete, this could lead to edge cases (e.g. with very small capacity
    or very long episode length) where the first part of an episode is evicted
    while the next part just comes in.
    To defend against such case, the complete episode is evicted, including
    the new chunk, unless the episode is the only one in the buffer. In the
    latter case the buffer will be allowed to overflow in a temporary fashion,
    i.e. during the next addition of samples to the buffer an attempt is made
    to fall below capacity again.

    The user is advised to select a large enough buffer with regard to the
    maximum expected episode length.

    Args:
        episodes: The multi-agent episodes to add to the replay buffer. Can be
            a single episode or a list of episodes.
    """
    episodes: List["MultiAgentEpisode"] = force_list(episodes)

    new_episode_ids: Set[str] = {eps.id_ for eps in episodes}
    total_env_timesteps = sum([eps.env_steps() for eps in episodes])
    self._num_timesteps += total_env_timesteps
    self._num_timesteps_added += total_env_timesteps

    # Set up some counters for metrics.
    num_env_steps_added = 0
    agent_to_num_steps_added = defaultdict(int)
    module_to_num_steps_added = defaultdict(int)
    num_episodes_added = 0
    agent_to_num_episodes_added = defaultdict(int)
    module_to_num_episodes_added = defaultdict(int)
    num_episodes_evicted = 0
    agent_to_num_episodes_evicted = defaultdict(int)
    module_to_num_episodes_evicted = defaultdict(int)
    num_env_steps_evicted = 0
    agent_to_num_steps_evicted = defaultdict(int)
    module_to_num_steps_evicted = defaultdict(int)

    # Evict old episodes until we are under capacity (but never evict the
    # last remaining episode; the buffer may temporarily overflow instead).
    eps_evicted_ids: Set[Union[str, int]] = set()
    eps_evicted_idxs: Set[int] = set()
    while (
        self._num_timesteps > self.capacity
        and self._num_remaining_episodes(new_episode_ids, eps_evicted_ids) != 1
    ):
        # Evict episode.
        evicted_episode = self.episodes.popleft()
        eps_evicted_ids.add(evicted_episode.id_)
        eps_evicted_idxs.add(self.episode_id_to_index.pop(evicted_episode.id_))
        # If this episode has a new chunk in the new episodes added,
        # we subtract it again.
        # TODO (sven, simon): Should we just treat such an episode chunk
        #  as a new episode?
        if evicted_episode.id_ in new_episode_ids:
            idx = next(
                i
                for i, eps in enumerate(episodes)
                if eps.id_ == evicted_episode.id_
            )
            new_eps_to_evict = episodes.pop(idx)
            self._num_timesteps -= new_eps_to_evict.env_steps()
            self._num_timesteps_added -= new_eps_to_evict.env_steps()
        # Remove the timesteps of the evicted episode from the counters.
        self._num_timesteps -= evicted_episode.env_steps()
        self._num_agent_timesteps -= evicted_episode.agent_steps()
        self._num_episodes_evicted += 1
        # Increase the eviction metrics counters.
        num_episodes_evicted += 1
        num_env_steps_evicted += evicted_episode.env_steps()
        for aid, a_eps in evicted_episode.agent_episodes.items():
            mid = evicted_episode._agent_to_module_mapping[aid]
            agent_to_num_episodes_evicted[aid] += 1
            module_to_num_episodes_evicted[mid] += 1
            agent_to_num_steps_evicted[aid] += a_eps.agent_steps()
            module_to_num_steps_evicted[mid] += a_eps.agent_steps()
        # Remove the module timesteps of the evicted episode from the counters.
        self._evict_module_episodes(evicted_episode)
        del evicted_episode

    # Add agent and module steps.
    for eps in episodes:
        self._num_agent_timesteps += eps.agent_steps()
        self._num_agent_timesteps_added += eps.agent_steps()
        # Update the module counters by the module timesteps.
        self._update_module_counters(eps)

    # Remove corresponding indices, if episodes were evicted.
    if eps_evicted_idxs:
        # If the episode is not evicted, we keep the index.
        # Note, each index 2-tuple is of the form (ma_episode_idx, timestep)
        # and refers to a certain environment timestep in a certain
        # multi-agent episode.
        self._indices = [
            idx_tuple
            for idx_tuple in self._indices
            if idx_tuple[0] not in eps_evicted_idxs
        ]
        # Also remove corresponding module indices.
        for module_id, module_indices in self._module_to_indices.items():
            # Each index 3-tuple is of the form
            # (ma_episode_idx, agent_id, timestep) and refers to a certain
            # agent timestep in a certain multi-agent episode.
            self._module_to_indices[module_id] = [
                idx_triplet
                for idx_triplet in module_indices
                if idx_triplet[0] not in eps_evicted_idxs
            ]

    for eps in episodes:
        eps = copy.deepcopy(eps)
        # If the episode is part of an already existing episode, concatenate.
        if eps.id_ in self.episode_id_to_index:
            eps_idx = self.episode_id_to_index[eps.id_]
            existing_eps = self.episodes[eps_idx - self._num_episodes_evicted]
            existing_len = len(existing_eps)
            self._indices.extend(
                [
                    (
                        eps_idx,
                        existing_len + i,
                    )
                    for i in range(len(eps))
                ]
            )
            # Add new module indices.
            self._add_new_module_indices(eps, eps_idx, True)
            # Concatenate the episode chunk.
            existing_eps.concat_episode(eps)
        # Otherwise, create a new entry.
        else:
            # New episode.
            self.episodes.append(eps)
            # Update the counters.
            num_episodes_added += 1
            for aid, a_eps in eps.agent_episodes.items():
                mid = eps._agent_to_module_mapping[aid]
                agent_to_num_episodes_added[aid] += 1
                module_to_num_episodes_added[mid] += 1
            eps_idx = len(self.episodes) - 1 + self._num_episodes_evicted
            self.episode_id_to_index[eps.id_] = eps_idx
            self._indices.extend([(eps_idx, i) for i in range(len(eps))])
            # Add new module indices.
            self._add_new_module_indices(eps, eps_idx, False)
        # Update the step counters.
        num_env_steps_added += eps.env_steps()
        for aid, e_eps in eps.agent_episodes.items():
            mid = eps._agent_to_module_mapping[aid]
            agent_to_num_steps_added[aid] += e_eps.agent_steps()
            module_to_num_steps_added[mid] += e_eps.agent_steps()

    # Update the adding metrics.
    # FIX: `module_to_num_episodes_added` and `module_to_num_steps_added`
    # were previously passed to each other's keyword argument (swapped),
    # corrupting both per-module metrics.
    self._update_add_metrics(
        num_episodes_added=num_episodes_added,
        num_env_steps_added=num_env_steps_added,
        num_episodes_evicted=num_episodes_evicted,
        num_env_steps_evicted=num_env_steps_evicted,
        agent_to_num_episodes_added=agent_to_num_episodes_added,
        agent_to_num_steps_added=agent_to_num_steps_added,
        agent_to_num_episodes_evicted=agent_to_num_episodes_evicted,
        agent_to_num_steps_evicted=agent_to_num_steps_evicted,
        module_to_num_episodes_added=module_to_num_episodes_added,
        module_to_num_steps_added=module_to_num_steps_added,
        module_to_num_episodes_evicted=module_to_num_episodes_evicted,
        module_to_num_steps_evicted=module_to_num_steps_evicted,
    )
@override(EpisodeReplayBuffer)
def sample(
    self,
    num_items: Optional[int] = None,
    *,
    batch_size_B: Optional[int] = None,
    batch_length_T: Optional[int] = None,
    n_step: Optional[Union[int, Tuple]] = 1,
    gamma: float = 0.99,
    include_infos: bool = False,
    include_extra_model_outputs: bool = False,
    replay_mode: str = "independent",
    modules_to_sample: Optional[List[ModuleID]] = None,
    **kwargs,
) -> Union[List["MultiAgentEpisode"], List["SingleAgentEpisode"]]:
    """Samples a batch of multi-agent transitions.

    Multi-agent transitions can be sampled either `"independent"` or
    `"synchronized"` with the former sampling for each module independent agent
    steps and the latter sampling agent transitions from the same environment
    step.

    The n-step parameter can be either a single integer or a tuple of two
    integers. In the former case, the n-step is fixed to the given integer and
    in the latter case, the n-step is sampled uniformly from the given range.
    Large n-steps could potentially lead to many retries because not all
    samples might have a full n-step transition.

    Sampling returns batches of size B (number of 'rows'), where each row is a
    tuple of the form

    `(o_t, a_t, sum(r_t+1:t+n), o_t+n)`

    where `o_t` is the observation in `t`, `a_t` the action chosen at
    observation `o_t`, `o_t+n` is the observation `n` timesteps later and
    `sum(r_t+1:t+n)` is the sum of all rewards collected over the time steps
    between `t+1` and `t+n`. The `n`-step can be chosen freely when sampling
    and defaults to `1`. If `n_step` is a tuple it is sampled uniformly across
    the interval defined by the tuple (for each row in the batch).

    Each batch contains - in addition to the data tuples presented above - two
    further columns, namely `n_steps` and `weights`. The former holds the
    `n_step` used for each row in the batch and the latter a (default) weight
    of `1.0` for each row in the batch. This weight is used for weighted loss
    calculations in the training process.

    Args:
        num_items: The number of items to sample. If provided, `batch_size_B`
            should be `None`.
        batch_size_B: The batch size to sample. If provided, `num_items`
            should be `None`.
        batch_length_T: The length of the sampled batch. If not provided, the
            default batch length is used. This feature is not yet implemented.
        n_step: The n-step to sample. If the n-step is a tuple, the n-step is
            sampled uniformly from the given range. If not provided, the
            default n-step of `1` is used.
        gamma: The discount factor for the n-step reward calculation.
        include_infos: Whether to include the infos in the sampled batch.
        include_extra_model_outputs: Whether to include the extra model outputs
            in the sampled batch.
        replay_mode: The replay mode to use for sampling. Either
            `"independent"` or `"synchronized"`.
        modules_to_sample: A list of module IDs to sample from. If not
            provided, transitions for all modules are sampled.

    Returns:
        A dictionary of the form `ModuleID -> SampleBatchType` containing the
        sampled data for each module or each module in `modules_to_sample`,
        if provided.
    """
    if num_items is not None:
        assert batch_size_B is None, (
            "Cannot call `sample()` with both `num_items` and `batch_size_B` "
            "provided! Use either one."
        )
        batch_size_B = num_items

    # Use our default values if no sizes/lengths provided.
    batch_size_B = batch_size_B or self.batch_size_B
    # TODO (simon): Implement trajectory sampling for RNNs.
    batch_length_T = batch_length_T or self.batch_length_T

    # Sample for each module independently.
    if replay_mode == "independent":
        return self._sample_independent(
            batch_size_B=batch_size_B,
            batch_length_T=batch_length_T,
            n_step=n_step,
            gamma=gamma,
            include_infos=include_infos,
            include_extra_model_outputs=include_extra_model_outputs,
            modules_to_sample=modules_to_sample,
        )
    # Otherwise, sample synchronized transitions (same env step per agent).
    else:
        return self._sample_synchonized(
            batch_size_B=batch_size_B,
            batch_length_T=batch_length_T,
            n_step=n_step,
            gamma=gamma,
            include_infos=include_infos,
            include_extra_model_outputs=include_extra_model_outputs,
            modules_to_sample=modules_to_sample,
        )
def get_added_agent_timesteps(self) -> int:
    """Return the lifetime count of agent timesteps added to this buffer.

    Because a single environment step can contain one step per acting
    agent, this count can exceed what `get_added_timesteps` returns.
    """
    lifetime_agent_steps = self._num_agent_timesteps_added
    return lifetime_agent_steps
def get_module_ids(self) -> List[ModuleID]:
    """Return the IDs of all modules that have data stored in the buffer."""
    return [module_id for module_id in self._module_to_indices]
def get_num_agent_timesteps(self) -> int:
    """Return the number of agent timesteps currently stored in the buffer.

    Because a single environment step can contain one step per acting
    agent, this count can exceed the buffer's `num_timesteps`.
    """
    stored_agent_steps = self._num_agent_timesteps
    return stored_agent_steps
@override(EpisodeReplayBuffer)
def get_num_episodes(self, module_id: Optional[ModuleID] = None) -> int:
    """Return the number of episodes stored for a module (or overall).

    Note, episodes could be either complete or truncated.

    Args:
        module_id: The ID of the module to query. If not provided, the
            number of episodes across all modules is returned.

    Returns:
        The number of episodes stored for the module or all modules.
    """
    # NOTE(review): a falsy (e.g. empty-string) module ID falls through to
    # the all-modules branch — confirm module IDs are always truthy.
    if module_id:
        return self._num_module_episodes[module_id]
    return super().get_num_episodes()
@override(EpisodeReplayBuffer)
def get_num_episodes_evicted(self, module_id: Optional[ModuleID] = None) -> int:
    """Return how many episodes were evicted for a module (or overall)."""
    if module_id:
        return self._num_module_episodes_evicted[module_id]
    return super().get_num_episodes_evicted()
@override(EpisodeReplayBuffer)
def get_num_timesteps(self, module_id: Optional[ModuleID] = None) -> int:
    """Return the number of individual timesteps stored for a module.

    Args:
        module_id: The ID of the module to query. If not provided, the
            timestep count across all modules is returned.

    Returns:
        The number of timesteps stored for the module or all modules.
    """
    if module_id:
        return self._num_module_timesteps[module_id]
    return super().get_num_timesteps()
@override(EpisodeReplayBuffer)
def get_sampled_timesteps(self, module_id: Optional[ModuleID] = None) -> int:
    """Return the number of timesteps that have been sampled for a module.

    Args:
        module_id: The ID of the module to query. If not provided, the
            sampled-timestep count across all modules is returned.

    Returns:
        The number of timesteps sampled for the module or all modules.
    """
    if module_id:
        return self.sampled_timesteps_per_module[module_id]
    return super().get_sampled_timesteps()
@override(EpisodeReplayBuffer)
def get_added_timesteps(self, module_id: Optional[ModuleID] = None) -> int:
    """Return the lifetime count of timesteps added for the given module.

    Args:
        module_id: The ID of the module to query. If not provided, the total
            number of timesteps ever added is returned.

    Returns:
        The number of timesteps added for `module_id` (or all modules if
        `module_id` is None).
    """
    if module_id:
        return self._num_module_timesteps_added[module_id]
    return super().get_added_timesteps()
@override(EpisodeReplayBuffer)
def get_state(self) -> Dict[str, Any]:
    """Gets a picklable state of the buffer.

    This is used for checkpointing the buffer's state. It is specifically
    helpful, for example, when a trial is paused and resumed later on. The
    buffer's state can be saved to disk and reloaded when the trial is
    resumed.

    Returns:
        A dict containing all necessary information to restore the buffer's
        state.
    """
    # Merge the base-class state with the multi-agent specific counters.
    # Mappings are converted via `.items()` to plain lists of (key, value)
    # pairs for serialization; `set_state` rebuilds the defaultdicts.
    return super().get_state() | {
        "_module_to_indices": list(self._module_to_indices.items()),
        "_num_agent_timesteps": self._num_agent_timesteps,
        "_num_agent_timesteps_added": self._num_agent_timesteps_added,
        "_num_module_timesteps": list(self._num_module_timesteps.items()),
        "_num_module_timesteps_added": list(
            self._num_module_timesteps_added.items()
        ),
        "_num_module_episodes": list(self._num_module_episodes.items()),
        "_num_module_episodes_evicted": list(
            self._num_module_episodes_evicted.items()
        ),
        "sampled_timesteps_per_module": list(
            self.sampled_timesteps_per_module.items()
        ),
    }
@override(EpisodeReplayBuffer)
def set_state(self, state) -> None:
    """Sets the state of a buffer from a previously stored state.

    See `get_state()` for more information on what is stored in the state.
    This method is used to restore the buffer's state from a previously
    stored state. It is specifically helpful, for example, when a trial is
    paused and resumed later on. The buffer's state can be saved to disk and
    reloaded when the trial is resumed.

    Args:
        state: The state to restore the buffer from.
    """
    # Set the episodes.
    self._set_episodes(state)
    # Set the super's state.
    super().set_state(state)
    # Now set the remaining attributes (counters were serialized as lists
    # of (key, value) pairs; rebuild the defaultdicts from them).
    self._module_to_indices = defaultdict(list, dict(state["_module_to_indices"]))
    self._num_agent_timesteps = state["_num_agent_timesteps"]
    self._num_agent_timesteps_added = state["_num_agent_timesteps_added"]
    self._num_module_timesteps = defaultdict(
        int, dict(state["_num_module_timesteps"])
    )
    self._num_module_timesteps_added = defaultdict(
        int, dict(state["_num_module_timesteps_added"])
    )
    self._num_module_episodes = defaultdict(
        int, dict(state["_num_module_episodes"])
    )
    self._num_module_episodes_evicted = defaultdict(
        int, dict(state["_num_module_episodes_evicted"])
    )
    # FIX: this per-module sample counter is an integer counter (it is
    # incremented with `+= B` during sampling and initialized with
    # `defaultdict(int)`), so the default factory must be `int`, not `list`.
    self.sampled_timesteps_per_module = defaultdict(
        int, dict(state["sampled_timesteps_per_module"])
    )
def _set_episodes(self, state: Dict[str, Any]) -> None:
    """Restores `self.episodes` from a state dict (see `get_state()`).

    Only populates the deque if it is currently empty, so an already
    filled buffer is never overwritten.
    """
    if not self.episodes:
        self.episodes = deque(
            [
                MultiAgentEpisode.from_state(eps_data)
                for eps_data in state["episodes"]
            ]
        )
def _sample_independent(
    self,
    batch_size_B: Optional[int],
    batch_length_T: Optional[int],
    n_step: Optional[Union[int, Tuple[int, int]]],
    gamma: float,
    include_infos: bool,
    include_extra_model_outputs: bool,
    modules_to_sample: Optional[Set[ModuleID]],
) -> List["SingleAgentEpisode"]:
    """Samples a batch of independent multi-agent transitions.

    For each module (in `modules_to_sample`, or all modules with stored
    indices), draws `batch_size_B` single-agent transitions uniformly from
    that module's index list, independent of the other modules. Each drawn
    transition is packaged as a tiny `SingleAgentEpisode` of the form
    `(o_t, a_t, discounted n-step return, o_t+n)` plus `weights`/`n_step`
    extra-model-outputs.

    Args:
        batch_size_B: Number of transitions to sample per module.
        batch_length_T: Unused here (trajectory sampling not implemented).
        n_step: Fixed n-step (int) or an interval (tuple) to sample from
            uniformly per transition.
        gamma: Discount factor for the n-step return.
        include_infos: Whether to attach infos to the sampled episodes.
        include_extra_model_outputs: Whether to attach the stored extra
            model outputs.
        modules_to_sample: Module IDs to sample for; None means all.

    Returns:
        A flat list of sampled `SingleAgentEpisode`s across all modules.
    """
    actual_n_step = n_step or 1
    # Sample the n-step if necessary.
    random_n_step = isinstance(n_step, (tuple, list))

    sampled_episodes = []
    # Record the number of samples per module/agent/total.
    num_env_steps_sampled = 0
    agent_to_num_steps_sampled = defaultdict(int)
    module_to_num_steps_sampled = defaultdict(int)
    # Record all the env step buffer indices that are contained in the sample.
    sampled_env_step_idxs = set()
    agent_to_sampled_env_step_idxs = defaultdict(set)
    module_to_sampled_env_step_idxs = defaultdict(set)
    # Record all the episode buffer indices that are contained in the sample.
    sampled_episode_idxs = set()
    agent_to_sampled_episode_idxs = defaultdict(set)
    module_to_sampled_episode_idxs = defaultdict(set)
    # Record all n-steps that have been used.
    sampled_n_steps = []
    agent_to_sampled_n_steps = defaultdict(list)
    module_to_sampled_n_steps = defaultdict(list)
    # Record the number of times a sample needs to be resampled.
    num_resamples = 0
    agent_to_num_resamples = defaultdict(int)
    module_to_num_resamples = defaultdict(int)

    # TODO (simon): Ensure that the module has data and if not, skip it.
    # TODO (sven): Should we then error out or skip? I think the Learner
    #  should handle this case when a module has no train data.
    modules_to_sample = modules_to_sample or set(self._module_to_indices.keys())
    for module_id in modules_to_sample:
        module_indices = self._module_to_indices[module_id]
        B = 0
        while B < batch_size_B:
            # Now sample from the single-agent timesteps.
            index_tuple = module_indices[self.rng.integers(len(module_indices))]

            # This will be an agent timestep (not env timestep).
            # TODO (simon, sven): Maybe deprecate sa_episode_idx (_) in the
            #  index quads. Is there any need for it?
            ma_episode_idx, agent_id, sa_episode_ts = (
                index_tuple[0] - self._num_episodes_evicted,
                index_tuple[1],
                index_tuple[2],
            )
            # Get the multi-agent episode.
            ma_episode = self.episodes[ma_episode_idx]
            # Retrieve the single-agent episode for filtering.
            sa_episode = ma_episode.agent_episodes[agent_id]

            # If we use random n-step sampling, draw the n-step for this item.
            if random_n_step:
                actual_n_step = int(self.rng.integers(n_step[0], n_step[1]))
            # If we cannot make the n-step, we resample.
            if sa_episode_ts + actual_n_step > len(sa_episode):
                num_resamples += 1
                agent_to_num_resamples[agent_id] += 1
                module_to_num_resamples[module_id] += 1
                continue
            # Note, this will be the reward after executing action
            # `a_(episode_ts)`. For `n_step>1` this will be the discounted sum
            # of all rewards that were collected over the last n steps.
            sa_raw_rewards = sa_episode.get_rewards(
                slice(sa_episode_ts, sa_episode_ts + actual_n_step)
            )
            # Discounted n-step return via an IIR filter over reversed rewards.
            sa_rewards = scipy.signal.lfilter(
                [1], [1, -gamma], sa_raw_rewards[::-1], axis=0
            )[-1]

            sampled_sa_episode = SingleAgentEpisode(
                id_=sa_episode.id_,
                # Provide the IDs for the learner connector.
                agent_id=sa_episode.agent_id,
                module_id=sa_episode.module_id,
                multi_agent_episode_id=ma_episode.id_,
                # Ensure that each episode contains a tuple of the form:
                #   (o_t, a_t, sum(r_(t:t+n_step)), o_(t+n_step))
                # Two observations (t and t+n).
                observations=[
                    sa_episode.get_observations(sa_episode_ts),
                    sa_episode.get_observations(sa_episode_ts + actual_n_step),
                ],
                observation_space=sa_episode.observation_space,
                infos=(
                    [
                        sa_episode.get_infos(sa_episode_ts),
                        sa_episode.get_infos(sa_episode_ts + actual_n_step),
                    ]
                    if include_infos
                    else None
                ),
                actions=[sa_episode.get_actions(sa_episode_ts)],
                action_space=sa_episode.action_space,
                rewards=[sa_rewards],
                # If the sampled single-agent episode is the single-agent
                # episode's last time step, check, if the single-agent episode
                # is terminated or truncated.
                terminated=(
                    sa_episode_ts + actual_n_step >= len(sa_episode)
                    and sa_episode.is_terminated
                ),
                truncated=(
                    sa_episode_ts + actual_n_step >= len(sa_episode)
                    and sa_episode.is_truncated
                ),
                extra_model_outputs={
                    # Uniform (importance-sampling) weight for each sample.
                    "weights": [1.0],
                    "n_step": [actual_n_step],
                    **(
                        {
                            k: [
                                sa_episode.get_extra_model_outputs(k, sa_episode_ts)
                            ]
                            for k in sa_episode.extra_model_outputs.keys()
                        }
                        if include_extra_model_outputs
                        else {}
                    ),
                },
                # TODO (sven): Support lookback buffers.
                len_lookback_buffer=0,
                t_started=sa_episode_ts,
            )
            # Append single-agent episode to the list of sampled episodes.
            sampled_episodes.append(sampled_sa_episode)

            # Add the episode indices.
            sampled_episode_idxs.add(ma_episode_idx)
            agent_to_sampled_episode_idxs[sa_episode.agent_id].add(sa_episode.id_)
            module_to_sampled_episode_idxs[module_id].add(sa_episode.id_)
            # Add the unique step hashes.
            # Get the corresponding index in the `env_to_agent_t` mapping.
            # TODO (simon, sven): This has complexity O(n) and could become
            #  expensive when the episode is large. Note, however, that
            #  conversion from list to `numpy.ndarray` is also complexity O(n)
            #  and we do this at many places - also in the
            #  `MultiAgentEpisode`s.
            ma_episode_ts = ma_episode.env_t_to_agent_t[agent_id].data.index(
                sa_episode_ts
            )
            sampled_env_step_idxs.add(
                hashlib.sha256(
                    f"{ma_episode.id_}-{ma_episode_ts}".encode()
                ).hexdigest()
            )
            hashed_agent_step = hashlib.sha256(
                f"{sa_episode.id_}-{sa_episode_ts}".encode()
            ).hexdigest()
            agent_to_sampled_env_step_idxs[agent_id].add(hashed_agent_step)
            module_to_sampled_env_step_idxs[module_id].add(hashed_agent_step)
            # Add the actual n-step used in generating this sample.
            sampled_n_steps.append(actual_n_step)
            agent_to_sampled_n_steps[agent_id].append(actual_n_step)
            module_to_sampled_n_steps[module_id].append(actual_n_step)

            # Increase counter.
            B += 1
        # Increase the per module timesteps counter.
        self.sampled_timesteps_per_module[module_id] += B
        # Increase the counter metrics.
        num_env_steps_sampled += B
        # NOTE(review): `agent_id` here is whatever the LAST iteration of the
        # while loop left behind, so all B steps of this module are attributed
        # to a single agent — looks like a bug; confirm intended attribution.
        agent_to_num_steps_sampled[agent_id] += B
        module_to_num_steps_sampled[module_id] += B

    # Increase the counter for environment timesteps.
    # NOTE(review): only one `batch_size_B` is counted even though each
    # module sampled `batch_size_B` transitions; similarly `sampled_n_step`
    # below divides the multi-module sum by a single `batch_size_B` — confirm.
    self.sampled_timesteps += batch_size_B
    # Update the sample metrics.
    num_episodes_per_sample = len(sampled_episode_idxs)
    num_env_steps_per_sample = len(sampled_env_step_idxs)
    sampled_n_step = sum(sampled_n_steps) / batch_size_B
    agent_to_num_episodes_per_sample = {
        aid: len(l) for aid, l in agent_to_sampled_episode_idxs.items()
    }
    module_to_num_episodes_per_sample = {
        mid: len(l) for mid, l in module_to_sampled_episode_idxs.items()
    }
    agent_to_num_steps_per_sample = {
        aid: len(l) for aid, l in agent_to_sampled_env_step_idxs.items()
    }
    module_to_num_steps_per_sample = {
        mid: len(l) for mid, l in module_to_sampled_env_step_idxs.items()
    }
    agent_to_sampled_n_step = {
        aid: sum(l) / len(l) for aid, l in agent_to_sampled_n_steps.items()
    }
    module_to_sampled_n_step = {
        mid: sum(l) / len(l) for mid, l in module_to_sampled_n_steps.items()
    }
    self._update_sample_metrics(
        num_env_steps_sampled=num_env_steps_sampled,
        num_episodes_per_sample=num_episodes_per_sample,
        num_env_steps_per_sample=num_env_steps_per_sample,
        sampled_n_step=sampled_n_step,
        num_resamples=num_resamples,
        agent_to_num_steps_sampled=agent_to_num_steps_sampled,
        agent_to_num_episodes_per_sample=agent_to_num_episodes_per_sample,
        agent_to_num_steps_per_sample=agent_to_num_steps_per_sample,
        agent_to_sampled_n_step=agent_to_sampled_n_step,
        agent_to_num_resamples=agent_to_num_resamples,
        module_to_num_steps_sampled=module_to_num_steps_sampled,
        module_to_num_episodes_per_sample=module_to_num_episodes_per_sample,
        module_to_num_steps_per_sample=module_to_num_steps_per_sample,
        module_to_sampled_n_step=module_to_sampled_n_step,
        module_to_num_resamples=module_to_num_resamples,
    )

    # Return the list of sampled single-agent episodes.
    return sampled_episodes
def _sample_synchonized(
self,
batch_size_B: Optional[int],
batch_length_T: Optional[int],
n_step: Optional[Union[int, Tuple]],
gamma: float,
include_infos: bool,
include_extra_model_outputs: bool,
modules_to_sample: Optional[List[ModuleID]],
) -> SampleBatchType:
"""Samples a batch of synchronized multi-agent transitions."""
# Sample the n-step if necessary.
if isinstance(n_step, tuple):
# Use random n-step sampling.
random_n_step = True
else:
actual_n_step = n_step or 1
random_n_step = False
# Containers for the sampled data.
observations: Dict[ModuleID, List[ObsType]] = defaultdict(list)
next_observations: Dict[ModuleID, List[ObsType]] = defaultdict(list)
actions: Dict[ModuleID, List[ActType]] = defaultdict(list)
rewards: Dict[ModuleID, List[float]] = defaultdict(list)
is_terminated: Dict[ModuleID, List[bool]] = defaultdict(list)
is_truncated: Dict[ModuleID, List[bool]] = defaultdict(list)
weights: Dict[ModuleID, List[float]] = defaultdict(list)
n_steps: Dict[ModuleID, List[int]] = defaultdict(list)
# If `info` should be included, construct also a container for them.
if include_infos:
infos: Dict[ModuleID, List[Dict[str, Any]]] = defaultdict(list)
# If `extra_model_outputs` should be included, construct a container for them.
if include_extra_model_outputs:
extra_model_outputs: Dict[ModuleID, List[Dict[str, Any]]] = defaultdict(
list
)
B = 0
while B < batch_size_B:
index_tuple = self._indices[self.rng.integers(len(self._indices))]
# This will be an env timestep (not agent timestep)
ma_episode_idx, ma_episode_ts = (
index_tuple[0] - self._num_episodes_evicted,
index_tuple[1],
)
# If we use random n-step sampling, draw the n-step for this item.
if random_n_step:
actual_n_step = int(self.rng.integers(n_step[0], n_step[1]))
# If we are at the end of an episode, continue.
# Note, priority sampling got us `o_(t+n)` and we need for the loss
# calculation in addition `o_t`.
# TODO (simon): Maybe introduce a variable `num_retries` until the
# while loop should break when not enough samples have been collected
# to make n-step possible.
if ma_episode_ts - actual_n_step < 0:
continue
# Retrieve the multi-agent episode.
ma_episode = self.episodes[ma_episode_idx]
# Ensure that each row contains a tuple of the form:
# (o_t, a_t, sum(r_(t:t+n_step)), o_(t+n_step))
# TODO (simon): Implement version for sequence sampling when using RNNs.
eps_observation = ma_episode.get_observations(
slice(ma_episode_ts - actual_n_step, ma_episode_ts + 1),
return_list=True,
)
# Note, `MultiAgentEpisode` stores the action that followed
# `o_t` with `o_(t+1)`, therefore, we need the next one.
# TODO (simon): This gets the wrong action as long as the getters are not
# fixed.
eps_actions = ma_episode.get_actions(ma_episode_ts - actual_n_step)
# Make sure that at least a single agent should have full transition.
# TODO (simon): Filter for the `modules_to_sample`.
agents_to_sample = self._agents_with_full_transitions(
eps_observation,
eps_actions,
)
# If not, we resample.
if not agents_to_sample:
continue
# TODO (simon, sven): Do we need to include the common agent rewards?
# Note, the reward that is collected by transitioning from `o_t` to
# `o_(t+1)` is stored in the next transition in `MultiAgentEpisode`.
eps_rewards = ma_episode.get_rewards(
slice(ma_episode_ts - actual_n_step, ma_episode_ts),
return_list=True,
)
# TODO (simon, sven): Do we need to include the common infos? And are
# there common extra model outputs?
if include_infos:
# If infos are included we include the ones from the last timestep
# as usually the info contains additional values about the last state.
eps_infos = ma_episode.get_infos(ma_episode_ts)
if include_extra_model_outputs:
# If `extra_model_outputs` are included we include the ones from the
# first timestep as usually the `extra_model_outputs` contain additional
# values from the forward pass that produced the action at the first
# timestep.
# Note, we extract them into single row dictionaries similar to the
# infos, in a connector we can then extract these into single batch
# rows.
eps_extra_model_outputs = {
k: ma_episode.get_extra_model_outputs(
k, ma_episode_ts - actual_n_step
)
for k in ma_episode.extra_model_outputs.keys()
}
# If the sampled time step is the episode's last time step check, if
# the episode is terminated or truncated.
episode_terminated = False
episode_truncated = False
if ma_episode_ts == ma_episode.env_t:
episode_terminated = ma_episode.is_terminated
episode_truncated = ma_episode.is_truncated
# TODO (simon): Filter for the `modules_to_sample`.
# TODO (sven, simon): We could here also sample for all agents in the
# `modules_to_sample` and then adapt the `n_step` for agents that
# have not a full transition.
for agent_id in agents_to_sample:
# Map our agent to the corresponding module we want to
# train.
module_id = ma_episode._agent_to_module_mapping[agent_id]
# Sample only for the modules in `modules_to_sample`.
if module_id not in (
modules_to_sample or self._module_to_indices.keys()
):
continue
# TODO (simon, sven): Here we could skip for modules not
# to be sampled in `modules_to_sample`.
observations[module_id].append(eps_observation[0][agent_id])
next_observations[module_id].append(eps_observation[-1][agent_id])
# Fill missing rewards with zeros.
agent_rewards = [r[agent_id] or 0.0 for r in eps_rewards]
rewards[module_id].append(
scipy.signal.lfilter([1], [1, -gamma], agent_rewards[::-1], axis=0)[
-1
]
)
# Note, this should exist, as we filtered for agents with full
# transitions.
actions[module_id].append(eps_actions[agent_id])
if include_infos:
infos[module_id].append(eps_infos[agent_id])
if include_extra_model_outputs:
extra_model_outputs[module_id].append(
{
k: eps_extra_model_outputs[agent_id][k]
for k in eps_extra_model_outputs[agent_id].keys()
}
)
# If sampled observation is terminal for the agent. Either MAE
# episode is truncated/terminated or SAE episode is truncated/
# terminated at this ts.
# TODO (simon, sven): Add method agent_alive(ts) to MAE.
# or add slicing to get_terminateds().
agent_ts = ma_episode.env_t_to_agent_t[agent_id][ma_episode_ts]
agent_eps = ma_episode.agent_episodes[agent_id]
agent_terminated = agent_ts == agent_eps.t and agent_eps.is_terminated
agent_truncated = (
agent_ts == agent_eps.t
and agent_eps.is_truncated
and not agent_eps.is_terminated
)
if episode_terminated or agent_terminated:
is_terminated[module_id].append(True)
is_truncated[module_id].append(False)
elif episode_truncated or agent_truncated:
is_truncated[module_id].append(True)
is_terminated[module_id].append(False)
else:
is_terminated[module_id].append(False)
is_truncated[module_id].append(False)
# Increase the per module counter.
self.sampled_timesteps_per_module[module_id] += 1
# Increase counter.
B += 1
# Increase the counter for environment timesteps.
self.sampled_timesteps += batch_size_B
# Should be convertible to MultiAgentBatch.
ret = {
**{
module_id: {
Columns.OBS: batch(observations[module_id]),
Columns.ACTIONS: batch(actions[module_id]),
Columns.REWARDS: np.array(rewards[module_id]),
Columns.NEXT_OBS: batch(next_observations[module_id]),
Columns.TERMINATEDS: np.array(is_terminated[module_id]),
Columns.TRUNCATEDS: np.array(is_truncated[module_id]),
"weights": np.array(weights[module_id]),
"n_step": np.array(n_steps[module_id]),
}
for module_id in observations.keys()
}
}
# Return multi-agent dictionary.
return ret
def _num_remaining_episodes(self, new_eps, evicted_eps):
"""Calculates the number of remaining episodes.
When adding episodes and evicting them in the `add()` method
this function calculates iteratively the number of remaining
episodes.
Args:
new_eps: List of new episode IDs.
evicted_eps: List of evicted episode IDs.
Returns:
Number of episodes remaining after evicting the episodes in
`evicted_eps` and adding the episode in `new_eps`.
"""
return len(
set(self.episode_id_to_index.keys()).union(set(new_eps)) - set(evicted_eps)
)
    def _evict_module_episodes(self, ma_episode: MultiAgentEpisode) -> None:
        """Evicts the module episodes from the buffer and updates all counters.

        Args:
            ma_episode: The multi-agent episode to evict from the buffer.
        """
        # Note we need to take the agent ids from the evicted episode because
        # different episodes can have different agents and module mappings.
        for agent_id in ma_episode.agent_episodes:
            # Retrieve the corresponding module ID and module episode.
            module_id = ma_episode._agent_to_module_mapping[agent_id]
            module_eps = ma_episode.agent_episodes[agent_id]
            # Update all counters: remove this agent episode's env steps from
            # the module's live step count and track the eviction.
            self._num_module_timesteps[module_id] -= module_eps.env_steps()
            self._num_module_episodes[module_id] -= 1
            self._num_module_episodes_evicted[module_id] += 1
def _update_module_counters(self, ma_episode: MultiAgentEpisode) -> None:
"""Updates the module counters after adding an episode.
Args:
multi_agent_episode: The multi-agent episode to update the module counters
for.
"""
for agent_id in ma_episode.agent_ids:
agent_steps = ma_episode.agent_episodes[agent_id].env_steps()
# Only add if the agent has stepped in the episode (chunk).
if agent_steps > 0:
# Receive the corresponding module ID.
module_id = ma_episode.module_for(agent_id)
self._num_module_timesteps[module_id] += agent_steps
self._num_module_timesteps_added[module_id] += agent_steps
# if ma_episode.agent_episodes[agent_id].is_done:
# # TODO (simon): Check, if we do not count the same episode
# # multiple times.
# # Also add to the module episode counter.
# self._num_module_episodes[module_id] += 1
    def _add_new_module_indices(
        self,
        ma_episode: MultiAgentEpisode,
        episode_idx: int,
        ma_episode_exists: bool = True,
    ) -> None:
        """Adds the module indices for new episode chunks.

        For each agent in `ma_episode`, appends one
        ``(episode_idx, agent_id, sa_timestep)`` tuple per single-agent
        timestep to `self._module_to_indices[module_id]`, so per-module
        sampling can locate transitions inside the multi-agent episode.

        Args:
            ma_episode: The multi-agent episode to add the module indices for.
            episode_idx: The index of the episode in the `self.episodes`.
            ma_episode_exists: Whether `ma_episode` is already in this buffer (with a
                predecessor chunk to which we'll concatenate `ma_episode` later).
        """
        existing_ma_episode = None
        if ma_episode_exists:
            # Look up the predecessor chunk; indices in `episode_id_to_index`
            # are global, so subtract the number of already-evicted episodes.
            existing_ma_episode = self.episodes[
                self.episode_id_to_index[ma_episode.id_] - self._num_episodes_evicted
            ]

        # Note, we iterate through the agent episodes b/c we want to store records
        # and some agents could not have entered the environment.
        for agent_id in ma_episode.agent_episodes:
            # Get the corresponding module id.
            module_id = ma_episode.module_for(agent_id)
            # Get the module episode.
            module_eps = ma_episode.agent_episodes[agent_id]

            # Is the agent episode already in the buffer's existing `ma_episode`?
            if ma_episode_exists and agent_id in existing_ma_episode.agent_episodes:
                # Offset new per-agent timesteps by the predecessor's length.
                existing_sa_eps_len = len(existing_ma_episode.agent_episodes[agent_id])
            # Otherwise, it is a new single-agent episode and we increase the counter.
            else:
                existing_sa_eps_len = 0
                self._num_module_episodes[module_id] += 1

            # Add new module indices, one tuple per single-agent timestep.
            self._module_to_indices[module_id].extend(
                [
                    (
                        # Keep the MAE index for sampling
                        episode_idx,
                        agent_id,
                        # Chunk-offset single-agent timestep.
                        existing_sa_eps_len + i,
                    )
                    for i in range(len(module_eps))
                ]
            )
def _agents_with_full_transitions(
self, observations: Dict[AgentID, ObsType], actions: Dict[AgentID, ActType]
):
"""Filters for agents that have full transitions.
Args:
observations: The observations of the episode.
actions: The actions of the episode.
Returns:
List of agent IDs that have full transitions.
"""
agents_to_sample = []
for agent_id in observations[0].keys():
# Only if the agent has an action at the first and an observation
# at the first and last timestep of the n-step transition, we can sample it.
if agent_id in actions and agent_id in observations[-1]:
agents_to_sample.append(agent_id)
return agents_to_sample
| MultiAgentEpisodeReplayBuffer |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/inotify_c.py | {
"start": 2266,
"end": 4702
} | class ____(object):
# User-space events
IN_ACCESS = 0x00000001 # File was accessed.
IN_MODIFY = 0x00000002 # File was modified.
IN_ATTRIB = 0x00000004 # Meta-data changed.
IN_CLOSE_WRITE = 0x00000008 # Writable file was closed.
IN_CLOSE_NOWRITE = 0x00000010 # Unwritable file closed.
IN_OPEN = 0x00000020 # File was opened.
IN_MOVED_FROM = 0x00000040 # File was moved from X.
IN_MOVED_TO = 0x00000080 # File was moved to Y.
IN_CREATE = 0x00000100 # Subfile was created.
IN_DELETE = 0x00000200 # Subfile was deleted.
IN_DELETE_SELF = 0x00000400 # Self was deleted.
IN_MOVE_SELF = 0x00000800 # Self was moved.
# Helper user-space events.
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE # Close.
IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO # Moves.
# Events sent by the kernel to a watch.
IN_UNMOUNT = 0x00002000 # Backing file system was unmounted.
IN_Q_OVERFLOW = 0x00004000 # Event queued overflowed.
IN_IGNORED = 0x00008000 # File was ignored.
# Special flags.
IN_ONLYDIR = 0x01000000 # Only watch the path if it's a directory.
IN_DONT_FOLLOW = 0x02000000 # Do not follow a symbolic link.
IN_EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects
IN_MASK_ADD = 0x20000000 # Add to the mask of an existing watch.
IN_ISDIR = 0x40000000 # Event occurred against directory.
IN_ONESHOT = 0x80000000 # Only send event once.
# All user-space events.
IN_ALL_EVENTS = reduce(
lambda x, y: x | y, [
IN_ACCESS,
IN_MODIFY,
IN_ATTRIB,
IN_CLOSE_WRITE,
IN_CLOSE_NOWRITE,
IN_OPEN,
IN_MOVED_FROM,
IN_MOVED_TO,
IN_DELETE,
IN_CREATE,
IN_DELETE_SELF,
IN_MOVE_SELF,
])
# Flags for ``inotify_init1``
IN_CLOEXEC = 0x02000000
IN_NONBLOCK = 0x00004000
# Watchdog's API cares only about these events.
WATCHDOG_ALL_EVENTS = reduce(
lambda x, y: x | y, [
InotifyConstants.IN_MODIFY,
InotifyConstants.IN_ATTRIB,
InotifyConstants.IN_MOVED_FROM,
InotifyConstants.IN_MOVED_TO,
InotifyConstants.IN_CREATE,
InotifyConstants.IN_DELETE,
InotifyConstants.IN_DELETE_SELF,
InotifyConstants.IN_DONT_FOLLOW,
])
| InotifyConstants |
python | numba__numba | numba/core/typing/npdatetime.py | {
"start": 5251,
"end": 5346
} | class ____(TimedeltaOrderedCmpOp):
key = operator.le
@infer_global(operator.gt)
| TimedeltaCmpLE |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 277986,
"end": 278693
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of RemoveOutsideCollaborator"""
__schema__ = github_schema
__field_names__ = ("user_id", "organization_id", "client_mutation_id")
user_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="userId")
"""The ID of the outside collaborator to remove."""
organization_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="organizationId")
"""The ID of the organization to remove the outside collaborator
from.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| RemoveOutsideCollaboratorInput |
python | ansible__ansible | test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/doc_fragments/ps_util.py | {
"start": 193,
"end": 366
} | class ____:
DOCUMENTATION = r"""
options:
option1:
description:
- Test description
required: yes
aliases:
- alias1
type: str
"""
| ModuleDocFragment |
python | python__mypy | mypy/nodes.py | {
"start": 85808,
"end": 86809
} | class ____(Expression):
"""Dictionary comprehension (e.g. {k: v for k, v in a}"""
__slots__ = ("key", "value", "sequences", "condlists", "is_async", "indices")
__match_args__ = ("key", "value", "indices", "sequences", "condlists")
key: Expression
value: Expression
sequences: list[Expression]
condlists: list[list[Expression]]
is_async: list[bool]
indices: list[Lvalue]
def __init__(
self,
key: Expression,
value: Expression,
indices: list[Lvalue],
sequences: list[Expression],
condlists: list[list[Expression]],
is_async: list[bool],
) -> None:
super().__init__()
self.key = key
self.value = value
self.sequences = sequences
self.condlists = condlists
self.indices = indices
self.is_async = is_async
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_dictionary_comprehension(self)
| DictionaryComprehension |
python | gevent__gevent | src/greentest/3.11/test_ssl.py | {
"start": 82526,
"end": 98722
} | class ____(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.server_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=self.server_context)
self.enterContext(server)
self.server_addr = (HOST, server.port)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
s = ctx.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME
)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_sni(self):
host, port = self.server_addr
server_names = []
# We store servername_cb arguments to make sure they match the host
def servername_cb(ssl_sock, server_name, initial_context):
server_names.append(server_name)
self.server_context.set_servername_callback(servername_cb)
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
self.assertEqual(server_names, [host, host])
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_get_server_certificate_timeout(self):
def servername_cb(ssl_sock, server_name, initial_context):
time.sleep(0.2)
self.server_context.set_servername_callback(servername_cb)
with self.assertRaises(socket.timeout):
ssl.get_server_certificate(self.server_addr, ca_certs=SIGNING_CA,
timeout=0.1)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple IO loop. Call func(*args) depending on the error we get
# (WANT_READ or WANT_WRITE) move data between the socket and the BIOs.
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
count = 0
for _ in support.busy_retry(timeout):
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
def test_transport_eof(self):
client_context, server_context, hostname = testing_context()
with socket.socket(socket.AF_INET) as sock:
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
sslobj = client_context.wrap_bio(incoming, outgoing,
server_hostname=hostname)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
# Simulate EOF from the transport.
incoming.write_eof()
self.assertRaises(ssl.SSLEOFError, sslobj.read)
@support.requires_resource('network')
| SimpleBackgroundTests |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 215068,
"end": 227044
} | class ____:
coord_name = "lon"
var_name = "v1"
@contextlib.contextmanager
def setup_files_and_datasets(self, *, fuzz=0, new_combine_kwargs: bool = False):
ds1, ds2 = self.gen_datasets_with_common_coord_and_time()
# to test join='exact'
ds1["x"] = ds1.x + fuzz
with create_tmp_file() as tmpfile1:
with create_tmp_file() as tmpfile2:
# save data to the temporary files
ds1.to_netcdf(tmpfile1)
ds2.to_netcdf(tmpfile2)
with set_options(use_new_combine_kwarg_defaults=new_combine_kwargs):
yield [tmpfile1, tmpfile2], [ds1, ds2]
def gen_datasets_with_common_coord_and_time(self):
# create coordinate data
nx = 10
nt = 10
x = np.arange(nx)
t1 = np.arange(nt)
t2 = np.arange(nt, 2 * nt, 1)
v1 = np.random.randn(nt, nx)
v2 = np.random.randn(nt, nx)
ds1 = Dataset(
data_vars={self.var_name: (["t", "x"], v1), self.coord_name: ("x", 2 * x)},
coords={"t": (["t"], t1), "x": (["x"], x)},
)
ds2 = Dataset(
data_vars={self.var_name: (["t", "x"], v2), self.coord_name: ("x", 2 * x)},
coords={"t": (["t"], t2), "x": (["x"], x)},
)
return ds1, ds2
@pytest.mark.parametrize(
"combine, concat_dim", [("nested", "t"), ("by_coords", None)]
)
@pytest.mark.parametrize("opt", ["all", "minimal", "different"])
@pytest.mark.parametrize("join", ["outer", "inner", "left", "right"])
def test_open_mfdataset_does_same_as_concat(
self, combine, concat_dim, opt, join
) -> None:
with self.setup_files_and_datasets() as (files, [ds1, ds2]):
if combine == "by_coords":
files.reverse()
with open_mfdataset(
files,
data_vars=opt,
combine=combine,
concat_dim=concat_dim,
join=join,
compat="equals",
) as ds:
ds_expect = xr.concat(
[ds1, ds2], data_vars=opt, dim="t", join=join, compat="equals"
)
assert_identical(ds, ds_expect)
@pytest.mark.parametrize("use_new_combine_kwarg_defaults", [True, False])
@pytest.mark.parametrize(
["combine_attrs", "attrs", "expected", "expect_error"],
(
pytest.param("drop", [{"a": 1}, {"a": 2}], {}, False, id="drop"),
pytest.param(
"override", [{"a": 1}, {"a": 2}], {"a": 1}, False, id="override"
),
pytest.param(
"no_conflicts", [{"a": 1}, {"a": 2}], None, True, id="no_conflicts"
),
pytest.param(
"identical",
[{"a": 1, "b": 2}, {"a": 1, "c": 3}],
None,
True,
id="identical",
),
pytest.param(
"drop_conflicts",
[{"a": 1, "b": 2}, {"b": -1, "c": 3}],
{"a": 1, "c": 3},
False,
id="drop_conflicts",
),
),
)
def test_open_mfdataset_dataset_combine_attrs(
self,
use_new_combine_kwarg_defaults,
combine_attrs,
attrs,
expected,
expect_error,
):
with self.setup_files_and_datasets() as (files, [_ds1, _ds2]):
# Give the files an inconsistent attribute
for i, f in enumerate(files):
ds = open_dataset(f).load()
ds.attrs = attrs[i]
ds.close()
ds.to_netcdf(f)
with set_options(
use_new_combine_kwarg_defaults=use_new_combine_kwarg_defaults
):
warning: contextlib.AbstractContextManager = (
pytest.warns(FutureWarning)
if not use_new_combine_kwarg_defaults
else contextlib.nullcontext()
)
error: contextlib.AbstractContextManager = (
pytest.raises(xr.MergeError)
if expect_error
else contextlib.nullcontext()
)
with warning:
with error:
with xr.open_mfdataset(
files,
combine="nested",
concat_dim="t",
combine_attrs=combine_attrs,
) as ds:
assert ds.attrs == expected
def test_open_mfdataset_dataset_attr_by_coords(self) -> None:
"""
Case when an attribute differs across the multiple files
"""
with self.setup_files_and_datasets() as (files, [_ds1, _ds2]):
# Give the files an inconsistent attribute
for i, f in enumerate(files):
ds = open_dataset(f).load()
ds.attrs["test_dataset_attr"] = 10 + i
ds.close()
ds.to_netcdf(f)
with set_options(use_new_combine_kwarg_defaults=True):
with xr.open_mfdataset(files, combine="nested", concat_dim="t") as ds:
assert ds.test_dataset_attr == 10
def test_open_mfdataset_dataarray_attr_by_coords(self) -> None:
"""
Case when an attribute of a member DataArray differs across the multiple files
"""
with self.setup_files_and_datasets(new_combine_kwargs=True) as (
files,
[_ds1, _ds2],
):
# Give the files an inconsistent attribute
for i, f in enumerate(files):
ds = open_dataset(f).load()
ds["v1"].attrs["test_dataarray_attr"] = i
ds.close()
ds.to_netcdf(f)
with xr.open_mfdataset(
files, data_vars=None, combine="nested", concat_dim="t"
) as ds:
assert ds["v1"].test_dataarray_attr == 0
@pytest.mark.parametrize(
"combine, concat_dim", [("nested", "t"), ("by_coords", None)]
)
@pytest.mark.parametrize(
"kwargs",
[
{"data_vars": "all"},
{"data_vars": "minimal"},
{
"data_vars": "all",
"coords": "different",
"compat": "no_conflicts",
}, # old defaults
{
"data_vars": None,
"coords": "minimal",
"compat": "override",
}, # new defaults
{"data_vars": "different", "compat": "no_conflicts"},
{},
],
)
def test_open_mfdataset_exact_join_raises_error(
self, combine, concat_dim, kwargs
) -> None:
with self.setup_files_and_datasets(fuzz=0.1, new_combine_kwargs=True) as (
files,
_,
):
if combine == "by_coords":
files.reverse()
with pytest.raises(
ValueError, match="cannot align objects with join='exact'"
):
open_mfdataset(
files,
**kwargs,
combine=combine,
concat_dim=concat_dim,
join="exact",
)
def test_open_mfdataset_defaults_with_exact_join_warns_as_well_as_raising(
self,
) -> None:
with self.setup_files_and_datasets(fuzz=0.1, new_combine_kwargs=True) as (
files,
_,
):
files.reverse()
with pytest.raises(
ValueError, match="cannot align objects with join='exact'"
):
open_mfdataset(files, combine="by_coords")
def test_common_coord_when_datavars_all(self) -> None:
opt: Final = "all"
with self.setup_files_and_datasets() as (files, [ds1, ds2]):
# open the files with the data_var option
with open_mfdataset(
files, data_vars=opt, combine="nested", concat_dim="t"
) as ds:
coord_shape = ds[self.coord_name].shape
coord_shape1 = ds1[self.coord_name].shape
coord_shape2 = ds2[self.coord_name].shape
var_shape = ds[self.var_name].shape
assert var_shape == coord_shape
assert coord_shape1 != coord_shape
assert coord_shape2 != coord_shape
def test_common_coord_when_datavars_minimal(self) -> None:
opt: Final = "minimal"
with self.setup_files_and_datasets(new_combine_kwargs=True) as (
files,
[ds1, ds2],
):
# open the files using data_vars option
with open_mfdataset(
files, data_vars=opt, combine="nested", concat_dim="t"
) as ds:
coord_shape = ds[self.coord_name].shape
coord_shape1 = ds1[self.coord_name].shape
coord_shape2 = ds2[self.coord_name].shape
var_shape = ds[self.var_name].shape
assert var_shape != coord_shape
assert coord_shape1 == coord_shape
assert coord_shape2 == coord_shape
def test_invalid_data_vars_value_should_fail(self) -> None:
with self.setup_files_and_datasets() as (files, _):
with pytest.raises(ValueError):
with open_mfdataset(files, data_vars="minimum", combine="by_coords"): # type: ignore[arg-type]
pass
# test invalid coord parameter
with pytest.raises(ValueError):
with open_mfdataset(files, coords="minimum", combine="by_coords"):
pass
@pytest.mark.parametrize(
"combine, concat_dim", [("nested", "t"), ("by_coords", None)]
)
@pytest.mark.parametrize(
"kwargs", [{"data_vars": "different"}, {"coords": "different"}]
)
def test_open_mfdataset_warns_when_kwargs_set_to_different(
self, combine, concat_dim, kwargs
) -> None:
with self.setup_files_and_datasets(new_combine_kwargs=True) as (
files,
[ds1, ds2],
):
if combine == "by_coords":
files.reverse()
with pytest.raises(
ValueError, match="Previously the default was `compat='no_conflicts'`"
):
open_mfdataset(files, combine=combine, concat_dim=concat_dim, **kwargs)
with pytest.raises(
ValueError, match="Previously the default was `compat='equals'`"
):
xr.concat([ds1, ds2], dim="t", **kwargs)
with set_options(use_new_combine_kwarg_defaults=False):
expectation: contextlib.AbstractContextManager = (
pytest.warns(
FutureWarning,
match="will change from data_vars='all'",
)
if "data_vars" not in kwargs
else contextlib.nullcontext()
)
with pytest.warns(
FutureWarning,
match="will change from compat='equals'",
):
with expectation:
ds_expect = xr.concat([ds1, ds2], dim="t", **kwargs)
with pytest.warns(
FutureWarning, match="will change from compat='no_conflicts'"
):
with expectation:
with open_mfdataset(
files, combine=combine, concat_dim=concat_dim, **kwargs
) as ds:
assert_identical(ds, ds_expect)
@requires_dask
@requires_scipy
@requires_netCDF4
| TestOpenMFDatasetWithDataVarsAndCoordsKw |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/sensors/test_wasb.py | {
"start": 7991,
"end": 9390
} | class ____:
_config = {
"container_name": "container",
"prefix": "prefix",
"wasb_conn_id": "conn_id",
"timeout": 100,
}
def setup_method(self):
args = {"owner": "airflow", "start_date": datetime.datetime(2017, 1, 1)}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
def test_init(self):
sensor = WasbPrefixSensor(task_id="wasb_sensor_1", dag=self.dag, **self._config)
assert sensor.container_name == self._config["container_name"]
assert sensor.prefix == self._config["prefix"]
assert sensor.wasb_conn_id == self._config["wasb_conn_id"]
assert sensor.check_options == {}
assert sensor.timeout == self._config["timeout"]
sensor = WasbPrefixSensor(
task_id="wasb_sensor_2", dag=self.dag, check_options={"timeout": 2}, **self._config
)
assert sensor.check_options == {"timeout": 2}
@mock.patch("airflow.providers.microsoft.azure.sensors.wasb.WasbHook", autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbPrefixSensor(
task_id="wasb_sensor", dag=self.dag, check_options={"timeout": 2}, **self._config
)
sensor.poke(None)
mock_instance.check_for_prefix.assert_called_once_with("container", "prefix", timeout=2)
| TestWasbPrefixSensor |
python | ray-project__ray | python/ray/air/util/tensor_extensions/arrow.py | {
"start": 2955,
"end": 6572
} | class ____(abc.ABC):
"""Base class for caching Arrow extension type serialization and deserialization.
The deserialization and serialization of Arrow extension types is frequent,
so we cache the results here to improve performance.
The deserialization cache uses functools.lru_cache as a classmethod. There is
a single cache instance shared across all subclasses, but the cache key includes
the class (cls parameter) as the first argument, so different subclasses get
different cache entries even when called with the same parameters. The cache is
thread-safe and has a maximum size limit to control memory usage. The cache key
is (cls, *args) where args are the parameters returned by _get_deserialize_parameter().
Attributes:
_serialize_cache: Instance-level cache for serialization results.
This is a simple cached value (bytes) that is computed once per
instance and reused.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize the extension type with caching support.
Args:
*args: Positional arguments passed to the parent class.
**kwargs: Keyword arguments passed to the parent class.
"""
# Instance-level cache for serialization results, no TTL
self._serialize_cache = None
self._cache_lock = threading.RLock()
super().__init__(*args, **kwargs)
def __arrow_ext_serialize__(self) -> bytes:
"""Serialize the extension type using caching if enabled."""
if self._serialize_cache is not None:
return self._serialize_cache
with self._cache_lock:
if self._serialize_cache is None:
self._serialize_cache = self._arrow_ext_serialize_compute()
return self._serialize_cache
@abstractmethod
def _arrow_ext_serialize_compute(self) -> bytes:
"""Subclasses must implement this method to compute serialization."""
...
@classmethod
@functools.lru_cache(maxsize=ARROW_EXTENSION_SERIALIZATION_CACHE_MAXSIZE)
def _arrow_ext_deserialize_cache(cls: type, *args: Any, **kwargs: Any) -> Any:
"""Deserialize the extension type using the class-level cache.
This method is cached using functools.lru_cache to improve performance
when deserializing extension types. The cache key includes the class (cls)
as the first argument, ensuring different subclasses get separate cache entries.
Args:
*args: Positional arguments passed to _arrow_ext_deserialize_compute.
**kwargs: Keyword arguments passed to _arrow_ext_deserialize_compute.
Returns:
The deserialized extension type instance.
"""
return cls._arrow_ext_deserialize_compute(*args, **kwargs)
@classmethod
@abstractmethod
def _arrow_ext_deserialize_compute(cls, *args: Any, **kwargs: Any) -> Any:
"""Subclasses must implement this method to compute deserialization."""
...
@classmethod
@abstractmethod
def _get_deserialize_parameter(cls, storage_type, serialized) -> Tuple:
"""Subclasses must implement this method to return the parameters for the deserialization cache."""
...
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized) -> Any:
"""Deserialize the extension type using caching if enabled."""
return cls._arrow_ext_deserialize_cache(
*cls._get_deserialize_parameter(storage_type, serialized)
)
@DeveloperAPI
| ArrowExtensionSerializeDeserializeCache |
python | huggingface__transformers | tests/models/layoutlmv2/test_modeling_layoutlmv2.py | {
"start": 1508,
"end": 10119
} | class ____:
def __init__(
self,
parent,
batch_size=2,
num_channels=3,
image_size=4,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=36,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
image_feature_pool_shape=[7, 7, 32],
coordinate_size=6,
shape_size=6,
num_labels=3,
num_choices=4,
scope=None,
range_bbox=1000,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.image_feature_pool_shape = image_feature_pool_shape
self.coordinate_size = coordinate_size
self.shape_size = shape_size
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.range_bbox = range_bbox
detectron2_config = LayoutLMv2Config.get_default_detectron2_config()
# We need to make the model smaller
detectron2_config["MODEL.RESNETS.DEPTH"] = 50
detectron2_config["MODEL.RESNETS.RES2_OUT_CHANNELS"] = 4
detectron2_config["MODEL.RESNETS.STEM_OUT_CHANNELS"] = 4
detectron2_config["MODEL.FPN.OUT_CHANNELS"] = 32
detectron2_config["MODEL.RESNETS.NUM_GROUPS"] = 1
self.detectron2_config = detectron2_config
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
image = ImageList(
torch.zeros(self.batch_size, self.num_channels, self.image_size, self.image_size, device=torch_device),
self.image_size,
)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = LayoutLMv2Config(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
image_feature_pool_shape=self.image_feature_pool_shape,
coordinate_size=self.coordinate_size,
shape_size=self.shape_size,
detectron2_config_args=self.detectron2_config,
)
return config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
def create_and_check_model(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
model = LayoutLMv2Model(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox, image=image, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox, image=image)
# LayoutLMv2 has a different expected sequence length, namely also visual tokens are added
expected_seq_len = self.seq_length + self.image_feature_pool_shape[0] * self.image_feature_pool_shape[1]
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
config.num_labels = self.num_labels
model = LayoutLMv2ForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
image=image,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=sequence_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
config.num_labels = self.num_labels
model = LayoutLMv2ForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
image=image,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
):
model = LayoutLMv2ForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
image=image,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
image,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"image": image,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_non_xpu
@require_torch
@require_detectron2
| LayoutLMv2ModelTester |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/concurrency.py | {
"start": 7326,
"end": 9240
} | class ____:
@memoized_property
def mutex(self) -> asyncio.Lock:
# there should not be a race here for coroutines creating the
# new lock as we are not using await, so therefore no concurrency
return asyncio.Lock()
def __enter__(self) -> bool:
# await is used to acquire the lock only after the first calling
# coroutine has created the mutex.
return await_(self.mutex.acquire())
def __exit__(self, *arg: Any, **kw: Any) -> None:
self.mutex.release()
if not TYPE_CHECKING and py311:
_Runner = asyncio.Runner
else:
class _Runner:
"""Runner implementation for test only"""
_loop: Union[None, asyncio.AbstractEventLoop, Literal[False]]
def __init__(self) -> None:
self._loop = None
def __enter__(self) -> Self:
self._lazy_init()
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.close()
def close(self) -> None:
if self._loop:
try:
self._loop.run_until_complete(
self._loop.shutdown_asyncgens()
)
finally:
self._loop.close()
self._loop = False
def get_loop(self) -> asyncio.AbstractEventLoop:
"""Return embedded event loop."""
self._lazy_init()
assert self._loop
return self._loop
def run(self, coro: Coroutine[Any, Any, _T]) -> _T:
self._lazy_init()
assert self._loop
return self._loop.run_until_complete(coro)
def _lazy_init(self) -> None:
if self._loop is False:
raise RuntimeError("Runner is closed")
if self._loop is None:
self._loop = asyncio.new_event_loop()
| AsyncAdaptedLock |
python | huggingface__transformers | tests/models/marian/test_modeling_marian.py | {
"start": 23265,
"end": 30478
} | class ____:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=100,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = MarianConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
decoder_layers=self.decoder_layers,
num_hidden_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = MarianDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = MarianDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(
next_tokens, attention_mask=attn_mask, past_key_values=past_key_values, use_cache=True
)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
| MarianStandaloneDecoderModelTester |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 12688,
"end": 13376
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 8
# Ability
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Ability
def Id(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# Ability
def Distance(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4))
def CreateAbility(builder, id, distance):
builder.Prep(4, 8)
builder.PrependUint32(distance)
builder.PrependUint32(id)
return builder.Offset()
| Ability |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/base.py | {
"start": 2750,
"end": 12416
} | class ____(BaseAuthorizationView, FormView):
"""
Implements an endpoint to handle *Authorization Requests* as in :rfc:`4.1.1` and prompting the
user with a form to determine if she authorizes the client application to access her data.
This endpoint is reached two times during the authorization process:
* first receive a ``GET`` request from user asking authorization for a certain client
application, a form is served possibly showing some useful info and prompting for
*authorize/do not authorize*.
* then receive a ``POST`` request possibly after user authorized the access
Some information contained in the ``GET`` request and needed to create a Grant token during
the ``POST`` request would be lost between the two steps above, so they are temporarily stored in
hidden fields on the form.
A possible alternative could be keeping such information in the session.
The endpoint is used in the following flows:
* Authorization code
* Implicit grant
"""
template_name = "oauth2_provider/authorize.html"
form_class = AllowForm
skip_authorization_completely = False
def get_initial(self):
# TODO: move this scopes conversion from and to string into a utils function
scopes = self.oauth2_data.get("scope", self.oauth2_data.get("scopes", []))
initial_data = {
"redirect_uri": self.oauth2_data.get("redirect_uri", None),
"scope": " ".join(scopes),
"nonce": self.oauth2_data.get("nonce", None),
"client_id": self.oauth2_data.get("client_id", None),
"state": self.oauth2_data.get("state", None),
"response_type": self.oauth2_data.get("response_type", None),
"code_challenge": self.oauth2_data.get("code_challenge", None),
"code_challenge_method": self.oauth2_data.get("code_challenge_method", None),
"claims": self.oauth2_data.get("claims", None),
}
return initial_data
def form_valid(self, form):
client_id = form.cleaned_data["client_id"]
application = get_application_model().objects.get(client_id=client_id)
credentials = {
"client_id": form.cleaned_data.get("client_id"),
"redirect_uri": form.cleaned_data.get("redirect_uri"),
"response_type": form.cleaned_data.get("response_type", None),
"state": form.cleaned_data.get("state", None),
}
if form.cleaned_data.get("code_challenge", False):
credentials["code_challenge"] = form.cleaned_data.get("code_challenge")
if form.cleaned_data.get("code_challenge_method", False):
credentials["code_challenge_method"] = form.cleaned_data.get("code_challenge_method")
if form.cleaned_data.get("nonce", False):
credentials["nonce"] = form.cleaned_data.get("nonce")
if form.cleaned_data.get("claims", False):
credentials["claims"] = form.cleaned_data.get("claims")
scopes = form.cleaned_data.get("scope")
allow = form.cleaned_data.get("allow")
try:
uri, headers, body, status = self.create_authorization_response(
request=self.request, scopes=scopes, credentials=credentials, allow=allow
)
except OAuthToolkitError as error:
return self.error_response(error, application)
self.success_url = uri
log.debug("Success url for the request: {0}".format(self.success_url))
return self.redirect(self.success_url, application)
def get(self, request, *args, **kwargs):
try:
scopes, credentials = self.validate_authorization_request(request)
except OAuthToolkitError as error:
# Application is not available at this time.
return self.error_response(error, application=None)
prompt = request.GET.get("prompt")
if prompt == "login":
return self.handle_prompt_login()
all_scopes = get_scopes_backend().get_all_scopes()
kwargs["scopes_descriptions"] = [all_scopes[scope] for scope in scopes]
kwargs["scopes"] = scopes
# at this point we know an Application instance with such client_id exists in the database
# TODO: Cache this!
application = get_application_model().objects.get(client_id=credentials["client_id"])
kwargs["application"] = application
kwargs["client_id"] = credentials["client_id"]
kwargs["redirect_uri"] = credentials["redirect_uri"]
kwargs["response_type"] = credentials["response_type"]
kwargs["state"] = credentials["state"]
if "code_challenge" in credentials:
kwargs["code_challenge"] = credentials["code_challenge"]
if "code_challenge_method" in credentials:
kwargs["code_challenge_method"] = credentials["code_challenge_method"]
if "nonce" in credentials:
kwargs["nonce"] = credentials["nonce"]
if "claims" in credentials:
kwargs["claims"] = json.dumps(credentials["claims"])
self.oauth2_data = kwargs
# following two loc are here only because of https://code.djangoproject.com/ticket/17795
form = self.get_form(self.get_form_class())
kwargs["form"] = form
# Check to see if the user has already granted access and return
# a successful response depending on "approval_prompt" url parameter
require_approval = request.GET.get("approval_prompt", oauth2_settings.REQUEST_APPROVAL_PROMPT)
if "ui_locales" in credentials and isinstance(credentials["ui_locales"], list):
# Make sure ui_locales a space separated string for oauthlib to handle it correctly.
credentials["ui_locales"] = " ".join(credentials["ui_locales"])
try:
# If skip_authorization field is True, skip the authorization screen even
# if this is the first use of the application and there was no previous authorization.
# This is useful for in-house applications-> assume an in-house applications
# are already approved.
if application.skip_authorization:
uri, headers, body, status = self.create_authorization_response(
request=self.request, scopes=" ".join(scopes), credentials=credentials, allow=True
)
return self.redirect(uri, application)
elif require_approval == "auto":
tokens = (
get_access_token_model()
.objects.filter(
user=request.user, application=kwargs["application"], expires__gt=timezone.now()
)
.all()
)
# check past authorizations regarded the same scopes as the current one
for token in tokens:
if token.allow_scopes(scopes):
uri, headers, body, status = self.create_authorization_response(
request=self.request,
scopes=" ".join(scopes),
credentials=credentials,
allow=True,
)
return self.redirect(uri, application)
except OAuthToolkitError as error:
return self.error_response(error, application)
return self.render_to_response(self.get_context_data(**kwargs))
def handle_prompt_login(self):
path = self.request.build_absolute_uri()
resolved_login_url = resolve_url(self.get_login_url())
# If the login url is the same scheme and net location then use the
# path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if (not login_scheme or login_scheme == current_scheme) and (
not login_netloc or login_netloc == current_netloc
):
path = self.request.get_full_path()
parsed = urlparse(path)
parsed_query = dict(parse_qsl(parsed.query))
parsed_query.pop("prompt")
parsed = parsed._replace(query=urlencode(parsed_query))
return redirect_to_login(
parsed.geturl(),
resolved_login_url,
self.get_redirect_field_name(),
)
def handle_no_permission(self):
"""
Generate response for unauthorized users.
If prompt is set to none, then we redirect with an error code
as defined by OIDC 3.1.2.6
Some code copied from OAuthLibMixin.error_response, but that is designed
to operated on OAuth1Error from oauthlib wrapped in a OAuthToolkitError
"""
prompt = self.request.GET.get("prompt")
redirect_uri = self.request.GET.get("redirect_uri")
if prompt == "none" and redirect_uri:
response_parameters = {"error": "login_required"}
# REQUIRED if the Authorization Request included the state parameter.
# Set to the value received from the Client
state = self.request.GET.get("state")
if state:
response_parameters["state"] = state
separator = "&" if "?" in redirect_uri else "?"
redirect_to = redirect_uri + separator + urlencode(response_parameters)
return self.redirect(redirect_to, application=None)
else:
return super().handle_no_permission()
@method_decorator(csrf_exempt, name="dispatch")
@method_decorator(login_not_required, name="dispatch")
| AuthorizationView |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows.py | {
"start": 216776,
"end": 217624
} | class ____(object):
# https://github.com/argoproj/argo-events/blob/master/api/sensor.md#argoproj.io/v1alpha1.TriggerParameter
def __init__(self):
tree = lambda: defaultdict(tree)
self.payload = tree()
def src(self, dependency_name, value, data_key=None, data_template=None):
self.payload["src"] = {
"dependencyName": dependency_name,
"dataKey": data_key,
"dataTemplate": data_template,
"value": value,
# explicitly set it to false to ensure proper deserialization
"useRawData": False,
}
return self
def dest(self, dest):
self.payload["dest"] = dest
return self
def to_json(self):
return self.payload
def __str__(self):
return json.dumps(self.payload, indent=4)
| TriggerParameter |
python | weaviate__weaviate-python-client | weaviate/connect/integrations.py | {
"start": 1680,
"end": 2126
} | class ____(_IntegrationConfig):
access_key: str = Field(serialization_alias="X-Aws-Access-Key")
secret_key: str = Field(serialization_alias="X-Aws-Secret-Key")
requests_per_minute_embeddings: Optional[int] = Field(
serialization_alias="X-Aws-Ratelimit-RequestPM-Embedding"
)
tokens_per_minute_embeddings: Optional[int] = Field(
serialization_alias="X-Aws-Ratelimit-TokensPM-Embedding"
)
| _IntegrationConfigAWS |
python | dask__dask | dask/core.py | {
"start": 13787,
"end": 14902
} | class ____:
"""A small serializable object to wrap literal values without copying"""
__slots__ = ("data",)
def __init__(self, data):
self.data = data
def __repr__(self):
return f"literal<type={type(self.data).__name__}>"
def __reduce__(self):
return (literal, (self.data,))
def __call__(self):
return self.data
def quote(x):
"""Ensure that this value remains this value in a dask graph
Some values in dask graph take on special meaning. Sometimes we want to
ensure that our data is not interpreted but remains literal.
>>> add = lambda x, y: x + y
>>> quote((add, 1, 2))
(literal<type=tuple>,)
"""
if istask(x) or type(x) is list or type(x) is dict:
return (literal(x),)
return x
def reshapelist(shape, seq):
"""Reshape iterator to nested shape
>>> reshapelist((2, 3), range(6))
[[0, 1, 2], [3, 4, 5]]
"""
if len(shape) == 1:
return list(seq)
else:
n = int(len(seq) / shape[0])
return [reshapelist(shape[1:], part) for part in toolz.partition(n, seq)]
| literal |
python | coleifer__peewee | examples/analytics/reports.py | {
"start": 82,
"end": 4400
} | class ____(object):
def __init__(self, account_id=DEFAULT_ACCOUNT_ID):
self.account = Account.get(Account.id == account_id)
self.date_range = None
def get_query(self):
query = PageView.select().where(PageView.account == self.account)
if self.date_range:
query = query.where(PageView.timestamp.between(*self.date_range))
return query
def top_pages_by_time_period(self, interval='day'):
"""
Get a breakdown of top pages per interval, i.e.
day url count
2014-01-01 /blog/ 11
2014-01-02 /blog/ 14
2014-01-03 /blog/ 9
"""
date_trunc = fn.date_trunc(interval, PageView.timestamp)
return (self.get_query()
.select(
PageView.url,
date_trunc.alias(interval),
fn.Count(PageView.id).alias('count'))
.group_by(PageView.url, date_trunc)
.order_by(
SQL(interval),
SQL('count').desc(),
PageView.url))
def cookies(self):
"""
Retrieve the cookies header from all the users who visited.
"""
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples())
def user_agents(self):
"""
Retrieve user-agents, sorted by most common to least common.
"""
return (self.get_query()
.select(
PageView.headers['User-Agent'],
fn.Count(PageView.id))
.group_by(PageView.headers['User-Agent'])
.order_by(fn.Count(PageView.id).desc())
.tuples())
def languages(self):
"""
Retrieve languages, sorted by most common to least common. The
Accept-Languages header sometimes looks weird, i.e.
"en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi-
colon.
"""
language = PageView.headers['Accept-Language']
first_language = fn.SubStr(
language, # String to slice.
1, # Left index.
fn.StrPos(language, ';'))
return (self.get_query()
.select(first_language, fn.Count(PageView.id))
.group_by(first_language)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def trail(self):
"""
Get all visitors by IP and then list the pages they visited in order.
"""
inner = (self.get_query()
.select(PageView.ip, PageView.url)
.order_by(PageView.timestamp))
return (PageView
.select(
PageView.ip,
fn.array_agg(PageView.url).alias('urls'))
.from_(inner.alias('t1'))
.group_by(PageView.ip))
def _referrer_clause(self, domain_only=True):
if domain_only:
return fn.SubString(Clause(
PageView.referrer, SQL('FROM'), '.*://([^/]*)'))
return PageView.referrer
def top_referrers(self, domain_only=True):
"""
What domains send us the most traffic?
"""
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def referrers_for_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(PageView.url, referrer, fn.Count(PageView.id))
.group_by(PageView.url, referrer)
.order_by(PageView.url, fn.Count(PageView.id).desc())
.tuples())
def referrers_to_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, PageView.url, fn.Count(PageView.id))
.group_by(referrer, PageView.url)
.order_by(referrer, fn.Count(PageView.id).desc())
.tuples())
| Report |
python | kamyu104__LeetCode-Solutions | Python/form-largest-integer-with-digits-that-add-up-to-target.py | {
"start": 760,
"end": 1482
} | class ____(object):
def largestNumber(self, cost, target):
"""
:type cost: List[int]
:type target: int
:rtype: str
"""
def key(bag):
return sum(bag), bag
dp = [[0]*9]
for t in xrange(1, target+1):
dp.append([])
for d, c in enumerate(cost):
if t < c or not dp[t-c]:
continue
curr = dp[t-c][:]
curr[~d] += 1
if key(curr) > key(dp[t]):
dp[-1] = curr
if not dp[-1]:
return "0"
return "".join(str(9-i)*c for i, c in enumerate(dp[-1]))
# Time: O(t^2)
# Space: O(t^2)
| Solution2 |
python | scrapy__scrapy | tests/test_pipelines.py | {
"start": 5102,
"end": 11142
} | class ____:
def test_deprecated_process_item_spider_arg(self) -> None:
class CustomPipelineManager(ItemPipelineManager):
def process_item(self, item, spider): # pylint: disable=useless-parent-delegation
return super().process_item(item, spider)
crawler = get_crawler(DefaultSpider)
crawler.spider = crawler._create_spider()
itemproc = CustomPipelineManager.from_crawler(crawler)
with pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager.process_item\(\) is deprecated, use process_item_async\(\)",
):
itemproc.process_item({}, crawler.spider)
@deferred_f_from_coro_f
async def test_integration_recommended(self, mockserver: MockServer) -> None:
class CustomPipelineManager(ItemPipelineManager):
async def process_item_async(self, item):
return await super().process_item_async(item)
items = []
def _on_item_scraped(item):
assert isinstance(item, dict)
assert item.get("pipeline_passed")
items.append(item)
crawler = get_crawler(
ItemSpider,
{
"ITEM_PROCESSOR": CustomPipelineManager,
"ITEM_PIPELINES": {SimplePipeline: 1},
},
)
crawler.spider = crawler._create_spider()
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
await maybe_deferred_to_future(crawler.crawl(mockserver=mockserver))
assert len(items) == 1
@deferred_f_from_coro_f
async def test_integration_no_async_subclass(self, mockserver: MockServer) -> None:
class CustomPipelineManager(ItemPipelineManager):
def open_spider(self, spider):
with pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager.open_spider\(\) is deprecated, use open_spider_async\(\)",
):
return super().open_spider(spider)
def close_spider(self, spider):
with pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager.close_spider\(\) is deprecated, use close_spider_async\(\)",
):
return super().close_spider(spider)
def process_item(self, item, spider):
with pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager.process_item\(\) is deprecated, use process_item_async\(\)",
):
return super().process_item(item, spider)
items = []
def _on_item_scraped(item):
assert isinstance(item, dict)
assert item.get("pipeline_passed")
items.append(item)
crawler = get_crawler(
ItemSpider,
{
"ITEM_PROCESSOR": CustomPipelineManager,
"ITEM_PIPELINES": {SimplePipeline: 1},
},
)
crawler.spider = crawler._create_spider()
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager overrides open_spider\(\) but doesn't override open_spider_async\(\)",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager overrides close_spider\(\) but doesn't override close_spider_async\(\)",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager overrides process_item\(\) but doesn't override process_item_async\(\)",
),
):
await maybe_deferred_to_future(crawler.crawl(mockserver=mockserver))
assert len(items) == 1
@deferred_f_from_coro_f
async def test_integration_no_async_not_subclass(
self, mockserver: MockServer
) -> None:
class CustomPipelineManager:
def __init__(self, crawler):
self.pipelines = [
p()
for p in build_component_list(
crawler.settings.getwithbase("ITEM_PIPELINES")
)
]
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def open_spider(self, spider):
return succeed(None)
def close_spider(self, spider):
return succeed(None)
def process_item(self, item, spider):
for pipeline in self.pipelines:
item = pipeline.process_item(item)
return succeed(item)
items = []
def _on_item_scraped(item):
assert isinstance(item, dict)
assert item.get("pipeline_passed")
items.append(item)
crawler = get_crawler(
ItemSpider,
{
"ITEM_PROCESSOR": CustomPipelineManager,
"ITEM_PIPELINES": {SimplePipeline: 1},
},
)
crawler.spider = crawler._create_spider()
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager doesn't define a open_spider_async\(\) method",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager doesn't define a close_spider_async\(\) method",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager doesn't define a process_item_async\(\) method",
),
):
await maybe_deferred_to_future(crawler.crawl(mockserver=mockserver))
assert len(items) == 1
| TestCustomPipelineManager |
python | conda__conda | conda/auxlib/exceptions.py | {
"start": 829,
"end": 901
} | class ____(AuxlibError, AttributeError):
pass
| ThisShouldNeverHappenError |
python | sympy__sympy | sympy/integrals/transforms.py | {
"start": 1761,
"end": 2418
} | class ____(NotImplementedError):
"""
Exception raised in relation to problems computing transforms.
Explanation
===========
This class is mostly used internally; if integrals cannot be computed
objects representing unevaluated transforms are usually returned.
The hint ``needeval=True`` can be used to disable returning transform
objects, and instead raise this exception if an integral cannot be
computed.
"""
def __init__(self, transform, function, msg):
super().__init__(
"%s Transform could not be computed: %s." % (transform, msg))
self.function = function
| IntegralTransformError |
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 6005,
"end": 6147
} | class ____(scale_color_gradient):
"""
Create a 2 point color gradient
"""
_aesthetics = ["fill"]
@dataclass
| scale_fill_gradient |
python | pytorch__pytorch | test/inductor/test_torchinductor_codegen_config_overrides.py | {
"start": 643,
"end": 4755
} | class ____(InductorTestCase):
def run_and_compare(
self,
func: Callable[..., Any],
*args,
compile_kwargs: Optional[dict] = None,
config_patches: Optional[dict] = None,
atol: float | None = 1e-05,
rtol: float | None = 1e-08,
):
"""
Runs the module through Inductor, comparing to eager reference.
"""
if compile_kwargs is None:
compile_kwargs = {}
if config_patches is None:
config_patches = {}
def flatten_tensors(tensors):
flat, spec = pytree.tree_flatten(tensors)
return flat
with config.patch(config_patches):
compiled = torch.compile(func, backend="inductor", **compile_kwargs)
result, code = run_and_get_code(compiled, *args)
# Check numerical accuracy
ref_tensors = flatten_tensors(func(*args))
actual_tensors = flatten_tensors(result)
for ref, actual in zip(ref_tensors, actual_tensors):
self.assertTrue(torch.allclose(ref, actual, atol=atol, rtol=rtol))
return result, code
def count_code(self, substr: str, code: list[str], expected: Optional[int]):
count = sum(prog.count(substr) for prog in code)
if expected is not None:
self.assertEqual(count, expected)
@parametrize("force_pointwise_cat", [False, True])
def test_force_pointwise_cat(self, force_pointwise_cat: bool):
def func(a, b):
return torch.cat([a + 1, b + 2], dim=0)
a = torch.randn(1024, device=torch.device("cpu"))
b = torch.randn(1024, device=torch.device("cpu"))
config_patches = {
"force_pointwise_cat": force_pointwise_cat,
}
_, code = self.run_and_compare(
func,
a,
b,
config_patches=config_patches,
)
reinterpret_call = (
"= reinterpret_tensor_wrapper("
if config.cpp_wrapper
else "= reinterpret_tensor("
)
if force_pointwise_cat:
self.count_code(reinterpret_call, code, 0)
else:
self.count_code(reinterpret_call, code, 2)
@requires_gpu()
@skipIf(GPU_TYPE == "mps", "Triton is not available for MPS")
def test_cse_make_block_ptr_reduction(self):
def func(a, b):
tmp0 = a * b
tmp1 = a + b
c = tmp0 + tmp1
return c.sum(dim=0)
config_patches = {
"triton.use_block_ptr": True,
"triton.tile_reductions": True,
"triton.prefer_nd_tiling": True,
"triton.max_tiles": 3,
"split_reductions": False,
}
a = torch.randn((512, 4096), device=torch.device(GPU_TYPE))
b = torch.randn((512, 4096), device=torch.device(GPU_TYPE))
_, code = self.run_and_compare(
func,
a,
b,
config_patches=config_patches,
atol=1e-4,
)
self.count_code("= tl.make_block_ptr(in_ptr", code, 2)
self.count_code("= tl.load(block_ptr", code, 2)
@requires_gpu()
@skipIf(GPU_TYPE == "mps", "Triton is not available for MPS")
def test_kernel_fusion_thresholds(self):
def func(a, b):
tmp0 = a + 1
tmp1 = tmp0 + 2
tmp2 = tmp1 + 3
tmp3 = tmp2 + b
return tmp0, tmp2, tmp3
a = torch.randn(1024, device=torch.device(GPU_TYPE))
b = torch.randn(1024, device=torch.device(GPU_TYPE))
config_patches = {
"max_fusion_size": 1,
"realize_reads_threshold": 1,
"realize_opcount_threshold": 1,
"inplace_buffers": False,
}
_, code = self.run_and_compare(
func,
a,
b,
config_patches=config_patches,
)
self.count_code("@triton.jit", code, 3)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if HAS_GPU or HAS_CPU:
run_tests(needs="filelock")
| CodegenInductorTest |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 8613,
"end": 8970
} | class ____(HTTPClientError):
status_code = 413
def __init__(self, max_size: int, actual_size: int, **kwargs: Any) -> None:
kwargs.setdefault(
"text",
f"Maximum request body size {max_size} exceeded, "
f"actual body size {actual_size}",
)
super().__init__(**kwargs)
| HTTPRequestEntityTooLarge |
python | wntrblm__nox | nox/_option_set.py | {
"start": 2930,
"end": 3411
} | class ____:
"""A single group for command-line options.
Args:
name (str): The name used to refer to the group.
args: Passed through to``ArgumentParser.add_argument_group``.
kwargs: Passed through to``ArgumentParser.add_argument_group``.
"""
__slots__ = ("args", "kwargs", "name")
def __init__(self, name: str, *args: Any, **kwargs: Any) -> None:
self.name = name
self.args = args
self.kwargs = kwargs
| OptionGroup |
python | huggingface__transformers | src/transformers/models/siglip2/modular_siglip2.py | {
"start": 14554,
"end": 21184
} | class ____(SiglipModel):
# Update: add `spatial_shapes` and `pixel_attention_mask`
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(
self,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_attention_mask: Optional[torch.Tensor] = None,
spatial_shapes: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
r"""
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
Tensor containing the spatial dimensions (height, width) of the input images.
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
applying the projection layer to the pooled output of [`Siglip2VisionModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, AutoModel
>>> from transformers.image_utils import load_image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> model = AutoModel.from_pretrained("google/siglip2-base-patch16-224")
>>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... image_features = model.get_image_features(**inputs)
```
"""
vision_outputs: BaseModelOutputWithPooling = self.vision_model(
pixel_values=pixel_values,
attention_mask=pixel_attention_mask,
spatial_shapes=spatial_shapes,
)
pooled_output = vision_outputs.pooler_output
return pooled_output
# Update: add `spatial_shapes` and `pixel_attention_mask`
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_attention_mask: Optional[torch.Tensor] = None,
spatial_shapes: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
return_loss: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> Siglip2Output:
r"""
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
Mask to avoid performing attention on padding pixel indices.
spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
Tensor containing the spatial dimensions (height, width) of the input images.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AutoModel
>>> import torch
>>> model = AutoModel.from_pretrained("google/siglip2-base-patch16-224")
>>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> texts = ["a photo of 2 cats", "a photo of 2 dogs"]
>>> # important: we pass `padding=max_length` since the model was trained with this
>>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image
>>> probs = torch.sigmoid(logits_per_image) # these are the probabilities
>>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'")
31.9% that image 0 is 'a photo of 2 cats'
```
"""
# Use Siglip2 model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
vision_outputs: BaseModelOutputWithPooling = self.vision_model(
pixel_values=pixel_values,
attention_mask=pixel_attention_mask,
spatial_shapes=spatial_shapes,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
text_outputs: BaseModelOutputWithPooling = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
image_embeds = vision_outputs.pooler_output
text_embeds = text_outputs.pooler_output
# normalized features
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
# cosine similarity as logits
logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device))
logit_scale, logit_bias = self.logit_scale.to(text_embeds.device), self.logit_bias.to(text_embeds.device)
logits_per_text = logits_per_text * logit_scale.exp() + logit_bias
logits_per_image = logits_per_text.t()
loss = None
if return_loss:
# Adapted from https://github.com/google-research/big_vision/blob/01edb81a4716f93a48be43b3a4af14e29cdb3a7f/big_vision/trainers/proj/image_text/siglip2.py#L287
eye = torch.eye(logits_per_text.size(0), device=logits_per_text.device)
m1_diag1 = -torch.ones_like(logits_per_text) + 2 * eye
loglik = torch.nn.functional.logsigmoid(m1_diag1 * logits_per_text)
nll = -torch.sum(loglik, dim=-1)
loss = nll.mean()
return Siglip2Output(
loss=loss,
logits_per_image=logits_per_image,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
image_embeds=image_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
)
| Siglip2Model |
python | spack__spack | lib/spack/spack/directives_meta.py | {
"start": 9520,
"end": 9641
} | class ____(spack.error.SpackError):
"""This is raised when something is wrong with a package directive."""
| DirectiveError |
python | mahmoud__boltons | boltons/cacheutils.py | {
"start": 13180,
"end": 15072
} | class ____(list):
"""The _HashedKey guarantees that hash() will be called no more than once
per cached function invocation.
"""
__slots__ = 'hash_value'
def __init__(self, key):
self[:] = key
self.hash_value = hash(tuple(key))
def __hash__(self):
return self.hash_value
def __repr__(self):
return f'{self.__class__.__name__}({list.__repr__(self)})'
def make_cache_key(args, kwargs, typed=False,
kwarg_mark=_KWARG_MARK,
fasttypes=frozenset([int, str, frozenset, type(None)])):
"""Make a generic key from a function's positional and keyword
arguments, suitable for use in caches. Arguments within *args* and
*kwargs* must be `hashable`_. If *typed* is ``True``, ``3`` and
``3.0`` will be treated as separate keys.
The key is constructed in a way that is flat as possible rather than
as a nested structure that would take more memory.
If there is only a single argument and its data type is known to cache
its hash value, then that argument is returned without a wrapper. This
saves space and improves lookup speed.
>>> tuple(make_cache_key(('a', 'b'), {'c': ('d')}))
('a', 'b', _KWARG_MARK, ('c', 'd'))
.. _hashable: https://docs.python.org/2/glossary.html#term-hashable
"""
# key = [func_name] if func_name else []
# key.extend(args)
key = list(args)
if kwargs:
sorted_items = sorted(kwargs.items())
key.append(kwarg_mark)
key.extend(sorted_items)
if typed:
key.extend([type(v) for v in args])
if kwargs:
key.extend([type(v) for k, v in sorted_items])
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedKey(key)
# for backwards compatibility in case someone was importing it
_make_cache_key = make_cache_key
| _HashedKey |
python | pytorch__pytorch | test/distributed/test_dynamo_distributed.py | {
"start": 11117,
"end": 19190
} | class ____(torch._dynamo.test_case.TestCase):
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@patch.object(config, "optimize_ddp", True)
@patch.object(torch._inductor.config, "fallback_random", True)
@unittest.skipIf(
torch._inductor.config.triton.native_matmul,
"FIXME : native matmul fails. RuntimeError: Cannot access data pointer of Tensor",
)
def test_hf_bert_ddp_inductor(self):
model, inputs = get_hf_bert(0)
model = FakeDDP(model)
run_hf_bert_ddp(self, model, inputs, "inductor")
@patch.object(config, "optimize_ddp", True)
def test_hf_bert_ddp_aot_eager(self):
model, inputs = get_hf_bert(0)
model = FakeDDP(model)
run_hf_bert_ddp(self, model, inputs, "aot_eager")
@patch.object(config, "optimize_ddp", True)
def test_issue90375(self):
class Model(nn.Module):
def forward(self):
return torch.randn(3) * torch.randn(3)
model = Model()
model = FakeDDP(model)
opt_model = torch.compile(model, backend="aot_eager")
opt_model()
@patch.object(config, "optimize_ddp", True)
def test_symbol_splitting(self):
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight1 = nn.Parameter(torch.randn(512, 512))
self.weight2 = nn.Parameter(torch.randn(512, 512))
def forward(self, x):
x = torch.cat([x, x])
y = x @ self.weight1
z = x + y @ self.weight2
return z
model = Model()
model = FakeDDP(model)
opt_model = torch.compile(dynamic=True)(model)
opt_model(torch.randn(20, 512))
@patch.object(config, "optimize_ddp", True)
def test_ddp_optimizer_inductor_strides_dont_specialize(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc_0 = nn.Linear(768, 768)
self.fc_1 = nn.Linear(768, 768)
def forward(self, x):
x = self.fc_0(x)
x = self.fc_1(x)
return x
model = Model()
model = FakeDDP(model)
inp = torch.randn((16, 18, 768))
inp2 = torch.randn((16, 20, 768))
torch._dynamo.mark_dynamic(inp, 1)
torch._dynamo.mark_dynamic(inp2, 1)
torch._dynamo.utils.clear_compilation_metrics()
torch._dynamo.reset()
try:
DDP._active_ddp_module = model
opt_model = torch.compile(model)
self.assertEqual(0, len(torch._dynamo.utils.get_compilation_metrics()))
opt_model(inp)
compile_count_before = len(torch._dynamo.utils.get_compilation_metrics())
opt_model(inp2)
compile_count_after = len(torch._dynamo.utils.get_compilation_metrics())
# no recompiles
self.assertEqual(compile_count_before, compile_count_after)
finally:
DDP._active_ddp_module = None
@config.patch(optimize_ddp=True, capture_scalar_outputs=True)
def test_unbacked_symbol_splitting_direct(self):
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight1 = nn.Parameter(torch.randn(512, 512))
self.weight2 = nn.Parameter(torch.randn(512, 512))
def forward(self, x, y):
u0, _ = y.tolist()
x = torch.cat([x, x])
y = x @ self.weight1
z = (x + y @ self.weight2) * u0
return z
model = Model()
model = FakeDDP(model)
opt_model = torch.compile(dynamic=True)(model)
opt_model(torch.randn(20, 512), torch.tensor([12, 13]))
@config.patch(optimize_ddp=True, capture_scalar_outputs=True)
def test_unbacked_symbol_splitting_indirect(self):
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight1 = nn.Parameter(torch.randn(512, 512))
self.weight2 = nn.Parameter(torch.randn(512, 512))
def forward(self, x, y):
u0, _ = y.tolist()
a = torch.ones(u0)
x = torch.cat([x, x])
y = x @ self.weight1
z = (x + y @ self.weight2) * a.sum()
return z
model = Model()
model = FakeDDP(model)
opt_model = torch.compile(dynamic=True)(model)
opt_model(torch.randn(20, 512), torch.tensor([12, 13]))
@config.patch(optimize_ddp=True, capture_scalar_outputs=True)
def test_unbacked_symbol_splitting_torture_multi(self):
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight1 = nn.Parameter(torch.randn(512, 512))
self.weight2 = nn.Parameter(torch.randn(512, 512))
self.weight3 = nn.Parameter(torch.randn(512, 512))
def forward(self, x, y):
# partition one (contains the u0 def)
u0, _ = y.tolist()
x = torch.cat([x, x])
y1 = x @ self.weight1
# partition two (contains the variable)
y2 = y1 @ self.weight2
a = torch.ones(u0)
# partition three
z = (x + y2 @ self.weight3) * a.sum()
return z
model = Model()
model = FakeDDP(model, bucket_cap_mb=1)
opt_model = torch.compile(dynamic=True)(model)
opt_model(torch.randn(20, 512), torch.tensor([12, 13]))
@config.patch(optimize_ddp=True, capture_dynamic_output_shape_ops=True)
def test_unbacked_symbol_splitting_no_binding(self):
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight1 = nn.Parameter(torch.randn(512, 512))
self.weight2 = nn.Parameter(torch.randn(512, 512))
def forward(self, x, y):
nz = y.nonzero()
x = torch.cat([x, x])
y = x @ self.weight1
z = (x + y @ self.weight2) * (nz + 1).sum()
return z
model = Model()
model = FakeDDP(model)
opt_model = torch.compile(dynamic=True)(model)
opt_model(torch.randn(20, 512), torch.tensor([0.0, 12.0, 0.0, 11.0]))
@patch.object(config, "optimize_ddp", True)
def test_call_method_forward(self):
class Model(nn.Module):
def __init__(
self,
):
super().__init__()
layers = []
for _ in range(2):
layer = nn.ModuleList(
[
nn.LayerNorm(96),
nn.MultiheadAttention(
embed_dim=96, num_heads=4, batch_first=True
),
]
)
layers.append(layer)
self.layers = nn.ModuleList(layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# x: [Batch, Freq, Time, Feature]
B, F, T, H = x.shape
for m in self.layers:
x = x.reshape(B * F, T, H)
x = m[0](x)
x, _ = m[1].forward(x, x, x)
x = x.reshape(B, F, T, H)
return x
model = Model()
model = FakeDDP(model)
opt_model = torch.compile(model)
opt_model(torch.randn(2, 129, 100, 96))
# Are these tests failing? Check and see if TestFakeDistributedSingleProc has a
# single process version; if it's just a problem in the Dynamo distributed
# # optimizer, you should be able to repro it single process!
@requires_accelerator_dist_backend(["nccl", "xccl"])
| TestFakeDistributedSingleProc |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/components.py | {
"start": 19646,
"end": 20647
} | class ____(UserComponent):
"""
This class helps visualize Error's on the `MetaflowCard`. It can help catch and print stack traces to errors that happen in `@step` code.
### Parameters
- `exception` (Exception) : The `Exception` to visualize. This value will be `repr`'d before passed down to `MetaflowCard`
- `title` (str) : The title that will appear over the visualized `Exception`.
### Usage
```python
@card
@step
def my_step(self):
from metaflow.cards import Error
from metaflow import current
try:
...
...
except Exception as e:
current.card.append(
Error(e,"Something misbehaved")
)
...
```
"""
def __init__(self, exception, title=None):
self._exception = exception
self._title = title
@render_safely
def render(self):
return LogComponent("%s\n\n%s" % (self._title, repr(self._exception))).render()
| Error |
python | kubernetes-client__python | kubernetes/client/api/authorization_api.py | {
"start": 543,
"end": 5199
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/authorization.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| AuthorizationApi |
python | getsentry__sentry | src/sentry/metrics/dualwrite.py | {
"start": 584,
"end": 7156
} | class ____(MetricsBackend):
"""
This backend will send metrics to one or two backends, depending on options.
The backends are configured using the `primary_backend{_args}` and `secondary_backend{_args}`
kwargs.
Metrics are routed based on two allow-lists:
- `secondary_prefixes`: If the metric matches any of these prefixes,
it is routed *only* to *only* the secondary backend.
(for backwards compatibility reasons, the `allow_prefixes` list acts as a fallback)
- `distribution_prefixes`: If the metric matches any of these prefixes,
any `distribution` or `timing` metric is routed to *both* backends,
all other metrics are routed *only* to the primary backend.
- If the metric is not matched by any prefix, it is routed to *only* the primary backend.
Additionally, an `experimental_backend` can be configured with `experimental_args`.
Metrics will always be sent to the experimental backend (in addition to primary/secondary)
unless they match the `deny_list` prefixes in `experimental_args`.
"""
def __init__(self, **kwargs: Any):
super().__init__()
self._primary_backend = _initialize_backend(
kwargs.pop("primary_backend", None), kwargs.pop("primary_backend_args", {})
)
self._secondary_backend = _initialize_backend(
kwargs.pop("secondary_backend", None), kwargs.pop("secondary_backend_args", {})
)
self._experimental_backend = _initialize_backend(
kwargs.pop("experimental_backend", None), kwargs.pop("experimental_args", {})
)
self._distribution_prefixes = tuple(kwargs.pop("distribution_prefixes", []))
self._secondary_prefixes = tuple(
kwargs.pop("secondary_prefixes", []) or kwargs.pop("allow_prefixes", [])
)
def _distribution_choice(self, key: str) -> tuple[bool, bool]:
if key.startswith(self._secondary_prefixes):
return False, True
if key.startswith(self._distribution_prefixes):
return True, True
return True, False
def _other_choice(self, key: str) -> tuple[bool, bool]:
if key.startswith(self._secondary_prefixes):
return False, True
return True, False
def incr(
self,
key: str,
instance: str | None = None,
tags: Tags | None = None,
amount: float | int = 1,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
use_primary, use_secondary = self._other_choice(key)
if use_primary:
self._primary_backend.incr(
key, instance, tags, amount, sample_rate, unit, stacklevel + 1
)
if use_secondary:
self._secondary_backend.incr(
key, instance, tags, amount, sample_rate, unit, stacklevel + 1
)
self._experimental_backend.incr(
key, instance, tags, amount, sample_rate, unit, stacklevel + 1
)
def timing(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
stacklevel: int = 0,
) -> None:
use_primary, use_secondary = self._distribution_choice(key)
if use_primary:
self._primary_backend.timing(key, value, instance, tags, sample_rate, stacklevel + 1)
if use_secondary:
self._secondary_backend.timing(key, value, instance, tags, sample_rate, stacklevel + 1)
self._experimental_backend.timing(key, value, instance, tags, sample_rate, stacklevel + 1)
def gauge(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
use_primary, use_secondary = self._other_choice(key)
if use_primary:
self._primary_backend.gauge(
key, value, instance, tags, sample_rate, unit, stacklevel + 1
)
if use_secondary:
self._secondary_backend.gauge(
key, value, instance, tags, sample_rate, unit, stacklevel + 1
)
self._experimental_backend.gauge(
key, value, instance, tags, sample_rate, unit, stacklevel + 1
)
def distribution(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
use_primary, use_secondary = self._distribution_choice(key)
if use_primary:
self._primary_backend.distribution(
key, value, instance, tags, sample_rate, unit, stacklevel + 1
)
if use_secondary:
self._secondary_backend.distribution(
key, value, instance, tags, sample_rate, unit, stacklevel + 1
)
self._experimental_backend.distribution(
key, value, instance, tags, sample_rate, unit, stacklevel + 1
)
def event(
self,
title: str,
message: str,
alert_type: str | None = None,
aggregation_key: str | None = None,
source_type_name: str | None = None,
priority: str | None = None,
instance: str | None = None,
tags: Tags | None = None,
stacklevel: int = 0,
) -> None:
use_primary, use_secondary = self._other_choice(title)
if use_primary:
self._primary_backend.event(
title,
message,
alert_type,
aggregation_key,
source_type_name,
priority,
instance,
tags,
stacklevel + 1,
)
if use_secondary:
self._secondary_backend.event(
title,
message,
alert_type,
aggregation_key,
source_type_name,
priority,
instance,
tags,
stacklevel + 1,
)
self._experimental_backend.event(
title,
message,
alert_type,
aggregation_key,
source_type_name,
priority,
instance,
tags,
stacklevel + 1,
)
| DualWriteMetricsBackend |
python | FactoryBoy__factory_boy | examples/django_demo/generic_foreignkey/models.py | {
"start": 155,
"end": 513
} | class ____(models.Model):
"""Example GenericForeignKey model from django docs"""
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.tag
| TaggedItem |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 6497,
"end": 6763
} | class ____:
# https://github.com/pandas-dev/pandas/issues/19764
def setup(self):
self.s = Series(1, index=date_range("2012-01-01", freq="s", periods=10**6))
def time_series_datetimeindex_repr(self):
getattr(self.s, "a", None)
| SeriesGetattr |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/non_slot_assignment.py | {
"start": 674,
"end": 924
} | class ____(object):
__slots__ = ("name", "surname")
def __init__(self, name, middle_name):
self.name = name
self.middle_name = middle_name # [assigning-non-slot]
self.setup()
def setup(self):
pass
| StudentD |
python | jazzband__django-simple-history | simple_history/registry_tests/migration_test_app/migrations/0005_historicalmodelwithcustomattronetoonefield_modelwithcustomattronetoonefield.py | {
"start": 251,
"end": 3229
} | class ____(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
(
"migration_test_app",
"0004_history_date_indexing",
),
]
operations = [
migrations.CreateModel(
name="ModelWithCustomAttrOneToOneField",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"what_i_mean",
simple_history.registry_tests.migration_test_app.models.CustomAttrNameOneToOneField(
attr_name="custom_attr_name",
on_delete=django.db.models.deletion.CASCADE,
to="migration_test_app.whatimean",
),
),
],
),
migrations.CreateModel(
name="HistoricalModelWithCustomAttrOneToOneField",
fields=[
(
"id",
models.IntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField()),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
max_length=1,
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"what_i_mean",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="migration_test_app.whatimean",
),
),
],
options={
"verbose_name": "historical model with custom attr one to one field",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
| Migration |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 48664,
"end": 49202
} | class ____(Elemwise):
_parameters = ["frame", "errors", "downcast", "meta"]
_defaults = {"errors": "raise", "downcast": None, "meta": None}
_keyword_only = ["meta"]
operation = staticmethod(pd.to_numeric)
@functools.cached_property
def _kwargs(self):
kwargs = super()._kwargs
kwargs.pop("meta", None)
return kwargs
@functools.cached_property
def _meta(self):
if self.operand("meta") is not None:
return self.operand("meta")
return super()._meta
| ToNumeric |
python | wandb__wandb | tests/unit_tests/test_artifacts/test_saved_model.py | {
"start": 2523,
"end": 5979
} | class ____(Artifact):
def _fetch_manifest(self) -> None: # type: ignore
return None
def make_local_artifact_public(art: Artifact, mocker: MockerFixture):
from wandb.sdk.artifacts._validators import FullArtifactPath
path = FullArtifactPath(
prefix="FAKE_ENTITY",
project="FAKE_PROJECT",
name="FAKE_NAME",
)
fragment = ArtifactFragment(
id="FAKE_ID",
artifactType={"name": "FAKE_TYPE_NAME"},
aliases=[
{
"id": "FAKE_ALIAS_ID",
"alias": "v0",
"artifactCollection": {
"__typename": "ArtifactSequence",
"name": path.name,
"project": {
"name": path.project,
"entity": {"name": path.prefix},
},
},
}
],
artifactSequence={
"name": "FAKE_SEQUENCE_NAME",
"project": {
"name": path.project,
"entity": {"name": path.prefix},
},
},
versionIndex=0,
description=None,
metadata=None,
state="COMMITTED",
size=0,
digest="FAKE_DIGEST",
commitHash="FAKE_HASH",
fileCount=0,
createdAt="FAKE_CREATED_AT",
updatedAt=None,
)
pub = ArtifactPatch._from_attrs(
path,
fragment,
client=mocker.Mock(spec=RetryingClient),
)
pub._manifest = art._manifest
return pub
# External SavedModel tests (user facing)
def saved_model_test(mocker, model, py_deps=None):
with pytest.raises(TypeError):
_ = saved_model._SavedModel(model)
kwargs = {}
if py_deps:
kwargs["dep_py_files"] = py_deps
sm = saved_model._SavedModel.init(model, **kwargs)
# Patch the download method of the ArtifactManifestEntry
# so we can simulate downloading an artifact without
# actually making a network round trip (using the local filesystem)
def _mock_download(self, root=None, skip_cache=None, executor=None):
root = root or self._parent_artifact._default_root()
dest = os.path.join(root, self.path)
return copy_or_overwrite_changed(self.local_path, dest)
mocker.patch.object(
ArtifactManifestEntry,
"download",
autospec=True,
side_effect=_mock_download,
)
mocker.patch.object(
ArtifactManifestEntry,
"_referenced_artifact_id",
autospec=True,
return_value=None,
)
art = wandb.Artifact("name", "type")
art.add(sm, "model")
assert art.manifest.entries[f"model.{sm._log_type}.json"] is not None
pub_art = make_local_artifact_public(art, mocker)
sm2 = pub_art.get("model")
assert sm2 is not None
# # Internal adapter tests (non user facing)
def subclass_test(
adapter_cls,
valid_models,
invalid_models,
):
# Verify valid models can be adapted
for model in valid_models:
assert adapter_cls._validate_obj(model)
# Verify invalid models are denied
for model in invalid_models:
assert not adapter_cls._validate_obj(model)
# Verify file-level serialization and deserialization
for model in valid_models:
path = adapter_cls._tmp_path()
adapter_cls._serialize(model, path)
model2 = adapter_cls._deserialize(path)
assert model2 is not None
| ArtifactPatch |
python | pennersr__django-allauth | allauth/socialaccount/providers/feishu/provider.py | {
"start": 346,
"end": 720
} | class ____(OAuth2Provider):
id = "feishu"
name = "feishu"
account_class = FeishuAccount
oauth2_adapter_class = FeishuOAuth2Adapter
def extract_uid(self, data):
return data["open_id"]
def extract_common_fields(self, data):
return dict(username=data.get("name"), name=data.get("name"))
provider_classes = [FeishuProvider]
| FeishuProvider |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_events/client.py | {
"start": 1478,
"end": 2756
} | class ____(BaseAsyncClient):
async def read_events(
self,
filter: "EventFilter | None" = None,
limit: int = 100,
) -> EventPage:
"""
query historical events from the API.
args:
filter: optional filter criteria to narrow down events
limit: maximum number of events to return per page (default 100)
returns:
EventPage containing events, total count, and next page link
"""
response = await self.request(
"POST",
"/events/filter",
json={
"filter": filter.model_dump(mode="json") if filter else None,
"limit": limit,
},
)
return EventPage.model_validate(response.json())
async def read_events_page(self, next_page_url: str) -> EventPage:
"""
retrieve the next page of events using a next_page URL.
args:
next_page_url: the next_page URL from a previous EventPage response
returns:
EventPage containing the next page of events
"""
response = await self._client.get(str(next_page_url))
response.raise_for_status()
return EventPage.model_validate(response.json())
| EventAsyncClient |
python | ray-project__ray | python/ray/_common/usage/usage_lib.py | {
"start": 2418,
"end": 2697
} | class ____:
cloud_provider: Optional[str] = None
min_workers: Optional[int] = None
max_workers: Optional[int] = None
head_node_instance_type: Optional[str] = None
worker_node_instance_types: Optional[List[str]] = None
@dataclass(init=True)
| ClusterConfigToReport |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_metaclass.py | {
"start": 458,
"end": 518
} | class ____:
pass
@six.add_metaclass(abc.ABCMeta)
| FirstGood |
python | fastai__fastai | fastai/layers.py | {
"start": 5018,
"end": 5284
} | class ____: Avg,Max,Cat = 'Avg','Max','Cat'
# %% ../nbs/01_layers.ipynb 32
def adaptive_pool(pool_type):
return nn.AdaptiveAvgPool2d if pool_type=='Avg' else nn.AdaptiveMaxPool2d if pool_type=='Max' else AdaptiveConcatPool2d
# %% ../nbs/01_layers.ipynb 33
| PoolType |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_checkpoint.py | {
"start": 2067,
"end": 9459
} | class ____(FSDPTest):
class SequentialModule(nn.Module):
def __init__(
self,
checkpoint_layer=False,
offload_activations=False,
wrap_fsdp=False,
*fsdp_args,
**fsdp_kwargs,
):
torch.manual_seed(0)
super().__init__()
l1 = nn.Linear(3, 3).to(device_type.type)
l2 = nn.Linear(3, 3).to(device_type.type)
l3 = nn.Linear(3, 3).to(device_type.type)
if checkpoint_layer:
if offload_activations:
ckpt_wrapper = offload_wrapper
else:
ckpt_wrapper = checkpoint_wrapper
l1 = ckpt_wrapper(l1)
l2 = ckpt_wrapper(l2)
l3 = ckpt_wrapper(l3)
fsdp_wrapper = partial(
_maybe_wrap_fsdp, *fsdp_args, wrap_fsdp=wrap_fsdp, **fsdp_kwargs
)
self.ffn = nn.Sequential(
fsdp_wrapper(l1),
fsdp_wrapper(l2),
fsdp_wrapper(l3),
)
def forward(self, x):
return self.ffn(x)
def _verify_parity(self, losses, outputs, models):
assert losses
assert outputs
assert models
for l, o in zip(losses[1:], outputs[1:]):
self.assertEqual(losses[0], l)
self.assertEqual(outputs[0], o)
# Verify grads
ref_model = models[0]
ref_grads = [p.grad for p in ref_model.parameters()]
for m in models[1:]:
grads = [p.grad for p in m.parameters()]
for ref_g, g in zip(ref_grads, grads):
self.assertEqual(ref_g, g)
@skip_if_lt_x_gpu(2)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
@parametrize("offload_activations", [True, False])
@parametrize("use_orig_params", [False, True])
def test_checkpoint_fsdp_wrapping(
self,
cpu_offload: CPUOffload,
offload_activations: bool,
use_orig_params: bool,
):
# Test checkpoint(FSDP(layer1), FSDP(layer2), ....)
if offload_activations:
wrapper_to_use = offload_wrapper
else:
wrapper_to_use = checkpoint_wrapper
fsdp_kwargs = {"cpu_offload": cpu_offload, "use_orig_params": use_orig_params}
ckpt_sequential_wrapped_fsdp = wrapper_to_use(
TestFSDPCheckpoint.SequentialModule(
wrap_fsdp=True,
**fsdp_kwargs,
),
)
# Test FSDP(checkpoint(layer1)), FSDP(checkpoint(layer2)), ....
inner_ckpt = TestFSDPCheckpoint.SequentialModule(
checkpoint_layer=True,
offload_activations=offload_activations,
wrap_fsdp=True,
**fsdp_kwargs,
)
baseline = TestFSDPCheckpoint.SequentialModule(
wrap_fsdp=True,
**fsdp_kwargs,
)
# note that reentrant-based checkpointing requires inputs to have grad
# flag set.
inp = torch.randn(10, 3, device=device_type.type, requires_grad=True)
global _save_on_cpu_called
models = [ckpt_sequential_wrapped_fsdp, inner_ckpt, baseline]
with patch_save_on_cpu(get_patched_save_on_cpu()):
for i in range(2):
losses = []
outputs = []
for m in models:
check_offload = m != baseline and i == 0 and offload_activations
if check_offload:
self.assertFalse(_save_on_cpu_called)
out = m(inp)
if check_offload:
self.assertTrue(_save_on_cpu_called)
_save_on_cpu_called = False
loss = out.sum()
loss.backward()
losses.append(loss)
outputs.append(out)
self._verify_parity(losses, outputs, models)
dist.barrier()
@skip_if_lt_x_gpu(2)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
@parametrize("offload_activations", [True, False])
@parametrize("use_orig_params", [False, True])
def test_basic_checkpoint_end_to_end(
self,
cpu_offload: CPUOffload,
offload_activations: bool,
use_orig_params: bool,
):
fsdp_kwargs = {"cpu_offload": cpu_offload, "use_orig_params": use_orig_params}
global _save_on_cpu_called
with patch_save_on_cpu(get_patched_save_on_cpu()):
seq = TestFSDPCheckpoint.SequentialModule().to(device_type.type)
# Runs FSDP with no checkpointing
fsdp_only_seq = FSDP(deepcopy(seq), **fsdp_kwargs)
# Runs checkpoint-wrapped FSDP
if offload_activations:
wrapper_to_use = offload_wrapper
else:
wrapper_to_use = checkpoint_wrapper
checkpointed_fsdp = wrapper_to_use(
FSDP(deepcopy(seq), **fsdp_kwargs),
)
# Runs FSDP-wrapped checkpointed module
fsdp_wrapped_checkpoint = FSDP(
wrapper_to_use(deepcopy(seq)),
**fsdp_kwargs,
)
# Runs FSDP with manual calls to checkpoint.
fsdp_call_checkpoint = FSDP(deepcopy(seq), **fsdp_kwargs)
# note that reentrant-based checkpointing requires inputs to have grad
# flag set.
inp = torch.randn(10, 3, device=device_type.type, requires_grad=True)
models = [
fsdp_only_seq,
checkpointed_fsdp,
fsdp_wrapped_checkpoint,
fsdp_call_checkpoint,
]
# Ensure _save_on_cpu is not yet called
self.assertFalse(_save_on_cpu_called)
for i in range(6):
losses = []
outputs = []
for m in models:
check_offload = (
m != fsdp_only_seq and i == 0 and offload_activations
)
if m == fsdp_call_checkpoint:
# _save_on_cpu should not be called yet
self.assertFalse(_save_on_cpu_called)
offload_ctx = (
get_patched_save_on_cpu()(pin_memory=True)
if offload_activations
else contextlib.nullcontext()
)
with offload_ctx:
out = checkpoint(m, inp, use_reentrant=True)
else:
# _save_on_cpu should not be called yet
self.assertFalse(_save_on_cpu_called)
out = m(inp)
if check_offload:
self.assertTrue(_save_on_cpu_called)
loss = out.sum()
loss.backward()
losses.append(loss)
outputs.append(out)
_save_on_cpu_called = False
self._verify_parity(losses, outputs, models)
dist.barrier()
instantiate_parametrized_tests(TestFSDPCheckpoint)
| TestFSDPCheckpoint |
python | python-pillow__Pillow | src/PIL/QoiImagePlugin.py | {
"start": 936,
"end": 4640
} | class ____(ImageFile.PyDecoder):
_pulls_fd = True
_previous_pixel: bytes | bytearray | None = None
_previously_seen_pixels: dict[int, bytes | bytearray] = {}
def _add_to_previous_pixels(self, value: bytes | bytearray) -> None:
self._previous_pixel = value
r, g, b, a = value
hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64
self._previously_seen_pixels[hash_value] = value
def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
assert self.fd is not None
self._previously_seen_pixels = {}
self._previous_pixel = bytearray((0, 0, 0, 255))
data = bytearray()
bands = Image.getmodebands(self.mode)
dest_length = self.state.xsize * self.state.ysize * bands
while len(data) < dest_length:
byte = self.fd.read(1)[0]
value: bytes | bytearray
if byte == 0b11111110 and self._previous_pixel: # QOI_OP_RGB
value = bytearray(self.fd.read(3)) + self._previous_pixel[3:]
elif byte == 0b11111111: # QOI_OP_RGBA
value = self.fd.read(4)
else:
op = byte >> 6
if op == 0: # QOI_OP_INDEX
op_index = byte & 0b00111111
value = self._previously_seen_pixels.get(
op_index, bytearray((0, 0, 0, 0))
)
elif op == 1 and self._previous_pixel: # QOI_OP_DIFF
value = bytearray(
(
(self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2)
% 256,
(self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2)
% 256,
(self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256,
self._previous_pixel[3],
)
)
elif op == 2 and self._previous_pixel: # QOI_OP_LUMA
second_byte = self.fd.read(1)[0]
diff_green = (byte & 0b00111111) - 32
diff_red = ((second_byte & 0b11110000) >> 4) - 8
diff_blue = (second_byte & 0b00001111) - 8
value = bytearray(
tuple(
(self._previous_pixel[i] + diff_green + diff) % 256
for i, diff in enumerate((diff_red, 0, diff_blue))
)
)
value += self._previous_pixel[3:]
elif op == 3 and self._previous_pixel: # QOI_OP_RUN
run_length = (byte & 0b00111111) + 1
value = self._previous_pixel
if bands == 3:
value = value[:3]
data += value * run_length
continue
self._add_to_previous_pixels(value)
if bands == 3:
value = value[:3]
data += value
self.set_as_raw(data)
return -1, 0
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
if im.mode == "RGB":
channels = 3
elif im.mode == "RGBA":
channels = 4
else:
msg = "Unsupported QOI image mode"
raise ValueError(msg)
colorspace = 0 if im.encoderinfo.get("colorspace") == "sRGB" else 1
fp.write(b"qoif")
fp.write(o32(im.size[0]))
fp.write(o32(im.size[1]))
fp.write(o8(channels))
fp.write(o8(colorspace))
ImageFile._save(im, fp, [ImageFile._Tile("qoi", (0, 0) + im.size)])
| QoiDecoder |
python | sanic-org__sanic | guide/webapp/display/page/docobject.py | {
"start": 484,
"end": 13840
} | class ____:
name: str
module_name: str
full_name: str
signature: inspect.Signature | None
docstring: Docstring
object_type: str = ""
methods: list[DocObject] = field(default_factory=list)
decorators: list[str] = field(default_factory=list)
def _extract_classes_methods(obj, full_name, docstrings):
methods = []
for method_name, method in inspect.getmembers(obj, _is_public_member):
try:
signature = _get_method_signature(method)
docstring = inspect.getdoc(method)
decorators = _detect_decorators(obj, method)
methods.append(
DocObject(
name=method_name,
module_name="",
full_name=f"{full_name}.{method_name}",
signature=signature,
docstring=parse_docstring(docstring or ""),
decorators=decorators,
object_type=_get_object_type(method),
)
)
except ValueError:
pass
docstrings[full_name].methods = methods
def _get_method_signature(method):
try:
return inspect.signature(method)
except TypeError:
signature = None
if func := getattr(method, "fget", None):
signature = inspect.signature(func)
return signature
def _is_public_member(obj: object) -> bool:
obj_name = getattr(obj, "__name__", "")
if func := getattr(obj, "fget", None):
obj_name = getattr(func, "__name__", "")
return (
not obj_name.startswith("_")
and not obj_name.isupper()
and (
inspect.ismethod(obj)
or inspect.isfunction(obj)
or isinstance(obj, property)
or isinstance(obj, property)
)
)
def _detect_decorators(cls, method):
decorators = []
method_name = getattr(method, "__name__", None)
if isinstance(cls.__dict__.get(method_name), classmethod):
decorators.append("classmethod")
if isinstance(cls.__dict__.get(method_name), staticmethod):
decorators.append("staticmethod")
if isinstance(method, property):
decorators.append("property")
return decorators
def _get_object_type(obj) -> str:
if inspect.isclass(obj):
return "class"
# If the object is a method, get the underlying function
if inspect.ismethod(obj):
obj = obj.__func__
# If the object is a coroutine or a coroutine function
if inspect.iscoroutine(obj) or inspect.iscoroutinefunction(obj):
return "async def"
return "def"
def organize_docobjects(package_name: str) -> dict[str, str]:
page_content: defaultdict[str, str] = defaultdict(str)
docobjects = _extract_docobjects(package_name)
page_registry: defaultdict[str, list[str]] = defaultdict(list)
for module, docobject in docobjects.items():
builder = Builder(name="Partial")
_docobject_to_html(docobject, builder)
ref = module.rsplit(".", module.count(".") - 1)[0]
page_registry[ref].append(module)
page_content[f"/api/{ref}.md"] += str(builder)
for ref, objects in page_registry.items():
page_content[f"/api/{ref}.md"] = (
_table_of_contents(objects) + page_content[f"/api/{ref}.md"]
)
return page_content
def _table_of_contents(objects: list[str]) -> str:
builder = Builder(name="Partial")
with builder.div(class_="table-of-contents"):
builder.h3("Table of Contents", class_="is-size-4")
for obj in objects:
module, name = obj.rsplit(".", 1)
builder.a(
E.strong(name),
E.small(module),
href=f"#{slugify(obj.replace('.', '-'))}",
class_="table-of-contents-item",
)
return str(builder)
def _extract_docobjects(package_name: str) -> dict[str, DocObject]:
docstrings = {}
package = importlib.import_module(package_name)
for _, name, _ in pkgutil.walk_packages(
package.__path__, package_name + "."
):
module = importlib.import_module(name)
for obj_name, obj in inspect.getmembers(module):
if (
obj_name.startswith("_")
or inspect.getmodule(obj) != module
or not callable(obj)
):
continue
try:
signature = inspect.signature(obj)
except ValueError:
signature = None
docstring = inspect.getdoc(obj)
full_name = f"{name}.{obj_name}"
docstrings[full_name] = DocObject(
name=obj_name,
full_name=full_name,
module_name=name,
signature=signature,
docstring=parse_docstring(docstring or ""),
object_type=_get_object_type(obj),
)
if inspect.isclass(obj):
_extract_classes_methods(obj, full_name, docstrings)
return docstrings
def _docobject_to_html(
docobject: DocObject, builder: Builder, as_method: bool = False
) -> None:
anchor_id = slugify(docobject.full_name.replace(".", "-"))
anchor = E.a("#", class_="anchor", href=f"#{anchor_id}")
class_name, heading = _define_heading_and_class(
docobject, anchor, as_method
)
with builder.div(class_=class_name):
builder(heading)
if docobject.docstring.short_description:
builder.div(
HTML(render_markdown(docobject.docstring.short_description)),
class_="short-description mt-3 is-size-5",
)
if docobject.object_type == "class":
mro = [
item
for idx, item in enumerate(
inspect.getmro(
getattr(
importlib.import_module(docobject.module_name),
docobject.name,
)
)
)
if idx > 0 and item not in (object, type)
]
if mro:
builder.div(
E.span("Inherits from: ", class_="is-italic"),
E.span(
", ".join([cls.__name__ for cls in mro]),
class_="has-text-weight-bold",
),
class_="short-description mt-3 is-size-5",
)
builder.p(
HTML(
_signature_to_html(
docobject.name,
docobject.object_type,
docobject.signature,
docobject.decorators,
)
),
class_="signature notification is-family-monospace",
)
if docobject.docstring.long_description:
builder.div(
HTML(render_markdown(docobject.docstring.long_description)),
class_="long-description mt-3",
)
if docobject.docstring.params:
with builder.div(class_="box mt-5"):
builder.h5(
"Parameters", class_="is-size-5 has-text-weight-bold"
)
_render_params(builder, docobject.docstring.params)
if docobject.docstring.returns:
_render_returns(builder, docobject)
if docobject.docstring.raises:
_render_raises(builder, docobject.docstring.raises)
if docobject.docstring.examples:
_render_examples(builder, docobject.docstring.examples)
for method in docobject.methods:
_docobject_to_html(method, builder, as_method=True)
def _signature_to_html(
name: str,
object_type: str,
signature: inspect.Signature | None,
decorators: list[str],
) -> str:
parts = []
parts.append("<span class='function-signature'>")
for decorator in decorators:
parts.append(
f"<span class='function-decorator'>@{decorator}</span><br>"
)
parts.append(
f"<span class='is-italic'>{object_type}</span> "
f"<span class='has-text-weight-bold'>{name}</span>("
)
if not signature:
parts.append("<span class='param-name'>self</span>)")
parts.append("</span>")
return "".join(parts)
for i, param in enumerate(signature.parameters.values()):
parts.append(f"<span class='param-name'>{escape(param.name)}</span>")
annotation = ""
if param.annotation != inspect.Parameter.empty:
annotation = escape(str(param.annotation))
parts.append(
f": <span class='param-annotation'>{annotation}</span>"
)
if param.default != inspect.Parameter.empty:
default = escape(str(param.default))
if annotation == "str":
default = f'"{default}"'
parts.append(f" = <span class='param-default'>{default}</span>")
if i < len(signature.parameters) - 1:
parts.append(", ")
parts.append(")")
if signature.return_annotation != inspect.Signature.empty:
return_annotation = escape(str(signature.return_annotation))
parts.append(
f": -> <span class='return-annotation'>{return_annotation}</span>"
)
parts.append("</span>")
return "".join(parts)
def _define_heading_and_class(
docobject: DocObject, anchor: Builder, as_method: bool
) -> tuple[str, Builder]:
anchor_id = slugify(docobject.full_name.replace(".", "-"))
anchor = E.a("#", class_="anchor", href=f"#{anchor_id}")
if as_method:
class_name = "method"
heading = E.h3(
docobject.name,
anchor,
class_="is-size-4 has-text-weight-bold mt-6",
id_=anchor_id,
)
else:
class_name = "docobject"
heading = E.h2(
E.span(docobject.module_name, class_="has-text-weight-light"),
".",
E.span(docobject.name, class_="has-text-weight-bold is-size-1"),
anchor,
class_="is-size-2",
id_=anchor_id,
)
return class_name, heading
def _render_params(builder: Builder, params: list[DocstringParam]) -> None:
for param in params:
with builder.dl(class_="mt-2"):
dt_args = [param.arg_name]
if param.type_name:
parts = [
E.br(),
E.span(
param.type_name,
class_=(
"has-text-weight-normal has-text-purple "
"is-size-7 ml-2"
),
),
]
dt_args.extend(parts)
builder.dt(*dt_args, class_="is-family-monospace")
builder.dd(
HTML(
render_markdown(
param.description
or param.arg_name
or param.type_name
or ""
)
)
)
def _render_raises(builder: Builder, raises: list[DocstringRaises]) -> None:
with builder.div(class_="box mt-5"):
builder.h5("Raises", class_="is-size-5 has-text-weight-bold")
for raise_ in raises:
with builder.dl(class_="mt-2"):
builder.dt(raise_.type_name, class_="is-family-monospace")
builder.dd(
HTML(
render_markdown(
raise_.description or raise_.type_name or ""
)
)
)
def _render_returns(builder: Builder, docobject: DocObject) -> None:
assert docobject.docstring.returns
return_type = docobject.docstring.returns.type_name
if not return_type or return_type == "None":
return
with builder.div(class_="box mt-5"):
if not return_type and docobject.signature:
return_type = docobject.signature.return_annotation
if not return_type or return_type == inspect.Signature.empty:
return_type = "N/A"
term = (
"Return"
if not docobject.docstring.returns.is_generator
else "Yields"
)
builder.h5(term, class_="is-size-5 has-text-weight-bold")
with builder.dl(class_="mt-2"):
builder.dt(return_type, class_="is-family-monospace")
builder.dd(
HTML(
render_markdown(
docobject.docstring.returns.description
or docobject.docstring.returns.type_name
or ""
)
)
)
def _render_examples(
builder: Builder, examples: list[DocstringExample]
) -> None:
with builder.div(class_="box mt-5"):
builder.h5("Examples", class_="is-size-5 has-text-weight-bold")
for example in examples:
with builder.div(class_="mt-2"):
builder(
HTML(
render_markdown(
example.description or example.snippet or ""
)
)
)
| DocObject |
python | astropy__astropy | astropy/visualization/wcsaxes/core.py | {
"start": 37913,
"end": 38017
} | class ____(subplot_class_factory(WCSAxes)):
"""
A subclass class for WCSAxes.
"""
| WCSAxesSubplot |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 186132,
"end": 186512
} | class ____:
def test_inhibit_any_policy(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "inhibit_any_policy_5.pem"),
x509.load_pem_x509_certificate,
)
iap = cert.extensions.get_extension_for_class(
x509.InhibitAnyPolicy
).value
assert iap.skip_certs == 5
| TestInhibitAnyPolicyExtension |
python | django__django | tests/model_enums/tests.py | {
"start": 7883,
"end": 7990
} | class ____(float, models.Choices):
PI = 3.141592653589793, "π"
TAU = 6.283185307179586, "τ"
| Constants |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_userroles_details.py | {
"start": 2120,
"end": 2559
} | class ____(UserRolesDetailsTest):
method = "DELETE"
def test_simple(self) -> None:
role1 = self.create_user_role(name="test-role")
role2 = self.create_user_role(name="test-role2")
resp = self.get_response("test-role")
assert resp.status_code == 204
assert not UserRole.objects.filter(id=role1.id).exists()
assert UserRole.objects.filter(id=role2.id).exists()
| UserRolesDetailsDeleteTest |
python | pytorch__pytorch | test/inductor/test_select_algorithm.py | {
"start": 18272,
"end": 21254
} | class ____(TestCase):
@requires_gpu()
@requires_triton()
@config.patch(cuda_backend="triton")
def test_finalized_subclass_hooks(self):
"""
Tests that all registered triton template hooks have been finalized,
especially in the case that the hooks are finalized manually by the
caller i.e. by calling template.finalize_hook(hook_name)
"""
hook_identifier = "# CUSTOM_HOOK"
class ExtensionTritonTemplateKernel(TritonTemplateKernel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._register_extra_template_env_fns(
self.custom_hook,
)
def custom_hook(self) -> str:
"""
Custom hook that just returns a test string for validation
"""
def hook() -> str:
return hook_identifier
return self._register_hook("<CUSTOM_HOOK>", hook)
def inductor_meta_common(self):
return super().inductor_meta_common()
class ExtensionTritonTemplate(TritonTemplate):
kernel_type = ExtensionTritonTemplateKernel
add_template = ExtensionTritonTemplate(
name="add",
grid=lambda *args, **kwargs: (1, 1, 1),
source=(
r"""
{{def_kernel("A", "B")}}
{{custom_hook()}}
xoffset = tl.program_id(0)
xindex = xoffset + tl.arange(0, XBLOCK)
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(A + xindex)
tmp1 = tl.load(B + xindex)
tmp2 = tmp0 + tmp1
{{store_output(("xindex",), "tmp2", mask="xmask", val_shape=("XBLOCK",))}}
"""
),
)
XBLOCK = 32
def add_override(a, b, alpha=None):
layout = FixedLayout(a.get_device(), a.get_dtype(), a.get_size())
choices = []
add_template.maybe_append_choice(
choices,
input_nodes=(a, b),
layout=layout,
num_stages=1,
num_warps=2,
XBLOCK=XBLOCK,
)
return autotune_select_algorithm("add", choices, [a, b], layout)
with patch_lowering(
{
torch.ops.aten.add.Tensor: (
add_override,
True,
ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
False,
)
}
):
@torch.compile
def add(a, b):
return a + b
a = torch.zeros((XBLOCK,), device=GPU_TYPE)
b = torch.zeros((XBLOCK,), device=GPU_TYPE)
_result, kernels = run_and_get_kernels(add, a, b)
assert len(kernels) == 1
assert hook_identifier in kernels[0]
if __name__ == "__main__":
if IS_LINUX and HAS_GPU and is_big_gpu():
run_tests()
| TestTemplateRender |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py | {
"start": 39442,
"end": 40684
} | class ____(Benchmark):
r"""
StyblinskiTang objective function.
This class defines the Styblinski-Tang [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{StyblinskiTang}}(x) = \sum_{i=1}^{n} \left(x_i^4
- 16x_i^2 + 5x_i \right)
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5, 5]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -39.16616570377142n` for
:math:`x_i = -2.903534018185960` for :math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
self.global_optimum = [[-2.903534018185960 for _ in range(self.N)]]
self.fglob = -39.16616570377142 * self.N
def fun(self, x, *args):
self.nfev += 1
return sum(x ** 4 - 16 * x ** 2 + 5 * x) / 2
| StyblinskiTang |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.