repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/git_tools.py | get_repo_hexsha | python | def get_repo_hexsha(git_folder):
repo = Repo(str(git_folder))
if repo.bare:
not_git_hexsha = "notgitrepo"
_LOGGER.warning("Not a git repo, SHA1 used will be: %s", not_git_hexsha)
return not_git_hexsha
hexsha = repo.head.commit.hexsha
_LOGGER.info("Found REST API repo SHA1: %s", hexsha)
return hexsha | Get the SHA1 of the current repo | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/git_tools.py#L49-L58 | null | """Pure git tools for managing local folder Git.
"""
import logging
from git import Repo, GitCommandError
_LOGGER = logging.getLogger(__name__)
def checkout_and_create_branch(repo, name):
"""Checkout branch. Create it if necessary"""
local_branch = repo.branches[name] if name in repo.branches else None
if not local_branch:
if name in repo.remotes.origin.refs:
# If origin branch exists but not local, git.checkout is the fatest way
# to create local branch with origin link automatically
msg = repo.git.checkout(name)
_LOGGER.debug(msg)
return
# Create local branch, will be link to origin later
local_branch = repo.create_head(name)
local_branch.checkout()
def checkout_create_push_branch(repo, name):
"""Checkout this branch. Create it if necessary, and push it to origin.
"""
try:
repo.git.checkout(name)
_LOGGER.info("Checkout %s success", name)
except GitCommandError:
_LOGGER.info("Checkout %s was impossible (branch does not exist). Creating it and push it.", name)
checkout_and_create_branch(repo, name)
repo.git.push('origin', name, set_upstream=True)
def do_commit(repo, message_template, branch_name, hexsha):
"Do a commit if modified/untracked files"
repo.git.add(repo.working_tree_dir)
if not repo.git.diff(staged=True):
_LOGGER.warning('No modified files in this Autorest run')
return False
checkout_and_create_branch(repo, branch_name)
msg = message_template.format(hexsha=hexsha)
commit = repo.index.commit(msg)
_LOGGER.info("Commit done: %s", msg)
return commit.hexsha
def checkout_with_fetch(git_folder, refspec, repository="origin"):
"""Fetch the refspec, and checkout FETCH_HEAD.
Beware that you will ne in detached head mode.
"""
_LOGGER.info("Trying to fetch and checkout %s", refspec)
repo = Repo(str(git_folder))
repo.git.fetch(repository, refspec) # FETCH_HEAD should be set
repo.git.checkout("FETCH_HEAD")
_LOGGER.info("Fetch and checkout success for %s", refspec)
def clone_to_path(https_authenticated_url, folder, branch_or_commit=None):
"""Clone the given URL to the folder.
:param str branch_or_commit: If specified, switch to this branch. Branch must exist.
"""
_LOGGER.info("Cloning repo")
repo = Repo.clone_from(https_authenticated_url, str(folder))
# Do NOT clone and set branch at the same time, since we allow branch to be a SHA1
# And you can't clone a SHA1
if branch_or_commit:
_LOGGER.info("Checkout branch_or_commit %s", branch_or_commit)
repo.git.checkout(branch_or_commit)
_LOGGER.info("Clone success")
def get_files_in_commit(git_folder, commit_id="HEAD"):
"""List of files in HEAD commit.
"""
repo = Repo(str(git_folder))
output = repo.git.diff("--name-only", commit_id+"^", commit_id)
return output.splitlines()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/git_tools.py | checkout_with_fetch | python | def checkout_with_fetch(git_folder, refspec, repository="origin"):
_LOGGER.info("Trying to fetch and checkout %s", refspec)
repo = Repo(str(git_folder))
repo.git.fetch(repository, refspec) # FETCH_HEAD should be set
repo.git.checkout("FETCH_HEAD")
_LOGGER.info("Fetch and checkout success for %s", refspec) | Fetch the refspec, and checkout FETCH_HEAD.
Beware that you will ne in detached head mode. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/git_tools.py#L60-L68 | null | """Pure git tools for managing local folder Git.
"""
import logging
from git import Repo, GitCommandError
_LOGGER = logging.getLogger(__name__)
def checkout_and_create_branch(repo, name):
"""Checkout branch. Create it if necessary"""
local_branch = repo.branches[name] if name in repo.branches else None
if not local_branch:
if name in repo.remotes.origin.refs:
# If origin branch exists but not local, git.checkout is the fatest way
# to create local branch with origin link automatically
msg = repo.git.checkout(name)
_LOGGER.debug(msg)
return
# Create local branch, will be link to origin later
local_branch = repo.create_head(name)
local_branch.checkout()
def checkout_create_push_branch(repo, name):
"""Checkout this branch. Create it if necessary, and push it to origin.
"""
try:
repo.git.checkout(name)
_LOGGER.info("Checkout %s success", name)
except GitCommandError:
_LOGGER.info("Checkout %s was impossible (branch does not exist). Creating it and push it.", name)
checkout_and_create_branch(repo, name)
repo.git.push('origin', name, set_upstream=True)
def do_commit(repo, message_template, branch_name, hexsha):
"Do a commit if modified/untracked files"
repo.git.add(repo.working_tree_dir)
if not repo.git.diff(staged=True):
_LOGGER.warning('No modified files in this Autorest run')
return False
checkout_and_create_branch(repo, branch_name)
msg = message_template.format(hexsha=hexsha)
commit = repo.index.commit(msg)
_LOGGER.info("Commit done: %s", msg)
return commit.hexsha
def get_repo_hexsha(git_folder):
"""Get the SHA1 of the current repo"""
repo = Repo(str(git_folder))
if repo.bare:
not_git_hexsha = "notgitrepo"
_LOGGER.warning("Not a git repo, SHA1 used will be: %s", not_git_hexsha)
return not_git_hexsha
hexsha = repo.head.commit.hexsha
_LOGGER.info("Found REST API repo SHA1: %s", hexsha)
return hexsha
def clone_to_path(https_authenticated_url, folder, branch_or_commit=None):
"""Clone the given URL to the folder.
:param str branch_or_commit: If specified, switch to this branch. Branch must exist.
"""
_LOGGER.info("Cloning repo")
repo = Repo.clone_from(https_authenticated_url, str(folder))
# Do NOT clone and set branch at the same time, since we allow branch to be a SHA1
# And you can't clone a SHA1
if branch_or_commit:
_LOGGER.info("Checkout branch_or_commit %s", branch_or_commit)
repo.git.checkout(branch_or_commit)
_LOGGER.info("Clone success")
def get_files_in_commit(git_folder, commit_id="HEAD"):
"""List of files in HEAD commit.
"""
repo = Repo(str(git_folder))
output = repo.git.diff("--name-only", commit_id+"^", commit_id)
return output.splitlines()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/git_tools.py | clone_to_path | python | def clone_to_path(https_authenticated_url, folder, branch_or_commit=None):
_LOGGER.info("Cloning repo")
repo = Repo.clone_from(https_authenticated_url, str(folder))
# Do NOT clone and set branch at the same time, since we allow branch to be a SHA1
# And you can't clone a SHA1
if branch_or_commit:
_LOGGER.info("Checkout branch_or_commit %s", branch_or_commit)
repo.git.checkout(branch_or_commit)
_LOGGER.info("Clone success") | Clone the given URL to the folder.
:param str branch_or_commit: If specified, switch to this branch. Branch must exist. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/git_tools.py#L70-L83 | null | """Pure git tools for managing local folder Git.
"""
import logging
from git import Repo, GitCommandError
_LOGGER = logging.getLogger(__name__)
def checkout_and_create_branch(repo, name):
"""Checkout branch. Create it if necessary"""
local_branch = repo.branches[name] if name in repo.branches else None
if not local_branch:
if name in repo.remotes.origin.refs:
# If origin branch exists but not local, git.checkout is the fatest way
# to create local branch with origin link automatically
msg = repo.git.checkout(name)
_LOGGER.debug(msg)
return
# Create local branch, will be link to origin later
local_branch = repo.create_head(name)
local_branch.checkout()
def checkout_create_push_branch(repo, name):
"""Checkout this branch. Create it if necessary, and push it to origin.
"""
try:
repo.git.checkout(name)
_LOGGER.info("Checkout %s success", name)
except GitCommandError:
_LOGGER.info("Checkout %s was impossible (branch does not exist). Creating it and push it.", name)
checkout_and_create_branch(repo, name)
repo.git.push('origin', name, set_upstream=True)
def do_commit(repo, message_template, branch_name, hexsha):
"Do a commit if modified/untracked files"
repo.git.add(repo.working_tree_dir)
if not repo.git.diff(staged=True):
_LOGGER.warning('No modified files in this Autorest run')
return False
checkout_and_create_branch(repo, branch_name)
msg = message_template.format(hexsha=hexsha)
commit = repo.index.commit(msg)
_LOGGER.info("Commit done: %s", msg)
return commit.hexsha
def get_repo_hexsha(git_folder):
"""Get the SHA1 of the current repo"""
repo = Repo(str(git_folder))
if repo.bare:
not_git_hexsha = "notgitrepo"
_LOGGER.warning("Not a git repo, SHA1 used will be: %s", not_git_hexsha)
return not_git_hexsha
hexsha = repo.head.commit.hexsha
_LOGGER.info("Found REST API repo SHA1: %s", hexsha)
return hexsha
def checkout_with_fetch(git_folder, refspec, repository="origin"):
"""Fetch the refspec, and checkout FETCH_HEAD.
Beware that you will ne in detached head mode.
"""
_LOGGER.info("Trying to fetch and checkout %s", refspec)
repo = Repo(str(git_folder))
repo.git.fetch(repository, refspec) # FETCH_HEAD should be set
repo.git.checkout("FETCH_HEAD")
_LOGGER.info("Fetch and checkout success for %s", refspec)
def get_files_in_commit(git_folder, commit_id="HEAD"):
"""List of files in HEAD commit.
"""
repo = Repo(str(git_folder))
output = repo.git.diff("--name-only", commit_id+"^", commit_id)
return output.splitlines()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/git_tools.py | get_files_in_commit | python | def get_files_in_commit(git_folder, commit_id="HEAD"):
repo = Repo(str(git_folder))
output = repo.git.diff("--name-only", commit_id+"^", commit_id)
return output.splitlines() | List of files in HEAD commit. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/git_tools.py#L85-L90 | null | """Pure git tools for managing local folder Git.
"""
import logging
from git import Repo, GitCommandError
_LOGGER = logging.getLogger(__name__)
def checkout_and_create_branch(repo, name):
"""Checkout branch. Create it if necessary"""
local_branch = repo.branches[name] if name in repo.branches else None
if not local_branch:
if name in repo.remotes.origin.refs:
# If origin branch exists but not local, git.checkout is the fatest way
# to create local branch with origin link automatically
msg = repo.git.checkout(name)
_LOGGER.debug(msg)
return
# Create local branch, will be link to origin later
local_branch = repo.create_head(name)
local_branch.checkout()
def checkout_create_push_branch(repo, name):
"""Checkout this branch. Create it if necessary, and push it to origin.
"""
try:
repo.git.checkout(name)
_LOGGER.info("Checkout %s success", name)
except GitCommandError:
_LOGGER.info("Checkout %s was impossible (branch does not exist). Creating it and push it.", name)
checkout_and_create_branch(repo, name)
repo.git.push('origin', name, set_upstream=True)
def do_commit(repo, message_template, branch_name, hexsha):
"Do a commit if modified/untracked files"
repo.git.add(repo.working_tree_dir)
if not repo.git.diff(staged=True):
_LOGGER.warning('No modified files in this Autorest run')
return False
checkout_and_create_branch(repo, branch_name)
msg = message_template.format(hexsha=hexsha)
commit = repo.index.commit(msg)
_LOGGER.info("Commit done: %s", msg)
return commit.hexsha
def get_repo_hexsha(git_folder):
"""Get the SHA1 of the current repo"""
repo = Repo(str(git_folder))
if repo.bare:
not_git_hexsha = "notgitrepo"
_LOGGER.warning("Not a git repo, SHA1 used will be: %s", not_git_hexsha)
return not_git_hexsha
hexsha = repo.head.commit.hexsha
_LOGGER.info("Found REST API repo SHA1: %s", hexsha)
return hexsha
def checkout_with_fetch(git_folder, refspec, repository="origin"):
"""Fetch the refspec, and checkout FETCH_HEAD.
Beware that you will ne in detached head mode.
"""
_LOGGER.info("Trying to fetch and checkout %s", refspec)
repo = Repo(str(git_folder))
repo.git.fetch(repository, refspec) # FETCH_HEAD should be set
repo.git.checkout("FETCH_HEAD")
_LOGGER.info("Fetch and checkout success for %s", refspec)
def clone_to_path(https_authenticated_url, folder, branch_or_commit=None):
"""Clone the given URL to the folder.
:param str branch_or_commit: If specified, switch to this branch. Branch must exist.
"""
_LOGGER.info("Cloning repo")
repo = Repo.clone_from(https_authenticated_url, str(folder))
# Do NOT clone and set branch at the same time, since we allow branch to be a SHA1
# And you can't clone a SHA1
if branch_or_commit:
_LOGGER.info("Checkout branch_or_commit %s", branch_or_commit)
repo.git.checkout(branch_or_commit)
_LOGGER.info("Clone success")
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | exception_to_github | python | def exception_to_github(github_obj_to_comment, summary=""):
context = ExceptionContext()
try:
yield context
except Exception: # pylint: disable=broad-except
if summary:
summary = ": ({})".format(summary)
error_type = "an unknown error"
try:
raise
except CalledProcessError as err:
error_type = "a Subprocess error"
content = "Command: {}\n".format(err.cmd)
content += "Finished with return code {}\n".format(err.returncode)
if err.output:
content += "and output:\n```shell\n{}\n```".format(err.output)
else:
content += "and no output"
except Exception: # pylint: disable=broad-except
content = "```python\n{}\n```".format(traceback.format_exc())
response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
error_type,
summary
)
response += content
response += "\n\n</p></details>"
context.comment = create_comment(github_obj_to_comment, response) | If any exception comes, log them in the given Github obj. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L28-L56 | [
"def create_comment(github_object, body):\n \"\"\"Create a comment, whatever the object is a PR, a commit or an issue.\n \"\"\"\n try:\n return github_object.create_issue_comment(body) # It's a PR\n except AttributeError:\n return github_object.create_comment(body) # It's a commit/issue... | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext: # pylint: disable=too-few-public-methods
def __init__(self):
self.comment = None
@contextmanager
def user_from_token(gh_token):
"""Get user login from GitHub token"""
github_con = Github(gh_token)
return github_con.get_user()
def create_comment(github_object, body):
"""Create a comment, whatever the object is a PR, a commit or an issue.
"""
try:
return github_object.create_issue_comment(body) # It's a PR
except AttributeError:
return github_object.create_comment(body) # It's a commit/issue
def get_comments(github_object):
"""Get a list of comments, whater the object is a PR, a commit or an issue.
"""
try:
return github_object.get_issue_comments() # It's a PR
except AttributeError:
return github_object.get_comments() # It's a commit/issue
def get_files(github_object):
"""Get files from a PR or a commit.
"""
try:
return github_object.get_files() # Try as a PR object
except AttributeError:
return github_object.files # Try as a commit object
def configure_user(gh_token, repo):
"""git config --global user.email "you@example.com"
git config --global user.name "Your Name"
"""
user = user_from_token(gh_token)
repo.git.config('user.email', user.email or 'aspysdk2@microsoft.com')
repo.git.config('user.name', user.name or 'SwaggerToSDK Automation')
def get_full_sdk_id(gh_token, sdk_git_id):
"""If the SDK git id is incomplete, try to complete it with user login"""
if not '/' in sdk_git_id:
login = user_from_token(gh_token).login
return '{}/{}'.format(login, sdk_git_id)
return sdk_git_id
def sync_fork(gh_token, github_repo_id, repo, push=True):
"""Sync the current branch in this fork against the direct parent on Github"""
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg)
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit)
def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""): # pylint: disable=too-many-arguments
"Do the PR"
if not gh_token:
_LOGGER.info('Skipping the PR, no token found')
return None
if not sdk_pr_target_repo_id:
_LOGGER.info('Skipping the PR, no target repo id')
return None
github_con = Github(gh_token)
sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
if '/' in sdk_git_id:
sdk_git_owner = sdk_git_id.split('/')[0]
_LOGGER.info("Do the PR from %s", sdk_git_owner)
head_name = "{}:{}".format(sdk_git_owner, branch_name)
else:
head_name = branch_name
sdk_git_repo = github_con.get_repo(sdk_git_id)
sdk_git_owner = sdk_git_repo.owner.login
try:
github_pr = sdk_pr_target_repo.create_pull(
title='Automatic PR from {}'.format(branch_name),
body=pr_body,
head=head_name,
base=base_branch
)
except GithubException as err:
if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
matching_pulls = sdk_pr_target_repo.get_pulls(base=base_branch, head=sdk_git_owner+":"+head_name)
matching_pull = matching_pulls[0]
_LOGGER.info('PR already exists: %s', matching_pull.html_url)
return matching_pull
raise
_LOGGER.info("Made PR %s", github_pr.html_url)
return github_pr
def remove_readonly(func, path, _):
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path)
@contextmanager
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly)
class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
def as_raw_link(self):
"""Returns a GithubLink to a raw content.
"""
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
)
class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def get_or_create(cls, issue, header, text=None):
"""Get or create the dashboard comment in this issue.
"""
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | create_comment | python | def create_comment(github_object, body):
try:
return github_object.create_issue_comment(body) # It's a PR
except AttributeError:
return github_object.create_comment(body) | Create a comment, whatever the object is a PR, a commit or an issue. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L63-L69 | null | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext: # pylint: disable=too-few-public-methods
def __init__(self):
self.comment = None
@contextmanager
def exception_to_github(github_obj_to_comment, summary=""):
"""If any exception comes, log them in the given Github obj.
"""
context = ExceptionContext()
try:
yield context
except Exception: # pylint: disable=broad-except
if summary:
summary = ": ({})".format(summary)
error_type = "an unknown error"
try:
raise
except CalledProcessError as err:
error_type = "a Subprocess error"
content = "Command: {}\n".format(err.cmd)
content += "Finished with return code {}\n".format(err.returncode)
if err.output:
content += "and output:\n```shell\n{}\n```".format(err.output)
else:
content += "and no output"
except Exception: # pylint: disable=broad-except
content = "```python\n{}\n```".format(traceback.format_exc())
response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
error_type,
summary
)
response += content
response += "\n\n</p></details>"
context.comment = create_comment(github_obj_to_comment, response)
def user_from_token(gh_token):
"""Get user login from GitHub token"""
github_con = Github(gh_token)
return github_con.get_user()
# It's a commit/issue
def get_comments(github_object):
"""Get a list of comments, whater the object is a PR, a commit or an issue.
"""
try:
return github_object.get_issue_comments() # It's a PR
except AttributeError:
return github_object.get_comments() # It's a commit/issue
def get_files(github_object):
"""Get files from a PR or a commit.
"""
try:
return github_object.get_files() # Try as a PR object
except AttributeError:
return github_object.files # Try as a commit object
def configure_user(gh_token, repo):
"""git config --global user.email "you@example.com"
git config --global user.name "Your Name"
"""
user = user_from_token(gh_token)
repo.git.config('user.email', user.email or 'aspysdk2@microsoft.com')
repo.git.config('user.name', user.name or 'SwaggerToSDK Automation')
def get_full_sdk_id(gh_token, sdk_git_id):
"""If the SDK git id is incomplete, try to complete it with user login"""
if not '/' in sdk_git_id:
login = user_from_token(gh_token).login
return '{}/{}'.format(login, sdk_git_id)
return sdk_git_id
def sync_fork(gh_token, github_repo_id, repo, push=True):
"""Sync the current branch in this fork against the direct parent on Github"""
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg)
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit)
def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""):  # pylint: disable=too-many-arguments
    """Create a PR from ``sdk_git_id``'s ``branch_name`` into ``sdk_pr_target_repo_id``.

    :param str gh_token: Github token; if falsy, the PR step is skipped.
    :param str sdk_git_id: "owner/repo" (or bare repo name) holding the head branch.
    :param str sdk_pr_target_repo_id: "owner/repo" receiving the PR; falsy skips the step.
    :param str branch_name: Head branch name.
    :param str base_branch: Base branch in the target repo.
    :param str pr_body: Optional PR description.
    :returns: The created PR, the pre-existing matching PR, or None when skipped.
    """
    if not gh_token:
        _LOGGER.info('Skipping the PR, no token found')
        return None
    if not sdk_pr_target_repo_id:
        _LOGGER.info('Skipping the PR, no target repo id')
        return None
    github_con = Github(gh_token)
    sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
    if '/' in sdk_git_id:
        sdk_git_owner = sdk_git_id.split('/')[0]
        _LOGGER.info("Do the PR from %s", sdk_git_owner)
        head_name = "{}:{}".format(sdk_git_owner, branch_name)
    else:
        head_name = branch_name
        # Owner not given: ask Github who owns that repo.
        sdk_git_repo = github_con.get_repo(sdk_git_id)
        sdk_git_owner = sdk_git_repo.owner.login
    try:
        github_pr = sdk_pr_target_repo.create_pull(
            title='Automatic PR from {}'.format(branch_name),
            body=pr_body,
            head=head_name,
            base=base_branch
        )
    except GithubException as err:
        if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
            # BUGFIX: previous code used sdk_git_owner + ":" + head_name, which in the
            # "owner/repo" branch produced "owner:owner:branch" and never matched.
            # get_pulls requires exactly one "owner:branch" head filter.
            matching_pulls = sdk_pr_target_repo.get_pulls(
                base=base_branch,
                head="{}:{}".format(sdk_git_owner, branch_name)
            )
            matching_pull = matching_pulls[0]
            _LOGGER.info('PR already exists: %s', matching_pull.html_url)
            return matching_pull
        raise
    _LOGGER.info("Made PR %s", github_pr.html_url)
    return github_pr
def remove_readonly(func, path, _):
    """Clear the read-only bit on *path*, then retry *func* on it.

    Intended as the ``onerror`` callback of ``shutil.rmtree`` (Windows leaves
    read-only files undeletable otherwise).
    """
    os.chmod(path, stat.S_IWRITE)
    func(path)
@contextmanager
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
    """Context manager to avoid readonly problem while cleanup the temp dir.

    If PR number is given, use magic branches "pull" from Github.
    Yields the folder containing the checkout: ``git_id`` itself when it is an
    existing local path (left untouched on exit), otherwise ``temp_dir``, which
    is removed on exit even if files were left read-only (Windows).

    :param str git_id: Local path, or "repo_id[@branch_or_commit]".
    :param int pr_number: Optional PR number forwarded to :func:`clone_to_path`.
    """
    _LOGGER.debug("Git ID %s", git_id)
    if Path(git_id).exists():
        yield git_id
        return  # Do not erase a local folder, just skip here
    # Clone the specific branch
    # "repo@ref" syntax: the part after "@" is the branch or commit to check out.
    split_git_id = git_id.split("@")
    branch = split_git_id[1] if len(split_git_id) > 1 else None
    clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
    try:
        yield temp_dir
    # Pre-cleanup for Windows http://bugs.python.org/issue26660
    finally:
        _LOGGER.debug("Preclean Rest folder")
        shutil.rmtree(temp_dir, onerror=remove_readonly)
class GithubLink:
    """Parsed representation of a Github URL (blob/tree/raw), with optional token."""

    def __init__(self, gitid, link_type, branch_or_commit, path, token=None):  # pylint: disable=too-many-arguments
        self.gitid = gitid
        self.link_type = link_type
        self.branch_or_commit = branch_or_commit
        self.path = path
        self.token = token

    @classmethod
    def from_string(cls, github_url):
        """Build a GithubLink from a github.com or raw.githubusercontent.com URL."""
        parsed = urlsplit(github_url)
        netloc = parsed.netloc
        token = None
        if "@" in netloc:
            token, netloc = netloc.split("@")
        # Path looks like /<org>/<repo>[/<type>]/<ref>/<subpath...>
        segments = parsed.path.split("/")[1:]  # drop the leading empty segment
        gitid = segments[0] + "/" + segments[1]
        segments = segments[2:]
        if netloc == "raw.githubusercontent.com":
            link_type = "raw"  # the raw host carries no explicit type segment
        else:
            link_type = segments[0]
            segments = segments[1:]
        branch_or_commit = segments[0]
        path = "/".join(segments[1:])
        return cls(gitid, link_type, branch_or_commit, path, token)

    def __repr__(self):
        if self.link_type == "raw":
            # If raw and token, it must be passed via "Authorization: token <token>",
            # so the netloc never embeds credentials here.
            netloc = "raw.githubusercontent.com"
            parts = ["", self.gitid, self.branch_or_commit, self.path]
        else:
            netloc = self.token + "@github.com" if self.token else "github.com"
            parts = ["", self.gitid, self.link_type, self.branch_or_commit, self.path]
        return urlunsplit(("https", netloc, "/".join(parts), '', ''))

    def as_raw_link(self):
        """Returns a GithubLink to a raw content."""
        if self.link_type == "raw":
            return self  # Can be discussed if we need an hard copy, or fail
        if self.link_type != "blob":
            raise ValueError("Cannot get a download link from a tree link")
        return type(self)(
            self.gitid,
            "raw",
            self.branch_or_commit,
            self.path,
            self.token
        )
class DashboardCommentableObject:  # pylint: disable=too-few-public-methods
    """Wrap an issue or PR plus a header so dashboard comments can be posted on it."""

    def __init__(self, issue_or_pr, header):
        self._issue_or_pr = issue_or_pr
        self._header = header

    def create_comment(self, text):
        """Mimic issue API, so we can use it everywhere.

        Return dashboard comment.
        """
        return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
    """A Github comment whose first line is a fixed header identifying the dashboard."""

    def __init__(self, github_comment, header):
        self.github_comment = github_comment
        self._header = header

    @classmethod
    def get_or_create(cls, issue, header, text=None):
        """Get or create the dashboard comment in this issue."""
        dashboard = None
        for comment in get_comments(issue):
            lines = comment.body.splitlines()
            # Empty bodies have no first line to compare; skip them.
            if lines and lines[0] == header:
                dashboard = cls(comment, header)
                break
        if dashboard is None:
            # No dashboard comment yet, start one.
            dashboard = cls(create_comment(issue, header), header)
        if text:
            dashboard.edit(text)
        return dashboard

    def edit(self, text):
        """Replace the comment body, keeping the header as the first line."""
        self.github_comment.edit(self._header + "\n" + text)

    @property
    def body(self):
        """The comment body without the leading header line."""
        return self.github_comment.body[len(self._header + "\n"):]

    def delete(self):
        """Delete the underlying Github comment."""
        self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | configure_user | python | def configure_user(gh_token, repo):
user = user_from_token(gh_token)
repo.git.config('user.email', user.email or 'aspysdk2@microsoft.com')
repo.git.config('user.name', user.name or 'SwaggerToSDK Automation') | git config --global user.email "you@example.com"
git config --global user.name "Your Name" | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L87-L93 | [
"def user_from_token(gh_token):\n \"\"\"Get user login from GitHub token\"\"\"\n github_con = Github(gh_token)\n return github_con.get_user()\n"
] | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext:  # pylint: disable=too-few-public-methods
    """Mutable holder yielded by :func:`exception_to_github`."""

    def __init__(self):
        # Filled with the created Github comment when an exception was reported.
        self.comment = None
@contextmanager
def exception_to_github(github_obj_to_comment, summary=""):
    """If any exception comes, log them in the given Github obj.

    Yields an :class:`ExceptionContext`; after the block exits, its ``comment``
    attribute holds the created Github comment if an exception was reported.
    The exception itself is swallowed after being reported.

    :param github_obj_to_comment: PR, commit or issue to comment on.
    :param str summary: Optional label appended to the comment title.
    """
    context = ExceptionContext()
    try:
        yield context
    except Exception:  # pylint: disable=broad-except
        if summary:
            summary = ": ({})".format(summary)
        error_type = "an unknown error"
        # Re-raise the active exception to dispatch on its concrete type
        # while keeping access to the exception object.
        try:
            raise
        except CalledProcessError as err:
            error_type = "a Subprocess error"
            content = "Command: {}\n".format(err.cmd)
            content += "Finished with return code {}\n".format(err.returncode)
            if err.output:
                content += "and output:\n```shell\n{}\n```".format(err.output)
            else:
                content += "and no output"
        except Exception:  # pylint: disable=broad-except
            content = "```python\n{}\n```".format(traceback.format_exc())
        # Collapsible Github-flavored-markdown block so dashboards stay compact.
        response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
            error_type,
            summary
        )
        response += content
        response += "\n\n</p></details>"
        context.comment = create_comment(github_obj_to_comment, response)
def user_from_token(gh_token):
    """Get user login from GitHub token."""
    return Github(gh_token).get_user()
def create_comment(github_object, body):
    """Create a comment, whatever the object is a PR, a commit or an issue."""
    poster = getattr(github_object, "create_issue_comment", None)  # PR flavor
    if poster is None:
        poster = github_object.create_comment  # commit/issue flavor
    return poster(body)
def get_comments(github_object):
    """Get a list of comments, whatever the object is a PR, a commit or an issue."""
    getter = getattr(github_object, "get_issue_comments", None)  # PR flavor
    if getter is None:
        getter = github_object.get_comments  # commit/issue flavor
    return getter()
def get_files(github_object):
    """Get files from a PR or a commit."""
    if hasattr(github_object, "get_files"):  # PR flavor
        return github_object.get_files()
    return github_object.files  # commit flavor
def get_full_sdk_id(gh_token, sdk_git_id):
    """If the SDK git id is incomplete, try to complete it with user login."""
    if '/' in sdk_git_id:
        return sdk_git_id  # already "owner/repo"
    # Bare repo name: prefix it with the token owner's login.
    return '{}/{}'.format(user_from_token(gh_token).login, sdk_git_id)
def sync_fork(gh_token, github_repo_id, repo, push=True):
    """Sync the current branch in this fork against the direct parent on Github.

    Rebases the active branch of ``repo`` on top of the same-named branch of the
    upstream (parent) repository, then pushes unless ``push`` is False.
    No-op when there is no token, no upstream, or no matching upstream branch.

    :param str gh_token: Github token, used for the API lookup of the parent repo.
    :param str github_repo_id: "org/repo" id of the fork on Github.
    :param repo: local git repo object of the fork.
    :param bool push: Push the rebased branch back to origin when True.
    """
    if not gh_token:
        _LOGGER.warning('Skipping the upstream repo sync, no token')
        return
    _LOGGER.info('Check if repo has to be sync with upstream')
    github_con = Github(gh_token)
    github_repo = github_con.get_repo(github_repo_id)
    if not github_repo.parent:
        _LOGGER.warning('This repo has no upstream')
        return
    upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
    # NOTE(review): create_remote raises if an "upstream" remote already exists,
    # so this assumes a fresh clone -- confirm with callers.
    upstream = repo.create_remote('upstream', url=upstream_url)
    upstream.fetch()
    active_branch_name = repo.active_branch.name
    if not active_branch_name in repo.remotes.upstream.refs:
        _LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
        return
    else:
        _LOGGER.info('Merge from upstream')
    msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
    _LOGGER.debug(msg)
    if push:
        msg = repo.git.push()
        _LOGGER.debug(msg)
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
    """Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.

    You should always use the complete head syntax "org:branch", since the syntax is
    required in case of listing.

    :param github_repo: Repository object in which the PR is created.
    :param str title: PR title.
    :param str body: PR body.
    :param str head: Complete "org:branch" head.
    :param str base: Base branch name.
    :param bool none_if_no_commit: If set, return None instead of raising exception
        when the problem is that head and base are the same.
    """
    try:  # Try to create or get a PR
        return github_repo.create_pull(
            title=title,
            body=body,
            head=head,
            base=base
        )
    except GithubException as err:
        err_message = err.data['errors'][0].get('message', '')
        if err.status == 422 and err_message.startswith('A pull request already exists'):
            _LOGGER.info('PR already exists, get this PR')
            # The "org:branch" head filter requires the full syntax to match.
            return list(github_repo.get_pulls(
                head=head,
                base=base
            ))[0]
        if none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
            _LOGGER.info('No PR possible since head %s and base %s are the same',
                         head,
                         base)
            return None
        _LOGGER.warning("Unable to create PR:\n%s", err.data)
        raise
    except Exception:  # pylint: disable=broad-except
        # Log the full traceback before propagating unknown failures.
        # (Fix: previous code bound the exception to an unused name.)
        _LOGGER.warning("Unable to create PR:\n%s", traceback.format_exc())
        raise
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit)
def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""): # pylint: disable=too-many-arguments
"Do the PR"
if not gh_token:
_LOGGER.info('Skipping the PR, no token found')
return None
if not sdk_pr_target_repo_id:
_LOGGER.info('Skipping the PR, no target repo id')
return None
github_con = Github(gh_token)
sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
if '/' in sdk_git_id:
sdk_git_owner = sdk_git_id.split('/')[0]
_LOGGER.info("Do the PR from %s", sdk_git_owner)
head_name = "{}:{}".format(sdk_git_owner, branch_name)
else:
head_name = branch_name
sdk_git_repo = github_con.get_repo(sdk_git_id)
sdk_git_owner = sdk_git_repo.owner.login
try:
github_pr = sdk_pr_target_repo.create_pull(
title='Automatic PR from {}'.format(branch_name),
body=pr_body,
head=head_name,
base=base_branch
)
except GithubException as err:
if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
matching_pulls = sdk_pr_target_repo.get_pulls(base=base_branch, head=sdk_git_owner+":"+head_name)
matching_pull = matching_pulls[0]
_LOGGER.info('PR already exists: %s', matching_pull.html_url)
return matching_pull
raise
_LOGGER.info("Made PR %s", github_pr.html_url)
return github_pr
def remove_readonly(func, path, _):
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path)
@contextmanager
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly)
class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
def as_raw_link(self):
"""Returns a GithubLink to a raw content.
"""
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
)
class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def get_or_create(cls, issue, header, text=None):
"""Get or create the dashboard comment in this issue.
"""
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | get_full_sdk_id | python | def get_full_sdk_id(gh_token, sdk_git_id):
if not '/' in sdk_git_id:
login = user_from_token(gh_token).login
return '{}/{}'.format(login, sdk_git_id)
return sdk_git_id | If the SDK git id is incomplete, try to complete it with user login | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L95-L100 | [
"def user_from_token(gh_token):\n \"\"\"Get user login from GitHub token\"\"\"\n github_con = Github(gh_token)\n return github_con.get_user()\n"
] | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext: # pylint: disable=too-few-public-methods
def __init__(self):
self.comment = None
@contextmanager
def exception_to_github(github_obj_to_comment, summary=""):
"""If any exception comes, log them in the given Github obj.
"""
context = ExceptionContext()
try:
yield context
except Exception: # pylint: disable=broad-except
if summary:
summary = ": ({})".format(summary)
error_type = "an unknown error"
try:
raise
except CalledProcessError as err:
error_type = "a Subprocess error"
content = "Command: {}\n".format(err.cmd)
content += "Finished with return code {}\n".format(err.returncode)
if err.output:
content += "and output:\n```shell\n{}\n```".format(err.output)
else:
content += "and no output"
except Exception: # pylint: disable=broad-except
content = "```python\n{}\n```".format(traceback.format_exc())
response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
error_type,
summary
)
response += content
response += "\n\n</p></details>"
context.comment = create_comment(github_obj_to_comment, response)
def user_from_token(gh_token):
"""Get user login from GitHub token"""
github_con = Github(gh_token)
return github_con.get_user()
def create_comment(github_object, body):
"""Create a comment, whatever the object is a PR, a commit or an issue.
"""
try:
return github_object.create_issue_comment(body) # It's a PR
except AttributeError:
return github_object.create_comment(body) # It's a commit/issue
def get_comments(github_object):
"""Get a list of comments, whater the object is a PR, a commit or an issue.
"""
try:
return github_object.get_issue_comments() # It's a PR
except AttributeError:
return github_object.get_comments() # It's a commit/issue
def get_files(github_object):
"""Get files from a PR or a commit.
"""
try:
return github_object.get_files() # Try as a PR object
except AttributeError:
return github_object.files # Try as a commit object
def configure_user(gh_token, repo):
    """Set the local git identity (user.email / user.name) from the token's Github user.

    Falls back to the SwaggerToSDK automation identity when the Github profile
    does not expose an email or name.
    """
    github_user = user_from_token(gh_token)
    email = github_user.email or 'aspysdk2@microsoft.com'
    name = github_user.name or 'SwaggerToSDK Automation'
    repo.git.config('user.email', email)
    repo.git.config('user.name', name)
def sync_fork(gh_token, github_repo_id, repo, push=True):
"""Sync the current branch in this fork against the direct parent on Github"""
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg)
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit)
def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""): # pylint: disable=too-many-arguments
"Do the PR"
if not gh_token:
_LOGGER.info('Skipping the PR, no token found')
return None
if not sdk_pr_target_repo_id:
_LOGGER.info('Skipping the PR, no target repo id')
return None
github_con = Github(gh_token)
sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
if '/' in sdk_git_id:
sdk_git_owner = sdk_git_id.split('/')[0]
_LOGGER.info("Do the PR from %s", sdk_git_owner)
head_name = "{}:{}".format(sdk_git_owner, branch_name)
else:
head_name = branch_name
sdk_git_repo = github_con.get_repo(sdk_git_id)
sdk_git_owner = sdk_git_repo.owner.login
try:
github_pr = sdk_pr_target_repo.create_pull(
title='Automatic PR from {}'.format(branch_name),
body=pr_body,
head=head_name,
base=base_branch
)
except GithubException as err:
if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
matching_pulls = sdk_pr_target_repo.get_pulls(base=base_branch, head=sdk_git_owner+":"+head_name)
matching_pull = matching_pulls[0]
_LOGGER.info('PR already exists: %s', matching_pull.html_url)
return matching_pull
raise
_LOGGER.info("Made PR %s", github_pr.html_url)
return github_pr
def remove_readonly(func, path, _):
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path)
@contextmanager
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly)
class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
def as_raw_link(self):
"""Returns a GithubLink to a raw content.
"""
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
)
class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def get_or_create(cls, issue, header, text=None):
"""Get or create the dashboard comment in this issue.
"""
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | sync_fork | python | def sync_fork(gh_token, github_repo_id, repo, push=True):
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg) | Sync the current branch in this fork against the direct parent on Github | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L102-L128 | null | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext: # pylint: disable=too-few-public-methods
def __init__(self):
self.comment = None
@contextmanager
def exception_to_github(github_obj_to_comment, summary=""):
"""If any exception comes, log them in the given Github obj.
"""
context = ExceptionContext()
try:
yield context
except Exception: # pylint: disable=broad-except
if summary:
summary = ": ({})".format(summary)
error_type = "an unknown error"
try:
raise
except CalledProcessError as err:
error_type = "a Subprocess error"
content = "Command: {}\n".format(err.cmd)
content += "Finished with return code {}\n".format(err.returncode)
if err.output:
content += "and output:\n```shell\n{}\n```".format(err.output)
else:
content += "and no output"
except Exception: # pylint: disable=broad-except
content = "```python\n{}\n```".format(traceback.format_exc())
response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
error_type,
summary
)
response += content
response += "\n\n</p></details>"
context.comment = create_comment(github_obj_to_comment, response)
def user_from_token(gh_token):
"""Get user login from GitHub token"""
github_con = Github(gh_token)
return github_con.get_user()
def create_comment(github_object, body):
"""Create a comment, whatever the object is a PR, a commit or an issue.
"""
try:
return github_object.create_issue_comment(body) # It's a PR
except AttributeError:
return github_object.create_comment(body) # It's a commit/issue
def get_comments(github_object):
"""Get a list of comments, whater the object is a PR, a commit or an issue.
"""
try:
return github_object.get_issue_comments() # It's a PR
except AttributeError:
return github_object.get_comments() # It's a commit/issue
def get_files(github_object):
"""Get files from a PR or a commit.
"""
try:
return github_object.get_files() # Try as a PR object
except AttributeError:
return github_object.files # Try as a commit object
def configure_user(gh_token, repo):
"""git config --global user.email "you@example.com"
git config --global user.name "Your Name"
"""
user = user_from_token(gh_token)
repo.git.config('user.email', user.email or 'aspysdk2@microsoft.com')
repo.git.config('user.name', user.name or 'SwaggerToSDK Automation')
def get_full_sdk_id(gh_token, sdk_git_id):
"""If the SDK git id is incomplete, try to complete it with user login"""
if not '/' in sdk_git_id:
login = user_from_token(gh_token).login
return '{}/{}'.format(login, sdk_git_id)
return sdk_git_id
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit)
def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""): # pylint: disable=too-many-arguments
"Do the PR"
if not gh_token:
_LOGGER.info('Skipping the PR, no token found')
return None
if not sdk_pr_target_repo_id:
_LOGGER.info('Skipping the PR, no target repo id')
return None
github_con = Github(gh_token)
sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
if '/' in sdk_git_id:
sdk_git_owner = sdk_git_id.split('/')[0]
_LOGGER.info("Do the PR from %s", sdk_git_owner)
head_name = "{}:{}".format(sdk_git_owner, branch_name)
else:
head_name = branch_name
sdk_git_repo = github_con.get_repo(sdk_git_id)
sdk_git_owner = sdk_git_repo.owner.login
try:
github_pr = sdk_pr_target_repo.create_pull(
title='Automatic PR from {}'.format(branch_name),
body=pr_body,
head=head_name,
base=base_branch
)
except GithubException as err:
if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
matching_pulls = sdk_pr_target_repo.get_pulls(base=base_branch, head=sdk_git_owner+":"+head_name)
matching_pull = matching_pulls[0]
_LOGGER.info('PR already exists: %s', matching_pull.html_url)
return matching_pull
raise
_LOGGER.info("Made PR %s", github_pr.html_url)
return github_pr
def remove_readonly(func, path, _):
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path)
@contextmanager
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly)
class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
def as_raw_link(self):
"""Returns a GithubLink to a raw content.
"""
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
)
class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def get_or_create(cls, issue, header, text=None):
"""Get or create the dashboard comment in this issue.
"""
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | get_or_create_pull | python | def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise | Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L130-L165 | null | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext: # pylint: disable=too-few-public-methods
def __init__(self):
self.comment = None
@contextmanager
def exception_to_github(github_obj_to_comment, summary=""):
"""If any exception comes, log them in the given Github obj.
"""
context = ExceptionContext()
try:
yield context
except Exception: # pylint: disable=broad-except
if summary:
summary = ": ({})".format(summary)
error_type = "an unknown error"
try:
raise
except CalledProcessError as err:
error_type = "a Subprocess error"
content = "Command: {}\n".format(err.cmd)
content += "Finished with return code {}\n".format(err.returncode)
if err.output:
content += "and output:\n```shell\n{}\n```".format(err.output)
else:
content += "and no output"
except Exception: # pylint: disable=broad-except
content = "```python\n{}\n```".format(traceback.format_exc())
response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
error_type,
summary
)
response += content
response += "\n\n</p></details>"
context.comment = create_comment(github_obj_to_comment, response)
def user_from_token(gh_token):
"""Get user login from GitHub token"""
github_con = Github(gh_token)
return github_con.get_user()
def create_comment(github_object, body):
"""Create a comment, whatever the object is a PR, a commit or an issue.
"""
try:
return github_object.create_issue_comment(body) # It's a PR
except AttributeError:
return github_object.create_comment(body) # It's a commit/issue
def get_comments(github_object):
"""Get a list of comments, whater the object is a PR, a commit or an issue.
"""
try:
return github_object.get_issue_comments() # It's a PR
except AttributeError:
return github_object.get_comments() # It's a commit/issue
def get_files(github_object):
"""Get files from a PR or a commit.
"""
try:
return github_object.get_files() # Try as a PR object
except AttributeError:
return github_object.files # Try as a commit object
def configure_user(gh_token, repo):
"""git config --global user.email "you@example.com"
git config --global user.name "Your Name"
"""
user = user_from_token(gh_token)
repo.git.config('user.email', user.email or 'aspysdk2@microsoft.com')
repo.git.config('user.name', user.name or 'SwaggerToSDK Automation')
def get_full_sdk_id(gh_token, sdk_git_id):
"""If the SDK git id is incomplete, try to complete it with user login"""
if not '/' in sdk_git_id:
login = user_from_token(gh_token).login
return '{}/{}'.format(login, sdk_git_id)
return sdk_git_id
def sync_fork(gh_token, github_repo_id, repo, push=True):
"""Sync the current branch in this fork against the direct parent on Github"""
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg)
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit)
def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""): # pylint: disable=too-many-arguments
"Do the PR"
if not gh_token:
_LOGGER.info('Skipping the PR, no token found')
return None
if not sdk_pr_target_repo_id:
_LOGGER.info('Skipping the PR, no target repo id')
return None
github_con = Github(gh_token)
sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
if '/' in sdk_git_id:
sdk_git_owner = sdk_git_id.split('/')[0]
_LOGGER.info("Do the PR from %s", sdk_git_owner)
head_name = "{}:{}".format(sdk_git_owner, branch_name)
else:
head_name = branch_name
sdk_git_repo = github_con.get_repo(sdk_git_id)
sdk_git_owner = sdk_git_repo.owner.login
try:
github_pr = sdk_pr_target_repo.create_pull(
title='Automatic PR from {}'.format(branch_name),
body=pr_body,
head=head_name,
base=base_branch
)
except GithubException as err:
if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
matching_pulls = sdk_pr_target_repo.get_pulls(base=base_branch, head=sdk_git_owner+":"+head_name)
matching_pull = matching_pulls[0]
_LOGGER.info('PR already exists: %s', matching_pull.html_url)
return matching_pull
raise
_LOGGER.info("Made PR %s", github_pr.html_url)
return github_pr
def remove_readonly(func, path, _):
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path)
@contextmanager
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly)
class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
def as_raw_link(self):
"""Returns a GithubLink to a raw content.
"""
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
)
class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def get_or_create(cls, issue, header, text=None):
"""Get or create the dashboard comment in this issue.
"""
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | clone_to_path | python | def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit) | Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L167-L212 | [
"def checkout_with_fetch(git_folder, refspec, repository=\"origin\"):\n \"\"\"Fetch the refspec, and checkout FETCH_HEAD.\n Beware that you will ne in detached head mode.\n \"\"\"\n _LOGGER.info(\"Trying to fetch and checkout %s\", refspec)\n repo = Repo(str(git_folder))\n repo.git.fetch(repositor... | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext: # pylint: disable=too-few-public-methods
def __init__(self):
self.comment = None
@contextmanager
def exception_to_github(github_obj_to_comment, summary=""):
"""If any exception comes, log them in the given Github obj.
"""
context = ExceptionContext()
try:
yield context
except Exception: # pylint: disable=broad-except
if summary:
summary = ": ({})".format(summary)
error_type = "an unknown error"
try:
raise
except CalledProcessError as err:
error_type = "a Subprocess error"
content = "Command: {}\n".format(err.cmd)
content += "Finished with return code {}\n".format(err.returncode)
if err.output:
content += "and output:\n```shell\n{}\n```".format(err.output)
else:
content += "and no output"
except Exception: # pylint: disable=broad-except
content = "```python\n{}\n```".format(traceback.format_exc())
response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
error_type,
summary
)
response += content
response += "\n\n</p></details>"
context.comment = create_comment(github_obj_to_comment, response)
def user_from_token(gh_token):
"""Get user login from GitHub token"""
github_con = Github(gh_token)
return github_con.get_user()
def create_comment(github_object, body):
"""Create a comment, whatever the object is a PR, a commit or an issue.
"""
try:
return github_object.create_issue_comment(body) # It's a PR
except AttributeError:
return github_object.create_comment(body) # It's a commit/issue
def get_comments(github_object):
"""Get a list of comments, whater the object is a PR, a commit or an issue.
"""
try:
return github_object.get_issue_comments() # It's a PR
except AttributeError:
return github_object.get_comments() # It's a commit/issue
def get_files(github_object):
"""Get files from a PR or a commit.
"""
try:
return github_object.get_files() # Try as a PR object
except AttributeError:
return github_object.files # Try as a commit object
def configure_user(gh_token, repo):
"""git config --global user.email "you@example.com"
git config --global user.name "Your Name"
"""
user = user_from_token(gh_token)
repo.git.config('user.email', user.email or 'aspysdk2@microsoft.com')
repo.git.config('user.name', user.name or 'SwaggerToSDK Automation')
def get_full_sdk_id(gh_token, sdk_git_id):
"""If the SDK git id is incomplete, try to complete it with user login"""
if not '/' in sdk_git_id:
login = user_from_token(gh_token).login
return '{}/{}'.format(login, sdk_git_id)
return sdk_git_id
def sync_fork(gh_token, github_repo_id, repo, push=True):
"""Sync the current branch in this fork against the direct parent on Github"""
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg)
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""): # pylint: disable=too-many-arguments
"Do the PR"
if not gh_token:
_LOGGER.info('Skipping the PR, no token found')
return None
if not sdk_pr_target_repo_id:
_LOGGER.info('Skipping the PR, no target repo id')
return None
github_con = Github(gh_token)
sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
if '/' in sdk_git_id:
sdk_git_owner = sdk_git_id.split('/')[0]
_LOGGER.info("Do the PR from %s", sdk_git_owner)
head_name = "{}:{}".format(sdk_git_owner, branch_name)
else:
head_name = branch_name
sdk_git_repo = github_con.get_repo(sdk_git_id)
sdk_git_owner = sdk_git_repo.owner.login
try:
github_pr = sdk_pr_target_repo.create_pull(
title='Automatic PR from {}'.format(branch_name),
body=pr_body,
head=head_name,
base=base_branch
)
except GithubException as err:
if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
matching_pulls = sdk_pr_target_repo.get_pulls(base=base_branch, head=sdk_git_owner+":"+head_name)
matching_pull = matching_pulls[0]
_LOGGER.info('PR already exists: %s', matching_pull.html_url)
return matching_pull
raise
_LOGGER.info("Made PR %s", github_pr.html_url)
return github_pr
def remove_readonly(func, path, _):
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path)
@contextmanager
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly)
class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
def as_raw_link(self):
"""Returns a GithubLink to a raw content.
"""
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
)
class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def get_or_create(cls, issue, header, text=None):
"""Get or create the dashboard comment in this issue.
"""
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | do_pr | python | def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""): # pylint: disable=too-many-arguments
"Do the PR"
if not gh_token:
_LOGGER.info('Skipping the PR, no token found')
return None
if not sdk_pr_target_repo_id:
_LOGGER.info('Skipping the PR, no target repo id')
return None
github_con = Github(gh_token)
sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
if '/' in sdk_git_id:
sdk_git_owner = sdk_git_id.split('/')[0]
_LOGGER.info("Do the PR from %s", sdk_git_owner)
head_name = "{}:{}".format(sdk_git_owner, branch_name)
else:
head_name = branch_name
sdk_git_repo = github_con.get_repo(sdk_git_id)
sdk_git_owner = sdk_git_repo.owner.login
try:
github_pr = sdk_pr_target_repo.create_pull(
title='Automatic PR from {}'.format(branch_name),
body=pr_body,
head=head_name,
base=base_branch
)
except GithubException as err:
if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
matching_pulls = sdk_pr_target_repo.get_pulls(base=base_branch, head=sdk_git_owner+":"+head_name)
matching_pull = matching_pulls[0]
_LOGGER.info('PR already exists: %s', matching_pull.html_url)
return matching_pull
raise
_LOGGER.info("Made PR %s", github_pr.html_url)
return github_pr | Do the PR | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L214-L250 | null | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext: # pylint: disable=too-few-public-methods
def __init__(self):
self.comment = None
@contextmanager
def exception_to_github(github_obj_to_comment, summary=""):
"""If any exception comes, log them in the given Github obj.
"""
context = ExceptionContext()
try:
yield context
except Exception: # pylint: disable=broad-except
if summary:
summary = ": ({})".format(summary)
error_type = "an unknown error"
try:
raise
except CalledProcessError as err:
error_type = "a Subprocess error"
content = "Command: {}\n".format(err.cmd)
content += "Finished with return code {}\n".format(err.returncode)
if err.output:
content += "and output:\n```shell\n{}\n```".format(err.output)
else:
content += "and no output"
except Exception: # pylint: disable=broad-except
content = "```python\n{}\n```".format(traceback.format_exc())
response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
error_type,
summary
)
response += content
response += "\n\n</p></details>"
context.comment = create_comment(github_obj_to_comment, response)
def user_from_token(gh_token):
"""Get user login from GitHub token"""
github_con = Github(gh_token)
return github_con.get_user()
def create_comment(github_object, body):
"""Create a comment, whatever the object is a PR, a commit or an issue.
"""
try:
return github_object.create_issue_comment(body) # It's a PR
except AttributeError:
return github_object.create_comment(body) # It's a commit/issue
def get_comments(github_object):
"""Get a list of comments, whater the object is a PR, a commit or an issue.
"""
try:
return github_object.get_issue_comments() # It's a PR
except AttributeError:
return github_object.get_comments() # It's a commit/issue
def get_files(github_object):
"""Get files from a PR or a commit.
"""
try:
return github_object.get_files() # Try as a PR object
except AttributeError:
return github_object.files # Try as a commit object
def configure_user(gh_token, repo):
"""git config --global user.email "you@example.com"
git config --global user.name "Your Name"
"""
user = user_from_token(gh_token)
repo.git.config('user.email', user.email or 'aspysdk2@microsoft.com')
repo.git.config('user.name', user.name or 'SwaggerToSDK Automation')
def get_full_sdk_id(gh_token, sdk_git_id):
"""If the SDK git id is incomplete, try to complete it with user login"""
if not '/' in sdk_git_id:
login = user_from_token(gh_token).login
return '{}/{}'.format(login, sdk_git_id)
return sdk_git_id
def sync_fork(gh_token, github_repo_id, repo, push=True):
"""Sync the current branch in this fork against the direct parent on Github"""
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg)
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit)
def remove_readonly(func, path, _):
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path)
@contextmanager
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly)
class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
def as_raw_link(self):
"""Returns a GithubLink to a raw content.
"""
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
)
class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def get_or_create(cls, issue, header, text=None):
"""Get or create the dashboard comment in this issue.
"""
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | remove_readonly | python | def remove_readonly(func, path, _):
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path) | Clear the readonly bit and reattempt the removal | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L253-L256 | null | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext: # pylint: disable=too-few-public-methods
def __init__(self):
self.comment = None
@contextmanager
def exception_to_github(github_obj_to_comment, summary=""):
"""If any exception comes, log them in the given Github obj.
"""
context = ExceptionContext()
try:
yield context
except Exception: # pylint: disable=broad-except
if summary:
summary = ": ({})".format(summary)
error_type = "an unknown error"
try:
raise
except CalledProcessError as err:
error_type = "a Subprocess error"
content = "Command: {}\n".format(err.cmd)
content += "Finished with return code {}\n".format(err.returncode)
if err.output:
content += "and output:\n```shell\n{}\n```".format(err.output)
else:
content += "and no output"
except Exception: # pylint: disable=broad-except
content = "```python\n{}\n```".format(traceback.format_exc())
response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
error_type,
summary
)
response += content
response += "\n\n</p></details>"
context.comment = create_comment(github_obj_to_comment, response)
def user_from_token(gh_token):
"""Get user login from GitHub token"""
github_con = Github(gh_token)
return github_con.get_user()
def create_comment(github_object, body):
"""Create a comment, whatever the object is a PR, a commit or an issue.
"""
try:
return github_object.create_issue_comment(body) # It's a PR
except AttributeError:
return github_object.create_comment(body) # It's a commit/issue
def get_comments(github_object):
"""Get a list of comments, whater the object is a PR, a commit or an issue.
"""
try:
return github_object.get_issue_comments() # It's a PR
except AttributeError:
return github_object.get_comments() # It's a commit/issue
def get_files(github_object):
"""Get files from a PR or a commit.
"""
try:
return github_object.get_files() # Try as a PR object
except AttributeError:
return github_object.files # Try as a commit object
def configure_user(gh_token, repo):
"""git config --global user.email "you@example.com"
git config --global user.name "Your Name"
"""
user = user_from_token(gh_token)
repo.git.config('user.email', user.email or 'aspysdk2@microsoft.com')
repo.git.config('user.name', user.name or 'SwaggerToSDK Automation')
def get_full_sdk_id(gh_token, sdk_git_id):
"""If the SDK git id is incomplete, try to complete it with user login"""
if not '/' in sdk_git_id:
login = user_from_token(gh_token).login
return '{}/{}'.format(login, sdk_git_id)
return sdk_git_id
def sync_fork(gh_token, github_repo_id, repo, push=True):
"""Sync the current branch in this fork against the direct parent on Github"""
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg)
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit)
def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""): # pylint: disable=too-many-arguments
"Do the PR"
if not gh_token:
_LOGGER.info('Skipping the PR, no token found')
return None
if not sdk_pr_target_repo_id:
_LOGGER.info('Skipping the PR, no target repo id')
return None
github_con = Github(gh_token)
sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
if '/' in sdk_git_id:
sdk_git_owner = sdk_git_id.split('/')[0]
_LOGGER.info("Do the PR from %s", sdk_git_owner)
head_name = "{}:{}".format(sdk_git_owner, branch_name)
else:
head_name = branch_name
sdk_git_repo = github_con.get_repo(sdk_git_id)
sdk_git_owner = sdk_git_repo.owner.login
try:
github_pr = sdk_pr_target_repo.create_pull(
title='Automatic PR from {}'.format(branch_name),
body=pr_body,
head=head_name,
base=base_branch
)
except GithubException as err:
if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
matching_pulls = sdk_pr_target_repo.get_pulls(base=base_branch, head=sdk_git_owner+":"+head_name)
matching_pull = matching_pulls[0]
_LOGGER.info('PR already exists: %s', matching_pull.html_url)
return matching_pull
raise
_LOGGER.info("Made PR %s", github_pr.html_url)
return github_pr
@contextmanager
def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
"""Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github.
"""
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly)
class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
def as_raw_link(self):
"""Returns a GithubLink to a raw content.
"""
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
)
class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def get_or_create(cls, issue, header, text=None):
"""Get or create the dashboard comment in this issue.
"""
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | manage_git_folder | python | def manage_git_folder(gh_token, temp_dir, git_id, *, pr_number=None):
_LOGGER.debug("Git ID %s", git_id)
if Path(git_id).exists():
yield git_id
return # Do not erase a local folder, just skip here
# Clone the specific branch
split_git_id = git_id.split("@")
branch = split_git_id[1] if len(split_git_id) > 1 else None
clone_to_path(gh_token, temp_dir, split_git_id[0], branch_or_commit=branch, pr_number=pr_number)
try:
yield temp_dir
# Pre-cleanup for Windows http://bugs.python.org/issue26660
finally:
_LOGGER.debug("Preclean Rest folder")
shutil.rmtree(temp_dir, onerror=remove_readonly) | Context manager to avoid readonly problem while cleanup the temp dir.
If PR number is given, use magic branches "pull" from Github. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L259-L278 | [
"def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):\n \"\"\"Clone the given repo_id to the folder.\n\n If PR number is specified fetch the magic branches\n pull/<id>/head or pull/<id>/merge from Github. \"merge\" is tried first, and fallback to \"head\".\n Beware ... | """Github tools.
"""
from contextlib import contextmanager
import logging
import os
from pathlib import Path
import shutil
import stat
from subprocess import CalledProcessError
import traceback
from urllib.parse import urlsplit, urlunsplit
from github import Github, GithubException
from git import Repo
from .git_tools import (
clone_to_path as _git_clone_to_path,
checkout_with_fetch
)
_LOGGER = logging.getLogger(__name__)
class ExceptionContext: # pylint: disable=too-few-public-methods
def __init__(self):
self.comment = None
@contextmanager
def exception_to_github(github_obj_to_comment, summary=""):
"""If any exception comes, log them in the given Github obj.
"""
context = ExceptionContext()
try:
yield context
except Exception: # pylint: disable=broad-except
if summary:
summary = ": ({})".format(summary)
error_type = "an unknown error"
try:
raise
except CalledProcessError as err:
error_type = "a Subprocess error"
content = "Command: {}\n".format(err.cmd)
content += "Finished with return code {}\n".format(err.returncode)
if err.output:
content += "and output:\n```shell\n{}\n```".format(err.output)
else:
content += "and no output"
except Exception: # pylint: disable=broad-except
content = "```python\n{}\n```".format(traceback.format_exc())
response = "<details><summary>Encountered {}{}</summary><p>\n\n".format(
error_type,
summary
)
response += content
response += "\n\n</p></details>"
context.comment = create_comment(github_obj_to_comment, response)
def user_from_token(gh_token):
"""Get user login from GitHub token"""
github_con = Github(gh_token)
return github_con.get_user()
def create_comment(github_object, body):
"""Create a comment, whatever the object is a PR, a commit or an issue.
"""
try:
return github_object.create_issue_comment(body) # It's a PR
except AttributeError:
return github_object.create_comment(body) # It's a commit/issue
def get_comments(github_object):
"""Get a list of comments, whater the object is a PR, a commit or an issue.
"""
try:
return github_object.get_issue_comments() # It's a PR
except AttributeError:
return github_object.get_comments() # It's a commit/issue
def get_files(github_object):
"""Get files from a PR or a commit.
"""
try:
return github_object.get_files() # Try as a PR object
except AttributeError:
return github_object.files # Try as a commit object
def configure_user(gh_token, repo):
"""git config --global user.email "you@example.com"
git config --global user.name "Your Name"
"""
user = user_from_token(gh_token)
repo.git.config('user.email', user.email or 'aspysdk2@microsoft.com')
repo.git.config('user.name', user.name or 'SwaggerToSDK Automation')
def get_full_sdk_id(gh_token, sdk_git_id):
"""If the SDK git id is incomplete, try to complete it with user login"""
if not '/' in sdk_git_id:
login = user_from_token(gh_token).login
return '{}/{}'.format(login, sdk_git_id)
return sdk_git_id
def sync_fork(gh_token, github_repo_id, repo, push=True):
"""Sync the current branch in this fork against the direct parent on Github"""
if not gh_token:
_LOGGER.warning('Skipping the upstream repo sync, no token')
return
_LOGGER.info('Check if repo has to be sync with upstream')
github_con = Github(gh_token)
github_repo = github_con.get_repo(github_repo_id)
if not github_repo.parent:
_LOGGER.warning('This repo has no upstream')
return
upstream_url = 'https://github.com/{}.git'.format(github_repo.parent.full_name)
upstream = repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
active_branch_name = repo.active_branch.name
if not active_branch_name in repo.remotes.upstream.refs:
_LOGGER.info('Upstream has no branch %s to merge from', active_branch_name)
return
else:
_LOGGER.info('Merge from upstream')
msg = repo.git.rebase('upstream/{}'.format(repo.active_branch.name))
_LOGGER.debug(msg)
if push:
msg = repo.git.push()
_LOGGER.debug(msg)
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):
"""Clone the given repo_id to the folder.
If PR number is specified fetch the magic branches
pull/<id>/head or pull/<id>/merge from Github. "merge" is tried first, and fallback to "head".
Beware that pr_number implies detached head, and then no push is possible.
If branch is specified, checkout this branch or commit finally.
:param str branch_or_commit: If specified, switch to this branch/commit.
:param int pr_number: PR number.
"""
_LOGGER.info("Clone SDK repository %s", sdk_git_id)
url_parsing = urlsplit(sdk_git_id)
sdk_git_id = url_parsing.path
if sdk_git_id.startswith("/"):
sdk_git_id = sdk_git_id[1:]
credentials_part = ''
if gh_token:
login = user_from_token(gh_token).login
credentials_part = '{user}:{token}@'.format(
user=login,
token=gh_token
)
else:
_LOGGER.warning('Will clone the repo without writing credentials')
https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(
credentials=credentials_part,
sdk_git_id=sdk_git_id
)
# Clone the repo
_git_clone_to_path(https_authenticated_url, folder)
# If this is a PR, do some fetch to improve the number of SHA1 available
if pr_number:
try:
checkout_with_fetch(folder, "pull/{}/merge".format(pr_number))
return
except Exception: # pylint: disable=broad-except
pass # Assume "merge" doesn't exist anymore, fetch "head"
checkout_with_fetch(folder, "pull/{}/head".format(pr_number))
# If there is SHA1, checkout it. If PR number was given, SHA1 could be inside that PR.
if branch_or_commit:
repo = Repo(str(folder))
repo.git.checkout(branch_or_commit)
def do_pr(gh_token, sdk_git_id, sdk_pr_target_repo_id, branch_name, base_branch, pr_body=""): # pylint: disable=too-many-arguments
"Do the PR"
if not gh_token:
_LOGGER.info('Skipping the PR, no token found')
return None
if not sdk_pr_target_repo_id:
_LOGGER.info('Skipping the PR, no target repo id')
return None
github_con = Github(gh_token)
sdk_pr_target_repo = github_con.get_repo(sdk_pr_target_repo_id)
if '/' in sdk_git_id:
sdk_git_owner = sdk_git_id.split('/')[0]
_LOGGER.info("Do the PR from %s", sdk_git_owner)
head_name = "{}:{}".format(sdk_git_owner, branch_name)
else:
head_name = branch_name
sdk_git_repo = github_con.get_repo(sdk_git_id)
sdk_git_owner = sdk_git_repo.owner.login
try:
github_pr = sdk_pr_target_repo.create_pull(
title='Automatic PR from {}'.format(branch_name),
body=pr_body,
head=head_name,
base=base_branch
)
except GithubException as err:
if err.status == 422 and err.data['errors'][0].get('message', '').startswith('A pull request already exists'):
matching_pulls = sdk_pr_target_repo.get_pulls(base=base_branch, head=sdk_git_owner+":"+head_name)
matching_pull = matching_pulls[0]
_LOGGER.info('PR already exists: %s', matching_pull.html_url)
return matching_pull
raise
_LOGGER.info("Made PR %s", github_pr.html_url)
return github_pr
def remove_readonly(func, path, _):
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path)
@contextmanager
class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
def as_raw_link(self):
"""Returns a GithubLink to a raw content.
"""
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
)
class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
def create_comment(self, text):
"""Mimic issue API, so we can use it everywhere.
Return dashboard comment.
"""
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def get_or_create(cls, issue, header, text=None):
"""Get or create the dashboard comment in this issue.
"""
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | GithubLink.as_raw_link | python | def as_raw_link(self):
if self.link_type == "raw":
return self # Can be discussed if we need an hard copy, or fail
if self.link_type != "blob":
raise ValueError("Cannot get a download link from a tree link")
return self.__class__(
self.gitid,
"raw",
self.branch_or_commit,
self.path,
self.token
) | Returns a GithubLink to a raw content. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L316-L329 | null | class GithubLink:
def __init__(self, gitid, link_type, branch_or_commit, path, token=None): # pylint: disable=too-many-arguments
self.gitid = gitid
self.link_type = link_type
self.branch_or_commit = branch_or_commit
self.path = path
self.token = token
@classmethod
def from_string(cls, github_url):
parsed = urlsplit(github_url)
netloc = parsed.netloc
if "@" in netloc:
token, netloc = netloc.split("@")
else:
token = None
split_path = parsed.path.split("/")
split_path.pop(0) # First is always empty
gitid = split_path.pop(0) + "/" + split_path.pop(0)
link_type = split_path.pop(0) if netloc != "raw.githubusercontent.com" else "raw"
branch_or_commit = split_path.pop(0)
path = "/".join(split_path)
return cls(gitid, link_type, branch_or_commit, path, token)
def __repr__(self):
if self.link_type == "raw":
netloc = "raw.githubusercontent.com"
path = "/".join(["", self.gitid, self.branch_or_commit, self.path])
# If raw and token, needs to be passed with "Authorization: token <token>", so nothing to do here
else:
netloc = "github.com" if not self.token else self.token + "@github.com"
path = "/".join(["", self.gitid, self.link_type, self.branch_or_commit, self.path])
return urlunsplit(("https", netloc, path, '', ''))
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | DashboardCommentableObject.create_comment | python | def create_comment(self, text):
return DashboardComment.get_or_create(self._issue_or_pr, self._header, text) | Mimic issue API, so we can use it everywhere.
Return dashboard comment. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L336-L340 | [
"def get_or_create(cls, issue, header, text=None):\n \"\"\"Get or create the dashboard comment in this issue.\n \"\"\"\n for comment in get_comments(issue):\n try:\n if comment.body.splitlines()[0] == header:\n obj = cls(comment, header)\n break\n exce... | class DashboardCommentableObject: # pylint: disable=too-few-public-methods
def __init__(self, issue_or_pr, header):
self._issue_or_pr = issue_or_pr
self._header = header
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/github_tools.py | DashboardComment.get_or_create | python | def get_or_create(cls, issue, header, text=None):
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError: # The comment body is empty
pass
# Hooooooo, no dashboard comment, let's create one
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj | Get or create the dashboard comment in this issue. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/github_tools.py#L348-L364 | [
"def create_comment(github_object, body):\n \"\"\"Create a comment, whatever the object is a PR, a commit or an issue.\n \"\"\"\n try:\n return github_object.create_issue_comment(body) # It's a PR\n except AttributeError:\n return github_object.create_comment(body) # It's a commit/issue... | class DashboardComment:
def __init__(self, github_comment, header):
self.github_comment = github_comment
self._header = header
@classmethod
def edit(self, text):
self.github_comment.edit(self._header+"\n"+text)
@property
def body(self):
return self.github_comment.body[len(self._header+"\n"):]
def delete(self):
self.github_comment.delete()
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/bot_framework.py | build_from_issue_comment | python | def build_from_issue_comment(gh_token, body):
if body["action"] in ["created", "edited"]:
github_con = Github(gh_token)
repo = github_con.get_repo(body['repository']['full_name'])
issue = repo.get_issue(body['issue']['number'])
text = body['comment']['body']
try:
comment = issue.get_comment(body['comment']['id'])
except UnknownObjectException:
# If the comment has already disapeared, skip the command
return None
return WebhookMetadata(repo, issue, text, comment)
return None | Create a WebhookMetadata from a comment added to an issue. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/bot_framework.py#L25-L39 | null | from collections import namedtuple
from functools import lru_cache
import logging
import os
import re
from github import Github, GithubException, UnknownObjectException
from .github_tools import (
exception_to_github,
)
_LOGGER = logging.getLogger(__name__)
def order(function):
function.bot_order = True
return function
WebhookMetadata = namedtuple(
'WebhookMetadata',
['repo', 'issue', 'text', 'comment']
)
def build_from_issues(gh_token, body):
"""Create a WebhookMetadata from an opening issue text.
"""
if body["action"] in ["opened", "edited"]:
github_con = Github(gh_token)
repo = github_con.get_repo(body['repository']['full_name'])
issue = repo.get_issue(body['issue']['number'])
text = body['issue']['body']
comment = issue # It's where we update the comment: in the issue itself
return WebhookMetadata(repo, issue, text, comment)
return None
@lru_cache()
def robot_name_from_env_variable():
github_con = Github(os.environ["GH_TOKEN"])
return github_con.get_user().login
class BotHandler:
def __init__(self, handler, robot_name=None, gh_token=None):
self.handler = handler
self.gh_token = gh_token or os.environ["GH_TOKEN"]
self.robot_name = robot_name or robot_name_from_env_variable()
def _is_myself(self, body):
return body['sender']['login'].lower() == self.robot_name.lower()
def issue_comment(self, body):
if self._is_myself(body):
return {'message': 'I don\'t talk to myself, I\'m not schizo'}
webhook_data = build_from_issue_comment(self.gh_token, body)
return self.manage_comment(webhook_data)
def issues(self, body):
if self._is_myself(body):
return {'message': 'I don\'t talk to myself, I\'m not schizo'}
webhook_data = build_from_issues(self.gh_token, body)
return self.manage_comment(webhook_data)
def orders(self):
"""Return method tagged "order" in the handler.
"""
return [order_cmd for order_cmd in dir(self.handler)
if getattr(getattr(self.handler, order_cmd), "bot_order", False)]
def manage_comment(self, webhook_data):
if webhook_data is None:
return {'message': 'Nothing for me'}
# Is someone talking to me:
message = re.search("@{} (.*)".format(self.robot_name), webhook_data.text, re.I)
response = None
if message:
command = message.group(1)
split_text = command.lower().split()
orderstr = split_text.pop(0)
if orderstr == "help":
response = self.help_order()
elif orderstr in self.orders():
try: # Reaction is fun, but it's preview not prod.
# Be careful, don't fail the command if we can't thumbs up...
webhook_data.comment.create_reaction("+1")
except GithubException:
pass
with exception_to_github(webhook_data.issue): # Just in case
response = getattr(self.handler, orderstr)(webhook_data.issue, *split_text)
else:
response = "I didn't understand your command:\n```bash\n{}\n```\nin this context, sorry :(\n".format(
command
)
response += self.help_order()
if response:
webhook_data.issue.create_comment(response)
return {'message': response}
return {'message': 'Nothing for me or exception'}
def help_order(self):
orders = ["This is what I can do:"]
for orderstr in self.orders():
orders.append("- `{}`".format(orderstr))
orders.append("- `help` : this help message")
return "\n".join(orders)
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/bot_framework.py | build_from_issues | python | def build_from_issues(gh_token, body):
if body["action"] in ["opened", "edited"]:
github_con = Github(gh_token)
repo = github_con.get_repo(body['repository']['full_name'])
issue = repo.get_issue(body['issue']['number'])
text = body['issue']['body']
comment = issue # It's where we update the comment: in the issue itself
return WebhookMetadata(repo, issue, text, comment)
return None | Create a WebhookMetadata from an opening issue text. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/bot_framework.py#L41-L51 | null | from collections import namedtuple
from functools import lru_cache
import logging
import os
import re
from github import Github, GithubException, UnknownObjectException
from .github_tools import (
exception_to_github,
)
_LOGGER = logging.getLogger(__name__)
def order(function):
function.bot_order = True
return function
WebhookMetadata = namedtuple(
'WebhookMetadata',
['repo', 'issue', 'text', 'comment']
)
def build_from_issue_comment(gh_token, body):
"""Create a WebhookMetadata from a comment added to an issue.
"""
if body["action"] in ["created", "edited"]:
github_con = Github(gh_token)
repo = github_con.get_repo(body['repository']['full_name'])
issue = repo.get_issue(body['issue']['number'])
text = body['comment']['body']
try:
comment = issue.get_comment(body['comment']['id'])
except UnknownObjectException:
# If the comment has already disapeared, skip the command
return None
return WebhookMetadata(repo, issue, text, comment)
return None
@lru_cache()
def robot_name_from_env_variable():
github_con = Github(os.environ["GH_TOKEN"])
return github_con.get_user().login
class BotHandler:
def __init__(self, handler, robot_name=None, gh_token=None):
self.handler = handler
self.gh_token = gh_token or os.environ["GH_TOKEN"]
self.robot_name = robot_name or robot_name_from_env_variable()
def _is_myself(self, body):
return body['sender']['login'].lower() == self.robot_name.lower()
def issue_comment(self, body):
if self._is_myself(body):
return {'message': 'I don\'t talk to myself, I\'m not schizo'}
webhook_data = build_from_issue_comment(self.gh_token, body)
return self.manage_comment(webhook_data)
def issues(self, body):
if self._is_myself(body):
return {'message': 'I don\'t talk to myself, I\'m not schizo'}
webhook_data = build_from_issues(self.gh_token, body)
return self.manage_comment(webhook_data)
def orders(self):
"""Return method tagged "order" in the handler.
"""
return [order_cmd for order_cmd in dir(self.handler)
if getattr(getattr(self.handler, order_cmd), "bot_order", False)]
def manage_comment(self, webhook_data):
if webhook_data is None:
return {'message': 'Nothing for me'}
# Is someone talking to me:
message = re.search("@{} (.*)".format(self.robot_name), webhook_data.text, re.I)
response = None
if message:
command = message.group(1)
split_text = command.lower().split()
orderstr = split_text.pop(0)
if orderstr == "help":
response = self.help_order()
elif orderstr in self.orders():
try: # Reaction is fun, but it's preview not prod.
# Be careful, don't fail the command if we can't thumbs up...
webhook_data.comment.create_reaction("+1")
except GithubException:
pass
with exception_to_github(webhook_data.issue): # Just in case
response = getattr(self.handler, orderstr)(webhook_data.issue, *split_text)
else:
response = "I didn't understand your command:\n```bash\n{}\n```\nin this context, sorry :(\n".format(
command
)
response += self.help_order()
if response:
webhook_data.issue.create_comment(response)
return {'message': response}
return {'message': 'Nothing for me or exception'}
def help_order(self):
orders = ["This is what I can do:"]
for orderstr in self.orders():
orders.append("- `{}`".format(orderstr))
orders.append("- `help` : this help message")
return "\n".join(orders)
|
Azure/azure-python-devtools | src/azure_devtools/ci_tools/bot_framework.py | BotHandler.orders | python | def orders(self):
return [order_cmd for order_cmd in dir(self.handler)
if getattr(getattr(self.handler, order_cmd), "bot_order", False)] | Return method tagged "order" in the handler. | train | https://github.com/Azure/azure-python-devtools/blob/2bf87b1f3cedd2b26fb2e4fd47a9baf435dcf936/src/azure_devtools/ci_tools/bot_framework.py#L80-L84 | null | class BotHandler:
def __init__(self, handler, robot_name=None, gh_token=None):
self.handler = handler
self.gh_token = gh_token or os.environ["GH_TOKEN"]
self.robot_name = robot_name or robot_name_from_env_variable()
def _is_myself(self, body):
return body['sender']['login'].lower() == self.robot_name.lower()
def issue_comment(self, body):
if self._is_myself(body):
return {'message': 'I don\'t talk to myself, I\'m not schizo'}
webhook_data = build_from_issue_comment(self.gh_token, body)
return self.manage_comment(webhook_data)
def issues(self, body):
if self._is_myself(body):
return {'message': 'I don\'t talk to myself, I\'m not schizo'}
webhook_data = build_from_issues(self.gh_token, body)
return self.manage_comment(webhook_data)
def manage_comment(self, webhook_data):
if webhook_data is None:
return {'message': 'Nothing for me'}
# Is someone talking to me:
message = re.search("@{} (.*)".format(self.robot_name), webhook_data.text, re.I)
response = None
if message:
command = message.group(1)
split_text = command.lower().split()
orderstr = split_text.pop(0)
if orderstr == "help":
response = self.help_order()
elif orderstr in self.orders():
try: # Reaction is fun, but it's preview not prod.
# Be careful, don't fail the command if we can't thumbs up...
webhook_data.comment.create_reaction("+1")
except GithubException:
pass
with exception_to_github(webhook_data.issue): # Just in case
response = getattr(self.handler, orderstr)(webhook_data.issue, *split_text)
else:
response = "I didn't understand your command:\n```bash\n{}\n```\nin this context, sorry :(\n".format(
command
)
response += self.help_order()
if response:
webhook_data.issue.create_comment(response)
return {'message': response}
return {'message': 'Nothing for me or exception'}
def help_order(self):
orders = ["This is what I can do:"]
for orderstr in self.orders():
orders.append("- `{}`".format(orderstr))
orders.append("- `help` : this help message")
return "\n".join(orders)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.setup_smtp | python | def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user) | Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L95-L112 | null | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
if self._watcher and self._watcher.is_alive:
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
"""
Forward the exception onto the backup copy that was made of the sys.__excepthook__
:param etype: Exceoption type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def delete_offline_reports(self):
"""
Delete all stored offline reports
:return: List of reports that still require submission
"""
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
# Only delete the reports which have been sent or who's upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports)
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
    """
    Save the crash report to a file, keeping at most `offline_report_limit`
    files in a cyclical FIFO buffer.

    Existing reports are renamed upward (1 -> 2, 2 -> 3, ...) so the newest
    crash report is always number 1; the oldest file is then dropped once the
    limit is exceeded.

    :param payload: JSON-serializable crash report dictionary.
    :return: Path of the newly written report file.
    """
    offline_reports = self.get_offline_reports()
    if offline_reports:
        # Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
        # Iterate in reverse (highest number first) so a rename never
        # collides with a file that has not been shifted yet.
        for ii, report in enumerate(reversed(offline_reports)):
            rpath, ext = os.path.splitext(report)
            # NOTE(review): prefer a raw string r'(\d+)' for the regex; the
            # last number in the filename is the report's position.
            n = int(re.findall('(\d+)', rpath)[-1])
            new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
            # copy2 preserves file metadata (timestamps) across the rename.
            shutil.copy2(report, new_name)
            os.remove(report)
        # Delete the oldest report once the shifted set exceeds the limit.
        if len(offline_reports) >= self.offline_report_limit:
            oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
            os.remove(oldest)
    new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
    # Write a new report as slot 1 (the newest).
    with open(new_report_path, 'w') as _f:
        json.dump(payload, _f)
    return new_report_path
def hq_submit(self, payload):
    """
    Upload a single crash report to the configured HQ server.

    :param payload: Crash report dictionary; the HQ parameters are attached
                    under 'HQ Parameters' before upload.
    :return: True on an HTTP 200 response, False otherwise.
    """
    payload['HQ Parameters'] = {} if self._hq is None else self._hq
    response = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
    # upload_report signals a transport-level failure by returning False.
    return response is not False and response.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
    """
    Send a crash report by e-mail over SMTP (with STARTTLS).

    :param subject: Subject line of the e-mail.
    :param body: HTML body of the message.
    :param attachments: Optional list of file paths to attach.
    :return: True if the message was sent, False on any SMTP failure.
    """
    smtp = self._smtp
    msg = MIMEMultipart()
    if isinstance(smtp['recipients'], (list, tuple)):
        msg['To'] = ', '.join(smtp['recipients'])
    else:
        msg['To'] = smtp['recipients']
    msg['From'] = smtp['from']
    msg['Subject'] = subject
    # Add the body of the message
    msg.attach(MIMEText(body, 'html'))
    # Add any attachments
    if attachments:
        for attachment in attachments:
            part = MIMEBase('application', 'octet-stream')
            # Bug fix: the original opened `attachments` (the whole list)
            # instead of the current `attachment`, raising TypeError whenever
            # any attachment was supplied; also close the file via `with`.
            with open(attachment, 'rb') as _f:
                part.set_payload(_f.read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename="%s"' % os.path.basename(attachment))
            msg.attach(part)
    try:
        ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
        ms.ehlo()
        ms.starttls()
        ms.ehlo()
        ms.login(smtp['user'], smtp['passwd'])
        ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
        ms.close()
    except Exception as e:
        # Any SMTP error (connect, TLS, auth, send) is reported as failure so
        # the caller can fall back to storing the report offline.
        self.logger.error('CrashReporter: %s' % e)
        return False
    return True
def get_offline_reports(self):
    """Return the stored offline report paths, sorted (newest = lowest number first)."""
    pattern = os.path.join(self.report_dir, self._report_name.replace("%d", "*"))
    return sorted(glob.glob(pattern))
def poll(self):
    """
    Check all registered child-process pipes for a forwarded crash payload.

    If a pipe has data, the (err_name, err_msg, analyzed_tb) package is
    received, turned into a payload and handled immediately.

    :return: True if a payload was found and handled, False otherwise.
    """
    for remote, local in CrashReportingProcess.cr_pipes:
        if remote.poll():
            # pkg is the argument tuple for generate_payload, sent by the
            # child process's crash handler.
            pkg = remote.recv()
            self.logger.debug('Interprocess payload found.')
            self.handle_payload(self.generate_payload(*pkg))
            # Only one payload is consumed per poll() call.
            return True
    return False
def _watcher_thread(self):
    """
    Periodically attempt to upload the crash reports. If any upload method is
    successful, delete the saved reports.

    Runs in a daemon thread started by start_watcher(); exits when
    `_watcher_running` is cleared by stop_watcher() or when no offline
    reports remain.
    """
    while 1:
        # Sleep first so a freshly started watcher does not immediately
        # re-attempt the submission that just failed.
        time.sleep(self.check_interval)
        if not self._watcher_running:
            break
        self.logger.info('CrashReporter: Attempting to send offline reports.')
        self.submit_offline_reports()
        remaining_reports = len(self.get_offline_reports())
        if remaining_reports == 0:
            break
    # Clear the handle so start_watcher() knows no watcher is active.
    self._watcher = None
    self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
    """
    E-mail each stored offline report that has not yet been sent via SMTP.

    Reports that succeed are re-written with 'SMTP Submission' set to 'Sent'.

    :param offline_reports: Paths of stored offline report files.
    :return: List of booleans, one per report that was actually attempted.

    NOTE(review): reports already marked 'Sent' contribute nothing to the
    returned list, so its length can differ from len(offline_reports) — the
    zip() with the HQ result list in submit_offline_reports may then pair up
    mismatched entries. Confirm before relying on positional correspondence.
    """
    success = []
    if offline_reports:
        # Add the body of the message
        for report in offline_reports:
            with open(report, 'r') as js:
                payload = json.load(js)
                if payload['SMTP Submission'] == 'Not sent':
                    success.append(self.smtp_submit(self.subject(), self.body(payload)))
                    if success[-1]:
                        # Set the flag in the payload signifying that the SMTP submission was successful
                        payload['SMTP Submission'] = 'Sent'
                        # NOTE(review): re-opens the same path for writing while
                        # the read handle is still open (shadows `js`); works on
                        # POSIX, verify on Windows.
                        with open(report, 'w') as js:
                            json.dump(payload, js)
    self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
    return success
def _hq_send_offline_reports(self, *offline_reports):
    """
    Upload all unsent offline reports to the HQ server in a single batch.

    Reports that upload successfully are re-written with 'HQ Submission'
    set to 'Sent'.

    :param offline_reports: Paths of stored offline report files.
    :return: List of booleans, one per pending report (empty list when there
             is nothing to send).  (Fix: the original fell off the end and
             returned None when `offline_reports` was empty.)
    """
    payloads = {}
    for report in offline_reports:
        with open(report, 'r') as _f:
            payload = json.load(_f)
            if payload['HQ Submission'] == 'Not sent':
                payload['HQ Parameters'] = self._hq if self._hq is not None else {}
                payloads[report] = payload
    if payloads:
        r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
        if r is False or r.status_code != 200:
            return [False] * len(payloads)
        # Set the flag in the payload signifying that the HQ submission was successful.
        # Fix: .items() instead of Py2-only .iteritems() (works on both 2 and 3).
        for report, payload in payloads.items():
            payload['HQ Submission'] = 'Sent'
            with open(report, 'w') as js:
                json.dump(payload, js)
        return [True] * len(payloads)
    else:
        return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.enable | python | def enable(self):
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir) | Enable the crash reporter. CrashReporter is defaulted to be enabled on creation. | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L123-L142 | null | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
             watcher=True, check_interval=5*60):
    """
    Initialise the crash reporter, optionally loading a config file and
    installing the exception hook immediately.

    :param report_dir: Directory to save offline reports.
    :param config: Path to a ConfigParser file with [General]/[SMTP]/[HQ] sections.
    :param logger: Optional logger; defaults to the 'CrashReporter' logger.
    :param activate: Install the sys.excepthook handler right away.
    :param watcher: Enable the background thread that retries offline reports.
    :param check_interval: Seconds between watcher retry attempts.
    """
    self.logger = logger if logger else logging.getLogger('CrashReporter')
    # Setup the directory used to store offline crash reports
    self.report_dir = report_dir
    self.check_interval = check_interval
    self.watcher_enabled = watcher
    # Watcher thread handle and its run flag (see start_watcher/stop_watcher).
    self._watcher = None
    self._watcher_running = False
    # Last caught exception (type, value, traceback) — filled by exception_handler.
    self.etype = None
    self.evalue = None
    self.tb = None
    self._recursion_error = False
    self.analyzed_traceback = None
    self.payload = None
    # Backup of the previous sys.excepthook, restored by disable().
    self._excepthook = None
    self.inspection_level = 1
    # Upload channel settings; None means the channel is disabled.
    self._smtp = None
    self._hq = None
    # Load the configuration from a file if specified
    if os.path.isfile(config):
        self.load_configuration(config)
    if activate:
        self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
    """
    Set up the crash reporter to send reports via email using SMTP

    :param host: SMTP host
    :param port: SMTP port
    :param user: sender email address
    :param passwd: sender email password
    :param recipients: list or comma separated string of recipients
    """
    settings = kwargs
    settings.update({'host': host, 'port': port, 'user': user,
                     'passwd': passwd, 'recipients': recipients})
    self._smtp = settings
    try:
        settings['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
    except Exception as e:
        # Fall back to no timeout when the supplied value is not an integer.
        logging.error(e)
        settings['timeout'] = None
    # The envelope sender defaults to the login user.
    settings['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
    """
    Set up the crash reporter to upload reports to an HQ server.

    :param server: HQ server URL; extra keyword arguments are stored as
                   HQ parameters.
    """
    settings = kwargs
    self._hq = settings
    try:
        settings['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
    except Exception as e:
        # Fall back to no timeout when the supplied value is not an integer.
        logging.error(e)
        settings['timeout'] = None
    settings.update({'server': server})
def disable(self):
    """
    Disable the crash reporter. No reports will be sent or saved.
    """
    if not CrashReporter.active:
        return
    CrashReporter.active = False
    # Restore the original excepthook
    sys.excepthook = self._excepthook
    self.stop_watcher()
    self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
    """
    Start the watcher that periodically checks for offline reports and
    attempts to upload them. A no-op if a watcher thread is already alive.
    """
    # Bug fix: is_alive is a method; the original referenced it without
    # calling (`self._watcher.is_alive`), which is always truthy, so a dead
    # watcher thread could never be restarted.
    if self._watcher and self._watcher.is_alive():
        self._watcher_running = True
    else:
        self.logger.info('CrashReporter: Starting watcher.')
        self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
        # Daemon thread: it must not keep the interpreter alive on exit.
        self._watcher.setDaemon(True)
        self._watcher_running = True
        self._watcher.start()
def stop_watcher(self):
    """
    Signal the watcher thread that retries offline reports to stop.
    """
    if not self._watcher:
        return
    # The watcher checks this flag after each sleep interval and exits.
    self._watcher_running = False
    self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
    """
    Handle a crash forwarded from a child process: build the report payload
    and submit/store it.
    """
    self.handle_payload(self.generate_payload(err_name, err_msg, analyzed_tb))
def _analyze_traceback(self, traceback):
    """
    Analyze a traceback object into a JSON-serializable structure, applying
    the (possibly user-overridden) custom_inspection hook and sanitising any
    values the hook added that cannot be JSON-serialized.

    :param traceback: Traceback object from the caught exception.
    :return: List of analyzed traceback dictionaries.
    """
    # To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
    # infinite recursion errors.
    limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
    analyzed_tb = analyze_traceback(traceback, limit=limit)
    # Hook mutates analyzed_tb in place (its return value is ignored here).
    self.custom_inspection(analyzed_tb)
    # Perform serialization check on the possibly user-altered traceback
    # NOTE(review): im_func / iteritems are Python 2-only; this module targets Py2.
    overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
    if overriden:
        for tb in analyzed_tb:
            for key, value in tb['Custom Inspection'].iteritems():
                try:
                    json.dumps(value)
                except TypeError:
                    # Replace unserializable values with their safe repr.
                    tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
    return analyzed_tb
def custom_inspection(self, analyzed_traceback):
    """
    Subclass hook: override to attach extra information to each traceback
    entry under its 'Custom Inspection' key. The default implementation
    returns the traceback unchanged.
    """
    return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
    """
    Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()

    Installed as sys.excepthook by enable(); always forwards the exception to
    the previously installed hook afterwards.

    :param etype: Exception type
    :param evalue: Exception value
    :param tb: Traceback
    :return:
    """
    # Keep the last exception triple for later inspection.
    self.etype = etype
    self.evalue = evalue
    self.tb = tb
    # Recursion errors get a truncated traceback (see _analyze_traceback).
    self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
    if etype:
        self.logger.info('CrashReporter: Crashes detected!')
        self.analyzed_traceback = self._analyze_traceback(tb)
        self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
    else:
        self.logger.info('CrashReporter: No crashes detected.')
    # Chain to the original excepthook so default reporting still happens.
    self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
    """
    Forward the exception to the excepthook that was saved when the crash
    reporter was enabled, so default handling still occurs.

    :param etype: Exception type
    :param evalue: Exception value
    :param tb: Traceback
    """
    original_hook = self._excepthook
    original_hook(etype, evalue, tb)
def handle_payload(self, payload):
    """
    Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
    exception handler (sys.__except_hook__) upon completion.

    If any enabled upload channel fails (or the reporter is disabled), the
    payload is written to the offline report directory for a later retry.

    :param payload: JSON structure containing crash report along with metadata
    :return:
    """
    self.payload = payload
    if CrashReporter.active:
        # Attempt to upload the report
        hq_success = smtp_success = False
        if self._hq is not None:
            hq_success = self.hq_submit(self.payload)
            if hq_success:
                self.payload['HQ Submission'] = 'Sent'
        if self._smtp is not None:
            # Send the report via email
            smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
            if smtp_success:
                self.payload['SMTP Submission'] = 'Sent'
    # When inactive, the first clause short-circuits before the (then
    # unbound) smtp_success/hq_success names are evaluated.
    if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
        # Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
        report_path = self.store_report(self.payload)
        self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
    """
    Assemble the JSON-serializable crash report dictionary.

    :param err_name: Exception class name.
    :param err_msg: Exception message string.
    :param analyzed_tb: Analyzed traceback structure.
    :return: Crash report payload dictionary.
    """
    now = datetime.datetime.now()
    # Flag truncated tracebacks for recursion errors.
    if self._recursion_error:
        message = err_msg + " (Not all tracebacks are shown)"
    else:
        message = err_msg
    # Channels that are not configured are marked 'Disabled' so offline
    # cleanup does not wait for them.
    hq_status = 'Not sent' if self._hq else 'Disabled'
    smtp_status = 'Not sent' if self._smtp else 'Disabled'
    return {'Error Type': err_name,
            'Error Message': message,
            'Application Name': self.application_name,
            'Application Version': self.application_version,
            'User': self.user_identifier,
            'Date': now.strftime('%d %B %Y'),
            'Time': now.strftime('%I:%M %p'),
            'Traceback': analyzed_tb,
            'HQ Submission': hq_status,
            'SMTP Submission': smtp_status
            }
def load_configuration(self, config):
    """
    Load reporter settings from a ConfigParser-style file.

    Recognised sections: [General] (report metadata/limits), [SMTP] and [HQ]
    (forwarded to setup_smtp / setup_hq).

    :param config: Path to the configuration file.
    """
    # NOTE(review): ConfigParser module name and readfp are Python 2 style.
    cfg = ConfigParser.ConfigParser()
    with open(config, 'r') as _f:
        cfg.readfp(_f)
    if cfg.has_section('General'):
        general = dict(cfg.items('General'))
        # Fall back to the class-level defaults for any missing option.
        # NOTE(review): values come back as strings; the numeric limits are
        # not converted to int here — confirm downstream usage tolerates that.
        self.application_name = general.get('application_name', CrashReporter.application_name)
        self.application_version = general.get('application_version', CrashReporter.application_version)
        self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
        self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
        self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
    if cfg.has_section('SMTP'):
        self.setup_smtp(**dict(cfg.items('SMTP')))
        # Config values are strings; coerce the ones SMTP needs typed.
        if 'port' in self._smtp:
            self._smtp['port'] = int(self._smtp['port'])
        if 'recipients' in self._smtp:
            self._smtp['recipients'] = self._smtp['recipients'].split(',')
    if cfg.has_section('HQ'):
        self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
    """
    Return a string to be used as the email subject line.
    """
    # Only include the application details when both are configured.
    if not (self.application_name and self.application_version):
        return 'Crash Report'
    return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
                                                       version=self.application_version)
def body(self, payload):
    """Render the e-mail body for *payload* at the configured inspection level."""
    level = self.inspection_level
    return self.render_report(payload, inspection_level=level)
def render_report(self, payload, inspection_level=1):
    """
    Render the crash report payload as HTML using the jinja2 e-mail template.

    :param payload: Crash report dictionary (template variable `info`).
    :param inspection_level: Number of traceback entries to expand in detail.
    :return: Rendered HTML string.
    """
    # html_template is the class-level path to email_report.html.
    with open(self.html_template, 'r') as _f:
        template = jinja2.Template(_f.read())
    return template.render(info=payload,
                           inspection_level=inspection_level)
def attachments(self):
    """
    Generate and return a list of attachments to send with the report.
    Subclasses may override; the default attaches nothing.

    :return: List of strings containing the paths to the files.
    """
    return []
def delete_offline_reports(self):
"""
Delete all stored offline reports
:return: List of reports that still require submission
"""
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
# Only delete the reports which have been sent or who's upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports)
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
"""
Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report always named is 01
"""
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall('(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path
def hq_submit(self, payload):
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
if r is False:
return False
else:
return r.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
smtp = self._smtp
msg = MIMEMultipart()
if isinstance(smtp['recipients'], list) or isinstance(smtp['recipients'], tuple):
msg['To'] = ', '.join(smtp['recipients'])
else:
msg['To'] = smtp['recipients']
msg['From'] = smtp['from']
msg['Subject'] = subject
# Add the body of the message
msg.attach(MIMEText(body, 'html'))
# Add any attachments
if attachments:
for attachment in attachments:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(attachments, 'rb').read())
encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % os.path.basename(attachment))
msg.attach(part)
try:
ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
ms.ehlo()
ms.starttls()
ms.ehlo()
ms.login(smtp['user'], smtp['passwd'])
ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
ms.close()
except Exception as e:
self.logger.error('CrashReporter: %s' % e)
return False
return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _watcher_thread(self):
"""
Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
"""
while 1:
time.sleep(self.check_interval)
if not self._watcher_running:
break
self.logger.info('CrashReporter: Attempting to send offline reports.')
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports == 0:
break
self._watcher = None
self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.disable | python | def disable(self):
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled') | Disable the crash reporter. No reports will be sent or saved. | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L144-L153 | null | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
if self._watcher and self._watcher.is_alive:
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
"""
Forward the exception onto the backup copy that was made of the sys.__excepthook__
:param etype: Exceoption type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
    """
    Build the subject line used for crash-report emails.
    """
    name, version = self.application_name, self.application_version
    # Fall back to a generic subject when the application identity is unknown.
    if not (name and version):
        return 'Crash Report'
    return 'Crash Report - {name} (v{version})'.format(name=name, version=version)
def body(self, payload):
    # Render the HTML report body; `inspection_level` limits how many
    # traceback frames receive full source/variable inspection detail.
    return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
    """
    Render the crash-report payload into HTML with the Jinja2 template.

    :param payload: dict produced by generate_payload().
    :param inspection_level: number of traceback entries (from most recent)
        to render with full inspection detail.
    :return: rendered HTML string.
    """
    # The template file path comes from the class attribute `html_template`.
    with open(self.html_template, 'r') as _f:
        template = jinja2.Template(_f.read())
    return template.render(info=payload,
                           inspection_level=inspection_level)
def attachments(self):
    """
    Hook for subclasses: generate the files to attach to the report.

    :return: List of strings containing the paths to the files.
    """
    # Base implementation attaches nothing.
    return list()
def delete_offline_reports(self):
    """
    Delete stored offline reports that no longer need to be submitted.

    A report is removed when it is corrupt (invalid JSON) or when every
    enabled upload method has either succeeded ('Sent') or is 'Disabled'.

    :return: List of report paths that still require submission.
    """
    reports = self.get_offline_reports()
    remaining_reports = reports[:]
    for report in reports:
        with open(report, 'r') as _f:
            try:
                js = json.load(_f)
            except ValueError as e:
                js = None
                # BUG FIX: the original format string was never given `e`,
                # logging a literal "%s". Use lazy logging args instead.
                logging.error("%s. Deleting crash report.", e)
        if js is None:
            # BUG FIX: corrupt reports were deleted from disk but left in
            # `remaining_reports`, so callers saw phantom pending reports.
            remaining_reports.remove(report)
            try:
                os.remove(report)
            except OSError as e:
                logging.error(e)
            continue
        if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
            # Only delete the reports which have been sent or whose upload
            # method is disabled.
            remaining_reports.remove(report)
            try:
                os.remove(report)
            except OSError as e:
                logging.error(e)
    self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
    return remaining_reports
def submit_offline_reports(self):
    """
    Submit stored offline reports using the enabled methods (SMTP and/or HQ).

    At most `send_at_most` reports are attempted per call; successfully
    submitted reports are then deleted.

    :return: True when every attempted report was submitted successfully by
        all enabled methods, False otherwise.
    """
    smtp_enabled = bool(self._smtp)
    hq_enabled = bool(self._hq)
    offline_reports = self.get_offline_reports()
    logging.info('Submitting %d offline crash reports' % len(offline_reports))
    # Cap the batch size so a large backlog doesn't block the caller.
    offline_reports = offline_reports[:self.send_at_most]
    if smtp_enabled:
        try:
            smtp_success = self._smtp_send_offline_reports(*offline_reports)
        except Exception as e:
            logging.error(e)
            smtp_success = [False] * len(offline_reports)
    else:
        # A disabled method counts as successful so it never blocks deletion.
        smtp_success = [True] * len(offline_reports)
    if hq_enabled:
        try:
            hq_success = self._hq_send_offline_reports(*offline_reports)
        except Exception as e:
            logging.error(e)
            hq_success = [False] * len(offline_reports)
    else:
        hq_success = [True] * len(offline_reports)
    remaining_reports = self.delete_offline_reports()
    # A report is fully submitted only when both methods succeeded.
    success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
    logging.info('%d crash reports successfully submitted' % success.count(True))
    logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
    return all(success)
def store_report(self, payload):
    """
    Save the crash report to a file, keeping the last `offline_report_limit`
    files in a cyclical FIFO buffer.

    The newest crash report is always crash_report_1; existing reports are
    renamed one slot up and the oldest is discarded once the limit is hit.

    :param payload: JSON-serializable crash-report dict.
    :return: path of the newly written report file.
    """
    offline_reports = self.get_offline_reports()
    if offline_reports:
        # Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
        # (reversed so a report is never renamed onto a not-yet-moved one).
        for ii, report in enumerate(reversed(offline_reports)):
            rpath, ext = os.path.splitext(report)
            # The trailing number in the file name encodes the report's age.
            n = int(re.findall('(\d+)', rpath)[-1])
            new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
            shutil.copy2(report, new_name)
            os.remove(report)
        # Delete the oldest report
        if len(offline_reports) >= self.offline_report_limit:
            oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
            os.remove(oldest)
    new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
    # Write a new report
    with open(new_report_path, 'w') as _f:
        json.dump(payload, _f)
    return new_report_path
def hq_submit(self, payload):
    """
    Upload a single crash report to the HQ server.

    :param payload: crash-report dict; the HQ connection parameters are
        attached under the 'HQ Parameters' key before upload.
    :return: True when the server answered with HTTP 200, False otherwise.
    """
    hq_params = {} if self._hq is None else self._hq
    payload['HQ Parameters'] = hq_params
    response = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
    # upload_report returns False when the request itself failed.
    return response is not False and response.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
    """
    Send the crash report as an HTML email over SMTP (with STARTTLS).

    :param subject: email subject line.
    :param body: HTML body of the message.
    :param attachments: optional list of file paths to attach.
    :return: True if the email was sent, False on any SMTP error.
    """
    smtp = self._smtp
    msg = MIMEMultipart()
    if isinstance(smtp['recipients'], (list, tuple)):
        msg['To'] = ', '.join(smtp['recipients'])
    else:
        msg['To'] = smtp['recipients']
    msg['From'] = smtp['from']
    msg['Subject'] = subject
    # Add the body of the message
    msg.attach(MIMEText(body, 'html'))
    # Add any attachments
    if attachments:
        for attachment in attachments:
            part = MIMEBase('application', 'octet-stream')
            # BUG FIX: the original read `attachments` (the whole list)
            # instead of the current `attachment`, and leaked the handle.
            with open(attachment, 'rb') as _f:
                part.set_payload(_f.read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename="%s"' % os.path.basename(attachment))
            msg.attach(part)
    try:
        ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
        ms.ehlo()
        ms.starttls()
        ms.ehlo()
        ms.login(smtp['user'], smtp['passwd'])
        ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
        ms.close()
    except Exception as e:
        self.logger.error('CrashReporter: %s' % e)
        return False
    return True
def get_offline_reports(self):
    """Return the stored offline report paths, sorted by file name."""
    pattern = os.path.join(self.report_dir, self._report_name.replace("%d", "*"))
    return sorted(glob.glob(pattern))
def poll(self):
    """
    Check the interprocess pipes for a crash payload sent from a child
    CrashReportingProcess and report the first one found.

    :return: True if a payload was received and handled, False otherwise.
    """
    # NOTE(review): iterates the class-level pipe registry and handles at
    # most one payload per call (returns after the first hit).
    for remote, local in CrashReportingProcess.cr_pipes:
        if remote.poll():
            pkg = remote.recv()
            self.logger.debug('Interprocess payload found.')
            self.handle_payload(self.generate_payload(*pkg))
            return True
    return False
def _watcher_thread(self):
    """
    Periodically attempt to upload the crash reports. If any upload method
    is successful, delete the saved reports.

    Runs until either stop_watcher() clears `_watcher_running` or no
    offline reports remain.
    """
    while 1:
        # Sleep first so start-up isn't blocked by an immediate attempt.
        time.sleep(self.check_interval)
        if not self._watcher_running:
            # stop_watcher() was called; exit without another attempt.
            break
        self.logger.info('CrashReporter: Attempting to send offline reports.')
        self.submit_offline_reports()
        remaining_reports = len(self.get_offline_reports())
        if remaining_reports == 0:
            break
    # Drop the thread reference so start_watcher() can create a new one.
    self._watcher = None
    self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
    """
    Upload the given offline reports to HQ in one batched request.

    :param offline_reports: paths of stored report files.
    :return: list of booleans (all True or all False), one per report that
        still needed an HQ submission. Returns None when called with no
        reports (the `if offline_reports` guard falls through).
    """
    payloads = {}
    if offline_reports:
        for report in offline_reports:
            with open(report, 'r') as _f:
                payload = json.load(_f)
            # Only batch reports that have not yet reached HQ.
            if payload['HQ Submission'] == 'Not sent':
                payload['HQ Parameters'] = self._hq if self._hq is not None else {}
                payloads[report] = payload
        if payloads:
            r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
            # upload_many_reports returns False when the request failed.
            if r is False or r.status_code != 200:
                return [False] * len(payloads)
            # Set the flag in the payload signifying that the HQ submission
            # was successful. (iteritems: this module targets Python 2.)
            for report, payload in payloads.iteritems():
                payload['HQ Submission'] = 'Sent'
                with open(report, 'w') as js:
                    json.dump(payload, js)
            return [True] * len(payloads)
        else:
            # Nothing pending: an empty result list.
            return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.start_watcher | python | def start_watcher(self):
if self._watcher and self._watcher.is_alive:
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start() | Start the watcher that periodically checks for offline reports and attempts to upload them. | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L155-L166 | null | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def stop_watcher(self):
    """
    Ask the offline-report watcher thread to stop.
    """
    # Flip the flag only when a watcher exists; the thread checks the flag
    # on its next wake-up and exits on its own.
    if self._watcher is None:
        return
    self._watcher_running = False
    self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
"""
Forward the exception onto the backup copy that was made of the sys.__excepthook__
:param etype: Exceoption type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def delete_offline_reports(self):
"""
Delete all stored offline reports
:return: List of reports that still require submission
"""
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
# Only delete the reports which have been sent or who's upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports)
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
"""
Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report always named is 01
"""
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall('(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path
def hq_submit(self, payload):
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
if r is False:
return False
else:
return r.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
smtp = self._smtp
msg = MIMEMultipart()
if isinstance(smtp['recipients'], list) or isinstance(smtp['recipients'], tuple):
msg['To'] = ', '.join(smtp['recipients'])
else:
msg['To'] = smtp['recipients']
msg['From'] = smtp['from']
msg['Subject'] = subject
# Add the body of the message
msg.attach(MIMEText(body, 'html'))
# Add any attachments
if attachments:
for attachment in attachments:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(attachments, 'rb').read())
encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % os.path.basename(attachment))
msg.attach(part)
try:
ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
ms.ehlo()
ms.starttls()
ms.ehlo()
ms.login(smtp['user'], smtp['passwd'])
ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
ms.close()
except Exception as e:
self.logger.error('CrashReporter: %s' % e)
return False
return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _watcher_thread(self):
"""
Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
"""
while 1:
time.sleep(self.check_interval)
if not self._watcher_running:
break
self.logger.info('CrashReporter: Attempting to send offline reports.')
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports == 0:
break
self._watcher = None
self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.stop_watcher | python | def stop_watcher(self):
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.') | Stop the watcher thread that tries to send offline reports. | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L168-L174 | null | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
    """
    Start the watcher that periodically checks for offline reports and
    attempts to upload them.
    """
    # BUG FIX: is_alive is a method — the old `self._watcher.is_alive`
    # attribute check was always truthy, so a finished watcher thread was
    # never replaced and reporting silently stopped.
    if self._watcher is not None and self._watcher.is_alive():
        self._watcher_running = True
    else:
        self.logger.info('CrashReporter: Starting watcher.')
        self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
        # Daemon thread: must not keep the interpreter alive on exit.
        self._watcher.daemon = True
        self._watcher_running = True
        self._watcher.start()
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
"""
Forward the exception onto the backup copy that was made of the sys.__excepthook__
:param etype: Exceoption type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
    """Return the rendered email body (HTML) for the given crash-report payload."""
    return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
    """
    Render the crash-report payload into HTML with the jinja2 email template.

    :param payload: crash-report dictionary
    :param inspection_level: number of traceback frames to render in detail
    :return: rendered HTML string
    """
    with open(self.html_template, 'r') as fh:
        source = fh.read()
    return jinja2.Template(source).render(info=payload, inspection_level=inspection_level)
def attachments(self):
    """
    Hook for subclasses: return the paths of files to attach to the report
    email. The base implementation attaches nothing.

    :return: List of strings containing the paths to the files.
    """
    return []
def delete_offline_reports(self):
"""
Delete all stored offline reports
:return: List of reports that still require submission
"""
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
# Only delete the reports which have been sent or who's upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports)
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
    """
    Persist a crash report to disk, keeping at most `offline_report_limit`
    reports in a FIFO rotation. The newest report is always number 1.

    :param payload: JSON-serializable crash-report dictionary
    :return: path of the newly written report file
    """
    offline_reports = self.get_offline_reports()
    if offline_reports:
        # Shift every existing report up by one: 1 -> 2, 2 -> 3, ...
        # Iterating in reverse (highest number first) ensures a move never
        # clobbers a file that has not been shifted yet.
        for ii, report in enumerate(reversed(offline_reports)):
            rpath, ext = os.path.splitext(report)
            # The report's current number is the last integer in its name.
            n = int(re.findall('(\d+)', rpath)[-1])
            new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
            shutil.copy2(report, new_name)
            os.remove(report)
        # Drop the report that was just shifted past the retention limit.
        if len(offline_reports) >= self.offline_report_limit:
            oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
            os.remove(oldest)
    new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
    # Write the new report as number 1.
    with open(new_report_path, 'w') as _f:
        json.dump(payload, _f)
    return new_report_path
def hq_submit(self, payload):
    """
    Upload a single crash report to the HQ server.

    :param payload: crash-report dictionary (the HQ parameters are added here)
    :return: True when the server answered with HTTP 200, else False
    """
    payload['HQ Parameters'] = {} if self._hq is None else self._hq
    response = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
    if response is False:
        # upload_report signals a transport-level failure by returning False.
        return False
    return response.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
    """
    Send a crash report by email over SMTP (with STARTTLS).

    :param subject: email subject line
    :param body: HTML body of the email
    :param attachments: optional list of file paths to attach
    :return: True when the email was sent, False on any failure
    """
    smtp = self._smtp
    msg = MIMEMultipart()
    if isinstance(smtp['recipients'], (list, tuple)):
        msg['To'] = ', '.join(smtp['recipients'])
    else:
        msg['To'] = smtp['recipients']
    msg['From'] = smtp['from']
    msg['Subject'] = subject
    # Add the body of the message
    msg.attach(MIMEText(body, 'html'))
    # Add any attachments
    if attachments:
        for attachment in attachments:
            part = MIMEBase('application', 'octet-stream')
            # Bug fix: the original opened `attachments` (the whole list)
            # instead of the current `attachment`, and leaked the file handle.
            with open(attachment, 'rb') as att_file:
                part.set_payload(att_file.read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename="%s"' % os.path.basename(attachment))
            msg.attach(part)
    try:
        ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
        ms.ehlo()
        ms.starttls()
        ms.ehlo()
        ms.login(smtp['user'], smtp['passwd'])
        ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
        ms.close()
    except Exception as e:
        self.logger.error('CrashReporter: %s' % e)
        return False
    return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
    """
    Check the pipes of every CrashReportingProcess for a crash package sent
    from a child process; dispatch the first one found.

    :return: True when a payload was received and handled, else False
    """
    for remote, _local in CrashReportingProcess.cr_pipes:
        if not remote.poll():
            continue
        pkg = remote.recv()
        self.logger.debug('Interprocess payload found.')
        self.handle_payload(self.generate_payload(*pkg))
        return True
    return False
def _watcher_thread(self):
    """
    Background loop: every `check_interval` seconds try to submit the offline
    reports, exiting once none remain or the watcher has been stopped.
    """
    while True:
        time.sleep(self.check_interval)
        if not self._watcher_running:
            break
        self.logger.info('CrashReporter: Attempting to send offline reports.')
        self.submit_offline_reports()
        if not self.get_offline_reports():
            # Everything delivered; no reason to keep polling.
            break
    self._watcher = None
    self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.exception_handler | python | def exception_handler(self, etype, evalue, tb):
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb) | Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return: | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L204-L225 | [
"def _analyze_traceback(self, traceback):\n # To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of\n # infinite recursion errors.\n limit = CrashReporter.recursion_depth_limit if self._recursion_error else None\n analyzed_tb = analyze_traceback(tr... | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
if self._watcher and self._watcher.is_alive:
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def forward_exception(self, etype, evalue, tb):
"""
Forward the exception onto the backup copy that was made of the sys.__excepthook__
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def delete_offline_reports(self):
"""
Delete all stored offline reports
:return: List of reports that still require submission
"""
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
# Only delete the reports which have been sent or who's upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports)
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
"""
Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report always named is 01
"""
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall('(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path
def hq_submit(self, payload):
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
if r is False:
return False
else:
return r.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
smtp = self._smtp
msg = MIMEMultipart()
if isinstance(smtp['recipients'], list) or isinstance(smtp['recipients'], tuple):
msg['To'] = ', '.join(smtp['recipients'])
else:
msg['To'] = smtp['recipients']
msg['From'] = smtp['from']
msg['Subject'] = subject
# Add the body of the message
msg.attach(MIMEText(body, 'html'))
# Add any attachments
if attachments:
for attachment in attachments:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(attachments, 'rb').read())
encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % os.path.basename(attachment))
msg.attach(part)
try:
ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
ms.ehlo()
ms.starttls()
ms.ehlo()
ms.login(smtp['user'], smtp['passwd'])
ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
ms.close()
except Exception as e:
self.logger.error('CrashReporter: %s' % e)
return False
return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _watcher_thread(self):
"""
Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
"""
while 1:
time.sleep(self.check_interval)
if not self._watcher_running:
break
self.logger.info('CrashReporter: Attempting to send offline reports.')
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports == 0:
break
self._watcher = None
self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.forward_exception | python | def forward_exception(self, etype, evalue, tb):
self._excepthook(etype, evalue, tb) | Forward the exception onto the backup copy that was made of the sys.__excepthook__
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return: | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L227-L236 | null | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
if self._watcher and self._watcher.is_alive:
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def delete_offline_reports(self):
"""
Delete all stored offline reports
:return: List of reports that still require submission
"""
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
# Only delete the reports which have been sent or who's upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports)
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
"""
Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report always named is 01
"""
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall('(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path
def hq_submit(self, payload):
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
if r is False:
return False
else:
return r.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
smtp = self._smtp
msg = MIMEMultipart()
if isinstance(smtp['recipients'], list) or isinstance(smtp['recipients'], tuple):
msg['To'] = ', '.join(smtp['recipients'])
else:
msg['To'] = smtp['recipients']
msg['From'] = smtp['from']
msg['Subject'] = subject
# Add the body of the message
msg.attach(MIMEText(body, 'html'))
# Add any attachments
if attachments:
for attachment in attachments:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(attachments, 'rb').read())
encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % os.path.basename(attachment))
msg.attach(part)
try:
ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
ms.ehlo()
ms.starttls()
ms.ehlo()
ms.login(smtp['user'], smtp['passwd'])
ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
ms.close()
except Exception as e:
self.logger.error('CrashReporter: %s' % e)
return False
return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _watcher_thread(self):
"""
Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
"""
while 1:
time.sleep(self.check_interval)
if not self._watcher_running:
break
self.logger.info('CrashReporter: Attempting to send offline reports.')
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports == 0:
break
self._watcher = None
self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.handle_payload | python | def handle_payload(self, payload):
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path) | Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return: | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L238-L262 | [
"def store_report(self, payload):\n \"\"\"\n Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.\n The newest crash report always named is 01\n \"\"\"\n offline_reports = self.get_offline_reports()\n if offline_reports:\n # Increment the... | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
if self._watcher and self._watcher.is_alive:
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
"""
Forward the exception onto the backup copy that was made of the sys.__excepthook__
:param etype: Exceoption type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def delete_offline_reports(self):
"""
Delete all stored offline reports
:return: List of reports that still require submission
"""
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
# Only delete the reports which have been sent or who's upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports)
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
"""
Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report always named is 01
"""
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall('(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path
def hq_submit(self, payload):
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
if r is False:
return False
else:
return r.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
smtp = self._smtp
msg = MIMEMultipart()
if isinstance(smtp['recipients'], list) or isinstance(smtp['recipients'], tuple):
msg['To'] = ', '.join(smtp['recipients'])
else:
msg['To'] = smtp['recipients']
msg['From'] = smtp['from']
msg['Subject'] = subject
# Add the body of the message
msg.attach(MIMEText(body, 'html'))
# Add any attachments
if attachments:
for attachment in attachments:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(attachments, 'rb').read())
encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % os.path.basename(attachment))
msg.attach(part)
try:
ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
ms.ehlo()
ms.starttls()
ms.ehlo()
ms.login(smtp['user'], smtp['passwd'])
ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
ms.close()
except Exception as e:
self.logger.error('CrashReporter: %s' % e)
return False
return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _watcher_thread(self):
"""
Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
"""
while 1:
time.sleep(self.check_interval)
if not self._watcher_running:
break
self.logger.info('CrashReporter: Attempting to send offline reports.')
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports == 0:
break
self._watcher = None
self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.subject | python | def subject(self):
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report' | Return a string to be used as the email subject line. | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L301-L309 | null | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
             watcher=True, check_interval=5*60):
    # Fall back to a module-named logger when none is supplied.
    self.logger = logger if logger else logging.getLogger('CrashReporter')
    # Setup the directory used to store offline crash reports
    self.report_dir = report_dir
    self.check_interval = check_interval    # seconds between watcher upload attempts
    self.watcher_enabled = watcher
    self._watcher = None                    # background Thread retrying offline uploads
    self._watcher_running = False
    # Last handled exception (type, value, traceback) and its analyzed form.
    self.etype = None
    self.evalue = None
    self.tb = None
    self._recursion_error = False           # True when crash was a recursion-depth error
    self.analyzed_traceback = None
    self.payload = None                     # last generated report payload
    self._excepthook = None                 # original sys.excepthook, restored on disable()
    self.inspection_level = 1               # number of tracebacks rendered in detail
    self._smtp = None                       # SMTP settings dict (setup_smtp), or None
    self._hq = None                         # HQ upload settings dict (setup_hq), or None
    # Load the configuration from a file if specified
    if os.path.isfile(config):
        self.load_configuration(config)
    if activate:
        self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
    """
    Configure email (SMTP) delivery of crash reports.

    :param host: SMTP host
    :param port: SMTP port
    :param user: sender email address
    :param passwd: sender email password
    :param recipients: list or comma separated string of recipients

    Extra keyword arguments (e.g. ``timeout``, ``from``) are kept alongside
    the required settings.
    """
    settings = kwargs
    settings['host'] = host
    settings['port'] = port
    settings['user'] = user
    settings['passwd'] = passwd
    settings['recipients'] = recipients
    # Normalize the timeout to an integer; on any failure record None instead.
    try:
        settings['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
    except Exception as e:
        logging.error(e)
        settings['timeout'] = None
    # The sender address defaults to the login user unless given explicitly.
    settings['from'] = kwargs.get('from', user)
    self._smtp = settings
def setup_hq(self, server, **kwargs):
    """
    Configure upload of crash reports to a HQ web server.

    :param server: URL of the HQ server.

    Extra keyword arguments (e.g. ``timeout``) are stored with the settings.
    """
    hq_settings = kwargs
    # Normalize the timeout to an integer; on any failure record None instead.
    try:
        hq_settings['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
    except Exception as e:
        logging.error(e)
        hq_settings['timeout'] = None
    hq_settings['server'] = server
    self._hq = hq_settings
def enable(self):
    """
    Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.

    Installs this instance's exception_handler as sys.excepthook, then tries to
    flush any offline reports left over from previous runs.
    """
    if not CrashReporter.active:
        CrashReporter.active = True
        # Store this function so we can set it back if the CrashReporter is deactivated
        self._excepthook = sys.excepthook
        sys.excepthook = self.exception_handler
        self.logger.info('CrashReporter: Enabled')
        if self.report_dir:
            if os.path.exists(self.report_dir):
                if self.get_offline_reports():
                    # First attempt to send the reports, if that fails then start the watcher
                    self.submit_offline_reports()
                    remaining_reports = len(self.get_offline_reports())
                    if remaining_reports and self.watcher_enabled:
                        self.start_watcher()
            else:
                # Create the offline-report directory on first use.
                os.makedirs(self.report_dir)
def disable(self):
    """
    Disable the crash reporter. No reports will be sent or saved.

    Restores the excepthook captured by enable() and stops the watcher.
    """
    if CrashReporter.active:
        CrashReporter.active = False
        # Restore the original excepthook
        sys.excepthook = self._excepthook
        self.stop_watcher()
        self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
    """
    Start the watcher that periodically checks for offline reports and attempts
    to upload them.

    If a watcher thread already exists and is still running, it is simply
    re-enabled; otherwise a new daemon thread is created and started.
    """
    if self._watcher and self._watcher.is_alive():
        # Bug fix: is_alive is a method; the previous unparenthesized reference
        # was always truthy, so a finished watcher thread was never restarted.
        self._watcher_running = True
    else:
        self.logger.info('CrashReporter: Starting watcher.')
        self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
        # Daemonize so the watcher never blocks interpreter exit
        # (.daemon works on Python 2.6+; setDaemon() was removed in 3.13).
        self._watcher.daemon = True
        self._watcher_running = True
        self._watcher.start()
def stop_watcher(self):
    """
    Stop the watcher thread that tries to send offline reports.

    The thread checks the running flag between sleep intervals, so shutdown is
    not immediate; nothing happens when no watcher has been started.
    """
    if self._watcher is None:
        return
    self._watcher_running = False
    self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
    # Entry point for exceptions forwarded from child processes (see poll()):
    # builds a payload from the pre-analyzed traceback and submits it.
    payload = self.generate_payload(err_name, err_msg, analyzed_tb)
    self.handle_payload(payload)
def _analyze_traceback(self, traceback):
    # Turn a raw traceback into the JSON-serializable structure stored in the payload.
    # To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
    # infinite recursion errors.
    limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
    analyzed_tb = analyze_traceback(traceback, limit=limit)
    self.custom_inspection(analyzed_tb)
    # Perform serialization check on the possibly user-altered traceback.
    # NOTE: im_func/iteritems are Python 2 only. The identity check detects whether
    # a subclass overrode custom_inspection, so the serialization pass is skipped
    # for the no-op base implementation.
    overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
    if overriden:
        for tb in analyzed_tb:
            for key, value in tb['Custom Inspection'].iteritems():
                try:
                    # Values must be JSON-serializable; fall back to safe_repr otherwise.
                    json.dumps(value)
                except TypeError:
                    tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
    return analyzed_tb
def custom_inspection(self, analyzed_traceback):
    """
    Hook for subclasses: override to attach extra data to each traceback's
    'Custom Inspection' entry. The base implementation returns the analyzed
    traceback unchanged.
    """
    return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
    """
    Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()

    :param etype: Exception type
    :param evalue: Exception value
    :param tb: Traceback
    :return:
    """
    # Keep the raw exception info around for later inspection.
    self.etype = etype
    self.evalue = evalue
    self.tb = tb
    # Recursion errors get truncated tracebacks (see _analyze_traceback).
    self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
    if etype:
        self.logger.info('CrashReporter: Crashes detected!')
        self.analyzed_traceback = self._analyze_traceback(tb)
        self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
    else:
        self.logger.info('CrashReporter: No crashes detected.')
    # Always defer to the saved excepthook so default crash behavior is preserved.
    self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
    """
    Pass the exception on to the excepthook that was saved when the reporter
    was enabled, so the interpreter's default crash behavior is preserved.

    :param etype: Exception type
    :param evalue: Exception value
    :param tb: Traceback
    """
    original_hook = self._excepthook
    original_hook(etype, evalue, tb)
def handle_payload(self, payload):
    """
    Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
    exception handler (sys.__except_hook__) upon completion.

    :param payload: JSON structure containing crash report along with metadata
    :return:
    """
    self.payload = payload
    if CrashReporter.active:
        # Attempt to upload the report
        hq_success = smtp_success = False
        if self._hq is not None:
            hq_success = self.hq_submit(self.payload)
            if hq_success:
                self.payload['HQ Submission'] = 'Sent'
        if self._smtp is not None:
            # Send the report via email
            smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
            if smtp_success:
                self.payload['SMTP Submission'] = 'Sent'
    # Short-circuit keeps smtp_success/hq_success from being read when inactive.
    if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
        # Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
        report_path = self.store_report(self.payload)
        self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
    """
    Assemble the JSON-serializable crash report dictionary.

    :param err_name: exception class name
    :param err_msg: exception message
    :param analyzed_tb: traceback structure produced by _analyze_traceback
    :return: dict with error info, app/user metadata, timestamps and
             per-channel submission status flags.
    """
    now = datetime.datetime.now()
    # Recursion errors have their tracebacks truncated, so flag that in the message.
    message = err_msg
    if self._recursion_error:
        message += " (Not all tracebacks are shown)"
    hq_status = 'Not sent' if self._hq else 'Disabled'
    smtp_status = 'Not sent' if self._smtp else 'Disabled'
    return {
        'Error Type': err_name,
        'Error Message': message,
        'Application Name': self.application_name,
        'Application Version': self.application_version,
        'User': self.user_identifier,
        'Date': now.strftime('%d %B %Y'),
        'Time': now.strftime('%I:%M %p'),
        'Traceback': analyzed_tb,
        'HQ Submission': hq_status,
        'SMTP Submission': smtp_status,
    }
def load_configuration(self, config):
    """
    Load reporter settings from a ConfigParser-style file with optional
    [General], [SMTP] and [HQ] sections.

    :param config: path to the configuration file.
    """
    cfg = ConfigParser.ConfigParser()
    with open(config, 'r') as _f:
        cfg.readfp(_f)
    if cfg.has_section('General'):
        general = dict(cfg.items('General'))
        self.application_name = general.get('application_name', CrashReporter.application_name)
        self.application_version = general.get('application_version', CrashReporter.application_version)
        self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
        # Bug fix: ConfigParser returns strings; these two settings are used
        # numerically (compared against len() in store_report / used to truncate
        # reprs), so they must be cast to int.
        self.offline_report_limit = int(general.get('offline_report_limit',
                                                    CrashReporter.offline_report_limit))
        self.max_string_length = int(general.get('max_string_length',
                                                 CrashReporter.max_string_length))
    if cfg.has_section('SMTP'):
        self.setup_smtp(**dict(cfg.items('SMTP')))
        # 'port' must be numeric for smtplib; comma-separated 'recipients'
        # becomes a list.
        if 'port' in self._smtp:
            self._smtp['port'] = int(self._smtp['port'])
        if 'recipients' in self._smtp:
            self._smtp['recipients'] = self._smtp['recipients'].split(',')
    if cfg.has_section('HQ'):
        self.setup_hq(**dict(cfg.items('HQ')))
def body(self, payload):
    # Render the HTML email body for this payload at the configured inspection level.
    return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
    """
    Render the crash report as HTML using the jinja2 email template.

    :param payload: report dictionary (see generate_payload).
    :param inspection_level: number of tracebacks (from most recent) rendered in detail.
    :return: rendered HTML string.
    """
    with open(self.html_template, 'r') as _f:
        template = jinja2.Template(_f.read())
    return template.render(info=payload,
                           inspection_level=inspection_level)
def attachments(self):
    """
    Hook for subclasses: return paths of files to attach to the crash email.
    The base implementation attaches nothing.

    :return: List of strings containing the paths to the files.
    """
    attachment_paths = []
    return attachment_paths
def delete_offline_reports(self):
    """
    Delete stored offline reports that no longer need to be submitted.

    A report is removed when every enabled channel has either sent it or is
    disabled. Corrupt (unparsable) reports are removed as well.

    :return: List of report paths that still require submission.
    """
    reports = self.get_offline_reports()
    remaining_reports = reports[:]
    for report in reports:
        with open(report, 'r') as _f:
            try:
                js = json.load(_f)
            except ValueError as e:
                # Bug fix: the original log call had a '%s' placeholder but never
                # passed the exception, logging the literal format string.
                logging.error("%s. Deleting crash report." % e)
                # Bug fix: a deleted corrupt report must not be reported as
                # still requiring submission.
                remaining_reports.remove(report)
                os.remove(report)
                continue
        if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
            # Only delete the reports which have been sent or whose upload method is disabled.
            remaining_reports.remove(report)
            try:
                os.remove(report)
            except OSError as e:
                logging.error(e)
    self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
    return remaining_reports
def submit_offline_reports(self):
    """
    Submit offline reports using the enabled methods (SMTP and/or HQ).

    At most `send_at_most` reports are attempted per call; successfully
    submitted reports are then deleted from disk.

    :return: True if every attempted report was submitted on all enabled channels.
    """
    smtp_enabled = bool(self._smtp)
    hq_enabled = bool(self._hq)
    offline_reports = self.get_offline_reports()
    logging.info('Submitting %d offline crash reports' % len(offline_reports))
    # Cap the batch size so a large backlog does not block for too long.
    offline_reports = offline_reports[:self.send_at_most]
    if smtp_enabled:
        try:
            smtp_success = self._smtp_send_offline_reports(*offline_reports)
        except Exception as e:
            logging.error(e)
            smtp_success = [False] * len(offline_reports)
    else:
        # Disabled channels count as successful so they don't block deletion.
        smtp_success = [True] * len(offline_reports)
    if hq_enabled:
        try:
            hq_success = self._hq_send_offline_reports(*offline_reports)
        except Exception as e:
            logging.error(e)
            hq_success = [False] * len(offline_reports)
    else:
        hq_success = [True] * len(offline_reports)
    remaining_reports = self.delete_offline_reports()
    # NOTE(review): the per-channel success lists may be shorter than the batch
    # when some reports were already sent on one channel; zip then truncates
    # the comparison — confirm this is the intended accounting.
    success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
    logging.info('%d crash reports successfully submitted' % success.count(True))
    logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
    return all(success)
def store_report(self, payload):
    """
    Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
    The newest crash report is always crash_report_1.

    :param payload: JSON-serializable report dictionary.
    :return: Path of the newly written report file.
    """
    offline_reports = self.get_offline_reports()
    if offline_reports:
        # Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
        # Iterating in reverse ensures a report is never renamed onto an
        # existing file.
        for ii, report in enumerate(reversed(offline_reports)):
            rpath, ext = os.path.splitext(report)
            # The trailing number in the file name encodes the report's age rank.
            n = int(re.findall('(\d+)', rpath)[-1])
            new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
            shutil.copy2(report, new_name)
            os.remove(report)
        # Delete the oldest report once the limit is exceeded
        if len(offline_reports) >= self.offline_report_limit:
            oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
            os.remove(oldest)
    new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
    # Write a new report
    with open(new_report_path, 'w') as _f:
        json.dump(payload, _f)
    return new_report_path
def hq_submit(self, payload):
    """Upload a single report to the HQ server; return True on HTTP 200."""
    # Include the HQ settings so the server can identify the uploader.
    payload['HQ Parameters'] = self._hq if self._hq is not None else {}
    r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
    if r is False:
        # upload_report returns False when the request itself failed.
        return False
    else:
        return r.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
    """
    Send the crash report by email via SMTP (STARTTLS).

    :param subject: email subject line
    :param body: HTML body of the message
    :param attachments: optional list of file paths to attach
    :return: True on success, False if building the connection or sending failed.
    """
    smtp = self._smtp
    msg = MIMEMultipart()
    if isinstance(smtp['recipients'], (list, tuple)):
        msg['To'] = ', '.join(smtp['recipients'])
    else:
        msg['To'] = smtp['recipients']
    msg['From'] = smtp['from']
    msg['Subject'] = subject
    # Add the body of the message
    msg.attach(MIMEText(body, 'html'))
    # Add any attachments
    if attachments:
        for attachment in attachments:
            part = MIMEBase('application', 'octet-stream')
            # Bug fix: read each individual file ('attachment'), not the whole
            # list ('attachments'), and close the handle deterministically.
            with open(attachment, 'rb') as _f:
                part.set_payload(_f.read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename="%s"' % os.path.basename(attachment))
            msg.attach(part)
    try:
        ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
        ms.ehlo()
        ms.starttls()
        ms.ehlo()
        ms.login(smtp['user'], smtp['passwd'])
        ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
        ms.close()
    except Exception as e:
        self.logger.error('CrashReporter: %s' % e)
        return False
    return True
def get_offline_reports(self):
    """Return the stored offline report paths, sorted by file name."""
    pattern = os.path.join(self.report_dir, self._report_name.replace("%d", "*"))
    return sorted(glob.glob(pattern))
def poll(self):
    """
    Check the pipes of CrashReportingProcess children for a forwarded
    exception and handle the first one found.

    :return: True if a payload was received and handled, False otherwise.
    """
    for remote, local in CrashReportingProcess.cr_pipes:
        if remote.poll():
            # pkg is (err_name, err_msg, analyzed_traceback) sent by the child.
            pkg = remote.recv()
            self.logger.debug('Interprocess payload found.')
            self.handle_payload(self.generate_payload(*pkg))
            return True
    return False
def _watcher_thread(self):
    """
    Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
    Runs until all reports are sent or stop_watcher() clears the running flag.
    """
    while 1:
        time.sleep(self.check_interval)
        if not self._watcher_running:
            # stop_watcher() was called.
            break
        self.logger.info('CrashReporter: Attempting to send offline reports.')
        self.submit_offline_reports()
        remaining_reports = len(self.get_offline_reports())
        if remaining_reports == 0:
            break
    # Drop the thread reference so start_watcher() creates a fresh one next time.
    self._watcher = None
    self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
    """
    Email each not-yet-sent offline report and mark it 'Sent' in its stored file.

    :return: list of per-report success booleans.
    """
    success = []
    if offline_reports:
        # Add the body of the message
        for report in offline_reports:
            with open(report, 'r') as js:
                payload = json.load(js)
            # NOTE(review): reports already marked 'Sent' are skipped, so the
            # returned list can be shorter than offline_reports — confirm callers
            # (submit_offline_reports zips this against the HQ result list).
            if payload['SMTP Submission'] == 'Not sent':
                success.append(self.smtp_submit(self.subject(), self.body(payload)))
                if success[-1]:
                    # Set the flag in the payload signifying that the SMTP submission was successful
                    payload['SMTP Submission'] = 'Sent'
                    with open(report, 'w') as js:
                        json.dump(payload, js)
        self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
    return success
def _hq_send_offline_reports(self, *offline_reports):
    """
    Upload all not-yet-sent offline reports to HQ in a single batch request,
    then mark them 'Sent' in their stored files.

    :return: list of per-report success booleans (all True or all False).
    """
    payloads = {}
    if offline_reports:
        for report in offline_reports:
            with open(report, 'r') as _f:
                payload = json.load(_f)
            if payload['HQ Submission'] == 'Not sent':
                payload['HQ Parameters'] = self._hq if self._hq is not None else {}
                payloads[report] = payload
        if payloads:
            # One batched request for all pending reports.
            r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
            if r is False or r.status_code != 200:
                return [False] * len(payloads)
            # Set the flag in the payload signifying that the HQ submission was successful
            # NOTE: iteritems is Python 2 only.
            for report, payload in payloads.iteritems():
                payload['HQ Submission'] = 'Sent'
                with open(report, 'w') as js:
                    json.dump(payload, js)
            return [True] * len(payloads)
        else:
            # Nothing pending: an empty list.
            return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.delete_offline_reports | python | def delete_offline_reports(self):
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
# Only delete the reports which have been sent or who's upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports | Delete all stored offline reports
:return: List of reports that still require submission | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L328-L352 | [
"def get_offline_reports(self):\n return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace(\"%d\", \"*\"))))\n"
] | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RuntimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
if self._watcher and self._watcher.is_alive:
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
"""
Forward the exception onto the backup copy that was made of the sys.__excepthook__
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
    """
    Build the email subject line, including the application name and version
    when both are configured.
    """
    name = self.application_name
    version = self.application_version
    if name and version:
        return 'Crash Report - %s (v%s)' % (name, version)
    return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports)
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
"""
Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report always named is 01
"""
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall('(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path
def hq_submit(self, payload):
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
if r is False:
return False
else:
return r.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
smtp = self._smtp
msg = MIMEMultipart()
if isinstance(smtp['recipients'], list) or isinstance(smtp['recipients'], tuple):
msg['To'] = ', '.join(smtp['recipients'])
else:
msg['To'] = smtp['recipients']
msg['From'] = smtp['from']
msg['Subject'] = subject
# Add the body of the message
msg.attach(MIMEText(body, 'html'))
# Add any attachments
if attachments:
for attachment in attachments:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(attachments, 'rb').read())
encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % os.path.basename(attachment))
msg.attach(part)
try:
ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
ms.ehlo()
ms.starttls()
ms.ehlo()
ms.login(smtp['user'], smtp['passwd'])
ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
ms.close()
except Exception as e:
self.logger.error('CrashReporter: %s' % e)
return False
return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _watcher_thread(self):
"""
Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
"""
while 1:
time.sleep(self.check_interval)
if not self._watcher_running:
break
self.logger.info('CrashReporter: Attempting to send offline reports.')
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports == 0:
break
self._watcher = None
self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.submit_offline_reports | python | def submit_offline_reports(self):
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success) | Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports) | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L354-L387 | [
"def delete_offline_reports(self):\n \"\"\"\n Delete all stored offline reports\n :return: List of reports that still require submission\n \"\"\"\n reports = self.get_offline_reports()\n remaining_reports = reports[:]\n for report in reports:\n with open(report, 'r') as _f:\n ... | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
    """
    Set up the crash reporter to send reports via email using SMTP
    :param host: SMTP host
    :param port: SMTP port
    :param user: sender email address
    :param passwd: sender email password
    :param recipients: list or comma separated string of recipients
    """
    settings = kwargs
    settings.update(host=host, port=port, user=user,
                    passwd=passwd, recipients=recipients)
    self._smtp = settings
    try:
        settings['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
    except Exception as e:
        logging.error(e)
        settings['timeout'] = None
    # 'from' defaults to the login user when not given explicitly.
    settings['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
if self._watcher and self._watcher.is_alive:
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
    """Convert *traceback* into a JSON-serializable structure, applying any
    subclass-defined custom inspection and sanitizing non-serializable values.

    :param traceback: Traceback object from the caught exception.
    :return: List of per-frame dictionaries produced by analyze_traceback.
    """
    # To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
    # infinite recursion errors.
    limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
    analyzed_tb = analyze_traceback(traceback, limit=limit)
    # custom_inspection is expected to mutate analyzed_tb in place; its
    # return value is intentionally ignored here.
    self.custom_inspection(analyzed_tb)
    # Perform serialization check on the possibly user-altered traceback.
    # im_func comparison detects a subclass override (Python 2 specific).
    overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
    if overriden:
        for tb in analyzed_tb:
            for key, value in tb['Custom Inspection'].iteritems():
                try:
                    json.dumps(value)
                except TypeError:
                    # Fall back to a safe repr for values json cannot encode.
                    tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
    return analyzed_tb
def custom_inspection(self, analyzed_traceback):
    """Subclass hook: augment the analyzed traceback with application-specific
    data under the 'Custom Inspection' key.

    The default implementation is a no-op and returns the traceback unchanged.
    """
    return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
    """
    Exception hook (installed as sys.excepthook). Catches crashes /
    un-caught exceptions, analyzes them and passes them to handle_payload(),
    then forwards the exception to the previously-installed hook.
    :param etype: Exception type
    :param evalue: Exception value
    :param tb: Traceback
    :return:
    """
    self.etype = etype
    self.evalue = evalue
    self.tb = tb
    # Recursion crashes produce huge tracebacks; flag them so the analysis
    # depth is capped in _analyze_traceback.
    self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
    if etype:
        self.logger.info('CrashReporter: Crashes detected!')
        self.analyzed_traceback = self._analyze_traceback(tb)
        self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
    else:
        self.logger.info('CrashReporter: No crashes detected.')
    # NOTE(review): forwarding appears to run unconditionally so the original
    # excepthook still prints the traceback — confirm nesting against upstream.
    self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
    """Pass the exception on to the excepthook that was installed before the
    CrashReporter replaced it.

    :param etype: Exception type
    :param evalue: Exception value
    :param tb: Traceback
    """
    self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
    """
    Given a crash report (JSON represented payload), attempts to upload it
    via the configured methods (HQ and/or SMTP). If the reporter is disabled
    or any enabled upload fails, the report is stored offline for later retry.
    :param payload: JSON structure containing crash report along with metadata
    :return:
    """
    self.payload = payload
    if CrashReporter.active:
        # Attempt to upload the report
        hq_success = smtp_success = False
        if self._hq is not None:
            hq_success = self.hq_submit(self.payload)
            if hq_success:
                # Mark the payload so a later offline retry skips HQ.
                self.payload['HQ Submission'] = 'Sent'
        if self._smtp is not None:
            # Send the report via email
            smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
            if smtp_success:
                self.payload['SMTP Submission'] = 'Sent'
    # The `not CrashReporter.active` clause short-circuits first, so the
    # (possibly unbound) success flags are never read when inactive.
    if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
        # Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
        report_path = self.store_report(self.payload)
        self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
    """Build the JSON-serializable crash-report dictionary.

    :param err_name: Exception class name.
    :param err_msg: Exception message.
    :param analyzed_tb: Analyzed traceback structure.
    :return: Report dictionary with metadata and submission status flags.
    """
    now = datetime.datetime.now()
    message = err_msg
    if self._recursion_error:
        # Recursion crashes have their traceback depth capped.
        message += " (Not all tracebacks are shown)"
    return {
        'Error Type': err_name,
        'Error Message': message,
        'Application Name': self.application_name,
        'Application Version': self.application_version,
        'User': self.user_identifier,
        'Date': now.strftime('%d %B %Y'),
        'Time': now.strftime('%I:%M %p'),
        'Traceback': analyzed_tb,
        'HQ Submission': 'Not sent' if self._hq else 'Disabled',
        'SMTP Submission': 'Not sent' if self._smtp else 'Disabled',
    }
def load_configuration(self, config):
    """Load CrashReporter settings from a ConfigParser-style file.

    Recognized sections: [General] (report metadata and limits),
    [SMTP] (arguments to setup_smtp) and [HQ] (arguments to setup_hq).

    :param config: Path to the configuration file.
    """
    cfg = ConfigParser.ConfigParser()
    with open(config, 'r') as _f:
        cfg.readfp(_f)
    if cfg.has_section('General'):
        general = dict(cfg.items('General'))
        self.application_name = general.get('application_name', CrashReporter.application_name)
        self.application_version = general.get('application_version', CrashReporter.application_version)
        self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
        # Bug fix: ConfigParser values are strings. Coerce the numeric limits
        # so later comparisons (e.g. len(reports) >= offline_report_limit in
        # store_report) and arithmetic do not operate on str.
        self.offline_report_limit = int(general.get('offline_report_limit',
                                                    CrashReporter.offline_report_limit))
        self.max_string_length = int(general.get('max_string_length',
                                                 CrashReporter.max_string_length))
    if cfg.has_section('SMTP'):
        self.setup_smtp(**dict(cfg.items('SMTP')))
        if 'port' in self._smtp:
            self._smtp['port'] = int(self._smtp['port'])
        if 'recipients' in self._smtp:
            # Comma separated string -> list of addresses.
            self._smtp['recipients'] = self._smtp['recipients'].split(',')
    if cfg.has_section('HQ'):
        self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
    """
    Return a string to be used as the email subject line.
    Includes the application name and version when both are configured.
    """
    if not (self.application_name and self.application_version):
        return 'Crash Report'
    return 'Crash Report - {name} (v{version})'.format(
        name=self.application_name, version=self.application_version)
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
    """
    Subclass hook: return a list of file paths to attach to the emailed
    report. The default implementation attaches nothing.
    :return: List of strings containing the paths to the files.
    """
    return []
def delete_offline_reports(self):
    """
    Delete stored offline reports that no longer require submission.

    A report is removed once both its SMTP and HQ submissions are 'Sent' or
    'Disabled'. Corrupt (non-JSON) reports are deleted outright.

    Bug fixes: the log call for corrupt reports was missing its format
    argument (`"%s..." `% e`), and corrupt reports were deleted from disk but
    still returned as "remaining".

    :return: List of report paths that still require submission.
    """
    reports = self.get_offline_reports()
    remaining_reports = reports[:]
    for report in reports:
        with open(report, 'r') as _f:
            try:
                js = json.load(_f)
            except ValueError as e:
                # Corrupt report: log the parse error and drop the file.
                logging.error("%s. Deleting crash report." % e)
                remaining_reports.remove(report)
                os.remove(report)
                continue
        if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
            # Only delete the reports which have been sent or whose upload method is disabled.
            remaining_reports.remove(report)
            try:
                os.remove(report)
            except OSError as e:
                logging.error(e)
    self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
    return remaining_reports
def store_report(self, payload):
    """
    Save the crash report to a file, keeping the last `offline_report_limit`
    files in a cyclical FIFO buffer. The newest crash report is always
    stored under index 1 (crash_report_1.json).

    :param payload: JSON-serializable crash report dictionary.
    :return: Path of the newly written report file.
    """
    offline_reports = self.get_offline_reports()
    if offline_reports:
        # Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
        # Iterating in reverse ensures a report is never renamed onto a slot
        # that has not been shifted out of the way yet.
        for ii, report in enumerate(reversed(offline_reports)):
            rpath, ext = os.path.splitext(report)
            # The last number embedded in the filename is the report's slot index.
            n = int(re.findall('(\d+)', rpath)[-1])
            new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
            shutil.copy2(report, new_name)
            os.remove(report)
        # Delete the oldest report (now shifted to slot limit+1) once the
        # buffer limit is reached.
        if len(offline_reports) >= self.offline_report_limit:
            oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
            os.remove(oldest)
    new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
    # Write a new report
    with open(new_report_path, 'w') as _f:
        json.dump(payload, _f)
    return new_report_path
def hq_submit(self, payload):
    """Upload *payload* to the HQ server.

    :param payload: Crash report dictionary; its 'HQ Parameters' key is set
        from the HQ configuration before upload.
    :return: True when the server answered HTTP 200, False otherwise.
    """
    payload['HQ Parameters'] = {} if self._hq is None else self._hq
    response = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
    return response is not False and response.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
    """Send the crash report via SMTP email.

    Bug fix: the attachment loop opened `attachments` (the whole list)
    instead of the current `attachment` path, which raised TypeError as soon
    as any attachment was supplied; the file handle was also never closed.

    :param subject: Email subject line.
    :param body: HTML body of the report.
    :param attachments: Optional list of file paths to attach.
    :return: True when the email was sent, False on any failure.
    """
    smtp = self._smtp
    msg = MIMEMultipart()
    if isinstance(smtp['recipients'], (list, tuple)):
        msg['To'] = ', '.join(smtp['recipients'])
    else:
        msg['To'] = smtp['recipients']
    msg['From'] = smtp['from']
    msg['Subject'] = subject
    # Add the body of the message
    msg.attach(MIMEText(body, 'html'))
    # Add any attachments
    if attachments:
        for attachment in attachments:
            part = MIMEBase('application', 'octet-stream')
            with open(attachment, 'rb') as _f:
                part.set_payload(_f.read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename="%s"' % os.path.basename(attachment))
            msg.attach(part)
    try:
        ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
        ms.ehlo()
        ms.starttls()
        ms.ehlo()
        ms.login(smtp['user'], smtp['passwd'])
        ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
        ms.close()
    except Exception as e:
        self.logger.error('CrashReporter: %s' % e)
        return False
    return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _watcher_thread(self):
"""
Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
"""
while 1:
time.sleep(self.check_interval)
if not self._watcher_running:
break
self.logger.info('CrashReporter: Attempting to send offline reports.')
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports == 0:
break
self._watcher = None
self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter.store_report | python | def store_report(self, payload):
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall('(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path | Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report always named is 01 | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L389-L412 | [
"def get_offline_reports(self):\n return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace(\"%d\", \"*\"))))\n"
] | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
if self._watcher and self._watcher.is_alive:
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
"""
Forward the exception onto the backup copy that was made of the sys.__excepthook__
:param etype: Exceoption type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def delete_offline_reports(self):
"""
Delete all stored offline reports
:return: List of reports that still require submission
"""
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
# Only delete the reports which have been sent or who's upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports
def submit_offline_reports(self):
    """
    Submit offline reports using the enabled methods (SMTP and/or HQ).

    A report counts as submitted only if every enabled channel succeeded
    for it; fully handled reports are then deleted by delete_offline_reports().

    :return: True if all attempted reports were submitted successfully.
    """
    smtp_enabled = bool(self._smtp)
    hq_enabled = bool(self._hq)
    offline_reports = self.get_offline_reports()
    logging.info('Submitting %d offline crash reports' % len(offline_reports))
    # Cap the batch size so a large backlog is drained gradually.
    offline_reports = offline_reports[:self.send_at_most]
    if smtp_enabled:
        try:
            smtp_success = self._smtp_send_offline_reports(*offline_reports)
        except Exception as e:
            logging.error(e)
            smtp_success = [False] * len(offline_reports)
    else:
        # Disabled channels count as successful so they don't block deletion.
        smtp_success = [True] * len(offline_reports)
    if hq_enabled:
        try:
            hq_success = self._hq_send_offline_reports(*offline_reports)
        except Exception as e:
            logging.error(e)
            hq_success = [False] * len(offline_reports)
    else:
        hq_success = [True] * len(offline_reports)
    remaining_reports = self.delete_offline_reports()
    # A report is fully submitted only when both channels succeeded (or were disabled).
    success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
    logging.info('%d crash reports successfully submitted' % success.count(True))
    logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
    return all(success)
def hq_submit(self, payload):
    """Upload a single crash report payload to the HQ server.

    :param payload: crash report dict; its 'HQ Parameters' key is set here.
    :return: True if the server accepted the report (HTTP 200), else False.
    """
    payload['HQ Parameters'] = {} if self._hq is None else self._hq
    response = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
    return response is not False and response.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
    """
    Send the crash report via SMTP email.

    :param subject: email subject line string
    :param body: HTML body of the report
    :param attachments: optional list of file paths to attach
    :return: True if the email was sent, False on any SMTP error.
    """
    smtp = self._smtp
    msg = MIMEMultipart()
    if isinstance(smtp['recipients'], list) or isinstance(smtp['recipients'], tuple):
        msg['To'] = ', '.join(smtp['recipients'])
    else:
        msg['To'] = smtp['recipients']
    msg['From'] = smtp['from']
    msg['Subject'] = subject
    # Add the body of the message
    msg.attach(MIMEText(body, 'html'))
    # Add any attachments
    if attachments:
        for attachment in attachments:
            part = MIMEBase('application', 'octet-stream')
            # BUG FIX: read the current attachment path (`attachment`), not the
            # whole `attachments` list, and close the handle when done.
            with open(attachment, 'rb') as _f:
                part.set_payload(_f.read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename="%s"' % os.path.basename(attachment))
            msg.attach(part)
    try:
        ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
        ms.ehlo()
        ms.starttls()
        ms.ehlo()
        ms.login(smtp['user'], smtp['passwd'])
        ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
        ms.close()
    except Exception as e:
        self.logger.error('CrashReporter: %s' % e)
        return False
    return True
def get_offline_reports(self):
    """Return the sorted list of stored offline report file paths."""
    pattern = self._report_name.replace("%d", "*")
    matches = glob.glob(os.path.join(self.report_dir, pattern))
    return sorted(matches)
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _watcher_thread(self):
    """
    Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.

    Runs until either stop_watcher() clears the running flag or no offline
    reports remain; clears self._watcher on exit so a new watcher can start.
    """
    while 1:
        # Sleep first so a freshly started watcher doesn't immediately resend.
        time.sleep(self.check_interval)
        if not self._watcher_running:
            # stop_watcher() was called.
            break
        self.logger.info('CrashReporter: Attempting to send offline reports.')
        self.submit_offline_reports()
        remaining_reports = len(self.get_offline_reports())
        if remaining_reports == 0:
            # Everything delivered; the watcher's job is done.
            break
    self._watcher = None
    self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
    """
    Upload stored offline reports to the HQ server as one batch.

    Only reports whose 'HQ Submission' flag is 'Not sent' are uploaded. On a
    successful batch upload each report file is rewritten with its flag set
    to 'Sent'. Returns None (implicitly) when called with no reports.

    :param offline_reports: paths of stored report JSON files
    :return: list of booleans, one per pending report
    """
    payloads = {}
    if offline_reports:
        for report in offline_reports:
            with open(report, 'r') as _f:
                payload = json.load(_f)
                if payload['HQ Submission'] == 'Not sent':
                    payload['HQ Parameters'] = self._hq if self._hq is not None else {}
                    payloads[report] = payload
        if payloads:
            r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
            if r is False or r.status_code != 200:
                return [False] * len(payloads)
            # Set the flag in the payload signifying that the HQ submission was successful
            for report, payload in payloads.iteritems():  # NOTE: dict.iteritems is Python 2 only
                payload['HQ Submission'] = 'Sent'
                with open(report, 'w') as js:
                    json.dump(payload, js)
            return [True] * len(payloads)
        else:
            # Nothing pending: len(payloads) == 0, so this is an empty list.
            return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/crashreporter.py | CrashReporter._watcher_thread | python | def _watcher_thread(self):
while 1:
time.sleep(self.check_interval)
if not self._watcher_running:
break
self.logger.info('CrashReporter: Attempting to send offline reports.')
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports == 0:
break
self._watcher = None
self.logger.info('CrashReporter: Watcher stopped.') | Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports. | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L471-L485 | null | class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
It can be setup to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
recursion_depth_limit: Maximum number of tracebacks to record in the case of RunetimeError: maximum recursion depth
exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
             watcher=True, check_interval=5*60):
    # See the class docstring for descriptions of report_dir, watcher,
    # check_interval, logger and config.
    self.logger = logger if logger else logging.getLogger('CrashReporter')
    # Setup the directory used to store offline crash reports
    self.report_dir = report_dir
    self.check_interval = check_interval
    self.watcher_enabled = watcher
    self._watcher = None  # background Thread created by start_watcher()
    self._watcher_running = False  # polled by the watcher loop; cleared to stop it
    # Last caught exception info, set by exception_handler().
    self.etype = None
    self.evalue = None
    self.tb = None
    self._recursion_error = False  # True when the crash was a recursion-depth error
    self.analyzed_traceback = None
    self.payload = None  # last generated crash report dict
    self._excepthook = None  # original sys.excepthook, restored on disable()
    self.inspection_level = 1
    self._smtp = None  # SMTP settings dict, populated by setup_smtp()
    self._hq = None  # HQ settings dict, populated by setup_hq()
    # Load the configuration from a file if specified
    if os.path.isfile(config):
        self.load_configuration(config)
    if activate:
        self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
    """
    Set up the crash reporter to send reports via email using SMTP

    :param host: SMTP host
    :param port: SMTP port
    :param user: sender email address
    :param passwd: sender email password
    :param recipients: list or comma separated string of recipients

    Optional keyword arguments:
        timeout: socket timeout in seconds (defaults to SMTP_DEFAULT_TIMEOUT)
        from: sender address shown on the email (defaults to `user`)
    """
    self._smtp = kwargs
    self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
    try:
        self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
    except Exception as e:
        # Non-numeric timeout in the config: fall back to blocking sockets.
        logging.error(e)
        self._smtp['timeout'] = None
    self._smtp['from'] = kwargs.get('from', user)
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
    """
    Start the watcher that periodically checks for offline reports and attempts to upload them.

    If a previous watcher thread is still alive, it is resumed rather than
    replaced; a dead or missing watcher is recreated.
    """
    # BUG FIX: is_alive must be *called*. The bound method object itself is
    # always truthy, so the old check treated a finished watcher thread as
    # alive and never restarted it.
    if self._watcher and self._watcher.is_alive():
        self._watcher_running = True
    else:
        self.logger.info('CrashReporter: Starting watcher.')
        self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
        self._watcher.setDaemon(True)
        self._watcher_running = True
        self._watcher.start()
def stop_watcher(self):
    """Signal the watcher thread (if any) to stop sending offline reports."""
    if not self._watcher:
        return
    self._watcher_running = False
    self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
# To prevent recording a large amount of potentially redundant tracebacks, limit the trace back for the case of
# infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
overriden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
if overriden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
    """
    Hook for subclasses to attach extra data to each traceback's
    'Custom Inspection' dict. The default implementation returns the
    traceback list unchanged.
    """
    return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
Exception hook. Catches crashes / un-caught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
    """
    Forward the exception onto the backup copy that was made of sys.excepthook
    when the reporter was enabled.

    :param etype: Exception type
    :param evalue: Exception value
    :param tb: Traceback
    :return:
    """
    self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
Given a crash report (JSON represented payload), attempts to upload the crash reports. Calls the default
exception handler (sys.__except_hook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
    """Assemble the JSON-serializable crash report dictionary."""
    now = datetime.datetime.now()
    # When a recursion error occurred the traceback list was truncated,
    # so flag that in the message (False * str == '').
    message = err_msg + self._recursion_error * " (Not all tracebacks are shown)"
    return {'Error Type': err_name,
            'Error Message': message,
            'Application Name': self.application_name,
            'Application Version': self.application_version,
            'User': self.user_identifier,
            'Date': now.strftime('%d %B %Y'),
            'Time': now.strftime('%I:%M %p'),
            'Traceback': analyzed_tb,
            'HQ Submission': 'Not sent' if self._hq else 'Disabled',
            'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'}
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def delete_offline_reports(self):
    """
    Delete all stored offline reports that have been fully handled.

    A report file is removed once both its 'SMTP Submission' and
    'HQ Submission' flags are 'Sent' or 'Disabled'. Corrupt (unparsable
    JSON) report files are removed outright.

    :return: List of report file paths that still require submission
    """
    reports = self.get_offline_reports()
    remaining_reports = reports[:]
    for report in reports:
        with open(report, 'r') as _f:
            try:
                js = json.load(_f)
            except ValueError as e:
                # BUG FIX: the exception was never passed to the format string,
                # so the log line printed a literal "%s". Use logging's lazy args.
                logging.error("%s. Deleting crash report.", e)
                os.remove(report)
                continue
        if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
            # Only delete the reports which have been sent or who's upload method is disabled.
            remaining_reports.remove(report)
            try:
                os.remove(report)
            except OSError as e:
                logging.error(e)
    self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
    return remaining_reports
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
Returns a tuple of (N sent reports, N remaining reports)
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
"""
Save the crash report to a file. Keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report always named is 01
"""
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall('(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path
def hq_submit(self, payload):
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
if r is False:
return False
else:
return r.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
    """
    Send the crash report via SMTP email.

    :param subject: email subject line string
    :param body: HTML body of the report
    :param attachments: optional list of file paths to attach
    :return: True if the email was sent, False on any SMTP error.
    """
    smtp = self._smtp
    msg = MIMEMultipart()
    if isinstance(smtp['recipients'], list) or isinstance(smtp['recipients'], tuple):
        msg['To'] = ', '.join(smtp['recipients'])
    else:
        msg['To'] = smtp['recipients']
    msg['From'] = smtp['from']
    msg['Subject'] = subject
    # Add the body of the message
    msg.attach(MIMEText(body, 'html'))
    # Add any attachments
    if attachments:
        for attachment in attachments:
            part = MIMEBase('application', 'octet-stream')
            # BUG FIX: read the current attachment path (`attachment`), not the
            # whole `attachments` list, and close the handle when done.
            with open(attachment, 'rb') as _f:
                part.set_payload(_f.read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            'attachment; filename="%s"' % os.path.basename(attachment))
            msg.attach(part)
    try:
        ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
        ms.ehlo()
        ms.starttls()
        ms.ehlo()
        ms.login(smtp['user'], smtp['passwd'])
        ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
        ms.close()
    except Exception as e:
        self.logger.error('CrashReporter: %s' % e)
        return False
    return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
# Add the body of the message
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
lobocv/crashreporter | crashreporter/process.py | enable_mp_crash_reporting | python | def enable_mp_crash_reporting():
global mp_crash_reporting_enabled
multiprocessing.Process = multiprocessing.process.Process = CrashReportingProcess
mp_crash_reporting_enabled = True | Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.
This function must be called before any imports to mulitprocessing in order for the monkey-patching to work. | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/process.py#L11-L20 | null | import logging
import multiprocessing
import sys
import traceback
from .tools import analyze_traceback
mp_crash_reporting_enabled = False
class CrashReportingProcess(multiprocessing.Process):
    """
    Monkey-patch class that replaces Process in the multiprocessing library. It adds the ability to catch any
    uncaught exceptions, serialize the crash report and pipe it through to the main process which can then use
    it's CrashReporter instance to upload the crash report.
    On the main process, calls to CrashReporter.poll() must periodically be called in order to check if there are
    any crash reports waiting in the pipe.
    """
    _crash_reporting = True
    # Class-level list of (receiving, sending) pipe connection pairs, one per
    # spawned process; CrashReporter.poll() drains the receiving ends.
    cr_pipes = []
    def __init__(self, *args, **kwargs):
        """Create the process plus a one-way pipe back to the main process."""
        super(CrashReportingProcess, self).__init__(*args, **kwargs)
        self.cr_remote_conn, self.cr_local_conn = multiprocessing.Pipe(duplex=False)
        CrashReportingProcess.cr_pipes.append((self.cr_remote_conn, self.cr_local_conn))
    def exception_handler(self, e):
        """Serialize the current exception and send it through the pipe."""
        logging.debug('CrashReporter: Crash detected on process {}'.format(self.name))
        etype, evalue, tb = sys.exc_info()
        analyzed_traceback = analyze_traceback(tb)
        logging.debug('CrashReporter: Done analyzing traceback on process {}'.format(self.name))
        logging.debug('CrashReporter: Sending traceback data to main process'.format(self.name))
        try:
            self.cr_local_conn.send((etype.__name__, '%s' % evalue, analyzed_traceback))
        except Exception as e:
            # The payload may not be picklable, or the pipe may be closed.
            logging.error('CrashReporter: Could not send traceback data to main process.')
    def run(self):
        """Run the process body, forwarding any uncaught exception to the main process."""
        clsname = self.__class__.__name__
        try:
            logging.debug('{cls}: Starting {cls}: {name}'.format(cls=clsname, name=self.name))
            super(CrashReportingProcess, self).run()
            logging.debug('{cls}: Preparing to exit {cls}: {name}'.format(cls=clsname, name=self.name))
        except Exception as e:
            logging.info('{cls}: Error encountered in {name}'.format(cls=clsname, name=self.name))
            traceback.print_exc()
            self.exception_handler(e)
lobocv/crashreporter | crashreporter/tools.py | string_variable_lookup | python | def string_variable_lookup(tb, s):
refs = []
dot_refs = s.split('.')
DOT_LOOKUP = 0
DICT_LOOKUP = 1
for ii, ref in enumerate(dot_refs):
dict_refs = dict_lookup_regex.findall(ref)
if dict_refs:
bracket = ref.index('[')
refs.append((DOT_LOOKUP, ref[:bracket]))
refs.extend([(DICT_LOOKUP, t) for t in dict_refs])
else:
refs.append((DOT_LOOKUP, ref))
scope = tb.tb_frame.f_locals.get(refs[0][1], ValueError)
if scope is ValueError:
return scope
for lookup, ref in refs[1:]:
try:
if lookup == DOT_LOOKUP:
scope = getattr(scope, ref, ValueError)
else:
scope = scope.get(ref, ValueError)
except Exception as e:
logging.error(e)
scope = ValueError
if scope is ValueError:
return scope
elif isinstance(scope, (FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType)):
return ValueError
return scope | Look up the value of an object in a traceback by a dot-lookup string.
ie. "self.crashreporter.application_name"
Returns ValueError if value was not found in the scope of the traceback.
:param tb: traceback
:param s: lookup string
:return: value of the | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/tools.py#L28-L70 | null | __author__ = 'calvin'
import inspect
import logging
import re
import traceback
from types import FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType
try:
import numpy as np
_NUMPY_INSTALLED = True
except ImportError:
_NUMPY_INSTALLED = False
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')(?:\[(?:\'|\").*(?:\'|\")\])*(?:\.[A-z]+[0-9]*)*")
dict_lookup_regex = re.compile("(?<=\[)(?:\'|\")([^\'\"]*)(?:\'|\")(?=\])")
_repr = repr
def repr(object):
try:
return _repr(object)
except Exception as e:
logging.error(e)
return 'String Representation not found'
def get_object_references(tb, source, max_string_length=1000):
    """
    Find the values of referenced attributes of objects within the traceback scope.

    :param tb: traceback whose frame locals are searched
    :param source: source code text scanned for dotted attribute references
    :param max_string_length: cap on each formatted value string
    :return: sorted list of (attribute path, formatted value) tuples
    """
    found = set()
    for src_line in source.split('\n'):
        found.update(re.findall(obj_ref_regex, src_line))
    info = []
    for attr in sorted(found):
        value = string_variable_lookup(tb, attr)
        if value is ValueError:
            continue
        info.append((attr, format_reference(value, max_string_length=max_string_length)))
    return info
def get_local_references(tb, max_string_length=1000):
"""
Find the values of the local variables within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
"""
if 'self' in tb.tb_frame.f_locals:
_locals = [('self', repr(tb.tb_frame.f_locals['self']))]
else:
_locals = []
for k, v in tb.tb_frame.f_locals.iteritems():
if k == 'self':
continue
try:
vstr = format_reference(v, max_string_length=max_string_length)
_locals.append((k, vstr))
except TypeError:
pass
return _locals
def format_reference(ref, max_string_length=1000):
    """
    Converts an object / value into a string representation to pass along in the payload

    For numpy arrays the dtype/shape/size/min/max are prepended; for builtin
    containers the length is prepended. The result is truncated to
    `max_string_length` characters.

    :param ref: object or value
    :param max_string_length: maximum number of characters to represent the object
    :return: string representation of `ref`
    """
    _pass = lambda *args: None  # fallback when __len__ is missing
    _numpy_info = ('dtype', 'shape', 'size', 'min', 'max')
    additionals = []
    if _NUMPY_INSTALLED and isinstance(ref, np.ndarray):
        # Check for numpy info
        for np_attr in _numpy_info:
            np_value = getattr(ref, np_attr, None)
            if np_value is not None:
                if inspect.isbuiltin(np_value):
                    # min/max are methods; call them to obtain the scalar value.
                    try:
                        np_value = np_value()
                    except Exception as e:
                        logging.error(e)
                        continue
                additionals.append((np_attr, np_value))
    elif isinstance(ref, (list, tuple, set, dict)):
        # Check for length of reference
        length = getattr(ref, '__len__', _pass)()
        if length is not None:
            additionals.append(('length', length))
    if additionals:
        # NOTE: repr here is the module-level safe wrapper, not the builtin.
        vstr = ', '.join(['%s: %s' % a for a in additionals] + [repr(ref)])
    else:
        vstr = repr(ref)
    if len(vstr) > max_string_length:
        vstr = vstr[:max_string_length] + ' ...'
    return vstr
def analyze_traceback(tb, inspection_level=None, limit=None):
    """
    Extract trace back information into a list of dictionaries.

    :param tb: traceback
    :param inspection_level: number of innermost frames to fully inspect
        (source code, local variables, object references); None inspects all
    :param limit: maximum number of traceback levels to extract
    :return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks
    """
    info = []
    tb_level = tb
    extracted_tb = traceback.extract_tb(tb, limit=limit)
    for ii, (filepath, line, module, code) in enumerate(extracted_tb):
        func_source, func_lineno = inspect.getsourcelines(tb_level.tb_frame)
        d = {"File": filepath,
             "Error Line Number": line,
             "Module": module,
             "Error Line": code,
             "Module Line Number": func_lineno,
             "Custom Inspection": {},
             "Source Code": ''}
        if inspection_level is None or len(extracted_tb) - ii <= inspection_level:
            # Perform advanced inspection on the last `inspection_level` tracebacks.
            d['Source Code'] = ''.join(func_source)
            d['Local Variables'] = get_local_references(tb_level)
            d['Object Variables'] = get_object_references(tb_level, d['Source Code'])
        tb_level = getattr(tb_level, 'tb_next', None)
        info.append(d)
    return info
lobocv/crashreporter | crashreporter/tools.py | get_object_references | python | def get_object_references(tb, source, max_string_length=1000):
global obj_ref_regex
referenced_attr = set()
for line in source.split('\n'):
referenced_attr.update(set(re.findall(obj_ref_regex, line)))
referenced_attr = sorted(referenced_attr)
info = []
for attr in referenced_attr:
v = string_variable_lookup(tb, attr)
if v is not ValueError:
ref_string = format_reference(v, max_string_length=max_string_length)
info.append((attr, ref_string))
return info | Find the values of referenced attributes of objects within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value) | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/tools.py#L73-L91 | [
"def string_variable_lookup(tb, s):\n \"\"\"\n Look up the value of an object in a traceback by a dot-lookup string.\n ie. \"self.crashreporter.application_name\"\n\n Returns ValueError if value was not found in the scope of the traceback.\n\n :param tb: traceback\n :param s: lookup string\n :r... | __author__ = 'calvin'
import inspect
import logging
import re
import traceback
from types import FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType
try:
import numpy as np
_NUMPY_INSTALLED = True
except ImportError:
_NUMPY_INSTALLED = False
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')(?:\[(?:\'|\").*(?:\'|\")\])*(?:\.[A-z]+[0-9]*)*")
dict_lookup_regex = re.compile("(?<=\[)(?:\'|\")([^\'\"]*)(?:\'|\")(?=\])")
_repr = repr
def repr(object):
try:
return _repr(object)
except Exception as e:
logging.error(e)
return 'String Representation not found'
def string_variable_lookup(tb, s):
"""
Look up the value of an object in a traceback by a dot-lookup string.
ie. "self.crashreporter.application_name"
Returns ValueError if value was not found in the scope of the traceback.
:param tb: traceback
:param s: lookup string
:return: value of the
"""
refs = []
dot_refs = s.split('.')
DOT_LOOKUP = 0
DICT_LOOKUP = 1
for ii, ref in enumerate(dot_refs):
dict_refs = dict_lookup_regex.findall(ref)
if dict_refs:
bracket = ref.index('[')
refs.append((DOT_LOOKUP, ref[:bracket]))
refs.extend([(DICT_LOOKUP, t) for t in dict_refs])
else:
refs.append((DOT_LOOKUP, ref))
scope = tb.tb_frame.f_locals.get(refs[0][1], ValueError)
if scope is ValueError:
return scope
for lookup, ref in refs[1:]:
try:
if lookup == DOT_LOOKUP:
scope = getattr(scope, ref, ValueError)
else:
scope = scope.get(ref, ValueError)
except Exception as e:
logging.error(e)
scope = ValueError
if scope is ValueError:
return scope
elif isinstance(scope, (FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType)):
return ValueError
return scope
def get_local_references(tb, max_string_length=1000):
"""
Find the values of the local variables within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
"""
if 'self' in tb.tb_frame.f_locals:
_locals = [('self', repr(tb.tb_frame.f_locals['self']))]
else:
_locals = []
for k, v in tb.tb_frame.f_locals.iteritems():
if k == 'self':
continue
try:
vstr = format_reference(v, max_string_length=max_string_length)
_locals.append((k, vstr))
except TypeError:
pass
return _locals
def format_reference(ref, max_string_length=1000):
"""
Converts an object / value into a string representation to pass along in the payload
:param ref: object or value
:param max_string_length: maximum number of characters to represent the object
:return:
"""
_pass = lambda *args: None
_numpy_info = ('dtype', 'shape', 'size', 'min', 'max')
additionals = []
if _NUMPY_INSTALLED and isinstance(ref, np.ndarray):
# Check for numpy info
for np_attr in _numpy_info:
np_value = getattr(ref, np_attr, None)
if np_value is not None:
if inspect.isbuiltin(np_value):
try:
np_value = np_value()
except Exception as e:
logging.error(e)
continue
additionals.append((np_attr, np_value))
elif isinstance(ref, (list, tuple, set, dict)):
# Check for length of reference
length = getattr(ref, '__len__', _pass)()
if length is not None:
additionals.append(('length', length))
if additionals:
vstr = ', '.join(['%s: %s' % a for a in additionals] + [repr(ref)])
else:
vstr = repr(ref)
if len(vstr) > max_string_length:
vstr = vstr[:max_string_length] + ' ...'
return vstr
def analyze_traceback(tb, inspection_level=None, limit=None):
"""
Extract trace back information into a list of dictionaries.
:param tb: traceback
:return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks
"""
info = []
tb_level = tb
extracted_tb = traceback.extract_tb(tb, limit=limit)
for ii, (filepath, line, module, code) in enumerate(extracted_tb):
func_source, func_lineno = inspect.getsourcelines(tb_level.tb_frame)
d = {"File": filepath,
"Error Line Number": line,
"Module": module,
"Error Line": code,
"Module Line Number": func_lineno,
"Custom Inspection": {},
"Source Code": ''}
if inspection_level is None or len(extracted_tb) - ii <= inspection_level:
# Perform advanced inspection on the last `inspection_level` tracebacks.
d['Source Code'] = ''.join(func_source)
d['Local Variables'] = get_local_references(tb_level)
d['Object Variables'] = get_object_references(tb_level, d['Source Code'])
tb_level = getattr(tb_level, 'tb_next', None)
info.append(d)
return info |
lobocv/crashreporter | crashreporter/tools.py | get_local_references | python | def get_local_references(tb, max_string_length=1000):
if 'self' in tb.tb_frame.f_locals:
_locals = [('self', repr(tb.tb_frame.f_locals['self']))]
else:
_locals = []
for k, v in tb.tb_frame.f_locals.iteritems():
if k == 'self':
continue
try:
vstr = format_reference(v, max_string_length=max_string_length)
_locals.append((k, vstr))
except TypeError:
pass
return _locals | Find the values of the local variables within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value) | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/tools.py#L94-L113 | [
"def repr(object):\n try:\n return _repr(object)\n except Exception as e:\n logging.error(e)\n return 'String Representation not found'\n",
"def format_reference(ref, max_string_length=1000):\n \"\"\"\n Converts an object / value into a string representation to pass along in the p... | __author__ = 'calvin'
import inspect
import logging
import re
import traceback
from types import FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType
try:
import numpy as np
_NUMPY_INSTALLED = True
except ImportError:
_NUMPY_INSTALLED = False
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')(?:\[(?:\'|\").*(?:\'|\")\])*(?:\.[A-z]+[0-9]*)*")
dict_lookup_regex = re.compile("(?<=\[)(?:\'|\")([^\'\"]*)(?:\'|\")(?=\])")
_repr = repr
def repr(object):
try:
return _repr(object)
except Exception as e:
logging.error(e)
return 'String Representation not found'
def string_variable_lookup(tb, s):
"""
Look up the value of an object in a traceback by a dot-lookup string.
ie. "self.crashreporter.application_name"
Returns ValueError if value was not found in the scope of the traceback.
:param tb: traceback
:param s: lookup string
:return: value of the
"""
refs = []
dot_refs = s.split('.')
DOT_LOOKUP = 0
DICT_LOOKUP = 1
for ii, ref in enumerate(dot_refs):
dict_refs = dict_lookup_regex.findall(ref)
if dict_refs:
bracket = ref.index('[')
refs.append((DOT_LOOKUP, ref[:bracket]))
refs.extend([(DICT_LOOKUP, t) for t in dict_refs])
else:
refs.append((DOT_LOOKUP, ref))
scope = tb.tb_frame.f_locals.get(refs[0][1], ValueError)
if scope is ValueError:
return scope
for lookup, ref in refs[1:]:
try:
if lookup == DOT_LOOKUP:
scope = getattr(scope, ref, ValueError)
else:
scope = scope.get(ref, ValueError)
except Exception as e:
logging.error(e)
scope = ValueError
if scope is ValueError:
return scope
elif isinstance(scope, (FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType)):
return ValueError
return scope
def get_object_references(tb, source, max_string_length=1000):
"""
Find the values of referenced attributes of objects within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
"""
global obj_ref_regex
referenced_attr = set()
for line in source.split('\n'):
referenced_attr.update(set(re.findall(obj_ref_regex, line)))
referenced_attr = sorted(referenced_attr)
info = []
for attr in referenced_attr:
v = string_variable_lookup(tb, attr)
if v is not ValueError:
ref_string = format_reference(v, max_string_length=max_string_length)
info.append((attr, ref_string))
return info
def format_reference(ref, max_string_length=1000):
"""
Converts an object / value into a string representation to pass along in the payload
:param ref: object or value
:param max_string_length: maximum number of characters to represent the object
:return:
"""
_pass = lambda *args: None
_numpy_info = ('dtype', 'shape', 'size', 'min', 'max')
additionals = []
if _NUMPY_INSTALLED and isinstance(ref, np.ndarray):
# Check for numpy info
for np_attr in _numpy_info:
np_value = getattr(ref, np_attr, None)
if np_value is not None:
if inspect.isbuiltin(np_value):
try:
np_value = np_value()
except Exception as e:
logging.error(e)
continue
additionals.append((np_attr, np_value))
elif isinstance(ref, (list, tuple, set, dict)):
# Check for length of reference
length = getattr(ref, '__len__', _pass)()
if length is not None:
additionals.append(('length', length))
if additionals:
vstr = ', '.join(['%s: %s' % a for a in additionals] + [repr(ref)])
else:
vstr = repr(ref)
if len(vstr) > max_string_length:
vstr = vstr[:max_string_length] + ' ...'
return vstr
def analyze_traceback(tb, inspection_level=None, limit=None):
"""
Extract trace back information into a list of dictionaries.
:param tb: traceback
:return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks
"""
info = []
tb_level = tb
extracted_tb = traceback.extract_tb(tb, limit=limit)
for ii, (filepath, line, module, code) in enumerate(extracted_tb):
func_source, func_lineno = inspect.getsourcelines(tb_level.tb_frame)
d = {"File": filepath,
"Error Line Number": line,
"Module": module,
"Error Line": code,
"Module Line Number": func_lineno,
"Custom Inspection": {},
"Source Code": ''}
if inspection_level is None or len(extracted_tb) - ii <= inspection_level:
# Perform advanced inspection on the last `inspection_level` tracebacks.
d['Source Code'] = ''.join(func_source)
d['Local Variables'] = get_local_references(tb_level)
d['Object Variables'] = get_object_references(tb_level, d['Source Code'])
tb_level = getattr(tb_level, 'tb_next', None)
info.append(d)
return info |
lobocv/crashreporter | crashreporter/tools.py | format_reference | python | def format_reference(ref, max_string_length=1000):
_pass = lambda *args: None
_numpy_info = ('dtype', 'shape', 'size', 'min', 'max')
additionals = []
if _NUMPY_INSTALLED and isinstance(ref, np.ndarray):
# Check for numpy info
for np_attr in _numpy_info:
np_value = getattr(ref, np_attr, None)
if np_value is not None:
if inspect.isbuiltin(np_value):
try:
np_value = np_value()
except Exception as e:
logging.error(e)
continue
additionals.append((np_attr, np_value))
elif isinstance(ref, (list, tuple, set, dict)):
# Check for length of reference
length = getattr(ref, '__len__', _pass)()
if length is not None:
additionals.append(('length', length))
if additionals:
vstr = ', '.join(['%s: %s' % a for a in additionals] + [repr(ref)])
else:
vstr = repr(ref)
if len(vstr) > max_string_length:
vstr = vstr[:max_string_length] + ' ...'
return vstr | Converts an object / value into a string representation to pass along in the payload
:param ref: object or value
:param max_string_length: maximum number of characters to represent the object
:return: | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/tools.py#L116-L152 | [
"def repr(object):\n try:\n return _repr(object)\n except Exception as e:\n logging.error(e)\n return 'String Representation not found'\n"
] | __author__ = 'calvin'
import inspect
import logging
import re
import traceback
from types import FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType
try:
import numpy as np
_NUMPY_INSTALLED = True
except ImportError:
_NUMPY_INSTALLED = False
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')(?:\[(?:\'|\").*(?:\'|\")\])*(?:\.[A-z]+[0-9]*)*")
dict_lookup_regex = re.compile("(?<=\[)(?:\'|\")([^\'\"]*)(?:\'|\")(?=\])")
_repr = repr
def repr(object):
try:
return _repr(object)
except Exception as e:
logging.error(e)
return 'String Representation not found'
def string_variable_lookup(tb, s):
"""
Look up the value of an object in a traceback by a dot-lookup string.
ie. "self.crashreporter.application_name"
Returns ValueError if value was not found in the scope of the traceback.
:param tb: traceback
:param s: lookup string
:return: value of the
"""
refs = []
dot_refs = s.split('.')
DOT_LOOKUP = 0
DICT_LOOKUP = 1
for ii, ref in enumerate(dot_refs):
dict_refs = dict_lookup_regex.findall(ref)
if dict_refs:
bracket = ref.index('[')
refs.append((DOT_LOOKUP, ref[:bracket]))
refs.extend([(DICT_LOOKUP, t) for t in dict_refs])
else:
refs.append((DOT_LOOKUP, ref))
scope = tb.tb_frame.f_locals.get(refs[0][1], ValueError)
if scope is ValueError:
return scope
for lookup, ref in refs[1:]:
try:
if lookup == DOT_LOOKUP:
scope = getattr(scope, ref, ValueError)
else:
scope = scope.get(ref, ValueError)
except Exception as e:
logging.error(e)
scope = ValueError
if scope is ValueError:
return scope
elif isinstance(scope, (FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType)):
return ValueError
return scope
def get_object_references(tb, source, max_string_length=1000):
"""
Find the values of referenced attributes of objects within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
"""
global obj_ref_regex
referenced_attr = set()
for line in source.split('\n'):
referenced_attr.update(set(re.findall(obj_ref_regex, line)))
referenced_attr = sorted(referenced_attr)
info = []
for attr in referenced_attr:
v = string_variable_lookup(tb, attr)
if v is not ValueError:
ref_string = format_reference(v, max_string_length=max_string_length)
info.append((attr, ref_string))
return info
def get_local_references(tb, max_string_length=1000):
"""
Find the values of the local variables within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
"""
if 'self' in tb.tb_frame.f_locals:
_locals = [('self', repr(tb.tb_frame.f_locals['self']))]
else:
_locals = []
for k, v in tb.tb_frame.f_locals.iteritems():
if k == 'self':
continue
try:
vstr = format_reference(v, max_string_length=max_string_length)
_locals.append((k, vstr))
except TypeError:
pass
return _locals
def analyze_traceback(tb, inspection_level=None, limit=None):
"""
Extract trace back information into a list of dictionaries.
:param tb: traceback
:return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks
"""
info = []
tb_level = tb
extracted_tb = traceback.extract_tb(tb, limit=limit)
for ii, (filepath, line, module, code) in enumerate(extracted_tb):
func_source, func_lineno = inspect.getsourcelines(tb_level.tb_frame)
d = {"File": filepath,
"Error Line Number": line,
"Module": module,
"Error Line": code,
"Module Line Number": func_lineno,
"Custom Inspection": {},
"Source Code": ''}
if inspection_level is None or len(extracted_tb) - ii <= inspection_level:
# Perform advanced inspection on the last `inspection_level` tracebacks.
d['Source Code'] = ''.join(func_source)
d['Local Variables'] = get_local_references(tb_level)
d['Object Variables'] = get_object_references(tb_level, d['Source Code'])
tb_level = getattr(tb_level, 'tb_next', None)
info.append(d)
return info |
lobocv/crashreporter | crashreporter/tools.py | analyze_traceback | python | def analyze_traceback(tb, inspection_level=None, limit=None):
info = []
tb_level = tb
extracted_tb = traceback.extract_tb(tb, limit=limit)
for ii, (filepath, line, module, code) in enumerate(extracted_tb):
func_source, func_lineno = inspect.getsourcelines(tb_level.tb_frame)
d = {"File": filepath,
"Error Line Number": line,
"Module": module,
"Error Line": code,
"Module Line Number": func_lineno,
"Custom Inspection": {},
"Source Code": ''}
if inspection_level is None or len(extracted_tb) - ii <= inspection_level:
# Perform advanced inspection on the last `inspection_level` tracebacks.
d['Source Code'] = ''.join(func_source)
d['Local Variables'] = get_local_references(tb_level)
d['Object Variables'] = get_object_references(tb_level, d['Source Code'])
tb_level = getattr(tb_level, 'tb_next', None)
info.append(d)
return info | Extract trace back information into a list of dictionaries.
:param tb: traceback
:return: list of dicts containing filepath, line, module, code, traceback level and source code for tracebacks | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/tools.py#L155-L183 | [
"def get_object_references(tb, source, max_string_length=1000):\n \"\"\"\n Find the values of referenced attributes of objects within the traceback scope.\n\n :param tb: traceback\n :return: list of tuples containing (variable name, value)\n \"\"\"\n global obj_ref_regex\n referenced_attr = set... | __author__ = 'calvin'
import inspect
import logging
import re
import traceback
from types import FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType
try:
import numpy as np
_NUMPY_INSTALLED = True
except ImportError:
_NUMPY_INSTALLED = False
obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')(?:\[(?:\'|\").*(?:\'|\")\])*(?:\.[A-z]+[0-9]*)*")
dict_lookup_regex = re.compile("(?<=\[)(?:\'|\")([^\'\"]*)(?:\'|\")(?=\])")
_repr = repr
def repr(object):
try:
return _repr(object)
except Exception as e:
logging.error(e)
return 'String Representation not found'
def string_variable_lookup(tb, s):
"""
Look up the value of an object in a traceback by a dot-lookup string.
ie. "self.crashreporter.application_name"
Returns ValueError if value was not found in the scope of the traceback.
:param tb: traceback
:param s: lookup string
:return: value of the
"""
refs = []
dot_refs = s.split('.')
DOT_LOOKUP = 0
DICT_LOOKUP = 1
for ii, ref in enumerate(dot_refs):
dict_refs = dict_lookup_regex.findall(ref)
if dict_refs:
bracket = ref.index('[')
refs.append((DOT_LOOKUP, ref[:bracket]))
refs.extend([(DICT_LOOKUP, t) for t in dict_refs])
else:
refs.append((DOT_LOOKUP, ref))
scope = tb.tb_frame.f_locals.get(refs[0][1], ValueError)
if scope is ValueError:
return scope
for lookup, ref in refs[1:]:
try:
if lookup == DOT_LOOKUP:
scope = getattr(scope, ref, ValueError)
else:
scope = scope.get(ref, ValueError)
except Exception as e:
logging.error(e)
scope = ValueError
if scope is ValueError:
return scope
elif isinstance(scope, (FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType)):
return ValueError
return scope
def get_object_references(tb, source, max_string_length=1000):
"""
Find the values of referenced attributes of objects within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
"""
global obj_ref_regex
referenced_attr = set()
for line in source.split('\n'):
referenced_attr.update(set(re.findall(obj_ref_regex, line)))
referenced_attr = sorted(referenced_attr)
info = []
for attr in referenced_attr:
v = string_variable_lookup(tb, attr)
if v is not ValueError:
ref_string = format_reference(v, max_string_length=max_string_length)
info.append((attr, ref_string))
return info
def get_local_references(tb, max_string_length=1000):
"""
Find the values of the local variables within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
"""
if 'self' in tb.tb_frame.f_locals:
_locals = [('self', repr(tb.tb_frame.f_locals['self']))]
else:
_locals = []
for k, v in tb.tb_frame.f_locals.iteritems():
if k == 'self':
continue
try:
vstr = format_reference(v, max_string_length=max_string_length)
_locals.append((k, vstr))
except TypeError:
pass
return _locals
def format_reference(ref, max_string_length=1000):
"""
Converts an object / value into a string representation to pass along in the payload
:param ref: object or value
:param max_string_length: maximum number of characters to represent the object
:return:
"""
_pass = lambda *args: None
_numpy_info = ('dtype', 'shape', 'size', 'min', 'max')
additionals = []
if _NUMPY_INSTALLED and isinstance(ref, np.ndarray):
# Check for numpy info
for np_attr in _numpy_info:
np_value = getattr(ref, np_attr, None)
if np_value is not None:
if inspect.isbuiltin(np_value):
try:
np_value = np_value()
except Exception as e:
logging.error(e)
continue
additionals.append((np_attr, np_value))
elif isinstance(ref, (list, tuple, set, dict)):
# Check for length of reference
length = getattr(ref, '__len__', _pass)()
if length is not None:
additionals.append(('length', length))
if additionals:
vstr = ', '.join(['%s: %s' % a for a in additionals] + [repr(ref)])
else:
vstr = repr(ref)
if len(vstr) > max_string_length:
vstr = vstr[:max_string_length] + ' ...'
return vstr
|
lobocv/crashreporter | crashreporter/injector.py | inject_path | python | def inject_path(path):
try:
dirname = os.path.dirname(path)
if dirname not in sys.path:
exists_in_sys = False
sys.path.append(dirname)
else:
exists_in_sys = True
module_name = os.path.splitext(os.path.split(path)[1])[0]
if module_name in sys.modules:
reload(sys.modules[module_name])
else:
__import__(module_name)
if not exists_in_sys:
sys.path.remove(dirname)
except Exception as e:
return e | Imports :func: from a python file at :path: and executes it with *args, **kwargs arguments. Everytime this function
is called the module is reloaded so that you can alter your debug code while the application is running.
The result of the function is returned, otherwise the exception is returned (if one is raised) | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/injector.py#L11-L33 | null | import sys
import os
print '******************************** WARNING **************************************\n' \
' CRASHREPORTER CODE INJECTION HAS BEEN IMPORTED.\n' \
' IT IS HIGHLY RECOMMENDED THAT THIS ONLY BE USED IN DEVELOPMENT\n' \
' FOR DEBUGGING PURPOSES AS IT ALLOW POSSIBLE MALICIOUS CODE TO BE INJECTED.\n'\
'*******************************************************************************' \
def inject_module(module, *args, **kwargs):
"""
Imports a function from a python module :module: and executes it with *args, **kwargs arguments. Dotted referencing
can be used to specify the function from the module.
For example, the following code will execute func1 and func2 from module mymodule with no arguments
inject_module('mymodule.func1')
inject_module('mymodule.func2')
Everytime this function is called the module is reloaded so that you can alter your
debug code while the application is running.
The result of the function is returned, otherwise the exception is returned (if one is raised)
"""
try:
parsed = module.split('.')
if len(parsed) == 1:
module_name, func_name = parsed[0], 'debug'
elif len(parsed) == 2:
module_name, func_name = parsed
if module_name in sys.modules:
mod = sys.modules[module_name]
reload(mod)
else:
mod = __import__(module_name)
f = getattr(mod, func_name, None)
if f:
return f(*args, **kwargs)
except Exception as e:
print e
return e |
lobocv/crashreporter | crashreporter/injector.py | inject_module | python | def inject_module(module, *args, **kwargs):
try:
parsed = module.split('.')
if len(parsed) == 1:
module_name, func_name = parsed[0], 'debug'
elif len(parsed) == 2:
module_name, func_name = parsed
if module_name in sys.modules:
mod = sys.modules[module_name]
reload(mod)
else:
mod = __import__(module_name)
f = getattr(mod, func_name, None)
if f:
return f(*args, **kwargs)
except Exception as e:
print e
return e | Imports a function from a python module :module: and executes it with *args, **kwargs arguments. Dotted referencing
can be used to specify the function from the module.
For example, the following code will execute func1 and func2 from module mymodule with no arguments
inject_module('mymodule.func1')
inject_module('mymodule.func2')
Everytime this function is called the module is reloaded so that you can alter your
debug code while the application is running.
The result of the function is returned, otherwise the exception is returned (if one is raised) | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/injector.py#L36-L68 | null | import sys
import os
print '******************************** WARNING **************************************\n' \
' CRASHREPORTER CODE INJECTION HAS BEEN IMPORTED.\n' \
' IT IS HIGHLY RECOMMENDED THAT THIS ONLY BE USED IN DEVELOPMENT\n' \
' FOR DEBUGGING PURPOSES AS IT ALLOW POSSIBLE MALICIOUS CODE TO BE INJECTED.\n'\
'*******************************************************************************' \
def inject_path(path):
"""
Imports :func: from a python file at :path: and executes it with *args, **kwargs arguments. Everytime this function
is called the module is reloaded so that you can alter your debug code while the application is running.
The result of the function is returned, otherwise the exception is returned (if one is raised)
"""
try:
dirname = os.path.dirname(path)
if dirname not in sys.path:
exists_in_sys = False
sys.path.append(dirname)
else:
exists_in_sys = True
module_name = os.path.splitext(os.path.split(path)[1])[0]
if module_name in sys.modules:
reload(sys.modules[module_name])
else:
__import__(module_name)
if not exists_in_sys:
sys.path.remove(dirname)
except Exception as e:
return e
|
lobocv/crashreporter | crashreporter/api.py | upload_report | python | def upload_report(server, payload, timeout=HQ_DEFAULT_TIMEOUT):
try:
data = json.dumps(payload)
r = requests.post(server + '/reports/upload', data=data, timeout=timeout)
except Exception as e:
logging.error(e)
return False
return r | Upload a report to the server.
:param payload: Dictionary (JSON serializable) of crash data.
:return: server response | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/api.py#L12-L24 | null | __author__ = 'calvin'
import requests
import json
import logging
HQ_DEFAULT_TIMEOUT = 10
SMTP_DEFAULT_TIMEOUT = 5
def upload_many_reports(server, payloads, timeout=HQ_DEFAULT_TIMEOUT):
try:
data = json.dumps(payloads)
r = requests.post(server + '/reports/upload_many', data=data, timeout=timeout)
except Exception as e:
logging.error(e)
return False
return r
def delete_report(server, report_number, timeout=HQ_DEFAULT_TIMEOUT):
"""
Delete a specific crash report from the server.
:param report_number: Report Number
:return: server response
"""
try:
r = requests.post(server + "/reports/delete/%d" % report_number, timeout=timeout)
except Exception as e:
logging.error(e)
return False
return r
|
lobocv/crashreporter | crashreporter/api.py | delete_report | python | def delete_report(server, report_number, timeout=HQ_DEFAULT_TIMEOUT):
try:
r = requests.post(server + "/reports/delete/%d" % report_number, timeout=timeout)
except Exception as e:
logging.error(e)
return False
return r | Delete a specific crash report from the server.
:param report_number: Report Number
:return: server response | train | https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/api.py#L38-L50 | null | __author__ = 'calvin'
import requests
import json
import logging
HQ_DEFAULT_TIMEOUT = 10
SMTP_DEFAULT_TIMEOUT = 5
def upload_report(server, payload, timeout=HQ_DEFAULT_TIMEOUT):
"""
Upload a report to the server.
:param payload: Dictionary (JSON serializable) of crash data.
:return: server response
"""
try:
data = json.dumps(payload)
r = requests.post(server + '/reports/upload', data=data, timeout=timeout)
except Exception as e:
logging.error(e)
return False
return r
def upload_many_reports(server, payloads, timeout=HQ_DEFAULT_TIMEOUT):
try:
data = json.dumps(payloads)
r = requests.post(server + '/reports/upload_many', data=data, timeout=timeout)
except Exception as e:
logging.error(e)
return False
return r
|
moonso/ped_parser | ped_parser/parser.py | cli | python | def cli(family_file, family_type, to_json, to_madeline, to_ped, to_dict,
outfile, logfile, loglevel):
from pprint import pprint as pp
my_parser = FamilyParser(family_file, family_type)
if to_json:
if outfile:
outfile.write(my_parser.to_json())
else:
print(my_parser.to_json())
elif to_madeline:
for line in my_parser.to_madeline():
if outfile:
outfile.write(line + '\n')
else:
print(line)
elif to_ped:
for line in my_parser.to_ped():
if outfile:
outfile.write(line + '\n')
else:
print(line)
elif to_dict:
pp(my_parser.to_dict()) | Cli for testing the ped parser. | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L641-L668 | [
"def to_dict(self):\n \"\"\"\n Return the information from the pedigree file as a dictionary.\n family id is key and a list with dictionarys for each individual \n as value.\n\n Returns:\n families (dict): A dictionary with the families\n \"\"\"\n\n self.logger.debug(\"Return the informa... | #!/usr/bin/env python
# encoding: utf-8
"""
parser.py
Parse a iterator with family info, this can be a file handle, a file stream,
a list of strings etc.
The family info can be in several formats, these are .ped , .fam,
.txt(extended ped format).
.ped and .fam always have 6 columns, these are
Family_ID - '.' or '0' for unknown
Individual_ID - '.' or '0' for unknown
Paternal_ID - '.' or '0' for unknown
Maternal_ID - '.' or '0' for unknown
Sex - '1'=male; '2'=female; ['other', '0', '.']=unknown
Phenotype - '1'=unaffected, '2'=affected, ['-9', '0', '.']= missing,
The other types must specify the columns in the header.
Header allways start with '#'.
These files allways start with the ped columns described above.
The following column names will be treated with care, which means that they
will be used when outputting a madeline type of file or makes accesable
variables in the parser:
'InheritanceModel' - a ';'-separated list of expected inheritance models.
Choices are:
['AR','AR_hom','AR_denovo','AR_hom_denovo','AR_hom_dn','AR_dn',
'AR_compound','AR_comp','AD','AD_dn','AD_denovo','X','X_dn',
'X_denovo','NA','Na','na','.']
'Proband' - 'Yes', 'No', 'Unknown' or '.'. A proband is the first affected
member of a pedigree coming to medical attention.
'Consultand' - 'Yes', 'No', 'Unknown' or '.'. A consultand is an individual
who has sought genetic counseling or testing.
'Alive' - 'Yes', 'No', 'Unknown' or '.'
Create a family object and its family members from different types of input file
Created by Måns Magnusson on 2013-01-17.
Copyright (c) 2013 __MoonsoInc__. All rights reserved.
"""
from __future__ import print_function
import json
import logging
import click
from string import whitespace
from ped_parser import (Individual, Family)
from ped_parser.log import init_log
from ped_parser.exceptions import (WrongAffectionStatus, WrongPhenotype,
WrongGender, PedigreeError, WrongLineFormat)
############### Names of genetic models ###############
# These are stored as global variables and can be altered is the user
# prefer other model names or want to add names
AR_HOM_NAMES = ['AR', 'AR_hom']
AR_HOM_DN_NAMES = ['AR_denovo', 'AR_hom_denovo', 'AR_hom_dn', 'AR_dn']
COMPOUND_NAMES = ['AR_compound', 'AR_comp']
AD_NAMES = ['AD', 'AD_dn', 'AD_denovo']
X_NAMES = ['X', 'X_dn', 'X_denovo']
NA_NAMES = ['NA', 'Na', 'na', '.']
class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
    def ped_parser(self, family_info):
        """
        Parse .ped formatted family info.
        Add all family info to the parser object

        Arguments:
            family_info (iterator): An iterator with family info
        """
        for line in family_info:
            # Skip comment lines and lines that contain only whitespace.
            if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
                splitted_line = line.rstrip().split('\t')
                if len(splitted_line) != 6:
                    # Not tab separated; retry splitting on any whitespace.
                    splitted_line = line.rstrip().split()
                try:
                    self.check_line_length(splitted_line, 6)
                except WrongLineFormat as e:
                    # Log the malformed line, then propagate.
                    self.logger.error(e)
                    self.logger.info("Ped line: {0}".format(e.ped_line))
                    raise e
                # Map the six mandatory ped columns onto their field names.
                sample_dict = dict(zip(self.header, splitted_line))
                family_id = sample_dict['family_id']
                # Create the Family on first sight of its id.
                if sample_dict['family_id'] not in self.families:
                    self.families[family_id] = Family(family_id, {})
                # Register the individual both globally and in its family.
                ind_object = self.get_individual(**sample_dict)
                self.individuals[ind_object.individual_id] = ind_object
                self.families[ind_object.family].add_individual(ind_object)
    def alternative_parser(self, family_file):
        """
        Parse alternative formatted family info

        This parses a information with more than six columns.
        For alternative information header comlumn must exist and each row
        must have the same amount of columns as the header.
        First six columns must be the same as in the ped format.

        Arguments:
            family_info (iterator): An iterator with family info
        """
        alternative_header = None
        for line in family_file:
            if line.startswith('#'):
                # Header line: strip the leading '#' and remember the columns.
                alternative_header = line[1:].rstrip().split('\t')
                self.logger.info("Alternative header found: {0}".format(line))
            elif line.strip():
                # A data line before any header line is a hard error.
                if not alternative_header:
                    raise WrongLineFormat(message="Alternative ped files must have "\
                                            "headers! Please add a header line.")
                splitted_line = line.rstrip().split('\t')
                if len(splitted_line) < 6:
                    # Not tab separated; retry splitting on any whitespace.
                    splitted_line = line.rstrip().split()
                try:
                    self.check_line_length(splitted_line, len(alternative_header))
                except SyntaxError as e:
                    # NOTE(review): check_line_length raises WrongLineFormat;
                    # catching SyntaxError here assumes WrongLineFormat
                    # subclasses SyntaxError -- confirm in the exceptions
                    # module.
                    self.logger.error('Number of entrys differ from header.')
                    self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
                    self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
                    self.logger.error("Length of Header: {0}. Length of "\
                                        "Ped line: {1}".format(
                                            len(alternative_header),
                                            len(splitted_line))
                                        )
                    raise e
                if len(line) > 1:
                    # First six columns follow the plain ped layout.
                    sample_dict = dict(zip(self.header, splitted_line[:6]))
                    family_id = sample_dict['family_id']
                    # Full row keyed by the header names, for the extra columns.
                    all_info = dict(zip(alternative_header, splitted_line))
                    if sample_dict['family_id'] not in self.families:
                        self.families[family_id] = Family(family_id, {})
                    sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
                    # Try other header naming:
                    if not sample_dict['genetic_models']:
                        sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
                    sample_dict['proband'] = all_info.get('Proband', '.')
                    sample_dict['consultand'] = all_info.get('Consultand', '.')
                    sample_dict['alive'] = all_info.get('Alive', '.')
                    ind_object = self.get_individual(**sample_dict)
                    self.individuals[ind_object.individual_id] = ind_object
                    self.families[ind_object.family].add_individual(ind_object)
                    # Record the normalized inheritance models on the family.
                    if sample_dict['genetic_models']:
                        for model in self.get_models(sample_dict['genetic_models']):
                            self.families[ind_object.family].models_of_inheritance.add(model)
                    # If requested, we try is it is an id in the CMMS format:
                    sample_id_parts = ind_object.individual_id.split('-')
                    if self.cmms_check and (len(sample_id_parts) == 3):
                        # If the id follow the CMMS convention we can
                        # do a sanity check
                        if self.check_cmms_id(ind_object.individual_id):
                            self.logger.debug("Id follows CMMS convention: {0}".format(
                                ind_object.individual_id
                            ))
                            self.logger.debug("Checking CMMS id affections status")
                            try:
                                self.check_cmms_affection_status(ind_object)
                            except WrongAffectionStatus as e:
                                self.logger.error("Wrong affection status for"\
                                    " {0}. Affection status can be in"\
                                    " {1}".format(e.cmms_id, e.valid_statuses))
                                raise e
                            except WrongPhenotype as e:
                                self.logger.error("Affection status for {0} "\
                                    "({1}) disagrees with phenotype ({2})".format(
                                        e.cmms_id, e.phenotype, e.affection_status
                                    ))
                                raise e
                            try:
                                self.check_cmms_gender(ind_object)
                            except WrongGender as e:
                                self.logger.error("Gender code for id {0}"\
                                    "({1}) disagrees with sex:{2}".format(
                                        e.cmms_id, e.sex_code, e.sex
                                    ))
                                raise e
                    # Stash every extra (beyond the first six) column value
                    # on the individual, keyed by its header name.
                    for i in range(6, len(splitted_line)):
                        ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
    def to_ped(self):
        """
        Return a generator with the info in ped format.

        Yields:
            An iterator with the family info in ped format
        """
        ped_header = [
            '#FamilyID',
            'IndividualID',
            'PaternalID',
            'MaternalID',
            'Sex',
            'Phenotype',
        ]
        # Optional columns: only emitted when some individual carries them
        # in its extra_info.
        extra_headers = [
            'InheritanceModel',
            'Proband',
            'Consultand',
            'Alive'
        ]
        # Extend the header with any recognised extra columns, in the
        # order they are first encountered across all individuals.
        for individual_id in self.individuals:
            individual = self.individuals[individual_id]
            for info in individual.extra_info:
                if info in extra_headers:
                    if info not in ped_header:
                        ped_header.append(info)
        self.logger.debug("Ped headers found: {0}".format(
            ', '.join(ped_header)
        ))
        yield '\t'.join(ped_header)
        for family_id in self.families:
            for individual_id in self.families[family_id].individuals:
                individual = self.families[family_id].individuals[individual_id].to_json()
                ped_info = []
                ped_info.append(individual['family_id'])
                ped_info.append(individual['id'])
                ped_info.append(individual['father'])
                ped_info.append(individual['mother'])
                ped_info.append(individual['sex'])
                ped_info.append(individual['phenotype'])
                # Fill the optional columns; missing values become '.'.
                if len(ped_header) > 6:
                    for header in ped_header[6:]:
                        ped_info.append(individual['extra_info'].get(header, '.'))
                yield '\t'.join(ped_info)
@click.command()
@click.argument('family_file',
                    nargs=1,
                    type=click.File(),
                    metavar="<family_file> or '-'"
)
@click.option('-t', '--family_type',
                    type=click.Choice(['ped', 'alt', 'cmms', 'mip']),
                    default='ped',
                    help='If the analysis use one of the known setups, please specify which one. Default is ped'
)
@click.option('--to_json',
                    is_flag=True,
                    help='Print the ped file in json format'
)
@click.option('--to_madeline',
                    is_flag=True,
                    help='Print the ped file in madeline format'
)
@click.option('--to_ped',
                    is_flag=True,
                    help='Print the ped file in ped format with headers'
)
@click.option('--to_dict',
                    is_flag=True,
                    help='Print the ped file in ped format with headers'
)
@click.option('-o', '--outfile',
                    type=click.File('a')
)
@click.option('-l', '--logfile',
                    type=click.Path(exists=False),
                    help="Path to log file. If none logging is "\
                        "printed to stderr."
)
@click.option('--loglevel',
                    type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR',
                                        'CRITICAL']),
                    default='INFO',
                    help="Set the level of log output."
)
def cli(family_file, family_type, to_json, to_madeline, to_ped, to_dict,
        outfile, logfile, loglevel):
    """Cli for testing the ped parser."""
    import json
    from pprint import pprint as pp

    my_parser = FamilyParser(family_file, family_type)

    if to_json:
        # BUG FIX: FamilyParser.to_json() is a generator, so the original
        # outfile.write(my_parser.to_json()) raised TypeError and
        # print(my_parser.to_json()) printed "<generator object ...>".
        # Serialize each family on its own line instead.
        # NOTE(review): assumes Family.to_json() returns json-serializable
        # data (lists/dicts of primitives) -- confirm in the Family class.
        for family in my_parser.to_json():
            line = json.dumps(family)
            if outfile:
                outfile.write(line + '\n')
            else:
                print(line)
    elif to_madeline:
        for line in my_parser.to_madeline():
            if outfile:
                outfile.write(line + '\n')
            else:
                print(line)
    elif to_ped:
        for line in my_parser.to_ped():
            if outfile:
                outfile.write(line + '\n')
            else:
                print(line)
    elif to_dict:
        pp(my_parser.to_dict())
if __name__ == '__main__':
    # Script entry point: configure the package logger at DEBUG before
    # handing control to the click command.
    from ped_parser import init_log, logger
    init_log(logger, loglevel='DEBUG')
    cli()
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.get_individual | python | def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual | Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L127-L193 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.check_line_length | python | def check_line_length(self, splitted_line, expected_length):
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return | Check if the line is correctly formated. Throw a SyntaxError if it is not. | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L195-L203 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def ped_parser(self, family_info):
    """
    Parse .ped formatted family info.

    Each data line contributes one Individual to the parser and to its
    (possibly newly created) Family.

    Arguments:
        family_info (iterator): An iterator with family info
    """
    for raw_line in family_info:
        stripped = raw_line.rstrip()
        # Guard clause: skip comment lines and whitespace-only lines.
        if raw_line.startswith('#') or all(c in whitespace for c in stripped):
            continue
        columns = stripped.split('\t')
        if len(columns) != 6:
            # Not tab separated six columns; retry on any whitespace.
            columns = stripped.split()
        try:
            self.check_line_length(columns, 6)
        except WrongLineFormat as error:
            self.logger.error(error)
            self.logger.info("Ped line: {0}".format(error.ped_line))
            raise error
        sample_dict = dict(zip(self.header, columns))
        family_id = sample_dict['family_id']
        if family_id not in self.families:
            self.families[family_id] = Family(family_id, {})
        ind_object = self.get_individual(**sample_dict)
        self.individuals[ind_object.individual_id] = ind_object
        self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
    """
    Parse alternative formatted family info.

    This parses information with more than six columns.
    For alternative information a header column must exist and each row
    must have the same number of columns as the header.
    The first six columns must be the same as in the ped format; any
    extra columns are stored on the individual's ``extra_info``.

    Arguments:
        family_file (iterator): An iterator with family info
    """
    alternative_header = None
    for line in family_file:
        if line.startswith('#'):
            # Header line: strip the leading '#' and split on tabs.
            alternative_header = line[1:].rstrip().split('\t')
            self.logger.info("Alternative header found: {0}".format(line))
        elif line.strip():
            # A data line before any header line is a format error.
            if not alternative_header:
                raise WrongLineFormat(message="Alternative ped files must have "\
                "headers! Please add a header line.")
            splitted_line = line.rstrip().split('\t')
            if len(splitted_line) < 6:
                # Try to split the line on another symbol:
                splitted_line = line.rstrip().split()
            try:
                self.check_line_length(splitted_line, len(alternative_header))
            # NOTE(review): check_line_length raises WrongLineFormat (see
            # ped_parser); this handler assumes WrongLineFormat is a
            # SyntaxError subclass — TODO confirm against exceptions module.
            except SyntaxError as e:
                self.logger.error('Number of entrys differ from header.')
                self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
                self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
                self.logger.error("Length of Header: {0}. Length of "\
                "Ped line: {1}".format(
                    len(alternative_header),
                    len(splitted_line))
                )
                raise e
            # NOTE(review): line.strip() above already excludes empty
            # lines, so this len(line) > 1 test looks redundant — confirm.
            if len(line) > 1:
                # First six columns map onto the standard ped header.
                sample_dict = dict(zip(self.header, splitted_line[:6]))
                family_id = sample_dict['family_id']
                # Full row keyed by the alternative header names.
                all_info = dict(zip(alternative_header, splitted_line))
                if sample_dict['family_id'] not in self.families:
                    self.families[family_id] = Family(family_id, {})
                sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
                # Try other header naming:
                if not sample_dict['genetic_models']:
                    sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
                sample_dict['proband'] = all_info.get('Proband', '.')
                sample_dict['consultand'] = all_info.get('Consultand', '.')
                sample_dict['alive'] = all_info.get('Alive', '.')
                ind_object = self.get_individual(**sample_dict)
                self.individuals[ind_object.individual_id] = ind_object
                self.families[ind_object.family].add_individual(ind_object)
                if sample_dict['genetic_models']:
                    # Canonicalise the model names and record them on the family.
                    for model in self.get_models(sample_dict['genetic_models']):
                        self.families[ind_object.family].models_of_inheritance.add(model)
                # If requested, we try is it is an id in the CMMS format:
                sample_id_parts = ind_object.individual_id.split('-')
                if self.cmms_check and (len(sample_id_parts) == 3):
                    # If the id follow the CMMS convention we can
                    # do a sanity check
                    if self.check_cmms_id(ind_object.individual_id):
                        self.logger.debug("Id follows CMMS convention: {0}".format(
                            ind_object.individual_id
                        ))
                        self.logger.debug("Checking CMMS id affections status")
                        try:
                            self.check_cmms_affection_status(ind_object)
                        except WrongAffectionStatus as e:
                            self.logger.error("Wrong affection status for"\
                            " {0}. Affection status can be in"\
                            " {1}".format(e.cmms_id, e.valid_statuses))
                            raise e
                        except WrongPhenotype as e:
                            self.logger.error("Affection status for {0} "\
                            "({1}) disagrees with phenotype ({2})".format(
                                e.cmms_id, e.phenotype, e.affection_status
                            ))
                            raise e
                        try:
                            self.check_cmms_gender(ind_object)
                        except WrongGender as e:
                            self.logger.error("Gender code for id {0}"\
                            "({1}) disagrees with sex:{2}".format(
                                e.cmms_id, e.sex_code, e.sex
                            ))
                            raise e
                # Columns beyond the standard six are kept as extra info,
                # keyed by their alternative-header names.
                for i in range(6, len(splitted_line)):
                    ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
    """
    Return True if the id follows the CMMS naming standard.

    The standard is year:id-generation-indcode:affectionstatus.
    Year is two digits, id three digits, generation in roman letters,
    indcode are digits and affection status is in ['A', 'U', 'X'].
    Example 11001-II-1A.

    Arguments:
        ind_id (str): The individual id to check

    Returns:
        bool: True if the id follows the convention, False otherwise
    """
    id_parts = ind_id.split('-')
    # First part must be numeric (two-digit year + three-digit id).
    family_id = id_parts[0]
    try:
        int(family_id)
    except ValueError:
        return False
    # Last character encodes affection status and must be a letter:
    # A (=affected), U (=unaffected) or X (=unknown).
    # BUGFIX: the original wrapped isalpha() in a no-op try/except
    # ValueError, which never fired, so digits slipped through here.
    affection_status = id_parts[-1][-1]
    if not affection_status.isalpha():
        return False
    return True
def check_cmms_affection_status(self, ind_object):
    """
    Validate the affection-status letter of a CMMS-style id.

    The last character of the id must be one of 'A', 'U' or 'X', and it
    must agree with the individual's phenotype code (A <-> 2, U <-> 1).

    Arguments:
        ind_object: An Individual object

    Returns:
        bool: True when the affection status is valid and consistent

    Raises:
        WrongAffectionStatus: when the letter is not A/U/X
        WrongPhenotype: when the letter disagrees with the phenotype
    """
    valid_statuses = ['A', 'U', 'X']
    status_letter = ind_object.individual_id.split('-')[-1][-1]
    phenotype = ind_object.phenotype
    if status_letter not in valid_statuses:
        raise WrongAffectionStatus(ind_object.individual_id, valid_statuses)
    # 'A' must pair with phenotype 2 (affected), 'U' with 1 (unaffected).
    mismatch = ((status_letter == 'A' and phenotype != 2)
                or (status_letter == 'U' and phenotype != 1))
    if mismatch:
        raise WrongPhenotype(ind_object.individual_id, phenotype, status_letter)
    return True
def check_cmms_gender(self, ind_object):
    """
    Validate that the numeric individual code matches the sex field.

    CMMS convention: males always have odd individual numbers and
    females even ones.

    Arguments:
        ind_object: An Individual object

    Returns:
        bool: True when the code parity agrees with the sex field

    Raises:
        WrongGender: when the parity and the sex code disagree
    """
    # The individual code is the last id segment minus its trailing
    # affection-status letter, e.g. '1' in '11001-II-1A'.
    code_part = ind_object.individual_id.split('-')[-1][:-1]
    sex_code = int(code_part)
    sex = ind_object.sex
    is_even = (sex_code % 2 == 0)
    if (is_even and sex != 2) or (not is_even and sex != 1):
        raise WrongGender(ind_object.individual_id, sex, sex_code)
    return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.ped_parser | python | def ped_parser(self, family_info):
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object) | Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L205-L238 | [
"def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,\n genetic_models = None, proband='.', consultand='.', alive='.'):\n \"\"\"\n Return a individual object based on the indata.\n\n Arguments:\n family_id (str): The id for this family\n sample_id (str):... | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.alternative_parser | python | def alternative_parser(self, family_file):
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i] | Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L241-L345 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.check_cmms_id | python | def check_cmms_id(self, ind_id):
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True | Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L347-L374 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.check_cmms_affection_status | python | def check_cmms_affection_status(self, ind_object):
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True | Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L376-L401 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
    """
    Return True if *ind_id* follows the CMMS naming standard.

    The standard is <familyid>-<generation>-<indcode><affectionstatus>,
    e.g. 11001-II-1A: a numeric family id, a generation in roman
    numerals, an individual code and a single-letter affection status
    (A=affected, U=unaffected, X=unknown).

    Args:
        ind_id (str): The individual id to check

    Returns:
        bool: True if the id follows the convention, False otherwise
    """
    id_parts = ind_id.split('-')
    # The family id (first section) must be purely numeric, e.g. '11001'.
    family_id = id_parts[0]
    try:
        int(family_id)
    except ValueError:
        return False
    # The last character of the last section encodes affection status and
    # must be a letter.  NOTE: the original code wrapped str.isalpha() in
    # a try/except ValueError, which can never trigger, so the status was
    # effectively unchecked; test the boolean result instead.
    affection_status = id_parts[-1][-1]
    if not affection_status.isalpha():
        return False
    return True
def check_cmms_affection_status(self, ind_object):
    """
    Validate the affection status letter of a CMMS individual id.

    The last character of the id must be one of 'A' (affected),
    'U' (unaffected) or 'X' (unknown), and it has to agree with the
    individual's phenotype code (2 = affected, 1 = unaffected).

    Args:
        ind_object: An Individual object

    Returns:
        bool: True if the affection status is valid and consistent

    Raises:
        WrongAffectionStatus: If the status letter is not A, U or X
        WrongPhenotype: If the letter disagrees with the phenotype
    """
    valid_affection_statuses = ['A', 'U', 'X']
    status_letter = ind_object.individual_id.split('-')[-1][-1]
    phenotype = ind_object.phenotype
    if status_letter not in valid_affection_statuses:
        raise WrongAffectionStatus(ind_object.individual_id,
                                   valid_affection_statuses)
    affected_mismatch = (status_letter == 'A' and phenotype != 2)
    unaffected_mismatch = (status_letter == 'U' and phenotype != 1)
    if affected_mismatch or unaffected_mismatch:
        raise WrongPhenotype(ind_object.individual_id, phenotype,
                             status_letter)
    return True
def check_cmms_gender(self, ind_object):
    """
    Check that the CMMS individual code agrees with the reported sex.

    In CMMS ids males always get odd individual codes and females even
    ones, so the numeric part of the last id section must match the
    sex code (1 = male, 2 = female).

    Args:
        ind_object: An Individual object

    Returns:
        bool: True if the individual code parity and sex agree

    Raises:
        WrongGender: If the parity of the code contradicts the sex
    """
    sex = ind_object.sex
    last_part = ind_object.individual_id.split('-')[-1]
    # Strip the trailing affection letter to get the numeric code.
    ind_code = int(last_part[:-1])
    expected_sex = 2 if ind_code % 2 == 0 else 1
    if sex != expected_sex:
        raise WrongGender(ind_object.individual_id, sex, ind_code)
    return True
def get_models(self, genetic_models):
    """
    Translate a ';'-separated string of genetic models into the
    canonical model names.

    Common aliases and typos (defined in the ``legal_*`` name sets) are
    mapped to the canonical names 'AR_hom', 'AR_hom_dn', 'AD_dn',
    'AR_comp', 'X' and 'NA'.  Unrecognized names are logged and skipped.

    Args:
        genetic_models (str): ';'-separated genetic model names

    Returns:
        set: The canonical model names found
    """
    correct_model_names = set()
    for model in genetic_models.split(';'):
        # Map legal aliases (including common typos) to canonical names.
        if model in self.legal_ar_hom_names:
            model = 'AR_hom'
        elif model in self.legal_ar_hom_dn_names:
            model = 'AR_hom_dn'
        elif model in self.legal_ad_names:
            model = 'AD_dn'
        elif model in self.legal_compound_names:
            model = 'AR_comp'
        elif model in self.legal_x_names:
            model = 'X'
        elif model in self.legal_na_names:
            model = 'NA'
        else:
            # The original code logged that the model was ignored but
            # then still added the raw name to the result; actually
            # skip unrecognized models here.
            self.logger.warning("Incorrect model name: {0}."
                                " Ignoring model.".format(model))
            continue
        correct_model_names.add(model)
    return correct_model_names
def to_dict(self):
    """
    Return the pedigree information as a plain dictionary.

    Each family id maps to a list with one json-style dictionary per
    individual in that family.

    Returns:
        dict: family_id -> list of individual dicts
    """
    self.logger.debug("Return the information as a dictionary")
    families = {}
    for family_id, family_obj in self.families.items():
        members = []
        for individual_id, individual in family_obj.individuals.items():
            members.append(individual.to_json())
            self.logger.debug("Adding individual {0} to family {1}".format(
                individual_id, family_id))
        self.logger.debug("Adding family {0}".format(family_id))
        families[family_id] = members
    return families
def to_json(self):
    """
    Yield each family as a json-style object.

    Every yielded item is the ``to_json()`` representation of one
    family, i.e. a list of dictionaries that each describe one
    individual (family_id, id, sex, phenotype, mother, father, ...).
    The resulting stream can be collected into a list and serialized
    with the ``json`` module.

    Yields:
        The families one by one in json format
    """
    for family in self.families.values():
        yield family.to_json()
def to_madeline(self):
    """
    Yield the pedigree information in madeline format.

    The first item is the tab separated madeline header line, followed
    by one madeline formatted line per individual.

    Yields:
        str: madeline formatted lines
    """
    madeline_columns = (
        'FamilyID',
        'IndividualID',
        'Gender',
        'Father',
        'Mother',
        'Affected',
        'Proband',
        'Consultand',
        'Alive',
    )
    yield '\t'.join(madeline_columns)
    for family in self.families.values():
        for individual in family.individuals.values():
            yield individual.to_madeline()
def to_ped(self):
    """
    Yield the pedigree information in ped format.

    The header starts with the six standard ped columns and is extended
    with any of the optional columns (inheritance model, proband,
    consultand, alive) that occur in the individuals' extra info.  One
    tab separated line per individual follows the header.

    Yields:
        str: ped formatted lines, header first
    """
    ped_header = [
        '#FamilyID',
        'IndividualID',
        'PaternalID',
        'MaternalID',
        'Sex',
        'Phenotype',
    ]
    optional_columns = [
        'InheritanceModel',
        'Proband',
        'Consultand',
        'Alive',
    ]
    # Extend the header with every optional column that shows up in any
    # individual's extra info, in the order they are first seen.
    for individual in self.individuals.values():
        for info_key in individual.extra_info:
            if info_key in optional_columns and info_key not in ped_header:
                ped_header.append(info_key)
    self.logger.debug("Ped headers found: {0}".format(
        ', '.join(ped_header)))
    yield '\t'.join(ped_header)
    for family in self.families.values():
        for individual_obj in family.individuals.values():
            ind_info = individual_obj.to_json()
            row = [
                ind_info['family_id'],
                ind_info['id'],
                ind_info['father'],
                ind_info['mother'],
                ind_info['sex'],
                ind_info['phenotype'],
            ]
            # Empty slice when no optional columns were found, so
            # 'extra_info' is only touched when it is actually needed.
            for column in ped_header[6:]:
                row.append(ind_info['extra_info'].get(column, '.'))
            yield '\t'.join(row)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.check_cmms_gender | python | def check_cmms_gender(self, ind_object):
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True | Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L403-L420 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.get_models | python | def get_models(self, genetic_models):
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names | Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L422-L453 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.to_dict | python | def to_dict(self):
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families | Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L455-L478 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.to_madeline | python | def to_madeline(self):
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline() | Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L515-L541 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
"""
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Yields:
correct_model_names : A set with the correct model names
"""
correct_model_names = set()
genetic_models = genetic_models.split(';')
correct_model_names = set()
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
moonso/ped_parser | ped_parser/parser.py | FamilyParser.to_ped | python | def to_ped(self):
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info) | Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L543-L597 | null | class FamilyParser(object):
"""
Parses a iterator with family info and creates a family object with
individuals.
"""
def __init__(self, family_info, family_type = 'ped', cmms_check=False):
"""
Arguments:
family_info (iterator)
family_type (str): Any of [ped, alt, cmms, fam, mip]
cmms_check (bool, optional): Perform CMMS validations?
"""
super(FamilyParser, self).__init__()
if __name__ == "__main__":
self.logger = logging.getLogger("ped_parser.FamilyParser")
else:
self.logger = logging.getLogger(__name__)
self.logger.info("Initializing family parser")
self.cmms_check = cmms_check
self.family_type = family_type
self.logger.info("Family type:{0}".format(family_type))
self.families = {}
self.individuals = {}
self.legal_ar_hom_names = AR_HOM_NAMES
self.logger.debug("Legal AR hom names:{0}".format(AR_HOM_NAMES))
self.legal_ar_hom_dn_names = AR_HOM_DN_NAMES
self.logger.debug("Legal AR dn names:{0}".format(AR_HOM_DN_NAMES))
self.legal_compound_names = COMPOUND_NAMES
self.logger.debug("Legal AR compound names:{0}".format(COMPOUND_NAMES))
self.legal_ad_names = AD_NAMES
self.logger.debug("Legal AD compound names:{0}".format(AD_NAMES))
self.legal_x_names = X_NAMES
self.logger.debug("Legal X hom names:{0}".format(X_NAMES))
self.legal_na_names = NA_NAMES
self.logger.debug("Legal NA names:{0}".format(NA_NAMES))
self.header = ['family_id', 'sample_id', 'father_id',
'mother_id', 'sex', 'phenotype']
if self.family_type in ['ped', 'fam']:
self.ped_parser(family_info)
elif self.family_type == 'alt':
self.alternative_parser(family_info)
elif self.family_type in ['cmms', 'mip']:
self.alternative_parser(family_info)
# elif family_type == 'broad':
# self.broad_parser(individual_line, line_count)
for fam in self.families:
self.families[fam].family_check()
def get_individual(self, family_id, sample_id, father_id, mother_id, sex, phenotype,
genetic_models = None, proband='.', consultand='.', alive='.'):
"""
Return a individual object based on the indata.
Arguments:
family_id (str): The id for this family
sample_id (str): The id for this sample
father_id (str): The id for this samples father
mother_id (str): The id for this samples mother
sex (str): The id for the sex of this sample
phenotype (str): The id for the phenotype of this sample
genetic_models (str): A ';'-separated string with the expected
models of inheritance for this sample
proband (str): 'Yes', 'No' or '.'
consultand (str): 'Yes', 'No' or '.' if the individual is sequenced
alive (str): 'Yes', 'No' or '.'
returns:
individual (Individual): A Individual object with the information
"""
if sex not in ['1', '2']:
sex = '0'
if phenotype not in ['1', '2']:
phenotype = '0'
if mother_id == '.':
mother_id = '0'
if father_id == '.':
father_id = '0'
if genetic_models:
genetic_models = genetic_models.split(';')
if proband == 'Yes':
proband = 'Y'
elif proband == 'No':
proband = 'N'
else:
proband = '.'
if consultand == 'Yes':
consultand = 'Y'
elif consultand == 'No':
consultand = 'N'
else:
consultand = '.'
if alive == 'Yes':
alive = 'Y'
elif alive == 'No':
alive = 'N'
else:
alive = '.'
individual = Individual(
sample_id,
family_id,
mother_id,
father_id,
sex,
phenotype,
genetic_models,
proband,
consultand,
alive
)
return individual
def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formated. Throw a SyntaxError if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATED PED LINE!',
ped_line = '\t'.join(splitted_line))
return
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
def alternative_parser(self, family_file):
"""
Parse alternative formatted family info
This parses a information with more than six columns.
For alternative information header comlumn must exist and each row
must have the same amount of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
"""
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entrys differ from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we try is it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
def check_cmms_id(self, ind_id):
"""
Take the ID and check if it is following the cmms standard.
The standard is year:id-generation-indcode:affectionstatus.
Year is two digits, id three digits, generation in roman letters
indcode are digits and affection status are in ['A', 'U', 'X'].
Example 11001-II-1A.
Input:
ind_obj : A individual object
Yields:
bool : True if it is correct
"""
ind_id = ind_id.split('-')
# This in A (=affected), U (=unaffected) or X (=unknown)
family_id = ind_id[0]
try:
int(family_id)
except ValueError:
return False
affection_status = ind_id[-1][-1]
try:
type(affection_status.isalpha())
except ValueError:
return False
return True
def check_cmms_affection_status(self, ind_object):
"""
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if affection status is correct
False otherwise
"""
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
def check_cmms_gender(self, ind_object):
"""
Check if the phenotype is correct.
Args:
ind_object : An Individuals object
Yields:
bool : True if phenotype status is correct
False otherwise
"""
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])# Males allways have odd numbers and womans even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
def get_models(self, genetic_models):
    """
    Translate raw genetic model names into their canonical form.

    Each model in the ';'-separated string is matched against the known
    alias sets (common typos included) for every inheritance model.
    Names that match no alias are logged and skipped.

    Args:
        genetic_models (str): A ';'-separated string with genetic models

    Returns:
        correct_model_names (set): The canonical model names found
    """
    correct_model_names = set()
    for model in genetic_models.split(';'):
        # We need to allow typos
        if model in self.legal_ar_hom_names:
            model = 'AR_hom'
        elif model in self.legal_ar_hom_dn_names:
            model = 'AR_hom_dn'
        elif model in self.legal_ad_names:
            model = 'AD_dn'
        elif model in self.legal_compound_names:
            model = 'AR_comp'
        elif model in self.legal_x_names:
            model = 'X'
        elif model in self.legal_na_names:
            model = 'NA'
        else:
            # Bug fix: the unrecognized name used to be added to the
            # result even though this log line said it was ignored.
            self.logger.warning("Incorrect model name: {0}."\
                " Ignoring model.".format(model))
            continue
        correct_model_names.add(model)
    return correct_model_names
def to_dict(self):
"""
Return the information from the pedigree file as a dictionary.
family id is key and a list with dictionarys for each individual
as value.
Returns:
families (dict): A dictionary with the families
"""
self.logger.debug("Return the information as a dictionary")
families = {}
for family_id in self.families:
family = []
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
family.append(individual.to_json())
self.logger.debug("Adding individual {0} to family {1}".format(
individual_id, family_id
))
self.logger.debug("Adding family {0}".format(family_id))
families[family_id] = family
return families
def to_json(self):
"""
Yield the information from the pedigree file as a json object.
This is a list with lists that represents families, families have
dictionaries that represents individuals like
[
[
{
'family_id:family_id',
'id':individual_id,
'sex':gender_code,
'phenotype': phenotype_code,
'mother': mother_id,
'father': father_id
},
{
...
}
],
[
]
]
This object can easily be converted to a json object.
Yields:
the information in json format
"""
#json_families = []
for family_id in self.families:
#json_families.append(self.families[family_id].to_json())
yield self.families[family_id].to_json()
#return json.dumps(json_families)
def to_madeline(self):
"""
Return a generator with the info in madeline format.
Yields:
An iterator with family info in madeline format
"""
madeline_header = [
'FamilyID',
'IndividualID',
'Gender',
'Father',
'Mother',
'Affected',
'Proband',
'Consultand',
'Alive'
]
yield '\t'.join(madeline_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id]
yield individual.to_madeline()
def to_ped(self):
"""
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
yield '\t'.join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
yield '\t'.join(ped_info)
|
def check_grandparents(self, mother = None, father = None):
    """
    Check if there are any grandparents and record their ids.

    Both the mother and the father of each known parent are added to
    ``self.grandparents`` ('0' means the grandparent is unknown).

    Arguments:
        mother (Individual): An Individual object that represents the mother
        father (Individual): An Individual object that represents the father
    """
    if mother:
        # Bug fix: record BOTH maternal grandparents. The old 'elif'
        # skipped the grandfather whenever the grandmother was known.
        if mother.mother != '0':
            self.grandparents[mother.mother] = ''
        if mother.father != '0':
            self.grandparents[mother.father] = ''
    if father:
        if father.mother != '0':
            self.grandparents[father.mother] = ''
        if father.father != '0':
            self.grandparents[father.father] = ''
    return
return | Check if there are any grand parents.
Set the grandparent ids.
Arguments:
mother (Individual): An Individual object that represents the mother
father (Individual): An Individual object that represents the father | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/individual.py#L98-L120 | null | class Individual(object):
"""docstring for Individual"""
def __init__(self, ind, family='0', mother='0', father='0',sex='0',phenotype='0',
genetic_models=None, proband='.', consultand='.', alive='.'):
#TODO write test to throw exceptions if malformed input.
self.logger = logging.getLogger(__name__)
self.logger.debug("Creating individual")
self.individual_id = ind #Individual Id STRING
self.logger.debug("Individual id: {0}".format(self.individual_id))
self.family = family #Family Id STRING
self.logger.debug("Family id: {0}".format(self.family))
self.mother = mother #Mother Id STRING
self.logger.debug("Mother id: {0}".format(self.mother))
self.father = father # Father Id STRING
self.logger.debug("Father id: {0}".format(self.father))
self.affected = False
self.healthy = False
self.extra_info = {}
# For madeline:
self.proband = proband
self.logger.debug("Proband: {0}".format(self.proband))
self.consultand = consultand
self.logger.debug("Consultand: {0}".format(self.consultand))
self.alive = alive
self.logger.debug("Alive: {0}".format(self.alive))
try:
self.sex = int(sex) # Sex Integer
self.logger.debug("Sex: {0}".format(self.sex))
self.phenotype = int(phenotype) # Phenotype INTEGER
self.logger.debug("Phenotype: {0}".format(self.phenotype))
except ValueError:
raise SyntaxError('Sex and phenotype have to be integers.')
self.has_parents = False
self.has_both_parents = False
if self.mother != '0':
self.has_parents = True
if self.father != '0':
self.has_both_parents = True
elif self.father != '0':
self.has_parents = True
self.logger.debug("Individual has parents: {0}".format(self.has_parents))
# These features will be added
#TODO make use of family relations:
self.siblings = set()
self.grandparents = dict()
self.first_cousins = set()
self.second_cousins = set()
if self.phenotype == 2:
self.affected = True
elif self.phenotype == 1:
self.healthy = True
def check_grandparents(self, mother = None, father = None):
"""
Check if there are any grand parents.
Set the grandparents id:s
Arguments:
mother (Individual): An Individual object that represents the mother
father (Individual): An Individual object that represents the father
"""
if mother:
if mother.mother != '0':
self.grandparents[mother.mother] = ''
elif mother.father != '0':
self.grandparents[mother.father] = ''
if father:
if father.mother != '0':
self.grandparents[father.mother] = ''
elif father.father != '0':
self.grandparents[father.father] = ''
return
def to_json(self):
"""
Return the individual info in a dictionary for json.
"""
self.logger.debug("Returning json info")
individual_info = {
'family_id': self.family,
'id':self.individual_id,
'sex':str(self.sex),
'phenotype': str(self.phenotype),
'mother': self.mother,
'father': self.father,
'extra_info': self.extra_info
}
return individual_info
def to_madeline(self):
"""
Return the individual info in a madeline formated string
"""
#Convert sex to madeleine type
self.logger.debug("Returning madeline info")
if self.sex == 1:
madeline_gender = 'M'
elif self.sex == 2:
madeline_gender = 'F'
else:
madeline_gender = '.'
#Convert father to madeleine type
if self.father == '0':
madeline_father = '.'
else:
madeline_father = self.father
#Convert mother to madeleine type
if self.mother == '0':
madeline_mother = '.'
else:
madeline_mother = self.mother
#Convert phenotype to madeleine type
if self.phenotype == 1:
madeline_phenotype = 'U'
elif self.phenotype == 2:
madeline_phenotype = 'A'
else:
madeline_phenotype = '.'
return "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}".format(
self.family, self.individual_id, madeline_gender,
madeline_father, madeline_mother, madeline_phenotype,
self.proband, self.consultand, self.alive
)
def __repr__(self):
return "Individual(individual_id={0}, family={1}, mother={2}, " \
"father={3}, sex={4}, phenotype={5})".format(
self.individual_id, self.family, self.mother, self.father,
self.sex, self.phenotype
)
def __str__(self):
ind_info = ['ind_id:', self.individual_id,
'family:', self.family,
'mother:', self.mother,
'father:', self.father,
'sex:', str(self.sex),
'phenotype:', str(self.phenotype),
]
if len(self.siblings) > 0:
ind_info.append('siblings:')
ind_info.append(','.join(self.siblings))
return ' '.join(ind_info)
|
def to_json(self):
    """Return the individual info as a json-serializable dictionary."""
    self.logger.debug("Returning json info")
    # Sex and phenotype are stored as ints; serialize them as strings.
    return {
        'family_id': self.family,
        'id': self.individual_id,
        'sex': str(self.sex),
        'phenotype': str(self.phenotype),
        'mother': self.mother,
        'father': self.father,
        'extra_info': self.extra_info,
    }
"""docstring for Individual"""
def __init__(self, ind, family='0', mother='0', father='0',sex='0',phenotype='0',
genetic_models=None, proband='.', consultand='.', alive='.'):
#TODO write test to throw exceptions if malformed input.
self.logger = logging.getLogger(__name__)
self.logger.debug("Creating individual")
self.individual_id = ind #Individual Id STRING
self.logger.debug("Individual id: {0}".format(self.individual_id))
self.family = family #Family Id STRING
self.logger.debug("Family id: {0}".format(self.family))
self.mother = mother #Mother Id STRING
self.logger.debug("Mother id: {0}".format(self.mother))
self.father = father # Father Id STRING
self.logger.debug("Father id: {0}".format(self.father))
self.affected = False
self.healthy = False
self.extra_info = {}
# For madeline:
self.proband = proband
self.logger.debug("Proband: {0}".format(self.proband))
self.consultand = consultand
self.logger.debug("Consultand: {0}".format(self.consultand))
self.alive = alive
self.logger.debug("Alive: {0}".format(self.alive))
try:
self.sex = int(sex) # Sex Integer
self.logger.debug("Sex: {0}".format(self.sex))
self.phenotype = int(phenotype) # Phenotype INTEGER
self.logger.debug("Phenotype: {0}".format(self.phenotype))
except ValueError:
raise SyntaxError('Sex and phenotype have to be integers.')
self.has_parents = False
self.has_both_parents = False
if self.mother != '0':
self.has_parents = True
if self.father != '0':
self.has_both_parents = True
elif self.father != '0':
self.has_parents = True
self.logger.debug("Individual has parents: {0}".format(self.has_parents))
# These features will be added
#TODO make use of family relations:
self.siblings = set()
self.grandparents = dict()
self.first_cousins = set()
self.second_cousins = set()
if self.phenotype == 2:
self.affected = True
elif self.phenotype == 1:
self.healthy = True
def check_grandparents(self, mother = None, father = None):
"""
Check if there are any grand parents.
Set the grandparents id:s
Arguments:
mother (Individual): An Individual object that represents the mother
father (Individual): An Individual object that represents the father
"""
if mother:
if mother.mother != '0':
self.grandparents[mother.mother] = ''
elif mother.father != '0':
self.grandparents[mother.father] = ''
if father:
if father.mother != '0':
self.grandparents[father.mother] = ''
elif father.father != '0':
self.grandparents[father.father] = ''
return
def to_madeline(self):
"""
Return the individual info in a madeline formated string
"""
#Convert sex to madeleine type
self.logger.debug("Returning madeline info")
if self.sex == 1:
madeline_gender = 'M'
elif self.sex == 2:
madeline_gender = 'F'
else:
madeline_gender = '.'
#Convert father to madeleine type
if self.father == '0':
madeline_father = '.'
else:
madeline_father = self.father
#Convert mother to madeleine type
if self.mother == '0':
madeline_mother = '.'
else:
madeline_mother = self.mother
#Convert phenotype to madeleine type
if self.phenotype == 1:
madeline_phenotype = 'U'
elif self.phenotype == 2:
madeline_phenotype = 'A'
else:
madeline_phenotype = '.'
return "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}".format(
self.family, self.individual_id, madeline_gender,
madeline_father, madeline_mother, madeline_phenotype,
self.proband, self.consultand, self.alive
)
def __repr__(self):
return "Individual(individual_id={0}, family={1}, mother={2}, " \
"father={3}, sex={4}, phenotype={5})".format(
self.individual_id, self.family, self.mother, self.father,
self.sex, self.phenotype
)
def __str__(self):
ind_info = ['ind_id:', self.individual_id,
'family:', self.family,
'mother:', self.mother,
'father:', self.father,
'sex:', str(self.sex),
'phenotype:', str(self.phenotype),
]
if len(self.siblings) > 0:
ind_info.append('siblings:')
ind_info.append(','.join(self.siblings))
return ' '.join(ind_info)
|
def to_madeline(self):
    """Return the individual info as a madeline formatted string."""
    self.logger.debug("Returning madeline info")
    # Map the ped codes onto madeline's letter codes; '.' means unknown.
    gender = {1: 'M', 2: 'F'}.get(self.sex, '.')
    affection = {1: 'U', 2: 'A'}.get(self.phenotype, '.')
    father = '.' if self.father == '0' else self.father
    mother = '.' if self.mother == '0' else self.mother
    columns = (self.family, self.individual_id, gender, father, mother,
               affection, self.proband, self.consultand, self.alive)
    return '\t'.join(str(column) for column in columns)
"""docstring for Individual"""
def __init__(self, ind, family='0', mother='0', father='0',sex='0',phenotype='0',
genetic_models=None, proband='.', consultand='.', alive='.'):
#TODO write test to throw exceptions if malformed input.
self.logger = logging.getLogger(__name__)
self.logger.debug("Creating individual")
self.individual_id = ind #Individual Id STRING
self.logger.debug("Individual id: {0}".format(self.individual_id))
self.family = family #Family Id STRING
self.logger.debug("Family id: {0}".format(self.family))
self.mother = mother #Mother Id STRING
self.logger.debug("Mother id: {0}".format(self.mother))
self.father = father # Father Id STRING
self.logger.debug("Father id: {0}".format(self.father))
self.affected = False
self.healthy = False
self.extra_info = {}
# For madeline:
self.proband = proband
self.logger.debug("Proband: {0}".format(self.proband))
self.consultand = consultand
self.logger.debug("Consultand: {0}".format(self.consultand))
self.alive = alive
self.logger.debug("Alive: {0}".format(self.alive))
try:
self.sex = int(sex) # Sex Integer
self.logger.debug("Sex: {0}".format(self.sex))
self.phenotype = int(phenotype) # Phenotype INTEGER
self.logger.debug("Phenotype: {0}".format(self.phenotype))
except ValueError:
raise SyntaxError('Sex and phenotype have to be integers.')
self.has_parents = False
self.has_both_parents = False
if self.mother != '0':
self.has_parents = True
if self.father != '0':
self.has_both_parents = True
elif self.father != '0':
self.has_parents = True
self.logger.debug("Individual has parents: {0}".format(self.has_parents))
# These features will be added
#TODO make use of family relations:
self.siblings = set()
self.grandparents = dict()
self.first_cousins = set()
self.second_cousins = set()
if self.phenotype == 2:
self.affected = True
elif self.phenotype == 1:
self.healthy = True
def check_grandparents(self, mother = None, father = None):
"""
Check if there are any grand parents.
Set the grandparents id:s
Arguments:
mother (Individual): An Individual object that represents the mother
father (Individual): An Individual object that represents the father
"""
if mother:
if mother.mother != '0':
self.grandparents[mother.mother] = ''
elif mother.father != '0':
self.grandparents[mother.father] = ''
if father:
if father.mother != '0':
self.grandparents[father.mother] = ''
elif father.father != '0':
self.grandparents[father.father] = ''
return
def to_json(self):
"""
Return the individual info in a dictionary for json.
"""
self.logger.debug("Returning json info")
individual_info = {
'family_id': self.family,
'id':self.individual_id,
'sex':str(self.sex),
'phenotype': str(self.phenotype),
'mother': self.mother,
'father': self.father,
'extra_info': self.extra_info
}
return individual_info
def to_madeline(self):
"""
Return the individual info in a madeline formated string
"""
#Convert sex to madeleine type
self.logger.debug("Returning madeline info")
if self.sex == 1:
madeline_gender = 'M'
elif self.sex == 2:
madeline_gender = 'F'
else:
madeline_gender = '.'
#Convert father to madeleine type
if self.father == '0':
madeline_father = '.'
else:
madeline_father = self.father
#Convert mother to madeleine type
if self.mother == '0':
madeline_mother = '.'
else:
madeline_mother = self.mother
#Convert phenotype to madeleine type
if self.phenotype == 1:
madeline_phenotype = 'U'
elif self.phenotype == 2:
madeline_phenotype = 'A'
else:
madeline_phenotype = '.'
return "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}".format(
self.family, self.individual_id, madeline_gender,
madeline_father, madeline_mother, madeline_phenotype,
self.proband, self.consultand, self.alive
)
def __repr__(self):
return "Individual(individual_id={0}, family={1}, mother={2}, " \
"father={3}, sex={4}, phenotype={5})".format(
self.individual_id, self.family, self.mother, self.father,
self.sex, self.phenotype
)
def __str__(self):
ind_info = ['ind_id:', self.individual_id,
'family:', self.family,
'mother:', self.mother,
'father:', self.father,
'sex:', str(self.sex),
'phenotype:', str(self.phenotype),
]
if len(self.siblings) > 0:
ind_info.append('siblings:')
ind_info.append(','.join(self.siblings))
return ' '.join(ind_info)
|
moonso/ped_parser | ped_parser/family.py | Family.family_check | python | def family_check(self):
# Validate the family structure: verify referenced parents, collect
# affected individuals, trios and duos, and annotate siblings.
# NOTE(review): this listing's indentation was flattened by the dataset
# export; in the project file this is a method of Family.
#TODO Make some tests for these
self.logger.info("Checking family relations for {0}".format(
self.family_id)
)
# Walk every member of the family once.
for individual_id in self.individuals:
self.logger.debug("Checking individual {0}".format(individual_id))
individual = self.individuals[individual_id]
self.logger.debug("Checking if individual {0} is affected".format(
individual_id))
# Record the ids of affected members (phenotype 2 sets .affected on
# the Individual at construction time).
if individual.affected:
self.logger.debug("Found affected individual {0}".format(
individual_id)
)
self.affected_individuals.add(individual_id)
father = individual.father
mother = individual.mother
if individual.has_parents:
self.logger.debug("Individual {0} has parents".format(
individual_id))
# At least one parent reference exists, so the family has relations.
self.no_relations = False
# check_parent raises PedigreeError when a referenced parent is
# missing from the family or has the wrong sex; log and re-raise.
try:
self.check_parent(father, father=True)
self.check_parent(mother, father=False)
except PedigreeError as e:
self.logger.error(e.message)
raise e
# Check if there is a trio
if individual.has_both_parents:
self.trios.append(set([individual_id, father, mother]))
# Only one parent is known ('0' means unknown): record a duo instead.
elif father != '0':
self.duos.append(set([individual_id, father]))
else:
self.duos.append(set([individual_id, mother]))
##TODO self.check_grandparents(individual)
# Annotate siblings:
# check_siblings decides whether the pair counts as siblings.
for individual_2_id in self.individuals:
if individual_id != individual_2_id:
if self.check_siblings(individual_id, individual_2_id):
individual.siblings.add(individual_2_id) | Check if the family members break the structure of the family.
e.g. a nonexistent parent, a parent with the wrong sex, etc.
Also extracts all trios found; this is useful at the moment
since GATK can only do phasing of trios and duos. | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L61-L116 | [
"def check_parent(self, parent_id, father = False):\n \"\"\"\n Check if the parent info is correct. If an individual is not present in file raise exeption.\n\n Input: An id that represents a parent\n father = True/False\n\n Raises SyntaxError if\n The parent id is not present\n T... | class Family(object):
"""Base class for the family parsers."""
def __init__(self, family_id, individuals = {}, models_of_inheritance=set([]),
logger=None, logfile=None, loglevel=None):
super(Family, self).__init__()
self.logger = logging.getLogger(__name__)
# Each family needs to have a family id
self.family_id = family_id
self.logger.debug("Initiating family with id:{0}".format(self.family_id))
# This is a dict with individual objects
self.individuals = individuals
self.logger.debug("Adding individuals:{0}".format(
','.join([ind for ind in self.individuals])
))
# List of models of inheritance that should be prioritized.
self.models_of_inheritance = models_of_inheritance
self.logger.debug("Adding models of inheritance:{0}".format(
','.join(self.models_of_inheritance)
)
)
#Trios are a list of sets with trios.
self.trios = []
#Duos are a list of sets with trios.
self.duos = []
# Bool if there are any relations in the family
self.no_relations = True
# Set of affected individual id:s
self.affected_individuals = set()
def family_check(self):
    """
    Validate the family structure and collect relationships.

    For every individual: record it in ``self.affected_individuals``
    when affected, validate its parent links via ``check_parent()``
    (re-raising PedigreeError on a missing parent or a parent with the
    wrong sex), collect parent/child trios and duos (of help e.g. for
    GATK, which can only phase trios and duos), and annotate siblings
    on each individual.  ``self.no_relations`` is set to False as soon
    as any individual with parents is found.
    """
    #TODO Make some tests for these
    self.logger.info("Checking family relations for {0}".format(
        self.family_id)
    )
    for individual_id in self.individuals:
        self.logger.debug("Checking individual {0}".format(individual_id))
        individual = self.individuals[individual_id]
        self.logger.debug("Checking if individual {0} is affected".format(
            individual_id))
        if individual.affected:
            self.logger.debug("Found affected individual {0}".format(
                individual_id)
            )
            self.affected_individuals.add(individual_id)
        father = individual.father
        mother = individual.mother
        if individual.has_parents:
            self.logger.debug("Individual {0} has parents".format(
                individual_id))
            self.no_relations = False
            try:
                self.check_parent(father, father=True)
                self.check_parent(mother, father=False)
            except PedigreeError as e:
                # NOTE(review): relies on PedigreeError exposing a
                # .message attribute -- confirm in the exceptions module.
                self.logger.error(e.message)
                raise e
            # Check if there is a trio; '0' means "parent unknown" in ped
            # format, so has_both_parents implies neither id is '0'.
            if individual.has_both_parents:
                self.trios.append(set([individual_id, father, mother]))
            elif father != '0':
                self.duos.append(set([individual_id, father]))
            else:
                self.duos.append(set([individual_id, mother]))
            ##TODO self.check_grandparents(individual)
        # Annotate siblings (pairwise check against every other member):
        for individual_2_id in self.individuals:
            if individual_id != individual_2_id:
                if self.check_siblings(individual_id, individual_2_id):
                    individual.siblings.add(individual_2_id)
        ##TODO elif self.check_cousins(individual_id, individual_2_id):
        #    individual.cousins.add(individual_2_id)
def check_parent(self, parent_id, father = False):
"""
Check if the parent info is correct. If an individual is not present in file raise exeption.
Input: An id that represents a parent
father = True/False
Raises SyntaxError if
The parent id is not present
The gender of the parent is wrong.
"""
self.logger.debug("Checking parent {0}".format(parent_id))
if parent_id != '0':
if parent_id not in self.individuals:
raise PedigreeError(self.family_id, parent_id,
'Parent is not in family.')
if father:
if self.individuals[parent_id].sex != 1:
raise PedigreeError(self.family_id, parent_id,
'Father is not specified as male.')
else:
if self.individuals[parent_id].sex != 2:
raise PedigreeError(self.family_id, parent_id,
'Mother is not specified as female.')
return
def check_siblings(self, individual_1_id, individual_2_id):
"""
Check if two family members are siblings.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are siblings
False if they are not siblings
"""
self.logger.debug("Checking if {0} and {1} are siblings".format(
individual_1_id, individual_2_id
))
ind_1 = self.individuals[individual_1_id]
ind_2 = self.individuals[individual_2_id]
if ((ind_1.father != '0' and ind_1.father == ind_2.father) or
(ind_1.mother != '0' and ind_1.mother == ind_2.mother)):
return True
else:
return False
def check_cousins(self, individual_1_id, individual_2_id):
    """
    Check if two family members are cousins.

    If two individuals share any grandparents they are cousins.

    NOTE: not implemented yet -- this method only logs and always
    returns None.  The intended implementation (per the TODO below)
    would test whether any parent of one individual is a sibling of a
    parent of the other.

    Arguments:
        individual_1_id (str): The id of an individual
        individual_2_id (str): The id of an individual

    Returns:
        None (intended to be bool once implemented)
    """
    self.logger.debug("Checking if {0} and {1} are cousins".format(
        individual_1_id, individual_2_id
    ))
    #TODO check if any of the parents are siblings
    pass
def add_individual(self, individual_object):
"""
Add an individual to the family.
Arguments:
individual_object (Individual)
"""
ind_id = individual_object.individual_id
self.logger.info("Adding individual {0}".format(ind_id))
family_id = individual_object.family
if family_id != self.family_id:
raise PedigreeError(self.family, individual_object.individual_id,
"Family id of individual is not the same as family id for "\
"Family object!")
else:
self.individuals[ind_id] = individual_object
self.logger.debug("Individual {0} added to family {1}".format(
ind_id, family_id
))
return
def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype
def get_trios(self):
"""
Return the trios found in family
"""
return self.trios
def to_json(self):
"""
Return the family in json format.
The family will be represented as a list with dictionarys that
holds information for the individuals.
Returns:
list : A list with dictionaries
"""
return [self.individuals[ind].to_json() for ind in self.individuals]
def to_ped(self, outfile=None):
    """
    Print the individuals of the family in ped format.

    The header is the six standard ped columns plus any of the known
    extra headers (InheritanceModel, Proband, Consultand, Alive) that
    appear in at least one individual's extra_info.

    Arguments:
        outfile (file): writable file object; when None the lines are
            printed to stdout instead.
    """
    ped_header = [
        '#FamilyID',
        'IndividualID',
        'PaternalID',
        'MaternalID',
        'Sex',
        'Phenotype',
    ]
    # Only these extra_info keys are promoted to ped columns.
    extra_headers = [
        'InheritanceModel',
        'Proband',
        'Consultand',
        'Alive'
    ]
    for individual_id in self.individuals:
        individual = self.individuals[individual_id]
        for info in individual.extra_info:
            if info in extra_headers:
                if info not in ped_header:
                    ped_header.append(info)
    self.logger.debug("Ped headers found: {0}".format(
        ', '.join(ped_header)
    ))
    if outfile:
        outfile.write('\t'.join(ped_header)+'\n')
    else:
        print('\t'.join(ped_header))
    for individual in self.to_json():
        # NOTE(review): assumes to_json() yields string values for all
        # six core fields -- '\t'.join would raise on ints; confirm
        # against Individual.to_json.
        ped_info = []
        ped_info.append(individual['family_id'])
        ped_info.append(individual['id'])
        ped_info.append(individual['father'])
        ped_info.append(individual['mother'])
        ped_info.append(individual['sex'])
        ped_info.append(individual['phenotype'])
        if len(ped_header) > 6:
            # Missing extra values are written as '.' placeholders.
            for header in ped_header[6:]:
                ped_info.append(individual['extra_info'].get(header, '.'))
        if outfile:
            outfile.write('\t'.join(ped_info)+'\n')
        else:
            print('\t'.join(ped_info))
def __repr__(self):
return "Family(family_id={0}, individuals={1}, " \
"models_of_inheritance={2}".format(
self.family_id, self.individuals.keys(),
self.models_of_inheritance
)
def __str__(self):
"""Print the family members of this family"""
family = list(self.individuals.keys())
return "\t".join(family)
|
moonso/ped_parser | ped_parser/family.py | Family.check_parent | python | def check_parent(self, parent_id, father = False):
self.logger.debug("Checking parent {0}".format(parent_id))
if parent_id != '0':
if parent_id not in self.individuals:
raise PedigreeError(self.family_id, parent_id,
'Parent is not in family.')
if father:
if self.individuals[parent_id].sex != 1:
raise PedigreeError(self.family_id, parent_id,
'Father is not specified as male.')
else:
if self.individuals[parent_id].sex != 2:
raise PedigreeError(self.family_id, parent_id,
'Mother is not specified as female.')
return | Check if the parent info is correct. If an individual is not present in file raise exeption.
Input: An id that represents a parent
father = True/False
Raises SyntaxError if
The parent id is not present
The gender of the parent is wrong. | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L120-L144 | null | class Family(object):
"""Base class for the family parsers."""
def __init__(self, family_id, individuals = {}, models_of_inheritance=set([]),
logger=None, logfile=None, loglevel=None):
super(Family, self).__init__()
self.logger = logging.getLogger(__name__)
# Each family needs to have a family id
self.family_id = family_id
self.logger.debug("Initiating family with id:{0}".format(self.family_id))
# This is a dict with individual objects
self.individuals = individuals
self.logger.debug("Adding individuals:{0}".format(
','.join([ind for ind in self.individuals])
))
# List of models of inheritance that should be prioritized.
self.models_of_inheritance = models_of_inheritance
self.logger.debug("Adding models of inheritance:{0}".format(
','.join(self.models_of_inheritance)
)
)
#Trios are a list of sets with trios.
self.trios = []
#Duos are a list of sets with trios.
self.duos = []
# Bool if there are any relations in the family
self.no_relations = True
# Set of affected individual id:s
self.affected_individuals = set()
def family_check(self):
"""
Check if the family members break the structure of the family.
eg. nonexistent parent, wrong sex on parent etc.
Also extracts all trios found, this is of help for many at the moment
since GATK can only do phasing of trios and duos.
"""
#TODO Make some tests for these
self.logger.info("Checking family relations for {0}".format(
self.family_id)
)
for individual_id in self.individuals:
self.logger.debug("Checking individual {0}".format(individual_id))
individual = self.individuals[individual_id]
self.logger.debug("Checking if individual {0} is affected".format(
individual_id))
if individual.affected:
self.logger.debug("Found affected individual {0}".format(
individual_id)
)
self.affected_individuals.add(individual_id)
father = individual.father
mother = individual.mother
if individual.has_parents:
self.logger.debug("Individual {0} has parents".format(
individual_id))
self.no_relations = False
try:
self.check_parent(father, father=True)
self.check_parent(mother, father=False)
except PedigreeError as e:
self.logger.error(e.message)
raise e
# Check if there is a trio
if individual.has_both_parents:
self.trios.append(set([individual_id, father, mother]))
elif father != '0':
self.duos.append(set([individual_id, father]))
else:
self.duos.append(set([individual_id, mother]))
##TODO self.check_grandparents(individual)
# Annotate siblings:
for individual_2_id in self.individuals:
if individual_id != individual_2_id:
if self.check_siblings(individual_id, individual_2_id):
individual.siblings.add(individual_2_id)
##TODO elif self.check_cousins(individual_id, individual_2_id):
# individual.cousins.add(individual_2_id)
def check_siblings(self, individual_1_id, individual_2_id):
"""
Check if two family members are siblings.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are siblings
False if they are not siblings
"""
self.logger.debug("Checking if {0} and {1} are siblings".format(
individual_1_id, individual_2_id
))
ind_1 = self.individuals[individual_1_id]
ind_2 = self.individuals[individual_2_id]
if ((ind_1.father != '0' and ind_1.father == ind_2.father) or
(ind_1.mother != '0' and ind_1.mother == ind_2.mother)):
return True
else:
return False
def check_cousins(self, individual_1_id, individual_2_id):
"""
Check if two family members are cousins.
If two individuals share any grandparents they are cousins.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are cousins
False if they are not cousins
"""
self.logger.debug("Checking if {0} and {1} are cousins".format(
individual_1_id, individual_2_id
))
#TODO check if any of the parents are siblings
pass
def add_individual(self, individual_object):
"""
Add an individual to the family.
Arguments:
individual_object (Individual)
"""
ind_id = individual_object.individual_id
self.logger.info("Adding individual {0}".format(ind_id))
family_id = individual_object.family
if family_id != self.family_id:
raise PedigreeError(self.family, individual_object.individual_id,
"Family id of individual is not the same as family id for "\
"Family object!")
else:
self.individuals[ind_id] = individual_object
self.logger.debug("Individual {0} added to family {1}".format(
ind_id, family_id
))
return
def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype
def get_trios(self):
"""
Return the trios found in family
"""
return self.trios
def to_json(self):
"""
Return the family in json format.
The family will be represented as a list with dictionarys that
holds information for the individuals.
Returns:
list : A list with dictionaries
"""
return [self.individuals[ind].to_json() for ind in self.individuals]
def to_ped(self, outfile=None):
"""
Print the individuals of the family in ped format
The header will be the original ped header plus all headers found in
extra info of the individuals
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
if outfile:
outfile.write('\t'.join(ped_header)+'\n')
else:
print('\t'.join(ped_header))
for individual in self.to_json():
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
if outfile:
outfile.write('\t'.join(ped_info)+'\n')
else:
print('\t'.join(ped_info))
def __repr__(self):
return "Family(family_id={0}, individuals={1}, " \
"models_of_inheritance={2}".format(
self.family_id, self.individuals.keys(),
self.models_of_inheritance
)
def __str__(self):
"""Print the family members of this family"""
family = list(self.individuals.keys())
return "\t".join(family)
|
moonso/ped_parser | ped_parser/family.py | Family.check_siblings | python | def check_siblings(self, individual_1_id, individual_2_id):
self.logger.debug("Checking if {0} and {1} are siblings".format(
individual_1_id, individual_2_id
))
ind_1 = self.individuals[individual_1_id]
ind_2 = self.individuals[individual_2_id]
if ((ind_1.father != '0' and ind_1.father == ind_2.father) or
(ind_1.mother != '0' and ind_1.mother == ind_2.mother)):
return True
else:
return False | Check if two family members are siblings.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are siblings
False if they are not siblings | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L146-L168 | null | class Family(object):
"""Base class for the family parsers."""
def __init__(self, family_id, individuals = {}, models_of_inheritance=set([]),
logger=None, logfile=None, loglevel=None):
super(Family, self).__init__()
self.logger = logging.getLogger(__name__)
# Each family needs to have a family id
self.family_id = family_id
self.logger.debug("Initiating family with id:{0}".format(self.family_id))
# This is a dict with individual objects
self.individuals = individuals
self.logger.debug("Adding individuals:{0}".format(
','.join([ind for ind in self.individuals])
))
# List of models of inheritance that should be prioritized.
self.models_of_inheritance = models_of_inheritance
self.logger.debug("Adding models of inheritance:{0}".format(
','.join(self.models_of_inheritance)
)
)
#Trios are a list of sets with trios.
self.trios = []
#Duos are a list of sets with trios.
self.duos = []
# Bool if there are any relations in the family
self.no_relations = True
# Set of affected individual id:s
self.affected_individuals = set()
def family_check(self):
"""
Check if the family members break the structure of the family.
eg. nonexistent parent, wrong sex on parent etc.
Also extracts all trios found, this is of help for many at the moment
since GATK can only do phasing of trios and duos.
"""
#TODO Make some tests for these
self.logger.info("Checking family relations for {0}".format(
self.family_id)
)
for individual_id in self.individuals:
self.logger.debug("Checking individual {0}".format(individual_id))
individual = self.individuals[individual_id]
self.logger.debug("Checking if individual {0} is affected".format(
individual_id))
if individual.affected:
self.logger.debug("Found affected individual {0}".format(
individual_id)
)
self.affected_individuals.add(individual_id)
father = individual.father
mother = individual.mother
if individual.has_parents:
self.logger.debug("Individual {0} has parents".format(
individual_id))
self.no_relations = False
try:
self.check_parent(father, father=True)
self.check_parent(mother, father=False)
except PedigreeError as e:
self.logger.error(e.message)
raise e
# Check if there is a trio
if individual.has_both_parents:
self.trios.append(set([individual_id, father, mother]))
elif father != '0':
self.duos.append(set([individual_id, father]))
else:
self.duos.append(set([individual_id, mother]))
##TODO self.check_grandparents(individual)
# Annotate siblings:
for individual_2_id in self.individuals:
if individual_id != individual_2_id:
if self.check_siblings(individual_id, individual_2_id):
individual.siblings.add(individual_2_id)
##TODO elif self.check_cousins(individual_id, individual_2_id):
# individual.cousins.add(individual_2_id)
def check_parent(self, parent_id, father = False):
"""
Check if the parent info is correct. If an individual is not present in file raise exeption.
Input: An id that represents a parent
father = True/False
Raises SyntaxError if
The parent id is not present
The gender of the parent is wrong.
"""
self.logger.debug("Checking parent {0}".format(parent_id))
if parent_id != '0':
if parent_id not in self.individuals:
raise PedigreeError(self.family_id, parent_id,
'Parent is not in family.')
if father:
if self.individuals[parent_id].sex != 1:
raise PedigreeError(self.family_id, parent_id,
'Father is not specified as male.')
else:
if self.individuals[parent_id].sex != 2:
raise PedigreeError(self.family_id, parent_id,
'Mother is not specified as female.')
return
def check_siblings(self, individual_1_id, individual_2_id):
"""
Check if two family members are siblings.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are siblings
False if they are not siblings
"""
self.logger.debug("Checking if {0} and {1} are siblings".format(
individual_1_id, individual_2_id
))
ind_1 = self.individuals[individual_1_id]
ind_2 = self.individuals[individual_2_id]
if ((ind_1.father != '0' and ind_1.father == ind_2.father) or
(ind_1.mother != '0' and ind_1.mother == ind_2.mother)):
return True
else:
return False
def check_cousins(self, individual_1_id, individual_2_id):
"""
Check if two family members are cousins.
If two individuals share any grandparents they are cousins.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are cousins
False if they are not cousins
"""
self.logger.debug("Checking if {0} and {1} are cousins".format(
individual_1_id, individual_2_id
))
#TODO check if any of the parents are siblings
pass
def add_individual(self, individual_object):
"""
Add an individual to the family.
Arguments:
individual_object (Individual)
"""
ind_id = individual_object.individual_id
self.logger.info("Adding individual {0}".format(ind_id))
family_id = individual_object.family
if family_id != self.family_id:
raise PedigreeError(self.family, individual_object.individual_id,
"Family id of individual is not the same as family id for "\
"Family object!")
else:
self.individuals[ind_id] = individual_object
self.logger.debug("Individual {0} added to family {1}".format(
ind_id, family_id
))
return
def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype
def get_trios(self):
"""
Return the trios found in family
"""
return self.trios
def to_json(self):
"""
Return the family in json format.
The family will be represented as a list with dictionarys that
holds information for the individuals.
Returns:
list : A list with dictionaries
"""
return [self.individuals[ind].to_json() for ind in self.individuals]
def to_ped(self, outfile=None):
"""
Print the individuals of the family in ped format
The header will be the original ped header plus all headers found in
extra info of the individuals
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
if outfile:
outfile.write('\t'.join(ped_header)+'\n')
else:
print('\t'.join(ped_header))
for individual in self.to_json():
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
if outfile:
outfile.write('\t'.join(ped_info)+'\n')
else:
print('\t'.join(ped_info))
def __repr__(self):
return "Family(family_id={0}, individuals={1}, " \
"models_of_inheritance={2}".format(
self.family_id, self.individuals.keys(),
self.models_of_inheritance
)
def __str__(self):
"""Print the family members of this family"""
family = list(self.individuals.keys())
return "\t".join(family)
|
moonso/ped_parser | ped_parser/family.py | Family.check_cousins | python | def check_cousins(self, individual_1_id, individual_2_id):
self.logger.debug("Checking if {0} and {1} are cousins".format(
individual_1_id, individual_2_id
))
#TODO check if any of the parents are siblings
pass | Check if two family members are cousins.
If two individuals share any grandparents they are cousins.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are cousins
False if they are not cousins | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L170-L190 | null | class Family(object):
"""Base class for the family parsers."""
def __init__(self, family_id, individuals = {}, models_of_inheritance=set([]),
logger=None, logfile=None, loglevel=None):
super(Family, self).__init__()
self.logger = logging.getLogger(__name__)
# Each family needs to have a family id
self.family_id = family_id
self.logger.debug("Initiating family with id:{0}".format(self.family_id))
# This is a dict with individual objects
self.individuals = individuals
self.logger.debug("Adding individuals:{0}".format(
','.join([ind for ind in self.individuals])
))
# List of models of inheritance that should be prioritized.
self.models_of_inheritance = models_of_inheritance
self.logger.debug("Adding models of inheritance:{0}".format(
','.join(self.models_of_inheritance)
)
)
#Trios are a list of sets with trios.
self.trios = []
#Duos are a list of sets with trios.
self.duos = []
# Bool if there are any relations in the family
self.no_relations = True
# Set of affected individual id:s
self.affected_individuals = set()
def family_check(self):
"""
Check if the family members break the structure of the family.
eg. nonexistent parent, wrong sex on parent etc.
Also extracts all trios found, this is of help for many at the moment
since GATK can only do phasing of trios and duos.
"""
#TODO Make some tests for these
self.logger.info("Checking family relations for {0}".format(
self.family_id)
)
for individual_id in self.individuals:
self.logger.debug("Checking individual {0}".format(individual_id))
individual = self.individuals[individual_id]
self.logger.debug("Checking if individual {0} is affected".format(
individual_id))
if individual.affected:
self.logger.debug("Found affected individual {0}".format(
individual_id)
)
self.affected_individuals.add(individual_id)
father = individual.father
mother = individual.mother
if individual.has_parents:
self.logger.debug("Individual {0} has parents".format(
individual_id))
self.no_relations = False
try:
self.check_parent(father, father=True)
self.check_parent(mother, father=False)
except PedigreeError as e:
self.logger.error(e.message)
raise e
# Check if there is a trio
if individual.has_both_parents:
self.trios.append(set([individual_id, father, mother]))
elif father != '0':
self.duos.append(set([individual_id, father]))
else:
self.duos.append(set([individual_id, mother]))
##TODO self.check_grandparents(individual)
# Annotate siblings:
for individual_2_id in self.individuals:
if individual_id != individual_2_id:
if self.check_siblings(individual_id, individual_2_id):
individual.siblings.add(individual_2_id)
##TODO elif self.check_cousins(individual_id, individual_2_id):
# individual.cousins.add(individual_2_id)
def check_parent(self, parent_id, father = False):
"""
Check if the parent info is correct. If an individual is not present in file raise exeption.
Input: An id that represents a parent
father = True/False
Raises SyntaxError if
The parent id is not present
The gender of the parent is wrong.
"""
self.logger.debug("Checking parent {0}".format(parent_id))
if parent_id != '0':
if parent_id not in self.individuals:
raise PedigreeError(self.family_id, parent_id,
'Parent is not in family.')
if father:
if self.individuals[parent_id].sex != 1:
raise PedigreeError(self.family_id, parent_id,
'Father is not specified as male.')
else:
if self.individuals[parent_id].sex != 2:
raise PedigreeError(self.family_id, parent_id,
'Mother is not specified as female.')
return
def check_siblings(self, individual_1_id, individual_2_id):
"""
Check if two family members are siblings.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are siblings
False if they are not siblings
"""
self.logger.debug("Checking if {0} and {1} are siblings".format(
individual_1_id, individual_2_id
))
ind_1 = self.individuals[individual_1_id]
ind_2 = self.individuals[individual_2_id]
if ((ind_1.father != '0' and ind_1.father == ind_2.father) or
(ind_1.mother != '0' and ind_1.mother == ind_2.mother)):
return True
else:
return False
def check_cousins(self, individual_1_id, individual_2_id):
"""
Check if two family members are cousins.
If two individuals share any grandparents they are cousins.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are cousins
False if they are not cousins
"""
self.logger.debug("Checking if {0} and {1} are cousins".format(
individual_1_id, individual_2_id
))
#TODO check if any of the parents are siblings
pass
def add_individual(self, individual_object):
"""
Add an individual to the family.
Arguments:
individual_object (Individual)
"""
ind_id = individual_object.individual_id
self.logger.info("Adding individual {0}".format(ind_id))
family_id = individual_object.family
if family_id != self.family_id:
raise PedigreeError(self.family, individual_object.individual_id,
"Family id of individual is not the same as family id for "\
"Family object!")
else:
self.individuals[ind_id] = individual_object
self.logger.debug("Individual {0} added to family {1}".format(
ind_id, family_id
))
return
def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype
def get_trios(self):
"""
Return the trios found in family
"""
return self.trios
def to_json(self):
"""
Return the family in json format.
The family will be represented as a list with dictionarys that
holds information for the individuals.
Returns:
list : A list with dictionaries
"""
return [self.individuals[ind].to_json() for ind in self.individuals]
def to_ped(self, outfile=None):
"""
Print the individuals of the family in ped format
The header will be the original ped header plus all headers found in
extra info of the individuals
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
if outfile:
outfile.write('\t'.join(ped_header)+'\n')
else:
print('\t'.join(ped_header))
for individual in self.to_json():
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
if outfile:
outfile.write('\t'.join(ped_info)+'\n')
else:
print('\t'.join(ped_info))
def __repr__(self):
return "Family(family_id={0}, individuals={1}, " \
"models_of_inheritance={2}".format(
self.family_id, self.individuals.keys(),
self.models_of_inheritance
)
def __str__(self):
"""Print the family members of this family"""
family = list(self.individuals.keys())
return "\t".join(family)
|
moonso/ped_parser | ped_parser/family.py | Family.add_individual | python | def add_individual(self, individual_object):
ind_id = individual_object.individual_id
self.logger.info("Adding individual {0}".format(ind_id))
family_id = individual_object.family
if family_id != self.family_id:
raise PedigreeError(self.family, individual_object.individual_id,
"Family id of individual is not the same as family id for "\
"Family object!")
else:
self.individuals[ind_id] = individual_object
self.logger.debug("Individual {0} added to family {1}".format(
ind_id, family_id
))
return | Add an individual to the family.
Arguments:
individual_object (Individual) | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L192-L212 | null | class Family(object):
"""Base class for the family parsers."""
def __init__(self, family_id, individuals = {}, models_of_inheritance=set([]),
logger=None, logfile=None, loglevel=None):
super(Family, self).__init__()
self.logger = logging.getLogger(__name__)
# Each family needs to have a family id
self.family_id = family_id
self.logger.debug("Initiating family with id:{0}".format(self.family_id))
# This is a dict with individual objects
self.individuals = individuals
self.logger.debug("Adding individuals:{0}".format(
','.join([ind for ind in self.individuals])
))
# List of models of inheritance that should be prioritized.
self.models_of_inheritance = models_of_inheritance
self.logger.debug("Adding models of inheritance:{0}".format(
','.join(self.models_of_inheritance)
)
)
#Trios are a list of sets with trios.
self.trios = []
#Duos are a list of sets with trios.
self.duos = []
# Bool if there are any relations in the family
self.no_relations = True
# Set of affected individual id:s
self.affected_individuals = set()
def family_check(self):
"""
Check if the family members break the structure of the family.
eg. nonexistent parent, wrong sex on parent etc.
Also extracts all trios found, this is of help for many at the moment
since GATK can only do phasing of trios and duos.
"""
#TODO Make some tests for these
self.logger.info("Checking family relations for {0}".format(
self.family_id)
)
for individual_id in self.individuals:
self.logger.debug("Checking individual {0}".format(individual_id))
individual = self.individuals[individual_id]
self.logger.debug("Checking if individual {0} is affected".format(
individual_id))
if individual.affected:
self.logger.debug("Found affected individual {0}".format(
individual_id)
)
self.affected_individuals.add(individual_id)
father = individual.father
mother = individual.mother
if individual.has_parents:
self.logger.debug("Individual {0} has parents".format(
individual_id))
self.no_relations = False
try:
self.check_parent(father, father=True)
self.check_parent(mother, father=False)
except PedigreeError as e:
self.logger.error(e.message)
raise e
# Check if there is a trio
if individual.has_both_parents:
self.trios.append(set([individual_id, father, mother]))
elif father != '0':
self.duos.append(set([individual_id, father]))
else:
self.duos.append(set([individual_id, mother]))
##TODO self.check_grandparents(individual)
# Annotate siblings:
for individual_2_id in self.individuals:
if individual_id != individual_2_id:
if self.check_siblings(individual_id, individual_2_id):
individual.siblings.add(individual_2_id)
##TODO elif self.check_cousins(individual_id, individual_2_id):
# individual.cousins.add(individual_2_id)
def check_parent(self, parent_id, father = False):
"""
Check if the parent info is correct. If an individual is not present in file raise exeption.
Input: An id that represents a parent
father = True/False
Raises SyntaxError if
The parent id is not present
The gender of the parent is wrong.
"""
self.logger.debug("Checking parent {0}".format(parent_id))
if parent_id != '0':
if parent_id not in self.individuals:
raise PedigreeError(self.family_id, parent_id,
'Parent is not in family.')
if father:
if self.individuals[parent_id].sex != 1:
raise PedigreeError(self.family_id, parent_id,
'Father is not specified as male.')
else:
if self.individuals[parent_id].sex != 2:
raise PedigreeError(self.family_id, parent_id,
'Mother is not specified as female.')
return
def check_siblings(self, individual_1_id, individual_2_id):
"""
Check if two family members are siblings.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are siblings
False if they are not siblings
"""
self.logger.debug("Checking if {0} and {1} are siblings".format(
individual_1_id, individual_2_id
))
ind_1 = self.individuals[individual_1_id]
ind_2 = self.individuals[individual_2_id]
if ((ind_1.father != '0' and ind_1.father == ind_2.father) or
(ind_1.mother != '0' and ind_1.mother == ind_2.mother)):
return True
else:
return False
def check_cousins(self, individual_1_id, individual_2_id):
"""
Check if two family members are cousins.
If two individuals share any grandparents they are cousins.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are cousins
False if they are not cousins
"""
self.logger.debug("Checking if {0} and {1} are cousins".format(
individual_1_id, individual_2_id
))
#TODO check if any of the parents are siblings
pass
def add_individual(self, individual_object):
"""
Add an individual to the family.
Arguments:
individual_object (Individual)
"""
ind_id = individual_object.individual_id
self.logger.info("Adding individual {0}".format(ind_id))
family_id = individual_object.family
if family_id != self.family_id:
raise PedigreeError(self.family, individual_object.individual_id,
"Family id of individual is not the same as family id for "\
"Family object!")
else:
self.individuals[ind_id] = individual_object
self.logger.debug("Individual {0} added to family {1}".format(
ind_id, family_id
))
return
def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype
def get_trios(self):
"""
Return the trios found in family
"""
return self.trios
def to_json(self):
"""
Return the family in json format.
The family will be represented as a list with dictionarys that
holds information for the individuals.
Returns:
list : A list with dictionaries
"""
return [self.individuals[ind].to_json() for ind in self.individuals]
def to_ped(self, outfile=None):
"""
Print the individuals of the family in ped format
The header will be the original ped header plus all headers found in
extra info of the individuals
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
if outfile:
outfile.write('\t'.join(ped_header)+'\n')
else:
print('\t'.join(ped_header))
for individual in self.to_json():
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
if outfile:
outfile.write('\t'.join(ped_info)+'\n')
else:
print('\t'.join(ped_info))
def __repr__(self):
return "Family(family_id={0}, individuals={1}, " \
"models_of_inheritance={2}".format(
self.family_id, self.individuals.keys(),
self.models_of_inheritance
)
def __str__(self):
"""Print the family members of this family"""
family = list(self.individuals.keys())
return "\t".join(family)
|
moonso/ped_parser | ped_parser/family.py | Family.get_phenotype | python | def get_phenotype(self, individual_id):
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype | Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L214-L230 | null | class Family(object):
"""Base class for the family parsers."""
def __init__(self, family_id, individuals = {}, models_of_inheritance=set([]),
logger=None, logfile=None, loglevel=None):
super(Family, self).__init__()
self.logger = logging.getLogger(__name__)
# Each family needs to have a family id
self.family_id = family_id
self.logger.debug("Initiating family with id:{0}".format(self.family_id))
# This is a dict with individual objects
self.individuals = individuals
self.logger.debug("Adding individuals:{0}".format(
','.join([ind for ind in self.individuals])
))
# List of models of inheritance that should be prioritized.
self.models_of_inheritance = models_of_inheritance
self.logger.debug("Adding models of inheritance:{0}".format(
','.join(self.models_of_inheritance)
)
)
#Trios are a list of sets with trios.
self.trios = []
#Duos are a list of sets with trios.
self.duos = []
# Bool if there are any relations in the family
self.no_relations = True
# Set of affected individual id:s
self.affected_individuals = set()
def family_check(self):
"""
Check if the family members break the structure of the family.
eg. nonexistent parent, wrong sex on parent etc.
Also extracts all trios found, this is of help for many at the moment
since GATK can only do phasing of trios and duos.
"""
#TODO Make some tests for these
self.logger.info("Checking family relations for {0}".format(
self.family_id)
)
for individual_id in self.individuals:
self.logger.debug("Checking individual {0}".format(individual_id))
individual = self.individuals[individual_id]
self.logger.debug("Checking if individual {0} is affected".format(
individual_id))
if individual.affected:
self.logger.debug("Found affected individual {0}".format(
individual_id)
)
self.affected_individuals.add(individual_id)
father = individual.father
mother = individual.mother
if individual.has_parents:
self.logger.debug("Individual {0} has parents".format(
individual_id))
self.no_relations = False
try:
self.check_parent(father, father=True)
self.check_parent(mother, father=False)
except PedigreeError as e:
self.logger.error(e.message)
raise e
# Check if there is a trio
if individual.has_both_parents:
self.trios.append(set([individual_id, father, mother]))
elif father != '0':
self.duos.append(set([individual_id, father]))
else:
self.duos.append(set([individual_id, mother]))
##TODO self.check_grandparents(individual)
# Annotate siblings:
for individual_2_id in self.individuals:
if individual_id != individual_2_id:
if self.check_siblings(individual_id, individual_2_id):
individual.siblings.add(individual_2_id)
##TODO elif self.check_cousins(individual_id, individual_2_id):
# individual.cousins.add(individual_2_id)
def check_parent(self, parent_id, father = False):
"""
Check if the parent info is correct. If an individual is not present in file raise exeption.
Input: An id that represents a parent
father = True/False
Raises SyntaxError if
The parent id is not present
The gender of the parent is wrong.
"""
self.logger.debug("Checking parent {0}".format(parent_id))
if parent_id != '0':
if parent_id not in self.individuals:
raise PedigreeError(self.family_id, parent_id,
'Parent is not in family.')
if father:
if self.individuals[parent_id].sex != 1:
raise PedigreeError(self.family_id, parent_id,
'Father is not specified as male.')
else:
if self.individuals[parent_id].sex != 2:
raise PedigreeError(self.family_id, parent_id,
'Mother is not specified as female.')
return
def check_siblings(self, individual_1_id, individual_2_id):
"""
Check if two family members are siblings.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are siblings
False if they are not siblings
"""
self.logger.debug("Checking if {0} and {1} are siblings".format(
individual_1_id, individual_2_id
))
ind_1 = self.individuals[individual_1_id]
ind_2 = self.individuals[individual_2_id]
if ((ind_1.father != '0' and ind_1.father == ind_2.father) or
(ind_1.mother != '0' and ind_1.mother == ind_2.mother)):
return True
else:
return False
def check_cousins(self, individual_1_id, individual_2_id):
"""
Check if two family members are cousins.
If two individuals share any grandparents they are cousins.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are cousins
False if they are not cousins
"""
self.logger.debug("Checking if {0} and {1} are cousins".format(
individual_1_id, individual_2_id
))
#TODO check if any of the parents are siblings
pass
def add_individual(self, individual_object):
"""
Add an individual to the family.
Arguments:
individual_object (Individual)
"""
ind_id = individual_object.individual_id
self.logger.info("Adding individual {0}".format(ind_id))
family_id = individual_object.family
if family_id != self.family_id:
raise PedigreeError(self.family, individual_object.individual_id,
"Family id of individual is not the same as family id for "\
"Family object!")
else:
self.individuals[ind_id] = individual_object
self.logger.debug("Individual {0} added to family {1}".format(
ind_id, family_id
))
return
def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype
def get_trios(self):
"""
Return the trios found in family
"""
return self.trios
def to_json(self):
"""
Return the family in json format.
The family will be represented as a list with dictionarys that
holds information for the individuals.
Returns:
list : A list with dictionaries
"""
return [self.individuals[ind].to_json() for ind in self.individuals]
def to_ped(self, outfile=None):
"""
Print the individuals of the family in ped format
The header will be the original ped header plus all headers found in
extra info of the individuals
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
if outfile:
outfile.write('\t'.join(ped_header)+'\n')
else:
print('\t'.join(ped_header))
for individual in self.to_json():
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
if outfile:
outfile.write('\t'.join(ped_info)+'\n')
else:
print('\t'.join(ped_info))
def __repr__(self):
return "Family(family_id={0}, individuals={1}, " \
"models_of_inheritance={2}".format(
self.family_id, self.individuals.keys(),
self.models_of_inheritance
)
def __str__(self):
"""Print the family members of this family"""
family = list(self.individuals.keys())
return "\t".join(family)
|
moonso/ped_parser | ped_parser/family.py | Family.to_ped | python | def to_ped(self, outfile=None):
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
if outfile:
outfile.write('\t'.join(ped_header)+'\n')
else:
print('\t'.join(ped_header))
for individual in self.to_json():
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
if outfile:
outfile.write('\t'.join(ped_info)+'\n')
else:
print('\t'.join(ped_info)) | Print the individuals of the family in ped format
The header will be the original ped header plus all headers found in
extra info of the individuals | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L251-L307 | [
"def to_json(self):\n \"\"\"\n Return the family in json format.\n\n The family will be represented as a list with dictionarys that\n holds information for the individuals.\n\n Returns:\n list : A list with dictionaries\n \"\"\"\n\n return [self.individuals[ind].to_json() for ind in self... | class Family(object):
"""Base class for the family parsers."""
def __init__(self, family_id, individuals = {}, models_of_inheritance=set([]),
logger=None, logfile=None, loglevel=None):
super(Family, self).__init__()
self.logger = logging.getLogger(__name__)
# Each family needs to have a family id
self.family_id = family_id
self.logger.debug("Initiating family with id:{0}".format(self.family_id))
# This is a dict with individual objects
self.individuals = individuals
self.logger.debug("Adding individuals:{0}".format(
','.join([ind for ind in self.individuals])
))
# List of models of inheritance that should be prioritized.
self.models_of_inheritance = models_of_inheritance
self.logger.debug("Adding models of inheritance:{0}".format(
','.join(self.models_of_inheritance)
)
)
#Trios are a list of sets with trios.
self.trios = []
#Duos are a list of sets with trios.
self.duos = []
# Bool if there are any relations in the family
self.no_relations = True
# Set of affected individual id:s
self.affected_individuals = set()
def family_check(self):
"""
Check if the family members break the structure of the family.
eg. nonexistent parent, wrong sex on parent etc.
Also extracts all trios found, this is of help for many at the moment
since GATK can only do phasing of trios and duos.
"""
#TODO Make some tests for these
self.logger.info("Checking family relations for {0}".format(
self.family_id)
)
for individual_id in self.individuals:
self.logger.debug("Checking individual {0}".format(individual_id))
individual = self.individuals[individual_id]
self.logger.debug("Checking if individual {0} is affected".format(
individual_id))
if individual.affected:
self.logger.debug("Found affected individual {0}".format(
individual_id)
)
self.affected_individuals.add(individual_id)
father = individual.father
mother = individual.mother
if individual.has_parents:
self.logger.debug("Individual {0} has parents".format(
individual_id))
self.no_relations = False
try:
self.check_parent(father, father=True)
self.check_parent(mother, father=False)
except PedigreeError as e:
self.logger.error(e.message)
raise e
# Check if there is a trio
if individual.has_both_parents:
self.trios.append(set([individual_id, father, mother]))
elif father != '0':
self.duos.append(set([individual_id, father]))
else:
self.duos.append(set([individual_id, mother]))
##TODO self.check_grandparents(individual)
# Annotate siblings:
for individual_2_id in self.individuals:
if individual_id != individual_2_id:
if self.check_siblings(individual_id, individual_2_id):
individual.siblings.add(individual_2_id)
##TODO elif self.check_cousins(individual_id, individual_2_id):
# individual.cousins.add(individual_2_id)
def check_parent(self, parent_id, father = False):
"""
Check if the parent info is correct. If an individual is not present in file raise exeption.
Input: An id that represents a parent
father = True/False
Raises SyntaxError if
The parent id is not present
The gender of the parent is wrong.
"""
self.logger.debug("Checking parent {0}".format(parent_id))
if parent_id != '0':
if parent_id not in self.individuals:
raise PedigreeError(self.family_id, parent_id,
'Parent is not in family.')
if father:
if self.individuals[parent_id].sex != 1:
raise PedigreeError(self.family_id, parent_id,
'Father is not specified as male.')
else:
if self.individuals[parent_id].sex != 2:
raise PedigreeError(self.family_id, parent_id,
'Mother is not specified as female.')
return
def check_siblings(self, individual_1_id, individual_2_id):
"""
Check if two family members are siblings.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are siblings
False if they are not siblings
"""
self.logger.debug("Checking if {0} and {1} are siblings".format(
individual_1_id, individual_2_id
))
ind_1 = self.individuals[individual_1_id]
ind_2 = self.individuals[individual_2_id]
if ((ind_1.father != '0' and ind_1.father == ind_2.father) or
(ind_1.mother != '0' and ind_1.mother == ind_2.mother)):
return True
else:
return False
def check_cousins(self, individual_1_id, individual_2_id):
"""
Check if two family members are cousins.
If two individuals share any grandparents they are cousins.
Arguments:
individual_1_id (str): The id of an individual
individual_2_id (str): The id of an individual
Returns:
bool : True if the individuals are cousins
False if they are not cousins
"""
self.logger.debug("Checking if {0} and {1} are cousins".format(
individual_1_id, individual_2_id
))
#TODO check if any of the parents are siblings
pass
def add_individual(self, individual_object):
"""
Add an individual to the family.
Arguments:
individual_object (Individual)
"""
ind_id = individual_object.individual_id
self.logger.info("Adding individual {0}".format(ind_id))
family_id = individual_object.family
if family_id != self.family_id:
raise PedigreeError(self.family, individual_object.individual_id,
"Family id of individual is not the same as family id for "\
"Family object!")
else:
self.individuals[ind_id] = individual_object
self.logger.debug("Individual {0} added to family {1}".format(
ind_id, family_id
))
return
def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype
def get_trios(self):
"""
Return the trios found in family
"""
return self.trios
def to_json(self):
"""
Return the family in json format.
The family will be represented as a list with dictionarys that
holds information for the individuals.
Returns:
list : A list with dictionaries
"""
return [self.individuals[ind].to_json() for ind in self.individuals]
def to_ped(self, outfile=None):
"""
Print the individuals of the family in ped format
The header will be the original ped header plus all headers found in
extra info of the individuals
"""
ped_header = [
'#FamilyID',
'IndividualID',
'PaternalID',
'MaternalID',
'Sex',
'Phenotype',
]
extra_headers = [
'InheritanceModel',
'Proband',
'Consultand',
'Alive'
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
', '.join(ped_header)
))
if outfile:
outfile.write('\t'.join(ped_header)+'\n')
else:
print('\t'.join(ped_header))
for individual in self.to_json():
ped_info = []
ped_info.append(individual['family_id'])
ped_info.append(individual['id'])
ped_info.append(individual['father'])
ped_info.append(individual['mother'])
ped_info.append(individual['sex'])
ped_info.append(individual['phenotype'])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual['extra_info'].get(header, '.'))
if outfile:
outfile.write('\t'.join(ped_info)+'\n')
else:
print('\t'.join(ped_info))
def __repr__(self):
return "Family(family_id={0}, individuals={1}, " \
"models_of_inheritance={2}".format(
self.family_id, self.individuals.keys(),
self.models_of_inheritance
)
def __str__(self):
"""Print the family members of this family"""
family = list(self.individuals.keys())
return "\t".join(family)
|
moonso/ped_parser | ped_parser/log.py | init_log | python | def init_log(logger, filename=None, loglevel=None):
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)s: %(name)s: %(message)s'
)
if loglevel:
logger.setLevel(getattr(logging, loglevel))
# We will allways print warnings and higher to stderr
ch = logging.StreamHandler()
ch.setLevel('WARNING')
ch.setFormatter(formatter)
if filename:
fi = logging.FileHandler(filename, encoding='utf-8')
if loglevel:
fi.setLevel(getattr(logging, loglevel))
fi.setFormatter(formatter)
logger.addHandler(fi)
# If no logfile is provided we print all log messages that the user has
# defined to stderr
else:
if loglevel:
ch.setLevel(getattr(logging, loglevel))
logger.addHandler(ch) | Initializes the log file in the proper format.
Arguments:
filename (str): Path to a file. Or None if logging is to
be disabled.
loglevel (str): Determines the level of the log output. | train | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/log.py#L5-L40 | null | import os
import sys
import logging
def init_log(logger, filename=None, loglevel=None):
"""
Initializes the log file in the proper format.
Arguments:
filename (str): Path to a file. Or None if logging is to
be disabled.
loglevel (str): Determines the level of the log output.
"""
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)s: %(name)s: %(message)s'
)
if loglevel:
logger.setLevel(getattr(logging, loglevel))
# We will allways print warnings and higher to stderr
ch = logging.StreamHandler()
ch.setLevel('WARNING')
ch.setFormatter(formatter)
if filename:
fi = logging.FileHandler(filename, encoding='utf-8')
if loglevel:
fi.setLevel(getattr(logging, loglevel))
fi.setFormatter(formatter)
logger.addHandler(fi)
# If no logfile is provided we print all log messages that the user has
# defined to stderr
else:
if loglevel:
ch.setLevel(getattr(logging, loglevel))
logger.addHandler(ch)
def get_log_stream(logger):
"""
Returns a log stream.
If there is a file handler this stream will be used.
If there is no logfile return the stderr log stream
Returns:
A stream to the root log file or stderr stream.
"""
file_stream = None
log_stream = None
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
file_stream = handler.stream
else:
log_stream = handler.stream
if file_stream:
return file_stream
return log_stream |
moluwole/Bast | bast/migration.py | Migration.get_config | python | def get_config():
db_type = os.environ['DB_TYPE']
db_host = os.environ['DB_HOST']
db_user = os.environ['DB_USER']
db_database = os.environ['DB_NAME']
db_password = os.environ['DB_PASSWORD']
db_prefix = os.environ['DB_PREFIX']
check = Migration.check_packages(db_type)
return check, {
db_type: {
'driver': db_type.strip(),
'host': db_host.strip(),
'database': db_database.strip(),
'user': db_user.strip(),
'password': db_password.strip(),
'prefix': db_prefix.strip()
}
} | Gets the config from the os.environ. This is used to create the config dict for use by the ORM
:return: str, dict | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/migration.py#L76-L99 | null | class Migration(Migrator):
"""
Handles the actions to be performed on the Migration files
"""
def __init__(self):
"""
Initialize the Orator Migrator Class. Check if the migration table has been created. If not, Create the
Repository
"""
check, config = self.get_config()
if not check:
print("Error Occurred")
else:
self.manager = DatabaseManager(config=config)
self.path = os.path.abspath('.') + "/database/migrations/"
self.repository = DatabaseMigrationRepository(resolver=self.manager, table='migrations')
if not self.repository.repository_exists():
self.repository.create_repository()
super().__init__(self.repository, self.manager)
def run_(self, pretend):
"""
Run the migration file
:param pretend: Determines whether to run the migration as a Simulation or not. Defaults to False
:return:
"""
self.run(self.path, pretend=pretend)
def rollback_(self, pretend):
"""
Roll Back the Last Migration
:param pretend: Determines whether to run the migration as a Simulation or not. Defaults to False
:return: int
"""
return self.rollback(self.path, pretend)
def reset_(self, pretend):
"""
Reset all the migrations that have been done
:param pretend: Determines whether to run the migration as a Simulation or not. Defaults to False
:return: int
"""
return self.reset(self.path, pretend)
@staticmethod
@staticmethod
def check_packages(db_name):
"""
Check if the driver for the user defined host is available. If it is not available, download it using PIP
:param db_name:
:return:
"""
print('Checking for required Database Driver')
reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
installed_packages = [r.decode().split('==')[0] for r in reqs.split()]
# print(installed_packages)
if db_name.lower() == 'mysql':
if 'PyMySQL' not in installed_packages:
print('Installing required Database Driver')
os.system('pip install pymysql')
if db_name.lower() == 'postgresql':
if 'psycopg2-binary' not in installed_packages:
print('Installing required Database Driver')
os.system('pip install psycopg2-binary')
return True
|
moluwole/Bast | bast/migration.py | Migration.check_packages | python | def check_packages(db_name):
print('Checking for required Database Driver')
reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
installed_packages = [r.decode().split('==')[0] for r in reqs.split()]
# print(installed_packages)
if db_name.lower() == 'mysql':
if 'PyMySQL' not in installed_packages:
print('Installing required Database Driver')
os.system('pip install pymysql')
if db_name.lower() == 'postgresql':
if 'psycopg2-binary' not in installed_packages:
print('Installing required Database Driver')
os.system('pip install psycopg2-binary')
return True | Check if the driver for the user defined host is available. If it is not available, download it using PIP
:param db_name:
:return: | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/migration.py#L102-L125 | null | class Migration(Migrator):
"""
Handles the actions to be performed on the Migration files
"""
def __init__(self):
"""
Initialize the Orator Migrator Class. Check if the migration table has been created. If not, Create the
Repository
"""
check, config = self.get_config()
if not check:
print("Error Occurred")
else:
self.manager = DatabaseManager(config=config)
self.path = os.path.abspath('.') + "/database/migrations/"
self.repository = DatabaseMigrationRepository(resolver=self.manager, table='migrations')
if not self.repository.repository_exists():
self.repository.create_repository()
super().__init__(self.repository, self.manager)
def run_(self, pretend):
"""
Run the migration file
:param pretend: Determines whether to run the migration as a Simulation or not. Defaults to False
:return:
"""
self.run(self.path, pretend=pretend)
def rollback_(self, pretend):
"""
Roll Back the Last Migration
:param pretend: Determines whether to run the migration as a Simulation or not. Defaults to False
:return: int
"""
return self.rollback(self.path, pretend)
def reset_(self, pretend):
"""
Reset all the migrations that have been done
:param pretend: Determines whether to run the migration as a Simulation or not. Defaults to False
:return: int
"""
return self.reset(self.path, pretend)
@staticmethod
def get_config():
"""
Gets the config from the os.environ. This is used to create the config dict for use by the ORM
:return: str, dict
"""
db_type = os.environ['DB_TYPE']
db_host = os.environ['DB_HOST']
db_user = os.environ['DB_USER']
db_database = os.environ['DB_NAME']
db_password = os.environ['DB_PASSWORD']
db_prefix = os.environ['DB_PREFIX']
check = Migration.check_packages(db_type)
return check, {
db_type: {
'driver': db_type.strip(),
'host': db_host.strip(),
'database': db_database.strip(),
'user': db_user.strip(),
'password': db_password.strip(),
'prefix': db_prefix.strip()
}
}
@staticmethod
|
moluwole/Bast | bast/cli.py | controller_creatr | python | def controller_creatr(filename):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:controller command')
return
path = os.path.abspath('.') + '/controller'
if not os.path.exists(path):
os.makedirs(path)
# if os.path.isfile(path + )
file_name = str(filename + '.py')
if os.path.isfile(path+"/" + file_name):
click.echo(Fore.WHITE + Back.RED + "ERROR: Controller file exists")
return
controller_file = open(os.path.abspath('.') + '/controller/' + file_name, 'w+')
compose = "from bast import Controller\n\nclass " + filename + "(Controller):\n pass"
controller_file.write(compose)
controller_file.close()
click.echo(Fore.GREEN + "Controller " + filename + " created successfully") | Name of the controller file to be created | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/cli.py#L41-L61 | [
"def check():\n server = os.path.abspath('.') + \"/server.py\"\n config = os.path.abspath('.') + \"/config\"\n\n if os.path.exists(server) and os.path.exists(config):\n return True\n\n return False\n"
] | """
Bast Web Framework
(c) Majiyagbe Oluwole <oluwole564@gmail.com>
For full copyright and license information, view the LICENSE distributed with the Source Code
"""
import os
from git import Repo
from .bast import __version__
from .migration import CreateMigration, Migration
import shutil
import click
import re
from subprocess import call
from base64 import b64encode
from colorama import init, Fore, Back
""" Handles the CLI commands and their respective Arguments """
def check():
server = os.path.abspath('.') + "/server.py"
config = os.path.abspath('.') + "/config"
if os.path.exists(server) and os.path.exists(config):
return True
return False
@click.group()
@click.version_option(__version__)
def main():
init(autoreset=True)
pass
@main.command('create:controller', short_help='Creates a Controller File')
@click.argument('filename', required=1)
@main.command('create:middleware', short_help="Creates a Middleware")
@click.argument('filename', required=1)
def middleware_creatr(filename):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:middleware command')
return
path = os.path.abspath('.') + '/middleware'
if not os.path.exists(path):
os.makedirs(path)
file_name = str(filename) + '.py'
middleware_file = open(os.path.abspath('.') + '/middleware/' + file_name, 'w+')
compose = "class " + filename + ":\n def handle(self, request):\n return True"
middleware_file.write(compose)
middleware_file.close()
click.echo(Fore.GREEN + "Middleware " + filename + " created successfully")
@main.command('create:view', short_help="Create a View File")
@click.argument('filename', required=1)
def view_creatr(filename):
"""Name of the View File to be created"""
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:view command')
return
path = os.path.abspath('.') + '/public/templates'
if not os.path.exists(path):
os.makedirs(path)
filename_ = str(filename + ".html").lower()
view_file = open(path + "/" + filename_, 'w+')
view_file.write("")
view_file.close()
click.echo(Fore.GREEN + "View file " + filename_ + "created in public/template folder")
@main.command('generate:key', short_help="Generate the APP KEY")
@click.argument('path', required=1)
def make_key(path):
env_path = os.path.join(path, '.env')
if not os.path.isfile(env_path):
click.echo(Fore.RED + ".env file not found. Scaffold a project to generate a key")
return
key = b64encode(os.urandom(32)).decode('utf-8')
with open(env_path, 'r') as file:
env_data = file.readlines()
for line_number, line in enumerate(env_data):
if line.startswith('APP_KEY='):
env_data[line_number] = 'APP_KEY={0}\n'.format(key)
break
with open(env_path, 'w') as file:
file.writelines(env_data)
click.echo(Fore.GREEN + "Key Generated successfully: " + key)
@main.command('run', short_help="Run your Bast Server")
@click.option('--serverfile', help="Name of the file to run", default='server.py')
def run(serverfile):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to use the "run" command')
return
call(['python', serverfile])
@main.command('new', short_help="Create a new Bast Project")
@click.argument('projectname', required=1)
def create_new(projectname):
"""Name of the project"""
git_url = "https://github.com/moluwole/Bast_skeleton"
path = os.path.abspath('.') + "/" + projectname
if not os.path.exists(path):
os.makedirs(path)
click.echo(Fore.GREEN + ' ___ ___ __________')
click.echo(Fore.GREEN + ' / _ )/ _ | / __/_ __/')
click.echo(Fore.GREEN + ' / _ / __ |_\ \ / /')
click.echo(Fore.GREEN + '/____/_/ |_/___/ /_/')
click.echo(Fore.GREEN + "Creating Project at %s.... " % path)
click.echo(Fore.GREEN + "Pulling Project Skeleton from Repo")
try:
Repo.clone_from(git_url, path)
click.echo(Fore.GREEN + "Setting up project")
shutil.rmtree(path + "/.git")
if not os.path.exists('/.env'):
shutil.copy(path + '/.env.example', path + '/.env')
env_file = path + "/.env"
if not os.path.isfile(env_file):
shutil.copy('.env.example', '.env')
call(['panther', 'generate:key', path])
click.echo(Fore.GREEN + "New Bast Project created at %s " % path)
except Exception as e:
click.echo(Fore.RED + "An error occurred creating a new project. Try Again.\n Reason: {}".format(e))
@main.command('create:migration', short_help="Create a migration file")
@click.argument('migration_file', required=1)
@click.option('--create', default=True, help="Create the table. OPTIONAL")
@click.option('--table', default=None, help="Name of the table to be created. OPTIONAL")
def migration_creatr(migration_file, create, table):
"""Name of the migration file"""
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:migration command')
return
migration = CreateMigration()
if table is None:
table = snake_case(migration_file)
file = migration.create_file(snake_case(migration_file), table=table, create=create)
click.echo(Fore.GREEN + 'Migration file created at %s' % file)
@main.command('migration:run', short_help="Run Migration")
@click.option('--pretend', default=False, help="Simulates the Migration")
def migration_run(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:run command')
return
migration = Migration()
migration.run_(pretend)
click.echo(Fore.GREEN + 'Migration Run successful')
@main.command('migration:rollback', short_help="Roll Back last Migration")
@click.option('--pretend', default=False, help="Simulates the Migration")
def migration_rollback(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:rollback command')
return
migration = Migration()
count = migration.rollback_(pretend)
click.echo(Fore.GREEN + 'Roll Back Executed. %s migrations rolled back' % count)
@main.command('migration:reset', short_help="Reset Migration")
@click.option('--pretend', default=False, help="Simulate the Migration")
def migration_reset(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:rollback command')
return
migration = Migration()
count = migration.reset_(pretend)
click.echo(Fore.GREEN + 'Migration Reset successful. %s migrations has been reset' % count)
@main.command('create:model', short_help="Create Model File")
@click.argument('model_file', required=1)
@click.option('--migration', default=True, help="Generate Migration File Also")
def model_creatr(model_file, migration):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:model command')
return
filename = snake_case(model_file) + ".py"
directory_path = os.path.abspath('.') + '/models/'
if not os.path.exists(directory_path):
os.makedirs(directory_path)
path = os.path.abspath('.') + '/models/' + filename
file_open = open(path, 'w+')
compose = 'from bast import Models\n\nclass %s(Models):\n __table__ = \'%s\'' \
% (model_file, snake_case(model_file))
file_open.write(compose)
file_open.close()
if migration:
migrate = CreateMigration()
migrate.create_file(name=snake_case(model_file), table=snake_case(model_file), create=True)
click.echo(Fore.GREEN + '%s has been created at /models' % filename)
def snake_case(string_name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
moluwole/Bast | bast/cli.py | view_creatr | python | def view_creatr(filename):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:view command')
return
path = os.path.abspath('.') + '/public/templates'
if not os.path.exists(path):
os.makedirs(path)
filename_ = str(filename + ".html").lower()
view_file = open(path + "/" + filename_, 'w+')
view_file.write("")
view_file.close()
click.echo(Fore.GREEN + "View file " + filename_ + "created in public/template folder") | Name of the View File to be created | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/cli.py#L85-L99 | [
"def check():\n server = os.path.abspath('.') + \"/server.py\"\n config = os.path.abspath('.') + \"/config\"\n\n if os.path.exists(server) and os.path.exists(config):\n return True\n\n return False\n"
] | """
Bast Web Framework
(c) Majiyagbe Oluwole <oluwole564@gmail.com>
For full copyright and license information, view the LICENSE distributed with the Source Code
"""
import os
from git import Repo
from .bast import __version__
from .migration import CreateMigration, Migration
import shutil
import click
import re
from subprocess import call
from base64 import b64encode
from colorama import init, Fore, Back
""" Handles the CLI commands and their respective Arguments """
def check():
server = os.path.abspath('.') + "/server.py"
config = os.path.abspath('.') + "/config"
if os.path.exists(server) and os.path.exists(config):
return True
return False
@click.group()
@click.version_option(__version__)
def main():
init(autoreset=True)
pass
@main.command('create:controller', short_help='Creates a Controller File')
@click.argument('filename', required=1)
def controller_creatr(filename):
"""Name of the controller file to be created"""
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:controller command')
return
path = os.path.abspath('.') + '/controller'
if not os.path.exists(path):
os.makedirs(path)
# if os.path.isfile(path + )
file_name = str(filename + '.py')
if os.path.isfile(path+"/" + file_name):
click.echo(Fore.WHITE + Back.RED + "ERROR: Controller file exists")
return
controller_file = open(os.path.abspath('.') + '/controller/' + file_name, 'w+')
compose = "from bast import Controller\n\nclass " + filename + "(Controller):\n pass"
controller_file.write(compose)
controller_file.close()
click.echo(Fore.GREEN + "Controller " + filename + " created successfully")
@main.command('create:middleware', short_help="Creates a Middleware")
@click.argument('filename', required=1)
def middleware_creatr(filename):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:middleware command')
return
path = os.path.abspath('.') + '/middleware'
if not os.path.exists(path):
os.makedirs(path)
file_name = str(filename) + '.py'
middleware_file = open(os.path.abspath('.') + '/middleware/' + file_name, 'w+')
compose = "class " + filename + ":\n def handle(self, request):\n return True"
middleware_file.write(compose)
middleware_file.close()
click.echo(Fore.GREEN + "Middleware " + filename + " created successfully")
@main.command('create:view', short_help="Create a View File")
@click.argument('filename', required=1)
@main.command('generate:key', short_help="Generate the APP KEY")
@click.argument('path', required=1)
def make_key(path):
env_path = os.path.join(path, '.env')
if not os.path.isfile(env_path):
click.echo(Fore.RED + ".env file not found. Scaffold a project to generate a key")
return
key = b64encode(os.urandom(32)).decode('utf-8')
with open(env_path, 'r') as file:
env_data = file.readlines()
for line_number, line in enumerate(env_data):
if line.startswith('APP_KEY='):
env_data[line_number] = 'APP_KEY={0}\n'.format(key)
break
with open(env_path, 'w') as file:
file.writelines(env_data)
click.echo(Fore.GREEN + "Key Generated successfully: " + key)
@main.command('run', short_help="Run your Bast Server")
@click.option('--serverfile', help="Name of the file to run", default='server.py')
def run(serverfile):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to use the "run" command')
return
call(['python', serverfile])
@main.command('new', short_help="Create a new Bast Project")
@click.argument('projectname', required=1)
def create_new(projectname):
"""Name of the project"""
git_url = "https://github.com/moluwole/Bast_skeleton"
path = os.path.abspath('.') + "/" + projectname
if not os.path.exists(path):
os.makedirs(path)
click.echo(Fore.GREEN + ' ___ ___ __________')
click.echo(Fore.GREEN + ' / _ )/ _ | / __/_ __/')
click.echo(Fore.GREEN + ' / _ / __ |_\ \ / /')
click.echo(Fore.GREEN + '/____/_/ |_/___/ /_/')
click.echo(Fore.GREEN + "Creating Project at %s.... " % path)
click.echo(Fore.GREEN + "Pulling Project Skeleton from Repo")
try:
Repo.clone_from(git_url, path)
click.echo(Fore.GREEN + "Setting up project")
shutil.rmtree(path + "/.git")
if not os.path.exists('/.env'):
shutil.copy(path + '/.env.example', path + '/.env')
env_file = path + "/.env"
if not os.path.isfile(env_file):
shutil.copy('.env.example', '.env')
call(['panther', 'generate:key', path])
click.echo(Fore.GREEN + "New Bast Project created at %s " % path)
except Exception as e:
click.echo(Fore.RED + "An error occurred creating a new project. Try Again.\n Reason: {}".format(e))
@main.command('create:migration', short_help="Create a migration file")
@click.argument('migration_file', required=1)
@click.option('--create', default=True, help="Create the table. OPTIONAL")
@click.option('--table', default=None, help="Name of the table to be created. OPTIONAL")
def migration_creatr(migration_file, create, table):
"""Name of the migration file"""
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:migration command')
return
migration = CreateMigration()
if table is None:
table = snake_case(migration_file)
file = migration.create_file(snake_case(migration_file), table=table, create=create)
click.echo(Fore.GREEN + 'Migration file created at %s' % file)
@main.command('migration:run', short_help="Run Migration")
@click.option('--pretend', default=False, help="Simulates the Migration")
def migration_run(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:run command')
return
migration = Migration()
migration.run_(pretend)
click.echo(Fore.GREEN + 'Migration Run successful')
@main.command('migration:rollback', short_help="Roll Back last Migration")
@click.option('--pretend', default=False, help="Simulates the Migration")
def migration_rollback(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:rollback command')
return
migration = Migration()
count = migration.rollback_(pretend)
click.echo(Fore.GREEN + 'Roll Back Executed. %s migrations rolled back' % count)
@main.command('migration:reset', short_help="Reset Migration")
@click.option('--pretend', default=False, help="Simulate the Migration")
def migration_reset(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:rollback command')
return
migration = Migration()
count = migration.reset_(pretend)
click.echo(Fore.GREEN + 'Migration Reset successful. %s migrations has been reset' % count)
@main.command('create:model', short_help="Create Model File")
@click.argument('model_file', required=1)
@click.option('--migration', default=True, help="Generate Migration File Also")
def model_creatr(model_file, migration):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:model command')
return
filename = snake_case(model_file) + ".py"
directory_path = os.path.abspath('.') + '/models/'
if not os.path.exists(directory_path):
os.makedirs(directory_path)
path = os.path.abspath('.') + '/models/' + filename
file_open = open(path, 'w+')
compose = 'from bast import Models\n\nclass %s(Models):\n __table__ = \'%s\'' \
% (model_file, snake_case(model_file))
file_open.write(compose)
file_open.close()
if migration:
migrate = CreateMigration()
migrate.create_file(name=snake_case(model_file), table=snake_case(model_file), create=True)
click.echo(Fore.GREEN + '%s has been created at /models' % filename)
def snake_case(string_name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
moluwole/Bast | bast/cli.py | create_new | python | def create_new(projectname):
git_url = "https://github.com/moluwole/Bast_skeleton"
path = os.path.abspath('.') + "/" + projectname
if not os.path.exists(path):
os.makedirs(path)
click.echo(Fore.GREEN + ' ___ ___ __________')
click.echo(Fore.GREEN + ' / _ )/ _ | / __/_ __/')
click.echo(Fore.GREEN + ' / _ / __ |_\ \ / /')
click.echo(Fore.GREEN + '/____/_/ |_/___/ /_/')
click.echo(Fore.GREEN + "Creating Project at %s.... " % path)
click.echo(Fore.GREEN + "Pulling Project Skeleton from Repo")
try:
Repo.clone_from(git_url, path)
click.echo(Fore.GREEN + "Setting up project")
shutil.rmtree(path + "/.git")
if not os.path.exists('/.env'):
shutil.copy(path + '/.env.example', path + '/.env')
env_file = path + "/.env"
if not os.path.isfile(env_file):
shutil.copy('.env.example', '.env')
call(['panther', 'generate:key', path])
click.echo(Fore.GREEN + "New Bast Project created at %s " % path)
except Exception as e:
click.echo(Fore.RED + "An error occurred creating a new project. Try Again.\n Reason: {}".format(e)) | Name of the project | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/cli.py#L137-L168 | null | """
Bast Web Framework
(c) Majiyagbe Oluwole <oluwole564@gmail.com>
For full copyright and license information, view the LICENSE distributed with the Source Code
"""
import os
from git import Repo
from .bast import __version__
from .migration import CreateMigration, Migration
import shutil
import click
import re
from subprocess import call
from base64 import b64encode
from colorama import init, Fore, Back
""" Handles the CLI commands and their respective Arguments """
def check():
server = os.path.abspath('.') + "/server.py"
config = os.path.abspath('.') + "/config"
if os.path.exists(server) and os.path.exists(config):
return True
return False
@click.group()
@click.version_option(__version__)
def main():
init(autoreset=True)
pass
@main.command('create:controller', short_help='Creates a Controller File')
@click.argument('filename', required=1)
def controller_creatr(filename):
"""Name of the controller file to be created"""
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:controller command')
return
path = os.path.abspath('.') + '/controller'
if not os.path.exists(path):
os.makedirs(path)
# if os.path.isfile(path + )
file_name = str(filename + '.py')
if os.path.isfile(path+"/" + file_name):
click.echo(Fore.WHITE + Back.RED + "ERROR: Controller file exists")
return
controller_file = open(os.path.abspath('.') + '/controller/' + file_name, 'w+')
compose = "from bast import Controller\n\nclass " + filename + "(Controller):\n pass"
controller_file.write(compose)
controller_file.close()
click.echo(Fore.GREEN + "Controller " + filename + " created successfully")
@main.command('create:middleware', short_help="Creates a Middleware")
@click.argument('filename', required=1)
def middleware_creatr(filename):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:middleware command')
return
path = os.path.abspath('.') + '/middleware'
if not os.path.exists(path):
os.makedirs(path)
file_name = str(filename) + '.py'
middleware_file = open(os.path.abspath('.') + '/middleware/' + file_name, 'w+')
compose = "class " + filename + ":\n def handle(self, request):\n return True"
middleware_file.write(compose)
middleware_file.close()
click.echo(Fore.GREEN + "Middleware " + filename + " created successfully")
@main.command('create:view', short_help="Create a View File")
@click.argument('filename', required=1)
def view_creatr(filename):
"""Name of the View File to be created"""
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:view command')
return
path = os.path.abspath('.') + '/public/templates'
if not os.path.exists(path):
os.makedirs(path)
filename_ = str(filename + ".html").lower()
view_file = open(path + "/" + filename_, 'w+')
view_file.write("")
view_file.close()
click.echo(Fore.GREEN + "View file " + filename_ + "created in public/template folder")
@main.command('generate:key', short_help="Generate the APP KEY")
@click.argument('path', required=1)
def make_key(path):
env_path = os.path.join(path, '.env')
if not os.path.isfile(env_path):
click.echo(Fore.RED + ".env file not found. Scaffold a project to generate a key")
return
key = b64encode(os.urandom(32)).decode('utf-8')
with open(env_path, 'r') as file:
env_data = file.readlines()
for line_number, line in enumerate(env_data):
if line.startswith('APP_KEY='):
env_data[line_number] = 'APP_KEY={0}\n'.format(key)
break
with open(env_path, 'w') as file:
file.writelines(env_data)
click.echo(Fore.GREEN + "Key Generated successfully: " + key)
@main.command('run', short_help="Run your Bast Server")
@click.option('--serverfile', help="Name of the file to run", default='server.py')
def run(serverfile):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to use the "run" command')
return
call(['python', serverfile])
@main.command('new', short_help="Create a new Bast Project")
@click.argument('projectname', required=1)
@main.command('create:migration', short_help="Create a migration file")
@click.argument('migration_file', required=1)
@click.option('--create', default=True, help="Create the table. OPTIONAL")
@click.option('--table', default=None, help="Name of the table to be created. OPTIONAL")
def migration_creatr(migration_file, create, table):
"""Name of the migration file"""
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:migration command')
return
migration = CreateMigration()
if table is None:
table = snake_case(migration_file)
file = migration.create_file(snake_case(migration_file), table=table, create=create)
click.echo(Fore.GREEN + 'Migration file created at %s' % file)
@main.command('migration:run', short_help="Run Migration")
@click.option('--pretend', default=False, help="Simulates the Migration")
def migration_run(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:run command')
return
migration = Migration()
migration.run_(pretend)
click.echo(Fore.GREEN + 'Migration Run successful')
@main.command('migration:rollback', short_help="Roll Back last Migration")
@click.option('--pretend', default=False, help="Simulates the Migration")
def migration_rollback(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:rollback command')
return
migration = Migration()
count = migration.rollback_(pretend)
click.echo(Fore.GREEN + 'Roll Back Executed. %s migrations rolled back' % count)
@main.command('migration:reset', short_help="Reset Migration")
@click.option('--pretend', default=False, help="Simulate the Migration")
def migration_reset(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:rollback command')
return
migration = Migration()
count = migration.reset_(pretend)
click.echo(Fore.GREEN + 'Migration Reset successful. %s migrations has been reset' % count)
@main.command('create:model', short_help="Create Model File")
@click.argument('model_file', required=1)
@click.option('--migration', default=True, help="Generate Migration File Also")
def model_creatr(model_file, migration):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:model command')
return
filename = snake_case(model_file) + ".py"
directory_path = os.path.abspath('.') + '/models/'
if not os.path.exists(directory_path):
os.makedirs(directory_path)
path = os.path.abspath('.') + '/models/' + filename
file_open = open(path, 'w+')
compose = 'from bast import Models\n\nclass %s(Models):\n __table__ = \'%s\'' \
% (model_file, snake_case(model_file))
file_open.write(compose)
file_open.close()
if migration:
migrate = CreateMigration()
migrate.create_file(name=snake_case(model_file), table=snake_case(model_file), create=True)
click.echo(Fore.GREEN + '%s has been created at /models' % filename)
def snake_case(string_name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
moluwole/Bast | bast/cli.py | migration_creatr | python | def migration_creatr(migration_file, create, table):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:migration command')
return
migration = CreateMigration()
if table is None:
table = snake_case(migration_file)
file = migration.create_file(snake_case(migration_file), table=table, create=create)
click.echo(Fore.GREEN + 'Migration file created at %s' % file) | Name of the migration file | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/cli.py#L175-L185 | [
"def check():\n server = os.path.abspath('.') + \"/server.py\"\n config = os.path.abspath('.') + \"/config\"\n\n if os.path.exists(server) and os.path.exists(config):\n return True\n\n return False\n"
] | """
Bast Web Framework
(c) Majiyagbe Oluwole <oluwole564@gmail.com>
For full copyright and license information, view the LICENSE distributed with the Source Code
"""
import os
from git import Repo
from .bast import __version__
from .migration import CreateMigration, Migration
import shutil
import click
import re
from subprocess import call
from base64 import b64encode
from colorama import init, Fore, Back
""" Handles the CLI commands and their respective Arguments """
def check():
server = os.path.abspath('.') + "/server.py"
config = os.path.abspath('.') + "/config"
if os.path.exists(server) and os.path.exists(config):
return True
return False
@click.group()
@click.version_option(__version__)
def main():
init(autoreset=True)
pass
@main.command('create:controller', short_help='Creates a Controller File')
@click.argument('filename', required=1)
def controller_creatr(filename):
"""Name of the controller file to be created"""
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:controller command')
return
path = os.path.abspath('.') + '/controller'
if not os.path.exists(path):
os.makedirs(path)
# if os.path.isfile(path + )
file_name = str(filename + '.py')
if os.path.isfile(path+"/" + file_name):
click.echo(Fore.WHITE + Back.RED + "ERROR: Controller file exists")
return
controller_file = open(os.path.abspath('.') + '/controller/' + file_name, 'w+')
compose = "from bast import Controller\n\nclass " + filename + "(Controller):\n pass"
controller_file.write(compose)
controller_file.close()
click.echo(Fore.GREEN + "Controller " + filename + " created successfully")
@main.command('create:middleware', short_help="Creates a Middleware")
@click.argument('filename', required=1)
def middleware_creatr(filename):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:middleware command')
return
path = os.path.abspath('.') + '/middleware'
if not os.path.exists(path):
os.makedirs(path)
file_name = str(filename) + '.py'
middleware_file = open(os.path.abspath('.') + '/middleware/' + file_name, 'w+')
compose = "class " + filename + ":\n def handle(self, request):\n return True"
middleware_file.write(compose)
middleware_file.close()
click.echo(Fore.GREEN + "Middleware " + filename + " created successfully")
@main.command('create:view', short_help="Create a View File")
@click.argument('filename', required=1)
def view_creatr(filename):
"""Name of the View File to be created"""
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:view command')
return
path = os.path.abspath('.') + '/public/templates'
if not os.path.exists(path):
os.makedirs(path)
filename_ = str(filename + ".html").lower()
view_file = open(path + "/" + filename_, 'w+')
view_file.write("")
view_file.close()
click.echo(Fore.GREEN + "View file " + filename_ + "created in public/template folder")
@main.command('generate:key', short_help="Generate the APP KEY")
@click.argument('path', required=1)
def make_key(path):
env_path = os.path.join(path, '.env')
if not os.path.isfile(env_path):
click.echo(Fore.RED + ".env file not found. Scaffold a project to generate a key")
return
key = b64encode(os.urandom(32)).decode('utf-8')
with open(env_path, 'r') as file:
env_data = file.readlines()
for line_number, line in enumerate(env_data):
if line.startswith('APP_KEY='):
env_data[line_number] = 'APP_KEY={0}\n'.format(key)
break
with open(env_path, 'w') as file:
file.writelines(env_data)
click.echo(Fore.GREEN + "Key Generated successfully: " + key)
@main.command('run', short_help="Run your Bast Server")
@click.option('--serverfile', help="Name of the file to run", default='server.py')
def run(serverfile):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to use the "run" command')
return
call(['python', serverfile])
@main.command('new', short_help="Create a new Bast Project")
@click.argument('projectname', required=1)
def create_new(projectname):
"""Name of the project"""
git_url = "https://github.com/moluwole/Bast_skeleton"
path = os.path.abspath('.') + "/" + projectname
if not os.path.exists(path):
os.makedirs(path)
click.echo(Fore.GREEN + ' ___ ___ __________')
click.echo(Fore.GREEN + ' / _ )/ _ | / __/_ __/')
click.echo(Fore.GREEN + ' / _ / __ |_\ \ / /')
click.echo(Fore.GREEN + '/____/_/ |_/___/ /_/')
click.echo(Fore.GREEN + "Creating Project at %s.... " % path)
click.echo(Fore.GREEN + "Pulling Project Skeleton from Repo")
try:
Repo.clone_from(git_url, path)
click.echo(Fore.GREEN + "Setting up project")
shutil.rmtree(path + "/.git")
if not os.path.exists('/.env'):
shutil.copy(path + '/.env.example', path + '/.env')
env_file = path + "/.env"
if not os.path.isfile(env_file):
shutil.copy('.env.example', '.env')
call(['panther', 'generate:key', path])
click.echo(Fore.GREEN + "New Bast Project created at %s " % path)
except Exception as e:
click.echo(Fore.RED + "An error occurred creating a new project. Try Again.\n Reason: {}".format(e))
@main.command('create:migration', short_help="Create a migration file")
@click.argument('migration_file', required=1)
@click.option('--create', default=True, help="Create the table. OPTIONAL")
@click.option('--table', default=None, help="Name of the table to be created. OPTIONAL")
@main.command('migration:run', short_help="Run Migration")
@click.option('--pretend', default=False, help="Simulates the Migration")
def migration_run(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:run command')
return
migration = Migration()
migration.run_(pretend)
click.echo(Fore.GREEN + 'Migration Run successful')
@main.command('migration:rollback', short_help="Roll Back last Migration")
@click.option('--pretend', default=False, help="Simulates the Migration")
def migration_rollback(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:rollback command')
return
migration = Migration()
count = migration.rollback_(pretend)
click.echo(Fore.GREEN + 'Roll Back Executed. %s migrations rolled back' % count)
@main.command('migration:reset', short_help="Reset Migration")
@click.option('--pretend', default=False, help="Simulate the Migration")
def migration_reset(pretend):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the migration:rollback command')
return
migration = Migration()
count = migration.reset_(pretend)
click.echo(Fore.GREEN + 'Migration Reset successful. %s migrations has been reset' % count)
@main.command('create:model', short_help="Create Model File")
@click.argument('model_file', required=1)
@click.option('--migration', default=True, help="Generate Migration File Also")
def model_creatr(model_file, migration):
if not check():
click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:model command')
return
filename = snake_case(model_file) + ".py"
directory_path = os.path.abspath('.') + '/models/'
if not os.path.exists(directory_path):
os.makedirs(directory_path)
path = os.path.abspath('.') + '/models/' + filename
file_open = open(path, 'w+')
compose = 'from bast import Models\n\nclass %s(Models):\n __table__ = \'%s\'' \
% (model_file, snake_case(model_file))
file_open.write(compose)
file_open.close()
if migration:
migrate = CreateMigration()
migrate.create_file(name=snake_case(model_file), table=snake_case(model_file), create=True)
click.echo(Fore.GREEN + '%s has been created at /models' % filename)
def snake_case(string_name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
moluwole/Bast | bast/controller.py | Controller.write_error | python | def write_error(self, status_code, **kwargs):
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content) | Handle Exceptions from the server. Formats the HTML into readable form | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L37-L51 | null | class Controller(RequestHandler, TemplateRendering):
method = None
middleware = None
providers = {}
request_type = None
def __init__(self, application, request, **kwargs):
super(Controller, self).__init__(application, request, **kwargs)
self.request = request
self.application = application
self.session_driver = os.getenv("SESSION")
self.session = Bast.session['session']
def view(self, template_name, kwargs=None):
"""
Used to render template to view
Sample usage
+++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
self.view('index.html')
"""
if kwargs is None:
kwargs = dict()
self.add_('session', self.session)
content = self.render_template(template_name, **kwargs)
self.write(content)
def data_received(self, chunk):
pass
def __run_middleware__(self, middleware_list):
"""
Gets the middleware attached to the route and executes it before the route is called. Middlewares in Bast are run before the
Controller Logic is executed. Returns true once it has been run successfully
"""
middleware_location = 'middleware'
return_value = False
try:
for func in middleware_list:
middleware_func = importlib.import_module('{0}.'.format(middleware_location) + func)
if hasattr(middleware_func, func):
class_name = getattr(middleware_func, func)
handle = getattr(class_name, 'handle')
return_value = handle(class_name, self)
return return_value
except Exception as e:
print("There is an Error in your Middleware ", e)
return return_value
def initialize(self, method, middleware, request_type):
"""
Overridden initialize method from Tornado. Assigns the controller method and middleware attached to the route being executed
to global variables to be used
"""
self.method = method
self.middleware = middleware
self.request_type = request_type
def only(self, arguments):
"""
returns the key, value pair of the arguments passed as a dict object
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.only(['username'])
Returns only the argument username and assigns it to the data variable.
"""
data = {}
if not isinstance(arguments, list):
arguments = list(arguments)
for i in arguments:
data[i] = self.get_argument(i)
return data
def all(self):
"""
Returns all the arguments passed with the request
Sample Usage
++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.all()
Returns a dictionary of all the request arguments
"""
data = {}
args = self.request.arguments
for key, value in args.items():
data[key] = self.get_argument(key)
return data
def except_(self, arguments):
"""
returns the arguments passed to the route except that set by user
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.except_(['arg_name'])
Returns a dictionary of all arguments except for that provided by as ``arg_name``
"""
if not isinstance(arguments, list):
arguments = list(arguments)
args = self.request.arguments
data = {}
for key, value in args.items():
if key not in arguments:
data[key] = self.get_argument(key)
return data
def json(self, data):
"""
Encodes the dictionary being passed to JSON and sets the Header to application/json
"""
self.write(json_.encode(data))
self.set_header('Content-type', 'application/json')
@coroutine
def get(self, *args, **kwargs):
if self.request_type is not 'GET':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def post(self, *args, **kwargs):
if self.request_type is not 'POST':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def put(self, *args, **kwargs):
if self.request_type is not 'PUT':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def delete(self, *args, **kwargs):
if self.request_type is not 'DELETE':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
def get_argument(self, name, default=None, strip=True):
"""
Returns the value of the argument with the given name.
If default is not provided, returns ``None``
If the argument appears in the url more than once, we return the last value.
The returned value is always unicode
"""
return self._get_argument(name, default, self.request.arguments, strip)[name]
def headers(self):
"""
Returns all headers associated with the request
"""
return self.request.headers
def header(self, param):
"""
Returns the header specified by the key provided
"""
return self.request.headers.get(param)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is None:
return None
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
v = self._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
v = {name: v}
values.append(v)
return values
|
moluwole/Bast | bast/controller.py | Controller.view | python | def view(self, template_name, kwargs=None):
if kwargs is None:
kwargs = dict()
self.add_('session', self.session)
content = self.render_template(template_name, **kwargs)
self.write(content) | Used to render template to view
Sample usage
+++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
self.view('index.html') | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L53-L73 | null | class Controller(RequestHandler, TemplateRendering):
method = None
middleware = None
providers = {}
request_type = None
def __init__(self, application, request, **kwargs):
super(Controller, self).__init__(application, request, **kwargs)
self.request = request
self.application = application
self.session_driver = os.getenv("SESSION")
self.session = Bast.session['session']
def write_error(self, status_code, **kwargs):
"""
Handle Exceptions from the server. Formats the HTML into readable form
"""
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content)
def data_received(self, chunk):
pass
def __run_middleware__(self, middleware_list):
"""
Gets the middleware attached to the route and executes it before the route is called. Middlewares in Bast are run before the
Controller Logic is executed. Returns true once it has been run successfully
"""
middleware_location = 'middleware'
return_value = False
try:
for func in middleware_list:
middleware_func = importlib.import_module('{0}.'.format(middleware_location) + func)
if hasattr(middleware_func, func):
class_name = getattr(middleware_func, func)
handle = getattr(class_name, 'handle')
return_value = handle(class_name, self)
return return_value
except Exception as e:
print("There is an Error in your Middleware ", e)
return return_value
def initialize(self, method, middleware, request_type):
"""
Overridden initialize method from Tornado. Assigns the controller method and middleware attached to the route being executed
to global variables to be used
"""
self.method = method
self.middleware = middleware
self.request_type = request_type
def only(self, arguments):
"""
returns the key, value pair of the arguments passed as a dict object
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.only(['username'])
Returns only the argument username and assigns it to the data variable.
"""
data = {}
if not isinstance(arguments, list):
arguments = list(arguments)
for i in arguments:
data[i] = self.get_argument(i)
return data
def all(self):
"""
Returns all the arguments passed with the request
Sample Usage
++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.all()
Returns a dictionary of all the request arguments
"""
data = {}
args = self.request.arguments
for key, value in args.items():
data[key] = self.get_argument(key)
return data
def except_(self, arguments):
"""
returns the arguments passed to the route except that set by user
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.except_(['arg_name'])
Returns a dictionary of all arguments except for that provided by as ``arg_name``
"""
if not isinstance(arguments, list):
arguments = list(arguments)
args = self.request.arguments
data = {}
for key, value in args.items():
if key not in arguments:
data[key] = self.get_argument(key)
return data
def json(self, data):
"""
Encodes the dictionary being passed to JSON and sets the Header to application/json
"""
self.write(json_.encode(data))
self.set_header('Content-type', 'application/json')
@coroutine
def get(self, *args, **kwargs):
if self.request_type is not 'GET':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def post(self, *args, **kwargs):
if self.request_type is not 'POST':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def put(self, *args, **kwargs):
if self.request_type is not 'PUT':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def delete(self, *args, **kwargs):
if self.request_type is not 'DELETE':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
def get_argument(self, name, default=None, strip=True):
"""
Returns the value of the argument with the given name.
If default is not provided, returns ``None``
If the argument appears in the url more than once, we return the last value.
The returned value is always unicode
"""
return self._get_argument(name, default, self.request.arguments, strip)[name]
def headers(self):
"""
Returns all headers associated with the request
"""
return self.request.headers
def header(self, param):
"""
Returns the header specified by the key provided
"""
return self.request.headers.get(param)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is None:
return None
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
v = self._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
v = {name: v}
values.append(v)
return values
|
moluwole/Bast | bast/controller.py | Controller.initialize | python | def initialize(self, method, middleware, request_type):
self.method = method
self.middleware = middleware
self.request_type = request_type | Overridden initialize method from Tornado. Assigns the controller method and middleware attached to the route being executed
to global variables to be used | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L98-L105 | null | class Controller(RequestHandler, TemplateRendering):
method = None
middleware = None
providers = {}
request_type = None
def __init__(self, application, request, **kwargs):
super(Controller, self).__init__(application, request, **kwargs)
self.request = request
self.application = application
self.session_driver = os.getenv("SESSION")
self.session = Bast.session['session']
def write_error(self, status_code, **kwargs):
"""
Handle Exceptions from the server. Formats the HTML into readable form
"""
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content)
def view(self, template_name, kwargs=None):
"""
Used to render template to view
Sample usage
+++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
self.view('index.html')
"""
if kwargs is None:
kwargs = dict()
self.add_('session', self.session)
content = self.render_template(template_name, **kwargs)
self.write(content)
def data_received(self, chunk):
pass
def __run_middleware__(self, middleware_list):
"""
Gets the middleware attached to the route and executes it before the route is called. Middlewares in Bast are run before the
Controller Logic is executed. Returns true once it has been run successfully
"""
middleware_location = 'middleware'
return_value = False
try:
for func in middleware_list:
middleware_func = importlib.import_module('{0}.'.format(middleware_location) + func)
if hasattr(middleware_func, func):
class_name = getattr(middleware_func, func)
handle = getattr(class_name, 'handle')
return_value = handle(class_name, self)
return return_value
except Exception as e:
print("There is an Error in your Middleware ", e)
return return_value
def only(self, arguments):
"""
returns the key, value pair of the arguments passed as a dict object
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.only(['username'])
Returns only the argument username and assigns it to the data variable.
"""
data = {}
if not isinstance(arguments, list):
arguments = list(arguments)
for i in arguments:
data[i] = self.get_argument(i)
return data
def all(self):
"""
Returns all the arguments passed with the request
Sample Usage
++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.all()
Returns a dictionary of all the request arguments
"""
data = {}
args = self.request.arguments
for key, value in args.items():
data[key] = self.get_argument(key)
return data
def except_(self, arguments):
"""
returns the arguments passed to the route except that set by user
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.except_(['arg_name'])
Returns a dictionary of all arguments except for that provided by as ``arg_name``
"""
if not isinstance(arguments, list):
arguments = list(arguments)
args = self.request.arguments
data = {}
for key, value in args.items():
if key not in arguments:
data[key] = self.get_argument(key)
return data
def json(self, data):
"""
Encodes the dictionary being passed to JSON and sets the Header to application/json
"""
self.write(json_.encode(data))
self.set_header('Content-type', 'application/json')
@coroutine
def get(self, *args, **kwargs):
if self.request_type is not 'GET':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def post(self, *args, **kwargs):
if self.request_type is not 'POST':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def put(self, *args, **kwargs):
if self.request_type is not 'PUT':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def delete(self, *args, **kwargs):
if self.request_type is not 'DELETE':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
def get_argument(self, name, default=None, strip=True):
"""
Returns the value of the argument with the given name.
If default is not provided, returns ``None``
If the argument appears in the url more than once, we return the last value.
The returned value is always unicode
"""
return self._get_argument(name, default, self.request.arguments, strip)[name]
def headers(self):
"""
Returns all headers associated with the request
"""
return self.request.headers
def header(self, param):
"""
Returns the header specified by the key provided
"""
return self.request.headers.get(param)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is None:
return None
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
v = self._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
v = {name: v}
values.append(v)
return values
|
moluwole/Bast | bast/controller.py | Controller.only | python | def only(self, arguments):
data = {}
if not isinstance(arguments, list):
arguments = list(arguments)
for i in arguments:
data[i] = self.get_argument(i)
return data | returns the key, value pair of the arguments passed as a dict object
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.only(['username'])
Returns only the argument username and assigns it to the data variable. | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L107-L129 | null | class Controller(RequestHandler, TemplateRendering):
method = None
middleware = None
providers = {}
request_type = None
def __init__(self, application, request, **kwargs):
super(Controller, self).__init__(application, request, **kwargs)
self.request = request
self.application = application
self.session_driver = os.getenv("SESSION")
self.session = Bast.session['session']
def write_error(self, status_code, **kwargs):
"""
Handle Exceptions from the server. Formats the HTML into readable form
"""
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content)
def view(self, template_name, kwargs=None):
"""
Used to render template to view
Sample usage
+++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
self.view('index.html')
"""
if kwargs is None:
kwargs = dict()
self.add_('session', self.session)
content = self.render_template(template_name, **kwargs)
self.write(content)
def data_received(self, chunk):
pass
def __run_middleware__(self, middleware_list):
"""
Gets the middleware attached to the route and executes it before the route is called. Middlewares in Bast are run before the
Controller Logic is executed. Returns true once it has been run successfully
"""
middleware_location = 'middleware'
return_value = False
try:
for func in middleware_list:
middleware_func = importlib.import_module('{0}.'.format(middleware_location) + func)
if hasattr(middleware_func, func):
class_name = getattr(middleware_func, func)
handle = getattr(class_name, 'handle')
return_value = handle(class_name, self)
return return_value
except Exception as e:
print("There is an Error in your Middleware ", e)
return return_value
def initialize(self, method, middleware, request_type):
"""
Overridden initialize method from Tornado. Assigns the controller method and middleware attached to the route being executed
to global variables to be used
"""
self.method = method
self.middleware = middleware
self.request_type = request_type
def all(self):
"""
Returns all the arguments passed with the request
Sample Usage
++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.all()
Returns a dictionary of all the request arguments
"""
data = {}
args = self.request.arguments
for key, value in args.items():
data[key] = self.get_argument(key)
return data
def except_(self, arguments):
"""
returns the arguments passed to the route except that set by user
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.except_(['arg_name'])
Returns a dictionary of all arguments except for that provided by as ``arg_name``
"""
if not isinstance(arguments, list):
arguments = list(arguments)
args = self.request.arguments
data = {}
for key, value in args.items():
if key not in arguments:
data[key] = self.get_argument(key)
return data
def json(self, data):
"""
Encodes the dictionary being passed to JSON and sets the Header to application/json
"""
self.write(json_.encode(data))
self.set_header('Content-type', 'application/json')
@coroutine
def get(self, *args, **kwargs):
if self.request_type is not 'GET':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def post(self, *args, **kwargs):
if self.request_type is not 'POST':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def put(self, *args, **kwargs):
if self.request_type is not 'PUT':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def delete(self, *args, **kwargs):
if self.request_type is not 'DELETE':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
def get_argument(self, name, default=None, strip=True):
"""
Returns the value of the argument with the given name.
If default is not provided, returns ``None``
If the argument appears in the url more than once, we return the last value.
The returned value is always unicode
"""
return self._get_argument(name, default, self.request.arguments, strip)[name]
def headers(self):
"""
Returns all headers associated with the request
"""
return self.request.headers
def header(self, param):
"""
Returns the header specified by the key provided
"""
return self.request.headers.get(param)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is None:
return None
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
v = self._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
v = {name: v}
values.append(v)
return values
|
moluwole/Bast | bast/controller.py | Controller.all | python | def all(self):
data = {}
args = self.request.arguments
for key, value in args.items():
data[key] = self.get_argument(key)
return data | Returns all the arguments passed with the request
Sample Usage
++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.all()
Returns a dictionary of all the request arguments | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L131-L153 | null | class Controller(RequestHandler, TemplateRendering):
method = None
middleware = None
providers = {}
request_type = None
def __init__(self, application, request, **kwargs):
super(Controller, self).__init__(application, request, **kwargs)
self.request = request
self.application = application
self.session_driver = os.getenv("SESSION")
self.session = Bast.session['session']
def write_error(self, status_code, **kwargs):
"""
Handle Exceptions from the server. Formats the HTML into readable form
"""
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content)
def view(self, template_name, kwargs=None):
"""
Used to render template to view
Sample usage
+++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
self.view('index.html')
"""
if kwargs is None:
kwargs = dict()
self.add_('session', self.session)
content = self.render_template(template_name, **kwargs)
self.write(content)
def data_received(self, chunk):
pass
def __run_middleware__(self, middleware_list):
"""
Gets the middleware attached to the route and executes it before the route is called. Middlewares in Bast are run before the
Controller Logic is executed. Returns true once it has been run successfully
"""
middleware_location = 'middleware'
return_value = False
try:
for func in middleware_list:
middleware_func = importlib.import_module('{0}.'.format(middleware_location) + func)
if hasattr(middleware_func, func):
class_name = getattr(middleware_func, func)
handle = getattr(class_name, 'handle')
return_value = handle(class_name, self)
return return_value
except Exception as e:
print("There is an Error in your Middleware ", e)
return return_value
def initialize(self, method, middleware, request_type):
"""
Overridden initialize method from Tornado. Assigns the controller method and middleware attached to the route being executed
to global variables to be used
"""
self.method = method
self.middleware = middleware
self.request_type = request_type
def only(self, arguments):
"""
returns the key, value pair of the arguments passed as a dict object
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.only(['username'])
Returns only the argument username and assigns it to the data variable.
"""
data = {}
if not isinstance(arguments, list):
arguments = list(arguments)
for i in arguments:
data[i] = self.get_argument(i)
return data
def except_(self, arguments):
"""
returns the arguments passed to the route except that set by user
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.except_(['arg_name'])
Returns a dictionary of all arguments except for that provided by as ``arg_name``
"""
if not isinstance(arguments, list):
arguments = list(arguments)
args = self.request.arguments
data = {}
for key, value in args.items():
if key not in arguments:
data[key] = self.get_argument(key)
return data
def json(self, data):
"""
Encodes the dictionary being passed to JSON and sets the Header to application/json
"""
self.write(json_.encode(data))
self.set_header('Content-type', 'application/json')
@coroutine
def get(self, *args, **kwargs):
if self.request_type is not 'GET':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def post(self, *args, **kwargs):
if self.request_type is not 'POST':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def put(self, *args, **kwargs):
if self.request_type is not 'PUT':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def delete(self, *args, **kwargs):
if self.request_type is not 'DELETE':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
def get_argument(self, name, default=None, strip=True):
"""
Returns the value of the argument with the given name.
If default is not provided, returns ``None``
If the argument appears in the url more than once, we return the last value.
The returned value is always unicode
"""
return self._get_argument(name, default, self.request.arguments, strip)[name]
def headers(self):
"""
Returns all headers associated with the request
"""
return self.request.headers
def header(self, param):
"""
Returns the header specified by the key provided
"""
return self.request.headers.get(param)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is None:
return None
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
v = self._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
v = {name: v}
values.append(v)
return values
|
moluwole/Bast | bast/controller.py | Controller.except_ | python | def except_(self, arguments):
if not isinstance(arguments, list):
arguments = list(arguments)
args = self.request.arguments
data = {}
for key, value in args.items():
if key not in arguments:
data[key] = self.get_argument(key)
return data | returns the arguments passed to the route except those set by the user
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.except_(['arg_name'])
Returns a dictionary of all arguments except for the one provided as ``arg_name``
method = None
middleware = None
providers = {}
request_type = None
def __init__(self, application, request, **kwargs):
super(Controller, self).__init__(application, request, **kwargs)
self.request = request
self.application = application
self.session_driver = os.getenv("SESSION")
self.session = Bast.session['session']
def write_error(self, status_code, **kwargs):
"""
Handle Exceptions from the server. Formats the HTML into readable form
"""
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content)
def view(self, template_name, kwargs=None):
"""
Used to render template to view
Sample usage
+++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
self.view('index.html')
"""
if kwargs is None:
kwargs = dict()
self.add_('session', self.session)
content = self.render_template(template_name, **kwargs)
self.write(content)
def data_received(self, chunk):
pass
def __run_middleware__(self, middleware_list):
"""
Gets the middleware attached to the route and executes it before the route is called. Middlewares in Bast are run before the
Controller Logic is executed. Returns true once it has been run successfully
"""
middleware_location = 'middleware'
return_value = False
try:
for func in middleware_list:
middleware_func = importlib.import_module('{0}.'.format(middleware_location) + func)
if hasattr(middleware_func, func):
class_name = getattr(middleware_func, func)
handle = getattr(class_name, 'handle')
return_value = handle(class_name, self)
return return_value
except Exception as e:
print("There is an Error in your Middleware ", e)
return return_value
def initialize(self, method, middleware, request_type):
"""
Overridden initialize method from Tornado. Assigns the controller method and middleware attached to the route being executed
to global variables to be used
"""
self.method = method
self.middleware = middleware
self.request_type = request_type
def only(self, arguments):
"""
returns the key, value pair of the arguments passed as a dict object
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.only(['username'])
Returns only the argument username and assigns it to the data variable.
"""
data = {}
if not isinstance(arguments, list):
arguments = list(arguments)
for i in arguments:
data[i] = self.get_argument(i)
return data
def all(self):
"""
Returns all the arguments passed with the request
Sample Usage
++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.all()
Returns a dictionary of all the request arguments
"""
data = {}
args = self.request.arguments
for key, value in args.items():
data[key] = self.get_argument(key)
return data
def json(self, data):
"""
Encodes the dictionary being passed to JSON and sets the Header to application/json
"""
self.write(json_.encode(data))
self.set_header('Content-type', 'application/json')
@coroutine
def get(self, *args, **kwargs):
if self.request_type is not 'GET':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def post(self, *args, **kwargs):
if self.request_type is not 'POST':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def put(self, *args, **kwargs):
if self.request_type is not 'PUT':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def delete(self, *args, **kwargs):
if self.request_type is not 'DELETE':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
def get_argument(self, name, default=None, strip=True):
"""
Returns the value of the argument with the given name.
If default is not provided, returns ``None``
If the argument appears in the url more than once, we return the last value.
The returned value is always unicode
"""
return self._get_argument(name, default, self.request.arguments, strip)[name]
def headers(self):
"""
Returns all headers associated with the request
"""
return self.request.headers
def header(self, param):
"""
Returns the header specified by the key provided
"""
return self.request.headers.get(param)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is None:
return None
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
v = self._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
v = {name: v}
values.append(v)
return values
|
moluwole/Bast | bast/controller.py | Controller.json | python | def json(self, data):
self.write(json_.encode(data))
self.set_header('Content-type', 'application/json') | Encodes the dictionary being passed to JSON and sets the Header to application/json | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L182-L187 | [
"def encode(cls, json):\n \"\"\"\n Checks the type of data passed to the be encoded.\n :param json:\n :return:\n \"\"\"\n if type(json) is dict:\n return json_encode(json)\n return json_encode({'message': 'Not a Dictionary object'})\n"
] | class Controller(RequestHandler, TemplateRendering):
method = None
middleware = None
providers = {}
request_type = None
def __init__(self, application, request, **kwargs):
super(Controller, self).__init__(application, request, **kwargs)
self.request = request
self.application = application
self.session_driver = os.getenv("SESSION")
self.session = Bast.session['session']
def write_error(self, status_code, **kwargs):
"""
Handle Exceptions from the server. Formats the HTML into readable form
"""
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content)
def view(self, template_name, kwargs=None):
"""
Used to render template to view
Sample usage
+++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
self.view('index.html')
"""
if kwargs is None:
kwargs = dict()
self.add_('session', self.session)
content = self.render_template(template_name, **kwargs)
self.write(content)
def data_received(self, chunk):
pass
def __run_middleware__(self, middleware_list):
"""
Gets the middleware attached to the route and executes it before the route is called. Middlewares in Bast are run before the
Controller Logic is executed. Returns true once it has been run successfully
"""
middleware_location = 'middleware'
return_value = False
try:
for func in middleware_list:
middleware_func = importlib.import_module('{0}.'.format(middleware_location) + func)
if hasattr(middleware_func, func):
class_name = getattr(middleware_func, func)
handle = getattr(class_name, 'handle')
return_value = handle(class_name, self)
return return_value
except Exception as e:
print("There is an Error in your Middleware ", e)
return return_value
def initialize(self, method, middleware, request_type):
"""
Overridden initialize method from Tornado. Assigns the controller method and middleware attached to the route being executed
to global variables to be used
"""
self.method = method
self.middleware = middleware
self.request_type = request_type
def only(self, arguments):
"""
returns the key, value pair of the arguments passed as a dict object
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.only(['username'])
Returns only the argument username and assigns it to the data variable.
"""
data = {}
if not isinstance(arguments, list):
arguments = list(arguments)
for i in arguments:
data[i] = self.get_argument(i)
return data
def all(self):
"""
Returns all the arguments passed with the request
Sample Usage
++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.all()
Returns a dictionary of all the request arguments
"""
data = {}
args = self.request.arguments
for key, value in args.items():
data[key] = self.get_argument(key)
return data
def except_(self, arguments):
"""
returns the arguments passed to the route except those set by the user
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.except_(['arg_name'])
Returns a dictionary of all arguments except for the one provided as ``arg_name``
"""
if not isinstance(arguments, list):
arguments = list(arguments)
args = self.request.arguments
data = {}
for key, value in args.items():
if key not in arguments:
data[key] = self.get_argument(key)
return data
@coroutine
def get(self, *args, **kwargs):
if self.request_type is not 'GET':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def post(self, *args, **kwargs):
if self.request_type is not 'POST':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def put(self, *args, **kwargs):
if self.request_type is not 'PUT':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def delete(self, *args, **kwargs):
if self.request_type is not 'DELETE':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
def get_argument(self, name, default=None, strip=True):
"""
Returns the value of the argument with the given name.
If default is not provided, returns ``None``
If the argument appears in the url more than once, we return the last value.
The returned value is always unicode
"""
return self._get_argument(name, default, self.request.arguments, strip)[name]
def headers(self):
"""
Returns all headers associated with the request
"""
return self.request.headers
def header(self, param):
"""
Returns the header specified by the key provided
"""
return self.request.headers.get(param)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is None:
return None
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
v = self._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
v = {name: v}
values.append(v)
return values
|
moluwole/Bast | bast/controller.py | Controller.get_argument | python | def get_argument(self, name, default=None, strip=True):
return self._get_argument(name, default, self.request.arguments, strip)[name] | Returns the value of the argument with the given name.
If default is not provided, returns ``None``
If the argument appears in the url more than once, we return the last value.
The returned value is always unicode | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L245-L255 | null | class Controller(RequestHandler, TemplateRendering):
method = None
middleware = None
providers = {}
request_type = None
def __init__(self, application, request, **kwargs):
super(Controller, self).__init__(application, request, **kwargs)
self.request = request
self.application = application
self.session_driver = os.getenv("SESSION")
self.session = Bast.session['session']
def write_error(self, status_code, **kwargs):
"""
Handle Exceptions from the server. Formats the HTML into readable form
"""
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content)
def view(self, template_name, kwargs=None):
"""
Used to render template to view
Sample usage
+++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
self.view('index.html')
"""
if kwargs is None:
kwargs = dict()
self.add_('session', self.session)
content = self.render_template(template_name, **kwargs)
self.write(content)
def data_received(self, chunk):
pass
def __run_middleware__(self, middleware_list):
"""
Gets the middleware attached to the route and executes it before the route is called. Middlewares in Bast are run before the
Controller Logic is executed. Returns true once it has been run successfully
"""
middleware_location = 'middleware'
return_value = False
try:
for func in middleware_list:
middleware_func = importlib.import_module('{0}.'.format(middleware_location) + func)
if hasattr(middleware_func, func):
class_name = getattr(middleware_func, func)
handle = getattr(class_name, 'handle')
return_value = handle(class_name, self)
return return_value
except Exception as e:
print("There is an Error in your Middleware ", e)
return return_value
def initialize(self, method, middleware, request_type):
"""
Overridden initialize method from Tornado. Assigns the controller method and middleware attached to the route being executed
to global variables to be used
"""
self.method = method
self.middleware = middleware
self.request_type = request_type
def only(self, arguments):
"""
returns the key, value pair of the arguments passed as a dict object
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.only(['username'])
Returns only the argument username and assigns it to the data variable.
"""
data = {}
if not isinstance(arguments, list):
arguments = list(arguments)
for i in arguments:
data[i] = self.get_argument(i)
return data
def all(self):
"""
Returns all the arguments passed with the request
Sample Usage
++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.all()
Returns a dictionary of all the request arguments
"""
data = {}
args = self.request.arguments
for key, value in args.items():
data[key] = self.get_argument(key)
return data
def except_(self, arguments):
"""
returns the arguments passed to the route except those set by the user
Sample Usage
++++++++++++++
.. code:: python
from bast import Controller
class MyController(Controller):
def index(self):
data = self.except_(['arg_name'])
Returns a dictionary of all arguments except for the one provided as ``arg_name``
"""
if not isinstance(arguments, list):
arguments = list(arguments)
args = self.request.arguments
data = {}
for key, value in args.items():
if key not in arguments:
data[key] = self.get_argument(key)
return data
def json(self, data):
"""
Encodes the dictionary being passed to JSON and sets the Header to application/json
"""
self.write(json_.encode(data))
self.set_header('Content-type', 'application/json')
@coroutine
def get(self, *args, **kwargs):
if self.request_type is not 'GET':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def post(self, *args, **kwargs):
if self.request_type is not 'POST':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def put(self, *args, **kwargs):
if self.request_type is not 'PUT':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
@coroutine
def delete(self, *args, **kwargs):
if self.request_type is not 'DELETE':
raise BastException(405, "Wrong Method. Expected Request Method: %s" % self.request_type)
if self.middleware is not None and len(self.middleware) > 0:
value = self.__run_middleware__(self.middleware)
if not value:
return
func = getattr(self, self.method)
if func:
func()
else:
raise BastException(404, "Controller Function Not Found")
def headers(self):
"""
Returns all headers associated with the request
"""
return self.request.headers
def header(self, param):
"""
Returns the header specified by the key provided
"""
return self.request.headers.get(param)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is None:
return None
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
v = self._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
v = {name: v}
values.append(v)
return values
|
moluwole/Bast | bast/validator/rules.py | is_required.run | python | def run(self, value):
if self.pass_ and not value.strip():
return True
if not value:
return False
return True | Determines if the given value is empty.
Keyword arguments:
value str -- the value of the associated field to compare | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/validator/rules.py#L128-L138 | null | class is_required(Rule):
""" Used to determine if given field is empty. """
def __init__(self, error=None, pass_=False):
if not error:
error = 'This field requires a value.'
super(is_required, self).__init__(error, pass_)
|
moluwole/Bast | bast/validator/rules.py | required_length.run | python | def run(self, value):
if self.pass_ and not value.strip():
return True
if len((value.strip() if self.strip else value)) != self.length:
self.error = self.error.format(value, self.length)
return False
return True | Determines if the value's character length equals self.length.
Keyword arguments:
value str -- the value of the associated field to compare | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/validator/rules.py#L159-L170 | null | class required_length(Rule):
""" Used to determine whether the given associated field value's character length equals
the given maximum amount. """
def __init__(self, length, strip=False, error=None, pass_=False):
""" Constructor that instantiates a class instance and properties.
Keyword arguments:
length int -- Absolute maximum character length.
strip bool -- Used to strip whitespace from the given field value. (optional)
error str -- A user-defined error messaged for a failed (optional)
pass_ bool -- Pass through as success if field value is blank. (optional)
"""
if not error:
error = "String `{}` length does not equal `{}`"
super(required_length, self).__init__(error, pass_)
self.length = int(length)
self.strip = bool(strip)
|
moluwole/Bast | bast/validator/rules.py | length_between.run | python | def run(self, value):
if self.pass_ and not value.strip():
return True
if self.minimum <= len((value.strip() if self.strip else value)) <= self.maximum:
return True
self.error = self.error.format(value, self.minimum, self.maximum)
return False | Determines if value character length is between self.minimum and self.maximum.
Keyword arguments:
value str -- the value of the associated field to compare | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/validator/rules.py#L193-L205 | null | class length_between(Rule):
""" Used to determine whether the given associated field value's character length is
within the given range. """
def __init__(self, minimum, maximum, **kwargs):
""" Constructor that instantiates a class instance and properties.
Keyword arguments:
minimum int -- Absolute minimum character length.
max int -- Absolute maximum character length.
strip bool -- Used to strip whitespace from the given field value. (optional)
error str -- A user-defined error messaged for a failed (optional)
pass_ bool -- Pass through as success if field value is blank. (optional)
"""
if not kwargs.get('error', None):
kwargs['error'] = "String `{}` length is not within `{}` and `{}`"
super(length_between, self).__init__(kwargs.get('error', None), kwargs.get('pass_', False))
self.minimum = int(minimum)
self.maximum = int(maximum)
self.strip = kwargs.get('strip', False)
|
moluwole/Bast | bast/validator/rules.py | in_list.run | python | def run(self, value):
if self.pass_ and not value.strip():
return True
if (value.strip() if self.strip else value) not in self.given_list:
self.error = self.error.format(value)
return False
return True | Checks if value is included within self.given_list.
Keyword arguments:
value str -- the value of the associated field to compare | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/validator/rules.py#L225-L236 | null | class in_list(Rule):
""" Used to determine if the associated field's value exists within the specified list. """
def __init__(self, given_list, strip=False, error=None, pass_=False):
""" Constructor that instantiates a class instance and properties.
Keyword arguments:
given_list list -- List containing values to evaluate.
strip bool -- Used to strip whitespace from the given field value. (optional)
error str -- A user-defined error messaged for a failed (optional)
pass_ bool -- Pass through as success if field value is blank. (optional)
"""
if not error:
error = "Value of `{}` is not within the list"
super(in_list, self).__init__(error, pass_)
self.given_list = given_list
self.strip = strip
|
moluwole/Bast | bast/validator/rules.py | is_type.run | python | def run(self, value):
if self.pass_ and not value.strip():
return True
if not isinstance(value, type(self.asserted_type)):
self.error = self.error.format(type(value), self.asserted_type)
return False
return True | Compares value against self.asserted_type.
Keyword arguments:
value str -- the value of the associated field to compare | train | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/validator/rules.py#L254-L265 | null | class is_type(Rule):
""" Rule that compares the associated field's value against a specified data type. """
def __init__(self, asserted_type, error=None, pass_=False):
""" Constructor that instantiates a class instance and properties.
Keyword arguments:
asserted_type mixed -- The type to compare the field value against.
error str -- A user-defined error messaged for a failed (optional)
pass_ bool -- Pass through as success if field value is blank. (optional)
"""
if not error:
error = "Type of `{}` is not of type `{}`"
super(is_type, self).__init__(error, pass_)
self.asserted_type = asserted_type
|
moluwole/Bast | bast/bast.py | Bast.run | python | def run(self):
define("port", default=self.port, help="Run on given port", type=int)
define("host", default=self.host, help="Run on given host", type=str)
define("debug", default=self.debug, help="True for development", type=bool)
parse_command_line()
print(Fore.GREEN + "Starting Bast Server....")
print(Fore.GREEN + "Bast Server Running on %s:%s" % (options.host, options.port))
application = Application(self.handler, debug=options.debug)
server = HTTPServer(application)
server.listen(options.port, options.host)
IOLoop.current().start() | Function to Run the server. Server runs on host: 127.0.0.1 and port: 2000 by default. Debug is also set to false
by default
Can be overridden by using the config.ini file
image_folder = ""
script_folder = ""
css_folder = ""
template_folder = ""
host = None
port = None
debug = None
providers = {}
session = {}
def __init__(self, route):
"""
Bast Server Class. Runs on Tornado HTTP Server (http://www.tornadoweb.org/en/stable/)
Constructor for the Bast Server. Takes an instance of the route as parameter.
The Web handler with routes are handled here.
Config files are also loaded from the config/config.ini folder.
Appropriate configurations are loaded from the config file into the os environment for use
:param route:
"""
super(Bast, self).__init__()
init()
load_env()
self.config()
self.host = os.getenv("HOST", "127.0.0.1")
self.port = os.getenv("PORT", 2000)
self.debug = os.getenv("DEBUG", True)
self.handler = route.all().url
self.handler.append((r'/css/(.*)', StaticFileHandler, {"path": self.css_folder}))
self.handler.append((r'/script/(.*)', StaticFileHandler, {"path": self.script_folder}))
self.handler.append((r'/images/(.*)', StaticFileHandler, {"path": self.image_folder}))
# append the URL for static files to exception
self.handler.append((r'/exp/(.*)', StaticFileHandler, {'path': os.path.join(os.path.dirname(os.path.realpath(__file__)), "exception")}))
def config(self):
sys.path.extend([os.path.abspath('.')])
from config import config
static_files = config.STATIC_FILES
if config.SESSION_DRIVER is 'memory':
self.session.update({"session": MemorySession()})
else:
self.session.update({'session': FileSession()})
# providers = provider.providers
# print(providers['session'])
os.environ['TEMPLATE_FOLDER'] = os.path.join(os.path.abspath('.'), static_files['template'])
self.image_folder = os.path.join(os.path.abspath('.'), static_files['images'])
self.css_folder = os.path.join(os.path.abspath('.'), static_files['css'])
self.script_folder = os.path.join(os.path.abspath('.'), static_files['script'])
|
moluwole/Bast | bast/validator/validator.py | validator.run | python | def run(cls, return_results=False):
cls.result = []
passed = True
for field in cls.fields:
result, errors = field.run()
results = {
'field': field.name,
'value': field.value,
'passed': result,
'errors': None
}
if errors:
passed = False
results['errors'] = errors
cls.result.append(results)
if return_results:
return cls.result
return passed | Iterates through all associated Fields and applies all attached Rules. Depending on 'return_results',
this method will either return True (all rules successful), False (all, or some, rules failed)
or a dictionary list
containing the collated results of all Field Rules.
Keyword arguments:
return_results bool -- Returns dictionary list of Field Rule collated results instead of True or False.
# Class-level (shared) state: every use of the validator class sees the same
# fields/result lists, since the API below is largely classmethod-based.
fields = []
result = []
# Attach one Field, or a list of Fields, to the validator; returns cls so
# calls can be chained.
# NOTE(review): when a list is given, items are appended as they are checked,
# so an invalid item part-way through leaves the earlier items registered.
@classmethod
def add(cls, field_):
if isinstance(field_, list):
for f in field_:
if not isinstance(f, Field):
raise TypeError('parameter :field must be list of class Field instances')
cls.fields.append(f)
return cls
if not isinstance(field_, Field):
raise TypeError('parameter :field must be instance of class Field')
cls.fields.append(field_)
return cls
def __iter__(self):
""" Returns generator to iterate through assigned fields. """
for rule in self.fields:
yield rule
def __len__(self):
""" Implements built-in len() to return number of assigned fields. """
return len(self.fields)
def __getitem__(self, i):
""" Allows for self[key] access. Will raise IndexError if out of range. """
return self.fields[i]
def results(self):
""" Returns the collated results for the current collection instance. """
return self.result
# NOTE(review): this reads ``field.title`` while the result-collation code
# uses ``field.name`` — presumably Field exposes both; confirm against the
# Field class.
def form(self):
""" Returns a dict representing the current form in field:value pairs. """
return {
field.title: field.value
for field in self.fields
}
def errors(self):
""" Returns a dict containing only a map of fields with any
corresponding errors or None if all rules passed.
"""
return {
f['field']: f['errors']
for f in self.result
if f['errors']
} or None
# Decorator for the classmethod whose definition follows this span.
@classmethod
|
def middleware(self, args):
    """Attach middleware to the most recently registered route.

    When the last registered entry is the current route with no middleware
    attached yet, it is swapped for the same entry carrying ``args``.
    Returns self so calls can be chained.
    """
    bare_entry = (self.url_, self.controller,
                  dict(method=self.method, request_type=self.request_type, middleware=None))
    if self.url[-1] == bare_entry:
        self.url.pop()
        self.url.append((self.url_, self.controller,
                         dict(method=self.method, request_type=self.request_type, middleware=args)))
    return self
"""
Route Class. Appends the URL, Controller Instance and Method to a list instance to be passed on to the server
instance
"""
url_ = None
controller = None
method = None
controller_location = 'controller'
url = []
request_type = ""
# def prefix(self, pref):
# return self
def __return_controller__(self, controller):
if isinstance(controller, str):
ctr = controller.split('.')
if ctr[0].startswith('/'):
self.controller_location = '.'.join(ctr[0].replace('/', '').split('.')[0:-1])
else:
raise Exception
get_controller = ctr[0].split('.')[-1]
try:
# Import the module
if isinstance(controller, str):
controller_name = importlib.import_module('{0}.'.format(self.controller_location) + get_controller)
else:
controller_name = importlib.import_module('{0}'.format(self.controller_location))
# Get the controller from the module
controller_class = getattr(controller_name, get_controller)
# Get the controller method from the class which in this case is the string
controller_method = ctr[1]
return controller_class, controller_method
except Exception as e:
print('\033[93mError in your routes/link.py!', e, '\033[0m')
def get(self, url, controller):
"""
Gets the Controller and adds the route, controller and method to the url list for GET request
"""
self.request_type = 'GET'
controller_class, controller_method = self.__return_controller__(controller)
self.controller = controller_class
self.method = controller_method
self.url_ = url
self.url.append((url, controller_class, dict(method=controller_method, request_type=self.request_type, middleware=None)))
return self
def post(self, url, controller):
"""
Gets the Controller and adds the route, controller and method to the url list for the POST request
"""
self.request_type = "POST"
controller_class, controller_method = self.__return_controller__(controller)
self.controller = controller_class
self.method = controller_method
self.url_ = url
self.url.append((url, controller_class, dict(method=controller_method, request_type=self.request_type, middleware=None)))
return self
def put(self, url, controller):
"""
Gets the Controller and adds the route, controller and method to the url list for PUT request
"""
self.request_type = "PUT"
controller_class, controller_method = self.__return_controller__(controller)
self.controller = controller_class
self.method = controller_method
self.url_ = url
self.url.append((url, controller_class, dict(method=controller_method, request_type=self.request_type, middleware=None)))
return self
def delete(self, url, controller):
"""
Gets the Controller and adds the route, controller and method to the url list for the DELETE request
"""
self.request_type = "DELETE"
controller_class, controller_method = self.__return_controller__(controller)
self.controller = controller_class
self.method = controller_method
self.url_ = url
self.url.append((url, controller_class, dict(method=controller_method, request_type=self.request_type, middleware=None)))
return self
def all(self):
"""
Returns the list of URL. Used by Server to get the list of URLS. This is passed to the Bast HTTP Server
"""
return self
|
def get(self, url, controller):
    """Register *controller* for a GET request on *url*.

    Resolves the controller class and handler method, records them on this
    route, appends the (url, class, options) entry to the routing table and
    returns self so calls can be chained.
    """
    self.request_type = 'GET'
    handler_class, handler_method = self.__return_controller__(controller)
    self.controller = handler_class
    self.method = handler_method
    self.url_ = url
    options = dict(method=handler_method, request_type=self.request_type, middleware=None)
    self.url.append((url, handler_class, options))
    return self
"def __return_controller__(self, controller):\n if isinstance(controller, str):\n ctr = controller.split('.')\n if ctr[0].startswith('/'):\n self.controller_location = '.'.join(ctr[0].replace('/', '').split('.')[0:-1])\n else:\n raise Exception\n\n get_controller = ctr[0].sp... | class Route(object):
"""
Route Class. Appends the URL, Controller Instance and Method to a list instance to be passed on to the server
instance
"""
url_ = None
controller = None
method = None
controller_location = 'controller'
url = []
request_type = ""
# def prefix(self, pref):
# return self
def middleware(self, args):
"""
Appends a Middleware to the route which is to be executed before the route runs
"""
if self.url[(len(self.url) - 1)] == (self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=None)):
self.url.pop()
self.url.append((self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=args)))
return self
def __return_controller__(self, controller):
if isinstance(controller, str):
ctr = controller.split('.')
if ctr[0].startswith('/'):
self.controller_location = '.'.join(ctr[0].replace('/', '').split('.')[0:-1])
else:
raise Exception
get_controller = ctr[0].split('.')[-1]
try:
# Import the module
if isinstance(controller, str):
controller_name = importlib.import_module('{0}.'.format(self.controller_location) + get_controller)
else:
controller_name = importlib.import_module('{0}'.format(self.controller_location))
# Get the controller from the module
controller_class = getattr(controller_name, get_controller)
# Get the controller method from the class which in this case is the string
controller_method = ctr[1]
return controller_class, controller_method
except Exception as e:
print('\033[93mError in your routes/link.py!', e, '\033[0m')
def post(self, url, controller):
"""
Gets the Controller and adds the route, controller and method to the url list for the POST request
"""
self.request_type = "POST"
controller_class, controller_method = self.__return_controller__(controller)
self.controller = controller_class
self.method = controller_method
self.url_ = url
self.url.append((url, controller_class, dict(method=controller_method, request_type=self.request_type, middleware=None)))
return self
def put(self, url, controller):
"""
Gets the Controller and adds the route, controller and method to the url list for PUT request
"""
self.request_type = "PUT"
controller_class, controller_method = self.__return_controller__(controller)
self.controller = controller_class
self.method = controller_method
self.url_ = url
self.url.append((url, controller_class, dict(method=controller_method, request_type=self.request_type, middleware=None)))
return self
def delete(self, url, controller):
"""
Gets the Controller and adds the route, controller and method to the url list for the DELETE request
"""
self.request_type = "DELETE"
controller_class, controller_method = self.__return_controller__(controller)
self.controller = controller_class
self.method = controller_method
self.url_ = url
self.url.append((url, controller_class, dict(method=controller_method, request_type=self.request_type, middleware=None)))
return self
def all(self):
"""
Returns the list of URL. Used by Server to get the list of URLS. This is passed to the Bast HTTP Server
"""
return self
|
def itersheets(self):
    """Yield each worksheet in order, making it the active worksheet while yielded.

    Expressions with no explicit table use the active worksheet when they are
    resolved, so the previously active worksheet is always restored afterwards,
    even if the consumer raises.
    """
    for sheet in self.worksheets:
        saved = self.active_worksheet
        self.active_worksheet = sheet
        try:
            yield sheet
        finally:
            self.active_worksheet = saved
worksheet as the current one before yielding. | train | https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/workbook.py#L47-L60 | null | class Workbook(object):
"""
A workbook is an ordered collection of worksheets.
Once all worksheets have been added the workbook can be written out or
the worksheets can be iterated over, and any expressions present in the
tables of the worksheets will be resolved to absolute worksheet/cell references.
:param str filename: Filename the workbook will be written to.
:param list worksheets: List of :py:class:`xltable.Worksheet` instances.
"""
def __init__(self, filename=None, worksheets=[]):
self.filename = filename
self.worksheets = list(worksheets)
self.calc_mode = "auto"
self.workbook_obj = None
# The active table and worksheet objects are set during export, and
# are used to resolve expressions where the table and/or sheet isn't
# set explicitly (in which case the current table is used implicitly).
self.active_table = None
self.active_worksheet = None
def add_sheet(self, worksheet):
"""
Adds a worksheet to the workbook.
"""
self.worksheets.append(worksheet)
# alias for add_sheet
append = add_sheet
def set_calc_mode(self, mode):
"""
Set the calculation mode for the Excel workbook
"""
self.calc_mode = mode
def to_xlsx(self, **kwargs):
"""
Write workbook to a .xlsx file using xlsxwriter.
Return a xlsxwriter.workbook.Workbook.
:param kwargs: Extra arguments passed to the xlsxwriter.Workbook
constructor.
"""
from xlsxwriter.workbook import Workbook as _Workbook
self.workbook_obj = _Workbook(**kwargs)
self.workbook_obj.set_calc_mode(self.calc_mode)
for worksheet in self.itersheets():
worksheet.to_xlsx(workbook=self)
self.workbook_obj.filename = self.filename
if self.filename:
self.workbook_obj.close()
return self.workbook_obj
def to_excel(self, xl_app=None, resize_columns=True):
from win32com.client import Dispatch, gencache
if xl_app is None:
xl_app = Dispatch("Excel.Application")
xl_app = gencache.EnsureDispatch(xl_app)
# Add a new workbook with the correct number of sheets.
# We aren't allowed to create an empty one.
assert self.worksheets, "Can't export workbook with no worksheets"
sheets_in_new_workbook = xl_app.SheetsInNewWorkbook
try:
xl_app.SheetsInNewWorkbook = float(len(self.worksheets))
self.workbook_obj = xl_app.Workbooks.Add()
finally:
xl_app.SheetsInNewWorkbook = sheets_in_new_workbook
# Rename the worksheets, ensuring that there can never be two sheets with the same
# name due to the sheets default names conflicting with the new names.
sheet_names = {s.name for s in self.worksheets}
assert len(sheet_names) == len(self.worksheets), "Worksheets must have unique names"
for worksheet in self.workbook_obj.Sheets:
i = 1
original_name = worksheet.Name
while worksheet.Name in sheet_names:
worksheet.Name = "%s_%d" % (original_name, i)
i += 1
for worksheet, sheet in zip(self.workbook_obj.Sheets, self.worksheets):
worksheet.Name = sheet.name
# Export each sheet (have to use itersheets for this as it sets the
# current active sheet before yielding each one).
for worksheet, sheet in zip(self.workbook_obj.Sheets, self.itersheets()):
worksheet.Select()
sheet.to_excel(workbook=self,
worksheet=worksheet,
xl_app=xl_app,
rename=False,
resize_columns=resize_columns)
return self.workbook_obj
def get_last_sheet(self):
return self.workbook_obj.Sheets[self.workbook_obj.Sheets.Count]
def add_xlsx_worksheet(self, worksheet, name):
if worksheet not in self.worksheets:
self.append(worksheet)
return self.workbook_obj.add_worksheet(name)
def add_excel_worksheet(self, after=None):
if after is None:
after = self.get_last_sheet()
return self.workbook_obj.Sheets.Add(After=after)
def add_format(self, *args, **kwargs):
return self.workbook_obj.add_format(*args, **kwargs)
def get_table(self, name):
"""
Return a table, worksheet pair for the named table
"""
if name is None:
assert self.active_table, "Can't get table without name unless an active table is set"
name = self.active_table.name
if self.active_worksheet:
table = self.active_worksheet.get_table(name)
assert table is self.active_table, "Active table is not from the active sheet"
return table, self.active_worksheet
for ws in self.worksheets:
try:
table = ws.get_table(name)
if table is self.active_table:
return table, ws
except KeyError:
pass
raise RuntimeError("Active table not found in any sheet")
# if the tablename explicitly uses the sheetname find the right sheet
if "!" in name:
ws_name, table_name = map(lambda x: x.strip("'"), name.split("!", 1))
for ws in self.worksheets:
if ws.name == ws_name:
table = ws.get_table(table_name)
return table, ws
raise KeyError(name)
# otherwise look in the current table
if self.active_worksheet:
table = self.active_worksheet.get_table(name)
return table, self.active_worksheet
# or fallback to the first matching name in any table
for ws in self.worksheets:
try:
table = ws.get_table(name)
return table, ws
except KeyError:
pass
raise KeyError(name)
|
def to_xlsx(self, **kwargs):
    """Write the workbook out using xlsxwriter.

    Builds an xlsxwriter Workbook, exports every worksheet into it, and
    returns the xlsxwriter.workbook.Workbook object. The file is only
    closed (written to disk) when a filename was set on this workbook.

    :param kwargs: Extra arguments passed to the xlsxwriter.Workbook
        constructor.
    """
    from xlsxwriter.workbook import Workbook as _Workbook

    self.workbook_obj = _Workbook(**kwargs)
    self.workbook_obj.set_calc_mode(self.calc_mode)
    # itersheets makes each sheet the active one while it exports itself.
    for sheet in self.itersheets():
        sheet.to_xlsx(workbook=self)
    self.workbook_obj.filename = self.filename
    if self.filename:
        self.workbook_obj.close()
    return self.workbook_obj
"def itersheets(self):\n \"\"\"\n Iterates over the worksheets in the book, and sets the active\n worksheet as the current one before yielding.\n \"\"\"\n for ws in self.worksheets:\n # Expression with no explicit table specified will use None\n # when calling get_table, which should re... | class Workbook(object):
"""
A workbook is an ordered collection of worksheets.
Once all worksheets have been added the workbook can be written out or
the worksheets can be iterated over, and any expressions present in the
tables of the worksheets will be resolved to absolute worksheet/cell references.
:param str filename: Filename the workbook will be written to.
:param list worksheets: List of :py:class:`xltable.Worksheet` instances.
"""
def __init__(self, filename=None, worksheets=[]):
self.filename = filename
self.worksheets = list(worksheets)
self.calc_mode = "auto"
self.workbook_obj = None
# The active table and worksheet objects are set during export, and
# are used to resolve expressions where the table and/or sheet isn't
# set explicitly (in which case the current table is used implicitly).
self.active_table = None
self.active_worksheet = None
def add_sheet(self, worksheet):
"""
Adds a worksheet to the workbook.
"""
self.worksheets.append(worksheet)
# alias for add_sheet
append = add_sheet
def set_calc_mode(self, mode):
"""
Set the calculation mode for the Excel workbook
"""
self.calc_mode = mode
def itersheets(self):
"""
Iterates over the worksheets in the book, and sets the active
worksheet as the current one before yielding.
"""
for ws in self.worksheets:
# Expression with no explicit table specified will use None
# when calling get_table, which should return the current worksheet/table
prev_ws = self.active_worksheet
self.active_worksheet = ws
try:
yield ws
finally:
self.active_worksheet = prev_ws
def to_excel(self, xl_app=None, resize_columns=True):
from win32com.client import Dispatch, gencache
if xl_app is None:
xl_app = Dispatch("Excel.Application")
xl_app = gencache.EnsureDispatch(xl_app)
# Add a new workbook with the correct number of sheets.
# We aren't allowed to create an empty one.
assert self.worksheets, "Can't export workbook with no worksheets"
sheets_in_new_workbook = xl_app.SheetsInNewWorkbook
try:
xl_app.SheetsInNewWorkbook = float(len(self.worksheets))
self.workbook_obj = xl_app.Workbooks.Add()
finally:
xl_app.SheetsInNewWorkbook = sheets_in_new_workbook
# Rename the worksheets, ensuring that there can never be two sheets with the same
# name due to the sheets default names conflicting with the new names.
sheet_names = {s.name for s in self.worksheets}
assert len(sheet_names) == len(self.worksheets), "Worksheets must have unique names"
for worksheet in self.workbook_obj.Sheets:
i = 1
original_name = worksheet.Name
while worksheet.Name in sheet_names:
worksheet.Name = "%s_%d" % (original_name, i)
i += 1
for worksheet, sheet in zip(self.workbook_obj.Sheets, self.worksheets):
worksheet.Name = sheet.name
# Export each sheet (have to use itersheets for this as it sets the
# current active sheet before yielding each one).
for worksheet, sheet in zip(self.workbook_obj.Sheets, self.itersheets()):
worksheet.Select()
sheet.to_excel(workbook=self,
worksheet=worksheet,
xl_app=xl_app,
rename=False,
resize_columns=resize_columns)
return self.workbook_obj
def get_last_sheet(self):
return self.workbook_obj.Sheets[self.workbook_obj.Sheets.Count]
def add_xlsx_worksheet(self, worksheet, name):
if worksheet not in self.worksheets:
self.append(worksheet)
return self.workbook_obj.add_worksheet(name)
def add_excel_worksheet(self, after=None):
if after is None:
after = self.get_last_sheet()
return self.workbook_obj.Sheets.Add(After=after)
def add_format(self, *args, **kwargs):
return self.workbook_obj.add_format(*args, **kwargs)
def get_table(self, name):
"""
Return a table, worksheet pair for the named table
"""
if name is None:
assert self.active_table, "Can't get table without name unless an active table is set"
name = self.active_table.name
if self.active_worksheet:
table = self.active_worksheet.get_table(name)
assert table is self.active_table, "Active table is not from the active sheet"
return table, self.active_worksheet
for ws in self.worksheets:
try:
table = ws.get_table(name)
if table is self.active_table:
return table, ws
except KeyError:
pass
raise RuntimeError("Active table not found in any sheet")
# if the tablename explicitly uses the sheetname find the right sheet
if "!" in name:
ws_name, table_name = map(lambda x: x.strip("'"), name.split("!", 1))
for ws in self.worksheets:
if ws.name == ws_name:
table = ws.get_table(table_name)
return table, ws
raise KeyError(name)
# otherwise look in the current table
if self.active_worksheet:
table = self.active_worksheet.get_table(name)
return table, self.active_worksheet
# or fallback to the first matching name in any table
for ws in self.worksheets:
try:
table = ws.get_table(name)
return table, ws
except KeyError:
pass
raise KeyError(name)
|
def get_table(self, name):
    """Return a (table, worksheet) pair for the named table.

    ``name`` may be None (resolve via the active table set during export),
    a "'Sheet'!Table" reference, or a bare table name which is looked up
    first on the active worksheet and then on every worksheet in order.

    Raises KeyError when no table matches, or RuntimeError when the active
    table cannot be located on any sheet.
    """
    if name is None:
        # No name given: fall back to the active table.
        assert self.active_table, "Can't get table without name unless an active table is set"
        name = self.active_table.name
        if self.active_worksheet:
            table = self.active_worksheet.get_table(name)
            assert table is self.active_table, "Active table is not from the active sheet"
            return table, self.active_worksheet
        for ws in self.worksheets:
            try:
                candidate = ws.get_table(name)
            except KeyError:
                continue
            if candidate is self.active_table:
                return candidate, ws
        raise RuntimeError("Active table not found in any sheet")

    if "!" in name:
        # Explicit sheet reference, e.g. "'Sheet 1'!Table".
        ws_name, table_name = (part.strip("'") for part in name.split("!", 1))
        for ws in self.worksheets:
            if ws.name == ws_name:
                return ws.get_table(table_name), ws
        raise KeyError(name)

    # Bare name: prefer the active worksheet...
    if self.active_worksheet:
        return self.active_worksheet.get_table(name), self.active_worksheet

    # ...otherwise the first worksheet that knows the table.
    for ws in self.worksheets:
        try:
            return ws.get_table(name), ws
        except KeyError:
            continue
    raise KeyError(name)
"""
A workbook is an ordered collection of worksheets.
Once all worksheets have been added the workbook can be written out or
the worksheets can be iterated over, and any expressions present in the
tables of the worksheets will be resolved to absolute worksheet/cell references.
:param str filename: Filename the workbook will be written to.
:param list worksheets: List of :py:class:`xltable.Worksheet` instances.
"""
def __init__(self, filename=None, worksheets=[]):
self.filename = filename
self.worksheets = list(worksheets)
self.calc_mode = "auto"
self.workbook_obj = None
# The active table and worksheet objects are set during export, and
# are used to resolve expressions where the table and/or sheet isn't
# set explicitly (in which case the current table is used implicitly).
self.active_table = None
self.active_worksheet = None
def add_sheet(self, worksheet):
"""
Adds a worksheet to the workbook.
"""
self.worksheets.append(worksheet)
# alias for add_sheet
append = add_sheet
def set_calc_mode(self, mode):
"""
Set the calculation mode for the Excel workbook
"""
self.calc_mode = mode
def itersheets(self):
"""
Iterates over the worksheets in the book, and sets the active
worksheet as the current one before yielding.
"""
for ws in self.worksheets:
# Expression with no explicit table specified will use None
# when calling get_table, which should return the current worksheet/table
prev_ws = self.active_worksheet
self.active_worksheet = ws
try:
yield ws
finally:
self.active_worksheet = prev_ws
def to_xlsx(self, **kwargs):
"""
Write workbook to a .xlsx file using xlsxwriter.
Return a xlsxwriter.workbook.Workbook.
:param kwargs: Extra arguments passed to the xlsxwriter.Workbook
constructor.
"""
from xlsxwriter.workbook import Workbook as _Workbook
self.workbook_obj = _Workbook(**kwargs)
self.workbook_obj.set_calc_mode(self.calc_mode)
for worksheet in self.itersheets():
worksheet.to_xlsx(workbook=self)
self.workbook_obj.filename = self.filename
if self.filename:
self.workbook_obj.close()
return self.workbook_obj
def to_excel(self, xl_app=None, resize_columns=True):
from win32com.client import Dispatch, gencache
if xl_app is None:
xl_app = Dispatch("Excel.Application")
xl_app = gencache.EnsureDispatch(xl_app)
# Add a new workbook with the correct number of sheets.
# We aren't allowed to create an empty one.
assert self.worksheets, "Can't export workbook with no worksheets"
sheets_in_new_workbook = xl_app.SheetsInNewWorkbook
try:
xl_app.SheetsInNewWorkbook = float(len(self.worksheets))
self.workbook_obj = xl_app.Workbooks.Add()
finally:
xl_app.SheetsInNewWorkbook = sheets_in_new_workbook
# Rename the worksheets, ensuring that there can never be two sheets with the same
# name due to the sheets default names conflicting with the new names.
sheet_names = {s.name for s in self.worksheets}
assert len(sheet_names) == len(self.worksheets), "Worksheets must have unique names"
for worksheet in self.workbook_obj.Sheets:
i = 1
original_name = worksheet.Name
while worksheet.Name in sheet_names:
worksheet.Name = "%s_%d" % (original_name, i)
i += 1
for worksheet, sheet in zip(self.workbook_obj.Sheets, self.worksheets):
worksheet.Name = sheet.name
# Export each sheet (have to use itersheets for this as it sets the
# current active sheet before yielding each one).
for worksheet, sheet in zip(self.workbook_obj.Sheets, self.itersheets()):
worksheet.Select()
sheet.to_excel(workbook=self,
worksheet=worksheet,
xl_app=xl_app,
rename=False,
resize_columns=resize_columns)
return self.workbook_obj
def get_last_sheet(self):
return self.workbook_obj.Sheets[self.workbook_obj.Sheets.Count]
def add_xlsx_worksheet(self, worksheet, name):
if worksheet not in self.worksheets:
self.append(worksheet)
return self.workbook_obj.add_worksheet(name)
def add_excel_worksheet(self, after=None):
if after is None:
after = self.get_last_sheet()
return self.workbook_obj.Sheets.Add(After=after)
def add_format(self, *args, **kwargs):
return self.workbook_obj.add_format(*args, **kwargs)
|
def clone(self, **kwargs):
    """Create a copy of this Table, optionally with some properties changed.

    Captures this instance's constructor arguments, applies any keyword
    overrides from ``kwargs``, and builds a new instance of the same class.
    """
    kw = dict(
        name=self.__name,
        dataframe=self.__df,
        include_columns=self.__include_columns,
        include_index=self.__include_index,
        style=self.__style,
        column_styles=self.__col_styles,
        column_widths=self.__column_widths,
        row_styles=self.__row_styles,
        header_style=self.header_style,
        index_style=self.index_style,
    )
    kw.update(kwargs)
    return self.__class__(**kw)
"""
Represents of table of data to be written to Excel, and
may include :py:class:`xltable.Expression`s that will be converted into Excel
formulas when the table's position is fixed.
:param str name: Name of the table so it can be referenced by other tables and charts.
:param pandas.DataFrame dataframe: Dataframe containing the data for the table.
:param bool include_columns: Include the column names when outputting.
:param bool include_index: Include the index when outputting.
:param xltable.TableStyle style: Table style, or one of the named styles 'default' or 'plain'.
:param xltable.CellStyle column_styles: Dictionary of column names to styles or named styles.
:param dict column_widths: Dictionary of column names to widths.
:param xltable.CellStyle header_style: Style or named style to use for the cells in the header row.
:param xltable.CellStyle index_style: Style or named style to use for the cells in the index column.
Named table styles:
- default: blue stripes
- plain: no style
Named cell styles:
- pct: pecentage with two decimal places.
- iso-date: date in YYYY-MM-DD format.
- 2dp: two decimal places.
- 2dpc: thousand separated number to two decimal places.
"""
_styles = {
"default": TableStyle(),
"plain": TableStyle(stripe_colors=None)
}
_named_styles = {
"pct": CellStyle(is_percentage=True, decimal_places=2),
"iso-date": CellStyle(date_format="%Y-%m-%d"),
"2dp": CellStyle(decimal_places=2),
"2dpc": CellStyle(decimal_places=2, thousands_sep=True),
}
def __init__(self,
name,
dataframe,
include_columns=True,
include_index=False,
style="default",
column_styles={},
column_widths={},
row_styles={},
header_style=None,
index_style=None):
self.__name = name
self.__df = dataframe
self.__position = None
self.__include_columns = include_columns
self.__include_index = include_index
self.__column_widths = column_widths
if isinstance(style, TableStyle):
self.__style = style
else:
self.__style = self._styles.get(style)
self.__col_styles = {}
for col, style in column_styles.items():
if isinstance(style, CellStyle):
self.__col_styles[col] = style
else:
self.__col_styles[col] = self._named_styles[style]
self.__row_styles = {}
for row, style in row_styles.items():
if isinstance(style, CellStyle):
self.__row_styles[row] = style
else:
self.__row_styles[row] = self._named_styles[style]
self.header_style = header_style
self.index_style = index_style
@property
def name(self):
return self.__name
@property
def dataframe(self):
return self.__df
@property
def style(self):
return self.__style
@property
def column_styles(self):
return self.__col_styles
@property
def row_styles(self):
return self.__row_styles
@property
def column_widths(self):
return self.__column_widths
@property
def cell_styles(self):
"""dict of {(row name, col name): style}"""
styles = {}
for colname, col in self.dataframe.items():
for rowname, value in col.items():
if isinstance(value, Value) and value.style is not None:
style = value.style
if not isinstance(style, CellStyle):
style = self._named_styles[style]
styles[(rowname, colname)] = style
return styles
@property
def width(self):
return len(self.dataframe.columns) + self.row_labels_width
@property
def height(self):
return len(self.dataframe.index) + self.header_height
@property
def header_height(self):
if self.__include_columns:
if isinstance(self.dataframe.columns, pa.MultiIndex):
return len(self.dataframe.columns.names)
return 1
return 0
@property
def row_labels_width(self):
if self.__include_index:
if isinstance(self.dataframe.index, pa.MultiIndex):
return len(self.dataframe.index.names)
return 1
return 0
def get_column_offset(self, col):
try:
offset = self.dataframe.columns.get_loc(col)
except KeyError:
raise KeyError("Column '%s' not found in table %s" % (col, self.name))
offset += self.row_labels_width
return offset
def get_index_offset(self):
if self.__include_index:
return 0
raise KeyError("Table '%s' has no index" % self.name)
def get_row_offset(self, row):
try:
offset = self.dataframe.index.get_loc(row)
except KeyError:
raise KeyError("Row '%s' not found in table %s" % (row, self.name))
offset += self.header_height
return offset
def get_data(self, workbook, row, col, formula_values={}):
"""
:return: 2d numpy array for this table with any formulas resolved to the final
excel formula.
:param xltable.Workbook workbook: Workbook the table has been added to.
:param int row: Row where the table will start in the sheet (used for resolving formulas).
:param int col: Column where the table will start in the sheet (used for resolving formulas).
:param formula_values: dict to add pre-calculated formula values to (keyed by row, col).
"""
if workbook:
prev_table = workbook.active_table
workbook.active_table = self
try:
return self._get_data_impl(workbook, row, col, formula_values)
finally:
if workbook:
workbook.active_table = prev_table
def _get_data_impl(self, workbook, row, col, formula_values={}):
    """Build the 2d object array written to the sheet (internal; see get_data).

    Resolves Value/Expression cells and, when enabled, folds the column header
    row(s) and index column(s) into the value grid itself.

    NOTE(review): ``formula_values={}`` is a mutable default that this method
    writes into; callers normally pass a dict via get_data — confirm no direct
    caller relies on the shared default.
    """
    df = self.dataframe.copy()
    # replace any Value instances with their value
    if df.applymap(lambda x: isinstance(x, Value)).any().any():
        df = df.applymap(lambda x: x.value if isinstance(x, Value) else x)
    # create a mask for elements that are expressions
    mask_df = df.applymap(lambda x: isinstance(x, Expression))
    # resolve any expressions if there are any
    if mask_df.any().any():
        # create a dataframe for indexing both into the dataframe and with the column and
        # row numbers.
        idx = [[(r, c) for r in range(len((df.index)))] for c in range(len((df.columns)))]
        index_df = pa.DataFrame(dict(zip(df.columns, idx)), columns=df.columns, index=df.index)
        # convert everything to objects so mask setting works
        df = df.astype(object)
        # offsets from the table origin to the first data cell (header rows /
        # index columns come before the data block)
        col_offset = self.row_labels_width
        row_offset = self.header_height
        # resolve all elements and set back into the main dataframe
        def get_formula(df, element):
            if pa.isnull(element):
                return element
            r, c = element
            expr = df.iat[r, c]
            r += row_offset
            c += col_offset
            # record any pre-computed value keyed by absolute sheet position
            if expr.has_value:
                formula_values[(r + row, c + col)] = expr.value
            return expr.get_formula(workbook, r, c)
        df[mask_df] = index_df[mask_df].applymap(partial(get_formula, df))
    # add the index and or columns to the values part of the dataframe
    if self.__include_index or self.__include_columns:
        index = df.index
        if self.__include_columns:
            # add the index names to the top of the index to create a new row for the column headers
            if isinstance(index, pa.MultiIndex):
                # de-duplicate the synthetic header label against existing index labels
                index_names = tuple((x or "" for x in df.index.names))
                i = 1
                while index_names in df.index:
                    index_names = tuple(("%s_%d" % (x or "", i) for x in df.index.names))
                    i += 1
                index_tuples = [index_names] + list(df.index.astype(object))
                if isinstance(df.columns, pa.MultiIndex):
                    # one extra header row per additional column level
                    blank_tuple = tuple([None] * len(df.index.names))
                    index_tuples = ([blank_tuple] * (len(df.columns.levels) - 1)) + index_tuples
                index = pa.MultiIndex.from_tuples(index_tuples)
            else:
                index_name = df.index.name
                i = 1
                while index_name in df.index:
                    index_name = "%s_%d" % (df.index.name, i)
                    i += 1
                index = [index_name] + list(df.index.astype(object))
                if isinstance(df.columns, pa.MultiIndex):
                    index = ([None] * (len(df.columns.levels) - 1)) + index
        columns = df.columns
        if self.__include_index:
            # add the column names to the left of the columns to create a new row for the index headers
            if isinstance(columns, pa.MultiIndex):
                columns_names = tuple((x or "" for x in df.columns.names))
                i = 1
                while columns_names in df.columns:
                    columns_names = tuple(("%s_%d" % (x or "", i) for x in df.columns.names))
                    i += 1
                column_tuples = [columns_names] + list(df.columns.astype(object))
                if isinstance(df.index, pa.MultiIndex):
                    blank_tuple = tuple([None] * len(df.columns.names))
                    column_tuples = ([blank_tuple] * (len(df.index.levels) - 1)) + column_tuples
                columns = pa.MultiIndex.from_tuples(column_tuples)
            else:
                columns_name = df.columns.name or ""
                i = 1
                while columns_name in df.columns:
                    columns_name = "%s_%d" % (df.columns.name, i)
                    i += 1
                columns = [columns_name] + list(df.columns.astype(object))
                if isinstance(df.index, pa.MultiIndex):
                    columns = ([None] * (len(df.index.levels) - 1)) + columns
        # grow the frame to make room, then copy the labels into the data cells
        df = df.reindex(index=index, columns=columns).astype(object)
        if self.__include_columns:
            if isinstance(df.columns, pa.MultiIndex):
                for i in range(len(df.columns.levels)):
                    df.iloc[i, :] = [c[i] for c in df.columns.values]
            else:
                df.iloc[0, :] = df.columns
        if self.__include_index:
            if isinstance(df.index, pa.MultiIndex):
                for i in range(len(df.index.levels)):
                    df.iloc[:, i] = [c[i] for c in df.index.values]
            else:
                df.iloc[:, 0] = df.index
    # return the values as an np array
    return df.values
|
fkarb/xltable | xltable/table.py | Table.cell_styles | python | def cell_styles(self):
styles = {}
for colname, col in self.dataframe.items():
for rowname, value in col.items():
if isinstance(value, Value) and value.style is not None:
style = value.style
if not isinstance(style, CellStyle):
style = self._named_styles[style]
styles[(rowname, colname)] = style
return styles | dict of {(row name, col name): style} | train | https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/table.py#L147-L157 | null | class Table(object):
"""
Represents of table of data to be written to Excel, and
may include :py:class:`xltable.Expression`s that will be converted into Excel
formulas when the table's position is fixed.
:param str name: Name of the table so it can be referenced by other tables and charts.
:param pandas.DataFrame dataframe: Dataframe containing the data for the table.
:param bool include_columns: Include the column names when outputting.
:param bool include_index: Include the index when outputting.
:param xltable.TableStyle style: Table style, or one of the named styles 'default' or 'plain'.
:param xltable.CellStyle column_styles: Dictionary of column names to styles or named styles.
:param dict column_widths: Dictionary of column names to widths.
:param xltable.CellStyle header_style: Style or named style to use for the cells in the header row.
:param xltable.CellStyle index_style: Style or named style to use for the cells in the index column.
Named table styles:
- default: blue stripes
- plain: no style
Named cell styles:
- pct: pecentage with two decimal places.
- iso-date: date in YYYY-MM-DD format.
- 2dp: two decimal places.
- 2dpc: thousand separated number to two decimal places.
"""
_styles = {
"default": TableStyle(),
"plain": TableStyle(stripe_colors=None)
}
_named_styles = {
"pct": CellStyle(is_percentage=True, decimal_places=2),
"iso-date": CellStyle(date_format="%Y-%m-%d"),
"2dp": CellStyle(decimal_places=2),
"2dpc": CellStyle(decimal_places=2, thousands_sep=True),
}
def __init__(self,
name,
dataframe,
include_columns=True,
include_index=False,
style="default",
column_styles={},
column_widths={},
row_styles={},
header_style=None,
index_style=None):
self.__name = name
self.__df = dataframe
self.__position = None
self.__include_columns = include_columns
self.__include_index = include_index
self.__column_widths = column_widths
if isinstance(style, TableStyle):
self.__style = style
else:
self.__style = self._styles.get(style)
self.__col_styles = {}
for col, style in column_styles.items():
if isinstance(style, CellStyle):
self.__col_styles[col] = style
else:
self.__col_styles[col] = self._named_styles[style]
self.__row_styles = {}
for row, style in row_styles.items():
if isinstance(style, CellStyle):
self.__row_styles[row] = style
else:
self.__row_styles[row] = self._named_styles[style]
self.header_style = header_style
self.index_style = index_style
def clone(self, **kwargs):
"""Create a clone of the Table, optionally with some properties changed"""
init_kwargs = {
"name": self.__name,
"dataframe": self.__df,
"include_columns": self.__include_columns,
"include_index": self.__include_index,
"style": self.__style,
"column_styles": self.__col_styles,
"column_widths": self.__column_widths,
"row_styles": self.__row_styles,
"header_style": self.header_style,
"index_style": self.index_style
}
init_kwargs.update(kwargs)
return self.__class__(**init_kwargs)
@property
def name(self):
return self.__name
@property
def dataframe(self):
return self.__df
@property
def style(self):
return self.__style
@property
def column_styles(self):
return self.__col_styles
@property
def row_styles(self):
return self.__row_styles
@property
def column_widths(self):
return self.__column_widths
@property
@property
def width(self):
return len(self.dataframe.columns) + self.row_labels_width
@property
def height(self):
return len(self.dataframe.index) + self.header_height
@property
def header_height(self):
if self.__include_columns:
if isinstance(self.dataframe.columns, pa.MultiIndex):
return len(self.dataframe.columns.names)
return 1
return 0
@property
def row_labels_width(self):
if self.__include_index:
if isinstance(self.dataframe.index, pa.MultiIndex):
return len(self.dataframe.index.names)
return 1
return 0
def get_column_offset(self, col):
try:
offset = self.dataframe.columns.get_loc(col)
except KeyError:
raise KeyError("Column '%s' not found in table %s" % (col, self.name))
offset += self.row_labels_width
return offset
def get_index_offset(self):
if self.__include_index:
return 0
raise KeyError("Table '%s' has no index" % self.name)
def get_row_offset(self, row):
try:
offset = self.dataframe.index.get_loc(row)
except KeyError:
raise KeyError("Row '%s' not found in table %s" % (row, self.name))
offset += self.header_height
return offset
def get_data(self, workbook, row, col, formula_values={}):
"""
:return: 2d numpy array for this table with any formulas resolved to the final
excel formula.
:param xltable.Workbook workbook: Workbook the table has been added to.
:param int row: Row where the table will start in the sheet (used for resolving formulas).
:param int col: Column where the table will start in the sheet (used for resolving formulas).
:param formula_values: dict to add pre-calculated formula values to (keyed by row, col).
"""
if workbook:
prev_table = workbook.active_table
workbook.active_table = self
try:
return self._get_data_impl(workbook, row, col, formula_values)
finally:
if workbook:
workbook.active_table = prev_table
def _get_data_impl(self, workbook, row, col, formula_values={}):
df = self.dataframe.copy()
# replace any Value instances with their value
if df.applymap(lambda x: isinstance(x, Value)).any().any():
df = df.applymap(lambda x: x.value if isinstance(x, Value) else x)
# create a mask for elements that are expressions
mask_df = df.applymap(lambda x: isinstance(x, Expression))
# resolve any expressions if there are any
if mask_df.any().any():
# create a dataframe for indexing both into the dataframe and with the column and
# row numbers.
idx = [[(r, c) for r in range(len((df.index)))] for c in range(len((df.columns)))]
index_df = pa.DataFrame(dict(zip(df.columns, idx)), columns=df.columns, index=df.index)
# convert everything to objects so mask setting works
df = df.astype(object)
col_offset = self.row_labels_width
row_offset = self.header_height
# resolve all elements and set back into the main dataframe
def get_formula(df, element):
if pa.isnull(element):
return element
r, c = element
expr = df.iat[r, c]
r += row_offset
c += col_offset
if expr.has_value:
formula_values[(r + row, c + col)] = expr.value
return expr.get_formula(workbook, r, c)
df[mask_df] = index_df[mask_df].applymap(partial(get_formula, df))
# add the index and or columns to the values part of the dataframe
if self.__include_index or self.__include_columns:
index = df.index
if self.__include_columns:
# add the index names to the top of the index to create a new row for the column headers
if isinstance(index, pa.MultiIndex):
index_names = tuple((x or "" for x in df.index.names))
i = 1
while index_names in df.index:
index_names = tuple(("%s_%d" % (x or "", i) for x in df.index.names))
i += 1
index_tuples = [index_names] + list(df.index.astype(object))
if isinstance(df.columns, pa.MultiIndex):
blank_tuple = tuple([None] * len(df.index.names))
index_tuples = ([blank_tuple] * (len(df.columns.levels) - 1)) + index_tuples
index = pa.MultiIndex.from_tuples(index_tuples)
else:
index_name = df.index.name
i = 1
while index_name in df.index:
index_name = "%s_%d" % (df.index.name, i)
i += 1
index = [index_name] + list(df.index.astype(object))
if isinstance(df.columns, pa.MultiIndex):
index = ([None] * (len(df.columns.levels) - 1)) + index
columns = df.columns
if self.__include_index:
# add the column names to the left of the columns to create a new row for the index headers
if isinstance(columns, pa.MultiIndex):
columns_names = tuple((x or "" for x in df.columns.names))
i = 1
while columns_names in df.columns:
columns_names = tuple(("%s_%d" % (x or "", i) for x in df.columns.names))
i += 1
column_tuples = [columns_names] + list(df.columns.astype(object))
if isinstance(df.index, pa.MultiIndex):
blank_tuple = tuple([None] * len(df.columns.names))
column_tuples = ([blank_tuple] * (len(df.index.levels) - 1)) + column_tuples
columns = pa.MultiIndex.from_tuples(column_tuples)
else:
columns_name = df.columns.name or ""
i = 1
while columns_name in df.columns:
columns_name = "%s_%d" % (df.columns.name, i)
i += 1
columns = [columns_name] + list(df.columns.astype(object))
if isinstance(df.index, pa.MultiIndex):
columns = ([None] * (len(df.index.levels) - 1)) + columns
df = df.reindex(index=index, columns=columns).astype(object)
if self.__include_columns:
if isinstance(df.columns, pa.MultiIndex):
for i in range(len(df.columns.levels)):
df.iloc[i, :] = [c[i] for c in df.columns.values]
else:
df.iloc[0, :] = df.columns
if self.__include_index:
if isinstance(df.index, pa.MultiIndex):
for i in range(len(df.index.levels)):
df.iloc[:, i] = [c[i] for c in df.index.values]
else:
df.iloc[:, 0] = df.index
# return the values as an np array
return df.values
|
fkarb/xltable | xltable/table.py | Table.get_data | python | def get_data(self, workbook, row, col, formula_values={}):
if workbook:
prev_table = workbook.active_table
workbook.active_table = self
try:
return self._get_data_impl(workbook, row, col, formula_values)
finally:
if workbook:
workbook.active_table = prev_table | :return: 2d numpy array for this table with any formulas resolved to the final
excel formula.
:param xltable.Workbook workbook: Workbook the table has been added to.
:param int row: Row where the table will start in the sheet (used for resolving formulas).
:param int col: Column where the table will start in the sheet (used for resolving formulas).
:param formula_values: dict to add pre-calculated formula values to (keyed by row, col). | train | https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/table.py#L204-L220 | [
"def _get_data_impl(self, workbook, row, col, formula_values={}):\n df = self.dataframe.copy()\n\n # replace any Value instances with their value\n if df.applymap(lambda x: isinstance(x, Value)).any().any():\n df = df.applymap(lambda x: x.value if isinstance(x, Value) else x)\n\n # create a mask ... | class Table(object):
"""
Represents of table of data to be written to Excel, and
may include :py:class:`xltable.Expression`s that will be converted into Excel
formulas when the table's position is fixed.
:param str name: Name of the table so it can be referenced by other tables and charts.
:param pandas.DataFrame dataframe: Dataframe containing the data for the table.
:param bool include_columns: Include the column names when outputting.
:param bool include_index: Include the index when outputting.
:param xltable.TableStyle style: Table style, or one of the named styles 'default' or 'plain'.
:param xltable.CellStyle column_styles: Dictionary of column names to styles or named styles.
:param dict column_widths: Dictionary of column names to widths.
:param xltable.CellStyle header_style: Style or named style to use for the cells in the header row.
:param xltable.CellStyle index_style: Style or named style to use for the cells in the index column.
Named table styles:
- default: blue stripes
- plain: no style
Named cell styles:
- pct: pecentage with two decimal places.
- iso-date: date in YYYY-MM-DD format.
- 2dp: two decimal places.
- 2dpc: thousand separated number to two decimal places.
"""
_styles = {
"default": TableStyle(),
"plain": TableStyle(stripe_colors=None)
}
_named_styles = {
"pct": CellStyle(is_percentage=True, decimal_places=2),
"iso-date": CellStyle(date_format="%Y-%m-%d"),
"2dp": CellStyle(decimal_places=2),
"2dpc": CellStyle(decimal_places=2, thousands_sep=True),
}
def __init__(self,
name,
dataframe,
include_columns=True,
include_index=False,
style="default",
column_styles={},
column_widths={},
row_styles={},
header_style=None,
index_style=None):
self.__name = name
self.__df = dataframe
self.__position = None
self.__include_columns = include_columns
self.__include_index = include_index
self.__column_widths = column_widths
if isinstance(style, TableStyle):
self.__style = style
else:
self.__style = self._styles.get(style)
self.__col_styles = {}
for col, style in column_styles.items():
if isinstance(style, CellStyle):
self.__col_styles[col] = style
else:
self.__col_styles[col] = self._named_styles[style]
self.__row_styles = {}
for row, style in row_styles.items():
if isinstance(style, CellStyle):
self.__row_styles[row] = style
else:
self.__row_styles[row] = self._named_styles[style]
self.header_style = header_style
self.index_style = index_style
def clone(self, **kwargs):
"""Create a clone of the Table, optionally with some properties changed"""
init_kwargs = {
"name": self.__name,
"dataframe": self.__df,
"include_columns": self.__include_columns,
"include_index": self.__include_index,
"style": self.__style,
"column_styles": self.__col_styles,
"column_widths": self.__column_widths,
"row_styles": self.__row_styles,
"header_style": self.header_style,
"index_style": self.index_style
}
init_kwargs.update(kwargs)
return self.__class__(**init_kwargs)
@property
def name(self):
return self.__name
@property
def dataframe(self):
return self.__df
@property
def style(self):
return self.__style
@property
def column_styles(self):
return self.__col_styles
@property
def row_styles(self):
return self.__row_styles
@property
def column_widths(self):
return self.__column_widths
@property
def cell_styles(self):
"""dict of {(row name, col name): style}"""
styles = {}
for colname, col in self.dataframe.items():
for rowname, value in col.items():
if isinstance(value, Value) and value.style is not None:
style = value.style
if not isinstance(style, CellStyle):
style = self._named_styles[style]
styles[(rowname, colname)] = style
return styles
@property
def width(self):
return len(self.dataframe.columns) + self.row_labels_width
@property
def height(self):
return len(self.dataframe.index) + self.header_height
@property
def header_height(self):
if self.__include_columns:
if isinstance(self.dataframe.columns, pa.MultiIndex):
return len(self.dataframe.columns.names)
return 1
return 0
@property
def row_labels_width(self):
if self.__include_index:
if isinstance(self.dataframe.index, pa.MultiIndex):
return len(self.dataframe.index.names)
return 1
return 0
def get_column_offset(self, col):
try:
offset = self.dataframe.columns.get_loc(col)
except KeyError:
raise KeyError("Column '%s' not found in table %s" % (col, self.name))
offset += self.row_labels_width
return offset
def get_index_offset(self):
if self.__include_index:
return 0
raise KeyError("Table '%s' has no index" % self.name)
def get_row_offset(self, row):
try:
offset = self.dataframe.index.get_loc(row)
except KeyError:
raise KeyError("Row '%s' not found in table %s" % (row, self.name))
offset += self.header_height
return offset
def _get_data_impl(self, workbook, row, col, formula_values={}):
df = self.dataframe.copy()
# replace any Value instances with their value
if df.applymap(lambda x: isinstance(x, Value)).any().any():
df = df.applymap(lambda x: x.value if isinstance(x, Value) else x)
# create a mask for elements that are expressions
mask_df = df.applymap(lambda x: isinstance(x, Expression))
# resolve any expressions if there are any
if mask_df.any().any():
# create a dataframe for indexing both into the dataframe and with the column and
# row numbers.
idx = [[(r, c) for r in range(len((df.index)))] for c in range(len((df.columns)))]
index_df = pa.DataFrame(dict(zip(df.columns, idx)), columns=df.columns, index=df.index)
# convert everything to objects so mask setting works
df = df.astype(object)
col_offset = self.row_labels_width
row_offset = self.header_height
# resolve all elements and set back into the main dataframe
def get_formula(df, element):
if pa.isnull(element):
return element
r, c = element
expr = df.iat[r, c]
r += row_offset
c += col_offset
if expr.has_value:
formula_values[(r + row, c + col)] = expr.value
return expr.get_formula(workbook, r, c)
df[mask_df] = index_df[mask_df].applymap(partial(get_formula, df))
# add the index and or columns to the values part of the dataframe
if self.__include_index or self.__include_columns:
index = df.index
if self.__include_columns:
# add the index names to the top of the index to create a new row for the column headers
if isinstance(index, pa.MultiIndex):
index_names = tuple((x or "" for x in df.index.names))
i = 1
while index_names in df.index:
index_names = tuple(("%s_%d" % (x or "", i) for x in df.index.names))
i += 1
index_tuples = [index_names] + list(df.index.astype(object))
if isinstance(df.columns, pa.MultiIndex):
blank_tuple = tuple([None] * len(df.index.names))
index_tuples = ([blank_tuple] * (len(df.columns.levels) - 1)) + index_tuples
index = pa.MultiIndex.from_tuples(index_tuples)
else:
index_name = df.index.name
i = 1
while index_name in df.index:
index_name = "%s_%d" % (df.index.name, i)
i += 1
index = [index_name] + list(df.index.astype(object))
if isinstance(df.columns, pa.MultiIndex):
index = ([None] * (len(df.columns.levels) - 1)) + index
columns = df.columns
if self.__include_index:
# add the column names to the left of the columns to create a new row for the index headers
if isinstance(columns, pa.MultiIndex):
columns_names = tuple((x or "" for x in df.columns.names))
i = 1
while columns_names in df.columns:
columns_names = tuple(("%s_%d" % (x or "", i) for x in df.columns.names))
i += 1
column_tuples = [columns_names] + list(df.columns.astype(object))
if isinstance(df.index, pa.MultiIndex):
blank_tuple = tuple([None] * len(df.columns.names))
column_tuples = ([blank_tuple] * (len(df.index.levels) - 1)) + column_tuples
columns = pa.MultiIndex.from_tuples(column_tuples)
else:
columns_name = df.columns.name or ""
i = 1
while columns_name in df.columns:
columns_name = "%s_%d" % (df.columns.name, i)
i += 1
columns = [columns_name] + list(df.columns.astype(object))
if isinstance(df.index, pa.MultiIndex):
columns = ([None] * (len(df.index.levels) - 1)) + columns
df = df.reindex(index=index, columns=columns).astype(object)
if self.__include_columns:
if isinstance(df.columns, pa.MultiIndex):
for i in range(len(df.columns.levels)):
df.iloc[i, :] = [c[i] for c in df.columns.values]
else:
df.iloc[0, :] = df.columns
if self.__include_index:
if isinstance(df.index, pa.MultiIndex):
for i in range(len(df.index.levels)):
df.iloc[:, i] = [c[i] for c in df.index.values]
else:
df.iloc[:, 0] = df.index
# return the values as an np array
return df.values
|
fkarb/xltable | xltable/expression.py | _to_addr | python | def _to_addr(worksheet, row, col, row_fixed=False, col_fixed=False):
addr = ""
A = ord('A')
col += 1
while col > 0:
addr = chr(A + ((col - 1) % 26)) + addr
col = (col - 1) // 26
prefix = ("'%s'!" % worksheet) if worksheet else ""
col_modifier = "$" if col_fixed else ""
row_modifier = "$" if row_fixed else ""
return prefix + "%s%s%s%d" % (col_modifier, addr, row_modifier, row+1) | converts a (0,0) based coordinate to an excel address | train | https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/expression.py#L358-L370 | null | """
Expressions for building excel formulas without having to use concrete positions.
"""
import operator
import re
class Expression(object):
    """
    Base class for all worksheet expressions.

    Expressions are used to build formulas referencing ranges in the
    worksheet by labels which are resolved to cell references when the
    worksheet is written out.

    Expressions may be combined using binary operators.

    NOTE(review): defining __eq__ without __hash__ makes instances unhashable
    in Python 3; this appears deliberate (operators build formula trees, not
    comparisons) — confirm before using expressions as dict keys.
    """
    def __init__(self, value=None):
        # Only store a value when one is supplied, so has_value can tell
        # "never set" apart from an explicit value.
        if value is not None:
            self.value = value

    # -- operator overloads build BinOp trees using Excel operator symbols --
    def __add__(self, other):
        return BinOp(self, _make_expr(other), "+")

    def __sub__(self, other):
        return BinOp(self, _make_expr(other), "-")

    def __mul__(self, other):
        return BinOp(self, _make_expr(other), "*")

    def __truediv__(self, other):
        return BinOp(self, _make_expr(other), "/")

    def __lt__(self, other):
        return BinOp(self, _make_expr(other), "<")

    def __le__(self, other):
        return BinOp(self, _make_expr(other), "<=")

    def __eq__(self, other):
        return BinOp(self, _make_expr(other), "=")

    def __ne__(self, other):
        # Excel's not-equal symbol
        return BinOp(self, _make_expr(other), "<>")

    def __gt__(self, other):
        return BinOp(self, _make_expr(other), ">")

    def __ge__(self, other):
        return BinOp(self, _make_expr(other), ">=")

    def __and__(self, other):
        return BinOp(self, _make_expr(other), "&")

    def get_formula(self, workbook, row, col):
        """Return the final Excel formula string, including the leading '='."""
        return "=%s" % self._strip(self.resolve(workbook, row, col))

    @property
    def value(self):
        """Set a calculated value for this Expression.

        Used when writing formulas using XlsxWriter to give cells
        an initial value when the sheet is loaded without being calculated.
        """
        try:
            # delegate to a wrapped expression's value when one was stored
            if isinstance(self.__value, Expression):
                return self.__value.value
            return self.__value
        except AttributeError:
            return 0

    @property
    def has_value(self):
        """return True if value has been set"""
        try:
            if isinstance(self.__value, Expression):
                return self.__value.has_value
            return True
        except AttributeError:
            return False

    @value.setter
    def value(self, value):
        self.__value = value

    @staticmethod
    def _strip(x):
        # strip off the outer parentheses if they match
        # (raw string: was "^\(..." which relied on non-raw escape behaviour)
        return re.sub(r"^\((.*)\)$", r"\1", x)

    def resolve(self, workbook, row, col):
        """Return the formula text (no leading '=') for this expression.

        Fix: the abstract signature previously was (workbook, worksheet, col,
        row), which matched neither the subclasses' overrides nor the call in
        get_formula — all use (workbook, row, col).
        """
        raise NotImplementedError("Expression.resolve")
class Cell(Expression):
    """
    Reference to a cell in a table.

    :param col: Column label this refers to.
    :param row: Row label this refers to, or None to use the current row.
    :param row_offset: Offset from the row, used when resolving.
    :param table: Name of table the column is in, if not in the same table this expression is in.
        Use "%s!%s" % (worksheet.name, table.name) if refering to a table in another worksheet
    :param col_fixed: If True when converted to an address the column will be fixed.
    :param row_fixed: If True when converted to an address the row will be fixed.
    """
    def __init__(self, col, row=None, row_offset=0, table=None, col_fixed=None, row_fixed=None, **kwargs):
        super(Cell, self).__init__(**kwargs)
        self.__col = col
        self.__row = row
        self.__row_offset = row_offset
        self.__table = table
        # None means "decide at resolve time" based on whether a row was given
        self.__col_fixed = col_fixed
        self.__row_fixed = row_fixed

    def resolve(self, workbook, row, col):
        """Return the Excel address (e.g. "'Sheet'!$B$3") for this cell."""
        table, worksheet = workbook.get_table(self.__table)
        top, left = worksheet.get_table_pos(table.name)
        col_offset = table.get_column_offset(self.__col)
        # if the row has been given use fixed references in the formula unless they've been set explicitly
        if self.__row is not None:
            row = table.get_row_offset(self.__row)
            row_fixed = self.__row_fixed if self.__row_fixed is not None else True
            col_fixed = self.__col_fixed if self.__col_fixed is not None else True
        else:
            # otherwise use un-fixed addresses, unless set explicitly
            row_fixed = self.__row_fixed if self.__row_fixed is not None else False
            col_fixed = self.__col_fixed if self.__col_fixed is not None else False
        return _to_addr(worksheet.name,
                        top + row + self.__row_offset,
                        left + col_offset,
                        row_fixed=row_fixed,
                        col_fixed=col_fixed)
class Column(Expression):
    """
    Reference to a column in a table.

    :param col: Column label this refers to.
    :param include_header: True if this expression should include the column header.
    :param table: Name of table the column is in, if not in the same table this expression is in.
        Use "%s!%s" % (worksheet.name, table.name) if refering to a table in another worksheet
    :param col_fixed: If True when converted to an address the column will be fixed.
    :param row_fixed: If True when converted to an address the row will be fixed.
    """
    def __init__(self, col, include_header=False, table=None, col_fixed=True, row_fixed=True, **kwargs):
        super(Column, self).__init__(**kwargs)
        self.__col = col
        self.__include_header = include_header
        self.__table = table
        self.__col_fixed = col_fixed
        self.__row_fixed = row_fixed

    def resolve(self, workbook, row, col):
        """Return a single-column Excel range like "'Sheet'!$B$2:$B$10"."""
        table, worksheet = workbook.get_table(self.__table)
        top, left = worksheet.get_table_pos(table.name)
        col_offset = table.get_column_offset(self.__col)
        # skip the header rows unless the caller asked for them
        row_offset = 0 if self.__include_header else table.header_height
        return "'%s'!%s:%s" % (
            worksheet.name,
            _to_addr(None, top + row_offset, left + col_offset,
                     row_fixed=self.__row_fixed,
                     col_fixed=self.__col_fixed),
            _to_addr(None, top + table.height - 1, left + col_offset,
                     row_fixed=self.__row_fixed,
                     col_fixed=self.__col_fixed))
class Index(Expression):
    """
    Reference to a table's index.

    :param include_header: True if this expression should include the index header.
    :param table: Name of table that owns the index, if not the table this expression is in.
        Use "%s!%s" % (worksheet.name, table.name) if refering to a table in another worksheet
    :param col_fixed: If True when converted to an address the column will be fixed.
    :param row_fixed: If True when converted to an address the row will be fixed.
    """
    def __init__(self, include_header=False, table=None, col_fixed=True, row_fixed=True, **kwargs):
        super(Index, self).__init__(**kwargs)
        self.__include_header = include_header
        self.__table = table
        self.__col_fixed = col_fixed
        self.__row_fixed = row_fixed

    def resolve(self, workbook, row, col):
        """Return the Excel range covering the table's index column.

        Raises KeyError (from Table.get_index_offset) if the table was built
        without an index.
        """
        table, worksheet = workbook.get_table(self.__table)
        top, left = worksheet.get_table_pos(table.name)
        col_offset = table.get_index_offset()
        # skip the header rows unless the caller asked for them
        row_offset = 0 if self.__include_header else table.header_height
        return "'%s'!%s:%s" % (
            worksheet.name,
            _to_addr(None, top + row_offset, left + col_offset,
                     row_fixed=self.__row_fixed,
                     col_fixed=self.__col_fixed),
            _to_addr(None, top + table.height - 1, left + col_offset,
                     row_fixed=self.__row_fixed,
                     col_fixed=self.__col_fixed))
class Range(Expression):
    """
    Reference to a range in a table.

    :param left_col: Left most column label this refers to.
    :param right_col: Right most column label this refers to.
    :param top_row: Top most row label, or None to select from the top of the table.
    :param bottom_row: Bottom most row label, or None to select to the bottom of the table.
    :param include_header: Include table header in the range.
    :param table: Name of table the column is in, if not in the same table this expression is in.
        Use "%s!%s" % (worksheet.name, table.name) if refering to a table in another worksheet
    :param col_fixed: If True when converted to an address the column will be fixed.
    :param row_fixed: If True when converted to an address the row will be fixed.
    """
    def __init__(self,
                 left_col,
                 right_col,
                 top_row=None,
                 bottom_row=None,
                 include_header=True,
                 table=None,
                 col_fixed=True,
                 row_fixed=True,
                 **kwargs):
        super(Range, self).__init__(**kwargs)
        self.__left_col = left_col
        self.__right_col = right_col
        self.__top = top_row
        self.__bottom = bottom_row
        # only consulted when top_row is None (see resolve)
        self.__include_header = include_header
        self.__table = table
        self.__col_fixed = col_fixed
        self.__row_fixed = row_fixed

    def resolve(self, workbook, row, col):
        """Return a rectangular Excel range like "'Sheet'!$A$1:$C$10"."""
        table, worksheet = workbook.get_table(self.__table)
        top, left = worksheet.get_table_pos(table.name)
        left_col_offset = table.get_column_offset(self.__left_col)
        right_col_offset = table.get_column_offset(self.__right_col)
        # None top/bottom labels mean "from the top of the table" /
        # "to the last row of the table"
        if self.__top is None:
            top_row_offset = 0 if self.__include_header else table.header_height
        else:
            top_row_offset = table.get_row_offset(self.__top)
        if self.__bottom is None:
            bottom_row_offset = table.height - 1
        else:
            bottom_row_offset = table.get_row_offset(self.__bottom)
        return "'%s'!%s:%s" % (
            worksheet.name,
            _to_addr(None, top + top_row_offset, left + left_col_offset,
                     row_fixed=self.__row_fixed,
                     col_fixed=self.__col_fixed),
            _to_addr(None, top + bottom_row_offset, left + right_col_offset,
                     row_fixed=self.__row_fixed,
                     col_fixed=self.__col_fixed))
class Formula(Expression):
    """
    Formula expression.

    E.g. to create a formula like "=SUMPRODUCT(a, b)" where a and b
    are columns in a table you would do::

        formula = Formula("SUMPRODUCT", Column("col_a"), Column("col_b"))

    :param name: Name of Excel function, eg "SUMPRODUCT".
    :param args: Expressions to use as arguments to the function.
    """
    def __init__(self, name, *args, **kwargs):
        super(Formula, self).__init__(**kwargs)
        self.__name = name
        self.__args = args

    def resolve(self, workbook, row, col):
        """Render as NAME(arg1,arg2,...); None arguments become empty slots."""
        rendered = []
        for arg in self.__args:
            if arg is None:
                rendered.append("")
            else:
                rendered.append(self._strip(_make_expr(arg).resolve(workbook, row, col)))
        return "%s(%s)" % (self.__name, ",".join(rendered))
class ArrayExpression(Expression):
    """
    Wraps an expression in an array formula (ie. surrounds it with {})

    :param xltable.Expression expr: Expression to be wrapped
    """
    def __init__(self, expr):
        # passing expr as the base-class value makes .value/.has_value
        # delegate to the wrapped expression
        Expression.__init__(self, expr)
        self.__expr = expr

    def resolve(self, workbook, row, col):
        # the wrapped expression resolves as-is; braces are added in get_formula
        return self.__expr.resolve(workbook, row, col)

    def get_formula(self, workbook, row, col):
        # strip any braces the inner formula already has to avoid doubling them
        return "{%s}" % self.__expr.get_formula(workbook, row, col).strip("{}")
class BinOp(Expression):
    """
    Internal use - composite expression combining two expressions with a
    binary operator.

    The operator symbol is the Excel symbol; __operators maps each symbol to
    a Python callable used to pre-compute .value when both operands have one.
    """
    __operators = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
        ">": operator.gt,
        "<": operator.lt,
        "<=": operator.le,
        ">=": operator.ge,
        "!=": operator.ne,
        # Fix: Expression.__ne__ emits Excel's "<>" symbol, which was missing
        # from this table, so `a != b` raised KeyError whenever both operands
        # carried pre-computed values. "!=" is kept for backward compatibility.
        "<>": operator.ne,
        "=": operator.eq,
        # NOTE(review): in Excel "&" is string concatenation, but operator.and_
        # is Python's bitwise AND — the pre-computed value may disagree with
        # what Excel calculates; confirm the intended semantics.
        "&": operator.and_,
        "|": operator.or_,
    }

    def __init__(self, lhs, rhs, op, **kwargs):
        """:param lhs/rhs: operand Expressions; :param op: Excel operator symbol."""
        super(BinOp, self).__init__(**kwargs)
        self.__lhs = lhs
        self.__rhs = rhs
        self.__op = op
        # pre-compute a value only when both operands carry one
        if lhs.has_value and rhs.has_value:
            self.value = self.__operators[op](lhs.value, rhs.value)

    def resolve(self, workbook, row, col):
        """Render as "(lhs op rhs)"; outer parens are stripped by get_formula."""
        return "(%s%s%s)" % (
            self.__lhs.resolve(workbook, row, col),
            self.__op,
            self.__rhs.resolve(workbook, row, col))
class ConstExpr(Expression):
"""
Internal use - expression for wrapping constants.
"""
def __init__(self, value, **kwargs):
super(ConstExpr, self).__init__(**kwargs)
self.value = value
self.__value = value
def resolve(self, workbook, row, col):
if isinstance(self.__value, str):
return '"%s"' % self.__value
if isinstance(self.__value, bool):
return "TRUE" if self.__value else "FALSE"
return str(self.__value)
def _make_expr(x):
if isinstance(x, Expression):
return x
return ConstExpr(x)
|
fkarb/xltable | xltable/expression.py | Expression.value | python | def value(self):
try:
if isinstance(self.__value, Expression):
return self.__value.value
return self.__value
except AttributeError:
return 0 | Set a calculated value for this Expression.
Used when writing formulas using XlsxWriter to give cells
an initial value when the sheet is loaded without being calculated. | train | https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/expression.py#L59-L69 | null | class Expression(object):
"""
Base class for all worksheet expressions.
Expressions are used to build formulas referencing ranges in the
worksheet by labels which are resolved to cell references when the
worksheet is written out.
Expressions may be combined using binary operators.
"""
def __init__(self, value=None):
if value is not None:
self.value = value
def __add__(self, other):
return BinOp(self, _make_expr(other), "+")
def __sub__(self, other):
return BinOp(self, _make_expr(other), "-")
def __mul__(self, other):
return BinOp(self, _make_expr(other), "*")
def __truediv__(self, other):
return BinOp(self, _make_expr(other), "/")
def __lt__(self, other):
return BinOp(self, _make_expr(other), "<")
def __le__(self, other):
return BinOp(self, _make_expr(other), "<=")
def __eq__(self, other):
return BinOp(self, _make_expr(other), "=")
def __ne__(self, other):
return BinOp(self, _make_expr(other), "<>")
def __gt__(self, other):
return BinOp(self, _make_expr(other), ">")
def __ge__(self, other):
return BinOp(self, _make_expr(other), ">=")
def __and__(self, other):
return BinOp(self, _make_expr(other), "&")
def get_formula(self, workbook, row, col):
return "=%s" % self._strip(self.resolve(workbook, row, col))
@property
@property
def has_value(self):
"""return True if value has been set"""
try:
if isinstance(self.__value, Expression):
return self.__value.has_value
return True
except AttributeError:
return False
@value.setter
def value(self, value):
self.__value = value
@staticmethod
def _strip(x):
# strip off the outer parentheses if they match
return re.sub("^\((.*)\)$", r"\1", x)
def resolve(self, workbook, worksheet, col, row):
raise NotImplementedError("Expression.resolve")
|
fkarb/xltable | xltable/expression.py | Expression.has_value | python | def has_value(self):
try:
if isinstance(self.__value, Expression):
return self.__value.has_value
return True
except AttributeError:
return False | return True if value has been set | train | https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/expression.py#L72-L79 | null | class Expression(object):
"""
Base class for all worksheet expressions.
Expressions are used to build formulas referencing ranges in the
worksheet by labels which are resolved to cell references when the
worksheet is written out.
Expressions may be combined using binary operators.
"""
def __init__(self, value=None):
if value is not None:
self.value = value
def __add__(self, other):
return BinOp(self, _make_expr(other), "+")
def __sub__(self, other):
return BinOp(self, _make_expr(other), "-")
def __mul__(self, other):
return BinOp(self, _make_expr(other), "*")
def __truediv__(self, other):
return BinOp(self, _make_expr(other), "/")
def __lt__(self, other):
return BinOp(self, _make_expr(other), "<")
def __le__(self, other):
return BinOp(self, _make_expr(other), "<=")
def __eq__(self, other):
return BinOp(self, _make_expr(other), "=")
def __ne__(self, other):
return BinOp(self, _make_expr(other), "<>")
def __gt__(self, other):
return BinOp(self, _make_expr(other), ">")
def __ge__(self, other):
return BinOp(self, _make_expr(other), ">=")
def __and__(self, other):
return BinOp(self, _make_expr(other), "&")
def get_formula(self, workbook, row, col):
return "=%s" % self._strip(self.resolve(workbook, row, col))
@property
def value(self):
"""Set a calculated value for this Expression.
Used when writing formulas using XlsxWriter to give cells
an initial value when the sheet is loaded without being calculated.
"""
try:
if isinstance(self.__value, Expression):
return self.__value.value
return self.__value
except AttributeError:
return 0
@property
@value.setter
def value(self, value):
self.__value = value
@staticmethod
def _strip(x):
# strip off the outer parentheses if they match
return re.sub("^\((.*)\)$", r"\1", x)
def resolve(self, workbook, worksheet, col, row):
raise NotImplementedError("Expression.resolve")
|
fkarb/xltable | xltable/worksheet.py | _to_pywintypes | python | def _to_pywintypes(row):
def _pywintype(x):
if isinstance(x, dt.date):
return dt.datetime(x.year, x.month, x.day, tzinfo=dt.timezone.utc)
elif isinstance(x, (dt.datetime, pa.Timestamp)):
if x.tzinfo is None:
return x.replace(tzinfo=dt.timezone.utc)
elif isinstance(x, str):
if re.match("^\d{4}-\d{2}-\d{2}$", x):
return "'" + x
return x
elif isinstance(x, np.integer):
return int(x)
elif isinstance(x, np.floating):
return float(x)
elif x is not None and not isinstance(x, (str, int, float, bool)):
return str(x)
return x
return [_pywintype(x) for x in row] | convert values in a row to types accepted by excel | train | https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L640-L666 | null | """
A worksheet is a collection of tables placed at specific locations.
Once all tables have been placed the worksheet can be written out or
the rows can be iterated over, and any expressions present in the
tables will be resolved to absolute cell references.
"""
from .style import CellStyle
from .table import ArrayFormula, Value
from .expression import Expression
import re
import datetime as dt
import pandas as pa
import numpy as np
from copy import copy
class Worksheet(object):
"""
A worksheet is a collection of tables placed at specific locations.
Once all tables have been placed the worksheet can be written out or
the rows can be iterated over, and any expressions present in the
tables will be resolved to absolute cell references.
:param str name: Worksheet name.
"""
_xlsx_unsupported_types = tuple()
def __init__(self, name="Sheet1"):
self.__name = name
self.__tables = {}
self.__values = {}
self.__charts = []
self.__next_row = 0
self.__groups = []
@property
def name(self):
"""Worksheet name"""
return self.__name
def add_table(self, table, row=None, col=0, row_spaces=1):
"""
Adds a table to the worksheet at (row, col).
Return the (row, col) where the table has been put.
:param xltable.Table table: Table to add to the worksheet.
:param int row: Row to start the table at (defaults to the next free row).
:param int col: Column to start the table at.
:param int row_spaces: Number of rows to leave between this table and the next.
"""
name = table.name
assert name is not None, "Tables must have a name"
assert name not in self.__tables, "Table %s already exists in this worksheet" % name
if row is None:
row = self.__next_row
self.__next_row = max(row + table.height + row_spaces, self.__next_row)
self.__tables[name] = (table, (row, col))
return row, col
def add_value(self, value, row, col):
"""
Adds a single value (cell) to a worksheet at (row, col).
Return the (row, col) where the value has been put.
:param value: Value to write to the sheet.
:param row: Row where the value should be written.
:param col: Column where the value should be written.
"""
self.__values[(row, col)] = value
def add_chart(self, chart, row, col):
"""
Adds a chart to the worksheet at (row, col).
:param xltable.Chart Chart: chart to add to the workbook.
:param int row: Row to add the chart at.
"""
self.__charts.append((chart, (row, col)))
def add_row_group(self, tables, collapsed=True):
"""
Adds a group over all the given tables (will include any rows between the first row over all
tables, and the last row over all tables)
Initially collapsed if collapsed is True (True by default)
"""
self.__groups.append((tables, collapsed))
@property
def next_row(self):
"""Row the next table will start at unless another row is specified."""
return self.__next_row
@next_row.setter
def next_row(self, value):
self.__next_row = value
def get_table_pos(self, tablename):
"""
:param str tablename: Name of table to get position of.
:return: Upper left (row, col) coordinate of the named table.
"""
_table, (row, col) = self.__tables[tablename]
return (row, col)
def get_table(self, tablename):
"""
:param str tablename: Name of table to find.
:return: A :py:class:`xltable.Table` instance from the table name.
"""
table, (_row, _col) = self.__tables[tablename]
return table
def iterrows(self, workbook=None):
"""
Yield rows as lists of data.
The data is exactly as it is in the source pandas DataFrames and
any formulas are not resolved.
"""
resolved_tables = []
max_height = 0
max_width = 0
# while yielding rows __formula_values is updated with any formula values set on Expressions
self.__formula_values = {}
for name, (table, (row, col)) in list(self.__tables.items()):
# get the resolved 2d data array from the table
#
# expressions with no explicit table will use None when calling
# get_table/get_table_pos, which should return the current table.
#
self.__tables[None] = (table, (row, col))
data = table.get_data(workbook, row, col, self.__formula_values)
del self.__tables[None]
height, width = data.shape
upper_left = (row, col)
lower_right = (row + height - 1, col + width - 1)
max_height = max(max_height, lower_right[0] + 1)
max_width = max(max_width, lower_right[1] + 1)
resolved_tables.append((name, data, upper_left, lower_right))
for row, col in self.__values.keys():
max_width = max(max_width, row+1)
max_height = max(max_height, col+1)
# Build the whole table up-front. Doing it row by row is too slow.
table = [[None] * max_width for i in range(max_height)]
for name, data, upper_left, lower_right in resolved_tables:
for i, r in enumerate(range(upper_left[0], lower_right[0]+1)):
for j, c in enumerate(range(upper_left[1], lower_right[1]+1)):
table[r][c] = data[i][j]
for (r, c), value in self.__values.items():
if isinstance(value, Value):
value = value.value
if isinstance(value, Expression):
if value.has_value:
self.__formula_values[(r, c)] = value.value
value = value.get_formula(workbook, r, c)
table[r][c] = value
for row in table:
yield row
def to_csv(self, writer):
"""
Writes worksheet to a csv.writer object.
:param writer: csv writer instance.
"""
for row in self.iterrows():
writer.writerow(row)
def _get_column_widths(self):
"""return a dictionary of {col -> width}"""
col_widths = {}
for table, (row, col) in self.__tables.values():
for colname, width in table.column_widths.items():
ic = col + table.get_column_offset(colname)
current_width = col_widths.setdefault(ic, width)
col_widths[ic] = max(width, current_width)
return col_widths
def _get_all_styles(self):
"""
return a dictionary of {(row, col) -> CellStyle}
for all cells that use a non-default style.
"""
_styles = {}
def _get_style(bold=False, bg_col=None, border=None):
if (bold, bg_col, border) not in _styles:
_styles[(bold, bg_col, border)] = CellStyle(bold=bold,
bg_color=bg_col,
border=border)
return _styles[(bold, bg_col, border)]
ws_styles = {}
for table, (row, col) in self.__tables.values():
for r in range(row, row + table.header_height):
for c in range(col, col + table.width):
if isinstance(table.header_style, dict):
col_name = table.dataframe.columns[c - col]
style = table.header_style.get(col_name, _get_style(bold=True))
else:
style = table.header_style or _get_style(bold=True)
ws_styles[(r, c)] = style
for c in range(col, col + table.row_labels_width):
for r in range(row + table.header_height, row + table.height):
if isinstance(table.index_style, dict):
row_name = table.dataframe.index[r - row]
style = table.index_style.get(row_name, _get_style(bold=True))
else:
style = table.index_style or _get_style(bold=True)
ws_styles[(r, c)] = style
if table.style.stripe_colors or table.style.border:
num_bg_cols = len(table.style.stripe_colors) if \
table.style.stripe_colors else 1
bg_cols = table.style.stripe_colors if \
table.style.stripe_colors else None
for i, row_offset in enumerate(range(table.header_height,
table.height)):
for c in range(col, col + table.width):
bg_col = bg_cols[i % num_bg_cols] if bg_cols else None
style = _get_style(bold=None, bg_col=bg_col, border=table.style.border)
if (row + row_offset, c) in ws_styles:
style = style + ws_styles[(row + row_offset, c)]
ws_styles[(row + row_offset, c)] = style
for col_name, col_style in table.column_styles.items():
try:
col_offset = table.get_column_offset(col_name)
except KeyError:
continue
for i, r in enumerate(range(row + table.header_height, row + table.height)):
style = col_style
if (r, col + col_offset) in ws_styles:
style = ws_styles[(r, col + col_offset)] + style
ws_styles[(r, col + col_offset)] = style
for row_name, row_style in table.row_styles.items():
try:
row_offset = table.get_row_offset(row_name)
except KeyError:
continue
for i, c in enumerate(range(col + table.row_labels_width, col + table.width)):
style = row_style
if (row + row_offset, c) in ws_styles:
style = ws_styles[(row + row_offset, c)] + style
ws_styles[(row + row_offset, c)] = style
for (row_name, col_name), cell_style in table.cell_styles.items():
try:
col_offset = table.get_column_offset(col_name)
row_offset = table.get_row_offset(row_name)
except KeyError:
continue
style = cell_style
if (row + row_offset, col + col_offset) in ws_styles:
style = ws_styles[(row + row_offset, col + col_offset)] + style
ws_styles[(row + row_offset, col + col_offset)] = style
for (row, col), value in self.__values.items():
if isinstance(value, Value):
style = value.style
if style:
if (row, col) in ws_styles:
style = style + ws_styles[(row, col)]
ws_styles[(row, col)] = style
return ws_styles
def to_excel(self,
workbook=None,
worksheet=None,
xl_app=None,
clear=True,
rename=True,
resize_columns=True):
"""
Writes worksheet to an Excel Worksheet COM object.
Requires :py:module:`pywin32` to be installed.
:param workbook: xltable.Workbook this sheet belongs to.
:param worksheet: Excel COM Worksheet instance to write to.
:param xl_app: Excel COM Excel Application to write to.
:param bool clear: If a worksheet is provided, clear worksheet before writing.
:param bool rename: If a worksheet is provided, rename self to match the worksheet.
:param bool resize_columns: Resize sheet columns after writing.
"""
from win32com.client import Dispatch, constants, gencache
if xl_app is None:
if worksheet is not None:
xl_app = worksheet.Parent.Application
elif workbook is not None and hasattr(workbook.workbook_obj, "Application"):
xl_app = workbook.workbook_obj.Application
else:
xl_app = Dispatch("Excel.Application")
xl = xl_app = gencache.EnsureDispatch(xl_app)
# Create a workbook if there isn't one already
if not workbook:
from .workbook import Workbook
workbook = Workbook(worksheets=[self])
if worksheet is None:
# If there's no worksheet then call Workbook.to_excel which will create one
return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns)
if rename:
self.__name = worksheet.Name
# set manual calculation and turn off screen updating while we update the cells
calculation = xl.Calculation
screen_updating = xl.ScreenUpdating
xl.Calculation = constants.xlCalculationManual
xl.ScreenUpdating = False
try:
# clear the worksheet and reset the styles
if clear:
worksheet.Cells.ClearContents()
worksheet.Cells.Font.Bold = False
worksheet.Cells.Font.Size = 11
worksheet.Cells.Font.Color = 0x000000
worksheet.Cells.Interior.ColorIndex = 0
worksheet.Cells.NumberFormat = "General"
# get any array formula tables
array_formula_tables = []
for table, (row, col) in self.__tables.values():
if isinstance(table, ArrayFormula):
array_formula_tables.append((row, col, row + table.height, col + table.width))
def _is_in_array_formula_table(row, col):
"""returns True if this formula cell is part of an array formula table"""
for top, left, bottom, right in array_formula_tables:
if bottom >= row >= top and left <= col <= right:
return True
return False
origin = worksheet.Range("A1")
xl_cell = origin
for r, row in enumerate(self.iterrows(workbook)):
row = _to_pywintypes(row)
# set the value and formulae to the excel range (it's much quicker to
# write a row at a time and update the formula than it is it do it
# cell by cell)
if clear:
xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, len(row)))
xl_row.Value = row
else:
for c, value in enumerate(row):
if value is not None:
xl_cell.Offset(1, 1 + c).Value = value
for c, value in enumerate(row):
if isinstance(value, str):
if value.startswith("="):
formula_value = self.__formula_values.get((r, c), 0)
xl_cell.Offset(1, 1 + c).Value = formula_value
xl_cell.Offset(1, 1 + c).Formula = value
elif value.startswith("{=") \
and not _is_in_array_formula_table(r, c):
formula_value = self.__formula_values.get((r, c), 0)
xl_cell.Offset(1, 1 + c).Value = formula_value
xl_cell.Offset(1, 1 + c).FormulaArray = value
# move to the next row
xl_cell = xl_cell.Offset(2, 1)
# set any array formulas
for table, (row, col) in self.__tables.values():
if isinstance(table, ArrayFormula):
data = table.get_data(workbook, row, col)
height, width = data.shape
upper_left = origin.Offset(row+1, col+1)
lower_right = origin.Offset(row + height, col + width)
xl_range = worksheet.Range(upper_left, lower_right)
xl_range.FormulaArray = table.formula.get_formula(workbook, row, col)
# set any formatting
for (row, col), style in self._get_all_styles().items():
r = origin.Offset(1 + row, 1 + col)
if style.bold:
r.Font.Bold = True
if style.excel_number_format is not None:
r.NumberFormat = style.excel_number_format
if style.size is not None:
r.Font.Size = style.size
if style.text_color is not None:
r.Font.Color = _to_bgr(style.text_color)
if style.bg_color is not None:
r.Interior.Color = _to_bgr(style.bg_color)
if style.text_wrap or style.border:
raise Exception("text wrap and border not implemented")
# add any charts
for chart, (row, col) in self.__charts:
top_left = origin.Offset(1 + row, 1 + col)
xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart
xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype)
if chart.title:
xl_chart.ChartTitle = chart.title
for series in chart.iter_series(self, row, col):
xl_series = xl_chart.SeriesCollection().NewSeries()
xl_series.Values = "=%s!%s" % (self.name, series["values"].lstrip("="))
if "categories" in series:
xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("="))
if "name" in series:
xl_series.Name = series["name"]
finally:
xl.ScreenUpdating = screen_updating
xl.Calculation = calculation
if resize_columns:
try:
worksheet.Cells.EntireColumn.AutoFit()
except:
pass
def to_xlsx(self, filename=None, workbook=None):
"""
Write worksheet to a .xlsx file using xlsxwriter.
:param str filename: Filename to write to. If None no file is written.
:param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook
will be created with this worksheet as the only sheet.
:return: :py:class:`xlsxwriter.workbook.Workbook` instance.
"""
from .workbook import Workbook
if not workbook:
workbook = Workbook(filename=filename)
workbook.append(self)
return workbook.to_xlsx()
ws = workbook.add_xlsx_worksheet(self, self.name)
_styles = {}
def _get_xlsx_style(cell_style):
"""
convert rb.excel style to xlsx writer style
"""
style_args = (
cell_style.bold,
cell_style.excel_number_format,
cell_style.text_color,
cell_style.bg_color,
cell_style.size,
cell_style.text_wrap,
cell_style.text_wrap,
cell_style.border,
cell_style.align,
cell_style.valign
)
if (style_args) not in _styles:
style = workbook.add_format()
if cell_style.bold:
style.set_bold()
if cell_style.excel_number_format is not None:
style.set_num_format(cell_style.excel_number_format)
if cell_style.text_color is not None:
style.set_font_color("#%06x" % cell_style.text_color)
if cell_style.bg_color is not None:
style.set_bg_color("#%06x" % cell_style.bg_color)
if cell_style.size is not None:
style.set_font_size(cell_style.size)
if cell_style.text_wrap:
style.set_text_wrap()
if cell_style.border:
if isinstance(cell_style.border, frozenset):
for border_position, border_style in cell_style.border:
if border_position == "bottom":
style.set_bottom(border_style)
elif border_position == "top":
style.set_top(border_style)
elif border_position == "left":
style.set_left(border_style)
elif border_position == "right":
style.set_right(border_style)
else:
raise AssertionError("Unknown border position '%s'." % border_position)
else:
style.set_border(cell_style.border)
if cell_style.align:
style.set_align(cell_style.align)
if cell_style.valign:
style.set_valign(cell_style.valign)
_styles[style_args] = style
return _styles[style_args]
# pre-compute the cells with non-default styles
ws_styles = self._get_all_styles()
ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()}
plain_style = _get_xlsx_style(CellStyle())
# get any array formula tables
array_formula_tables = []
for table, (row, col) in self.__tables.values():
if isinstance(table, ArrayFormula):
array_formula_tables.append((row, col, row + table.height, col + table.width))
def _is_in_array_formula_table(row, col):
"""returns True if this formula cell is part of an array formula table"""
for top, left, bottom, right in array_formula_tables:
if bottom >= row >= top and left <= col <= right:
return True
return False
# write the rows to the worksheet
for ir, row in enumerate(self.iterrows(workbook)):
for ic, cell in enumerate(row):
style = ws_styles.get((ir, ic), plain_style)
if isinstance(cell, str):
if cell.startswith("="):
formula_value = self.__formula_values.get((ir, ic), 0)
ws.write_formula(ir, ic, cell, style, value=formula_value)
elif cell.startswith("{="):
# array formulas tables are written after everything else,
# but individual cells can also be array formulas
if not _is_in_array_formula_table(ir, ic):
formula_value = self.__formula_values.get((ir, ic), 0)
ws.write_array_formula(ir, ic, ir, ic,
cell, style,
value=formula_value)
else:
ws.write(ir, ic, cell, style)
else:
if isinstance(cell, self._xlsx_unsupported_types):
ws.write(ir, ic, str(cell), style)
else:
try:
ws.write(ir, ic, cell, style)
except TypeError:
ws.write(ir, ic, str(cell), style)
unsupported_types = set(self._xlsx_unsupported_types)
unsupported_types.add(type(cell))
self.__class__._xlsx_unsupported_types = tuple(unsupported_types)
# set any array formulas
for table, (row, col) in self.__tables.values():
if isinstance(table, ArrayFormula):
style = ws_styles.get((row, col), plain_style)
data = table.get_data(workbook, row, col)
height, width = data.shape
bottom, right = (row + height - 1, col + width -1)
formula = table.formula.get_formula(workbook, row, col)
ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0])
for y in range(height):
for x in range(width):
if y == 0 and x == 0:
continue
ir, ic = row + y, col + x
style = ws_styles.get((ir, ic), plain_style)
cell = data[y][x]
if isinstance(cell, str):
cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii")
ws.write_formula(ir, ic, cell_str, style)
else:
ws.write(ir, ic, cell, style)
# set any non-default column widths
for ic, width in self._get_column_widths().items():
ws.set_column(ic, ic, width)
# add any charts
for chart, (row, col) in self.__charts:
kwargs = {"type": chart.type}
if chart.subtype:
kwargs["subtype"] = chart.subtype
xl_chart = workbook.workbook_obj.add_chart(kwargs)
if chart.show_blanks:
xl_chart.show_blanks_as(chart.show_blanks)
for series in chart.iter_series(workbook, row, col):
# xlsxwriter expects the sheetname in the formula
values = series.get("values")
if isinstance(values, str) and values.startswith("=") and "!" not in values:
series["values"] = "='%s'!%s" % (self.name, values.lstrip("="))
categories = series.get("categories")
if isinstance(categories, str) and categories.startswith("=") and "!" not in categories:
series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("="))
xl_chart.add_series(series)
xl_chart.set_size({"width": chart.width, "height": chart.height})
if chart.title:
xl_chart.set_title({"name": chart.title})
if chart.legend_position:
xl_chart.set_legend({"position": chart.legend_position})
if chart.x_axis:
xl_chart.set_x_axis(chart.x_axis)
if chart.y_axis:
xl_chart.set_y_axis(chart.y_axis)
ws.insert_chart(row, col, xl_chart)
# add any groups
for tables, collapsed in self.__groups:
min_row, max_row = 1000000, -1
for table, (row, col) in self.__tables.values():
if table in tables:
min_row = min(min_row, row)
max_row = max(max_row, row + table.height)
for i in range(min_row, max_row+1):
ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed})
if filename:
workbook.close()
return workbook
def _to_bgr(rgb):
"""excel expects colors as BGR instead of the usual RGB"""
if rgb is None:
return None
return ((rgb >> 16) & 0xff) + (rgb & 0xff00) + ((rgb & 0xff) << 16)
def _to_excel_chart_type(type, subtype):
from win32com.client import constants
return {
"area": {
None: constants.xlArea,
"stacked": constants.xlAreaStacked,
"percent_stacked": constants.xlAreaStacked100,
},
"bar": {
None: constants.xlBar,
"stacked": constants.xlBarStacked,
"percent_stacked": constants.xlBarStacked100,
},
"column": {
"stacked": constants.xlColumnStacked,
"percent_stacked": constants.xlColumnStacked100,
},
"line": {
None: constants.xlLine,
},
"scatter": {
None: constants.xlXYScatter,
"straight_with_markers": constants.xlXYScatterLines,
"straight": constants.xlXYScatterLinesNoMarkers,
"smooth_with_markers": constants.xlXYScatterSmooth,
"smooth": constants.xlXYScatterSmoothNoMarkers,
},
"stock": {
None: constants.xlStockHLC,
},
"radar": {
None: constants.xlRadar,
"with_markers": constants.xlRadarMarkers,
"filled": constants.xlRadarFilled,
},
}[type][subtype]
|
fkarb/xltable | xltable/worksheet.py | Worksheet.add_table | python | def add_table(self, table, row=None, col=0, row_spaces=1):
name = table.name
assert name is not None, "Tables must have a name"
assert name not in self.__tables, "Table %s already exists in this worksheet" % name
if row is None:
row = self.__next_row
self.__next_row = max(row + table.height + row_spaces, self.__next_row)
self.__tables[name] = (table, (row, col))
return row, col | Adds a table to the worksheet at (row, col).
Return the (row, col) where the table has been put.
:param xltable.Table table: Table to add to the worksheet.
:param int row: Row to start the table at (defaults to the next free row).
:param int col: Column to start the table at.
:param int row_spaces: Number of rows to leave between this table and the next. | train | https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/worksheet.py#L43-L60 | null | class Worksheet(object):
"""
A worksheet is a collection of tables placed at specific locations.
Once all tables have been placed the worksheet can be written out or
the rows can be iterated over, and any expressions present in the
tables will be resolved to absolute cell references.
:param str name: Worksheet name.
"""
_xlsx_unsupported_types = tuple()
def __init__(self, name="Sheet1"):
self.__name = name
self.__tables = {}
self.__values = {}
self.__charts = []
self.__next_row = 0
self.__groups = []
@property
def name(self):
"""Worksheet name"""
return self.__name
def add_value(self, value, row, col):
"""
Adds a single value (cell) to a worksheet at (row, col).
Return the (row, col) where the value has been put.
:param value: Value to write to the sheet.
:param row: Row where the value should be written.
:param col: Column where the value should be written.
"""
self.__values[(row, col)] = value
def add_chart(self, chart, row, col):
"""
Adds a chart to the worksheet at (row, col).
:param xltable.Chart Chart: chart to add to the workbook.
:param int row: Row to add the chart at.
"""
self.__charts.append((chart, (row, col)))
def add_row_group(self, tables, collapsed=True):
"""
Adds a group over all the given tables (will include any rows between the first row over all
tables, and the last row over all tables)
Initially collapsed if collapsed is True (True by default)
"""
self.__groups.append((tables, collapsed))
@property
def next_row(self):
"""Row the next table will start at unless another row is specified."""
return self.__next_row
@next_row.setter
def next_row(self, value):
self.__next_row = value
def get_table_pos(self, tablename):
"""
:param str tablename: Name of table to get position of.
:return: Upper left (row, col) coordinate of the named table.
"""
_table, (row, col) = self.__tables[tablename]
return (row, col)
def get_table(self, tablename):
"""
:param str tablename: Name of table to find.
:return: A :py:class:`xltable.Table` instance from the table name.
"""
table, (_row, _col) = self.__tables[tablename]
return table
def iterrows(self, workbook=None):
"""
Yield rows as lists of data.
The data is exactly as it is in the source pandas DataFrames and
any formulas are not resolved.
"""
resolved_tables = []
max_height = 0
max_width = 0
# while yielding rows __formula_values is updated with any formula values set on Expressions
self.__formula_values = {}
for name, (table, (row, col)) in list(self.__tables.items()):
# get the resolved 2d data array from the table
#
# expressions with no explicit table will use None when calling
# get_table/get_table_pos, which should return the current table.
#
self.__tables[None] = (table, (row, col))
data = table.get_data(workbook, row, col, self.__formula_values)
del self.__tables[None]
height, width = data.shape
upper_left = (row, col)
lower_right = (row + height - 1, col + width - 1)
max_height = max(max_height, lower_right[0] + 1)
max_width = max(max_width, lower_right[1] + 1)
resolved_tables.append((name, data, upper_left, lower_right))
for row, col in self.__values.keys():
max_width = max(max_width, row+1)
max_height = max(max_height, col+1)
# Build the whole table up-front. Doing it row by row is too slow.
table = [[None] * max_width for i in range(max_height)]
for name, data, upper_left, lower_right in resolved_tables:
for i, r in enumerate(range(upper_left[0], lower_right[0]+1)):
for j, c in enumerate(range(upper_left[1], lower_right[1]+1)):
table[r][c] = data[i][j]
for (r, c), value in self.__values.items():
if isinstance(value, Value):
value = value.value
if isinstance(value, Expression):
if value.has_value:
self.__formula_values[(r, c)] = value.value
value = value.get_formula(workbook, r, c)
table[r][c] = value
for row in table:
yield row
def to_csv(self, writer):
"""
Writes worksheet to a csv.writer object.
:param writer: csv writer instance.
"""
for row in self.iterrows():
writer.writerow(row)
def _get_column_widths(self):
"""return a dictionary of {col -> width}"""
col_widths = {}
for table, (row, col) in self.__tables.values():
for colname, width in table.column_widths.items():
ic = col + table.get_column_offset(colname)
current_width = col_widths.setdefault(ic, width)
col_widths[ic] = max(width, current_width)
return col_widths
def _get_all_styles(self):
    """
    return a dictionary of {(row, col) -> CellStyle}
    for all cells that use a non-default style.

    Styles are collected per table (header rows, index columns, row
    striping/border, then column/row/cell overrides) and finally from any
    styled Value objects placed directly on the sheet.  Overlapping styles
    are merged with CellStyle's ``+`` operator; the operand order below is
    deliberate and should not be changed without checking CellStyle.__add__.
    """
    # cache of CellStyle objects keyed by (bold, bg_color, border) so that
    # identical requests share a single instance
    _styles = {}
    def _get_style(bold=False, bg_col=None, border=None):
        if (bold, bg_col, border) not in _styles:
            _styles[(bold, bg_col, border)] = CellStyle(bold=bold,
                                                        bg_color=bg_col,
                                                        border=border)
        return _styles[(bold, bg_col, border)]

    ws_styles = {}
    for table, (row, col) in self.__tables.values():
        # header rows: per-column style dict, a single shared style, or
        # the default bold style
        for r in range(row, row + table.header_height):
            for c in range(col, col + table.width):
                if isinstance(table.header_style, dict):
                    # NOTE(review): c - col does not skip the row-label
                    # columns -- confirm dataframe.columns aligns with the
                    # first sheet column of the table.
                    col_name = table.dataframe.columns[c - col]
                    style = table.header_style.get(col_name, _get_style(bold=True))
                else:
                    style = table.header_style or _get_style(bold=True)
                ws_styles[(r, c)] = style

        # row-label (index) columns below the header
        for c in range(col, col + table.row_labels_width):
            for r in range(row + table.header_height, row + table.height):
                if isinstance(table.index_style, dict):
                    # NOTE(review): r - row still includes the header rows;
                    # it looks like header_height should be subtracted
                    # before indexing dataframe.index -- confirm.
                    row_name = table.dataframe.index[r - row]
                    style = table.index_style.get(row_name, _get_style(bold=True))
                else:
                    style = table.index_style or _get_style(bold=True)
                ws_styles[(r, c)] = style

        # row striping and/or border over the whole data area
        if table.style.stripe_colors or table.style.border:
            num_bg_cols = len(table.style.stripe_colors) if \
                table.style.stripe_colors else 1
            bg_cols = table.style.stripe_colors if \
                table.style.stripe_colors else None

            for i, row_offset in enumerate(range(table.header_height,
                                                 table.height)):
                for c in range(col, col + table.width):
                    # cycle through the stripe colors row by row
                    bg_col = bg_cols[i % num_bg_cols] if bg_cols else None
                    style = _get_style(bold=None, bg_col=bg_col, border=table.style.border)
                    if (row + row_offset, c) in ws_styles:
                        # merge with any header/index style already set
                        style = style + ws_styles[(row + row_offset, c)]
                    ws_styles[(row + row_offset, c)] = style

        # per-column style overrides; unknown column names are skipped
        for col_name, col_style in table.column_styles.items():
            try:
                col_offset = table.get_column_offset(col_name)
            except KeyError:
                continue
            for i, r in enumerate(range(row + table.header_height, row + table.height)):
                style = col_style
                if (r, col + col_offset) in ws_styles:
                    # note the reversed operand order vs. the stripe merge
                    style = ws_styles[(r, col + col_offset)] + style
                ws_styles[(r, col + col_offset)] = style

        # per-row style overrides; unknown row names are skipped
        for row_name, row_style in table.row_styles.items():
            try:
                row_offset = table.get_row_offset(row_name)
            except KeyError:
                continue
            for i, c in enumerate(range(col + table.row_labels_width, col + table.width)):
                style = row_style
                if (row + row_offset, c) in ws_styles:
                    style = ws_styles[(row + row_offset, c)] + style
                ws_styles[(row + row_offset, c)] = style

        # per-cell style overrides; unknown row/column names are skipped
        for (row_name, col_name), cell_style in table.cell_styles.items():
            try:
                col_offset = table.get_column_offset(col_name)
                row_offset = table.get_row_offset(row_name)
            except KeyError:
                continue
            style = cell_style
            if (row + row_offset, col + col_offset) in ws_styles:
                style = ws_styles[(row + row_offset, col + col_offset)] + style
            ws_styles[(row + row_offset, col + col_offset)] = style

    # styles attached to individual Value objects placed on the sheet
    for (row, col), value in self.__values.items():
        if isinstance(value, Value):
            style = value.style
            if style:
                if (row, col) in ws_styles:
                    style = style + ws_styles[(row, col)]
                ws_styles[(row, col)] = style

    return ws_styles
def to_excel(self,
             workbook=None,
             worksheet=None,
             xl_app=None,
             clear=True,
             rename=True,
             resize_columns=True):
    """
    Writes worksheet to an Excel Worksheet COM object.
    Requires :py:module:`pywin32` to be installed.

    :param workbook: xltable.Workbook this sheet belongs to.
    :param worksheet: Excel COM Worksheet instance to write to.
    :param xl_app: Excel COM Excel Application to write to.
    :param bool clear: If a worksheet is provided, clear worksheet before writing.
    :param bool rename: If a worksheet is provided, rename self to match the worksheet.
    :param bool resize_columns: Resize sheet columns after writing.
    """
    from win32com.client import Dispatch, constants, gencache

    # Resolve the Excel Application object: prefer the worksheet's own
    # application, then the workbook's, otherwise dispatch a new instance.
    if xl_app is None:
        if worksheet is not None:
            xl_app = worksheet.Parent.Application
        elif workbook is not None and hasattr(workbook.workbook_obj, "Application"):
            xl_app = workbook.workbook_obj.Application
        else:
            xl_app = Dispatch("Excel.Application")

    # EnsureDispatch builds an early-bound COM wrapper so the generated
    # 'constants' (e.g. xlCalculationManual) are usable below.
    xl = xl_app = gencache.EnsureDispatch(xl_app)

    # Create a workbook if there isn't one already
    if not workbook:
        from .workbook import Workbook
        workbook = Workbook(worksheets=[self])

    if worksheet is None:
        # If there's no worksheet then call Workbook.to_excel which will create one
        return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns)

    if rename:
        # adopt the COM worksheet's name as this sheet's name
        self.__name = worksheet.Name

    # set manual calculation and turn off screen updating while we update
    # the cells; both settings are restored in the finally block below
    calculation = xl.Calculation
    screen_updating = xl.ScreenUpdating
    xl.Calculation = constants.xlCalculationManual
    xl.ScreenUpdating = False

    try:
        # clear the worksheet and reset the styles
        if clear:
            worksheet.Cells.ClearContents()
            worksheet.Cells.Font.Bold = False
            worksheet.Cells.Font.Size = 11
            worksheet.Cells.Font.Color = 0x000000
            worksheet.Cells.Interior.ColorIndex = 0
            worksheet.Cells.NumberFormat = "General"

        # get any array formula tables (their cells must not be written
        # individually -- they are set in one FormulaArray call later)
        array_formula_tables = []
        for table, (row, col) in self.__tables.values():
            if isinstance(table, ArrayFormula):
                array_formula_tables.append((row, col, row + table.height, col + table.width))

        def _is_in_array_formula_table(row, col):
            """returns True if this formula cell is part of an array formula table"""
            for top, left, bottom, right in array_formula_tables:
                if bottom >= row >= top and left <= col <= right:
                    return True
            return False

        origin = worksheet.Range("A1")
        xl_cell = origin
        for r, row in enumerate(self.iterrows(workbook)):
            # convert values to types the COM layer accepts
            row = _to_pywintypes(row)

            # set the value and formulae to the excel range (it's much quicker to
            # write a row at a time and update the formula than it is it do it
            # cell by cell)
            if clear:
                xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, len(row)))
                xl_row.Value = row
            else:
                # sheet wasn't cleared: only overwrite non-None cells
                for c, value in enumerate(row):
                    if value is not None:
                        xl_cell.Offset(1, 1 + c).Value = value

            # formulas were written as plain text above; re-set them via
            # Formula/FormulaArray, seeding the cell with any pre-computed
            # value first so dependents see a number before recalculation
            for c, value in enumerate(row):
                if isinstance(value, str):
                    if value.startswith("="):
                        formula_value = self.__formula_values.get((r, c), 0)
                        xl_cell.Offset(1, 1 + c).Value = formula_value
                        xl_cell.Offset(1, 1 + c).Formula = value
                    elif value.startswith("{=") \
                    and not _is_in_array_formula_table(r, c):
                        formula_value = self.__formula_values.get((r, c), 0)
                        xl_cell.Offset(1, 1 + c).Value = formula_value
                        xl_cell.Offset(1, 1 + c).FormulaArray = value

            # move to the next row (Offset is 1-based: (2, 1) means one row
            # down, same column)
            xl_cell = xl_cell.Offset(2, 1)

        # set any array formulas as a single block per table
        for table, (row, col) in self.__tables.values():
            if isinstance(table, ArrayFormula):
                data = table.get_data(workbook, row, col)
                height, width = data.shape
                upper_left = origin.Offset(row+1, col+1)
                lower_right = origin.Offset(row + height, col + width)
                xl_range = worksheet.Range(upper_left, lower_right)
                xl_range.FormulaArray = table.formula.get_formula(workbook, row, col)

        # set any formatting
        for (row, col), style in self._get_all_styles().items():
            r = origin.Offset(1 + row, 1 + col)
            if style.bold:
                r.Font.Bold = True
            if style.excel_number_format is not None:
                r.NumberFormat = style.excel_number_format
            if style.size is not None:
                r.Font.Size = style.size
            if style.text_color is not None:
                r.Font.Color = _to_bgr(style.text_color)
            if style.bg_color is not None:
                r.Interior.Color = _to_bgr(style.bg_color)
            if style.text_wrap or style.border:
                # only the xlsx writer path supports these, not the COM path
                raise Exception("text wrap and border not implemented")

        # add any charts
        for chart, (row, col) in self.__charts:
            top_left = origin.Offset(1 + row, 1 + col)
            xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart
            xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype)
            if chart.title:
                xl_chart.ChartTitle = chart.title
            # NOTE(review): iter_series is called with `self` here but with
            # `workbook` in to_xlsx -- confirm which argument is intended.
            for series in chart.iter_series(self, row, col):
                xl_series = xl_chart.SeriesCollection().NewSeries()
                xl_series.Values = "=%s!%s" % (self.name, series["values"].lstrip("="))
                if "categories" in series:
                    xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("="))
                if "name" in series:
                    xl_series.Name = series["name"]
    finally:
        # always restore the application state, even on failure
        xl.ScreenUpdating = screen_updating
        xl.Calculation = calculation

    if resize_columns:
        # best-effort: AutoFit can fail (e.g. on protected sheets), in
        # which case the error is deliberately ignored
        try:
            worksheet.Cells.EntireColumn.AutoFit()
        except:
            pass
def to_xlsx(self, filename=None, workbook=None):
    """
    Write worksheet to a .xlsx file using xlsxwriter.

    :param str filename: Filename to write to. If None no file is written.
    :param xltable.Workbook: Workbook this sheet belongs to. If None a new workbook
    will be created with this worksheet as the only sheet.
    :return: :py:class:`xlsxwriter.workbook.Workbook` instance.
    """
    from .workbook import Workbook
    if not workbook:
        # no workbook given: wrap self in a new Workbook and delegate;
        # Workbook.to_xlsx calls back into this method with workbook set,
        # so everything below only runs on that second call
        workbook = Workbook(filename=filename)
        workbook.append(self)
        return workbook.to_xlsx()

    ws = workbook.add_xlsx_worksheet(self, self.name)

    # cache of xlsxwriter Format objects keyed by the style attributes
    _styles = {}
    def _get_xlsx_style(cell_style):
        """
        convert rb.excel style to xlsx writer style
        """
        # NOTE(review): text_wrap appears twice in this key tuple; harmless
        # since the tuple is only a cache key, but probably unintended.
        style_args = (
            cell_style.bold,
            cell_style.excel_number_format,
            cell_style.text_color,
            cell_style.bg_color,
            cell_style.size,
            cell_style.text_wrap,
            cell_style.text_wrap,
            cell_style.border,
            cell_style.align,
            cell_style.valign
        )
        if (style_args) not in _styles:
            style = workbook.add_format()
            if cell_style.bold:
                style.set_bold()
            if cell_style.excel_number_format is not None:
                style.set_num_format(cell_style.excel_number_format)
            if cell_style.text_color is not None:
                style.set_font_color("#%06x" % cell_style.text_color)
            if cell_style.bg_color is not None:
                style.set_bg_color("#%06x" % cell_style.bg_color)
            if cell_style.size is not None:
                style.set_font_size(cell_style.size)
            if cell_style.text_wrap:
                style.set_text_wrap()
            if cell_style.border:
                # a frozenset of (position, style) pairs sets individual
                # edges; any other truthy value sets all four borders
                if isinstance(cell_style.border, frozenset):
                    for border_position, border_style in cell_style.border:
                        if border_position == "bottom":
                            style.set_bottom(border_style)
                        elif border_position == "top":
                            style.set_top(border_style)
                        elif border_position == "left":
                            style.set_left(border_style)
                        elif border_position == "right":
                            style.set_right(border_style)
                        else:
                            raise AssertionError("Unknown border position '%s'." % border_position)
                else:
                    style.set_border(cell_style.border)
            if cell_style.align:
                style.set_align(cell_style.align)
            if cell_style.valign:
                style.set_valign(cell_style.valign)
            _styles[style_args] = style
        return _styles[style_args]

    # pre-compute the cells with non-default styles
    ws_styles = self._get_all_styles()
    ws_styles = {(r, c): _get_xlsx_style(s) for ((r, c), s) in ws_styles.items()}
    plain_style = _get_xlsx_style(CellStyle())

    # get any array formula tables (their cells are written as one
    # write_array_formula call after the row loop, not cell by cell)
    array_formula_tables = []
    for table, (row, col) in self.__tables.values():
        if isinstance(table, ArrayFormula):
            array_formula_tables.append((row, col, row + table.height, col + table.width))

    def _is_in_array_formula_table(row, col):
        """returns True if this formula cell is part of an array formula table"""
        for top, left, bottom, right in array_formula_tables:
            if bottom >= row >= top and left <= col <= right:
                return True
        return False

    # write the rows to the worksheet
    for ir, row in enumerate(self.iterrows(workbook)):
        for ic, cell in enumerate(row):
            style = ws_styles.get((ir, ic), plain_style)
            if isinstance(cell, str):
                if cell.startswith("="):
                    # plain formula; seed with any pre-computed value
                    formula_value = self.__formula_values.get((ir, ic), 0)
                    ws.write_formula(ir, ic, cell, style, value=formula_value)
                elif cell.startswith("{="):
                    # array formulas tables are written after everything else,
                    # but individual cells can also be array formulas
                    if not _is_in_array_formula_table(ir, ic):
                        formula_value = self.__formula_values.get((ir, ic), 0)
                        ws.write_array_formula(ir, ic, ir, ic,
                                               cell, style,
                                               value=formula_value)
                else:
                    ws.write(ir, ic, cell, style)
            else:
                # types xlsxwriter previously rejected are stringified
                # straight away; new offenders are learned via the
                # TypeError fallback and remembered on the class
                if isinstance(cell, self._xlsx_unsupported_types):
                    ws.write(ir, ic, str(cell), style)
                else:
                    try:
                        ws.write(ir, ic, cell, style)
                    except TypeError:
                        ws.write(ir, ic, str(cell), style)
                        unsupported_types = set(self._xlsx_unsupported_types)
                        unsupported_types.add(type(cell))
                        self.__class__._xlsx_unsupported_types = tuple(unsupported_types)

    # set any array formulas
    for table, (row, col) in self.__tables.values():
        if isinstance(table, ArrayFormula):
            style = ws_styles.get((row, col), plain_style)
            data = table.get_data(workbook, row, col)
            height, width = data.shape
            bottom, right = (row + height - 1, col + width -1)
            formula = table.formula.get_formula(workbook, row, col)
            ws.write_array_formula(row, col, bottom, right, formula, style, value=data[0][0])
            # write_array_formula only records a value for the top-left
            # cell, so fill the rest of the block explicitly
            for y in range(height):
                for x in range(width):
                    if y == 0 and x == 0:
                        continue
                    ir, ic = row + y, col + x
                    style = ws_styles.get((ir, ic), plain_style)
                    cell = data[y][x]
                    if isinstance(cell, str):
                        # escape non-ascii characters for the xml payload
                        cell_str = cell.encode("ascii", "xmlcharrefreplace").decode("ascii")
                        ws.write_formula(ir, ic, cell_str, style)
                    else:
                        ws.write(ir, ic, cell, style)

    # set any non-default column widths
    for ic, width in self._get_column_widths().items():
        ws.set_column(ic, ic, width)

    # add any charts
    for chart, (row, col) in self.__charts:
        kwargs = {"type": chart.type}
        if chart.subtype:
            kwargs["subtype"] = chart.subtype
        xl_chart = workbook.workbook_obj.add_chart(kwargs)
        if chart.show_blanks:
            xl_chart.show_blanks_as(chart.show_blanks)
        for series in chart.iter_series(workbook, row, col):
            # xlsxwriter expects the sheetname in the formula
            values = series.get("values")
            if isinstance(values, str) and values.startswith("=") and "!" not in values:
                series["values"] = "='%s'!%s" % (self.name, values.lstrip("="))
            categories = series.get("categories")
            if isinstance(categories, str) and categories.startswith("=") and "!" not in categories:
                series["categories"] = "='%s'!%s" % (self.name, categories.lstrip("="))
            xl_chart.add_series(series)
        xl_chart.set_size({"width": chart.width, "height": chart.height})
        if chart.title:
            xl_chart.set_title({"name": chart.title})
        if chart.legend_position:
            xl_chart.set_legend({"position": chart.legend_position})
        if chart.x_axis:
            xl_chart.set_x_axis(chart.x_axis)
        if chart.y_axis:
            xl_chart.set_y_axis(chart.y_axis)
        ws.insert_chart(row, col, xl_chart)

    # add any groups: collapse the row span covered by each group's tables
    for tables, collapsed in self.__groups:
        min_row, max_row = 1000000, -1  # sentinel extremes
        for table, (row, col) in self.__tables.values():
            if table in tables:
                min_row = min(min_row, row)
                max_row = max(max_row, row + table.height)
        for i in range(min_row, max_row+1):
            ws.set_row(i, None, None, {'level': 1, 'hidden': collapsed})

    # closing finalizes the .xlsx file; only done when this call was given
    # a filename (presumably the delegating caller closes otherwise --
    # NOTE(review): confirm against Workbook.to_xlsx)
    if filename:
        workbook.close()

    return workbook
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.