repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
checkout
|
python
|
def checkout(repo, ref):
    """Check out a repository at the given ref.

    :param repo: git.Repo object to operate on
    :param ref: branch name, tag, or SHA to check out
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # logging's warn() is a deprecated alias; warning() is the
        # documented method.
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')
    log.info("Checking out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
|
Check out a repo.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L222-L245
| null |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Process version-mapping argparse arguments.

    Each occurrence of the option contributes mappings of the form
    'repo-name;old1:new1;old2:new2'; repeated occurrences accumulate
    into one defaultdict keyed by repo name.
    """

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        superclass = super(VersionMappingsAction, self)
        superclass.__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Process version-mapping string."""
        # Reuse any mappings accumulated by earlier occurrences of the
        # option; start fresh if the attribute is missing or not ours.
        mappings = getattr(namespace, "version_mappings", defaultdict(dict))
        if not isinstance(mappings, defaultdict):
            mappings = defaultdict(dict)
        repo_name, mapping_spec = values.split(";", 1)
        pairs = {}
        for entry in mapping_spec.split(";"):
            old_version, new_version = entry.split(":")
            pairs[old_version] = new_version
        mappings[repo_name].update(pairs)
        setattr(namespace, self.dest, mappings)
def create_parser():
    """Create argument parser.

    :returns: configured argparse.ArgumentParser for the CLI
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the two OSA commits to compare (nargs=1 means
    # each arrives as a one-element list).
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    # Logging verbosity flags.
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    # Custom action accumulating repeated flags into one defaultdict.
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Find all commits between two commit SHAs."""
    repo = Repo(repo_dir)
    rev_range = "{0}..{1}".format(old_commit, new_commit)
    commits = repo.iter_commits(rev=rev_range)
    if not hide_merges:
        return list(commits)
    # Drop merge commits, identified by their conventional summary prefix.
    return [commit for commit in commits
            if not commit.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # GitHub browse URLs are the clone URL minus any '.git' suffix.
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Map the OpenStack git mirror onto its GitHub mirror.
        owner_and_repo = "/".join(repo_url.rsplit("/", 2)[-2:])
        return "https://github.com/{0}".format(owner_and_repo)
    # If it didn't match these conditions, just return it.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    :param osa_repo_dir: path of the local openstack-ansible clone
    :param commit: git ref to read the project pins from
    :returns: list of (name, repo_url, sha) tuples via normalize_yaml
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load: these files are plain mappings, and yaml.load()
            # without an explicit Loader warns on PyYAML >= 5.1 and raises
            # TypeError on PyYAML >= 6.
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    :param osa_repo_dir: path of the local openstack-ansible clone
    :param commit: git ref to read the role pins from
    :param role_requirements: requirements file name relative to the repo
    :returns: list of (name, src, version) tuples via normalize_yaml
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load: yaml.load() without a Loader is deprecated (PyYAML
        # >= 5.1) and a TypeError on PyYAML >= 6.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible."""
    update_repo(repo_dir, args.osa_repo_url, args.update)
    # Refuse to continue on unknown SHAs or an empty/invalid range.
    validate_commits(repo_dir, [old_commit, new_commit])
    validate_commit_range(repo_dir, old_commit, new_commit)
    # Start off our report with a header and our OpenStack-Ansible commits.
    template_vars = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': get_commits(repo_dir, old_commit, new_commit),
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit,
    }
    return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    :param storage_directory: base directory holding the cloned repos
    :param old_pins: (name, url, sha) tuples from the older OSA commit
    :param new_pins: (name, url, sha) tuples from the newer OSA commit
    :param do_update: fetch the latest refs for each repo when True
    :param version_mappings: optional {repo: {old_sha: new_sha}} overrides
        for pins that no longer exist upstream; may be None
    :returns: concatenated RST report, one section per repo
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        # Substitute the pinned SHA when the user supplied a mapping for it.
        commit_sha = version_mappings.get(repo_name, {}).get(commit_sha,
                                                             commit_sha)
        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)
        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except Exception:
            # No matching pin in the old release; nothing to diff against.
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)
        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst
    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    These are returned as a list of tuples.
    """
    if isinstance(yaml, list):
        # Role requirements arrive as a list of mappings; 'version' may be
        # absent, in which case HEAD is assumed.
        return [(entry['name'], entry['src'], entry.get('version', 'HEAD'))
                for entry in yaml]
    # Otherwise this is a flat mapping of '<project>_git_repo' /
    # '<project>_git_install_branch' keys; build one tuple per project.
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        project = key[:-9]
        normalized.append((project,
                           yaml['{0}_git_repo'.format(project)],
                           yaml['{0}_git_install_branch'.format(project)]))
    return normalized
def parse_arguments():
    """Parse arguments."""
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist."""
    gist_filename = "osa-diff-{0}-{1}.rst".format(old_sha, new_sha)
    description = ("Changes in OpenStack-Ansible between "
                   "{0} and {1}".format(old_sha, new_sha))
    payload = {
        "description": description,
        "public": True,
        "files": {
            gist_filename: {"content": report_data},
        },
    }
    # Unauthenticated POST creates an anonymous public gist.
    response = requests.post("https://api.github.com/gists",
                             data=json.dumps(payload))
    return response.json()['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # stdout is the default sink: when no other destination (and no
    # --quiet) was requested, hand the report back for printing.
    if not (args.quiet or args.gist or args.file):
        return report
    output = ""
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)
    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        output += "\nReport written to file: {0}".format(args.file)
    return output
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands '~' and creates the directory when it does not exist yet.

    :param storage_directory: user-supplied path, may contain '~'
    :returns: the expanded path
    :raises OSError: when the directory cannot be created
    """
    storage_directory = os.path.expanduser(storage_directory)
    if not os.path.exists(storage_directory):
        # makedirs (rather than mkdir) so a custom --directory whose parent
        # directories are missing does not fail.
        os.makedirs(storage_directory)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template."""
    # Templates live in the 'templates' directory next to this module.
    here = os.path.dirname(os.path.abspath(__file__))
    template_dir = "{0}/templates".format(here)
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    template = environment.get_template(template_file)
    return template.render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    :param repo_dir: path of the local clone
    :param repo_url: upstream URL (selects refspecs and is the fetch target)
    :param fetch: when True, fetch the compiled refspecs from repo_url
    :returns: the git.Repo object for repo_dir
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        # -u updates local refs, -f forces non-fast-forward updates.
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)
    # Always reset the clone; fetch the latest refs only when requested.
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Test if a commit is valid for the repository."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for sha in commits:
        # repo.commit raises when the ref cannot be resolved; translate
        # that into our own exception with a user-facing hint.
        try:
            repo.commit(sha)
        except Exception:
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir, commit=sha))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    :returns: True when old_commit..new_commit contains commits, or the
        string 'flip' when only the reversed range does
    :raises exceptions.InvalidCommitRangeException: when neither direction
        of the range contains any commits
    """
    # Are there any commits between the two commits that were provided?
    try:
        commits = get_commits(repo_dir, old_commit, new_commit)
    except Exception:
        commits = []
    if len(commits) == 0:
        # The user might have gotten their commits out of order. Let's flip
        # the order of the commits and try again.
        try:
            commits = get_commits(repo_dir, new_commit, old_commit)
        except Exception:
            commits = []
        if len(commits) == 0:
            # Okay, so there really are no commits between the two commits
            # provided by the user. :)
            msg = ("The commit range {0}..{1} is invalid for {2}."
                   "You may need to use the --update option to fetch the "
                   "latest updates to the git repositories stored on your "
                   "local computer.".format(old_commit, new_commit, repo_dir))
            raise exceptions.InvalidCommitRangeException(msg)
        else:
            return 'flip'
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs reno against each tagged release between the two commits and
    concatenates the output into one RST-formatted string.

    :param osa_repo_dir: path of the local openstack-ansible clone
    :param osa_old_commit: older git ref
    :param osa_new_commit: newer git ref
    :returns: release notes as an RST string
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    # NOTE(review): assumes the 'reno' executable is on PATH — confirm.
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
    # re.sub callback: swap a run of '=' for an equal-length run of '~'.
    return '~' * len(matchobj.group(0))
def _dash_to_num(matchobj):
    # re.sub callback: swap a run of '-' for an equal-length run of '#'.
    return '#' * len(matchobj.group(0))
def _fix_tags_list(tags):
    # Reorder so every major tag's rc/b pre-releases appear immediately
    # before it, while preserving the relative order otherwise.
    reordered = []
    for tag in tags:
        if 'rc' not in tag and 'b' not in tag:
            # A major release: pull its rc/b pre-releases in front of it.
            for candidate in tags:
                if tag in candidate and ('rc' in candidate or
                                         'b' in candidate):
                    reordered.append(candidate)
        # Make sure we don't add the tag in twice
        if tag not in reordered:
            reordered.append(tag)
    return reordered
def run_osa_differ():
    """Start here.

    CLI entry point: parses arguments, builds the RST report for the
    requested commit range, and publishes it per the output options.
    """
    # Get our arguments from the command line
    args = parse_arguments()
    # Set up DEBUG logging if needed
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    # Assemble some variables for the OSA repository.
    # old_commit/new_commit use nargs=1, hence the [0] indexing.
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    # Generate OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)
    # Get OpenStack-Ansible Reno release notes for the packaged
    # releases between the two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                        osa_old_commit,
                                        osa_new_commit)
    # Get the list of OpenStack roles from the newer and older commits.
    role_yaml = get_roles(osa_repo_dir,
                          osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir,
                                 osa_new_commit,
                                 args.role_requirements)
    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        # version_mappings is None when the flag was not given; make_report
        # normalizes that internally.
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)
    if not args.skip_projects:
        # Get the list of OpenStack projects from newer commit and older
        # commit.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir,
                                           osa_new_commit)
        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)
    # Publish report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)


if __name__ == "__main__":
    run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
get_roles
|
python
|
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit."""
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load: yaml.load() without a Loader is deprecated (PyYAML
        # >= 5.1) and a TypeError on PyYAML >= 6.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
|
Read OSA role information at a particular commit.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L248-L260
|
[
"def checkout(repo, ref):\n \"\"\"Checkout a repoself.\"\"\"\n # Delete local branch if it exists, remote branch will be tracked\n # automatically. This prevents stale local branches from causing problems.\n # It also avoids problems with appending origin/ to refs as that doesn't\n # work with tags, SHAs, and upstreams not called origin.\n if ref in repo.branches:\n # eg delete master but leave origin/master\n log.warn(\"Removing local branch {b} for repo {r}\".format(b=ref,\n r=repo))\n # Can't delete currently checked out branch, so make sure head is\n # detached before deleting.\n\n repo.head.reset(index=True, working_tree=True)\n repo.git.checkout(repo.head.commit.hexsha)\n repo.delete_head(ref, '--force')\n\n log.info(\"Checkout out repo {repo} to ref {ref}\".format(repo=repo,\n ref=ref))\n repo.head.reset(index=True, working_tree=True)\n repo.git.checkout(ref)\n repo.head.reset(index=True, working_tree=True)\n sha = repo.head.commit.hexsha\n log.info(\"Current SHA for repo {repo} is {sha}\".format(repo=repo, sha=sha))\n",
"def normalize_yaml(yaml):\n \"\"\"Normalize the YAML from project and role lookups.\n\n These are returned as a list of tuples.\n \"\"\"\n if isinstance(yaml, list):\n # Normalize the roles YAML data\n normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD'))\n for x in yaml]\n else:\n # Extract the project names from the roles YAML and create a list of\n # tuples.\n projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]\n normalized_yaml = []\n for project in projects:\n repo_url = yaml['{0}_git_repo'.format(project)]\n commit_sha = yaml['{0}_git_install_branch'.format(project)]\n normalized_yaml.append((project, repo_url, commit_sha))\n\n return normalized_yaml\n"
] |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Process version-mapping argparse arguments.

    Each occurrence of the option contributes mappings of the form
    'repo-name;old1:new1;old2:new2'; repeated occurrences accumulate
    into one defaultdict keyed by repo name.
    """

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        superclass = super(VersionMappingsAction, self)
        superclass.__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Process version-mapping string."""
        # Reuse any mappings accumulated by earlier occurrences of the
        # option; start fresh if the attribute is missing or not ours.
        mappings = getattr(namespace, "version_mappings", defaultdict(dict))
        if not isinstance(mappings, defaultdict):
            mappings = defaultdict(dict)
        repo_name, mapping_spec = values.split(";", 1)
        pairs = {}
        for entry in mapping_spec.split(";"):
            old_version, new_version = entry.split(":")
            pairs[old_version] = new_version
        mappings[repo_name].update(pairs)
        setattr(namespace, self.dest, mappings)
def create_parser():
    """Create argument parser.

    :returns: configured argparse.ArgumentParser for the CLI
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the two OSA commits to compare (nargs=1 means
    # each arrives as a one-element list).
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    # Logging verbosity flags.
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    # Custom action accumulating repeated flags into one defaultdict.
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Find all commits between two commit SHAs."""
    repo = Repo(repo_dir)
    rev_range = "{0}..{1}".format(old_commit, new_commit)
    commits = repo.iter_commits(rev=rev_range)
    if not hide_merges:
        return list(commits)
    # Drop merge commits, identified by their conventional summary prefix.
    return [commit for commit in commits
            if not commit.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # GitHub browse URLs are the clone URL minus any '.git' suffix.
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Map the OpenStack git mirror onto its GitHub mirror.
        owner_and_repo = "/".join(repo_url.rsplit("/", 2)[-2:])
        return "https://github.com/{0}".format(owner_and_repo)
    # If it didn't match these conditions, just return it.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    :param osa_repo_dir: path of the local openstack-ansible clone
    :param commit: git ref to read the project pins from
    :returns: list of (name, repo_url, sha) tuples via normalize_yaml
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load: these files are plain mappings, and yaml.load()
            # without an explicit Loader warns on PyYAML >= 5.1 and raises
            # TypeError on PyYAML >= 6.
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out a repository at the given ref.

    :param repo: git.Repo object to operate on
    :param ref: branch name, tag, or SHA to check out
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # logging's warn() is a deprecated alias; warning() is the
        # documented method.
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')
    log.info("Checking out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible."""
    update_repo(repo_dir, args.osa_repo_url, args.update)
    # Refuse to continue on unknown SHAs or an empty/invalid range.
    validate_commits(repo_dir, [old_commit, new_commit])
    validate_commit_range(repo_dir, old_commit, new_commit)
    # Start off our report with a header and our OpenStack-Ansible commits.
    template_vars = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': get_commits(repo_dir, old_commit, new_commit),
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit,
    }
    return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    :param storage_directory: base directory holding the cloned repos
    :param old_pins: (name, url, sha) tuples from the older OSA commit
    :param new_pins: (name, url, sha) tuples from the newer OSA commit
    :param do_update: fetch the latest refs for each repo when True
    :param version_mappings: optional {repo: {old_sha: new_sha}} overrides
        for pins that no longer exist upstream; may be None
    :returns: concatenated RST report, one section per repo
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        # Substitute the pinned SHA when the user supplied a mapping for it.
        commit_sha = version_mappings.get(repo_name, {}).get(commit_sha,
                                                             commit_sha)
        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)
        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except Exception:
            # No matching pin in the old release; nothing to diff against.
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)
        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst
    return report
def normalize_yaml(yaml):
"""Normalize the YAML from project and role lookups.
These are returned as a list of tuples.
"""
if isinstance(yaml, list):
# Normalize the roles YAML data
normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD'))
for x in yaml]
else:
# Extract the project names from the roles YAML and create a list of
# tuples.
projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]
normalized_yaml = []
for project in projects:
repo_url = yaml['{0}_git_repo'.format(project)]
commit_sha = yaml['{0}_git_install_branch'.format(project)]
normalized_yaml.append((project, repo_url, commit_sha))
return normalized_yaml
def parse_arguments():
"""Parse arguments."""
parser = create_parser()
args = parser.parse_args()
return args
def post_gist(report_data, old_sha, new_sha):
"""Post the report to a GitHub Gist and return the URL of the gist."""
payload = {
"description": ("Changes in OpenStack-Ansible between "
"{0} and {1}".format(old_sha, new_sha)),
"public": True,
"files": {
"osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
"content": report_data
}
}
}
url = "https://api.github.com/gists"
r = requests.post(url, data=json.dumps(payload))
response = r.json()
return response['html_url']
def publish_report(report, args, old_commit, new_commit):
"""Publish the RST report based on the user request."""
# Print the report to stdout unless the user specified --quiet.
output = ""
if not args.quiet and not args.gist and not args.file:
return report
if args.gist:
gist_url = post_gist(report, old_commit, new_commit)
output += "\nReport posted to GitHub Gist: {0}".format(gist_url)
if args.file is not None:
with open(args.file, 'w') as f:
f.write(report)
output += "\nReport written to file: {0}".format(args.file)
return output
def prepare_storage_dir(storage_directory):
"""Prepare the storage directory."""
storage_directory = os.path.expanduser(storage_directory)
if not os.path.exists(storage_directory):
os.mkdir(storage_directory)
return storage_directory
def render_template(template_file, template_vars):
"""Render a jinja template."""
# Load our Jinja templates
template_dir = "{0}/templates".format(
os.path.dirname(os.path.abspath(__file__))
)
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir),
trim_blocks=True
)
rendered = jinja_env.get_template(template_file).render(template_vars)
return rendered
def repo_clone(repo_dir, repo_url):
"""Clone repository to this host."""
repo = Repo.clone_from(repo_url, repo_dir)
return repo
def repo_pull(repo_dir, repo_url, fetch=False):
"""Reset repository and optionally update it."""
# Make sure the repository is reset to the master branch.
repo = Repo(repo_dir)
repo.git.clean("-df")
repo.git.reset("--hard")
repo.git.checkout("master")
repo.head.reset(index=True, working_tree=True)
# Compile the refspec appropriately to ensure
# that if the repo is from github it includes
# all the refs needed, including PR's.
refspec_list = [
"+refs/heads/*:refs/remotes/origin/*",
"+refs/heads/*:refs/heads/*",
"+refs/tags/*:refs/tags/*"
]
if "github.com" in repo_url:
refspec_list.extend([
"+refs/pull/*:refs/remotes/origin/pr/*",
"+refs/heads/*:refs/remotes/origin/*"])
# Only get the latest updates if requested.
if fetch:
repo.git.fetch(["-u", "-v", "-f",
repo_url,
refspec_list])
return repo
def update_repo(repo_dir, repo_url, fetch=False):
"""Clone the repo if it doesn't exist already, otherwise update it."""
repo_exists = os.path.exists(repo_dir)
if not repo_exists:
log.info("Cloning repo {}".format(repo_url))
repo = repo_clone(repo_dir, repo_url)
# Make sure the repo is properly prepared
# and has all the refs required
log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
repo = repo_pull(repo_dir, repo_url, fetch)
return repo
def validate_commits(repo_dir, commits):
"""Test if a commit is valid for the repository."""
log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
repo = Repo(repo_dir)
for commit in commits:
try:
commit = repo.commit(commit)
except Exception:
msg = ("Commit {commit} could not be found in repo {repo}. "
"You may need to pass --update to fetch the latest "
"updates to the git repositories stored on "
"your local computer.".format(repo=repo_dir, commit=commit))
raise exceptions.InvalidCommitException(msg)
return True
def validate_commit_range(repo_dir, old_commit, new_commit):
"""Check if commit range is valid. Flip it if needed."""
# Are there any commits between the two commits that were provided?
try:
commits = get_commits(repo_dir, old_commit, new_commit)
except Exception:
commits = []
if len(commits) == 0:
# The user might have gotten their commits out of order. Let's flip
# the order of the commits and try again.
try:
commits = get_commits(repo_dir, new_commit, old_commit)
except Exception:
commits = []
if len(commits) == 0:
# Okay, so there really are no commits between the two commits
# provided by the user. :)
msg = ("The commit range {0}..{1} is invalid for {2}."
"You may need to use the --update option to fetch the "
"latest updates to the git repositories stored on your "
"local computer.".format(old_commit, new_commit, repo_dir))
raise exceptions.InvalidCommitRangeException(msg)
else:
return 'flip'
return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
"""Get release notes between the two revisions."""
repo = Repo(osa_repo_dir)
# Get a list of tags, sorted
tags = repo.git.tag().split('\n')
tags = sorted(tags, key=LooseVersion)
# Currently major tags are being printed after rc and
# b tags. We need to fix the list so that major
# tags are printed before rc and b releases
tags = _fix_tags_list(tags)
# Find the closest tag from a given SHA
# The tag found here is the tag that was cut
# either on or before the given SHA
checkout(repo, osa_old_commit)
old_tag = repo.git.describe()
# If the SHA given is between two release tags, then
# 'git describe' will return a tag in form of
# <tag>-<commitNum>-<sha>. For example:
# 14.0.2-3-g6931e26
# Since reno does not support this format, we need to
# strip away the commit number and sha bits.
if '-' in old_tag:
old_tag = old_tag[0:old_tag.index('-')]
# Get the nearest tag associated with the new commit
checkout(repo, osa_new_commit)
new_tag = repo.git.describe()
if '-' in new_tag:
nearest_new_tag = new_tag[0:new_tag.index('-')]
else:
nearest_new_tag = new_tag
# Truncate the tags list to only include versions
# between old_sha and new_sha. The latest release
# is not included in this list. That version will be
# printed separately in the following step.
tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
release_notes = ""
# Checkout the new commit, then run reno to get the latest
# releasenotes that have been created or updated between
# the latest release and this new commit.
repo.git.checkout(osa_new_commit, '-f')
reno_report_command = ['reno',
'report',
'--earliest-version',
nearest_new_tag]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
release_notes += reno_output
# We want to start with the latest packaged release first, so
# the tags list is reversed
for version in reversed(tags):
# If version is an rc or b tag, and it has a major
# release tag, then skip it. There is no need to print
# release notes for an rc or b release unless we are
# comparing shas between two rc or b releases.
repo.git.checkout(version, '-f')
# We are outputing one version at a time here
reno_report_command = ['reno',
'report',
'--branch',
version,
'--earliest-version',
version]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
# We need to ensure the output includes the version we are concerned
# about.
# This is due to https://bugs.launchpad.net/reno/+bug/1670173
if version in reno_output:
release_notes += reno_output
# Clean up "Release Notes" title. We don't need this title for
# each tagged release.
release_notes = release_notes.replace(
"=============\nRelease Notes\n=============",
""
)
# Replace headers that contain '=' with '~' to comply with osa-differ's
# formatting
release_notes = re.sub('===+', _equal_to_tilde, release_notes)
# Replace headers that contain '-' with '#' to comply with osa-differ's
# formatting
release_notes = re.sub('---+', _dash_to_num, release_notes)
return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Start here.

    Command-line entry point: parses arguments, prepares the storage
    directory, builds the RST report (OSA header, optional release notes,
    role diffs, project diffs) and publishes it per the output options.
    """
    # Get our arguments from the command line
    args = parse_arguments()
    # Set up DEBUG logging if needed
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    # Assemble some variables for the OSA repository.
    # old_commit/new_commit are nargs=1 lists from argparse, hence the [0].
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    # Generate OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)
    # Get OpenStack-Ansible Reno release notes for the packaged
    # releases between the two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                        osa_old_commit,
                                        osa_new_commit)
    # Get the list of OpenStack roles from the newer and older commits.
    role_yaml = get_roles(osa_repo_dir,
                          osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir,
                                 osa_new_commit,
                                 args.role_requirements)
    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)
    if not args.skip_projects:
        # Get the list of OpenStack projects from newer commit and older
        # commit.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir,
                                           osa_new_commit)
        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)
    # Publish report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)
if __name__ == "__main__":
run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
make_osa_report
|
python
|
def make_osa_report(repo_dir, old_commit, new_commit,
args):
update_repo(repo_dir, args.osa_repo_url, args.update)
# Are these commits valid?
validate_commits(repo_dir, [old_commit, new_commit])
# Do we have a valid commit range?
validate_commit_range(repo_dir, old_commit, new_commit)
# Get the commits in the range
commits = get_commits(repo_dir, old_commit, new_commit)
# Start off our report with a header and our OpenStack-Ansible commits.
template_vars = {
'args': args,
'repo': 'openstack-ansible',
'commits': commits,
'commit_base_url': get_commit_url(args.osa_repo_url),
'old_sha': old_commit,
'new_sha': new_commit
}
return render_template('offline-header.j2', template_vars)
|
Create initial RST report header for OpenStack-Ansible.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L263-L286
|
[
"def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):\n \"\"\"Find all commits between two commit SHAs.\"\"\"\n repo = Repo(repo_dir)\n commits = repo.iter_commits(rev=\"{0}..{1}\".format(old_commit, new_commit))\n if hide_merges:\n return [x for x in commits if not x.summary.startswith(\"Merge \")]\n else:\n return list(commits)\n",
"def get_commit_url(repo_url):\n \"\"\"Determine URL to view commits for repo.\"\"\"\n if \"github.com\" in repo_url:\n return repo_url[:-4] if repo_url.endswith(\".git\") else repo_url\n if \"git.openstack.org\" in repo_url:\n uri = '/'.join(repo_url.split('/')[-2:])\n return \"https://github.com/{0}\".format(uri)\n\n # If it didn't match these conditions, just return it.\n return repo_url\n",
"def update_repo(repo_dir, repo_url, fetch=False):\n \"\"\"Clone the repo if it doesn't exist already, otherwise update it.\"\"\"\n repo_exists = os.path.exists(repo_dir)\n if not repo_exists:\n log.info(\"Cloning repo {}\".format(repo_url))\n repo = repo_clone(repo_dir, repo_url)\n\n # Make sure the repo is properly prepared\n # and has all the refs required\n log.info(\"Fetching repo {} (fetch: {})\".format(repo_url, fetch))\n repo = repo_pull(repo_dir, repo_url, fetch)\n\n return repo\n",
"def validate_commits(repo_dir, commits):\n \"\"\"Test if a commit is valid for the repository.\"\"\"\n log.debug(\"Validating {c} exist in {r}\".format(c=commits, r=repo_dir))\n repo = Repo(repo_dir)\n for commit in commits:\n try:\n commit = repo.commit(commit)\n except Exception:\n msg = (\"Commit {commit} could not be found in repo {repo}. \"\n \"You may need to pass --update to fetch the latest \"\n \"updates to the git repositories stored on \"\n \"your local computer.\".format(repo=repo_dir, commit=commit))\n raise exceptions.InvalidCommitException(msg)\n\n return True\n",
"def validate_commit_range(repo_dir, old_commit, new_commit):\n \"\"\"Check if commit range is valid. Flip it if needed.\"\"\"\n # Are there any commits between the two commits that were provided?\n try:\n commits = get_commits(repo_dir, old_commit, new_commit)\n except Exception:\n commits = []\n if len(commits) == 0:\n # The user might have gotten their commits out of order. Let's flip\n # the order of the commits and try again.\n try:\n commits = get_commits(repo_dir, new_commit, old_commit)\n except Exception:\n commits = []\n if len(commits) == 0:\n # Okay, so there really are no commits between the two commits\n # provided by the user. :)\n msg = (\"The commit range {0}..{1} is invalid for {2}.\"\n \"You may need to use the --update option to fetch the \"\n \"latest updates to the git repositories stored on your \"\n \"local computer.\".format(old_commit, new_commit, repo_dir))\n raise exceptions.InvalidCommitRangeException(msg)\n else:\n return 'flip'\n\n return True\n",
"def render_template(template_file, template_vars):\n \"\"\"Render a jinja template.\"\"\"\n # Load our Jinja templates\n template_dir = \"{0}/templates\".format(\n os.path.dirname(os.path.abspath(__file__))\n )\n jinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(template_dir),\n trim_blocks=True\n )\n rendered = jinja_env.get_template(template_file).render(template_vars)\n\n return rendered\n"
] |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Process version-mapping argparse arguments.

    Accumulates '--version-mappings' values of the form
    'repo-name;old1:new1;old2:new2' into a ``defaultdict(dict)`` mapping
    repo name -> {old version: new version}. Repeated options merge into
    the same mapping.
    """
    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        superclass = super(VersionMappingsAction, self)
        superclass.__init__(option_strings, dest, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        """Process version-mapping string."""
        # Reuse any mappings accumulated by earlier occurrences of the
        # option; fall back to a fresh defaultdict otherwise.
        version_mappings = getattr(namespace, "version_mappings",
                                   defaultdict(dict))
        if not isinstance(version_mappings, defaultdict):
            version_mappings = defaultdict(dict)
        # First ';'-separated field is the repo name; the rest are
        # 'old:new' pairs.
        repo_name, version_mapping = values.split(";", 1)
        versions = {
            old: new
            for old_new in version_mapping.split(";")
            for old, new in [old_new.split(":")]
        }
        version_mappings[repo_name].update(versions)
        setattr(namespace, self.dest, version_mappings)
def create_parser():
    """Create argument parser.

    Returns the configured argparse.ArgumentParser for the osa-differ CLI:
    two positional commit SHAs plus options for logging, repo storage,
    scope limiting, release notes, and output destination.
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the commit range to compare.
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    # Options that limit which sections appear in the report.
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    # Output destinations; several may be combined.
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Find all commits between two commit SHAs."""
    revision_range = "{0}..{1}".format(old_commit, new_commit)
    commits = Repo(repo_dir).iter_commits(rev=revision_range)
    if not hide_merges:
        return list(commits)
    # Drop merge commits, identified by their conventional summary prefix.
    return [c for c in commits if not c.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # GitHub URLs are browsable directly; just drop a trailing '.git'.
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Map the namespace/project tail onto the GitHub mirror.
        project_path = "/".join(repo_url.rsplit("/", 2)[-2:])
        return "https://github.com/{0}".format(project_path)
    # Unknown host -- hand the URL back untouched.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out *commit* in the OSA repo, parses every YAML file under
    playbooks/defaults/repo_packages/, merges them into one dict, and
    returns the pins normalized to (name, repo_url, sha) tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load avoids arbitrary object construction from YAML and
            # the PyYAML deprecation warning for loader-less load().
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out *ref* (branch, tag, or SHA) in *repo*.

    A same-named local branch is deleted first so a stale local branch
    cannot shadow the remote ref; tags and SHAs are unaffected by that
    step. Local modifications are discarded by the hard resets.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # logging.warn is deprecated; use warning().
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')
    log.info("Checking out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    Checks out *commit* in the OSA repo, parses the *role_requirements*
    YAML file, and returns its contents normalized to (name, src, version)
    tuples.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load avoids arbitrary object construction from YAML and
        # the PyYAML deprecation warning for loader-less load().
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    *old_pins*/*new_pins* are lists of (name, repo_url, sha) tuples.
    For each new pin the matching repo is cloned/updated under
    *storage_directory*, the commits between the old and new SHAs are
    collected, and a rendered RST section is appended. Pins absent from
    *old_pins* are skipped. *version_mappings* optionally remaps SHAs
    (per repo) when a pinned version no longer exists.
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        # Apply any user-supplied remapping for SHAs that no longer exist.
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)
        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)
        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except Exception:
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)
        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst
    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    These are returned as a list of tuples.
    """
    if isinstance(yaml, list):
        # Role requirements files are already a list of dicts; roles
        # without an explicit version default to HEAD.
        return [(role['name'], role['src'], role.get('version', 'HEAD'))
                for role in yaml]
    # Project pins are flat dicts keyed as <name>_git_repo /
    # <name>_git_install_branch; rebuild (name, url, sha) tuples from them.
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        name = key[:-9]
        normalized.append((name,
                           yaml['{0}_git_repo'.format(name)],
                           yaml['{0}_git_install_branch'.format(name)]))
    return normalized
def parse_arguments():
    """Parse arguments."""
    # Build the CLI parser and read sys.argv in one step.
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist.

    Raises requests.HTTPError when the gist API rejects the request.
    """
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            "osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
                "content": report_data
            }
        }
    }
    url = "https://api.github.com/gists"
    r = requests.post(url, data=json.dumps(payload))
    # Fail loudly on HTTP errors instead of raising a confusing KeyError
    # when an error response lacks the 'html_url' field.
    r.raise_for_status()
    response = r.json()
    return response['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # With no output option selected, hand the report back for stdout.
    if not (args.quiet or args.gist or args.file):
        return report
    output = ""
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)
    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        output += "\nReport written to file: {0}".format(args.file)
    return output
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands '~' and creates the directory if it does not exist yet.
    Returns the expanded path. Raises OSError if creation fails.
    """
    storage_directory = os.path.expanduser(storage_directory)
    if not os.path.exists(storage_directory):
        # makedirs creates missing parent directories too; os.mkdir would
        # fail with a confusing error if the parent did not exist.
        os.makedirs(storage_directory)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template."""
    # Templates live in a 'templates' subdirectory next to this module.
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    template = jinja_env.get_template(template_file)
    return template.render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    # clone_from takes (url, destination); return the resulting Repo.
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    The working tree is cleaned and hard-reset onto the local ``master``
    branch, discarding any local modifications. The remote is contacted
    only when *fetch* is True. Returns the git.Repo object.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        # GitHub exposes pull requests under refs/pull/*; track them as
        # origin/pr/* so PR SHAs can be diffed as well.
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it.

    Always resets the working copy via repo_pull(); the remote is only
    fetched when *fetch* is True. Returns the git.Repo object.
    """
    repo_exists = os.path.exists(repo_dir)
    if not repo_exists:
        log.info("Cloning repo {}".format(repo_url))
        repo = repo_clone(repo_dir, repo_url)
    # Make sure the repo is properly prepared
    # and has all the refs required
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    repo = repo_pull(repo_dir, repo_url, fetch)
    return repo
def validate_commits(repo_dir, commits):
    """Test if a commit is valid for the repository.

    Raises exceptions.InvalidCommitException if any entry in *commits*
    cannot be resolved in the repository at *repo_dir*; returns True
    otherwise.
    """
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for commit in commits:
        try:
            commit = repo.commit(commit)
        except Exception:
            # 'commit' still holds the original user-supplied value here,
            # since the failed repo.commit() call never rebound it.
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir, commit=commit))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    Returns True when old..new contains commits, the string 'flip' when
    the range only yields commits in the reverse order, and raises
    exceptions.InvalidCommitRangeException when it is empty both ways.
    """
    # Are there any commits between the two commits that were provided?
    try:
        commits = get_commits(repo_dir, old_commit, new_commit)
    except Exception:
        commits = []
    if len(commits) == 0:
        # The user might have gotten their commits out of order. Let's flip
        # the order of the commits and try again.
        try:
            commits = get_commits(repo_dir, new_commit, old_commit)
        except Exception:
            commits = []
        if len(commits) == 0:
            # Okay, so there really are no commits between the two commits
            # provided by the user. :)
            # (Fixed: the first sentence previously ran straight into the
            # second with no separating space.)
            msg = ("The commit range {0}..{1} is invalid for {2}. "
                   "You may need to use the --update option to fetch the "
                   "latest updates to the git repositories stored on your "
                   "local computer.".format(old_commit, new_commit, repo_dir))
            raise exceptions.InvalidCommitRangeException(msg)
        else:
            return 'flip'
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs the external ``reno report`` command for the new commit and for
    each packaged release tag between the two commits, concatenates the
    output, and reformats the RST heading characters to match this tool's
    heading hierarchy.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Entry point: build the OSA diff report and publish it."""
    args = parse_arguments()

    # Raise log verbosity according to the CLI flags.
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)

    # Make sure there is somewhere to keep the cloned repositories.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)

    old_commit = args.old_commit[0]
    new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)

    # The report opens with the openstack-ansible commits themselves.
    report = make_osa_report(osa_repo_dir, old_commit, new_commit, args)

    # Optionally append the reno release notes between the two commits.
    if args.release_notes:
        report += ("\nRelease Notes\n"
                   "-------------")
        report += get_release_notes(osa_repo_dir, old_commit, new_commit)

    # Role pins are read at both commits regardless of --skip-roles.
    role_yaml = get_roles(osa_repo_dir, old_commit, args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir, new_commit,
                                 args.role_requirements)

    if not args.skip_roles:
        report += ("\nOpenStack-Ansible Roles\n"
                   "-----------------------")
        report += make_report(storage_directory,
                              role_yaml,
                              role_yaml_latest,
                              args.update,
                              args.version_mappings)

    if not args.skip_projects:
        # Project pins come from the repo_packages YAML at each commit.
        project_yaml = get_projects(osa_repo_dir, old_commit)
        project_yaml_latest = get_projects(osa_repo_dir, new_commit)
        report += ("\nOpenStack Projects\n"
                   "------------------")
        report += make_report(storage_directory,
                              project_yaml,
                              project_yaml_latest,
                              args.update)

    # Send the report to stdout, a gist, and/or a file as requested.
    print(publish_report(report, args, old_commit, new_commit))


if __name__ == "__main__":
    run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
make_report
|
python
|
def make_report(storage_directory, old_pins, new_pins, do_update=False,
version_mappings=None):
report = ""
version_mappings = version_mappings or {}
for new_pin in new_pins:
repo_name, repo_url, commit_sha = new_pin
commit_sha = version_mappings.get(repo_name, {}
).get(commit_sha, commit_sha)
# Prepare our repo directory and clone the repo if needed. Only pull
# if the user requests it.
repo_dir = "{0}/{1}".format(storage_directory, repo_name)
update_repo(repo_dir, repo_url, do_update)
# Get the old SHA from the previous pins. If this pin didn't exist
# in the previous OSA revision, skip it. This could happen with newly-
# added projects and roles.
try:
commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
except Exception:
continue
else:
commit_sha_old = version_mappings.get(repo_name, {}
).get(commit_sha_old,
commit_sha_old)
# Loop through the commits and render our template.
validate_commits(repo_dir, [commit_sha_old, commit_sha])
commits = get_commits(repo_dir, commit_sha_old, commit_sha)
template_vars = {
'repo': repo_name,
'commits': commits,
'commit_base_url': get_commit_url(repo_url),
'old_sha': commit_sha_old,
'new_sha': commit_sha
}
rst = render_template('offline-repo-changes.j2', template_vars)
report += rst
return report
|
Create RST report from a list of projects/roles.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L289-L329
|
[
"def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):\n \"\"\"Find all commits between two commit SHAs.\"\"\"\n repo = Repo(repo_dir)\n commits = repo.iter_commits(rev=\"{0}..{1}\".format(old_commit, new_commit))\n if hide_merges:\n return [x for x in commits if not x.summary.startswith(\"Merge \")]\n else:\n return list(commits)\n",
"def get_commit_url(repo_url):\n \"\"\"Determine URL to view commits for repo.\"\"\"\n if \"github.com\" in repo_url:\n return repo_url[:-4] if repo_url.endswith(\".git\") else repo_url\n if \"git.openstack.org\" in repo_url:\n uri = '/'.join(repo_url.split('/')[-2:])\n return \"https://github.com/{0}\".format(uri)\n\n # If it didn't match these conditions, just return it.\n return repo_url\n",
"def update_repo(repo_dir, repo_url, fetch=False):\n \"\"\"Clone the repo if it doesn't exist already, otherwise update it.\"\"\"\n repo_exists = os.path.exists(repo_dir)\n if not repo_exists:\n log.info(\"Cloning repo {}\".format(repo_url))\n repo = repo_clone(repo_dir, repo_url)\n\n # Make sure the repo is properly prepared\n # and has all the refs required\n log.info(\"Fetching repo {} (fetch: {})\".format(repo_url, fetch))\n repo = repo_pull(repo_dir, repo_url, fetch)\n\n return repo\n",
"def validate_commits(repo_dir, commits):\n \"\"\"Test if a commit is valid for the repository.\"\"\"\n log.debug(\"Validating {c} exist in {r}\".format(c=commits, r=repo_dir))\n repo = Repo(repo_dir)\n for commit in commits:\n try:\n commit = repo.commit(commit)\n except Exception:\n msg = (\"Commit {commit} could not be found in repo {repo}. \"\n \"You may need to pass --update to fetch the latest \"\n \"updates to the git repositories stored on \"\n \"your local computer.\".format(repo=repo_dir, commit=commit))\n raise exceptions.InvalidCommitException(msg)\n\n return True\n",
"def render_template(template_file, template_vars):\n \"\"\"Render a jinja template.\"\"\"\n # Load our Jinja templates\n template_dir = \"{0}/templates\".format(\n os.path.dirname(os.path.abspath(__file__))\n )\n jinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(template_dir),\n trim_blocks=True\n )\n rendered = jinja_env.get_template(template_file).render(template_vars)\n\n return rendered\n"
] |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Parse ``repo;old:new;old2:new2`` version-mapping CLI arguments."""

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        super(VersionMappingsAction, self).__init__(option_strings, dest,
                                                    **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Merge one mapping string into the namespace's mapping dict."""
        mappings = getattr(namespace, "version_mappings", defaultdict(dict))
        # A default from add_argument may be a plain value; start fresh.
        if not isinstance(mappings, defaultdict):
            mappings = defaultdict(dict)
        repo_name, raw_pairs = values.split(";", 1)
        for pair in raw_pairs.split(";"):
            old_version, new_version = pair.split(":")
            mappings[repo_name][old_version] = new_version
        setattr(namespace, self.dest, mappings)
def create_parser():
    """Build the command-line argument parser for osa-differ."""
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # Positional arguments: the two commits being compared.
    parser.add_argument('old_commit', action='store', nargs=1,
                        help="Git SHA of the older commit")
    parser.add_argument('new_commit', action='store', nargs=1,
                        help="Git SHA of the newer commit")
    # Logging verbosity switches.
    parser.add_argument('--verbose', action='store_true', default=False,
                        help="Enable info output")
    parser.add_argument('--debug', action='store_true', default=False,
                        help="Enable debug output")
    # Repository handling options.
    parser.add_argument('-d', '--directory', action='store',
                        default="~/.osa-differ",
                        help="Git repo storage directory "
                             "(default: ~/.osa-differ)")
    parser.add_argument('-rr', '--role-requirements', action='store',
                        default='ansible-role-requirements.yml',
                        help="Name of the ansible role requirements file "
                             "to read")
    parser.add_argument('-u', '--update', action='store_true', default=False,
                        help="Fetch latest changes to repo")
    parser.add_argument(
        '--osa-repo-url', action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    parser.add_argument(
        '--version-mappings', action=VersionMappingsAction,
        help=("Map dependency versions in cases where the old version no "
              "longer exists. The argument should be of the form "
              "'repo-name;old-version1:new-version1;"
              "old-version2:new-version2'."),
    )
    # Options that limit which sections appear in the report.
    scope_opts = parser.add_argument_group("Limit scope")
    scope_opts.add_argument("--skip-projects", action="store_true",
                            help="Skip checking for changes in OpenStack "
                                 "projects")
    scope_opts.add_argument("--skip-roles", action="store_true",
                            help="Skip checking for changes in "
                                 "OpenStack-Ansible roles")
    # Reno release-note options.
    reno_opts = parser.add_argument_group("Release notes")
    reno_opts.add_argument("--release-notes", action="store_true",
                           help=("Print reno release notes for "
                                 "OpenStack-Ansible between the two "
                                 "commits"))
    # Where the finished report goes.
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument('--quiet', action='store_true', default=False,
                             help="Do not output to stdout")
    output_opts.add_argument('--gist', action='store_true', default=False,
                             help="Output into a GitHub Gist")
    output_opts.add_argument('--file', metavar="FILENAME", action='store',
                             help="Output to a file")
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """List the commits between two SHAs, optionally dropping merges."""
    repo = Repo(repo_dir)
    rev_range = "{0}..{1}".format(old_commit, new_commit)
    commit_iter = repo.iter_commits(rev=rev_range)
    if not hide_merges:
        return list(commit_iter)
    # Merge commits are recognised purely by their summary text.
    return [c for c in commit_iter if not c.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Work out the web URL where commits for this repo can be browsed."""
    if "github.com" in repo_url:
        # Strip a trailing ".git" so the URL is directly browsable.
        return repo_url[:-4] if repo_url.endswith(".git") else repo_url
    if "git.openstack.org" in repo_url:
        # OpenStack repos are mirrored onto GitHub under the same path.
        org_and_repo = '/'.join(repo_url.split('/')[-2:])
        return "https://github.com/{0}".format(org_and_repo)
    # Unknown hosts pass through untouched.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Collect project pins from the repo_packages YAML files at a commit."""
    # Move the repository to the requested commit first.
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    pattern = '{0}/playbooks/defaults/repo_packages/*.yml'.format(
        osa_repo_dir)
    # Merge every YAML file into one mapping; later files win on clashes,
    # matching the original dict-comprehension merge order.
    combined = {}
    for yaml_file in glob.glob(pattern):
        with open(yaml_file, 'r') as handle:
            combined.update(yaml.load(handle))
    return normalize_yaml(combined)
def checkout(repo, ref):
    """Check out *ref* in *repo*, removing any stale local branch first.

    :param repo: a git.Repo instance
    :param ref: branch name, tag, or SHA to check out
    """
    # Delete the local branch if it exists; the remote branch is tracked
    # automatically. This prevents stale local branches from causing
    # problems. It also avoids problems with appending origin/ to refs, as
    # that doesn't work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # e.g. delete master but leave origin/master
        # NOTE: log.warn is a deprecated alias; use log.warning.
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete the currently checked out branch, so make sure HEAD
        # is detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')

    log.info("Checking out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo,
                                                           sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Load the role requirements YAML from the repo at a given commit."""
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    requirements_path = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(requirements_path, 'r') as handle:
        roles_yaml = yaml.load(handle)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit, args):
    """Render the RST header section covering openstack-ansible itself."""
    update_repo(repo_dir, args.osa_repo_url, args.update)

    # Sanity-check both commits and their ordering before diffing.
    validate_commits(repo_dir, [old_commit, new_commit])
    validate_commit_range(repo_dir, old_commit, new_commit)

    commits = get_commits(repo_dir, old_commit, new_commit)

    # Render the header template with the openstack-ansible commit list.
    context = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': commits,
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit,
    }
    return render_template('offline-header.j2', context)
def normalize_yaml(yaml):
    """Convert role/project YAML data into (name, url, version) tuples.

    NOTE: the parameter name shadows the module-level ``yaml`` import; it
    is kept unchanged for interface compatibility with existing callers.
    """
    if isinstance(yaml, list):
        # Role entries: dicts with name/src and an optional version.
        return [(entry['name'], entry['src'], entry.get('version', 'HEAD'))
                for entry in yaml]

    # Project entries: a flat dict keyed by '<project>_git_repo' and
    # '<project>_git_install_branch'.
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        project = key[:-9]  # drop the trailing '_git_repo'
        normalized.append((project,
                           yaml['{0}_git_repo'.format(project)],
                           yaml['{0}_git_install_branch'.format(project)]))
    return normalized
def parse_arguments():
    """Parse the command-line arguments and return the namespace."""
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Upload the report as a public GitHub Gist and return its URL."""
    gist_filename = "osa-diff-{0}-{1}.rst".format(old_sha, new_sha)
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            gist_filename: {
                "content": report_data
            }
        }
    }
    # The anonymous gists API accepts a JSON body via POST.
    response = requests.post("https://api.github.com/gists",
                             data=json.dumps(payload))
    return response.json()['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Deliver the finished report per the output flags in *args*."""
    # Plain stdout is the default when no destination flags were given.
    if not (args.quiet or args.gist or args.file):
        return report

    output = ""
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)
    if args.file is not None:
        with open(args.file, 'w') as handle:
            handle.write(report)
        output += "\nReport written to file: {0}".format(args.file)
    return output
def prepare_storage_dir(storage_directory):
    """Expand and create the storage directory, returning its full path.

    :param storage_directory: path, possibly containing ``~``
    :returns: the expanded absolute-ish path that now exists
    :raises OSError: if the directory cannot be created
    """
    storage_directory = os.path.expanduser(storage_directory)
    # makedirs with exist_ok avoids the check-then-create race of the
    # previous exists()/mkdir() pair and also creates missing parents.
    os.makedirs(storage_directory, exist_ok=True)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a Jinja2 template that ships alongside this module."""
    # Templates live in a 'templates' directory next to this file.
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    template = environment.get_template(template_file)
    return template.render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone the repository at *repo_url* into *repo_dir*."""
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset the repository to a clean master and optionally fetch it."""
    # Park the checkout on a pristine master branch first.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)

    # Build the refspecs so every needed ref is available, including
    # GitHub pull-request refs when the remote lives on github.com.
    refspecs = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        refspecs += [
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"
        ]

    # Only touch the network when the caller asked for an update.
    if fetch:
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspecs])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist yet, then reset/refresh it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)

    # Always reset the checkout; fetch from the remote only on request.
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Raise InvalidCommitException unless every commit exists in the repo."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    error_template = ("Commit {commit} could not be found in repo {repo}. "
                      "You may need to pass --update to fetch the latest "
                      "updates to the git repositories stored on "
                      "your local computer.")
    for commit in commits:
        try:
            repo.commit(commit)
        except Exception:
            raise exceptions.InvalidCommitException(
                error_template.format(repo=repo_dir, commit=commit))
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Verify the commit range is non-empty; report 'flip' when reversed."""
    def _count_commits(first, second):
        # get_commits raises for unknown revisions; treat that as "none".
        try:
            return len(get_commits(repo_dir, first, second))
        except Exception:
            return 0

    if _count_commits(old_commit, new_commit):
        return True

    # The user may have supplied the commits out of order, so check the
    # reversed range before declaring the pair invalid.
    if _count_commits(new_commit, old_commit):
        return 'flip'

    msg = ("The commit range {0}..{1} is invalid for {2}."
           "You may need to use the --update option to fetch the "
           "latest updates to the git repositories stored on your "
           "local computer.".format(old_commit, new_commit, repo_dir))
    raise exceptions.InvalidCommitRangeException(msg)
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Collect reno release notes between two OSA revisions."""
    repo = Repo(osa_repo_dir)

    # Sort all tags by version number, then move major releases behind
    # their rc/b pre-releases so output order is correct.
    tags = sorted(repo.git.tag().split('\n'), key=LooseVersion)
    tags = _fix_tags_list(tags)

    # 'git describe' finds the tag cut on or before a given SHA. When the
    # SHA sits between releases the result looks like 14.0.2-3-g6931e26;
    # reno can't use that form, so strip the commit count and sha suffix.
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]

    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag

    # Keep only the versions between the two SHAs. The newest release is
    # handled separately right below.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]

    def _run_reno(extra_args):
        # Run reno inside the OSA checkout and capture its stdout.
        proc = subprocess.Popen(['reno', 'report'] + extra_args,
                                cwd=osa_repo_dir,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        return proc.communicate()[0].decode('UTF-8')

    # Notes created or updated between the latest packaged release and
    # the new commit itself.
    repo.git.checkout(osa_new_commit, '-f')
    release_notes = _run_reno(['--earliest-version', nearest_new_tag])

    # Then walk the packaged releases, newest first, one at a time.
    for version in reversed(tags):
        repo.git.checkout(version, '-f')
        reno_output = _run_reno(['--branch', version,
                                 '--earliest-version', version])
        # Only keep output that really covers this version; see
        # https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output

    # Drop the per-report "Release Notes" banner and convert the section
    # markers to the heading levels osa-differ uses.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Entry point: build the OSA diff report and publish it."""
    args = parse_arguments()

    # Raise log verbosity according to the CLI flags.
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)

    # Make sure there is somewhere to keep the cloned repositories.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)

    old_commit = args.old_commit[0]
    new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)

    # The report opens with the openstack-ansible commits themselves.
    report = make_osa_report(osa_repo_dir, old_commit, new_commit, args)

    # Optionally append the reno release notes between the two commits.
    if args.release_notes:
        report += ("\nRelease Notes\n"
                   "-------------")
        report += get_release_notes(osa_repo_dir, old_commit, new_commit)

    # Role pins are read at both commits regardless of --skip-roles.
    role_yaml = get_roles(osa_repo_dir, old_commit, args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir, new_commit,
                                 args.role_requirements)

    if not args.skip_roles:
        report += ("\nOpenStack-Ansible Roles\n"
                   "-----------------------")
        report += make_report(storage_directory,
                              role_yaml,
                              role_yaml_latest,
                              args.update,
                              args.version_mappings)

    if not args.skip_projects:
        # Project pins come from the repo_packages YAML at each commit.
        project_yaml = get_projects(osa_repo_dir, old_commit)
        project_yaml_latest = get_projects(osa_repo_dir, new_commit)
        report += ("\nOpenStack Projects\n"
                   "------------------")
        report += make_report(storage_directory,
                              project_yaml,
                              project_yaml_latest,
                              args.update)

    # Send the report to stdout, a gist, and/or a file as requested.
    print(publish_report(report, args, old_commit, new_commit))


if __name__ == "__main__":
    run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
normalize_yaml
|
python
|
def normalize_yaml(yaml):
if isinstance(yaml, list):
# Normalize the roles YAML data
normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD'))
for x in yaml]
else:
# Extract the project names from the roles YAML and create a list of
# tuples.
projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]
normalized_yaml = []
for project in projects:
repo_url = yaml['{0}_git_repo'.format(project)]
commit_sha = yaml['{0}_git_install_branch'.format(project)]
normalized_yaml.append((project, repo_url, commit_sha))
return normalized_yaml
|
Normalize the YAML from project and role lookups.
These are returned as a list of tuples.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L332-L351
| null |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Parse ``repo;old:new;old2:new2`` version-mapping CLI arguments."""

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        super(VersionMappingsAction, self).__init__(option_strings, dest,
                                                    **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Merge one mapping string into the namespace's mapping dict."""
        mappings = getattr(namespace, "version_mappings", defaultdict(dict))
        # A default from add_argument may be a plain value; start fresh.
        if not isinstance(mappings, defaultdict):
            mappings = defaultdict(dict)
        repo_name, raw_pairs = values.split(";", 1)
        for pair in raw_pairs.split(";"):
            old_version, new_version = pair.split(":")
            mappings[repo_name][old_version] = new_version
        setattr(namespace, self.dest, mappings)
def create_parser():
    """Build the command-line argument parser for osa-differ."""
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # Positional arguments: the two commits being compared.
    parser.add_argument('old_commit', action='store', nargs=1,
                        help="Git SHA of the older commit")
    parser.add_argument('new_commit', action='store', nargs=1,
                        help="Git SHA of the newer commit")
    # Logging verbosity switches.
    parser.add_argument('--verbose', action='store_true', default=False,
                        help="Enable info output")
    parser.add_argument('--debug', action='store_true', default=False,
                        help="Enable debug output")
    # Repository handling options.
    parser.add_argument('-d', '--directory', action='store',
                        default="~/.osa-differ",
                        help="Git repo storage directory "
                             "(default: ~/.osa-differ)")
    parser.add_argument('-rr', '--role-requirements', action='store',
                        default='ansible-role-requirements.yml',
                        help="Name of the ansible role requirements file "
                             "to read")
    parser.add_argument('-u', '--update', action='store_true', default=False,
                        help="Fetch latest changes to repo")
    parser.add_argument(
        '--osa-repo-url', action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    parser.add_argument(
        '--version-mappings', action=VersionMappingsAction,
        help=("Map dependency versions in cases where the old version no "
              "longer exists. The argument should be of the form "
              "'repo-name;old-version1:new-version1;"
              "old-version2:new-version2'."),
    )
    # Options that limit which sections appear in the report.
    scope_opts = parser.add_argument_group("Limit scope")
    scope_opts.add_argument("--skip-projects", action="store_true",
                            help="Skip checking for changes in OpenStack "
                                 "projects")
    scope_opts.add_argument("--skip-roles", action="store_true",
                            help="Skip checking for changes in "
                                 "OpenStack-Ansible roles")
    # Reno release-note options.
    reno_opts = parser.add_argument_group("Release notes")
    reno_opts.add_argument("--release-notes", action="store_true",
                           help=("Print reno release notes for "
                                 "OpenStack-Ansible between the two "
                                 "commits"))
    # Where the finished report goes.
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument('--quiet', action='store_true', default=False,
                             help="Do not output to stdout")
    output_opts.add_argument('--gist', action='store_true', default=False,
                             help="Output into a GitHub Gist")
    output_opts.add_argument('--file', metavar="FILENAME", action='store',
                             help="Output to a file")
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """List the commits between two SHAs, optionally dropping merges."""
    repo = Repo(repo_dir)
    rev_range = "{0}..{1}".format(old_commit, new_commit)
    commit_iter = repo.iter_commits(rev=rev_range)
    if not hide_merges:
        return list(commit_iter)
    # Merge commits are recognised purely by their summary text.
    return [c for c in commit_iter if not c.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Work out the web URL where commits for this repo can be browsed."""
    if "github.com" in repo_url:
        # Strip a trailing ".git" so the URL is directly browsable.
        return repo_url[:-4] if repo_url.endswith(".git") else repo_url
    if "git.openstack.org" in repo_url:
        # OpenStack repos are mirrored onto GitHub under the same path.
        org_and_repo = '/'.join(repo_url.split('/')[-2:])
        return "https://github.com/{0}".format(org_and_repo)
    # Unknown hosts pass through untouched.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Collect project pins from the repo_packages YAML files at a commit."""
    # Move the repository to the requested commit first.
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    pattern = '{0}/playbooks/defaults/repo_packages/*.yml'.format(
        osa_repo_dir)
    # Merge every YAML file into one mapping; later files win on clashes,
    # matching the original dict-comprehension merge order.
    combined = {}
    for yaml_file in glob.glob(pattern):
        with open(yaml_file, 'r') as handle:
            combined.update(yaml.load(handle))
    return normalize_yaml(combined)
def checkout(repo, ref):
    """Check out *ref* in *repo*, discarding any local changes.

    If *ref* also names a local branch, that branch is deleted first so
    a stale local branch can never shadow the remote ref.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # Logger.warn() is deprecated; warning() is the supported spelling.
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')

    log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    Returns normalized (name, src, version) tuples parsed from the role
    requirements file at *commit*.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)

    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load avoids arbitrary object construction from repo YAML.
        roles_yaml = yaml.safe_load(f)

    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible."""
    update_repo(repo_dir, args.osa_repo_url, args.update)

    # Both commits must exist, and the range between them must contain
    # at least one commit in one direction.
    validate_commits(repo_dir, [old_commit, new_commit])
    validate_commit_range(repo_dir, old_commit, new_commit)

    # Render the report header with the commits found in the range.
    context = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': get_commits(repo_dir, old_commit, new_commit),
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit,
    }
    return render_template('offline-header.j2', context)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    For every (name, url, sha) pin in *new_pins*, clone/update the
    repository under *storage_directory*, find the matching old pin and
    render the commit delta between the two SHAs. Pins that did not
    exist in the old revision are skipped.
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        # Apply any user-supplied remapping for SHAs that no longer exist.
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)

        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)

        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except StopIteration:
            # Only a genuinely-missing old pin should skip the repo; any
            # other error must propagate instead of being swallowed.
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)

        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst
    return report
def parse_arguments():
    """Build the argument parser and parse the command line."""
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist.

    Raises requests.HTTPError if the Gist API rejects the request.
    """
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            "osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
                "content": report_data
            }
        }
    }
    url = "https://api.github.com/gists"
    # json= serializes the payload and sets the Content-Type header; a
    # timeout keeps a stalled API call from hanging the report forever.
    r = requests.post(url, json=payload, timeout=30)
    # Fail loudly on an HTTP error instead of a confusing KeyError below.
    r.raise_for_status()
    response = r.json()
    return response['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # With no destination selected (and quiet not set), the raw report
    # goes straight back to the caller for stdout.
    if not (args.quiet or args.gist or args.file):
        return report

    output = ""
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)

    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        output += "\nReport written to file: {0}".format(args.file)

    return output
def prepare_storage_dir(storage_directory):
    """Expand and create the storage directory if needed; return its path."""
    expanded = os.path.expanduser(storage_directory)
    if not os.path.exists(expanded):
        os.mkdir(expanded)
    return expanded
def render_template(template_file, template_vars):
    """Render a jinja template."""
    # Templates live in the templates/ directory next to this module.
    template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                'templates')
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    return env.get_template(template_file).render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    The working tree is cleaned and hard-reset onto the local master
    branch first; then, when *fetch* is True, the remote is fetched with
    a forced refspec covering branches, tags and -- for GitHub remotes
    -- pull-request refs. Returns the Repo object.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        # -f forces ref updates so rewritten upstream history still
        # fetches; the leading "+" on each refspec has the same effect.
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)

    # Make sure the repo is properly prepared
    # and has all the refs required
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Test if a commit is valid for the repository."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for commit in commits:
        try:
            repo.commit(commit)
        except Exception:
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir, commit=commit))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    Returns True when old..new contains commits, the string 'flip' when
    the range is only valid in the opposite order, and raises
    InvalidCommitRangeException when neither direction yields commits.
    """
    # Are there any commits between the two commits that were provided?
    try:
        commits = get_commits(repo_dir, old_commit, new_commit)
    except Exception:
        commits = []
    if len(commits) == 0:
        # The user might have gotten their commits out of order. Let's flip
        # the order of the commits and try again.
        try:
            commits = get_commits(repo_dir, new_commit, old_commit)
        except Exception:
            commits = []
        if len(commits) == 0:
            # Okay, so there really are no commits between the two commits
            # provided by the user. :)
            msg = ("The commit range {0}..{1} is invalid for {2}."
                   "You may need to use the --update option to fetch the "
                   "latest updates to the git repositories stored on your "
                   "local computer.".format(old_commit, new_commit, repo_dir))
            raise exceptions.InvalidCommitRangeException(msg)
        else:
            return 'flip'
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs ``reno report`` once for the unreleased notes at the new commit
    and once per tagged release in the range, then rewrites the RST
    heading characters so the notes nest under osa-differ's headings.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)

    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()

    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]

    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag

    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Start here.

    Command-line entry point: parses arguments, prepares the storage
    directory, builds the RST report (OSA header, optional release
    notes, role changes, project changes) and publishes it. Exits with
    status 1 if the storage directory cannot be created.
    """
    # Get our arguments from the command line
    args = parse_arguments()

    # Set up DEBUG logging if needed
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)

    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)

    # Assemble some variables for the OSA repository.
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)

    # Generate OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)

    # Get OpenStack-Ansible Reno release notes for the packaged
    # releases between the two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                        osa_old_commit,
                                        osa_new_commit)

    # Get the list of OpenStack roles from the newer and older commits.
    role_yaml = get_roles(osa_repo_dir,
                          osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir,
                                 osa_new_commit,
                                 args.role_requirements)

    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)

    if not args.skip_projects:
        # Get the list of OpenStack projects from newer commit and older
        # commit.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir,
                                           osa_new_commit)

        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)

    # Publish report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)
# Allow running this module directly as a script.
if __name__ == "__main__":
    run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
post_gist
|
python
|
def post_gist(report_data, old_sha, new_sha):
payload = {
"description": ("Changes in OpenStack-Ansible between "
"{0} and {1}".format(old_sha, new_sha)),
"public": True,
"files": {
"osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
"content": report_data
}
}
}
url = "https://api.github.com/gists"
r = requests.post(url, data=json.dumps(payload))
response = r.json()
return response['html_url']
|
Post the report to a GitHub Gist and return the URL of the gist.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L361-L376
| null |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Process version-mapping argparse arguments.

    Accepts strings of the form ``repo-name;old1:new1;old2:new2`` and
    accumulates them into a ``defaultdict`` keyed by repository name.
    """

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        super(VersionMappingsAction, self).__init__(option_strings, dest,
                                                    **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Parse one mapping string and merge it into the namespace."""
        mappings = getattr(namespace, "version_mappings", defaultdict(dict))
        if not isinstance(mappings, defaultdict):
            mappings = defaultdict(dict)
        repo_name, version_spec = values.split(";", 1)
        for pair in version_spec.split(";"):
            old_version, new_version = pair.split(":")
            mappings[repo_name][old_version] = new_version
        setattr(namespace, self.dest, mappings)
def create_parser():
    """Create argument parser.

    Builds the full osa-differ command line: two positional commits plus
    options for logging, storage, repo URLs, version mappings, scope
    limiting, release notes and output destination.
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the two commits/refs to compare.
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    # Custom action accumulates repeated mappings into a defaultdict.
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Return the commits between two SHAs.

    Merge commits (summary beginning with "Merge ") are filtered out
    unless *hide_merges* is False.
    """
    revision_range = "{0}..{1}".format(old_commit, new_commit)
    commit_iter = Repo(repo_dir).iter_commits(rev=revision_range)
    if not hide_merges:
        return list(commit_iter)
    return [commit for commit in commit_iter
            if not commit.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Return a browsable base URL for commits in *repo_url*."""
    if "github.com" in repo_url:
        # Strip a trailing ".git" suffix, if present.
        return repo_url[:-4] if repo_url.endswith(".git") else repo_url
    if "git.openstack.org" in repo_url:
        # OpenStack repos are mirrored under the same namespace on GitHub.
        namespace = '/'.join(repo_url.split('/')[-2:])
        return "https://github.com/{0}".format(namespace)
    # Unknown hosts are returned untouched.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out *commit* in the OSA repository, parses every YAML file
    under playbooks/defaults/repo_packages/, merges them and returns
    normalized (name, url, sha) tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load avoids arbitrary Python object construction from
            # repository YAML and silences the yaml.load() deprecation.
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out *ref* in *repo*, discarding any local changes.

    If *ref* also names a local branch, that branch is deleted first so
    a stale local branch can never shadow the remote ref.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # Logger.warn() is deprecated; warning() is the supported spelling.
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')

    log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    Returns normalized (name, src, version) tuples parsed from the role
    requirements file at *commit*.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)

    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load avoids arbitrary object construction from repo YAML.
        roles_yaml = yaml.safe_load(f)

    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible."""
    update_repo(repo_dir, args.osa_repo_url, args.update)

    # Both commits must exist, and the range between them must contain
    # at least one commit in one direction.
    validate_commits(repo_dir, [old_commit, new_commit])
    validate_commit_range(repo_dir, old_commit, new_commit)

    # Render the report header with the commits found in the range.
    context = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': get_commits(repo_dir, old_commit, new_commit),
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit,
    }
    return render_template('offline-header.j2', context)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    For every (name, url, sha) pin in *new_pins*, clone/update the
    repository under *storage_directory*, find the matching old pin and
    render the commit delta between the two SHAs. Pins that did not
    exist in the old revision are skipped.
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        # Apply any user-supplied remapping for SHAs that no longer exist.
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)

        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)

        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except StopIteration:
            # Only a genuinely-missing old pin should skip the repo; any
            # other error must propagate instead of being swallowed.
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)

        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst
    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    Returns a list of (name, url, version) tuples regardless of whether
    the input is a role list or a project-variables dict.

    NOTE(review): the parameter shadows the ``yaml`` module inside this
    function; the name is kept for interface compatibility.
    """
    if isinstance(yaml, list):
        # Role requirements: a list of dicts with name/src/version keys.
        return [(entry['name'], entry['src'], entry.get('version', 'HEAD'))
                for entry in yaml]

    # Project variables: pair each *_git_repo key with its matching
    # *_git_install_branch key.
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        project = key[:-9]
        normalized.append((project,
                           yaml['{0}_git_repo'.format(project)],
                           yaml['{0}_git_install_branch'.format(project)]))
    return normalized
def parse_arguments():
    """Build the argument parser and parse the command line."""
    return create_parser().parse_args()
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # With no destination selected (and quiet not set), the raw report
    # goes straight back to the caller for stdout.
    if not (args.quiet or args.gist or args.file):
        return report

    output = ""
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)

    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        output += "\nReport written to file: {0}".format(args.file)

    return output
def prepare_storage_dir(storage_directory):
    """Expand and create the storage directory if needed; return its path."""
    expanded = os.path.expanduser(storage_directory)
    if not os.path.exists(expanded):
        os.mkdir(expanded)
    return expanded
def render_template(template_file, template_vars):
    """Render a jinja template."""
    # Templates live in the templates/ directory next to this module.
    template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                'templates')
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    return env.get_template(template_file).render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    The working tree is cleaned and hard-reset onto the local master
    branch first; then, when *fetch* is True, the remote is fetched with
    a forced refspec covering branches, tags and -- for GitHub remotes
    -- pull-request refs. Returns the Repo object.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        # -f forces ref updates so rewritten upstream history still
        # fetches; the leading "+" on each refspec has the same effect.
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)

    # Make sure the repo is properly prepared
    # and has all the refs required
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Test if a commit is valid for the repository."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for commit in commits:
        try:
            repo.commit(commit)
        except Exception:
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir, commit=commit))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    Returns True when old..new contains commits, the string 'flip' when
    the range is only valid in the opposite order, and raises
    InvalidCommitRangeException when neither direction yields commits.
    """
    # Are there any commits between the two commits that were provided?
    try:
        commits = get_commits(repo_dir, old_commit, new_commit)
    except Exception:
        commits = []
    if len(commits) == 0:
        # The user might have gotten their commits out of order. Let's flip
        # the order of the commits and try again.
        try:
            commits = get_commits(repo_dir, new_commit, old_commit)
        except Exception:
            commits = []
        if len(commits) == 0:
            # Okay, so there really are no commits between the two commits
            # provided by the user. :)
            msg = ("The commit range {0}..{1} is invalid for {2}."
                   "You may need to use the --update option to fetch the "
                   "latest updates to the git repositories stored on your "
                   "local computer.".format(old_commit, new_commit, repo_dir))
            raise exceptions.InvalidCommitRangeException(msg)
        else:
            return 'flip'
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs ``reno report`` once for the unreleased notes at the new commit
    and once per tagged release in the range, then rewrites the RST
    heading characters so the notes nest under osa-differ's headings.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)

    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()

    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]

    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag

    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Start here.

    CLI entry point: parses arguments, prepares the storage directory,
    builds the RST report (OSA commit header, optional release notes,
    role diffs, project diffs) and publishes it per the output flags.
    """
    # Get our arguments from the command line
    args = parse_arguments()
    # Set up DEBUG logging if needed
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    # Assemble some variables for the OSA repository.
    # old_commit/new_commit are nargs=1 lists, hence the [0].
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    # Generate OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)
    # Get OpenStack-Ansible Reno release notes for the packaged
    # releases between the two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                        osa_old_commit,
                                        osa_new_commit)
    # Get the list of OpenStack roles from the newer and older commits.
    # NOTE: roles are fetched even when --skip-roles is set; only the
    # report section below is gated on the flag.
    role_yaml = get_roles(osa_repo_dir,
                          osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir,
                                 osa_new_commit,
                                 args.role_requirements)
    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)
    if not args.skip_projects:
        # Get the list of OpenStack projects from newer commit and older
        # commit.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir,
                                           osa_new_commit)
        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)
    # Publish report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
prepare_storage_dir
|
python
|
def prepare_storage_dir(storage_directory):
storage_directory = os.path.expanduser(storage_directory)
if not os.path.exists(storage_directory):
os.mkdir(storage_directory)
return storage_directory
|
Prepare the storage directory.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L399-L405
| null |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """argparse action accumulating per-repository version overrides.

    Each option occurrence parses a string of the form
    'repo-name;old1:new1;old2:new2' and merges the old->new pairs into a
    defaultdict keyed by repository name on the parser namespace.
    """

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        super(VersionMappingsAction, self).__init__(option_strings, dest,
                                                    **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Parse one mapping string and merge it into the namespace."""
        current = getattr(namespace, "version_mappings", defaultdict(dict))
        if not isinstance(current, defaultdict):
            # First occurrence (or attribute pre-set to something else):
            # start with a fresh mapping container.
            current = defaultdict(dict)
        repo_name, pair_spec = values.split(";", 1)
        parsed = {}
        for pair in pair_spec.split(";"):
            old_version, new_version = pair.split(":")
            parsed[old_version] = new_version
        current[repo_name].update(parsed)
        setattr(namespace, self.dest, current)
def create_parser():
    """Create argument parser.

    Declares: two positional commit SHAs, logging and storage options,
    scope-limiting flags, release-note output, and output destinations.
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the two commits to compare (each a 1-element
    # list because of nargs=1; callers index [0]).
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    # Logging verbosity flags.
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    # Where cloned repositories are cached between runs.
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    # Custom action accumulates a defaultdict of per-repo SHA remappings.
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    # Output destinations; multiple may be combined.
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Return commits reachable from new_commit but not old_commit.

    Merge commits (summary starting with "Merge ") are filtered out
    unless hide_merges is False.
    """
    rev_range = "{0}..{1}".format(old_commit, new_commit)
    found = Repo(repo_dir).iter_commits(rev=rev_range)
    if not hide_merges:
        return list(found)
    return [commit for commit in found
            if not commit.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # Strip a trailing ".git" so the URL can be browsed directly.
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Rewrite to the GitHub mirror using the last two path segments.
        org_and_repo = '/'.join(repo_url.split('/')[-2:])
        return "https://github.com/{0}".format(org_and_repo)
    # Unrecognized host: hand the URL back untouched.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out *commit* in the OpenStack-Ansible repository, reads every
    YAML file under playbooks/defaults/repo_packages/, merges them into
    one dict and returns normalized (name, url, sha) tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load: these files are plain data, and yaml.load()
            # without an explicit Loader is deprecated and unsafe.
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out *ref* (branch, tag, or SHA) in *repo*.

    Deletes any stale local branch of the same name first so the
    remote-tracking branch is used instead; this also avoids prefixing
    refs with origin/, which would not work for tags and SHAs.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # (log.warning: log.warn is deprecated in the logging module)
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')
    log.info("Checking out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    Returns normalized (name, src, version) tuples parsed from the role
    requirements YAML file at that commit.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load: the requirements file is plain data, and yaml.load()
        # without an explicit Loader is deprecated and unsafe.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible.

    Clones/updates the OSA repo, validates both commits and the range,
    and renders the header template with the commits in that range.
    """
    update_repo(repo_dir, args.osa_repo_url, args.update)
    # Are these commits valid?
    validate_commits(repo_dir, [old_commit, new_commit])
    # Do we have a valid commit range?
    validate_commit_range(repo_dir, old_commit, new_commit)
    # Get the commits in the range
    commits = get_commits(repo_dir, old_commit, new_commit)
    # Start off our report with a header and our OpenStack-Ansible commits.
    template_vars = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': commits,
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit
    }
    return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    old_pins/new_pins are lists of (name, url, sha) tuples.
    version_mappings optionally remaps SHAs that no longer exist in the
    upstream repo (see VersionMappingsAction).
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)
        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)
        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except Exception:
            # Deliberate best-effort: no old pin means the repo is new, so
            # there is nothing to diff against.
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)
        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst
    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    These are returned as a list of (name, repo_url, version) tuples.
    """
    # NOTE: the parameter is named `yaml` upstream and shadows the yaml
    # module inside this function; kept for interface compatibility.
    if isinstance(yaml, list):
        # Role requirements: a list of dicts with name/src and an
        # optional version (defaulting to HEAD).
        return [(role['name'], role['src'], role.get('version', 'HEAD'))
                for role in yaml]
    # Project pins: a flat dict keyed <name>_git_repo and
    # <name>_git_install_branch; derive the project names first.
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        name = key[:-9]
        normalized.append((name,
                           yaml['{0}_git_repo'.format(name)],
                           yaml['{0}_git_install_branch'.format(name)]))
    return normalized
def parse_arguments():
    """Parse the command line arguments."""
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist."""
    gist_filename = "osa-diff-{0}-{1}.rst".format(old_sha, new_sha)
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            gist_filename: {
                "content": report_data
            }
        }
    }
    # Unauthenticated gist creation against the public GitHub API.
    response = requests.post("https://api.github.com/gists",
                             data=json.dumps(payload))
    return response.json()['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # Plain stdout is the default when no destination was requested and
    # --quiet was not given: return the full report for printing.
    if not (args.quiet or args.gist or args.file):
        return report
    messages = []
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        messages.append("\nReport posted to GitHub Gist: {0}".format(gist_url))
    if args.file is not None:
        with open(args.file, 'w') as handle:
            handle.write(report)
        messages.append("\nReport written to file: {0}".format(args.file))
    return "".join(messages)
def render_template(template_file, template_vars):
    """Render a jinja template with the supplied variables."""
    # Templates live in a "templates" directory next to this module.
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    template = environment.get_template(template_file)
    return template.render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host and return the Repo object."""
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    Cleans the working tree, hard-resets and checks out master; when
    *fetch* is True, fetches branches, tags, and (for GitHub remotes)
    pull-request refs so any pinned SHA can be resolved locally.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        # -f forces updates of non-fast-forward refs.
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)
    # Always reset the checkout (and optionally fetch) so it is properly
    # prepared and has all the refs required.
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Raise InvalidCommitException unless every SHA exists in the repo."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for sha in commits:
        try:
            repo.commit(sha)
        except Exception:
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir, commit=sha))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    Returns True when old_commit..new_commit contains commits, the
    string 'flip' when the range only works in reverse order, and raises
    InvalidCommitRangeException when neither order yields any commits.
    """
    # Are there any commits between the two commits that were provided?
    try:
        commits = get_commits(repo_dir, old_commit, new_commit)
    except Exception:
        commits = []
    if len(commits) == 0:
        # The user might have gotten their commits out of order. Let's flip
        # the order of the commits and try again.
        try:
            commits = get_commits(repo_dir, new_commit, old_commit)
        except Exception:
            commits = []
        if len(commits) == 0:
            # Okay, so there really are no commits between the two commits
            # provided by the user. :)
            # (Fixed: a space was missing between the two sentences.)
            msg = ("The commit range {0}..{1} is invalid for {2}. "
                   "You may need to use the --update option to fetch the "
                   "latest updates to the git repositories stored on your "
                   "local computer.".format(old_commit, new_commit, repo_dir))
            raise exceptions.InvalidCommitRangeException(msg)
        else:
            return 'flip'
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs the ``reno report`` CLI once for the new commit and once per
    tagged release in the range, concatenating the output; headings are
    then rewritten ('=' -> '~', '-' -> '#') so they nest under the
    osa-differ report's own heading levels.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        # NOTE(review): the skip described above is not implemented here;
        # only the "version in reno_output" filter below applies. Confirm
        # whether rc/b tags should be filtered explicitly.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Start here.

    CLI entry point: parses arguments, prepares the storage directory,
    builds the RST report (OSA commit header, optional release notes,
    role diffs, project diffs) and publishes it per the output flags.
    """
    # Get our arguments from the command line
    args = parse_arguments()
    # Set up DEBUG logging if needed
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    # Assemble some variables for the OSA repository.
    # old_commit/new_commit are nargs=1 lists, hence the [0].
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    # Generate OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)
    # Get OpenStack-Ansible Reno release notes for the packaged
    # releases between the two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                        osa_old_commit,
                                        osa_new_commit)
    # Get the list of OpenStack roles from the newer and older commits.
    # NOTE: roles are fetched even when --skip-roles is set; only the
    # report section below is gated on the flag.
    role_yaml = get_roles(osa_repo_dir,
                          osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir,
                                 osa_new_commit,
                                 args.role_requirements)
    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)
    if not args.skip_projects:
        # Get the list of OpenStack projects from newer commit and older
        # commit.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir,
                                           osa_new_commit)
        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)
    # Publish report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
render_template
|
python
|
def render_template(template_file, template_vars):
# Load our Jinja templates
template_dir = "{0}/templates".format(
os.path.dirname(os.path.abspath(__file__))
)
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir),
trim_blocks=True
)
rendered = jinja_env.get_template(template_file).render(template_vars)
return rendered
|
Render a jinja template.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L408-L420
| null |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
"""Process version-mapping argparse arguments."""
def __init__(self, option_strings, dest, **kwargs):
"""Initialise instance."""
superclass = super(VersionMappingsAction, self)
superclass.__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
"""Process version-mapping string."""
version_mappings = getattr(namespace, "version_mappings",
defaultdict(dict))
if not isinstance(version_mappings, defaultdict):
version_mappings = defaultdict(dict)
repo_name, version_mapping = values.split(";", 1)
versions = {
old: new
for old_new in version_mapping.split(";")
for old, new in [old_new.split(":")]
}
version_mappings[repo_name].update(versions)
setattr(namespace, self.dest, version_mappings)
def create_parser():
"""Create argument parser."""
description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
parser = argparse.ArgumentParser(
usage='%(prog)s',
description=description,
epilog='Licensed "Apache 2.0"',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'old_commit',
action='store',
nargs=1,
help="Git SHA of the older commit",
)
parser.add_argument(
'new_commit',
action='store',
nargs=1,
help="Git SHA of the newer commit",
)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help="Enable info output",
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help="Enable debug output",
)
parser.add_argument(
'-d', '--directory',
action='store',
default="~/.osa-differ",
help="Git repo storage directory (default: ~/.osa-differ)",
)
parser.add_argument(
'-rr', '--role-requirements',
action='store',
default='ansible-role-requirements.yml',
help="Name of the ansible role requirements file to read",
)
parser.add_argument(
'-u', '--update',
action='store_true',
default=False,
help="Fetch latest changes to repo",
)
parser.add_argument(
'--osa-repo-url',
action='store',
default='https://git.openstack.org/openstack/openstack-ansible',
help="URL of the openstack-ansible git repo",
)
parser.add_argument(
'--version-mappings',
action=VersionMappingsAction,
help=(
"Map dependency versions in cases where the old version no longer "
"exists. The argument should be of the form "
"'repo-name;old-version1:new-version1;old-version2:new-version2'."
),
)
display_opts = parser.add_argument_group("Limit scope")
display_opts.add_argument(
"--skip-projects",
action="store_true",
help="Skip checking for changes in OpenStack projects"
)
display_opts.add_argument(
"--skip-roles",
action="store_true",
help="Skip checking for changes in OpenStack-Ansible roles"
)
release_note_opts = parser.add_argument_group("Release notes")
release_note_opts.add_argument(
"--release-notes",
action="store_true",
help=("Print reno release notes for OpenStack-Ansible "
"between the two commits")
)
output_desc = ("Output is printed to stdout by default.")
output_opts = parser.add_argument_group('Output options', output_desc)
output_opts.add_argument(
'--quiet',
action='store_true',
default=False,
help="Do not output to stdout",
)
output_opts.add_argument(
'--gist',
action='store_true',
default=False,
help="Output into a GitHub Gist",
)
output_opts.add_argument(
'--file',
metavar="FILENAME",
action='store',
help="Output to a file",
)
return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Return commits reachable from new_commit but not old_commit.

    Merge commits (summary starting with "Merge ") are filtered out
    unless hide_merges is False.
    """
    rev_range = "{0}..{1}".format(old_commit, new_commit)
    found = Repo(repo_dir).iter_commits(rev=rev_range)
    if not hide_merges:
        return list(found)
    return [commit for commit in found
            if not commit.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # Strip a trailing ".git" so the URL can be browsed directly.
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Rewrite to the GitHub mirror using the last two path segments.
        org_and_repo = '/'.join(repo_url.split('/')[-2:])
        return "https://github.com/{0}".format(org_and_repo)
    # Unrecognized host: hand the URL back untouched.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out *commit* in the OpenStack-Ansible repository, reads every
    YAML file under playbooks/defaults/repo_packages/, merges them into
    one dict and returns normalized (name, url, sha) tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load: these files are plain data, and yaml.load()
            # without an explicit Loader is deprecated and unsafe.
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out *ref* (branch, tag, or SHA) in *repo*.

    Deletes any stale local branch of the same name first so the
    remote-tracking branch is used instead; this also avoids prefixing
    refs with origin/, which would not work for tags and SHAs.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # (log.warning: log.warn is deprecated in the logging module)
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')
    log.info("Checking out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    Returns normalized (name, src, version) tuples parsed from the role
    requirements YAML file at that commit.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load: the requirements file is plain data, and yaml.load()
        # without an explicit Loader is deprecated and unsafe.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
args):
"""Create initial RST report header for OpenStack-Ansible."""
update_repo(repo_dir, args.osa_repo_url, args.update)
# Are these commits valid?
validate_commits(repo_dir, [old_commit, new_commit])
# Do we have a valid commit range?
validate_commit_range(repo_dir, old_commit, new_commit)
# Get the commits in the range
commits = get_commits(repo_dir, old_commit, new_commit)
# Start off our report with a header and our OpenStack-Ansible commits.
template_vars = {
'args': args,
'repo': 'openstack-ansible',
'commits': commits,
'commit_base_url': get_commit_url(args.osa_repo_url),
'old_sha': old_commit,
'new_sha': new_commit
}
return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    Each pin is a (name, repo_url, sha) tuple.  Pins present in
    ``new_pins`` but absent from ``old_pins`` are skipped, since there
    is no older revision to diff against.  ``version_mappings`` remaps
    SHAs that no longer exist in a repository.
    """
    report = ""
    version_mappings = version_mappings or {}
    for repo_name, repo_url, commit_sha in new_pins:
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)

        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)

        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except StopIteration:
            # Only next() exhausting the generator is expected here; a
            # broad `except Exception` would hide real bugs.
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)

        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)

        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        report += render_template('offline-repo-changes.j2', template_vars)
    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    These are returned as a list of (name, url, version) tuples.
    """
    if isinstance(yaml, list):
        # Role-style YAML: a list of dicts with name/src and an
        # optional version key (defaulting to HEAD).
        return [(entry['name'], entry['src'], entry.get('version', 'HEAD'))
                for entry in yaml]

    # Project-style YAML: a flat mapping with *_git_repo and
    # *_git_install_branch keys.  Derive project names by stripping the
    # 9-character "_git_repo" suffix, then pair each name with its
    # repository URL and pinned SHA.
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        project = key[:-9]
        normalized.append((project,
                           yaml['{0}_git_repo'.format(project)],
                           yaml['{0}_git_install_branch'.format(project)]))
    return normalized
def parse_arguments():
    """Parse arguments."""
    # Delegate all CLI definition to create_parser().
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist."""
    gist_filename = "osa-diff-{0}-{1}.rst".format(old_sha, new_sha)
    description = ("Changes in OpenStack-Ansible between "
                   "{0} and {1}".format(old_sha, new_sha))
    payload = {
        "description": description,
        "public": True,
        "files": {
            gist_filename: {"content": report_data}
        }
    }
    # Anonymous gist creation; the JSON response carries the public URL
    # of the newly created gist.
    response = requests.post("https://api.github.com/gists",
                             data=json.dumps(payload))
    return response.json()['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request.

    Returns the report itself when no destination was chosen, otherwise
    a status message describing where the report went.
    """
    # Default behaviour: emit the report on stdout when the user asked
    # for no other destination and did not silence output.
    if not (args.quiet or args.gist or args.file):
        return report

    output = ""
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)

    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        output += "\nReport written to file: {0}".format(args.file)

    return output
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands ``~`` in the path and creates the directory if it does not
    already exist.  Returns the expanded path.  Raises OSError if the
    directory cannot be created (callers catch this).
    """
    storage_directory = os.path.expanduser(storage_directory)
    # makedirs with exist_ok avoids the check-then-create race of
    # exists()+mkdir() and also creates missing parent directories
    # (os.mkdir would fail when the parent does not exist).
    os.makedirs(storage_directory, exist_ok=True)
    return storage_directory
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it."""
    # Discard local modifications and return to the master branch so
    # later checkouts start from a known-clean state.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)

    # Fetch branches and tags; for GitHub remotes also fetch the pull
    # request refs so PR SHAs can be diffed.
    refspecs = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        refspecs += [
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"
        ]

    # Only touch the network when the caller asked for an update.
    if fetch:
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspecs])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)

    # Always run the pull step so the clone is reset to a clean state
    # and carries all required refs; the remote is contacted only when
    # fetch is True.
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Test if a commit is valid for the repository.

    Raises InvalidCommitException when any commit cannot be resolved.
    """
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for commit in commits:
        try:
            repo.commit(commit)
        except Exception:
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir, commit=commit))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    Returns True when old..new contains commits, the string 'flip' when
    the commits were supplied in reverse order, and raises
    InvalidCommitRangeException when no commits exist either way.
    """
    # Are there any commits between the two commits that were provided?
    try:
        commits = get_commits(repo_dir, old_commit, new_commit)
    except Exception:
        commits = []
    if len(commits) == 0:
        # The user might have gotten their commits out of order. Let's flip
        # the order of the commits and try again.
        try:
            commits = get_commits(repo_dir, new_commit, old_commit)
        except Exception:
            commits = []
        if len(commits) == 0:
            # Okay, so there really are no commits between the two commits
            # provided by the user. :)
            # (Message previously read "{2}.You may" — missing space.)
            msg = ("The commit range {0}..{1} is invalid for {2}. "
                   "You may need to use the --update option to fetch the "
                   "latest updates to the git repositories stored on your "
                   "local computer.".format(old_commit, new_commit, repo_dir))
            raise exceptions.InvalidCommitRangeException(msg)
        else:
            return 'flip'
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs ``reno report`` once for the unreleased changes after the
    nearest new tag, then once per packaged release tag in the range,
    and finally rewrites the RST headings to fit the surrounding
    report.  Requires the ``reno`` command on PATH; checks the repo
    out at several refs as a side effect.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    # NOTE(review): assumes both tags appear in the fixed tags list —
    # a ValueError from .index() would propagate otherwise; confirm.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Start here."""
    args = parse_arguments()

    # Raise log verbosity as requested on the command line.
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)

    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)

    old_commit = args.old_commit[0]
    new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)

    # Report header with the OpenStack-Ansible commit range.
    report = make_osa_report(osa_repo_dir, old_commit, new_commit, args)

    # Reno release notes for the packaged releases between the commits.
    if args.release_notes:
        report += ("\nRelease Notes\n"
                   "-------------")
        report += get_release_notes(osa_repo_dir, old_commit, new_commit)

    # Role pins are read for both commits regardless of --skip-roles,
    # matching the original control flow (get_roles also checks out the
    # repo as a side effect).
    old_roles = get_roles(osa_repo_dir, old_commit, args.role_requirements)
    new_roles = get_roles(osa_repo_dir, new_commit, args.role_requirements)

    if not args.skip_roles:
        report += ("\nOpenStack-Ansible Roles\n"
                   "-----------------------")
        report += make_report(storage_directory,
                              old_roles,
                              new_roles,
                              args.update,
                              args.version_mappings)

    if not args.skip_projects:
        # Project pins from the newer and older commits.
        old_projects = get_projects(osa_repo_dir, old_commit)
        new_projects = get_projects(osa_repo_dir, new_commit)
        report += ("\nOpenStack Projects\n"
                   "------------------")
        report += make_report(storage_directory,
                              old_projects,
                              new_projects,
                              args.update)

    # Publish report according to the user's request.
    print(publish_report(report, args, old_commit, new_commit))
# Script entry point when executed directly (python osa_differ.py ...).
if __name__ == "__main__":
    run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
repo_pull
|
python
|
def repo_pull(repo_dir, repo_url, fetch=False):
# Make sure the repository is reset to the master branch.
repo = Repo(repo_dir)
repo.git.clean("-df")
repo.git.reset("--hard")
repo.git.checkout("master")
repo.head.reset(index=True, working_tree=True)
# Compile the refspec appropriately to ensure
# that if the repo is from github it includes
# all the refs needed, including PR's.
refspec_list = [
"+refs/heads/*:refs/remotes/origin/*",
"+refs/heads/*:refs/heads/*",
"+refs/tags/*:refs/tags/*"
]
if "github.com" in repo_url:
refspec_list.extend([
"+refs/pull/*:refs/remotes/origin/pr/*",
"+refs/heads/*:refs/remotes/origin/*"])
# Only get the latest updates if requested.
if fetch:
repo.git.fetch(["-u", "-v", "-f",
repo_url,
refspec_list])
return repo
|
Reset repository and optionally update it.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L429-L456
| null |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Process version-mapping argparse arguments."""

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        super(VersionMappingsAction, self).__init__(option_strings, dest,
                                                    **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Process a version-mapping string.

        ``values`` looks like ``repo;old1:new1;old2:new2``.  Mappings
        accumulate across repeated uses of the option.
        """
        mappings = getattr(namespace, "version_mappings", None)
        if not isinstance(mappings, defaultdict):
            mappings = defaultdict(dict)
        repo_name, pairs = values.split(";", 1)
        for pair in pairs.split(";"):
            old, new = pair.split(":")
            mappings[repo_name][old] = new
        setattr(namespace, self.dest, mappings)
def create_parser():
    """Create argument parser.

    Builds the full CLI: two positional commit SHAs plus option groups
    for scope limiting, release notes, and output destinations.
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the commit range to diff.
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    # Logging verbosity switches (see run_osa_differ).
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    # Custom action accumulates per-repo version remappings into a
    # defaultdict on the namespace.
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    # Output destination options (stdout is the default; see
    # publish_report for how these interact).
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Find all commits between two commit SHAs."""
    repo = Repo(repo_dir)
    revspec = "{0}..{1}".format(old_commit, new_commit)
    commits = repo.iter_commits(rev=revspec)
    if not hide_merges:
        return list(commits)
    # Drop merge commits, identified by their conventional summary line.
    return [c for c in commits if not c.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # Strip a trailing ".git" so the URL is browsable.
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Point at the GitHub mirror of the OpenStack repository.
        org_and_repo = '/'.join(repo_url.split('/')[-2:])
        return "https://github.com/{0}".format(org_and_repo)
    # If it didn't match these conditions, just return it.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out ``commit``, merges every repo_packages YAML file into one
    mapping, and returns it normalized to (name, url, sha) tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)

    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load avoids constructing arbitrary Python objects;
            # these files contain plain key/value pins only.
            yaml_parsed.append(yaml.safe_load(f))

    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}

    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out ``ref`` (branch, tag, or SHA) in ``repo``."""
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # logger.warn() is a deprecated alias; use warning().
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')

    log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)

    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
"""Read OSA role information at a particular commit."""
repo = Repo(osa_repo_dir)
checkout(repo, commit)
log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
f=role_requirements))
filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
with open(filename, 'r') as f:
roles_yaml = yaml.load(f)
return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
args):
"""Create initial RST report header for OpenStack-Ansible."""
update_repo(repo_dir, args.osa_repo_url, args.update)
# Are these commits valid?
validate_commits(repo_dir, [old_commit, new_commit])
# Do we have a valid commit range?
validate_commit_range(repo_dir, old_commit, new_commit)
# Get the commits in the range
commits = get_commits(repo_dir, old_commit, new_commit)
# Start off our report with a header and our OpenStack-Ansible commits.
template_vars = {
'args': args,
'repo': 'openstack-ansible',
'commits': commits,
'commit_base_url': get_commit_url(args.osa_repo_url),
'old_sha': old_commit,
'new_sha': new_commit
}
return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
version_mappings=None):
"""Create RST report from a list of projects/roles."""
report = ""
version_mappings = version_mappings or {}
for new_pin in new_pins:
repo_name, repo_url, commit_sha = new_pin
commit_sha = version_mappings.get(repo_name, {}
).get(commit_sha, commit_sha)
# Prepare our repo directory and clone the repo if needed. Only pull
# if the user requests it.
repo_dir = "{0}/{1}".format(storage_directory, repo_name)
update_repo(repo_dir, repo_url, do_update)
# Get the old SHA from the previous pins. If this pin didn't exist
# in the previous OSA revision, skip it. This could happen with newly-
# added projects and roles.
try:
commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
except Exception:
continue
else:
commit_sha_old = version_mappings.get(repo_name, {}
).get(commit_sha_old,
commit_sha_old)
# Loop through the commits and render our template.
validate_commits(repo_dir, [commit_sha_old, commit_sha])
commits = get_commits(repo_dir, commit_sha_old, commit_sha)
template_vars = {
'repo': repo_name,
'commits': commits,
'commit_base_url': get_commit_url(repo_url),
'old_sha': commit_sha_old,
'new_sha': commit_sha
}
rst = render_template('offline-repo-changes.j2', template_vars)
report += rst
return report
def normalize_yaml(yaml):
"""Normalize the YAML from project and role lookups.
These are returned as a list of tuples.
"""
if isinstance(yaml, list):
# Normalize the roles YAML data
normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD'))
for x in yaml]
else:
# Extract the project names from the roles YAML and create a list of
# tuples.
projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]
normalized_yaml = []
for project in projects:
repo_url = yaml['{0}_git_repo'.format(project)]
commit_sha = yaml['{0}_git_install_branch'.format(project)]
normalized_yaml.append((project, repo_url, commit_sha))
return normalized_yaml
def parse_arguments():
"""Parse arguments."""
parser = create_parser()
args = parser.parse_args()
return args
def post_gist(report_data, old_sha, new_sha):
"""Post the report to a GitHub Gist and return the URL of the gist."""
payload = {
"description": ("Changes in OpenStack-Ansible between "
"{0} and {1}".format(old_sha, new_sha)),
"public": True,
"files": {
"osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
"content": report_data
}
}
}
url = "https://api.github.com/gists"
r = requests.post(url, data=json.dumps(payload))
response = r.json()
return response['html_url']
def publish_report(report, args, old_commit, new_commit):
"""Publish the RST report based on the user request."""
# Print the report to stdout unless the user specified --quiet.
output = ""
if not args.quiet and not args.gist and not args.file:
return report
if args.gist:
gist_url = post_gist(report, old_commit, new_commit)
output += "\nReport posted to GitHub Gist: {0}".format(gist_url)
if args.file is not None:
with open(args.file, 'w') as f:
f.write(report)
output += "\nReport written to file: {0}".format(args.file)
return output
def prepare_storage_dir(storage_directory):
"""Prepare the storage directory."""
storage_directory = os.path.expanduser(storage_directory)
if not os.path.exists(storage_directory):
os.mkdir(storage_directory)
return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template."""
    # Templates live alongside this module in a "templates" directory.
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    template = env.get_template(template_file)
    return template.render(template_vars)
def repo_clone(repo_dir, repo_url):
"""Clone repository to this host."""
repo = Repo.clone_from(repo_url, repo_dir)
return repo
def update_repo(repo_dir, repo_url, fetch=False):
"""Clone the repo if it doesn't exist already, otherwise update it."""
repo_exists = os.path.exists(repo_dir)
if not repo_exists:
log.info("Cloning repo {}".format(repo_url))
repo = repo_clone(repo_dir, repo_url)
# Make sure the repo is properly prepared
# and has all the refs required
log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
repo = repo_pull(repo_dir, repo_url, fetch)
return repo
def validate_commits(repo_dir, commits):
"""Test if a commit is valid for the repository."""
log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
repo = Repo(repo_dir)
for commit in commits:
try:
commit = repo.commit(commit)
except Exception:
msg = ("Commit {commit} could not be found in repo {repo}. "
"You may need to pass --update to fetch the latest "
"updates to the git repositories stored on "
"your local computer.".format(repo=repo_dir, commit=commit))
raise exceptions.InvalidCommitException(msg)
return True
def validate_commit_range(repo_dir, old_commit, new_commit):
"""Check if commit range is valid. Flip it if needed."""
# Are there any commits between the two commits that were provided?
try:
commits = get_commits(repo_dir, old_commit, new_commit)
except Exception:
commits = []
if len(commits) == 0:
# The user might have gotten their commits out of order. Let's flip
# the order of the commits and try again.
try:
commits = get_commits(repo_dir, new_commit, old_commit)
except Exception:
commits = []
if len(commits) == 0:
# Okay, so there really are no commits between the two commits
# provided by the user. :)
msg = ("The commit range {0}..{1} is invalid for {2}."
"You may need to use the --update option to fetch the "
"latest updates to the git repositories stored on your "
"local computer.".format(old_commit, new_commit, repo_dir))
raise exceptions.InvalidCommitRangeException(msg)
else:
return 'flip'
return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
"""Get release notes between the two revisions."""
repo = Repo(osa_repo_dir)
# Get a list of tags, sorted
tags = repo.git.tag().split('\n')
tags = sorted(tags, key=LooseVersion)
# Currently major tags are being printed after rc and
# b tags. We need to fix the list so that major
# tags are printed before rc and b releases
tags = _fix_tags_list(tags)
# Find the closest tag from a given SHA
# The tag found here is the tag that was cut
# either on or before the given SHA
checkout(repo, osa_old_commit)
old_tag = repo.git.describe()
# If the SHA given is between two release tags, then
# 'git describe' will return a tag in form of
# <tag>-<commitNum>-<sha>. For example:
# 14.0.2-3-g6931e26
# Since reno does not support this format, we need to
# strip away the commit number and sha bits.
if '-' in old_tag:
old_tag = old_tag[0:old_tag.index('-')]
# Get the nearest tag associated with the new commit
checkout(repo, osa_new_commit)
new_tag = repo.git.describe()
if '-' in new_tag:
nearest_new_tag = new_tag[0:new_tag.index('-')]
else:
nearest_new_tag = new_tag
# Truncate the tags list to only include versions
# between old_sha and new_sha. The latest release
# is not included in this list. That version will be
# printed separately in the following step.
tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
release_notes = ""
# Checkout the new commit, then run reno to get the latest
# releasenotes that have been created or updated between
# the latest release and this new commit.
repo.git.checkout(osa_new_commit, '-f')
reno_report_command = ['reno',
'report',
'--earliest-version',
nearest_new_tag]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
release_notes += reno_output
# We want to start with the latest packaged release first, so
# the tags list is reversed
for version in reversed(tags):
# If version is an rc or b tag, and it has a major
# release tag, then skip it. There is no need to print
# release notes for an rc or b release unless we are
# comparing shas between two rc or b releases.
repo.git.checkout(version, '-f')
# We are outputing one version at a time here
reno_report_command = ['reno',
'report',
'--branch',
version,
'--earliest-version',
version]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
# We need to ensure the output includes the version we are concerned
# about.
# This is due to https://bugs.launchpad.net/reno/+bug/1670173
if version in reno_output:
release_notes += reno_output
# Clean up "Release Notes" title. We don't need this title for
# each tagged release.
release_notes = release_notes.replace(
"=============\nRelease Notes\n=============",
""
)
# Replace headers that contain '=' with '~' to comply with osa-differ's
# formatting
release_notes = re.sub('===+', _equal_to_tilde, release_notes)
# Replace headers that contain '-' with '#' to comply with osa-differ's
# formatting
release_notes = re.sub('---+', _dash_to_num, release_notes)
return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
"""Start here."""
# Get our arguments from the command line
args = parse_arguments()
# Set up DEBUG logging if needed
if args.debug:
log.setLevel(logging.DEBUG)
elif args.verbose:
log.setLevel(logging.INFO)
# Create the storage directory if it doesn't exist already.
try:
storage_directory = prepare_storage_dir(args.directory)
except OSError:
print("ERROR: Couldn't create the storage directory {0}. "
"Please create it manually.".format(args.directory))
sys.exit(1)
# Assemble some variables for the OSA repository.
osa_old_commit = args.old_commit[0]
osa_new_commit = args.new_commit[0]
osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
# Generate OpenStack-Ansible report header.
report_rst = make_osa_report(osa_repo_dir,
osa_old_commit,
osa_new_commit,
args)
# Get OpenStack-Ansible Reno release notes for the packaged
# releases between the two commits.
if args.release_notes:
report_rst += ("\nRelease Notes\n"
"-------------")
report_rst += get_release_notes(osa_repo_dir,
osa_old_commit,
osa_new_commit)
# Get the list of OpenStack roles from the newer and older commits.
role_yaml = get_roles(osa_repo_dir,
osa_old_commit,
args.role_requirements)
role_yaml_latest = get_roles(osa_repo_dir,
osa_new_commit,
args.role_requirements)
if not args.skip_roles:
# Generate the role report.
report_rst += ("\nOpenStack-Ansible Roles\n"
"-----------------------")
report_rst += make_report(storage_directory,
role_yaml,
role_yaml_latest,
args.update,
args.version_mappings)
if not args.skip_projects:
# Get the list of OpenStack projects from newer commit and older
# commit.
project_yaml = get_projects(osa_repo_dir, osa_old_commit)
project_yaml_latest = get_projects(osa_repo_dir,
osa_new_commit)
# Generate the project report.
report_rst += ("\nOpenStack Projects\n"
"------------------")
report_rst += make_report(storage_directory,
project_yaml,
project_yaml_latest,
args.update)
# Publish report according to the user's request.
output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
print(output)
if __name__ == "__main__":
run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
update_repo
|
python
|
def update_repo(repo_dir, repo_url, fetch=False):
repo_exists = os.path.exists(repo_dir)
if not repo_exists:
log.info("Cloning repo {}".format(repo_url))
repo = repo_clone(repo_dir, repo_url)
# Make sure the repo is properly prepared
# and has all the refs required
log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
repo = repo_pull(repo_dir, repo_url, fetch)
return repo
|
Clone the repo if it doesn't exist already, otherwise update it.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L459-L471
|
[
"def repo_clone(repo_dir, repo_url):\n \"\"\"Clone repository to this host.\"\"\"\n repo = Repo.clone_from(repo_url, repo_dir)\n return repo\n",
"def repo_pull(repo_dir, repo_url, fetch=False):\n \"\"\"Reset repository and optionally update it.\"\"\"\n # Make sure the repository is reset to the master branch.\n repo = Repo(repo_dir)\n repo.git.clean(\"-df\")\n repo.git.reset(\"--hard\")\n repo.git.checkout(\"master\")\n repo.head.reset(index=True, working_tree=True)\n\n # Compile the refspec appropriately to ensure\n # that if the repo is from github it includes\n # all the refs needed, including PR's.\n refspec_list = [\n \"+refs/heads/*:refs/remotes/origin/*\",\n \"+refs/heads/*:refs/heads/*\",\n \"+refs/tags/*:refs/tags/*\"\n ]\n if \"github.com\" in repo_url:\n refspec_list.extend([\n \"+refs/pull/*:refs/remotes/origin/pr/*\",\n \"+refs/heads/*:refs/remotes/origin/*\"])\n\n # Only get the latest updates if requested.\n if fetch:\n repo.git.fetch([\"-u\", \"-v\", \"-f\",\n repo_url,\n refspec_list])\n return repo\n"
] |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Process version-mapping argparse arguments.

    Accumulates arguments of the form
    ``repo-name;old1:new1;old2:new2`` into a ``defaultdict`` mapping
    repo name -> {old version: new version}.  The option may be given
    multiple times; mappings for the same repo are merged.
    """

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        superclass = super(VersionMappingsAction, self)
        superclass.__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Process version-mapping string."""
        # Read back what earlier occurrences stored under our own dest.
        # The original hard-coded "version_mappings" here, which silently
        # discarded prior values whenever the option was registered with
        # a different dest.
        version_mappings = getattr(namespace, self.dest,
                                   defaultdict(dict))
        if not isinstance(version_mappings, defaultdict):
            version_mappings = defaultdict(dict)
        repo_name, version_mapping = values.split(";", 1)
        versions = {
            old: new
            for old_new in version_mapping.split(";")
            for old, new in [old_new.split(":")]
        }
        version_mappings[repo_name].update(versions)
        setattr(namespace, self.dest, version_mappings)
def create_parser():
    """Create argument parser.

    Builds the osa-differ CLI: two positional commit SHAs plus options
    controlling verbosity, repo storage, comparison scope, release
    notes, and the output destination (stdout, GitHub Gist, or file).

    :returns: a configured :class:`argparse.ArgumentParser`
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the two OSA commits to compare.
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    # Custom action that merges repeated occurrences into one dict.
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    # Output destinations are not mutually exclusive; see publish_report.
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Find all commits between two commit SHAs."""
    rev_range = "{0}..{1}".format(old_commit, new_commit)
    commit_iter = Repo(repo_dir).iter_commits(rev=rev_range)
    if not hide_merges:
        return list(commit_iter)
    # Drop merge commits, identified by their conventional summary prefix.
    return [c for c in commit_iter if not c.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # Strip a trailing .git so the URL is browsable.
        return repo_url[:-4] if repo_url.endswith(".git") else repo_url
    if "git.openstack.org" in repo_url:
        # Map the OpenStack git host to its GitHub mirror.
        project = "/".join(repo_url.split("/")[-2:])
        return "https://github.com/{0}".format(project)
    # Unknown hosting -- hand the URL back untouched.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out *commit* in the OpenStack-Ansible clone, reads every
    YAML file under ``playbooks/defaults/repo_packages/``, merges them
    into one mapping and returns it as (name, url, sha) tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load: yaml.load without an explicit Loader can
            # construct arbitrary objects and is deprecated since
            # PyYAML 5.1.
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out *ref* (branch, tag, or SHA) in *repo*.

    Any stale local branch named *ref* is deleted first so the checkout
    tracks the remote branch rather than an outdated local copy.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # log.warn is deprecated (and removed in Python 3.13); use warning.
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')
    log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    :param osa_repo_dir: path to the local OpenStack-Ansible clone
    :param commit: git ref to check out before reading
    :param role_requirements: file name of the role requirements YAML
    :returns: list of (name, src, version) tuples
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load: yaml.load without an explicit Loader is unsafe and
        # deprecated since PyYAML 5.1.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible."""
    update_repo(repo_dir, args.osa_repo_url, args.update)

    # Abort early when either SHA is unknown or the range is empty.
    validate_commits(repo_dir, [old_commit, new_commit])
    validate_commit_range(repo_dir, old_commit, new_commit)

    # Render the report header with the OSA commits in the range.
    template_vars = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': get_commits(repo_dir, old_commit, new_commit),
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit,
    }
    return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    :param storage_directory: base directory holding the cloned repos
    :param old_pins: (name, url, sha) tuples from the older OSA commit
    :param new_pins: (name, url, sha) tuples from the newer OSA commit
    :param do_update: fetch the latest changes for each repo when True
    :param version_mappings: optional {repo: {old sha: new sha}}
        overrides for pins that no longer exist upstream
    :returns: concatenated RST sections, one per repo found in both pins
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        # Apply any user-supplied override for the new pin.
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)
        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)
        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except Exception:
            continue
        else:
            # Apply any override for the old pin as well.
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)
        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst
    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    These are returned as a list of tuples.
    """
    if isinstance(yaml, list):
        # Role requirements: a list of dicts with name/src and an
        # optional version (defaulting to HEAD).
        return [(entry['name'], entry['src'], entry.get('version', 'HEAD'))
                for entry in yaml]

    # Project variables: a flat mapping with <name>_git_repo and
    # <name>_git_install_branch keys.  Derive each project name by
    # stripping the 9-character '_git_repo' suffix.
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        name = key[:-9]
        normalized.append((name,
                           yaml['{0}_git_repo'.format(name)],
                           yaml['{0}_git_install_branch'.format(name)]))
    return normalized
def parse_arguments():
    """Parse arguments."""
    # Build the parser and parse sys.argv in one step.
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist."""
    gist_filename = "osa-diff-{0}-{1}.rst".format(old_sha, new_sha)
    description = ("Changes in OpenStack-Ansible between "
                   "{0} and {1}".format(old_sha, new_sha))
    payload = {
        "description": description,
        "public": True,
        "files": {gist_filename: {"content": report_data}},
    }
    # NOTE(review): this POST is unauthenticated -- confirm the GitHub
    # gists API still accepts anonymous creation.
    response = requests.post("https://api.github.com/gists",
                             data=json.dumps(payload))
    return response.json()['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # With no destination chosen (and no --quiet), the report itself is
    # the output; the caller prints it to stdout.
    if not (args.quiet or args.gist or args.file):
        return report

    output = ""

    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)

    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        output += "\nReport written to file: {0}".format(args.file)

    return output
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands a leading ``~`` and creates the directory (including any
    missing parents) if it does not already exist.

    :param storage_directory: path of the directory to prepare
    :returns: the expanded, existing directory path
    """
    storage_directory = os.path.expanduser(storage_directory)
    # makedirs with exist_ok avoids a race between the existence check
    # and creation, and handles nested paths that plain os.mkdir
    # would reject.
    os.makedirs(storage_directory, exist_ok=True)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template."""
    # Templates live in a 'templates' directory next to this module.
    here = os.path.dirname(os.path.abspath(__file__))
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader("{0}/templates".format(here)),
        trim_blocks=True,
    )
    return env.get_template(template_file).render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    # Clone and hand the Repo object straight back.
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    :param repo_dir: path to an existing local clone
    :param repo_url: remote URL, used both for host detection and as the
        fetch target
    :param fetch: when True, fetch the latest refs from *repo_url*
    :returns: the :class:`git.Repo` object for *repo_dir*

    NOTE(review): this assumes every repo has a 'master' branch --
    confirm against repos whose default branch is named differently.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def validate_commits(repo_dir, commits):
    """Test if a commit is valid for the repository."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for sha in commits:
        try:
            repo.commit(sha)
        except Exception:
            # Unknown ref: surface a hint about --update to the user.
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir, commit=sha))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed."""
    def _count(first, second):
        # Treat any failure to enumerate commits as an empty range.
        try:
            return len(get_commits(repo_dir, first, second))
        except Exception:
            return 0

    if _count(old_commit, new_commit) > 0:
        return True

    # Nothing found -- the user may have swapped the commits, so try the
    # reversed range before giving up.
    if _count(new_commit, old_commit) > 0:
        return 'flip'

    msg = ("The commit range {0}..{1} is invalid for {2}."
           "You may need to use the --update option to fetch the "
           "latest updates to the git repositories stored on your "
           "local computer.".format(old_commit, new_commit, repo_dir))
    raise exceptions.InvalidCommitRangeException(msg)
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs reno over every tagged release between the two commits, plus
    any notes added after the latest tag, and returns the concatenated
    output reformatted for this tool's RST heading levels.

    :param osa_repo_dir: path to the local OpenStack-Ansible clone
    :param osa_old_commit: older git ref
    :param osa_new_commit: newer git ref
    :returns: release notes as a single RST string
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Start here.

    Command-line entry point: parses arguments, prepares the storage
    directory, builds the OSA header report, optionally appends reno
    release notes, then role and project diffs, and finally publishes
    the assembled report.
    """
    # Get our arguments from the command line
    args = parse_arguments()
    # Set up DEBUG logging if needed
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    # Assemble some variables for the OSA repository.
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    # Generate OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)
    # Get OpenStack-Ansible Reno release notes for the packaged
    # releases between the two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                       osa_old_commit,
                                       osa_new_commit)
    # Get the list of OpenStack roles from the newer and older commits.
    # NOTE(review): the role pins are loaded even when --skip-roles is
    # set; only the report generation below is skipped.
    role_yaml = get_roles(osa_repo_dir,
                          osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir,
                                 osa_new_commit,
                                 args.role_requirements)
    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)
    if not args.skip_projects:
        # Get the list of OpenStack projects from newer commit and older
        # commit.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir,
                                           osa_new_commit)
        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)
    # Publish report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)
# Allow running this module directly as a script.
if __name__ == "__main__":
    run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
validate_commits
|
python
|
def validate_commits(repo_dir, commits):
log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
repo = Repo(repo_dir)
for commit in commits:
try:
commit = repo.commit(commit)
except Exception:
msg = ("Commit {commit} could not be found in repo {repo}. "
"You may need to pass --update to fetch the latest "
"updates to the git repositories stored on "
"your local computer.".format(repo=repo_dir, commit=commit))
raise exceptions.InvalidCommitException(msg)
return True
|
Test if a commit is valid for the repository.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L474-L488
| null |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Process version-mapping argparse arguments.

    Accumulates arguments of the form
    ``repo-name;old1:new1;old2:new2`` into a ``defaultdict`` mapping
    repo name -> {old version: new version}.  The option may be given
    multiple times; mappings for the same repo are merged.
    """

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        superclass = super(VersionMappingsAction, self)
        superclass.__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Process version-mapping string."""
        # Read back what earlier occurrences stored under our own dest.
        # The original hard-coded "version_mappings" here, which silently
        # discarded prior values whenever the option was registered with
        # a different dest.
        version_mappings = getattr(namespace, self.dest,
                                   defaultdict(dict))
        if not isinstance(version_mappings, defaultdict):
            version_mappings = defaultdict(dict)
        repo_name, version_mapping = values.split(";", 1)
        versions = {
            old: new
            for old_new in version_mapping.split(";")
            for old, new in [old_new.split(":")]
        }
        version_mappings[repo_name].update(versions)
        setattr(namespace, self.dest, version_mappings)
def create_parser():
"""Create argument parser."""
description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
parser = argparse.ArgumentParser(
usage='%(prog)s',
description=description,
epilog='Licensed "Apache 2.0"',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'old_commit',
action='store',
nargs=1,
help="Git SHA of the older commit",
)
parser.add_argument(
'new_commit',
action='store',
nargs=1,
help="Git SHA of the newer commit",
)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help="Enable info output",
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help="Enable debug output",
)
parser.add_argument(
'-d', '--directory',
action='store',
default="~/.osa-differ",
help="Git repo storage directory (default: ~/.osa-differ)",
)
parser.add_argument(
'-rr', '--role-requirements',
action='store',
default='ansible-role-requirements.yml',
help="Name of the ansible role requirements file to read",
)
parser.add_argument(
'-u', '--update',
action='store_true',
default=False,
help="Fetch latest changes to repo",
)
parser.add_argument(
'--osa-repo-url',
action='store',
default='https://git.openstack.org/openstack/openstack-ansible',
help="URL of the openstack-ansible git repo",
)
parser.add_argument(
'--version-mappings',
action=VersionMappingsAction,
help=(
"Map dependency versions in cases where the old version no longer "
"exists. The argument should be of the form "
"'repo-name;old-version1:new-version1;old-version2:new-version2'."
),
)
display_opts = parser.add_argument_group("Limit scope")
display_opts.add_argument(
"--skip-projects",
action="store_true",
help="Skip checking for changes in OpenStack projects"
)
display_opts.add_argument(
"--skip-roles",
action="store_true",
help="Skip checking for changes in OpenStack-Ansible roles"
)
release_note_opts = parser.add_argument_group("Release notes")
release_note_opts.add_argument(
"--release-notes",
action="store_true",
help=("Print reno release notes for OpenStack-Ansible "
"between the two commits")
)
output_desc = ("Output is printed to stdout by default.")
output_opts = parser.add_argument_group('Output options', output_desc)
output_opts.add_argument(
'--quiet',
action='store_true',
default=False,
help="Do not output to stdout",
)
output_opts.add_argument(
'--gist',
action='store_true',
default=False,
help="Output into a GitHub Gist",
)
output_opts.add_argument(
'--file',
metavar="FILENAME",
action='store',
help="Output to a file",
)
return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
"""Find all commits between two commit SHAs."""
repo = Repo(repo_dir)
commits = repo.iter_commits(rev="{0}..{1}".format(old_commit, new_commit))
if hide_merges:
return [x for x in commits if not x.summary.startswith("Merge ")]
else:
return list(commits)
def get_commit_url(repo_url):
"""Determine URL to view commits for repo."""
if "github.com" in repo_url:
return repo_url[:-4] if repo_url.endswith(".git") else repo_url
if "git.openstack.org" in repo_url:
uri = '/'.join(repo_url.split('/')[-2:])
return "https://github.com/{0}".format(uri)
# If it didn't match these conditions, just return it.
return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out *commit* in the OpenStack-Ansible clone, reads every
    YAML file under ``playbooks/defaults/repo_packages/``, merges them
    into one mapping and returns it as (name, url, sha) tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load: yaml.load without an explicit Loader can
            # construct arbitrary objects and is deprecated since
            # PyYAML 5.1.
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out *ref* (branch, tag, or SHA) in *repo*.

    Any stale local branch named *ref* is deleted first so the checkout
    tracks the remote branch rather than an outdated local copy.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # log.warn is deprecated (and removed in Python 3.13); use warning.
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')
    log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    :param osa_repo_dir: path to the local OpenStack-Ansible clone
    :param commit: git ref to check out before reading
    :param role_requirements: file name of the role requirements YAML
    :returns: list of (name, src, version) tuples
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load: yaml.load without an explicit Loader is unsafe and
        # deprecated since PyYAML 5.1.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
args):
"""Create initial RST report header for OpenStack-Ansible."""
update_repo(repo_dir, args.osa_repo_url, args.update)
# Are these commits valid?
validate_commits(repo_dir, [old_commit, new_commit])
# Do we have a valid commit range?
validate_commit_range(repo_dir, old_commit, new_commit)
# Get the commits in the range
commits = get_commits(repo_dir, old_commit, new_commit)
# Start off our report with a header and our OpenStack-Ansible commits.
template_vars = {
'args': args,
'repo': 'openstack-ansible',
'commits': commits,
'commit_base_url': get_commit_url(args.osa_repo_url),
'old_sha': old_commit,
'new_sha': new_commit
}
return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
version_mappings=None):
"""Create RST report from a list of projects/roles."""
report = ""
version_mappings = version_mappings or {}
for new_pin in new_pins:
repo_name, repo_url, commit_sha = new_pin
commit_sha = version_mappings.get(repo_name, {}
).get(commit_sha, commit_sha)
# Prepare our repo directory and clone the repo if needed. Only pull
# if the user requests it.
repo_dir = "{0}/{1}".format(storage_directory, repo_name)
update_repo(repo_dir, repo_url, do_update)
# Get the old SHA from the previous pins. If this pin didn't exist
# in the previous OSA revision, skip it. This could happen with newly-
# added projects and roles.
try:
commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
except Exception:
continue
else:
commit_sha_old = version_mappings.get(repo_name, {}
).get(commit_sha_old,
commit_sha_old)
# Loop through the commits and render our template.
validate_commits(repo_dir, [commit_sha_old, commit_sha])
commits = get_commits(repo_dir, commit_sha_old, commit_sha)
template_vars = {
'repo': repo_name,
'commits': commits,
'commit_base_url': get_commit_url(repo_url),
'old_sha': commit_sha_old,
'new_sha': commit_sha
}
rst = render_template('offline-repo-changes.j2', template_vars)
report += rst
return report
def normalize_yaml(yaml):
"""Normalize the YAML from project and role lookups.
These are returned as a list of tuples.
"""
if isinstance(yaml, list):
# Normalize the roles YAML data
normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD'))
for x in yaml]
else:
# Extract the project names from the roles YAML and create a list of
# tuples.
projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]
normalized_yaml = []
for project in projects:
repo_url = yaml['{0}_git_repo'.format(project)]
commit_sha = yaml['{0}_git_install_branch'.format(project)]
normalized_yaml.append((project, repo_url, commit_sha))
return normalized_yaml
def parse_arguments():
    """Parse command-line arguments."""
    # All option definitions live in create_parser().
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist.

    Raises requests.HTTPError when GitHub rejects the request (rate limit,
    auth) instead of failing later with an opaque KeyError on the response.
    """
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            "osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
                "content": report_data
            }
        }
    }
    url = "https://api.github.com/gists"
    # Bound the request so a stalled connection cannot hang the whole run.
    r = requests.post(url, data=json.dumps(payload), timeout=120)
    # Surface HTTP errors explicitly before touching the JSON body.
    r.raise_for_status()
    return r.json()['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # No destination selected: just hand the report text back for stdout.
    if not (args.quiet or args.gist or args.file):
        return report

    messages = []
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        messages.append("\nReport posted to GitHub Gist: {0}".format(gist_url))
    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        messages.append("\nReport written to file: {0}".format(args.file))
    return "".join(messages)
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands ``~`` and creates the directory (including missing parents)
    if it does not exist yet.  Returns the expanded path.
    """
    storage_directory = os.path.expanduser(storage_directory)
    # makedirs with exist_ok avoids the check-then-create race and also
    # handles nested paths that plain os.mkdir() cannot create.
    os.makedirs(storage_directory, exist_ok=True)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template."""
    # Templates live in a "templates" directory next to this module.
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    template = jinja_env.get_template(template_file)
    return template.render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    # Repo.clone_from creates repo_dir and returns the new Repo object.
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    The working tree is cleaned and hard-reset onto the local ``master``
    branch before anything else, so a previous run cannot leave the clone
    dirty or detached.  When *fetch* is true, refs are fetched directly
    from *repo_url* with force/update refspecs.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        # GitHub exposes pull requests under refs/pull/*; mirror them
        # locally so PR SHAs can be diffed as well.
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        # NOTE(review): fetching from the URL (not a named remote) with
        # "-f -u" force-updates local heads/tags to match upstream.
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)
    # Always reset the checkout; fetch new refs only when requested.
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed."""
    def _commits_between(start, end):
        # Any failure to walk the range is treated as "no commits".
        try:
            return get_commits(repo_dir, start, end)
        except Exception:
            return []

    # Are there any commits between the two commits that were provided?
    if _commits_between(old_commit, new_commit):
        return True

    # The user might have gotten their commits out of order. Let's flip
    # the order of the commits and try again.
    if _commits_between(new_commit, old_commit):
        return 'flip'

    # Okay, so there really are no commits between the two commits
    # provided by the user. :)
    msg = ("The commit range {0}..{1} is invalid for {2}."
           "You may need to use the --update option to fetch the "
           "latest updates to the git repositories stored on your "
           "local computer.".format(old_commit, new_commit, repo_dir))
    raise exceptions.InvalidCommitRangeException(msg)
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
"""Get release notes between the two revisions."""
repo = Repo(osa_repo_dir)
# Get a list of tags, sorted
tags = repo.git.tag().split('\n')
tags = sorted(tags, key=LooseVersion)
# Currently major tags are being printed after rc and
# b tags. We need to fix the list so that major
# tags are printed before rc and b releases
tags = _fix_tags_list(tags)
# Find the closest tag from a given SHA
# The tag found here is the tag that was cut
# either on or before the given SHA
checkout(repo, osa_old_commit)
old_tag = repo.git.describe()
# If the SHA given is between two release tags, then
# 'git describe' will return a tag in form of
# <tag>-<commitNum>-<sha>. For example:
# 14.0.2-3-g6931e26
# Since reno does not support this format, we need to
# strip away the commit number and sha bits.
if '-' in old_tag:
old_tag = old_tag[0:old_tag.index('-')]
# Get the nearest tag associated with the new commit
checkout(repo, osa_new_commit)
new_tag = repo.git.describe()
if '-' in new_tag:
nearest_new_tag = new_tag[0:new_tag.index('-')]
else:
nearest_new_tag = new_tag
# Truncate the tags list to only include versions
# between old_sha and new_sha. The latest release
# is not included in this list. That version will be
# printed separately in the following step.
tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
release_notes = ""
# Checkout the new commit, then run reno to get the latest
# releasenotes that have been created or updated between
# the latest release and this new commit.
repo.git.checkout(osa_new_commit, '-f')
reno_report_command = ['reno',
'report',
'--earliest-version',
nearest_new_tag]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
release_notes += reno_output
# We want to start with the latest packaged release first, so
# the tags list is reversed
for version in reversed(tags):
# If version is an rc or b tag, and it has a major
# release tag, then skip it. There is no need to print
# release notes for an rc or b release unless we are
# comparing shas between two rc or b releases.
repo.git.checkout(version, '-f')
# We are outputing one version at a time here
reno_report_command = ['reno',
'report',
'--branch',
version,
'--earliest-version',
version]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
# We need to ensure the output includes the version we are concerned
# about.
# This is due to https://bugs.launchpad.net/reno/+bug/1670173
if version in reno_output:
release_notes += reno_output
# Clean up "Release Notes" title. We don't need this title for
# each tagged release.
release_notes = release_notes.replace(
"=============\nRelease Notes\n=============",
""
)
# Replace headers that contain '=' with '~' to comply with osa-differ's
# formatting
release_notes = re.sub('===+', _equal_to_tilde, release_notes)
# Replace headers that contain '-' with '#' to comply with osa-differ's
# formatting
release_notes = re.sub('---+', _dash_to_num, release_notes)
return release_notes
def _equal_to_tilde(matchobj):
    # re.sub callback: replace a run of '=' with an equally long run of '~'.
    return '~' * len(matchobj.group(0))
def _dash_to_num(matchobj):
    # re.sub callback: replace a run of '-' with an equally long run of '#'.
    return '#' * len(matchobj.group(0))
def _fix_tags_list(tags):
    """Reorder tags so rc/b pre-release tags come before their release."""
    ordered = []
    for tag in tags:
        # For a final release tag, first pull in every matching rc/b
        # pre-release so pre-releases precede the release itself.
        if 'rc' not in tag and 'b' not in tag:
            ordered.extend(
                candidate for candidate in tags
                if tag in candidate and ('rc' in candidate or 'b' in candidate)
            )
        # Make sure we don't add the tag in twice
        if tag not in ordered:
            ordered.append(tag)
    return ordered
def run_osa_differ():
"""Start here."""
# Get our arguments from the command line
args = parse_arguments()
# Set up DEBUG logging if needed
if args.debug:
log.setLevel(logging.DEBUG)
elif args.verbose:
log.setLevel(logging.INFO)
# Create the storage directory if it doesn't exist already.
try:
storage_directory = prepare_storage_dir(args.directory)
except OSError:
print("ERROR: Couldn't create the storage directory {0}. "
"Please create it manually.".format(args.directory))
sys.exit(1)
# Assemble some variables for the OSA repository.
osa_old_commit = args.old_commit[0]
osa_new_commit = args.new_commit[0]
osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
# Generate OpenStack-Ansible report header.
report_rst = make_osa_report(osa_repo_dir,
osa_old_commit,
osa_new_commit,
args)
# Get OpenStack-Ansible Reno release notes for the packaged
# releases between the two commits.
if args.release_notes:
report_rst += ("\nRelease Notes\n"
"-------------")
report_rst += get_release_notes(osa_repo_dir,
osa_old_commit,
osa_new_commit)
# Get the list of OpenStack roles from the newer and older commits.
role_yaml = get_roles(osa_repo_dir,
osa_old_commit,
args.role_requirements)
role_yaml_latest = get_roles(osa_repo_dir,
osa_new_commit,
args.role_requirements)
if not args.skip_roles:
# Generate the role report.
report_rst += ("\nOpenStack-Ansible Roles\n"
"-----------------------")
report_rst += make_report(storage_directory,
role_yaml,
role_yaml_latest,
args.update,
args.version_mappings)
if not args.skip_projects:
# Get the list of OpenStack projects from newer commit and older
# commit.
project_yaml = get_projects(osa_repo_dir, osa_old_commit)
project_yaml_latest = get_projects(osa_repo_dir,
osa_new_commit)
# Generate the project report.
report_rst += ("\nOpenStack Projects\n"
"------------------")
report_rst += make_report(storage_directory,
project_yaml,
project_yaml_latest,
args.update)
# Publish report according to the user's request.
output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
print(output)
if __name__ == "__main__":
run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
validate_commit_range
|
python
|
def validate_commit_range(repo_dir, old_commit, new_commit):
# Are there any commits between the two commits that were provided?
try:
commits = get_commits(repo_dir, old_commit, new_commit)
except Exception:
commits = []
if len(commits) == 0:
# The user might have gotten their commits out of order. Let's flip
# the order of the commits and try again.
try:
commits = get_commits(repo_dir, new_commit, old_commit)
except Exception:
commits = []
if len(commits) == 0:
# Okay, so there really are no commits between the two commits
# provided by the user. :)
msg = ("The commit range {0}..{1} is invalid for {2}."
"You may need to use the --update option to fetch the "
"latest updates to the git repositories stored on your "
"local computer.".format(old_commit, new_commit, repo_dir))
raise exceptions.InvalidCommitRangeException(msg)
else:
return 'flip'
return True
|
Check if commit range is valid. Flip it if needed.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L491-L516
|
[
"def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):\n \"\"\"Find all commits between two commit SHAs.\"\"\"\n repo = Repo(repo_dir)\n commits = repo.iter_commits(rev=\"{0}..{1}\".format(old_commit, new_commit))\n if hide_merges:\n return [x for x in commits if not x.summary.startswith(\"Merge \")]\n else:\n return list(commits)\n"
] |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Accumulate ``repo;old:new[;old:new...]`` version-mapping arguments.

    Each occurrence of the option maps old commit/version strings to
    replacements for one repository; repeated occurrences are merged into
    a single ``defaultdict(dict)`` keyed by repository name.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        """Parse one version-mapping string and merge it into the namespace."""
        # Use self.dest (not a hardcoded attribute name) so the action
        # works with any dest= the option is registered under.
        mappings = getattr(namespace, self.dest, None)
        if not isinstance(mappings, defaultdict):
            mappings = defaultdict(dict)
        # Format: "<repo-name>;<old1>:<new1>[;<old2>:<new2>...]"
        repo_name, version_mapping = values.split(";", 1)
        versions = {
            old: new
            for old_new in version_mapping.split(";")
            for old, new in [old_new.split(":")]
        }
        mappings[repo_name].update(versions)
        setattr(namespace, self.dest, mappings)
def create_parser():
    """Create argument parser.

    Defines the positional old/new commit arguments plus option groups
    for scope limiting, release notes and output destination.
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the two OSA commits to diff (each stored as a
    # one-element list because of nargs=1).
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    # Custom action accumulates repeated "repo;old:new" mappings into one
    # dict-of-dicts (see VersionMappingsAction).
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Find all commits between two commit SHAs."""
    repository = Repo(repo_dir)
    revision_range = "{0}..{1}".format(old_commit, new_commit)
    commit_iter = repository.iter_commits(rev=revision_range)
    if not hide_merges:
        return list(commit_iter)
    # Merge commits are identified by their summary line.
    return [commit for commit in commit_iter
            if not commit.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # GitHub URLs are browsable once any ".git" suffix is removed.
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Map an openstack.org repo to its GitHub mirror.
        org_and_repo = '/'.join(repo_url.split('/')[-2:])
        return "https://github.com/{0}".format(org_and_repo)
    # If it didn't match these conditions, just return it.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out *commit* in the OSA repository, then parses every YAML
    file under ``playbooks/defaults/repo_packages`` and returns the
    merged, normalized list of ``(name, repo_url, sha)`` tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load: these files are plain data; yaml.load without a
            # Loader is deprecated and can construct arbitrary objects.
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out *ref* in *repo*, hard-resetting the working tree.

    Any existing local branch named *ref* is deleted first so a stale
    local branch cannot shadow the tag/SHA/remote ref being checked out.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # log.warning: Logger.warn is deprecated (removed in Python 3.13).
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')

    log.info("Checking out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)

    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    Returns the normalized list of ``(name, src, version)`` tuples from
    the role requirements file at *commit*.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load: the requirements file is plain data; yaml.load
        # without a Loader is deprecated and unsafe.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible."""
    update_repo(repo_dir, args.osa_repo_url, args.update)

    # Refuse to continue unless both commits exist and span a real range.
    validate_commits(repo_dir, [old_commit, new_commit])
    validate_commit_range(repo_dir, old_commit, new_commit)

    # Get the commits in the range
    commits = get_commits(repo_dir, old_commit, new_commit)

    # Start off our report with a header and our OpenStack-Ansible commits.
    context = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': commits,
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit
    }
    return render_template('offline-header.j2', context)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    Args:
        storage_directory: base directory holding the cached git clones.
        old_pins: list of ``(name, repo_url, sha)`` tuples for the old commit.
        new_pins: list of ``(name, repo_url, sha)`` tuples for the new commit.
        do_update: fetch the latest refs for each repo when True.
        version_mappings: optional ``{repo: {old_sha: new_sha}}`` overrides.
    """
    report = ""
    version_mappings = version_mappings or {}
    for repo_name, repo_url, commit_sha in new_pins:
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)

        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)

        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except StopIteration:
            # Only the "pin not found" case should be skipped; any other
            # error must surface rather than be silently swallowed.
            continue
        commit_sha_old = version_mappings.get(repo_name, {}
                                              ).get(commit_sha_old,
                                                    commit_sha_old)

        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        report += render_template('offline-repo-changes.j2', template_vars)
    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    These are returned as a list of tuples.
    """
    # NOTE: the parameter name shadows the imported yaml module, but the
    # module is not needed here; renaming would change the signature.
    if isinstance(yaml, list):
        # Role requirements: a list of dicts with name/src/version keys.
        return [(role['name'], role['src'], role.get('version', 'HEAD'))
                for role in yaml]

    # Project pins: a flat dict keyed by "<project>_git_repo" and
    # "<project>_git_install_branch".
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        project = key[:-9]
        normalized.append((project,
                           yaml['{0}_git_repo'.format(project)],
                           yaml['{0}_git_install_branch'.format(project)]))
    return normalized
def parse_arguments():
    """Parse command-line arguments."""
    # All option definitions live in create_parser().
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist.

    Raises requests.HTTPError when GitHub rejects the request (rate limit,
    auth) instead of failing later with an opaque KeyError on the response.
    """
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            "osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
                "content": report_data
            }
        }
    }
    url = "https://api.github.com/gists"
    # Bound the request so a stalled connection cannot hang the whole run.
    r = requests.post(url, data=json.dumps(payload), timeout=120)
    # Surface HTTP errors explicitly before touching the JSON body.
    r.raise_for_status()
    return r.json()['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # No destination selected: just hand the report text back for stdout.
    if not (args.quiet or args.gist or args.file):
        return report

    messages = []
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        messages.append("\nReport posted to GitHub Gist: {0}".format(gist_url))
    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        messages.append("\nReport written to file: {0}".format(args.file))
    return "".join(messages)
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands ``~`` and creates the directory (including missing parents)
    if it does not exist yet.  Returns the expanded path.
    """
    storage_directory = os.path.expanduser(storage_directory)
    # makedirs with exist_ok avoids the check-then-create race and also
    # handles nested paths that plain os.mkdir() cannot create.
    os.makedirs(storage_directory, exist_ok=True)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template."""
    # Templates live in a "templates" directory next to this module.
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    template = jinja_env.get_template(template_file)
    return template.render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    # Repo.clone_from creates repo_dir and returns the new Repo object.
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    The working tree is cleaned and hard-reset onto the local ``master``
    branch before anything else, so a previous run cannot leave the clone
    dirty or detached.  When *fetch* is true, refs are fetched directly
    from *repo_url* with force/update refspecs.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        # GitHub exposes pull requests under refs/pull/*; mirror them
        # locally so PR SHAs can be diffed as well.
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        # NOTE(review): fetching from the URL (not a named remote) with
        # "-f -u" force-updates local heads/tags to match upstream.
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)
    # Always reset the checkout; fetch new refs only when requested.
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Test if a commit is valid for the repository."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for sha in commits:
        try:
            repo.commit(sha)
        except Exception:
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir, commit=sha))
            raise exceptions.InvalidCommitException(msg)
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Uses ``git describe`` to find the tag nearest each commit, then shells
    out to ``reno report`` once for notes on top of the newest tag and once
    per tagged release in between.  Returns one RST string with headers
    rewritten to osa-differ's levels.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    # NOTE(review): assumes the "reno" CLI is on PATH; stderr is captured
    # but discarded.
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
    # re.sub callback: replace a run of '=' with an equally long run of '~'.
    return '~' * len(matchobj.group(0))
def _dash_to_num(matchobj):
    # re.sub callback: replace a run of '-' with an equally long run of '#'.
    return '#' * len(matchobj.group(0))
def _fix_tags_list(tags):
    """Reorder tags so rc/b pre-release tags come before their release."""
    ordered = []
    for tag in tags:
        # For a final release tag, first pull in every matching rc/b
        # pre-release so pre-releases precede the release itself.
        if 'rc' not in tag and 'b' not in tag:
            ordered.extend(
                candidate for candidate in tags
                if tag in candidate and ('rc' in candidate or 'b' in candidate)
            )
        # Make sure we don't add the tag in twice
        if tag not in ordered:
            ordered.append(tag)
    return ordered
def run_osa_differ():
    """Start here.

    Command-line entry point: parses arguments, prepares the local repo
    cache, builds the RST report (OSA header, optional release notes,
    role and project diffs) and publishes it as requested.
    """
    # Get our arguments from the command line
    args = parse_arguments()
    # Set up DEBUG logging if needed
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    # Assemble some variables for the OSA repository.
    # old_commit/new_commit are one-element lists because of nargs=1.
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    # Generate OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)
    # Get OpenStack-Ansible Reno release notes for the packaged
    # releases between the two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                        osa_old_commit,
                                        osa_new_commit)
    # Get the list of OpenStack roles from the newer and older commits.
    role_yaml = get_roles(osa_repo_dir,
                          osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir,
                                 osa_new_commit,
                                 args.role_requirements)
    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)
    if not args.skip_projects:
        # Get the list of OpenStack projects from newer commit and older
        # commit.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir,
                                           osa_new_commit)
        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)
    # Publish report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)
if __name__ == "__main__":
run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
get_release_notes
|
python
|
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Determines the tagged releases that fall between the two commits,
    then runs the external ``reno`` tool once per release (plus once for
    unreleased notes after the newest tag) and concatenates the output,
    reformatted to match osa-differ's heading conventions.

    :param osa_repo_dir: path of the local openstack-ansible checkout.
    :param osa_old_commit: older commit SHA/ref.
    :param osa_new_commit: newer commit SHA/ref.
    :returns: the combined release notes as a single RST string.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    # NOTE(review): '-f' forces the checkout, discarding local changes.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
|
Get release notes between the two revisions.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L519-L614
|
[
"def checkout(repo, ref):\n \"\"\"Checkout a repoself.\"\"\"\n # Delete local branch if it exists, remote branch will be tracked\n # automatically. This prevents stale local branches from causing problems.\n # It also avoids problems with appending origin/ to refs as that doesn't\n # work with tags, SHAs, and upstreams not called origin.\n if ref in repo.branches:\n # eg delete master but leave origin/master\n log.warn(\"Removing local branch {b} for repo {r}\".format(b=ref,\n r=repo))\n # Can't delete currently checked out branch, so make sure head is\n # detached before deleting.\n\n repo.head.reset(index=True, working_tree=True)\n repo.git.checkout(repo.head.commit.hexsha)\n repo.delete_head(ref, '--force')\n\n log.info(\"Checkout out repo {repo} to ref {ref}\".format(repo=repo,\n ref=ref))\n repo.head.reset(index=True, working_tree=True)\n repo.git.checkout(ref)\n repo.head.reset(index=True, working_tree=True)\n sha = repo.head.commit.hexsha\n log.info(\"Current SHA for repo {repo} is {sha}\".format(repo=repo, sha=sha))\n",
"def _fix_tags_list(tags):\n new_list = []\n for tag in tags:\n rc_releases = []\n # Ignore rc and b releases, these will be built\n # out in the list comprehension below.\n # Finding the rc and b releases of the tag..\n if 'rc' not in tag and 'b' not in tag:\n rc_releases = [\n rc_tag for rc_tag in tags\n if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)\n ]\n new_list.extend(rc_releases)\n # Make sure we don't add the tag in twice\n if tag not in new_list:\n new_list.append(tag)\n return new_list\n"
] |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Process version-mapping argparse arguments.

    Each occurrence of the option supplies a string of the form
    'repo-name;old1:new1;old2:new2'. Repeated occurrences accumulate into
    a defaultdict mapping repo name -> {old_version: new_version}.
    """

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        superclass = super(VersionMappingsAction, self)
        superclass.__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Parse one version-mapping string and merge it into the dest."""
        # Read the mappings accumulated by earlier occurrences from
        # self.dest (the original hard-coded "version_mappings" attribute
        # name broke accumulation whenever a custom dest was configured).
        version_mappings = getattr(namespace, self.dest, None)
        if not isinstance(version_mappings, defaultdict):
            # argparse initialises the dest to None, so the first
            # occurrence lands here and creates the accumulator.
            version_mappings = defaultdict(dict)

        repo_name, version_mapping = values.split(";", 1)
        versions = {
            old: new
            for old_new in version_mapping.split(";")
            for old, new in [old_new.split(":")]
        }
        version_mappings[repo_name].update(versions)
        setattr(namespace, self.dest, version_mappings)
def create_parser():
    """Create argument parser.

    Builds the osa-differ CLI parser: two positional commit refs plus
    options controlling verbosity, repo storage/fetching, report scope,
    release notes, and the output destination.

    :returns: a configured argparse.ArgumentParser.
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the commit range to diff (each nargs=1, so
    # callers read args.old_commit[0] / args.new_commit[0]).
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    # Logging verbosity switches (handled in run_osa_differ).
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    # Local repo storage and fetch behaviour.
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    # Custom action accumulates repeated occurrences into a defaultdict.
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Find all commits between two commit SHAs."""
    repo = Repo(repo_dir)
    rev_range = "{0}..{1}".format(old_commit, new_commit)
    commit_iter = repo.iter_commits(rev=rev_range)
    if not hide_merges:
        return list(commit_iter)
    # Merge commits are recognised by their summary line prefix.
    return [c for c in commit_iter if not c.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # GitHub repo URLs are browsable as-is; just drop a trailing .git.
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Map OpenStack-hosted repos onto their GitHub mirrors.
        namespace_and_repo = '/'.join(repo_url.split('/')[-2:])
        return "https://github.com/{0}".format(namespace_and_repo)
    # Unknown host: hand the URL back unchanged.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out *commit* in the OpenStack-Ansible repo, parses every YAML
    file under playbooks/defaults/repo_packages/, merges them, and returns
    the normalized project pins.

    :param osa_repo_dir: path of the local openstack-ansible checkout.
    :param commit: branch, tag, or SHA to check out before reading.
    :returns: list of (project, repo_url, commit_sha) tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)

    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load: these are plain data files, and yaml.load without
            # an explicit Loader is deprecated and can construct arbitrary
            # Python objects from the input.
            yaml_parsed.append(yaml.safe_load(f))

    # Later files win on duplicate keys, matching glob order.
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}

    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out *ref* (branch, tag, or SHA) in *repo*.

    Deletes any same-named local branch first so the remote branch is
    tracked automatically. This prevents stale local branches from causing
    problems, and avoids appending origin/ to refs, which doesn't work with
    tags, SHAs, and upstreams not called origin.

    :param repo: git.Repo instance to operate on.
    :param ref: branch name, tag, or commit SHA to check out.
    """
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # (log.warning: log.warn is a deprecated alias in the logging
        # module)
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')

    log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    :param osa_repo_dir: path of the local openstack-ansible checkout.
    :param commit: branch, tag, or SHA to check out before reading.
    :param role_requirements: file name of the role requirements YAML
        relative to the repo root.
    :returns: list of (role, repo_url, version) tuples.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)

    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load: the requirements file is plain data; yaml.load
        # without a Loader is deprecated and unsafe.
        roles_yaml = yaml.safe_load(f)

    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible."""
    update_repo(repo_dir, args.osa_repo_url, args.update)

    # Both commits must exist, and the range between them must be
    # non-empty, before anything is rendered.
    validate_commits(repo_dir, [old_commit, new_commit])
    validate_commit_range(repo_dir, old_commit, new_commit)

    osa_commits = get_commits(repo_dir, old_commit, new_commit)

    # Render the report header with the OpenStack-Ansible commit list.
    context = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': osa_commits,
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit
    }
    return render_template('offline-header.j2', context)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    :param storage_directory: base directory holding the cloned repos.
    :param old_pins: (name, url, sha) tuples from the older OSA commit.
    :param new_pins: (name, url, sha) tuples from the newer OSA commit.
    :param do_update: when True, fetch the latest changes for each repo.
    :param version_mappings: optional {repo: {old_sha: new_sha}} overrides
        for pins whose original version no longer exists.
    :returns: concatenated RST sections for every repo present in both
        pin lists; repos only in *new_pins* are skipped.
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        # Apply any user-supplied version override to the new pin.
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)

        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)

        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except Exception:
            continue
        else:
            # Same override mechanism for the old pin.
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)

        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst

    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    Returns a list of (name, src/repo_url, version) tuples regardless of
    whether the input was role YAML (a list of dicts) or project YAML
    (a flat dict of *_git_repo / *_git_install_branch keys).
    """
    if isinstance(yaml, list):
        # Role YAML: each entry carries name/src and an optional version.
        return [(role['name'], role['src'], role.get('version', 'HEAD'))
                for role in yaml]

    # Project YAML: derive project names from the *_git_repo keys and
    # pair each with its repo URL and pinned branch/SHA.
    suffix = '_git_repo'
    normalized = []
    for key in yaml.keys():
        if not key.endswith(suffix):
            continue
        project = key[:-len(suffix)]
        normalized.append((
            project,
            yaml['{0}_git_repo'.format(project)],
            yaml['{0}_git_install_branch'.format(project)],
        ))
    return normalized
def parse_arguments():
    """Parse command-line arguments via the osa-differ parser."""
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist."""
    gist_file_name = "osa-diff-{0}-{1}.rst".format(old_sha, new_sha)
    gist_description = ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha))
    payload = {
        "description": gist_description,
        "public": True,
        "files": {
            gist_file_name: {
                "content": report_data
            }
        }
    }
    # Anonymous POST to the public gists endpoint.
    response = requests.post("https://api.github.com/gists",
                             data=json.dumps(payload))
    return response.json()['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    # With no output destination requested (and stdout not silenced),
    # hand the report back for the caller to print.
    if not (args.quiet or args.gist or args.file):
        return report

    output = ""

    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)

    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        output += "\nReport written to file: {0}".format(args.file)

    return output
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands '~' and creates the directory if it doesn't exist. Uses
    os.makedirs (not os.mkdir) so intermediate path components are
    created too — os.mkdir raises when the parent is missing, which
    breaks nested -d/--directory paths.

    :param storage_directory: directory path, possibly containing '~'.
    :returns: the expanded path.
    :raises OSError: if the directory cannot be created.
    """
    storage_directory = os.path.expanduser(storage_directory)
    if not os.path.exists(storage_directory):
        os.makedirs(storage_directory)

    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template."""
    # Templates ship alongside this module in a 'templates' directory.
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    template = environment.get_template(template_file)
    return template.render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone *repo_url* into *repo_dir* and return the git.Repo object."""
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    :param repo_dir: local checkout path of the repository.
    :param repo_url: remote URL; used to choose refspecs and to fetch.
    :param fetch: when True, fetch the latest refs from *repo_url*.
    :returns: the git.Repo object for *repo_dir*.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        # GitHub exposes pull requests under refs/pull/*; track them too.
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)

    # Reset the checkout and make sure it has all the refs required,
    # fetching from the remote only when requested.
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Raise InvalidCommitException unless every commit exists in the repo."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for requested in commits:
        try:
            repo.commit(requested)
        except Exception:
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir,
                                                 commit=requested))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed."""

    def _count_commits(first, second):
        # Any failure enumerating commits is treated as an empty range.
        try:
            return len(get_commits(repo_dir, first, second))
        except Exception:
            return 0

    # Are there any commits between the two commits that were provided?
    if _count_commits(old_commit, new_commit) > 0:
        return True

    # The user might have gotten their commits out of order. Let's flip
    # the order of the commits and try again.
    if _count_commits(new_commit, old_commit) > 0:
        return 'flip'

    # Okay, so there really are no commits between the two commits
    # provided by the user. :)
    msg = ("The commit range {0}..{1} is invalid for {2}."
           "You may need to use the --update option to fetch the "
           "latest updates to the git repositories stored on your "
           "local computer.".format(old_commit, new_commit, repo_dir))
    raise exceptions.InvalidCommitRangeException(msg)
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Start here.

    CLI entry point: parses arguments, prepares the storage directory,
    builds the OpenStack-Ansible diff report (header, optional release
    notes, role changes, project changes), and publishes it.
    """
    # Get our arguments from the command line
    args = parse_arguments()
    # Set up DEBUG logging if needed
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    # Assemble some variables for the OSA repository.
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    # Generate OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)
    # Get OpenStack-Ansible Reno release notes for the packaged
    # releases between the two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                        osa_old_commit,
                                        osa_new_commit)
    # Get the list of OpenStack roles from the newer and older commits.
    # NOTE(review): role pins are read even when --skip-roles is set;
    # only the report section below is skipped.
    role_yaml = get_roles(osa_repo_dir,
                          osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir,
                                 osa_new_commit,
                                 args.role_requirements)
    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)
    if not args.skip_projects:
        # Get the list of OpenStack projects from newer commit and older
        # commit.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir,
                                           osa_new_commit)
        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)
    # Publish report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)
if __name__ == "__main__":
run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
run_osa_differ
|
python
|
def run_osa_differ():
# Get our arguments from the command line
args = parse_arguments()
# Set up DEBUG logging if needed
if args.debug:
log.setLevel(logging.DEBUG)
elif args.verbose:
log.setLevel(logging.INFO)
# Create the storage directory if it doesn't exist already.
try:
storage_directory = prepare_storage_dir(args.directory)
except OSError:
print("ERROR: Couldn't create the storage directory {0}. "
"Please create it manually.".format(args.directory))
sys.exit(1)
# Assemble some variables for the OSA repository.
osa_old_commit = args.old_commit[0]
osa_new_commit = args.new_commit[0]
osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
# Generate OpenStack-Ansible report header.
report_rst = make_osa_report(osa_repo_dir,
osa_old_commit,
osa_new_commit,
args)
# Get OpenStack-Ansible Reno release notes for the packaged
# releases between the two commits.
if args.release_notes:
report_rst += ("\nRelease Notes\n"
"-------------")
report_rst += get_release_notes(osa_repo_dir,
osa_old_commit,
osa_new_commit)
# Get the list of OpenStack roles from the newer and older commits.
role_yaml = get_roles(osa_repo_dir,
osa_old_commit,
args.role_requirements)
role_yaml_latest = get_roles(osa_repo_dir,
osa_new_commit,
args.role_requirements)
if not args.skip_roles:
# Generate the role report.
report_rst += ("\nOpenStack-Ansible Roles\n"
"-----------------------")
report_rst += make_report(storage_directory,
role_yaml,
role_yaml_latest,
args.update,
args.version_mappings)
if not args.skip_projects:
# Get the list of OpenStack projects from newer commit and older
# commit.
project_yaml = get_projects(osa_repo_dir, osa_old_commit)
project_yaml_latest = get_projects(osa_repo_dir,
osa_new_commit)
# Generate the project report.
report_rst += ("\nOpenStack Projects\n"
"------------------")
report_rst += make_report(storage_directory,
project_yaml,
project_yaml_latest,
args.update)
# Publish report according to the user's request.
output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
print(output)
|
Start here.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L646-L720
|
[
"def get_projects(osa_repo_dir, commit):\n \"\"\"Get all projects from multiple YAML files.\"\"\"\n # Check out the correct commit SHA from the repository\n repo = Repo(osa_repo_dir)\n checkout(repo, commit)\n\n yaml_files = glob.glob(\n '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)\n )\n yaml_parsed = []\n for yaml_file in yaml_files:\n with open(yaml_file, 'r') as f:\n yaml_parsed.append(yaml.load(f))\n\n merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}\n\n return normalize_yaml(merged_dicts)\n",
"def get_roles(osa_repo_dir, commit, role_requirements):\n \"\"\"Read OSA role information at a particular commit.\"\"\"\n repo = Repo(osa_repo_dir)\n\n checkout(repo, commit)\n\n log.info(\"Looking for file {f} in repo {r}\".format(r=osa_repo_dir,\n f=role_requirements))\n filename = \"{0}/{1}\".format(osa_repo_dir, role_requirements)\n with open(filename, 'r') as f:\n roles_yaml = yaml.load(f)\n\n return normalize_yaml(roles_yaml)\n",
"def make_osa_report(repo_dir, old_commit, new_commit,\n args):\n \"\"\"Create initial RST report header for OpenStack-Ansible.\"\"\"\n update_repo(repo_dir, args.osa_repo_url, args.update)\n\n # Are these commits valid?\n validate_commits(repo_dir, [old_commit, new_commit])\n\n # Do we have a valid commit range?\n validate_commit_range(repo_dir, old_commit, new_commit)\n\n # Get the commits in the range\n commits = get_commits(repo_dir, old_commit, new_commit)\n\n # Start off our report with a header and our OpenStack-Ansible commits.\n template_vars = {\n 'args': args,\n 'repo': 'openstack-ansible',\n 'commits': commits,\n 'commit_base_url': get_commit_url(args.osa_repo_url),\n 'old_sha': old_commit,\n 'new_sha': new_commit\n }\n return render_template('offline-header.j2', template_vars)\n",
"def make_report(storage_directory, old_pins, new_pins, do_update=False,\n version_mappings=None):\n \"\"\"Create RST report from a list of projects/roles.\"\"\"\n report = \"\"\n version_mappings = version_mappings or {}\n for new_pin in new_pins:\n repo_name, repo_url, commit_sha = new_pin\n commit_sha = version_mappings.get(repo_name, {}\n ).get(commit_sha, commit_sha)\n\n # Prepare our repo directory and clone the repo if needed. Only pull\n # if the user requests it.\n repo_dir = \"{0}/{1}\".format(storage_directory, repo_name)\n update_repo(repo_dir, repo_url, do_update)\n\n # Get the old SHA from the previous pins. If this pin didn't exist\n # in the previous OSA revision, skip it. This could happen with newly-\n # added projects and roles.\n try:\n commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)\n except Exception:\n continue\n else:\n commit_sha_old = version_mappings.get(repo_name, {}\n ).get(commit_sha_old,\n commit_sha_old)\n\n # Loop through the commits and render our template.\n validate_commits(repo_dir, [commit_sha_old, commit_sha])\n commits = get_commits(repo_dir, commit_sha_old, commit_sha)\n template_vars = {\n 'repo': repo_name,\n 'commits': commits,\n 'commit_base_url': get_commit_url(repo_url),\n 'old_sha': commit_sha_old,\n 'new_sha': commit_sha\n }\n rst = render_template('offline-repo-changes.j2', template_vars)\n report += rst\n\n return report\n",
"def parse_arguments():\n \"\"\"Parse arguments.\"\"\"\n parser = create_parser()\n args = parser.parse_args()\n return args\n",
"def publish_report(report, args, old_commit, new_commit):\n \"\"\"Publish the RST report based on the user request.\"\"\"\n # Print the report to stdout unless the user specified --quiet.\n output = \"\"\n\n if not args.quiet and not args.gist and not args.file:\n return report\n\n if args.gist:\n gist_url = post_gist(report, old_commit, new_commit)\n output += \"\\nReport posted to GitHub Gist: {0}\".format(gist_url)\n\n if args.file is not None:\n with open(args.file, 'w') as f:\n f.write(report)\n output += \"\\nReport written to file: {0}\".format(args.file)\n\n return output\n",
"def prepare_storage_dir(storage_directory):\n \"\"\"Prepare the storage directory.\"\"\"\n storage_directory = os.path.expanduser(storage_directory)\n if not os.path.exists(storage_directory):\n os.mkdir(storage_directory)\n\n return storage_directory\n",
"def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):\n \"\"\"Get release notes between the two revisions.\"\"\"\n repo = Repo(osa_repo_dir)\n\n # Get a list of tags, sorted\n tags = repo.git.tag().split('\\n')\n tags = sorted(tags, key=LooseVersion)\n # Currently major tags are being printed after rc and\n # b tags. We need to fix the list so that major\n # tags are printed before rc and b releases\n tags = _fix_tags_list(tags)\n\n # Find the closest tag from a given SHA\n # The tag found here is the tag that was cut\n # either on or before the given SHA\n checkout(repo, osa_old_commit)\n old_tag = repo.git.describe()\n\n # If the SHA given is between two release tags, then\n # 'git describe' will return a tag in form of\n # <tag>-<commitNum>-<sha>. For example:\n # 14.0.2-3-g6931e26\n # Since reno does not support this format, we need to\n # strip away the commit number and sha bits.\n if '-' in old_tag:\n old_tag = old_tag[0:old_tag.index('-')]\n\n # Get the nearest tag associated with the new commit\n checkout(repo, osa_new_commit)\n new_tag = repo.git.describe()\n if '-' in new_tag:\n nearest_new_tag = new_tag[0:new_tag.index('-')]\n else:\n nearest_new_tag = new_tag\n\n # Truncate the tags list to only include versions\n # between old_sha and new_sha. The latest release\n # is not included in this list. 
That version will be\n # printed separately in the following step.\n tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]\n\n release_notes = \"\"\n # Checkout the new commit, then run reno to get the latest\n # releasenotes that have been created or updated between\n # the latest release and this new commit.\n repo.git.checkout(osa_new_commit, '-f')\n reno_report_command = ['reno',\n 'report',\n '--earliest-version',\n nearest_new_tag]\n reno_report_p = subprocess.Popen(reno_report_command,\n cwd=osa_repo_dir,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n reno_output = reno_report_p.communicate()[0].decode('UTF-8')\n release_notes += reno_output\n\n # We want to start with the latest packaged release first, so\n # the tags list is reversed\n for version in reversed(tags):\n # If version is an rc or b tag, and it has a major\n # release tag, then skip it. There is no need to print\n # release notes for an rc or b release unless we are\n # comparing shas between two rc or b releases.\n repo.git.checkout(version, '-f')\n # We are outputing one version at a time here\n reno_report_command = ['reno',\n 'report',\n '--branch',\n version,\n '--earliest-version',\n version]\n reno_report_p = subprocess.Popen(reno_report_command,\n cwd=osa_repo_dir,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n reno_output = reno_report_p.communicate()[0].decode('UTF-8')\n # We need to ensure the output includes the version we are concerned\n # about.\n # This is due to https://bugs.launchpad.net/reno/+bug/1670173\n if version in reno_output:\n release_notes += reno_output\n\n # Clean up \"Release Notes\" title. 
We don't need this title for\n # each tagged release.\n release_notes = release_notes.replace(\n \"=============\\nRelease Notes\\n=============\",\n \"\"\n )\n # Replace headers that contain '=' with '~' to comply with osa-differ's\n # formatting\n release_notes = re.sub('===+', _equal_to_tilde, release_notes)\n # Replace headers that contain '-' with '#' to comply with osa-differ's\n # formatting\n release_notes = re.sub('---+', _dash_to_num, release_notes)\n return release_notes\n"
] |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging: use the root logger so module loggers inherit the level.
# Default is ERROR; the CLI's --verbose/--debug flags raise it at runtime.
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Accumulate ``--version-mappings`` arguments into a dict of dicts.

    Each argument string has the form ``repo-name;old1:new1;old2:new2``
    and is merged into the destination attribute on the namespace as
    ``{repo_name: {old: new, ...}}``. Repeated options for the same repo
    merge their mappings rather than replacing them.
    """

    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        super(VersionMappingsAction, self).__init__(option_strings, dest,
                                                    **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Parse one version-mapping string and merge it into the namespace."""
        # Read mappings accumulated by earlier occurrences of the option.
        # Use self.dest (not a hard-coded attribute name) so the action
        # works regardless of the configured destination.
        version_mappings = getattr(namespace, self.dest, defaultdict(dict))
        if not isinstance(version_mappings, defaultdict):
            version_mappings = defaultdict(dict)
        # Split "repo;old1:new1;..." into the repo name and the mapping part.
        repo_name, version_mapping = values.split(";", 1)
        versions = {
            old: new
            for old_new in version_mapping.split(";")
            for old, new in [old_new.split(":")]
        }
        version_mappings[repo_name].update(versions)
        setattr(namespace, self.dest, version_mappings)
def create_parser():
    """Create argument parser.

    Builds the CLI: two positional commits to compare, verbosity and repo
    storage options, scope-limiting flags, release-note printing, and a
    group of mutually-compatible output destinations (stdout, gist, file).
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    # Positional arguments: the older and newer OSA commits to diff.
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    # Custom action accumulates repeated options into {repo: {old: new}}.
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Return the commits reachable from new_commit but not old_commit.

    Merge commits (summary starting with "Merge ") are filtered out
    unless ``hide_merges`` is False.
    """
    rev_range = "{0}..{1}".format(old_commit, new_commit)
    commit_iter = Repo(repo_dir).iter_commits(rev=rev_range)
    if not hide_merges:
        return list(commit_iter)
    return [commit for commit in commit_iter
            if not commit.summary.startswith("Merge ")]
def get_commit_url(repo_url):
    """Return a browsable URL for viewing the commits of a repo URL."""
    if "github.com" in repo_url:
        # Strip a trailing ".git" so the URL points at the web UI.
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Map the OpenStack git farm onto its GitHub mirror.
        org_and_repo = repo_url.split('/')[-2:]
        return "https://github.com/{0}".format('/'.join(org_and_repo))
    # Unknown host: hand the URL back unchanged.
    return repo_url
def get_projects(osa_repo_dir, commit):
    """Get all projects from multiple YAML files.

    Checks out ``commit`` in the OSA repo, reads every YAML file under
    playbooks/defaults/repo_packages/, merges them into a single dict and
    returns the normalized (name, url, sha) tuples.
    """
    # Check out the correct commit SHA from the repository
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    yaml_files = glob.glob(
        '{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
    )
    yaml_parsed = []
    for yaml_file in yaml_files:
        with open(yaml_file, 'r') as f:
            # safe_load: these files are plain data. Avoids executing
            # arbitrary YAML tags and the yaml.load() missing-Loader warning.
            yaml_parsed.append(yaml.safe_load(f))
    merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
    return normalize_yaml(merged_dicts)
def checkout(repo, ref):
    """Check out ``ref`` in ``repo``, discarding any local changes.

    Args:
        repo: a git.Repo instance.
        ref: branch name, tag or SHA to check out.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # Logger.warn() is a deprecated alias; use warning().
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')

    log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    Returns normalized (name, src, version) tuples from the role
    requirements YAML file.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load avoids constructing arbitrary Python objects from YAML.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible.

    Clones/updates the OSA repo, validates both commits and the range
    between them, then renders the report header template including the
    OSA commits themselves.
    """
    update_repo(repo_dir, args.osa_repo_url, args.update)
    # Are these commits valid?
    validate_commits(repo_dir, [old_commit, new_commit])
    # Do we have a valid commit range?
    validate_commit_range(repo_dir, old_commit, new_commit)
    # Get the commits in the range
    commits = get_commits(repo_dir, old_commit, new_commit)
    # Start off our report with a header and our OpenStack-Ansible commits.
    template_vars = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': commits,
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit
    }
    return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    Args:
        storage_directory: base directory holding the cloned repos.
        old_pins, new_pins: lists of (name, url, sha) tuples.
        do_update: fetch the latest changes for each repo when True.
        version_mappings: optional {repo: {old_sha: new_sha}} overrides
            for pins that no longer exist upstream.
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)

        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)

        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except StopIteration:
            # next() raises StopIteration when no old pin matches; catching
            # it specifically avoids hiding unrelated errors.
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)

        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst

    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    These are returned as a list of (name, repo_url, version) tuples.
    """
    if isinstance(yaml, list):
        # Role-style YAML: a list of dicts with name/src/version keys;
        # a missing version defaults to 'HEAD'.
        return [(role['name'], role['src'], role.get('version', 'HEAD'))
                for role in yaml]

    # Project-style YAML: a flat mapping with <name>_git_repo and
    # <name>_git_install_branch keys. Derive project names from the
    # *_git_repo keys (strip the 9-char '_git_repo' suffix).
    normalized_yaml = []
    for key in yaml:
        if not key.endswith('git_repo'):
            continue
        project = key[:-9]
        normalized_yaml.append(
            (project,
             yaml['{0}_git_repo'.format(project)],
             yaml['{0}_git_install_branch'.format(project)]))
    return normalized_yaml
def parse_arguments():
    """Parse command-line arguments and return the populated namespace."""
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist.

    Raises requests.HTTPError when GitHub rejects the request, instead of
    failing later with a confusing KeyError on the response payload.
    """
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            "osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
                "content": report_data
            }
        }
    }
    url = "https://api.github.com/gists"
    # A timeout prevents the tool from hanging forever on network issues.
    r = requests.post(url, data=json.dumps(payload), timeout=60)
    r.raise_for_status()
    return r.json()['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request.

    Returns the report text itself for plain stdout output; otherwise a
    summary of where the report was published.
    """
    # Plain stdout output: hand back the report text unless the user
    # asked for quiet mode or an alternative destination.
    if not (args.quiet or args.gist or args.file):
        return report

    messages = []
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        messages.append(
            "\nReport posted to GitHub Gist: {0}".format(gist_url))
    if args.file is not None:
        with open(args.file, 'w') as f:
            f.write(report)
        messages.append(
            "\nReport written to file: {0}".format(args.file))
    return "".join(messages)
def prepare_storage_dir(storage_directory):
    """Ensure the storage directory exists and return its expanded path.

    ``~`` is expanded to the user's home directory. os.makedirs() (unlike
    os.mkdir()) also creates missing parent directories, and exist_ok
    avoids a race if the directory appears concurrently.
    """
    storage_directory = os.path.expanduser(storage_directory)
    os.makedirs(storage_directory, exist_ok=True)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template bundled with this package."""
    # Templates live in the "templates" directory next to this module.
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    return jinja_env.get_template(template_file).render(template_vars)
def repo_clone(repo_dir, repo_url):
    """Clone the repository at ``repo_url`` into ``repo_dir`` and return it."""
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    The working tree is cleaned and hard-reset onto master before any
    fetch, so local modifications are discarded. Returns the git.Repo.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        # GitHub exposes pull requests under refs/pull/*; mirror them
        # locally so PR SHAs can be diffed too.
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)

    # Make sure the repo is properly prepared
    # and has all the refs required
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Check that every commit in ``commits`` exists in the repository.

    Raises InvalidCommitException for the first missing commit.
    """
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for sha in commits:
        try:
            repo.commit(sha)
        except Exception:
            msg = ("Commit {commit} could not be found in repo {repo}. "
                   "You may need to pass --update to fetch the latest "
                   "updates to the git repositories stored on "
                   "your local computer.".format(repo=repo_dir, commit=sha))
            raise exceptions.InvalidCommitException(msg)
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    Returns True when old_commit..new_commit contains commits, the string
    'flip' when the range only yields commits in the reversed direction,
    and raises InvalidCommitRangeException when neither direction works.
    """
    # Are there any commits between the two commits that were provided?
    try:
        commits = get_commits(repo_dir, old_commit, new_commit)
    except Exception:
        commits = []
    if len(commits) == 0:
        # The user might have gotten their commits out of order. Let's flip
        # the order of the commits and try again.
        try:
            commits = get_commits(repo_dir, new_commit, old_commit)
        except Exception:
            commits = []
        if len(commits) == 0:
            # Okay, so there really are no commits between the two commits
            # provided by the user. :)
            msg = ("The commit range {0}..{1} is invalid for {2}."
                   "You may need to use the --update option to fetch the "
                   "latest updates to the git repositories stored on your "
                   "local computer.".format(old_commit, new_commit, repo_dir))
            raise exceptions.InvalidCommitRangeException(msg)
        else:
            return 'flip'
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs ``reno report`` in the OSA checkout once for the range from the
    newest tag to the new commit, then once per intermediate tag, and
    concatenates the output, rewriting heading underlines ('=' -> '~',
    '-' -> '#') to nest under osa-differ's own report headings.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
if __name__ == "__main__":
run_osa_differ()
|
pudo/normality
|
normality/paths.py
|
_safe_name
|
python
|
def _safe_name(file_name, sep):
file_name = stringify(file_name)
if file_name is None:
return
file_name = ascii_text(file_name)
file_name = category_replace(file_name, UNICODE_CATEGORIES)
file_name = collapse_spaces(file_name)
if file_name is None or not len(file_name):
return
return file_name.replace(WS, sep)
|
Convert the file name to ASCII and normalize the string.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/paths.py#L11-L21
|
[
"def collapse_spaces(text):\n \"\"\"Remove newlines, tabs and multiple spaces with single spaces.\"\"\"\n if not isinstance(text, six.string_types):\n return text\n return COLLAPSE_RE.sub(WS, text).strip(WS)\n",
"def category_replace(text, replacements=UNICODE_CATEGORIES):\n \"\"\"Remove characters from a string based on unicode classes.\n\n This is a method for removing non-text characters (such as punctuation,\n whitespace, marks and diacritics) from a piece of text by class, rather\n than specifying them individually.\n \"\"\"\n if text is None:\n return None\n characters = []\n for character in decompose_nfkd(text):\n cat = category(character)\n replacement = replacements.get(cat, character)\n if replacement is not None:\n characters.append(replacement)\n return u''.join(characters)\n",
"def ascii_text(text):\n \"\"\"Transliterate the given text and make sure it ends up as ASCII.\"\"\"\n text = latinize_text(text, ascii=True)\n if isinstance(text, six.text_type):\n text = text.encode('ascii', 'ignore').decode('ascii')\n return text\n",
"def stringify(value, encoding_default='utf-8', encoding=None):\n \"\"\"Brute-force convert a given object to a string.\n\n This will attempt an increasingly mean set of conversions to make a given\n object into a unicode string. It is guaranteed to either return unicode or\n None, if all conversions failed (or the value is indeed empty).\n \"\"\"\n if value is None:\n return None\n\n if not isinstance(value, six.text_type):\n if isinstance(value, (date, datetime)):\n return value.isoformat()\n elif isinstance(value, (float, Decimal)):\n return Decimal(value).to_eng_string()\n elif isinstance(value, six.binary_type):\n if encoding is None:\n encoding = guess_encoding(value, default=encoding_default)\n value = value.decode(encoding, 'replace')\n value = remove_byte_order_mark(value)\n value = remove_unsafe_chars(value)\n else:\n value = six.text_type(value)\n\n # XXX: is this really a good idea?\n value = value.strip()\n if not len(value):\n return None\n return value\n"
] |
import os
from banal import decode_path
from normality.stringify import stringify
from normality.cleaning import collapse_spaces, category_replace
from normality.constants import UNICODE_CATEGORIES, WS
from normality.transliteration import ascii_text
MAX_LENGTH = 254
def safe_filename(file_name, sep='_', default=None, extension=None):
    """Create a secure filename for plain file system storage.

    The base name is sanitised via _safe_name and truncated to
    MAX_LENGTH; the extension (explicit or taken from the input path)
    is sanitised the same way and re-appended. Falls back to
    ``default`` when no usable name remains.
    """
    if file_name is None:
        return decode_path(default)

    base, ext_from_name = os.path.splitext(
        os.path.basename(decode_path(file_name)))
    base = _safe_name(base, sep=sep)
    if base is None:
        return decode_path(default)
    base = base[:MAX_LENGTH]

    safe_ext = _safe_name(extension or ext_from_name, sep=sep)
    if safe_ext is not None:
        base = '.'.join((base, safe_ext))
    return base[:MAX_LENGTH]
|
pudo/normality
|
normality/paths.py
|
safe_filename
|
python
|
def safe_filename(file_name, sep='_', default=None, extension=None):
if file_name is None:
return decode_path(default)
file_name = decode_path(file_name)
file_name = os.path.basename(file_name)
file_name, _extension = os.path.splitext(file_name)
file_name = _safe_name(file_name, sep=sep)
if file_name is None:
return decode_path(default)
file_name = file_name[:MAX_LENGTH]
extension = _safe_name(extension or _extension, sep=sep)
if extension is not None:
file_name = '.'.join((file_name, extension))
file_name = file_name[:MAX_LENGTH]
return file_name
|
Create a secure filename for plain file system storage.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/paths.py#L24-L40
|
[
"def _safe_name(file_name, sep):\n \"\"\"Convert the file name to ASCII and normalize the string.\"\"\"\n file_name = stringify(file_name)\n if file_name is None:\n return\n file_name = ascii_text(file_name)\n file_name = category_replace(file_name, UNICODE_CATEGORIES)\n file_name = collapse_spaces(file_name)\n if file_name is None or not len(file_name):\n return\n return file_name.replace(WS, sep)\n"
] |
import os
from banal import decode_path
from normality.stringify import stringify
from normality.cleaning import collapse_spaces, category_replace
from normality.constants import UNICODE_CATEGORIES, WS
from normality.transliteration import ascii_text
MAX_LENGTH = 254
def _safe_name(file_name, sep):
    """Convert the file name to ASCII and normalize the string."""
    text = stringify(file_name)
    if text is None:
        return
    text = collapse_spaces(category_replace(ascii_text(text),
                                            UNICODE_CATEGORIES))
    # None or empty after normalisation means no usable name remains.
    if not text:
        return
    return text.replace(WS, sep)
|
pudo/normality
|
normality/stringify.py
|
stringify
|
python
|
def stringify(value, encoding_default='utf-8', encoding=None):
if value is None:
return None
if not isinstance(value, six.text_type):
if isinstance(value, (date, datetime)):
return value.isoformat()
elif isinstance(value, (float, Decimal)):
return Decimal(value).to_eng_string()
elif isinstance(value, six.binary_type):
if encoding is None:
encoding = guess_encoding(value, default=encoding_default)
value = value.decode(encoding, 'replace')
value = remove_byte_order_mark(value)
value = remove_unsafe_chars(value)
else:
value = six.text_type(value)
# XXX: is this really a good idea?
value = value.strip()
if not len(value):
return None
return value
|
Brute-force convert a given object to a string.
This will attempt an increasingly mean set of conversions to make a given
object into a unicode string. It is guaranteed to either return unicode or
None, if all conversions failed (or the value is indeed empty).
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/stringify.py#L10-L38
| null |
import six
from datetime import datetime, date
from decimal import Decimal
from normality.cleaning import remove_byte_order_mark
from normality.cleaning import remove_unsafe_chars
from normality.encoding import guess_encoding
|
pudo/normality
|
normality/encoding.py
|
normalize_encoding
|
python
|
def normalize_encoding(encoding, default=DEFAULT_ENCODING):
if encoding is None:
return default
encoding = encoding.lower().strip()
if encoding in ['', 'ascii']:
return default
try:
codecs.lookup(encoding)
return encoding
except LookupError:
return default
|
Normalize the encoding name, replace ASCII w/ UTF-8.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/encoding.py#L8-L19
| null |
import io
import codecs
import chardet
DEFAULT_ENCODING = 'utf-8'
def normalize_result(result, default, threshold=0.2):
    """Interpret a chardet result, falling back to ``default``.

    Results that are missing, have no confidence, or fall below the
    confidence threshold yield the default encoding.
    """
    if result is None:
        return default
    confidence = result.get('confidence')
    if confidence is None or confidence < threshold:
        return default
    return normalize_encoding(result.get('encoding'), default=default)
def guess_encoding(text, default=DEFAULT_ENCODING):
    """Guess string encoding.

    Given a piece of text, apply character encoding detection to
    guess the appropriate encoding of the text.
    """
    return normalize_result(chardet.detect(text), default=default)
def guess_file_encoding(fh, default=DEFAULT_ENCODING):
    """Guess encoding from a file handle, restoring its position."""
    start = fh.tell()
    detector = chardet.UniversalDetector()
    chunk = fh.read(1024 * 10)
    while chunk:
        detector.feed(chunk)
        if detector.done:
            break
        chunk = fh.read(1024 * 10)
    else:
        # Loop ran out of data without the detector finishing.
        detector.close()
    fh.seek(start)
    return normalize_result(detector.result, default=default)
def guess_path_encoding(file_path, default=DEFAULT_ENCODING):
    """Open ``file_path`` in binary mode and guess its text encoding."""
    with io.open(file_path, 'rb') as fh:
        return guess_file_encoding(fh, default=default)
|
pudo/normality
|
normality/encoding.py
|
normalize_result
|
python
|
def normalize_result(result, default, threshold=0.2):
if result is None:
return default
if result.get('confidence') is None:
return default
if result.get('confidence') < threshold:
return default
return normalize_encoding(result.get('encoding'),
default=default)
|
Interpret a chardet result.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/encoding.py#L22-L31
|
[
"def normalize_encoding(encoding, default=DEFAULT_ENCODING):\n \"\"\"Normalize the encoding name, replace ASCII w/ UTF-8.\"\"\"\n if encoding is None:\n return default\n encoding = encoding.lower().strip()\n if encoding in ['', 'ascii']:\n return default\n try:\n codecs.lookup(encoding)\n return encoding\n except LookupError:\n return default\n"
] |
import io
import codecs
import chardet
DEFAULT_ENCODING = 'utf-8'
def normalize_encoding(encoding, default=DEFAULT_ENCODING):
    """Normalize the encoding name, replace ASCII w/ UTF-8."""
    if encoding is None:
        return default
    encoding = encoding.lower().strip()
    if encoding in ['', 'ascii']:
        return default
    # Accept only names the codecs registry actually knows about.
    try:
        codecs.lookup(encoding)
    except LookupError:
        return default
    return encoding
def guess_encoding(text, default=DEFAULT_ENCODING):
"""Guess string encoding.
Given a piece of text, apply character encoding detection to
guess the appropriate encoding of the text.
"""
result = chardet.detect(text)
return normalize_result(result, default=default)
def guess_file_encoding(fh, default=DEFAULT_ENCODING):
"""Guess encoding from a file handle."""
start = fh.tell()
detector = chardet.UniversalDetector()
while True:
data = fh.read(1024 * 10)
if not data:
detector.close()
break
detector.feed(data)
if detector.done:
break
fh.seek(start)
return normalize_result(detector.result, default=default)
def guess_path_encoding(file_path, default=DEFAULT_ENCODING):
"""Wrapper to open that damn file for you, lazy bastard."""
with io.open(file_path, 'rb') as fh:
return guess_file_encoding(fh, default=default)
|
pudo/normality
|
normality/encoding.py
|
guess_encoding
|
python
|
def guess_encoding(text, default=DEFAULT_ENCODING):
result = chardet.detect(text)
return normalize_result(result, default=default)
|
Guess string encoding.
Given a piece of text, apply character encoding detection to
guess the appropriate encoding of the text.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/encoding.py#L34-L41
|
[
"def normalize_result(result, default, threshold=0.2):\n \"\"\"Interpret a chardet result.\"\"\"\n if result is None:\n return default\n if result.get('confidence') is None:\n return default\n if result.get('confidence') < threshold:\n return default\n return normalize_encoding(result.get('encoding'),\n default=default)\n"
] |
import io
import codecs
import chardet
DEFAULT_ENCODING = 'utf-8'
def normalize_encoding(encoding, default=DEFAULT_ENCODING):
"""Normalize the encoding name, replace ASCII w/ UTF-8."""
if encoding is None:
return default
encoding = encoding.lower().strip()
if encoding in ['', 'ascii']:
return default
try:
codecs.lookup(encoding)
return encoding
except LookupError:
return default
def normalize_result(result, default, threshold=0.2):
"""Interpret a chardet result."""
if result is None:
return default
if result.get('confidence') is None:
return default
if result.get('confidence') < threshold:
return default
return normalize_encoding(result.get('encoding'),
default=default)
def guess_file_encoding(fh, default=DEFAULT_ENCODING):
"""Guess encoding from a file handle."""
start = fh.tell()
detector = chardet.UniversalDetector()
while True:
data = fh.read(1024 * 10)
if not data:
detector.close()
break
detector.feed(data)
if detector.done:
break
fh.seek(start)
return normalize_result(detector.result, default=default)
def guess_path_encoding(file_path, default=DEFAULT_ENCODING):
"""Wrapper to open that damn file for you, lazy bastard."""
with io.open(file_path, 'rb') as fh:
return guess_file_encoding(fh, default=default)
|
pudo/normality
|
normality/encoding.py
|
guess_file_encoding
|
python
|
def guess_file_encoding(fh, default=DEFAULT_ENCODING):
start = fh.tell()
detector = chardet.UniversalDetector()
while True:
data = fh.read(1024 * 10)
if not data:
detector.close()
break
detector.feed(data)
if detector.done:
break
fh.seek(start)
return normalize_result(detector.result, default=default)
|
Guess encoding from a file handle.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/encoding.py#L44-L58
|
[
"def normalize_result(result, default, threshold=0.2):\n \"\"\"Interpret a chardet result.\"\"\"\n if result is None:\n return default\n if result.get('confidence') is None:\n return default\n if result.get('confidence') < threshold:\n return default\n return normalize_encoding(result.get('encoding'),\n default=default)\n"
] |
import io
import codecs
import chardet
DEFAULT_ENCODING = 'utf-8'
def normalize_encoding(encoding, default=DEFAULT_ENCODING):
"""Normalize the encoding name, replace ASCII w/ UTF-8."""
if encoding is None:
return default
encoding = encoding.lower().strip()
if encoding in ['', 'ascii']:
return default
try:
codecs.lookup(encoding)
return encoding
except LookupError:
return default
def normalize_result(result, default, threshold=0.2):
"""Interpret a chardet result."""
if result is None:
return default
if result.get('confidence') is None:
return default
if result.get('confidence') < threshold:
return default
return normalize_encoding(result.get('encoding'),
default=default)
def guess_encoding(text, default=DEFAULT_ENCODING):
"""Guess string encoding.
Given a piece of text, apply character encoding detection to
guess the appropriate encoding of the text.
"""
result = chardet.detect(text)
return normalize_result(result, default=default)
def guess_path_encoding(file_path, default=DEFAULT_ENCODING):
"""Wrapper to open that damn file for you, lazy bastard."""
with io.open(file_path, 'rb') as fh:
return guess_file_encoding(fh, default=default)
|
pudo/normality
|
normality/encoding.py
|
guess_path_encoding
|
python
|
def guess_path_encoding(file_path, default=DEFAULT_ENCODING):
with io.open(file_path, 'rb') as fh:
return guess_file_encoding(fh, default=default)
|
Wrapper to open that damn file for you, lazy bastard.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/encoding.py#L61-L64
|
[
"def guess_file_encoding(fh, default=DEFAULT_ENCODING):\n \"\"\"Guess encoding from a file handle.\"\"\"\n start = fh.tell()\n detector = chardet.UniversalDetector()\n while True:\n data = fh.read(1024 * 10)\n if not data:\n detector.close()\n break\n detector.feed(data)\n if detector.done:\n break\n\n fh.seek(start)\n return normalize_result(detector.result, default=default)\n"
] |
import io
import codecs
import chardet
DEFAULT_ENCODING = 'utf-8'
def normalize_encoding(encoding, default=DEFAULT_ENCODING):
"""Normalize the encoding name, replace ASCII w/ UTF-8."""
if encoding is None:
return default
encoding = encoding.lower().strip()
if encoding in ['', 'ascii']:
return default
try:
codecs.lookup(encoding)
return encoding
except LookupError:
return default
def normalize_result(result, default, threshold=0.2):
"""Interpret a chardet result."""
if result is None:
return default
if result.get('confidence') is None:
return default
if result.get('confidence') < threshold:
return default
return normalize_encoding(result.get('encoding'),
default=default)
def guess_encoding(text, default=DEFAULT_ENCODING):
"""Guess string encoding.
Given a piece of text, apply character encoding detection to
guess the appropriate encoding of the text.
"""
result = chardet.detect(text)
return normalize_result(result, default=default)
def guess_file_encoding(fh, default=DEFAULT_ENCODING):
"""Guess encoding from a file handle."""
start = fh.tell()
detector = chardet.UniversalDetector()
while True:
data = fh.read(1024 * 10)
if not data:
detector.close()
break
detector.feed(data)
if detector.done:
break
fh.seek(start)
return normalize_result(detector.result, default=default)
|
pudo/normality
|
normality/cleaning.py
|
decompose_nfkd
|
python
|
def decompose_nfkd(text):
if text is None:
return None
if not hasattr(decompose_nfkd, '_tr'):
decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD')
return decompose_nfkd._tr.transliterate(text)
|
Perform unicode compatibility decomposition.
This will replace some non-standard value representations in unicode and
normalise them, while also separating characters and their diacritics into
two separate codepoints.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/cleaning.py#L17-L28
| null |
# coding: utf-8
from __future__ import unicode_literals
import re
import six
from icu import Transliterator
from unicodedata import category
from normality.constants import UNICODE_CATEGORIES, CONTROL_CODES, WS
COLLAPSE_RE = re.compile(r'\s+', re.U)
BOM_RE = re.compile('^\ufeff', re.U)
UNSAFE_RE = re.compile('\x00', re.U)
QUOTES_RE = re.compile('^["\'](.*)["\']$')
def compose_nfc(text):
"""Perform unicode composition."""
if text is None:
return None
if not hasattr(compose_nfc, '_tr'):
compose_nfc._tr = Transliterator.createInstance('Any-NFC')
return compose_nfc._tr.transliterate(text)
def strip_quotes(text):
"""Remove double or single quotes surrounding a string."""
if text is None:
return
return QUOTES_RE.sub('\\1', text)
def category_replace(text, replacements=UNICODE_CATEGORIES):
"""Remove characters from a string based on unicode classes.
This is a method for removing non-text characters (such as punctuation,
whitespace, marks and diacritics) from a piece of text by class, rather
than specifying them individually.
"""
if text is None:
return None
characters = []
for character in decompose_nfkd(text):
cat = category(character)
replacement = replacements.get(cat, character)
if replacement is not None:
characters.append(replacement)
return u''.join(characters)
def remove_control_chars(text):
"""Remove just the control codes from a piece of text."""
return category_replace(text, replacements=CONTROL_CODES)
def remove_unsafe_chars(text):
"""Remove unsafe unicode characters from a piece of text."""
if isinstance(text, six.string_types):
text = UNSAFE_RE.sub('', text)
return text
def remove_byte_order_mark(text):
"""Remove a BOM from the beginning of the text."""
return BOM_RE.sub('', text)
def collapse_spaces(text):
"""Remove newlines, tabs and multiple spaces with single spaces."""
if not isinstance(text, six.string_types):
return text
return COLLAPSE_RE.sub(WS, text).strip(WS)
|
pudo/normality
|
normality/cleaning.py
|
compose_nfc
|
python
|
def compose_nfc(text):
if text is None:
return None
if not hasattr(compose_nfc, '_tr'):
compose_nfc._tr = Transliterator.createInstance('Any-NFC')
return compose_nfc._tr.transliterate(text)
|
Perform unicode composition.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/cleaning.py#L31-L37
| null |
# coding: utf-8
from __future__ import unicode_literals
import re
import six
from icu import Transliterator
from unicodedata import category
from normality.constants import UNICODE_CATEGORIES, CONTROL_CODES, WS
COLLAPSE_RE = re.compile(r'\s+', re.U)
BOM_RE = re.compile('^\ufeff', re.U)
UNSAFE_RE = re.compile('\x00', re.U)
QUOTES_RE = re.compile('^["\'](.*)["\']$')
def decompose_nfkd(text):
"""Perform unicode compatibility decomposition.
This will replace some non-standard value representations in unicode and
normalise them, while also separating characters and their diacritics into
two separate codepoints.
"""
if text is None:
return None
if not hasattr(decompose_nfkd, '_tr'):
decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD')
return decompose_nfkd._tr.transliterate(text)
def strip_quotes(text):
"""Remove double or single quotes surrounding a string."""
if text is None:
return
return QUOTES_RE.sub('\\1', text)
def category_replace(text, replacements=UNICODE_CATEGORIES):
"""Remove characters from a string based on unicode classes.
This is a method for removing non-text characters (such as punctuation,
whitespace, marks and diacritics) from a piece of text by class, rather
than specifying them individually.
"""
if text is None:
return None
characters = []
for character in decompose_nfkd(text):
cat = category(character)
replacement = replacements.get(cat, character)
if replacement is not None:
characters.append(replacement)
return u''.join(characters)
def remove_control_chars(text):
"""Remove just the control codes from a piece of text."""
return category_replace(text, replacements=CONTROL_CODES)
def remove_unsafe_chars(text):
"""Remove unsafe unicode characters from a piece of text."""
if isinstance(text, six.string_types):
text = UNSAFE_RE.sub('', text)
return text
def remove_byte_order_mark(text):
"""Remove a BOM from the beginning of the text."""
return BOM_RE.sub('', text)
def collapse_spaces(text):
"""Remove newlines, tabs and multiple spaces with single spaces."""
if not isinstance(text, six.string_types):
return text
return COLLAPSE_RE.sub(WS, text).strip(WS)
|
pudo/normality
|
normality/cleaning.py
|
category_replace
|
python
|
def category_replace(text, replacements=UNICODE_CATEGORIES):
if text is None:
return None
characters = []
for character in decompose_nfkd(text):
cat = category(character)
replacement = replacements.get(cat, character)
if replacement is not None:
characters.append(replacement)
return u''.join(characters)
|
Remove characters from a string based on unicode classes.
This is a method for removing non-text characters (such as punctuation,
whitespace, marks and diacritics) from a piece of text by class, rather
than specifying them individually.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/cleaning.py#L47-L62
|
[
"def decompose_nfkd(text):\n \"\"\"Perform unicode compatibility decomposition.\n\n This will replace some non-standard value representations in unicode and\n normalise them, while also separating characters and their diacritics into\n two separate codepoints.\n \"\"\"\n if text is None:\n return None\n if not hasattr(decompose_nfkd, '_tr'):\n decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD')\n return decompose_nfkd._tr.transliterate(text)\n"
] |
# coding: utf-8
from __future__ import unicode_literals
import re
import six
from icu import Transliterator
from unicodedata import category
from normality.constants import UNICODE_CATEGORIES, CONTROL_CODES, WS
COLLAPSE_RE = re.compile(r'\s+', re.U)
BOM_RE = re.compile('^\ufeff', re.U)
UNSAFE_RE = re.compile('\x00', re.U)
QUOTES_RE = re.compile('^["\'](.*)["\']$')
def decompose_nfkd(text):
"""Perform unicode compatibility decomposition.
This will replace some non-standard value representations in unicode and
normalise them, while also separating characters and their diacritics into
two separate codepoints.
"""
if text is None:
return None
if not hasattr(decompose_nfkd, '_tr'):
decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD')
return decompose_nfkd._tr.transliterate(text)
def compose_nfc(text):
"""Perform unicode composition."""
if text is None:
return None
if not hasattr(compose_nfc, '_tr'):
compose_nfc._tr = Transliterator.createInstance('Any-NFC')
return compose_nfc._tr.transliterate(text)
def strip_quotes(text):
"""Remove double or single quotes surrounding a string."""
if text is None:
return
return QUOTES_RE.sub('\\1', text)
def remove_control_chars(text):
"""Remove just the control codes from a piece of text."""
return category_replace(text, replacements=CONTROL_CODES)
def remove_unsafe_chars(text):
"""Remove unsafe unicode characters from a piece of text."""
if isinstance(text, six.string_types):
text = UNSAFE_RE.sub('', text)
return text
def remove_byte_order_mark(text):
"""Remove a BOM from the beginning of the text."""
return BOM_RE.sub('', text)
def collapse_spaces(text):
"""Remove newlines, tabs and multiple spaces with single spaces."""
if not isinstance(text, six.string_types):
return text
return COLLAPSE_RE.sub(WS, text).strip(WS)
|
pudo/normality
|
normality/cleaning.py
|
remove_unsafe_chars
|
python
|
def remove_unsafe_chars(text):
if isinstance(text, six.string_types):
text = UNSAFE_RE.sub('', text)
return text
|
Remove unsafe unicode characters from a piece of text.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/cleaning.py#L70-L74
| null |
# coding: utf-8
from __future__ import unicode_literals
import re
import six
from icu import Transliterator
from unicodedata import category
from normality.constants import UNICODE_CATEGORIES, CONTROL_CODES, WS
COLLAPSE_RE = re.compile(r'\s+', re.U)
BOM_RE = re.compile('^\ufeff', re.U)
UNSAFE_RE = re.compile('\x00', re.U)
QUOTES_RE = re.compile('^["\'](.*)["\']$')
def decompose_nfkd(text):
"""Perform unicode compatibility decomposition.
This will replace some non-standard value representations in unicode and
normalise them, while also separating characters and their diacritics into
two separate codepoints.
"""
if text is None:
return None
if not hasattr(decompose_nfkd, '_tr'):
decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD')
return decompose_nfkd._tr.transliterate(text)
def compose_nfc(text):
"""Perform unicode composition."""
if text is None:
return None
if not hasattr(compose_nfc, '_tr'):
compose_nfc._tr = Transliterator.createInstance('Any-NFC')
return compose_nfc._tr.transliterate(text)
def strip_quotes(text):
"""Remove double or single quotes surrounding a string."""
if text is None:
return
return QUOTES_RE.sub('\\1', text)
def category_replace(text, replacements=UNICODE_CATEGORIES):
"""Remove characters from a string based on unicode classes.
This is a method for removing non-text characters (such as punctuation,
whitespace, marks and diacritics) from a piece of text by class, rather
than specifying them individually.
"""
if text is None:
return None
characters = []
for character in decompose_nfkd(text):
cat = category(character)
replacement = replacements.get(cat, character)
if replacement is not None:
characters.append(replacement)
return u''.join(characters)
def remove_control_chars(text):
"""Remove just the control codes from a piece of text."""
return category_replace(text, replacements=CONTROL_CODES)
def remove_byte_order_mark(text):
"""Remove a BOM from the beginning of the text."""
return BOM_RE.sub('', text)
def collapse_spaces(text):
"""Remove newlines, tabs and multiple spaces with single spaces."""
if not isinstance(text, six.string_types):
return text
return COLLAPSE_RE.sub(WS, text).strip(WS)
|
pudo/normality
|
normality/cleaning.py
|
collapse_spaces
|
python
|
def collapse_spaces(text):
if not isinstance(text, six.string_types):
return text
return COLLAPSE_RE.sub(WS, text).strip(WS)
|
Remove newlines, tabs and multiple spaces with single spaces.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/cleaning.py#L82-L86
| null |
# coding: utf-8
from __future__ import unicode_literals
import re
import six
from icu import Transliterator
from unicodedata import category
from normality.constants import UNICODE_CATEGORIES, CONTROL_CODES, WS
COLLAPSE_RE = re.compile(r'\s+', re.U)
BOM_RE = re.compile('^\ufeff', re.U)
UNSAFE_RE = re.compile('\x00', re.U)
QUOTES_RE = re.compile('^["\'](.*)["\']$')
def decompose_nfkd(text):
"""Perform unicode compatibility decomposition.
This will replace some non-standard value representations in unicode and
normalise them, while also separating characters and their diacritics into
two separate codepoints.
"""
if text is None:
return None
if not hasattr(decompose_nfkd, '_tr'):
decompose_nfkd._tr = Transliterator.createInstance('Any-NFKD')
return decompose_nfkd._tr.transliterate(text)
def compose_nfc(text):
"""Perform unicode composition."""
if text is None:
return None
if not hasattr(compose_nfc, '_tr'):
compose_nfc._tr = Transliterator.createInstance('Any-NFC')
return compose_nfc._tr.transliterate(text)
def strip_quotes(text):
"""Remove double or single quotes surrounding a string."""
if text is None:
return
return QUOTES_RE.sub('\\1', text)
def category_replace(text, replacements=UNICODE_CATEGORIES):
"""Remove characters from a string based on unicode classes.
This is a method for removing non-text characters (such as punctuation,
whitespace, marks and diacritics) from a piece of text by class, rather
than specifying them individually.
"""
if text is None:
return None
characters = []
for character in decompose_nfkd(text):
cat = category(character)
replacement = replacements.get(cat, character)
if replacement is not None:
characters.append(replacement)
return u''.join(characters)
def remove_control_chars(text):
"""Remove just the control codes from a piece of text."""
return category_replace(text, replacements=CONTROL_CODES)
def remove_unsafe_chars(text):
"""Remove unsafe unicode characters from a piece of text."""
if isinstance(text, six.string_types):
text = UNSAFE_RE.sub('', text)
return text
def remove_byte_order_mark(text):
"""Remove a BOM from the beginning of the text."""
return BOM_RE.sub('', text)
|
pudo/normality
|
normality/__init__.py
|
normalize
|
python
|
def normalize(text, lowercase=True, collapse=True, latinize=False, ascii=False,
encoding_default='utf-8', encoding=None,
replace_categories=UNICODE_CATEGORIES):
text = stringify(text, encoding_default=encoding_default,
encoding=encoding)
if text is None:
return
if lowercase:
# Yeah I made a Python package for this.
text = text.lower()
if ascii:
# A stricter form of transliteration that leaves only ASCII
# characters.
text = ascii_text(text)
elif latinize:
# Perform unicode-based transliteration, e.g. of cyricllic
# or CJK scripts into latin.
text = latinize_text(text)
if text is None:
return
# Perform unicode category-based character replacement. This is
# used to filter out whole classes of characters, such as symbols,
# punctuation, or whitespace-like characters.
text = category_replace(text, replace_categories)
if collapse:
# Remove consecutive whitespace.
text = collapse_spaces(text)
return text
|
The main normalization function for text.
This will take a string and apply a set of transformations to it so
that it can be processed more easily afterwards. Arguments:
* ``lowercase``: not very mysterious.
* ``collapse``: replace multiple whitespace-like characters with a
single whitespace. This is especially useful with category replacement
which can lead to a lot of whitespace.
* ``decompose``: apply a unicode normalization (NFKD) to separate
simple characters and their diacritics.
* ``replace_categories``: This will perform a replacement of whole
classes of unicode characters (e.g. symbols, marks, numbers) with a
given character. It is used to replace any non-text elements of the
input string.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/__init__.py#L9-L57
|
[
"def collapse_spaces(text):\n \"\"\"Remove newlines, tabs and multiple spaces with single spaces.\"\"\"\n if not isinstance(text, six.string_types):\n return text\n return COLLAPSE_RE.sub(WS, text).strip(WS)\n",
"def category_replace(text, replacements=UNICODE_CATEGORIES):\n \"\"\"Remove characters from a string based on unicode classes.\n\n This is a method for removing non-text characters (such as punctuation,\n whitespace, marks and diacritics) from a piece of text by class, rather\n than specifying them individually.\n \"\"\"\n if text is None:\n return None\n characters = []\n for character in decompose_nfkd(text):\n cat = category(character)\n replacement = replacements.get(cat, character)\n if replacement is not None:\n characters.append(replacement)\n return u''.join(characters)\n",
"def latinize_text(text, ascii=False):\n \"\"\"Transliterate the given text to the latin script.\n\n This attempts to convert a given text to latin script using the\n closest match of characters vis a vis the original script.\n \"\"\"\n if text is None or not isinstance(text, six.string_types) or not len(text):\n return text\n\n if ascii:\n if not hasattr(latinize_text, '_ascii'):\n # Transform to latin, separate accents, decompose, remove\n # symbols, compose, push to ASCII\n latinize_text._ascii = Transliterator.createInstance('Any-Latin; NFKD; [:Symbol:] Remove; [:Nonspacing Mark:] Remove; NFKC; Accents-Any; Latin-ASCII') # noqa\n return latinize_text._ascii.transliterate(text)\n\n if not hasattr(latinize_text, '_tr'):\n latinize_text._tr = Transliterator.createInstance('Any-Latin')\n return latinize_text._tr.transliterate(text)\n",
"def ascii_text(text):\n \"\"\"Transliterate the given text and make sure it ends up as ASCII.\"\"\"\n text = latinize_text(text, ascii=True)\n if isinstance(text, six.text_type):\n text = text.encode('ascii', 'ignore').decode('ascii')\n return text\n",
"def stringify(value, encoding_default='utf-8', encoding=None):\n \"\"\"Brute-force convert a given object to a string.\n\n This will attempt an increasingly mean set of conversions to make a given\n object into a unicode string. It is guaranteed to either return unicode or\n None, if all conversions failed (or the value is indeed empty).\n \"\"\"\n if value is None:\n return None\n\n if not isinstance(value, six.text_type):\n if isinstance(value, (date, datetime)):\n return value.isoformat()\n elif isinstance(value, (float, Decimal)):\n return Decimal(value).to_eng_string()\n elif isinstance(value, six.binary_type):\n if encoding is None:\n encoding = guess_encoding(value, default=encoding_default)\n value = value.decode(encoding, 'replace')\n value = remove_byte_order_mark(value)\n value = remove_unsafe_chars(value)\n else:\n value = six.text_type(value)\n\n # XXX: is this really a good idea?\n value = value.strip()\n if not len(value):\n return None\n return value\n"
] |
from normality.cleaning import collapse_spaces, category_replace
from normality.constants import UNICODE_CATEGORIES, WS
from normality.transliteration import latinize_text, ascii_text
from normality.encoding import guess_encoding, guess_file_encoding # noqa
from normality.stringify import stringify # noqa
from normality.paths import safe_filename # noqa
def slugify(text, sep='-'):
"""A simple slug generator."""
text = stringify(text)
if text is None:
return None
text = text.replace(sep, WS)
text = normalize(text, ascii=True)
if text is None:
return None
return text.replace(WS, sep)
|
pudo/normality
|
normality/__init__.py
|
slugify
|
python
|
def slugify(text, sep='-'):
text = stringify(text)
if text is None:
return None
text = text.replace(sep, WS)
text = normalize(text, ascii=True)
if text is None:
return None
return text.replace(WS, sep)
|
A simple slug generator.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/__init__.py#L60-L69
|
[
"def normalize(text, lowercase=True, collapse=True, latinize=False, ascii=False,\n encoding_default='utf-8', encoding=None,\n replace_categories=UNICODE_CATEGORIES):\n \"\"\"The main normalization function for text.\n\n This will take a string and apply a set of transformations to it so\n that it can be processed more easily afterwards. Arguments:\n\n * ``lowercase``: not very mysterious.\n * ``collapse``: replace multiple whitespace-like characters with a\n single whitespace. This is especially useful with category replacement\n which can lead to a lot of whitespace.\n * ``decompose``: apply a unicode normalization (NFKD) to separate\n simple characters and their diacritics.\n * ``replace_categories``: This will perform a replacement of whole\n classes of unicode characters (e.g. symbols, marks, numbers) with a\n given character. It is used to replace any non-text elements of the\n input string.\n \"\"\"\n text = stringify(text, encoding_default=encoding_default,\n encoding=encoding)\n if text is None:\n return\n\n if lowercase:\n # Yeah I made a Python package for this.\n text = text.lower()\n\n if ascii:\n # A stricter form of transliteration that leaves only ASCII\n # characters.\n text = ascii_text(text)\n elif latinize:\n # Perform unicode-based transliteration, e.g. of cyricllic\n # or CJK scripts into latin.\n text = latinize_text(text)\n\n if text is None:\n return\n\n # Perform unicode category-based character replacement. This is\n # used to filter out whole classes of characters, such as symbols,\n # punctuation, or whitespace-like characters.\n text = category_replace(text, replace_categories)\n\n if collapse:\n # Remove consecutive whitespace.\n text = collapse_spaces(text)\n return text\n",
"def stringify(value, encoding_default='utf-8', encoding=None):\n \"\"\"Brute-force convert a given object to a string.\n\n This will attempt an increasingly mean set of conversions to make a given\n object into a unicode string. It is guaranteed to either return unicode or\n None, if all conversions failed (or the value is indeed empty).\n \"\"\"\n if value is None:\n return None\n\n if not isinstance(value, six.text_type):\n if isinstance(value, (date, datetime)):\n return value.isoformat()\n elif isinstance(value, (float, Decimal)):\n return Decimal(value).to_eng_string()\n elif isinstance(value, six.binary_type):\n if encoding is None:\n encoding = guess_encoding(value, default=encoding_default)\n value = value.decode(encoding, 'replace')\n value = remove_byte_order_mark(value)\n value = remove_unsafe_chars(value)\n else:\n value = six.text_type(value)\n\n # XXX: is this really a good idea?\n value = value.strip()\n if not len(value):\n return None\n return value\n"
] |
from normality.cleaning import collapse_spaces, category_replace
from normality.constants import UNICODE_CATEGORIES, WS
from normality.transliteration import latinize_text, ascii_text
from normality.encoding import guess_encoding, guess_file_encoding # noqa
from normality.stringify import stringify # noqa
from normality.paths import safe_filename # noqa
def normalize(text, lowercase=True, collapse=True, latinize=False, ascii=False,
encoding_default='utf-8', encoding=None,
replace_categories=UNICODE_CATEGORIES):
"""The main normalization function for text.
This will take a string and apply a set of transformations to it so
that it can be processed more easily afterwards. Arguments:
* ``lowercase``: not very mysterious.
* ``collapse``: replace multiple whitespace-like characters with a
single whitespace. This is especially useful with category replacement
which can lead to a lot of whitespace.
* ``decompose``: apply a unicode normalization (NFKD) to separate
simple characters and their diacritics.
* ``replace_categories``: This will perform a replacement of whole
classes of unicode characters (e.g. symbols, marks, numbers) with a
given character. It is used to replace any non-text elements of the
input string.
"""
text = stringify(text, encoding_default=encoding_default,
encoding=encoding)
if text is None:
return
if lowercase:
# Yeah I made a Python package for this.
text = text.lower()
if ascii:
# A stricter form of transliteration that leaves only ASCII
# characters.
text = ascii_text(text)
elif latinize:
# Perform unicode-based transliteration, e.g. of cyricllic
# or CJK scripts into latin.
text = latinize_text(text)
if text is None:
return
# Perform unicode category-based character replacement. This is
# used to filter out whole classes of characters, such as symbols,
# punctuation, or whitespace-like characters.
text = category_replace(text, replace_categories)
if collapse:
# Remove consecutive whitespace.
text = collapse_spaces(text)
return text
|
pudo/normality
|
normality/transliteration.py
|
latinize_text
|
python
|
def latinize_text(text, ascii=False):
if text is None or not isinstance(text, six.string_types) or not len(text):
return text
if ascii:
if not hasattr(latinize_text, '_ascii'):
# Transform to latin, separate accents, decompose, remove
# symbols, compose, push to ASCII
latinize_text._ascii = Transliterator.createInstance('Any-Latin; NFKD; [:Symbol:] Remove; [:Nonspacing Mark:] Remove; NFKC; Accents-Any; Latin-ASCII') # noqa
return latinize_text._ascii.transliterate(text)
if not hasattr(latinize_text, '_tr'):
latinize_text._tr = Transliterator.createInstance('Any-Latin')
return latinize_text._tr.transliterate(text)
|
Transliterate the given text to the latin script.
This attempts to convert a given text to latin script using the
closest match of characters vis a vis the original script.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/transliteration.py#L18-L36
| null |
# coding: utf-8
"""
Transliterate the given text to the latin script.
This attempts to convert a given text to latin script using the
closest match of characters vis a vis the original script.
Transliteration requires an extensive unicode mapping. Since all
Python implementations are either GPL-licensed (and thus more
restrictive than this library) or come with a massive C code
dependency, this module requires neither but will use a package
if it is installed.
"""
import six
from icu import Transliterator
def ascii_text(text):
"""Transliterate the given text and make sure it ends up as ASCII."""
text = latinize_text(text, ascii=True)
if isinstance(text, six.text_type):
text = text.encode('ascii', 'ignore').decode('ascii')
return text
|
pudo/normality
|
normality/transliteration.py
|
ascii_text
|
python
|
def ascii_text(text):
text = latinize_text(text, ascii=True)
if isinstance(text, six.text_type):
text = text.encode('ascii', 'ignore').decode('ascii')
return text
|
Transliterate the given text and make sure it ends up as ASCII.
|
train
|
https://github.com/pudo/normality/blob/b53cc2c6e5c6205573d2010f72d90808710a4b58/normality/transliteration.py#L39-L44
|
[
"def latinize_text(text, ascii=False):\n \"\"\"Transliterate the given text to the latin script.\n\n This attempts to convert a given text to latin script using the\n closest match of characters vis a vis the original script.\n \"\"\"\n if text is None or not isinstance(text, six.string_types) or not len(text):\n return text\n\n if ascii:\n if not hasattr(latinize_text, '_ascii'):\n # Transform to latin, separate accents, decompose, remove\n # symbols, compose, push to ASCII\n latinize_text._ascii = Transliterator.createInstance('Any-Latin; NFKD; [:Symbol:] Remove; [:Nonspacing Mark:] Remove; NFKC; Accents-Any; Latin-ASCII') # noqa\n return latinize_text._ascii.transliterate(text)\n\n if not hasattr(latinize_text, '_tr'):\n latinize_text._tr = Transliterator.createInstance('Any-Latin')\n return latinize_text._tr.transliterate(text)\n"
] |
# coding: utf-8
"""
Transliterate the given text to the latin script.
This attempts to convert a given text to latin script using the
closest match of characters vis a vis the original script.
Transliteration requires an extensive unicode mapping. Since all
Python implementations are either GPL-licensed (and thus more
restrictive than this library) or come with a massive C code
dependency, this module requires neither but will use a package
if it is installed.
"""
import six
from icu import Transliterator
def latinize_text(text, ascii=False):
"""Transliterate the given text to the latin script.
This attempts to convert a given text to latin script using the
closest match of characters vis a vis the original script.
"""
if text is None or not isinstance(text, six.string_types) or not len(text):
return text
if ascii:
if not hasattr(latinize_text, '_ascii'):
# Transform to latin, separate accents, decompose, remove
# symbols, compose, push to ASCII
latinize_text._ascii = Transliterator.createInstance('Any-Latin; NFKD; [:Symbol:] Remove; [:Nonspacing Mark:] Remove; NFKC; Accents-Any; Latin-ASCII') # noqa
return latinize_text._ascii.transliterate(text)
if not hasattr(latinize_text, '_tr'):
latinize_text._tr = Transliterator.createInstance('Any-Latin')
return latinize_text._tr.transliterate(text)
|
UDST/osmnet
|
osmnet/config.py
|
format_check
|
python
|
def format_check(settings):
valid_keys = ['logs_folder', 'log_file', 'log_console', 'log_name',
'log_filename', 'keep_osm_tags']
for key in list(settings.keys()):
assert key in valid_keys, \
('{} not found in list of valid configuation keys').format(key)
assert isinstance(key, str), ('{} must be a string').format(key)
if key == 'keep_osm_tags':
assert isinstance(settings[key], list), \
('{} must be a list').format(key)
for value in settings[key]:
assert all(isinstance(element, str) for element in value), \
'all elements must be a string'
if key == 'log_file' or key == 'log_console':
assert isinstance(settings[key], bool), \
('{} must be boolean').format(key)
|
Check the format of a osmnet_config object.
Parameters
----------
settings : dict
osmnet_config as a dictionary
Returns
-------
Nothing
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/config.py#L2-L30
| null |
class osmnet_config(object):
    """
    A set of configuration variables to initiate the configuration settings
    for osmnet.

    Parameters
    ----------
    logs_folder : str
        location to write log files
    log_file : bool
        if true, save log output to a log file in logs_folder
    log_console : bool
        if true, print log output to the console
    log_name : str
        name of the logger
    log_filename : str
        name of the log file
    keep_osm_tags : list, optional
        list of OpenStreetMap tags to save from way elements and preserve in
        network edge table. If None (the default), a standard set of street
        network tags is used.
    """
    # Immutable default tag set: stored as a tuple so one instance's edits
    # can never leak into the defaults of instances created later. The
    # previous mutable-list default argument was shared across instances.
    _default_keep_osm_tags = ('name', 'ref', 'highway', 'service', 'bridge',
                              'tunnel', 'access', 'oneway', 'toll', 'lanes',
                              'maxspeed', 'hgv', 'hov', 'area', 'width',
                              'est_width', 'junction')

    def __init__(self,
                 logs_folder='logs',
                 log_file=True,
                 log_console=False,
                 log_name='osmnet',
                 log_filename='osmnet',
                 keep_osm_tags=None):
        self.logs_folder = logs_folder
        self.log_file = log_file
        self.log_console = log_console
        self.log_name = log_name
        self.log_filename = log_filename
        # materialize a fresh list per instance for backward compatibility
        # with callers that mutate settings.keep_osm_tags in place
        if keep_osm_tags is None:
            keep_osm_tags = list(self._default_keep_osm_tags)
        self.keep_osm_tags = keep_osm_tags

    def to_dict(self):
        """
        Return a dict representation of an osmnet osmnet_config instance.
        """
        return {'logs_folder': self.logs_folder,
                'log_file': self.log_file,
                'log_console': self.log_console,
                'log_name': self.log_name,
                'log_filename': self.log_filename,
                'keep_osm_tags': self.keep_osm_tags
                }
# instantiate the osmnet configuration object and check format
# (import-time side effect: `settings` is the module-wide configuration
# object read by the rest of osmnet)
settings = osmnet_config()
format_check(settings.to_dict())
|
UDST/osmnet
|
osmnet/config.py
|
osmnet_config.to_dict
|
python
|
def to_dict(self):
    """Return the configuration values as a plain dictionary."""
    keys = ('logs_folder', 'log_file', 'log_console', 'log_name',
            'log_filename', 'keep_osm_tags')
    return {key: getattr(self, key) for key in keys}
|
Return a dict representation of an osmnet osmnet_config instance.
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/config.py#L73-L83
| null |
class osmnet_config(object):
    """
    A set of configuration variables to initiate the configuration settings
    for osmnet.
    Parameters
    ----------
    logs_folder : str
        location to write log files
    log_file : bool
        if true, save log output to a log file in logs_folder
    log_console : bool
        if true, print log output to the console
    log_name : str
        name of the logger
    log_filename : str
        name of the log file
    keep_osm_tags : list
        list of OpenStreetMap tags to save from way elements and preserve in
        network edge table
    """
    def __init__(self,
                 logs_folder='logs',
                 log_file=True,
                 log_console=False,
                 log_name='osmnet',
                 log_filename='osmnet',
                 keep_osm_tags=['name', 'ref', 'highway', 'service', 'bridge',
                                'tunnel', 'access', 'oneway', 'toll', 'lanes',
                                'maxspeed', 'hgv', 'hov', 'area', 'width',
                                'est_width', 'junction']):
        # NOTE(review): keep_osm_tags is a mutable (list) default, so the
        # same list object is shared by every instance constructed with the
        # default; mutating it in place on one instance silently changes the
        # default seen by instances created later.
        self.logs_folder = logs_folder
        self.log_file = log_file
        self.log_console = log_console
        self.log_name = log_name
        self.log_filename = log_filename
        self.keep_osm_tags = keep_osm_tags
|
UDST/osmnet
|
osmnet/utils.py
|
great_circle_dist
|
python
|
def great_circle_dist(lat1, lon1, lat2, lon2):
    """
    Haversine great-circle distance, in meters, between two lat/lon points.

    Parameters
    ----------
    lat1, lon1, lat2, lon2 : float
        Coordinates of the two points, in decimal degrees.

    Returns
    -------
    float
        Distance between the two points, in meters.
    """
    earth_radius_m = 6372795  # meters
    phi1, lam1, phi2, lam2 = (math.radians(v)
                              for v in (lat1, lon1, lat2, lon2))
    half_dphi = (phi2 - phi1) / 2
    half_dlam = (lam2 - lam1) / 2
    # haversine formula:
    # http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula
    hav = (math.sin(half_dphi) ** 2
           + math.cos(phi1) * math.cos(phi2) * math.sin(half_dlam) ** 2)
    return 2 * earth_radius_m * math.asin(math.sqrt(hav))
|
Get the distance (in meters) between two lat/lon points
via the Haversine formula.
Parameters
----------
lat1, lon1, lat2, lon2 : float
Latitude and longitude in degrees.
Returns
-------
dist : float
Distance in meters.
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/utils.py#L17-L49
| null |
# The following logging functions were modified from the osmnx library and
# used with permission from the author Geoff Boeing:
# log, get_logger: https://github.com/gboeing/osmnx/blob/master/osmnx/utils.py
from __future__ import division
import math
import logging as lg
import unicodedata
import sys
import datetime as dt
import os
from osmnet import config
def log(message, level=None, name=None, filename=None):
    """
    Write a message to the log file and/or print to the console.
    Parameters
    ----------
    message : string
        the content of the message to log
    level : int
        one of the logger.level constants
    name : string
        name of the logger
    filename : string
        name of the log file
    Returns
    -------
    None
    """
    if level is None:
        level = lg.INFO
    if name is None:
        name = config.settings.log_name
    if filename is None:
        filename = config.settings.log_filename
    if config.settings.log_file:
        # get the current logger or create a new one then log message at
        # requested level
        logger = get_logger(level=level, name=name, filename=filename)
        # only the four standard levels are dispatched; messages at any
        # other level are silently dropped from the file log
        if level == lg.DEBUG:
            logger.debug(message)
        elif level == lg.INFO:
            logger.info(message)
        elif level == lg.WARNING:
            logger.warning(message)
        elif level == lg.ERROR:
            logger.error(message)
    # if logging to console is turned on, convert message to ascii and print
    # to the console only
    if config.settings.log_console: # pragma: no cover
        # capture current stdout, then switch it to the console, print the
        # message, then switch back to what had been the stdout
        # this prevents logging to notebook - instead, it goes to console
        standard_out = sys.stdout
        sys.stdout = sys.__stdout__
        # convert message to ascii for proper console display in windows
        # terminals
        message = unicodedata.normalize(
            'NFKD', str(message)).encode('ascii', errors='replace').decode()
        print(message)
        sys.stdout = standard_out
    # otherwise print out standard statement
    else:
        print(message)
def get_logger(level=None, name=None, filename=None):
    """
    Create a logger or return the current one if already instantiated.
    Parameters
    ----------
    level : int
        one of the logger.level constants
    name : string
        name of the logger
    filename : string
        name of the log file
    Returns
    -------
    logger : logger.logger
    """
    if level is None:
        # NOTE(review): config.settings.log_level is not among the
        # attributes defined by osmnet_config in this file, so calling
        # get_logger(level=None) directly would raise AttributeError;
        # log() always passes an explicit level. Confirm before relying on
        # the default.
        level = config.settings.log_level
    if name is None:
        name = config.settings.log_name
    if filename is None:
        filename = config.settings.log_filename
    logger = lg.getLogger(name)
    # if a logger with this name is not already established
    if not getattr(logger, 'handler_set', None):
        # one log file per logger per day
        todays_date = dt.datetime.today().strftime('%Y_%m_%d')
        log_filename = '{}/{}_{}.log'.format(config.settings.logs_folder,
                                             filename, todays_date)
        if not os.path.exists(config.settings.logs_folder):
            os.makedirs(config.settings.logs_folder)
        # create file handler and log formatter and establish settings
        handler = lg.FileHandler(log_filename, encoding='utf-8')
        formatter = lg.Formatter(
            '%(asctime)s %(levelname)s %(name)s %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(level)
        # custom flag marking this logger as configured, so repeat calls
        # do not attach duplicate handlers
        logger.handler_set = True
    return logger
|
UDST/osmnet
|
osmnet/load.py
|
osm_filter
|
python
|
def osm_filter(network_type):
    """
    Build the Overpass API way filter for the requested OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        if ``network_type`` is not a recognized network type
    """
    # 'drive': only roads drivable by normal 2-wheel-drive passenger
    # vehicles, private and public; excludes un-drivable ways and service
    # roads tagged as parking, driveway, or emergency access.
    # 'walk': only roads and pathways that allow pedestrian access, private
    # and public; excludes limited-access roadways but allows service roads.
    filters = {
        'drive': ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                  '|track|proposed|construction|bridleway|abandoned'
                  '|platform|raceway|service"]'
                  '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                  '["service"!~"parking|parking_aisle|driveway'
                  '|emergency_access"]'),
        'walk': ('["highway"!~"motor|proposed|construction|abandoned'
                 '|platform|raceway"]["foot"!~"no"]'
                 '["pedestrians"!~"no"]'),
    }
    if network_type not in filters:
        raise ValueError('unknown network_type "{}"'.format(network_type))
    return filters[network_type]
|
Create a filter to query Overpass API for the specified OSM network type.
Parameters
----------
network_type : string, {'walk', 'drive'} denoting the type of street
network to extract
Returns
-------
osm_filter : string
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L29-L67
| null |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter
    response_jsons_list = []
    response_jsons = []
    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)
    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.
    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})
    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()
    # loop through each polygon in the geometry
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        # NOTE(review): shapely .bounds returns (minx, miny, maxx, maxy), so
        # lng_max here actually receives min-x. The same swapped naming
        # convention is used consistently when formatting the query below,
        # so confirm before renaming anything.
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
    # stitch together individual json results
    # (`json` here is a loop variable holding one response dict; it shadows
    # the conventional `json` module name)
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass
    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # de-duplicate nodes and ways separately by their OSM id, keeping
        # the first occurrence of each
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))
    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response
    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error
    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'
    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)
    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    # bug fix: the (?s) inline flag must appear at the start of the pattern;
    # mid-pattern global flags raise an error on Python 3.11+
    domain = re.findall(r'(?s)//(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))
    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: `level=lg.WARNING` was previously passed to
            # str.format() (where extra kwargs are silently ignored) rather
            # than to log(), so server remarks were logged at INFO
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))
    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait until
    next slot is available.
    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value
    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # NOTE(review): assumes the slot information is always on the 4th
        # line of the status page; positional parsing is fragile if the
        # server changes its status format
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration
    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required
        # (available_slots itself is assigned but never read; the int()
        # conversion is used only as a numeric-token test)
        available_slots = int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            # wait at least one second
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).
    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))
    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # let the linear length of the quadrats (with which to subdivide the
    # geometry) be the square root of max area size
    quadrat_width = math.sqrt(max_query_area_size)
    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')
    # if geometry is a MultiPolygon OR a single Polygon whose area exceeds
    # the max size, get the convex hull around the geometry
    if isinstance(
            geometry, MultiPolygon) or \
            (isinstance(
                geometry, Polygon) and geometry.area > max_query_area_size):
        geometry = geometry.convex_hull
    # if geometry area exceeds max size, subdivide it into smaller sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)
    # callers iterate the result (e.g. osm_net_download), so always return a
    # MultiPolygon even when there is only a single polygon
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])
    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.
    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount
    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # create n evenly spaced points between the min and max x and y bounds
    # NOTE(review): shapely .bounds returns (minx, miny, maxx, maxy), so
    # lng_max/lng_min here hold min-x/max-x respectively. The swapped naming
    # matches the rest of this module and the arithmetic below is consistent
    # with it - confirm before renaming.
    lng_max, lat_min, lng_min, lat_max = geometry.bounds
    x_num = math.ceil((lng_min-lng_max) / quadrat_width) + 1
    y_num = math.ceil((lat_max-lat_min) / quadrat_width) + 1
    x_points = np.linspace(lng_max, lng_min, num=max(x_num, min_num))
    y_points = np.linspace(lat_min, lat_max, num=max(y_num, min_num))
    # create a quadrat grid of lines at each of the evenly spaced points
    vertical_lines = [LineString([(x, y_points[0]), (x, y_points[-1])])
                      for x in x_points]
    horizont_lines = [LineString([(x_points[0], y), (x_points[-1], y)])
                      for y in y_points]
    lines = vertical_lines + horizont_lines
    # buffer each line to distance of the quadrat width divided by 1 billion,
    # take their union, then cut geometry into pieces by these quadrats
    buffer_size = quadrat_width * buffer_amount
    lines_buffered = [line.buffer(buffer_size) for line in lines]
    quadrats = unary_union(lines_buffered)
    multipoly = geometry.difference(quadrats)
    return multipoly
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa
    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : dict
        the starting coordinate reference system of the passed-in geometry
        (callers in this module pass proj4-style dicts such as
        {'init': 'epsg:4326'})
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone
    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the single geometry in a one-row GeoDataFrame so project_gdf()
    # can do the actual projection work
    gdf = gpd.GeoDataFrame()
    gdf.crs = crs
    gdf.name = 'geometry to project'
    gdf['geometry'] = None
    gdf.loc[0, 'geometry'] = geometry
    gdf_proj = project_gdf(gdf, to_latlong=to_latlong)
    geometry_proj = gdf_proj['geometry'].iloc[0]
    return geometry_proj, gdf_proj.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.
    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took
    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        # NOTE(review): targets the NAD83/GRS80 datum rather than WGS84 --
        # confirm this is intentional for non-North-American data
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Convert one OSM node element from the downloaded JSON into a flat dict
    suitable for a Pandas DataFrame row.
    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json
    Returns
    -------
    node : dict
    """
    node = {'id': e['id'],
            'lat': e['lat'],
            'lon': e['lon']}
    # copy over only the OSM tags the configuration asks to keep
    if 'tags' in e and e['tags'] is not np.nan:
        wanted = config.settings.keep_osm_tags
        node.update((tag, value) for tag, value in e['tags'].items()
                    if tag in wanted)
    return node
def process_way(e):
    """
    Convert one OSM way element from the downloaded JSON into a way dict
    plus a list of way-node link dicts, suitable for Pandas DataFrames.
    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json
    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}
    # copy over only the OSM tags the configuration asks to keep
    if 'tags' in e and e['tags'] is not np.nan:
        wanted = config.settings.keep_osm_tags
        way.update((tag, value) for tag, value in e['tags'].items()
                   if tag in wanted)
    # one link record per node that makes up the way, in way order
    waynodes = [{'way_id': e['id'], 'node_id': node_id}
                for node_id in e['nodes']]
    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of ways and way-nodes.
    Parameters
    ----------
    data : dict
        Result of an OSM query.
    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    elements = data['elements']
    if not elements:
        raise RuntimeError('OSM query results contain no data.')
    node_records = []
    way_records = []
    waynode_records = []
    # split the raw elements into node rows, way rows, and way-node links
    for element in elements:
        if element['type'] == 'node':
            node_records.append(process_node(element))
        elif element['type'] == 'way':
            way, waynodes = process_way(element)
            way_records.append(way)
            waynode_records.extend(waynodes)
    nodes = pd.DataFrame.from_records(node_records, index='id')
    ways = pd.DataFrame.from_records(way_records, index='id')
    waynodes = pd.DataFrame.from_records(waynode_records, index='way_id')
    return (nodes, ways, waynodes)
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download the raw OSM JSON, then parse it into node/way/waynode tables
    response_json = osm_net_download(
        lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max,
        network_type=network_type, timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(response_json)
def intersection_nodes(waynodes):
    """
    Returns a set of all the nodes that appear in 2 or more ways.
    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.
    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    node_ids = waynodes['node_id']
    # keep=False marks every occurrence of any repeated node id
    repeated = node_ids[node_ids.duplicated(keep=False)]
    return set(repeated.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.
    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_id'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.
    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()
    # yield consecutive (current, next) element pairs of a sequence
    def pairwise(l):
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    intersections = intersection_nodes(waynodes)
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    # NOTE(review): the loop variable `id` shadows the builtin id()
    for id, row in ways.iterrows():
        # restrict each way to the nodes it shares with other ways, so
        # edges run between intersections only
        nodes_in_way = waymap.get_group(id).node_id.values
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance in meters, rounded to 6 decimals
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # copy the way's preserved OSM tags onto the edge record;
                # ways without a given tag are simply skipped
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                # for one-way networks, add the reverse edge explicitly
                if not two_way:
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        # unpack using the same (lng_max, lat_min, lng_min, lat_max)
        # ordering documented for the bbox parameter above
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)
    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # rename to the x/y and from/to column names Pandana expects
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
osm_net_download
|
python
|
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If None,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units, ie 50km x 50km in
        area if units are meters)
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema, e.g. '["highway"="service"]'

    Returns
    -------
    response_json : dict
        de-duplicated OSM node and way elements under the 'elements' key
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter
    response_jsons_list = []
    response_jsons = []
    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)
    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.
    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})
    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()
    # loop through each polygon in the geometry, issuing one Overpass
    # request per sub-polygon
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                        timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
    # stitch together individual json results
    # NOTE(review): the loop variable shadows the name 'json'; harmless
    # here since the json module is not imported in this file — confirm
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass
    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        # NOTE(review): query_str is only bound if at least one request
        # ran; an empty subdivided geometry would raise NameError here
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        # de-duplicate nodes and ways separately (OSM node and way ids are
        # independent id spaces), keeping the first occurrence of each id
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))
    return {'elements': response_jsons}
|
Download OSM ways and nodes within a bounding box from the Overpass API.
Parameters
----------
lat_min : float
southern latitude of bounding box
lng_min : float
eastern longitude of bounding box
lat_max : float
northern latitude of bounding box
lng_max : float
western longitude of bounding box
network_type : string
Specify the network type where value of 'walk' includes roadways
where pedestrians are allowed and pedestrian
pathways and 'drive' includes driveable roadways.
timeout : int
the timeout interval for requests and to pass to Overpass API
memory : int
server memory allocation size for the query, in bytes. If none,
server will use its default allocation size
max_query_area_size : float
max area for any part of the geometry, in the units the geometry is
in: any polygon bigger will get divided up for multiple queries to
Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
area, if units are meters))
custom_osm_filter : string, optional
specify custom arguments for the way["highway"] query to OSM. Must
follow Overpass API schema. For
example to request highway ways that are service roads use:
'["highway"="service"]'
Returns
-------
response_json : dict
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L70-L200
|
[
"def log(message, level=None, name=None, filename=None):\n \"\"\"\n Write a message to the log file and/or print to the console.\n\n Parameters\n ----------\n message : string\n the content of the message to log\n level : int\n one of the logger.level constants\n name : string\n name of the logger\n filename : string\n name of the log file\n\n Returns\n -------\n None\n \"\"\"\n\n if level is None:\n level = lg.INFO\n if name is None:\n name = config.settings.log_name\n if filename is None:\n filename = config.settings.log_filename\n\n if config.settings.log_file:\n # get the current logger or create a new one then log message at\n # requested level\n logger = get_logger(level=level, name=name, filename=filename)\n if level == lg.DEBUG:\n logger.debug(message)\n elif level == lg.INFO:\n logger.info(message)\n elif level == lg.WARNING:\n logger.warning(message)\n elif level == lg.ERROR:\n logger.error(message)\n\n # if logging to console is turned on, convert message to ascii and print\n # to the console only\n if config.settings.log_console: # pragma: no cover\n # capture current stdout, then switch it to the console, print the\n # message, then switch back to what had been the stdout\n # this prevents logging to notebook - instead, it goes to console\n standard_out = sys.stdout\n sys.stdout = sys.__stdout__\n\n # convert message to ascii for proper console display in windows\n # terminals\n message = unicodedata.normalize(\n 'NFKD', str(message)).encode('ascii', errors='replace').decode()\n print(message)\n sys.stdout = standard_out\n # otherwise print out standard statement\n else:\n print(message)\n",
"def osm_filter(network_type):\n \"\"\"\n Create a filter to query Overpass API for the specified OSM network type.\n\n Parameters\n ----------\n network_type : string, {'walk', 'drive'} denoting the type of street\n network to extract\n\n Returns\n -------\n osm_filter : string\n \"\"\"\n filters = {}\n\n # drive: select only roads that are drivable by normal 2 wheel drive\n # passenger vehicles both private and public\n # roads. Filter out un-drivable roads and service roads tagged as parking,\n # driveway, or emergency-access\n filters['drive'] = ('[\"highway\"!~\"cycleway|footway|path|pedestrian|steps'\n '|track|proposed|construction|bridleway|abandoned'\n '|platform|raceway|service\"]'\n '[\"motor_vehicle\"!~\"no\"][\"motorcar\"!~\"no\"]'\n '[\"service\"!~\"parking|parking_aisle|driveway'\n '|emergency_access\"]')\n\n # walk: select only roads and pathways that allow pedestrian access both\n # private and public pathways and roads.\n # Filter out limited access roadways and allow service roads\n filters['walk'] = ('[\"highway\"!~\"motor|proposed|construction|abandoned'\n '|platform|raceway\"][\"foot\"!~\"no\"]'\n '[\"pedestrians\"!~\"no\"]')\n\n if network_type in filters:\n osm_filter = filters[network_type]\n else:\n raise ValueError('unknown network_type \"{}\"'.format(network_type))\n\n return osm_filter\n",
"def project_geometry(geometry, crs, to_latlong=False):\n \"\"\"\n Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa\n\n Parameters\n ----------\n geometry : shapely Polygon or MultiPolygon\n the geometry to project\n crs : int\n the starting coordinate reference system of the passed-in geometry\n to_latlong : bool\n if True, project from crs to WGS84, if False, project\n from crs to local UTM zone\n\n Returns\n -------\n geometry_proj, crs : tuple (projected shapely geometry, crs of the\n projected geometry)\n \"\"\"\n gdf = gpd.GeoDataFrame()\n gdf.crs = crs\n gdf.name = 'geometry to project'\n gdf['geometry'] = None\n gdf.loc[0, 'geometry'] = geometry\n gdf_proj = project_gdf(gdf, to_latlong=to_latlong)\n geometry_proj = gdf_proj['geometry'].iloc[0]\n return geometry_proj, gdf_proj.crs\n",
"def consolidate_subdivide_geometry(geometry, max_query_area_size):\n \"\"\"\n Consolidate a geometry into a convex hull, then subdivide it into\n smaller sub-polygons if its area exceeds max size (in geometry's units).\n\n Parameters\n ----------\n geometry : shapely Polygon or MultiPolygon\n the geometry to consolidate and subdivide\n max_query_area_size : float\n max area for any part of the geometry, in the units the geometry is\n in: any polygon bigger will get divided up for multiple queries to\n the Overpass API (default is 50,000 * 50,000 units\n (ie, 50km x 50km in area, if units are meters))\n\n Returns\n -------\n geometry : Polygon or MultiPolygon\n \"\"\"\n\n # let the linear length of the quadrats (with which to subdivide the\n # geometry) be the square root of max area size\n quadrat_width = math.sqrt(max_query_area_size)\n\n if not isinstance(geometry, (Polygon, MultiPolygon)):\n raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')\n\n # if geometry is a MultiPolygon OR a single Polygon whose area exceeds\n # the max size, get the convex hull around the geometry\n if isinstance(\n geometry, MultiPolygon) or \\\n (isinstance(\n geometry, Polygon) and geometry.area > max_query_area_size):\n geometry = geometry.convex_hull\n\n # if geometry area exceeds max size, subdivide it into smaller sub-polygons\n if geometry.area > max_query_area_size:\n geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)\n\n if isinstance(geometry, Polygon):\n geometry = MultiPolygon([geometry])\n\n return geometry\n",
"def overpass_request(data, pause_duration=None, timeout=180,\n error_pause_duration=None):\n \"\"\"\n Send a request to the Overpass API via HTTP POST and return the\n JSON response\n\n Parameters\n ----------\n data : dict or OrderedDict\n key-value pairs of parameters to post to Overpass API\n pause_duration : int\n how long to pause in seconds before requests, if None, will query\n Overpass API status endpoint\n to find when next slot is available\n timeout : int\n the timeout interval for the requests library\n error_pause_duration : int\n how long to pause in seconds before re-trying requests if error\n\n Returns\n -------\n response_json : dict\n \"\"\"\n\n # define the Overpass API URL, then construct a GET-style URL\n url = 'http://www.overpass-api.de/api/interpreter'\n\n start_time = time.time()\n log('Posting to {} with timeout={}, \"{}\"'.format(url, timeout, data))\n response = requests.post(url, data=data, timeout=timeout)\n\n # get the response size and the domain, log result\n size_kb = len(response.content) / 1000.\n domain = re.findall(r'//(?s)(.*?)/', url)[0]\n log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'\n .format(size_kb, domain, time.time()-start_time))\n\n try:\n response_json = response.json()\n if 'remark' in response_json:\n log('Server remark: \"{}\"'.format(response_json['remark'],\n level=lg.WARNING))\n\n except Exception:\n # 429 = 'too many requests' and 504 = 'gateway timeout' from server\n # overload. handle these errors by recursively\n # calling overpass_request until a valid response is achieved\n if response.status_code in [429, 504]:\n # pause for error_pause_duration seconds before re-trying request\n if error_pause_duration is None:\n error_pause_duration = get_pause_duration()\n log('Server at {} returned status code {} and no JSON data. 
'\n 'Re-trying request in {:.2f} seconds.'\n .format(domain, response.status_code, error_pause_duration),\n level=lg.WARNING)\n time.sleep(error_pause_duration)\n response_json = overpass_request(data=data,\n pause_duration=pause_duration,\n timeout=timeout)\n\n # else, this was an unhandled status_code, throw an exception\n else:\n log('Server at {} returned status code {} and no JSON data'\n .format(domain, response.status_code), level=lg.ERROR)\n raise Exception('Server returned no JSON data.\\n{} {}\\n{}'\n .format(response, response.reason, response.text))\n\n return response_json\n"
] |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Create a filter to query Overpass API for the specified OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string
    """
    # drive: roads drivable by normal 2-wheel-drive passenger vehicles,
    # both private and public. Excludes un-drivable ways and service roads
    # tagged as parking, driveway, or emergency-access.
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: roads and pathways that allow pedestrian access, both private
    # and public. Excludes limited-access roadways; allows service roads.
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')
    filters = {'drive': drive_filter, 'walk': walk_filter}
    if network_type not in filters:
        raise ValueError('unknown network_type "{}"'.format(network_type))
    return filters[network_type]
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests; if None, the Overpass
        API status endpoint is queried to find when the next slot is
        available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: 'level' was previously passed to str.format() (where
            # it was silently ignored) instead of to log(), so server
            # remarks were not actually logged at WARNING level
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)

    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)

        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))

    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if the server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the fourth line of the status response describes slot availability
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration
    try:
        # if the first token is numeric, it indicates the number of slots
        # available and no wait is required. Only the success/failure of
        # int() matters here (the unused 'available_slots' local was
        # removed); a non-numeric token raises and falls through to the
        # token-specific handling below.
        int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until that UTC time, but at least 1 second
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds the max size (in the
    geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry
        is in; any polygon bigger will get divided up for multiple queries
        to the Overpass API

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # the linear length of each quadrat is the square root of the max area
    quadrat_width = math.sqrt(max_query_area_size)

    # any MultiPolygon, or a single Polygon whose area is too large, is
    # first consolidated into its convex hull
    if isinstance(geometry, MultiPolygon) \
            or geometry.area > max_query_area_size:
        geometry = geometry.convex_hull

    # if the (possibly consolidated) geometry still exceeds the max size,
    # cut it into smaller sub-polygons with a quadrat grid
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    # always return a MultiPolygon so callers can iterate uniformly
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon into sub-polygons of a specified size
    by cutting it along a grid of quadrat lines.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # evenly spaced cut positions between the geometry's min and max bounds
    min_x, min_y, max_x, max_y = geometry.bounds
    num_x = math.ceil((max_x - min_x) / quadrat_width) + 1
    num_y = math.ceil((max_y - min_y) / quadrat_width) + 1
    x_points = np.linspace(min_x, max_x, num=max(num_x, min_num))
    y_points = np.linspace(min_y, max_y, num=max(num_y, min_num))

    # build the grid of vertical and horizontal quadrat lines
    vertical = [LineString([(x, y_points[0]), (x, y_points[-1])])
                for x in x_points]
    horizontal = [LineString([(x_points[0], y), (x_points[-1], y)])
                  for y in y_points]

    # buffer each line by a tiny fraction of the quadrat width, union them,
    # then cut the geometry into pieces along the buffered grid
    padding = quadrat_width * buffer_amount
    buffered_grid = unary_union([line.buffer(padding)
                                 for line in vertical + horizontal])
    return geometry.difference(buffered_grid)
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : dict or int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84; if False, project from crs to
        the local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple
        the projected shapely geometry and the crs of the projected
        geometry
    """
    # wrap the bare geometry in a single-row GeoDataFrame so project_gdf
    # can perform the reprojection
    wrapper = gpd.GeoDataFrame()
    wrapper.crs = crs
    wrapper.name = 'geometry to project'
    wrapper['geometry'] = None
    wrapper.loc[0, 'geometry'] = geometry
    projected = project_gdf(wrapper, to_latlong=to_latlong)
    return projected['geometry'].iloc[0], projected.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log the projection that was performed

    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        # give unnamed frames a placeholder name so logging can refer to it
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it
        # NOTE(review): assumes a dict-style crs ('proj' in gdf.crs);
        # newer geopandas/pyproj use CRS objects — confirm library versions
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project (standard 6-degree-wide UTM zones)
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        # give unnamed frames a placeholder name so logging can refer to it
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # carry the source frame's name over to the projected result
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Convert one OSM node element from the downloaded JSON into a flat dict
    suitable for a Pandas DataFrame row.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    record = {'id': e['id'],
              'lat': e['lat'],
              'lon': e['lon']}
    # copy over only the whitelisted OSM tags; skip missing/NaN tag sets
    if 'tags' in e and e['tags'] is not np.nan:
        for tag_name, tag_value in list(e['tags'].items()):
            if tag_name in config.settings.keep_osm_tags:
                record[tag_name] = tag_value
    return record
def process_way(e):
    """
    Convert one OSM way element from the downloaded JSON into a metadata
    dict plus the list of way-to-node membership records.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}
    # copy over only the whitelisted OSM tags; skip missing/NaN tag sets
    if 'tags' in e and e['tags'] is not np.nan:
        for tag_name, tag_value in list(e['tags'].items()):
            if tag_name in config.settings.keep_osm_tags:
                way[tag_name] = tag_value
    # one membership record per node that makes up this way
    waynodes = [{'way_id': e['id'], 'node_id': node_id}
                for node_id in e['nodes']]
    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of nodes, ways and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    elements = data['elements']
    if len(elements) == 0:
        raise RuntimeError('OSM query results contain no data.')

    node_records = []
    way_records = []
    waynode_records = []
    # split the raw elements into node records and way (+membership) records
    for element in elements:
        element_type = element['type']
        if element_type == 'node':
            node_records.append(process_node(element))
        elif element_type == 'way':
            way, waynodes = process_way(element)
            way_records.append(way)
            waynode_records.extend(waynodes)

    return (pd.DataFrame.from_records(node_records, index='id'),
            pd.DataFrame.from_records(way_records, index='id'),
            pd.DataFrame.from_records(waynode_records, index='way_id'))
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data within a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        network type to extract: 'walk' includes roadways where pedestrians
        are allowed plus pedestrian pathways; 'drive' includes driveable
        roadways
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes; if None the
        server default is used
    max_query_area_size : float
        max area for any part of the geometry, in the geometry's units;
        larger polygons are subdivided into multiple Overpass queries
    custom_osm_filter : string, optional
        custom arguments for the way["highway"] query to OSM, following
        the Overpass API schema

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download the raw JSON, then parse it into the three DataFrames
    raw_osm = osm_net_download(
        lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max,
        network_type=network_type, timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm)
def intersection_nodes(waynodes):
    """
    Return the set of node IDs that appear in two or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # count how many ways each node belongs to, then keep the repeats
    occurrences = waynodes.node_id.value_counts()
    repeated = occurrences[occurrences > 1]
    return set(repeated.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()

    def pairwise(l):
        # consecutive (a, b) pairs from a sequence
        return zip(islice(l, 0, len(l)), islice(l, 1, None))

    def make_pair(from_node, to_node, distance, way_row):
        # build one edge record, copying over any whitelisted OSM tags
        # present on the way (factored out of the previously duplicated
        # forward/reverse record-building code)
        record = {'from_id': from_node,
                  'to_id': to_node,
                  'distance': distance}
        for tag in config.settings.keep_osm_tags:
            try:
                record[tag] = way_row[tag]
            except KeyError:
                pass
        return record

    intersections = intersection_nodes(waynodes)
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    # loop variable renamed from 'id' to 'way_id' to avoid shadowing the
    # builtin id()
    for way_id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(way_id).node_id.values
        # only keep nodes shared with at least one other way (intersections)
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node == to_node:
                continue
            fn = nodes.loc[from_node]
            tn = nodes.loc[to_node]
            # great-circle distance in meters, rounded to 6 decimal places
            distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
            pairs.append(make_pair(from_node, to_node, distance, row))
            if not two_way:
                # add the reverse edge explicitly for one-way networks
                pairs.append(make_pair(to_node, from_node, distance, row))

    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    if bbox is not None:
        # bbox and the four scalar coordinates are mutually exclusive inputs
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        # unpack using this module's (lng_max, lat_min, lng_min, lat_max)
        # bbox ordering convention
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    # NOTE(review): these checks reject ints - coordinates must be floats
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    # download OSM data in the bbox and parse into node/way/waynode tables
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    # build the edge table of intersection-node pairs with distances
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)
    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    # rename to the x/y and from/to column names Pandana expects
    nodesfinal = nodesfinal[['lon', 'lat']]
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
overpass_request
|
python
|
def overpass_request(data, pause_duration=None, timeout=180,
error_pause_duration=None):
# define the Overpass API URL, then construct a GET-style URL
url = 'http://www.overpass-api.de/api/interpreter'
start_time = time.time()
log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
response = requests.post(url, data=data, timeout=timeout)
# get the response size and the domain, log result
size_kb = len(response.content) / 1000.
domain = re.findall(r'//(?s)(.*?)/', url)[0]
log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
.format(size_kb, domain, time.time()-start_time))
try:
response_json = response.json()
if 'remark' in response_json:
log('Server remark: "{}"'.format(response_json['remark'],
level=lg.WARNING))
except Exception:
# 429 = 'too many requests' and 504 = 'gateway timeout' from server
# overload. handle these errors by recursively
# calling overpass_request until a valid response is achieved
if response.status_code in [429, 504]:
# pause for error_pause_duration seconds before re-trying request
if error_pause_duration is None:
error_pause_duration = get_pause_duration()
log('Server at {} returned status code {} and no JSON data. '
'Re-trying request in {:.2f} seconds.'
.format(domain, response.status_code, error_pause_duration),
level=lg.WARNING)
time.sleep(error_pause_duration)
response_json = overpass_request(data=data,
pause_duration=pause_duration,
timeout=timeout)
# else, this was an unhandled status_code, throw an exception
else:
log('Server at {} returned status code {} and no JSON data'
.format(domain, response.status_code), level=lg.ERROR)
raise Exception('Server returned no JSON data.\n{} {}\n{}'
.format(response, response.reason, response.text))
return response_json
|
Send a request to the Overpass API via HTTP POST and return the
JSON response
Parameters
----------
data : dict or OrderedDict
key-value pairs of parameters to post to Overpass API
pause_duration : int
how long to pause in seconds before requests, if None, will query
Overpass API status endpoint
to find when next slot is available
timeout : int
the timeout interval for the requests library
error_pause_duration : int
how long to pause in seconds before re-trying requests if error
Returns
-------
response_json : dict
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L203-L270
|
[
"def log(message, level=None, name=None, filename=None):\n \"\"\"\n Write a message to the log file and/or print to the console.\n\n Parameters\n ----------\n message : string\n the content of the message to log\n level : int\n one of the logger.level constants\n name : string\n name of the logger\n filename : string\n name of the log file\n\n Returns\n -------\n None\n \"\"\"\n\n if level is None:\n level = lg.INFO\n if name is None:\n name = config.settings.log_name\n if filename is None:\n filename = config.settings.log_filename\n\n if config.settings.log_file:\n # get the current logger or create a new one then log message at\n # requested level\n logger = get_logger(level=level, name=name, filename=filename)\n if level == lg.DEBUG:\n logger.debug(message)\n elif level == lg.INFO:\n logger.info(message)\n elif level == lg.WARNING:\n logger.warning(message)\n elif level == lg.ERROR:\n logger.error(message)\n\n # if logging to console is turned on, convert message to ascii and print\n # to the console only\n if config.settings.log_console: # pragma: no cover\n # capture current stdout, then switch it to the console, print the\n # message, then switch back to what had been the stdout\n # this prevents logging to notebook - instead, it goes to console\n standard_out = sys.stdout\n sys.stdout = sys.__stdout__\n\n # convert message to ascii for proper console display in windows\n # terminals\n message = unicodedata.normalize(\n 'NFKD', str(message)).encode('ascii', errors='replace').decode()\n print(message)\n sys.stdout = standard_out\n # otherwise print out standard statement\n else:\n print(message)\n",
"def overpass_request(data, pause_duration=None, timeout=180,\n error_pause_duration=None):\n \"\"\"\n Send a request to the Overpass API via HTTP POST and return the\n JSON response\n\n Parameters\n ----------\n data : dict or OrderedDict\n key-value pairs of parameters to post to Overpass API\n pause_duration : int\n how long to pause in seconds before requests, if None, will query\n Overpass API status endpoint\n to find when next slot is available\n timeout : int\n the timeout interval for the requests library\n error_pause_duration : int\n how long to pause in seconds before re-trying requests if error\n\n Returns\n -------\n response_json : dict\n \"\"\"\n\n # define the Overpass API URL, then construct a GET-style URL\n url = 'http://www.overpass-api.de/api/interpreter'\n\n start_time = time.time()\n log('Posting to {} with timeout={}, \"{}\"'.format(url, timeout, data))\n response = requests.post(url, data=data, timeout=timeout)\n\n # get the response size and the domain, log result\n size_kb = len(response.content) / 1000.\n domain = re.findall(r'//(?s)(.*?)/', url)[0]\n log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'\n .format(size_kb, domain, time.time()-start_time))\n\n try:\n response_json = response.json()\n if 'remark' in response_json:\n log('Server remark: \"{}\"'.format(response_json['remark'],\n level=lg.WARNING))\n\n except Exception:\n # 429 = 'too many requests' and 504 = 'gateway timeout' from server\n # overload. handle these errors by recursively\n # calling overpass_request until a valid response is achieved\n if response.status_code in [429, 504]:\n # pause for error_pause_duration seconds before re-trying request\n if error_pause_duration is None:\n error_pause_duration = get_pause_duration()\n log('Server at {} returned status code {} and no JSON data. 
'\n 'Re-trying request in {:.2f} seconds.'\n .format(domain, response.status_code, error_pause_duration),\n level=lg.WARNING)\n time.sleep(error_pause_duration)\n response_json = overpass_request(data=data,\n pause_duration=pause_duration,\n timeout=timeout)\n\n # else, this was an unhandled status_code, throw an exception\n else:\n log('Server at {} returned status code {} and no JSON data'\n .format(domain, response.status_code), level=lg.ERROR)\n raise Exception('Server returned no JSON data.\\n{} {}\\n{}'\n .format(response, response.reason, response.text))\n\n return response_json\n",
"def get_pause_duration(recursive_delay=5, default_duration=10):\n \"\"\"\n Check the Overpass API status endpoint to determine how long to wait until\n next slot is available.\n\n Parameters\n ----------\n recursive_delay : int\n how long to wait between recursive calls if server is currently\n running a query\n default_duration : int\n if fatal error, function falls back on returning this value\n\n Returns\n -------\n pause_duration : int\n \"\"\"\n try:\n response = requests.get('http://overpass-api.de/api/status')\n status = response.text.split('\\n')[3]\n status_first_token = status.split(' ')[0]\n except Exception:\n # if status endpoint cannot be reached or output parsed, log error\n # and return default duration\n log('Unable to query http://overpass-api.de/api/status',\n level=lg.ERROR)\n return default_duration\n\n try:\n # if first token is numeric, it indicates the number of slots\n # available - no wait required\n available_slots = int(status_first_token)\n pause_duration = 0\n except Exception:\n # if first token is 'Slot', it tells you when your slot will be free\n if status_first_token == 'Slot':\n utc_time_str = status.split(' ')[3]\n utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)\n pause_duration = math.ceil(\n (utc_time - dt.datetime.utcnow()).total_seconds())\n pause_duration = max(pause_duration, 1)\n\n # if first token is 'Currently', it is currently running a query so\n # check back in recursive_delay seconds\n elif status_first_token == 'Currently':\n time.sleep(recursive_delay)\n pause_duration = get_pause_duration()\n\n else:\n # any other status is unrecognized - log an error and return\n # default duration\n log('Unrecognized server status: \"{}\"'.format(status),\n level=lg.ERROR)\n return default_duration\n\n return pause_duration\n"
] |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Build the Overpass API way filter string for the requested OSM
    network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        If ``network_type`` is not one of the known types.
    """
    # drive: only roads drivable by a normal two-wheel-drive passenger
    # vehicle, private or public; filters out un-drivable ways and service
    # roads tagged as parking, driveway, or emergency access
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: roads and pathways allowing pedestrian access, private or
    # public; filters out limited-access roadways but keeps service roads
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')
    filters_by_type = {'drive': drive_filter, 'walk': walk_filter}
    if network_type not in filters_by_type:
        raise ValueError('unknown network_type "{}"'.format(network_type))
    return filters_by_type[network_type]
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter
    response_jsons_list = []
    response_jsons = []
    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)
    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.
    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})
    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()
    # loop through each polygon in the geometry, issuing one API request per
    # sub-polygon
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
    # stitch together individual json results
    # (a response without an 'elements' key contributes nothing)
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass
    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        # NOTE(review): the error message shows only the last sub-query's
        # query_str, not every query issued
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # deduplicate by OSM element id, separately for nodes and ways,
        # keeping the first occurrence of each
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))
    # mirror the raw Overpass response shape: {'elements': [...]}
    return {'elements': response_jsons}
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait until
    the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the fourth line of the status page describes this client's slots,
        # e.g. "3 slots available now." or "Slot available after: <time>"
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration
    try:
        # a numeric first token indicates slots are available right now, so
        # no wait is required; the parsed value itself is not needed
        # (fix: dropped the previously unused `available_slots` binding)
        int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until the advertised UTC time, but at least 1 second
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # quadrat edge length is the square root of the max allowed area
    quadrat_width = math.sqrt(max_query_area_size)
    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')
    # any MultiPolygon, or a single Polygon that is already too large, is
    # first consolidated into its convex hull
    if isinstance(geometry, MultiPolygon) or \
            geometry.area > max_query_area_size:
        geometry = geometry.convex_hull
    # after consolidation, cut the geometry into quadrats if still too large
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)
    # always hand back a MultiPolygon so callers can iterate uniformly
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])
    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # shapely bounds are (minx, miny, maxx, maxy)
    min_x, min_y, max_x, max_y = geometry.bounds
    # number of evenly spaced grid points along each axis, at least min_num
    num_x = max(math.ceil((max_x - min_x) / quadrat_width) + 1, min_num)
    num_y = max(math.ceil((max_y - min_y) / quadrat_width) + 1, min_num)
    x_points = np.linspace(min_x, max_x, num=num_x)
    y_points = np.linspace(min_y, max_y, num=num_y)
    # build the grid of cut lines: verticals first, then horizontals
    grid_lines = []
    for x in x_points:
        grid_lines.append(LineString([(x, y_points[0]), (x, y_points[-1])]))
    for y in y_points:
        grid_lines.append(LineString([(x_points[0], y), (x_points[-1], y)]))
    # buffer each line into a very thin strip (quadrat width / ~1 billion),
    # union the strips, and subtract them to slice the geometry into pieces
    buffer_size = quadrat_width * buffer_amount
    buffered_lines = [line.buffer(buffer_size) for line in grid_lines]
    cutter = unary_union(buffered_lines)
    multipoly = geometry.difference(cutter)
    return multipoly
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a single-row GeoDataFrame so project_gdf
    # can perform the CRS transformation
    wrapper = gpd.GeoDataFrame()
    wrapper.crs = crs
    wrapper.name = 'geometry to project'
    wrapper['geometry'] = None
    wrapper.loc[0, 'geometry'] = geometry
    # project the wrapper, then unwrap the single projected geometry
    projected = project_gdf(wrapper, to_latlong=to_latlong)
    return projected['geometry'].iloc[0], projected.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took

    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        # GeoDataFrames built without a name (e.g. fresh instances) get a
        # placeholder so the log message below can reference one
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        # NOTE(review): hard-codes the NAD83/GRS80 datum - confirm this is
        # acceptable for non-North-American extracts
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # carry the source gdf's name over to the projected copy
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Process a node element entry into a dict suitable for going into a
    Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    record = {'id': e['id'],
              'lat': e['lat'],
              'lon': e['lon']}
    # copy over only the whitelisted OSM tags, if any tags are present
    if 'tags' in e and e['tags'] is not np.nan:
        for key, value in e['tags'].items():
            if key in config.settings.keep_osm_tags:
                record[key] = value
    return record
def process_way(e):
    """
    Process a way element entry into a list of dicts suitable for going into
    a Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way_record = {'id': e['id']}
    # copy over only the whitelisted OSM tags, if any tags are present
    if 'tags' in e and e['tags'] is not np.nan:
        for key, value in e['tags'].items():
            if key in config.settings.keep_osm_tags:
                way_record[key] = value
    # one row per node along the way, linking the way id to each node id
    way_links = [{'way_id': e['id'], 'node_id': node} for node in e['nodes']]
    return way_record, way_links
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of ways and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame

    Raises
    ------
    RuntimeError
        If the query result contains no elements.
    """
    elements = data['elements']
    if not elements:
        raise RuntimeError('OSM query results contain no data.')
    node_records = []
    way_records = []
    waynode_records = []
    # split the raw elements into node records and way (+ way-node) records
    for element in elements:
        element_type = element['type']
        if element_type == 'node':
            node_records.append(process_node(element))
        elif element_type == 'way':
            way_record, links = process_way(element)
            way_records.append(way_record)
            waynode_records.extend(links)
    nodes = pd.DataFrame.from_records(node_records, index='id')
    ways = pd.DataFrame.from_records(way_records, index='id')
    waynodes = pd.DataFrame.from_records(waynode_records, index='way_id')
    return (nodes, ways, waynodes)
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download the raw OSM json for the bbox, then parse it into the
    # node/way/way-node DataFrames
    raw_osm_data = osm_net_download(
        lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max,
        network_type=network_type, timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm_data)
def intersection_nodes(waynodes):
    """
    Returns a set of all the nodes that appear in 2 or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # count how many way rows reference each node id; a node referenced
    # more than once lies on multiple ways, i.e. it is an intersection
    node_counts = waynodes.node_id.value_counts()
    repeated = node_counts[node_counts > 1]
    return set(repeated.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()
    def pairwise(l):
        # yields consecutive (current, next) element pairs of the sequence
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    # only nodes shared by two or more ways become graph vertices
    intersections = intersection_nodes(waynodes)
    # group the way->node rows by way id for fast per-way lookup below
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(id).node_id.values
        # keep only intersection nodes; edges run between intersections
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance in meters, rounded to 6 decimals
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # carry along any whitelisted OSM tags present on this way;
                # KeyError means the way lacks that tag - skip it
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                # when edges are directional, also emit the reverse edge
                if not two_way:
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # index edges by (from_id, to_id) for direct pair lookups
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    # a bbox tuple, when supplied, replaces the four individual coordinates
    # NOTE(review): these assert-based validations are stripped when Python
    # runs with -O; consider raising ValueError instead
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    # download and parse the raw OSM data for the bbox
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    # build the edge table from pairs of connected intersection nodes
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)
    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # rename columns to the x/y/from/to schema Pandana expects
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
get_pause_duration
|
python
|
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if the server is currently
        running a query
    default_duration : int
        if a fatal error occurs, fall back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration
    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required
        int(status_first_token)
        pause_duration = 0
    except ValueError:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            # bug fix: propagate the caller's settings into the recursive
            # call instead of silently reverting to the defaults
            pause_duration = get_pause_duration(recursive_delay,
                                                default_duration)
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
|
Check the Overpass API status endpoint to determine how long to wait until
next slot is available.
Parameters
----------
recursive_delay : int
how long to wait between recursive calls if server is currently
running a query
default_duration : int
if fatal error, function falls back on returning this value
Returns
-------
pause_duration : int
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L273-L328
|
[
"def log(message, level=None, name=None, filename=None):\n \"\"\"\n Write a message to the log file and/or print to the console.\n\n Parameters\n ----------\n message : string\n the content of the message to log\n level : int\n one of the logger.level constants\n name : string\n name of the logger\n filename : string\n name of the log file\n\n Returns\n -------\n None\n \"\"\"\n\n if level is None:\n level = lg.INFO\n if name is None:\n name = config.settings.log_name\n if filename is None:\n filename = config.settings.log_filename\n\n if config.settings.log_file:\n # get the current logger or create a new one then log message at\n # requested level\n logger = get_logger(level=level, name=name, filename=filename)\n if level == lg.DEBUG:\n logger.debug(message)\n elif level == lg.INFO:\n logger.info(message)\n elif level == lg.WARNING:\n logger.warning(message)\n elif level == lg.ERROR:\n logger.error(message)\n\n # if logging to console is turned on, convert message to ascii and print\n # to the console only\n if config.settings.log_console: # pragma: no cover\n # capture current stdout, then switch it to the console, print the\n # message, then switch back to what had been the stdout\n # this prevents logging to notebook - instead, it goes to console\n standard_out = sys.stdout\n sys.stdout = sys.__stdout__\n\n # convert message to ascii for proper console display in windows\n # terminals\n message = unicodedata.normalize(\n 'NFKD', str(message)).encode('ascii', errors='replace').decode()\n print(message)\n sys.stdout = standard_out\n # otherwise print out standard statement\n else:\n print(message)\n",
"def get_pause_duration(recursive_delay=5, default_duration=10):\n \"\"\"\n Check the Overpass API status endpoint to determine how long to wait until\n next slot is available.\n\n Parameters\n ----------\n recursive_delay : int\n how long to wait between recursive calls if server is currently\n running a query\n default_duration : int\n if fatal error, function falls back on returning this value\n\n Returns\n -------\n pause_duration : int\n \"\"\"\n try:\n response = requests.get('http://overpass-api.de/api/status')\n status = response.text.split('\\n')[3]\n status_first_token = status.split(' ')[0]\n except Exception:\n # if status endpoint cannot be reached or output parsed, log error\n # and return default duration\n log('Unable to query http://overpass-api.de/api/status',\n level=lg.ERROR)\n return default_duration\n\n try:\n # if first token is numeric, it indicates the number of slots\n # available - no wait required\n available_slots = int(status_first_token)\n pause_duration = 0\n except Exception:\n # if first token is 'Slot', it tells you when your slot will be free\n if status_first_token == 'Slot':\n utc_time_str = status.split(' ')[3]\n utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)\n pause_duration = math.ceil(\n (utc_time - dt.datetime.utcnow()).total_seconds())\n pause_duration = max(pause_duration, 1)\n\n # if first token is 'Currently', it is currently running a query so\n # check back in recursive_delay seconds\n elif status_first_token == 'Currently':\n time.sleep(recursive_delay)\n pause_duration = get_pause_duration()\n\n else:\n # any other status is unrecognized - log an error and return\n # default duration\n log('Unrecognized server status: \"{}\"'.format(status),\n level=lg.ERROR)\n return default_duration\n\n return pause_duration\n"
] |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Build the Overpass API way filter string for a given OSM network type.

    Parameters
    ----------
    network_type : string
        {'walk', 'drive'} denoting the type of street network to extract

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        If ``network_type`` is not one of the supported types.
    """
    # drive: roadways usable by a normal 2-wheel-drive passenger vehicle,
    # both private and public; excludes un-drivable ways and service roads
    # tagged as parking, driveway, or emergency access
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: roads and pathways with pedestrian access, private and public;
    # excludes limited-access roadways but allows service roads
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')
    filters = {'drive': drive_filter, 'walk': walk_filter}
    if network_type not in filters:
        raise ValueError('unknown network_type "{}"'.format(network_type))
    return filters[network_type]
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter
    response_jsons_list = []
    response_jsons = []
    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)
    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.
    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})
    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()
    # loop through each polygon in the geometry
    # NOTE(review): this loop rebinds the lng_max/lat_min/lng_min/lat_max
    # parameter names to each sub-polygon's bounds
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
    # stitch together individual json results
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass
    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        # NOTE(review): query_str would be unbound here if the geometry
        # contained no polygons - presumably len(geometry) >= 1 always;
        # verify upstream
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        # keep the first occurrence of each node and way id
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))
    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'
    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)
    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))
    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: ``level`` was previously passed to str.format (where
            # it was silently ignored) instead of to log(), so server
            # remarks were logged at INFO instead of WARNING
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))
    return response_json
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds the maximum size (in the
    geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # the quadrat grid spacing is the square root of the max allowed area
    quadrat_width = math.sqrt(max_query_area_size)
    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')
    # take the convex hull when the geometry is a MultiPolygon, or a single
    # Polygon larger than the allowed maximum
    oversized_polygon = (isinstance(geometry, Polygon) and
                         geometry.area > max_query_area_size)
    if isinstance(geometry, MultiPolygon) or oversized_polygon:
        geometry = geometry.convex_hull
    # if the (possibly hulled) geometry still exceeds the max size, cut it
    # into smaller pieces along a quadrat grid
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)
    # always hand back a MultiPolygon so callers see a uniform type
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])
    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.
    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount
    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # create n evenly spaced points between the min and max x and y bounds
    # NOTE(review): the unpacked names suggest (minx, miny, maxx, maxy)
    # shapely bounds order despite being called lng_max/lat_min/... - the
    # linspace calls below are consistent with that ordering
    lng_max, lat_min, lng_min, lat_max = geometry.bounds
    x_num = math.ceil((lng_min-lng_max) / quadrat_width) + 1
    y_num = math.ceil((lat_max-lat_min) / quadrat_width) + 1
    x_points = np.linspace(lng_max, lng_min, num=max(x_num, min_num))
    y_points = np.linspace(lat_min, lat_max, num=max(y_num, min_num))
    # create a quadrat grid of lines at each of the evenly spaced points
    vertical_lines = [LineString([(x, y_points[0]), (x, y_points[-1])])
                      for x in x_points]
    horizont_lines = [LineString([(x_points[0], y), (x_points[-1], y)])
                      for y in y_points]
    lines = vertical_lines + horizont_lines
    # buffer each line to distance of the quadrat width divided by 1 billion,
    # take their union, then cut geometry into pieces by these quadrats
    buffer_size = quadrat_width * buffer_amount
    lines_buffered = [line.buffer(buffer_size) for line in lines]
    quadrats = unary_union(lines_buffered)
    # subtracting the thin buffered grid lines splits the geometry into a
    # MultiPolygon of sub-pieces
    multipoly = geometry.difference(quadrats)
    return multipoly
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa
    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : dict
        the starting coordinate reference system of the passed-in geometry
        (e.g. {'init': 'epsg:4326'})
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone
    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the single geometry in a one-row GeoDataFrame so the existing
    # project_gdf machinery can be reused
    gdf = gpd.GeoDataFrame()
    gdf.crs = crs
    gdf.name = 'geometry to project'
    gdf['geometry'] = None
    gdf.loc[0, 'geometry'] = geometry
    gdf_proj = project_gdf(gdf, to_latlong=to_latlong)
    # pull the single projected geometry back out of the frame
    geometry_proj = gdf_proj['geometry'].iloc[0]
    return geometry_proj, gdf_proj.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.
    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took
    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # preserve the frame's name attribute across the projection
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Process a node element entry into a dict suitable for going into a
    Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    # always carry the node's id and coordinates
    node = {'id': e['id'],
            'lat': e['lat'],
            'lon': e['lon']}
    # copy over only the OSM tags the configuration asks us to keep
    if 'tags' in e and e['tags'] is not np.nan:
        for tag, value in e['tags'].items():
            if tag in config.settings.keep_osm_tags:
                node[tag] = value
    return node
def process_way(e):
    """
    Process a way element entry into a list of dicts suitable for going into
    a Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}
    # copy over only the OSM tags the configuration asks us to keep
    if 'tags' in e and e['tags'] is not np.nan:
        for tag, value in e['tags'].items():
            if tag in config.settings.keep_osm_tags:
                way[tag] = value
    # one record per node that makes up this way, preserving node order
    waynodes = [{'way_id': e['id'], 'node_id': n} for n in e['nodes']]
    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of ways and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    elements = data['elements']
    if not elements:
        raise RuntimeError('OSM query results contain no data.')
    node_records = []
    way_records = []
    waynode_records = []
    # dispatch each element to the node or way processor
    for element in elements:
        element_type = element['type']
        if element_type == 'node':
            node_records.append(process_node(element))
        elif element_type == 'way':
            way, waynodes = process_way(element)
            way_records.append(way)
            waynode_records.extend(waynodes)
    return (pd.DataFrame.from_records(node_records, index='id'),
            pd.DataFrame.from_records(way_records, index='id'),
            pd.DataFrame.from_records(waynode_records, index='way_id'))
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download the raw OSM json for the bbox, then parse it into the three
    # node / way / way-node DataFrames
    raw_osm_data = osm_net_download(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm_data)
def intersection_nodes(waynodes):
    """
    Return the set of all node IDs that appear in 2 or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # how many ways each node participates in
    appearances = waynodes.node_id.value_counts()
    # nodes shared by more than one way are intersections
    return set(appearances.index[appearances > 1])
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.
    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_id'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.
    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()
    def pairwise(l):
        # yield consecutive (current, next) pairs from the sequence
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    # only nodes appearing in 2+ ways (intersections) become graph vertices
    intersections = intersection_nodes(waynodes)
    # group way-node rows by way ID (the index) to look up each way's nodes
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    # NOTE(review): loop variable ``id`` shadows the ``id`` builtin
    for id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(id).node_id.values
        # drop nodes that are not intersections with other ways
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance between node coords (see
                # osmnet.utils.great_circle_dist), rounded to 6 decimals
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # carry over any configured OSM tags present on this way
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                # for one-way networks, also emit the reverse edge
                if not two_way:
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # index edges by (from_id, to_id) for fast pair lookups
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
        NOTE(review): despite the names, lng_max holds the western (smaller)
        and lng_min the eastern (larger) longitude throughout this module,
        as the example above shows -- confirm before renaming anything.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    # download and parse the OSM data for the bounding box
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)
    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # rename to the x/y and from/to column names Pandana expects
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
consolidate_subdivide_geometry
|
python
|
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry; any polygon bigger will get
        divided up for multiple queries

    Returns
    -------
    geometry : MultiPolygon
    """
    # the quadrat edge length is the square root of the maximum area
    quadrat_width = math.sqrt(max_query_area_size)
    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')
    # MultiPolygons (and oversized Polygons) are first collapsed to their
    # convex hull; after the guard above the geometry is always one of the
    # two shapely types
    oversized = geometry.area > max_query_area_size
    if isinstance(geometry, MultiPolygon) or oversized:
        geometry = geometry.convex_hull
    # if the (hull) area still exceeds the limit, cut it into quadrats
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)
    # normalize the return type so callers can always iterate sub-polygons
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])
    return geometry
|
Consolidate a geometry into a convex hull, then subdivide it into
smaller sub-polygons if its area exceeds max size (in geometry's units).
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to consolidate and subdivide
max_query_area_size : float
max area for any part of the geometry, in the units the geometry is
in: any polygon bigger will get divided up for multiple queries to
the Overpass API (default is 50,000 * 50,000 units
(ie, 50km x 50km in area, if units are meters))
Returns
-------
geometry : Polygon or MultiPolygon
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L331-L373
| null |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Build the Overpass API way filter string for the requested OSM
    network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        If network_type is not one of the known types.
    """
    # drive: ways drivable by a normal two-wheel-drive passenger vehicle,
    # both private and public roads. Excludes un-drivable ways and service
    # roads tagged as parking, driveway, or emergency access.
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: roads and pathways that allow pedestrian access, both private
    # and public. Excludes limited-access roadways but keeps service roads.
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')
    filters = {'drive': drive_filter, 'walk': walk_filter}
    try:
        return filters[network_type]
    except KeyError:
        raise ValueError('unknown network_type "{}"'.format(network_type))
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter
    response_jsons_list = []
    response_jsons = []
    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)
    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.
    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})
    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()
    # loop through each polygon in the geometry, issuing one Overpass
    # request per sub-polygon
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        # Overpass expects the bbox as (south, west, north, east); note that
        # in this module's naming lng_max is the western (smaller) longitude
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
    # stitch together individual json results
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass
    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        # NOTE: query_str is whatever the last loop iteration produced
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # round-trip through a DataFrame to drop duplicate node/way ids
        # while keeping nodes ahead of ways in the output ordering
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))
    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict

    Raises
    ------
    Exception
        If the server returns a non-retryable status code with no JSON body.
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'
    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)
    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))
    try:
        response_json = response.json()
        if 'remark' in response_json:
            # BUGFIX: ``level`` was previously passed to str.format() --
            # where unused keyword arguments are silently ignored -- instead
            # of to log(), so server remarks were never logged at WARNING.
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))
    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the fourth line of the status page describes slot availability
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration
    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required. the int() conversion doubles as the
        # numeric check (raising into the except branch when non-numeric);
        # the value itself is not used.
        available_slots = int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until the advertised UTC time, but at least 1 second
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified
    size, using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # shapely .bounds is (minx, miny, maxx, maxy); this module names the
    # smaller longitude lng_max and the larger one lng_min throughout
    lng_max, lat_min, lng_min, lat_max = geometry.bounds
    # number of evenly spaced grid lines per direction, at least min_num
    x_num = math.ceil((lng_min-lng_max) / quadrat_width) + 1
    y_num = math.ceil((lat_max-lat_min) / quadrat_width) + 1
    x_points = np.linspace(lng_max, lng_min, num=max(x_num, min_num))
    y_points = np.linspace(lat_min, lat_max, num=max(y_num, min_num))
    # build the quadrat grid: one vertical line per x value and one
    # horizontal line per y value, each spanning the geometry's bounds
    y_lo, y_hi = y_points[0], y_points[-1]
    x_lo, x_hi = x_points[0], x_points[-1]
    grid_lines = [LineString([(x, y_lo), (x, y_hi)]) for x in x_points]
    grid_lines += [LineString([(x_lo, y), (x_hi, y)]) for y in y_points]
    # buffer each grid line by a tiny fraction of the quadrat width, merge
    # the buffered lines, then subtract the merged grid from the geometry
    # to slice it into quadrat-sized pieces
    pad = quadrat_width * buffer_amount
    quadrats = unary_union([line.buffer(pad) for line in grid_lines])
    multipoly = geometry.difference(quadrats)
    return multipoly
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : dict
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a one-row GeoDataFrame so the projection
    # can be delegated to project_gdf
    wrapper = gpd.GeoDataFrame()
    wrapper.crs = crs
    wrapper.name = 'geometry to project'
    wrapper['geometry'] = None
    wrapper.loc[0, 'geometry'] = geometry
    projected = project_gdf(wrapper, to_latlong=to_latlong)
    # unwrap the single projected geometry and return it with its new CRS
    return projected['geometry'].iloc[0], projected.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took

    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it unchanged
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude (zones are 6
        # degrees wide, numbered from 1 at 180W) and define the UTM CRS
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # preserve the source gdf's name on the projected copy
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Process a node element entry into a dict suitable for going into a
    Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    node = {'id': e['id'],
            'lat': e['lat'],
            'lon': e['lon']}
    # 'tags' may be a dict of OSM tags or a missing-value placeholder
    # (np.nan) introduced when elements are round-tripped through a
    # DataFrame. Checking for a dict is more robust than the previous
    # ``e['tags'] is not np.nan`` identity test, which only recognized the
    # exact np.nan singleton and would crash on e.g. float('nan').
    if isinstance(e.get('tags'), dict):
        for t, v in list(e['tags'].items()):
            # keep only the whitelisted OSM tags
            if t in config.settings.keep_osm_tags:
                node[t] = v
    return node
def process_way(e):
    """
    Process a way element entry into a list of dicts suitable for going into
    a Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}
    # 'tags' may be a dict of OSM tags or a missing-value placeholder
    # (np.nan) introduced when elements are round-tripped through a
    # DataFrame. Checking for a dict is more robust than the previous
    # ``e['tags'] is not np.nan`` identity test, which only recognized the
    # exact np.nan singleton and would crash on e.g. float('nan').
    if isinstance(e.get('tags'), dict):
        for t, v in list(e['tags'].items()):
            # keep only the whitelisted OSM tags
            if t in config.settings.keep_osm_tags:
                way[t] = v
    # nodes that make up a way, in traversal order
    waynodes = []
    for n in e['nodes']:
        waynodes.append({'way_id': e['id'], 'node_id': n})
    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of ways and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame

    Raises
    ------
    RuntimeError
        If the query result contains no elements.
    """
    if not data['elements']:
        raise RuntimeError('OSM query results contain no data.')
    # accumulate per-kind records, then build each DataFrame in one shot
    node_records = []
    way_records = []
    waynode_records = []
    for element in data['elements']:
        kind = element['type']
        if kind == 'node':
            node_records.append(process_node(element))
        elif kind == 'way':
            way, way_nodes = process_way(element)
            way_records.append(way)
            waynode_records.extend(way_nodes)
    nodes = pd.DataFrame.from_records(node_records, index='id')
    ways = pd.DataFrame.from_records(way_records, index='id')
    waynodes = pd.DataFrame.from_records(waynode_records, index='way_id')
    return (nodes, ways, waynodes)
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download the raw OSM JSON first, then parse it into DataFrames
    raw_osm = osm_net_download(
        lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max,
        network_type=network_type, timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm)
def intersection_nodes(waynodes):
    """
    Returns a set of all the nodes that appear in 2 or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # count how many ways each node participates in, then keep only the
    # node IDs whose count exceeds one
    occurrences = waynodes['node_id'].value_counts()
    shared = occurrences[occurrences > 1]
    return set(shared.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()
    # yield consecutive element pairs: (l[0], l[1]), (l[1], l[2]), ...
    def pairwise(l):
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    # only nodes shared by two or more ways become edge endpoints;
    # way-interior nodes are filtered out below
    intersections = intersection_nodes(waynodes)
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for id, row in ways.iterrows():
        # ordered node IDs of this way, restricted to intersection nodes
        nodes_in_way = waymap.get_group(id).node_id.values
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance in meters (see great_circle_dist)
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # carry over any whitelisted OSM tags present on this way
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                if not two_way:
                    # one-way network: also emit the reversed edge explicitly
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # index edges by (from_id, to_id) for fast lookup
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
        NOTE(review): despite the names, lng_max holds the western (smaller)
        and lng_min the eastern (larger) longitude throughout this module,
        as the example above shows -- confirm before renaming anything.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    # download and parse the OSM data for the bounding box
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)
    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # rename to the x/y and from/to column names Pandana expects
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
quadrat_cut_geometry
|
python
|
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified
    size, using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # create n evenly spaced points between the min and max x and y bounds
    # NOTE(review): shapely .bounds is (minx, miny, maxx, maxy) -- in this
    # module's naming lng_max holds the smaller longitude
    lng_max, lat_min, lng_min, lat_max = geometry.bounds
    x_num = math.ceil((lng_min-lng_max) / quadrat_width) + 1
    y_num = math.ceil((lat_max-lat_min) / quadrat_width) + 1
    x_points = np.linspace(lng_max, lng_min, num=max(x_num, min_num))
    y_points = np.linspace(lat_min, lat_max, num=max(y_num, min_num))
    # create a quadrat grid of lines at each of the evenly spaced points
    vertical_lines = [LineString([(x, y_points[0]), (x, y_points[-1])])
                      for x in x_points]
    horizont_lines = [LineString([(x_points[0], y), (x_points[-1], y)])
                      for y in y_points]
    lines = vertical_lines + horizont_lines
    # buffer each line to distance of the quadrat width divided by 1 billion,
    # take their union, then cut geometry into pieces by these quadrats
    buffer_size = quadrat_width * buffer_amount
    lines_buffered = [line.buffer(buffer_size) for line in lines]
    quadrats = unary_union(lines_buffered)
    multipoly = geometry.difference(quadrats)
    return multipoly
|
Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
using quadrats.
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to split up into smaller sub-polygons
quadrat_width : float
the linear width of the quadrats with which to cut up the geometry
(in the units the geometry is in)
min_num : float
the minimum number of linear quadrat lines (e.g., min_num=3 would
produce a quadrat grid of 4 squares)
buffer_amount : float
buffer the quadrat grid lines by quadrat_width times buffer_amount
Returns
-------
multipoly : shapely MultiPolygon
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L376-L421
| null |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """Build the Overpass API way-filter string for a given OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        If network_type is not a recognized key.
    """
    # drive: only roads drivable by a normal two-wheel-drive passenger
    # vehicle, private and public; exclude un-drivable ways and service
    # roads tagged as parking, driveway, or emergency access
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')

    # walk: roads and pathways that allow pedestrian access, private and
    # public; exclude limited-access roadways but keep service roads
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')

    filters = {'drive': drive_filter, 'walk': walk_filter}

    try:
        return filters[network_type]
    except KeyError:
        raise ValueError('unknown network_type "{}"'.format(network_type))
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter

    response_jsons_list = []
    response_jsons = []

    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.

    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})

    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()

    # issue one Overpass request per sub-polygon of the (possibly
    # subdivided) bounding-box geometry
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))

    # stitch together individual json results
    # (NOTE: the loop variable shadows nothing here, but 'json' is a poor
    # name since it matches the stdlib module name)
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass

    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)

    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # deduplicate by element id, keeping node/way groups separate since
        # OSM node ids and way ids are independent namespaces
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
        if record_count - len(response_jsons) > 0:
            log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
                record_count - len(response_jsons), time.time() - start_time))

    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: 'level' was previously passed to str.format() (which
            # silently ignores extra keyword arguments) instead of to log(),
            # so server remarks were never logged at WARNING level
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))

    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait until
    next slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the 4th line of the plain-text status page describes slot state
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration

    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required. (the parsed value itself is unused;
        # the int() call serves only as the numeric test)
        available_slots = int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait at least 1 second, rounding the remaining time up
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)

        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration

    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))

    Returns
    -------
    geometry : Polygon or MultiPolygon

    Raises
    ------
    ValueError
        If geometry is not a shapely Polygon or MultiPolygon.
    """
    # let the linear length of the quadrats (with which to subdivide the
    # geometry) be the square root of max area size
    quadrat_width = math.sqrt(max_query_area_size)

    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # if geometry is a MultiPolygon OR a single Polygon whose area exceeds
    # the max size, get the convex hull around the geometry
    if isinstance(
            geometry, MultiPolygon) or \
            (isinstance(
                geometry, Polygon) and geometry.area > max_query_area_size):
        geometry = geometry.convex_hull

    # if geometry area exceeds max size, subdivide it into smaller sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    # always return a MultiPolygon so callers can iterate sub-polygons
    # uniformly regardless of whether subdivision happened
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a single-row GeoDataFrame so that the
    # gdf-level projection helper (project_gdf) can be reused
    gdf = gpd.GeoDataFrame()
    gdf.crs = crs
    gdf.name = 'geometry to project'
    gdf['geometry'] = None
    gdf.loc[0, 'geometry'] = geometry
    gdf_proj = project_gdf(gdf, to_latlong=to_latlong)
    # unwrap the single projected geometry and return it with its new crs
    geometry_proj = gdf_proj['geometry'].iloc[0]
    return geometry_proj, gdf_proj.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took

    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()

    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it unchanged
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf

        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x

        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}

        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))

    # carry the descriptive name over to the projected copy
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """Convert one OSM node element into a flat dict for a DataFrame row.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    record = {'id': e['id'],
              'lat': e['lat'],
              'lon': e['lon']}
    if 'tags' in e:
        tags = e['tags']
        # tags can be np.nan after a DataFrame round-trip; keep only the
        # OSM tag keys the package is configured to retain
        if tags is not np.nan:
            for key, value in list(tags.items()):
                if key in config.settings.keep_osm_tags:
                    record[key] = value
    return record
def process_way(e):
    """Convert one OSM way element into a way record plus way-node links.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}
    if 'tags' in e:
        tags = e['tags']
        # tags can be np.nan after a DataFrame round-trip; keep only the
        # OSM tag keys the package is configured to retain
        if tags is not np.nan:
            for key, value in list(tags.items()):
                if key in config.settings.keep_osm_tags:
                    way[key] = value
    # one link record per node along the way, preserving traversal order
    waynodes = [{'way_id': e['id'], 'node_id': node_id}
                for node_id in e['nodes']]
    return way, waynodes
def parse_network_osm_query(data):
    """Convert raw OSM query results into node, way, and way-node DataFrames.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame

    Raises
    ------
    RuntimeError
        If the query result contains no elements.
    """
    if len(data['elements']) == 0:
        raise RuntimeError('OSM query results contain no data.')

    node_records = []
    way_records = []
    waynode_records = []
    # dispatch each element to the node or way processor; other element
    # types (e.g. relations) are ignored
    for element in data['elements']:
        kind = element['type']
        if kind == 'node':
            node_records.append(process_node(element))
        elif kind == 'way':
            way, linked_nodes = process_way(element)
            way_records.append(way)
            waynode_records.extend(linked_nodes)

    nodes = pd.DataFrame.from_records(node_records, index='id')
    ways = pd.DataFrame.from_records(way_records, index='id')
    waynodes = pd.DataFrame.from_records(waynode_records, index='way_id')
    return (nodes, ways, waynodes)
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """Download and parse OSM data within a bounding box into DataFrames.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For example to request highway ways
        that are service roads use: '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # fetch the raw Overpass payload, then parse it into the three tables
    raw_osm = osm_net_download(lat_max=lat_max, lat_min=lat_min,
                               lng_min=lng_min, lng_max=lng_max,
                               network_type=network_type,
                               timeout=timeout, memory=memory,
                               max_query_area_size=max_query_area_size,
                               custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm)
def intersection_nodes(waynodes):
    """Return the set of node IDs that are shared by two or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # a node appearing more than once in the way-node table is shared by
    # multiple ways, i.e. it is an intersection
    occurrences = waynodes['node_id'].value_counts()
    repeated = occurrences[occurrences > 1]
    return set(repeated.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.

    Raises
    ------
    Exception
        If no connected node pairs result from the input tables.
    """
    start_time = time.time()

    def pairwise(l):
        # consecutive (a, b) pairs along a way's node sequence
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    intersections = intersection_nodes(waynodes)
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    # NOTE(review): the loop variable 'id' shadows the builtin of the same
    # name within this loop body
    for id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(id).node_id.values
        # only intersection nodes become graph vertices; intermediate
        # shape points along a way are dropped
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance in meters, rounded to 6 decimals
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # copy the configured OSM tags from the way onto the edge
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)

                # when edges are not two-way, emit the reverse direction
                # as an explicit second record
                if not two_way:
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # index by (from_id, to_id) for fast pair lookups downstream
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()

    # validate input: either a bbox tuple OR the four scalar bounds,
    # never both
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'

        lng_max, lat_min, lng_min, lat_max = bbox

    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'

    # download OSM data and parse it into node / way / way-node tables
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))

    # build the edge table of connected intersection-node pairs
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)

    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # rename to the x/y and from/to column names Pandana expects
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))

    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
project_geometry
|
python
|
def project_geometry(geometry, crs, to_latlong=False):
    """Project a shapely Polygon or MultiPolygon between WGS84 and UTM.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a single-row GeoDataFrame so the gdf-level
    # projection helper (project_gdf) can be reused
    gdf = gpd.GeoDataFrame()
    gdf.crs = crs
    gdf.name = 'geometry to project'
    gdf['geometry'] = None
    gdf.loc[0, 'geometry'] = geometry
    gdf_proj = project_gdf(gdf, to_latlong=to_latlong)
    # unwrap the single projected geometry and return it with its new crs
    geometry_proj = gdf_proj['geometry'].iloc[0]
    return geometry_proj, gdf_proj.crs
|
Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to project
crs : int
the starting coordinate reference system of the passed-in geometry
to_latlong : bool
if True, project from crs to WGS84, if False, project
from crs to local UTM zone
Returns
-------
geometry_proj, crs : tuple (projected shapely geometry, crs of the
projected geometry)
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L424-L450
|
[
"def project_gdf(gdf, to_latlong=False, verbose=False):\n \"\"\"\n Project a GeoDataFrame to the UTM zone appropriate for its geometries'\n centroid. The calculation works well for most latitudes,\n however it will not work well for some far northern locations.\n\n Parameters\n ----------\n gdf : GeoDataFrame\n the gdf to be projected to UTM\n to_latlong : bool\n if True, projects to WGS84 instead of to UTM\n\n Returns\n -------\n gdf : GeoDataFrame\n \"\"\"\n assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'\n start_time = time.time()\n\n if to_latlong:\n # if to_latlong is True, project the gdf to WGS84\n latlong_crs = {'init': 'epsg:4326'}\n projected_gdf = gdf.to_crs(latlong_crs)\n if not hasattr(gdf, 'name'):\n gdf.name = 'unnamed'\n if verbose:\n log('Projected the GeoDataFrame \"{}\" to EPSG 4326 in {:,.2f} '\n 'seconds'.format(gdf.name, time.time()-start_time))\n else:\n # else, project the gdf to UTM\n # if GeoDataFrame is already in UTM, return it\n if (gdf.crs is not None) and ('proj' in gdf.crs) \\\n and (gdf.crs['proj'] == 'utm'):\n return gdf\n\n # calculate the centroid of the union of all the geometries in the\n # GeoDataFrame\n avg_longitude = gdf['geometry'].unary_union.centroid.x\n\n # calculate the UTM zone from this avg longitude and define the\n # UTM CRS to project\n utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)\n utm_crs = {'datum': 'NAD83',\n 'ellps': 'GRS80',\n 'proj': 'utm',\n 'zone': utm_zone,\n 'units': 'm'}\n\n # project the GeoDataFrame to the UTM CRS\n projected_gdf = gdf.to_crs(utm_crs)\n if not hasattr(gdf, 'name'):\n gdf.name = 'unnamed'\n if verbose:\n log('Projected the GeoDataFrame \"{}\" to UTM-{} in {:,.2f} '\n 'seconds'.format(gdf.name, utm_zone, time.time()-start_time))\n\n projected_gdf.name = gdf.name\n return projected_gdf\n"
] |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Create a filter to query Overpass API for the specified OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        If network_type is not a recognized key.
    """
    filters = {}

    # drive: select only roads that are drivable by normal 2 wheel drive
    # passenger vehicles both private and public
    # roads. Filter out un-drivable roads and service roads tagged as parking,
    # driveway, or emergency-access
    filters['drive'] = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                        '|track|proposed|construction|bridleway|abandoned'
                        '|platform|raceway|service"]'
                        '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                        '["service"!~"parking|parking_aisle|driveway'
                        '|emergency_access"]')

    # walk: select only roads and pathways that allow pedestrian access both
    # private and public pathways and roads.
    # Filter out limited access roadways and allow service roads
    filters['walk'] = ('["highway"!~"motor|proposed|construction|abandoned'
                       '|platform|raceway"]["foot"!~"no"]'
                       '["pedestrians"!~"no"]')

    if network_type in filters:
        osm_filter = filters[network_type]
    else:
        raise ValueError('unknown network_type "{}"'.format(network_type))

    return osm_filter
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For example to request highway ways
        that are service roads use: '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the
    # requested network_type, unless a custom filter is supplied
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter

    response_jsons_list = []
    response_jsons = []

    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # turn bbox into a polygon and project to local UTM so the area
    # comparison against max_query_area_size is done in meters
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})

    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()

    # query_str is reported in the no-data error message below; give it a
    # defined value so the message cannot itself raise a NameError if the
    # subdivided geometry turned out to be empty
    query_str = ''

    # loop through each polygon in the geometry
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create consistent URL strings.
        # way["highway"] denotes ways with highway keys and {filters}
        # returns ways with the requested key/value. the '>' makes it
        # recurse so we get ways and way nodes. maxsize is in bytes.
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))

    # stitch together individual json results. the loop variable is named
    # 'result' (not 'json') to avoid shadowing the stdlib module name
    for result in response_jsons_list:
        try:
            response_jsons.extend(result['elements'])
        except KeyError:
            pass

    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)

    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # de-duplicate nodes and ways separately on their OSM id, keeping
        # the first occurrence of each
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
        if record_count - len(response_jsons) > 0:
            log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
                record_count - len(response_jsons), time.time() - start_time))

    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: level=lg.WARNING was previously passed to
            # str.format() (where it is silently ignored) instead of to
            # log(), so server remarks were logged at the default level
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively calling
        # overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))

    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the fourth line of the status page describes this client's slot
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration

    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required. only whether it parses as an int
        # matters, so the value itself is discarded.
        int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until the advertised slot time, but at least 1 second
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration

    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API

    Returns
    -------
    geometry : MultiPolygon

    Raises
    ------
    ValueError
        If ``geometry`` is not a shapely Polygon or MultiPolygon.
    """
    # the quadrat side length used for subdividing is the square root of
    # the maximum allowed area
    quadrat_width = math.sqrt(max_query_area_size)

    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # a MultiPolygon, or a single Polygon that is too large, is first
    # consolidated into its convex hull
    is_multi = isinstance(geometry, MultiPolygon)
    too_big = isinstance(geometry, Polygon) and \
        geometry.area > max_query_area_size
    if is_multi or too_big:
        geometry = geometry.convex_hull

    # if the (possibly hulled) geometry still exceeds the max size, cut it
    # into quadrat-sized sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    # always hand back a MultiPolygon so callers can iterate uniformly
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified
    size, using a grid of quadrat lines.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # create n evenly spaced points between the min and max x and y bounds.
    # NOTE: this module's bbox naming convention puts the smaller longitude
    # in 'lng_max' (western edge) - see network_from_bbox's bbox docs.
    lng_max, lat_min, lng_min, lat_max = geometry.bounds
    x_num = math.ceil((lng_min - lng_max) / quadrat_width) + 1
    y_num = math.ceil((lat_max - lat_min) / quadrat_width) + 1
    x_points = np.linspace(lng_max, lng_min, num=max(x_num, min_num))
    y_points = np.linspace(lat_min, lat_max, num=max(y_num, min_num))

    # build the grid as one list: vertical lines first, then horizontal
    grid_lines = [LineString([(x, y_points[0]), (x, y_points[-1])])
                  for x in x_points]
    grid_lines += [LineString([(x_points[0], y), (x_points[-1], y)])
                   for y in y_points]

    # buffer each line to distance of the quadrat width divided by 1
    # billion, union them, then subtract the grid from the geometry to cut
    # it into pieces
    buffer_size = quadrat_width * buffer_amount
    quadrats = unary_union([line.buffer(buffer_size) for line in grid_lines])
    multipoly = geometry.difference(quadrats)
    return multipoly
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes, however it
    will not work well for some far northern locations.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took

    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()

    if to_latlong:
        # project the gdf to WGS84 lat-long
        projected_gdf = gdf.to_crs({'init': 'epsg:4326'})
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # short-circuit: if the GeoDataFrame is already in UTM, return it
        # unchanged (assumes the legacy dict-style CRS representation)
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf

        # pick the UTM zone from the centroid of the union of all the
        # geometries in the GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}

        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))

    # propagate the (possibly just-defaulted) name onto the result
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Process a node element entry into a dict suitable for going into a
    Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    node = {'id': e['id'],
            'lat': e['lat'],
            'lon': e['lon']}
    # carry through only the OSM tags the configuration asks to keep;
    # np.nan doubles as the missing-tags sentinel
    tags = e.get('tags', np.nan)
    if tags is not np.nan:
        for key, value in list(tags.items()):
            if key in config.settings.keep_osm_tags:
                node[key] = value
    return node
def process_way(e):
    """
    Process a way element entry into a list of dicts suitable for going
    into a Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}
    # carry through only the OSM tags the configuration asks to keep;
    # np.nan doubles as the missing-tags sentinel
    tags = e.get('tags', np.nan)
    if tags is not np.nan:
        for key, value in list(tags.items()):
            if key in config.settings.keep_osm_tags:
                way[key] = value
    # one record per node that makes up the way, preserving node order
    waynodes = [{'way_id': e['id'], 'node_id': n} for n in e['nodes']]
    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of ways and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame

    Raises
    ------
    RuntimeError
        If the query result contains no elements.
    """
    if len(data['elements']) == 0:
        raise RuntimeError('OSM query results contain no data.')

    node_records = []
    way_records = []
    waynode_records = []

    # dispatch each element to the node or way processor by its OSM type
    for element in data['elements']:
        if element['type'] == 'node':
            node_records.append(process_node(element))
        elif element['type'] == 'way':
            way, nodes_of_way = process_way(element)
            way_records.append(way)
            waynode_records.extend(nodes_of_way)

    nodes = pd.DataFrame.from_records(node_records, index='id')
    ways = pd.DataFrame.from_records(way_records, index='id')
    waynodes = pd.DataFrame.from_records(waynode_records, index='way_id')
    return (nodes, ways, waynodes)
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For example to request highway ways
        that are service roads use: '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download the raw OSM data for the bbox, then parse it into the three
    # node / way / way-node tables
    raw_osm_data = osm_net_download(
        lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max,
        network_type=network_type, timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm_data)
def intersection_nodes(waynodes):
    """
    Return the set of all node IDs that appear in 2 or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # count how many way records reference each node; any node referenced
    # more than once is an intersection
    occurrences = waynodes.node_id.value_counts()
    shared_ids = occurrences[occurrences > 1].index.values
    return set(shared_ids)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()

    def pairwise(l):
        # consecutive (l[i], l[i+1]) pairs of a sequence
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    intersections = intersection_nodes(waynodes)

    # group the way-node table by way id (the index) for per-way lookup
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(id).node_id.values
        # an edge only connects intersection nodes; pass-through nodes
        # (those appearing in a single way) are dropped
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance in meters, rounded to 6 places
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # carry any configured OSM tags that exist on this way
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                if not two_way:
                    # one-way networks get an explicit reverse edge too
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # index by (from id, to id) for fast edge lookup downstream
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the
        bbox parameter should be None.
    lng_min : float
        eastern latitude of bounding box, if this parameter is used the
        bbox parameter should be None.
    lat_max : float
        northern longitude of bounding box, if this parameter is used the
        bbox parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the
        bbox parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()

    if bbox is not None:
        # callers may supply either the bbox tuple or the four scalar
        # bounds, never both
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        # unpack using this module's bbox ordering convention
        lng_max, lat_min, lng_min, lat_max = bbox

    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'

    # download and parse the OSM data into node/way/way-node tables
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))

    # build edges between intersection nodes with distances in meters
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)

    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # rename to the x/y/id and from/to column names Pandana expects
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'},
                      inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))

    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
process_node
|
python
|
def process_node(e):
node = {'id': e['id'],
'lat': e['lat'],
'lon': e['lon']}
if 'tags' in e:
if e['tags'] is not np.nan:
for t, v in list(e['tags'].items()):
if t in config.settings.keep_osm_tags:
node[t] = v
return node
|
Process a node element entry into a dict suitable for going into a
Pandas DataFrame.
Parameters
----------
e : dict
individual node element in downloaded OSM json
Returns
-------
node : dict
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L514-L539
| null |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Build the Overpass API way-filter string for a given OSM network type.

    Parameters
    ----------
    network_type : string
        Either 'walk' or 'drive', denoting the type of street network
        to extract.

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        If ``network_type`` is not a recognized network type.
    """
    # drive: only roads drivable by a normal 2WD passenger vehicle, both
    # public and private; excludes un-drivable ways and service roads
    # tagged as parking, driveway, or emergency access
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: roads and pathways that allow pedestrian access, both public
    # and private; excludes limited-access roadways but allows service roads
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')
    filters = {'drive': drive_filter, 'walk': walk_filter}
    try:
        return filters[network_type]
    except KeyError:
        raise ValueError('unknown network_type "{}"'.format(network_type))
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For example to request highway ways
        that are service roads use: '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the
    # requested network_type, unless a custom filter is supplied
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter

    response_jsons_list = []
    response_jsons = []

    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # turn bbox into a polygon and project to local UTM so the area
    # comparison against max_query_area_size is done in meters
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})

    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()

    # query_str is reported in the no-data error message below; give it a
    # defined value so the message cannot itself raise a NameError if the
    # subdivided geometry turned out to be empty
    query_str = ''

    # loop through each polygon in the geometry
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create consistent URL strings.
        # way["highway"] denotes ways with highway keys and {filters}
        # returns ways with the requested key/value. the '>' makes it
        # recurse so we get ways and way nodes. maxsize is in bytes.
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))

    # stitch together individual json results. the loop variable is named
    # 'result' (not 'json') to avoid shadowing the stdlib module name
    for result in response_jsons_list:
        try:
            response_jsons.extend(result['elements'])
        except KeyError:
            pass

    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)

    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # de-duplicate nodes and ways separately on their OSM id, keeping
        # the first occurrence of each
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
        if record_count - len(response_jsons) > 0:
            log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
                record_count - len(response_jsons), time.time() - start_time))

    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: level=lg.WARNING was previously passed to
            # str.format() (where it is silently ignored) instead of to
            # log(), so server remarks were logged at the default level
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively calling
        # overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))

    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
        number of seconds to pause before issuing the next request
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the fourth line of the status page describes slot availability,
        # e.g. "3 slots available now." or "Slot available after: <time>"
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration

    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required (the parsed value itself is unused;
        # only parse success matters)
        int(status_first_token)
        pause_duration = 0
    except ValueError:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until that UTC timestamp, but never less than 1 second
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # quadrat edge length: the square root of the max allowed area
    quadrat_width = math.sqrt(max_query_area_size)

    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # a MultiPolygon, or a single Polygon whose area is too big, is first
    # replaced by its convex hull to consolidate it into one shape
    single_too_big = (isinstance(geometry, Polygon)
                      and geometry.area > max_query_area_size)
    if isinstance(geometry, MultiPolygon) or single_too_big:
        geometry = geometry.convex_hull

    # cut the (possibly consolidated) geometry into quadrat-sized pieces
    # if it still exceeds the max area
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    # always hand back a MultiPolygon so callers get a uniform type
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # shapely bounds are (min_x, min_y, max_x, max_y)
    min_x, min_y, max_x, max_y = geometry.bounds

    # number of evenly spaced grid lines per axis, at least min_num
    n_x = max(math.ceil((max_x - min_x) / quadrat_width) + 1, min_num)
    n_y = max(math.ceil((max_y - min_y) / quadrat_width) + 1, min_num)
    xs = np.linspace(min_x, max_x, num=n_x)
    ys = np.linspace(min_y, max_y, num=n_y)

    # build the quadrat grid: vertical lines at each x, then horizontal
    # lines at each y, all spanning the full bounding box
    grid_lines = [LineString([(x, ys[0]), (x, ys[-1])]) for x in xs]
    grid_lines += [LineString([(xs[0], y), (xs[-1], y)]) for y in ys]

    # buffer each line by a tiny fraction of the quadrat width, union them
    # into one grid geometry, then subtract that grid from the input to
    # produce the sub-polygons
    pad = quadrat_width * buffer_amount
    quadrat_grid = unary_union([line.buffer(pad) for line in grid_lines])
    multipoly = geometry.difference(quadrat_grid)
    return multipoly
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a single-row GeoDataFrame so the actual
    # projection can be delegated to project_gdf
    frame = gpd.GeoDataFrame()
    frame.crs = crs
    frame.name = 'geometry to project'
    frame['geometry'] = None
    frame.loc[0, 'geometry'] = geometry

    projected_frame = project_gdf(frame, to_latlong=to_latlong)

    # unwrap the single projected geometry and return it with its new crs
    return projected_frame['geometry'].iloc[0], projected_frame.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took

    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it unchanged
        # (assumes a proj4-style dict crs - TODO confirm for newer geopandas)
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project (zones are 6 degrees wide, numbered from -180)
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # carry the (possibly just-created) name attribute over to the result
    projected_gdf.name = gdf.name
    return projected_gdf
def process_way(e):
    """
    Process a way element entry into a list of dicts suitable for going into
    a Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
        way attributes, keeping only the configured OSM tags
    waynodes : list of dict
        one record per node in the way, linking way id to node id
    """
    way_id = e['id']
    way = {'id': way_id}

    # copy over only the whitelisted OSM tags; 'tags' may be absent or NaN
    if 'tags' in e and e['tags'] is not np.nan:
        for tag, value in list(e['tags'].items()):
            if tag in config.settings.keep_osm_tags:
                way[tag] = value

    # one record per node that makes up this way, preserving node order
    waynodes = [{'way_id': way_id, 'node_id': node} for node in e['nodes']]

    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of ways and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    elements = data['elements']
    if not elements:
        raise RuntimeError('OSM query results contain no data.')

    nodes = []
    ways = []
    waynodes = []

    # dispatch each raw element to the appropriate processor; element
    # types other than 'node' and 'way' are ignored
    for element in elements:
        element_type = element['type']
        if element_type == 'node':
            nodes.append(process_node(element))
        elif element_type == 'way':
            way, nodes_in_way = process_way(element)
            ways.append(way)
            waynodes.extend(nodes_in_way)

    return (pd.DataFrame.from_records(nodes, index='id'),
            pd.DataFrame.from_records(ways, index='id'),
            pd.DataFrame.from_records(waynodes, index='way_id'))
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # thin wrapper: download the raw OSM JSON for the bbox, then parse it
    # into node, way, and way-node DataFrames
    return parse_network_osm_query(
        osm_net_download(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min,
                         lng_max=lng_max, network_type=network_type,
                         timeout=timeout, memory=memory,
                         max_query_area_size=max_query_area_size,
                         custom_osm_filter=custom_osm_filter))
def intersection_nodes(waynodes):
    """
    Return the set of node IDs that appear in 2 or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # count how many way records reference each node, then keep only the
    # nodes referenced more than once
    occurrences = waynodes['node_id'].value_counts()
    shared = occurrences[occurrences > 1]
    return set(shared.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()

    # consecutive (current, next) pairs from a sequence
    def pairwise(l):
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    # only nodes shared by 2+ ways become graph vertices
    intersections = intersection_nodes(waynodes)
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for id, row in ways.iterrows():
        # reduce this way's node sequence to just the intersection nodes,
        # preserving their order along the way
        nodes_in_way = waymap.get_group(id).node_id.values
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                # great-circle distance in meters, rounded to 6 decimals
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # copy any whitelisted way tags onto the edge record
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                # when two_way is False, an explicit reverse edge record
                # is also emitted for each pair
                if not two_way:
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # index the table by (from_id, to_id) for downstream lookups
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    # validate inputs: either a 4-tuple bbox or all four scalar floats,
    # but never both
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    # download + parse raw OSM data for the bbox
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    # build the edge table of connected intersection-node pairs
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)

    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    # rename to the x/y, from/to column names Pandana expects
    nodesfinal = nodesfinal[['lon', 'lat']]
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
process_way
|
python
|
def process_way(e):
way = {'id': e['id']}
if 'tags' in e:
if e['tags'] is not np.nan:
for t, v in list(e['tags'].items()):
if t in config.settings.keep_osm_tags:
way[t] = v
# nodes that make up a way
waynodes = []
for n in e['nodes']:
waynodes.append({'way_id': e['id'], 'node_id': n})
return way, waynodes
|
Process a way element entry into a list of dicts suitable for going into
a Pandas DataFrame.
Parameters
----------
e : dict
individual way element in downloaded OSM json
Returns
-------
way : dict
waynodes : list of dict
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L542-L572
| null |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Create a filter to query Overpass API for the specified OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string
    """
    # drive: select only roads that are drivable by normal 2 wheel drive
    # passenger vehicles both private and public roads. Filter out
    # un-drivable roads and service roads tagged as parking, driveway,
    # or emergency-access
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')

    # walk: select only roads and pathways that allow pedestrian access
    # both private and public pathways and roads. Filter out limited
    # access roadways and allow service roads
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')

    filters = {'drive': drive_filter, 'walk': walk_filter}

    if network_type not in filters:
        raise ValueError('unknown network_type "{}"'.format(network_type))
    return filters[network_type]
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter

    response_jsons_list = []
    response_jsons = []

    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.

    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})

    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()

    # loop through each polygon in the geometry, issuing one Overpass
    # request per sub-polygon
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))

    # stitch together individual json results
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass

    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)

    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # deduplicate by element id, separately for nodes and ways, keeping
        # the first occurrence of each
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))

    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: level=lg.WARNING was previously passed to str.format
            # (where it was silently ignored) rather than to log(), so the
            # remark was never logged at WARNING level
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)

        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))

    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
        number of seconds to pause before issuing the next request
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the fourth line of the status page describes slot availability,
        # e.g. "3 slots available now." or "Slot available after: <time>"
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration

    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required (the parsed value itself is unused;
        # only parse success matters)
        int(status_first_token)
        pause_duration = 0
    except ValueError:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until that UTC timestamp, but never less than 1 second
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # quadrat edge length: the square root of the max allowed area
    quadrat_width = math.sqrt(max_query_area_size)

    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # a MultiPolygon, or a single Polygon whose area is too big, is first
    # replaced by its convex hull to consolidate it into one shape
    single_too_big = (isinstance(geometry, Polygon)
                      and geometry.area > max_query_area_size)
    if isinstance(geometry, MultiPolygon) or single_too_big:
        geometry = geometry.convex_hull

    # cut the (possibly consolidated) geometry into quadrat-sized pieces
    # if it still exceeds the max area
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    # always hand back a MultiPolygon so callers get a uniform type
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # shapely bounds are (min_x, min_y, max_x, max_y)
    min_x, min_y, max_x, max_y = geometry.bounds

    # number of evenly spaced grid lines per axis, at least min_num
    n_x = max(math.ceil((max_x - min_x) / quadrat_width) + 1, min_num)
    n_y = max(math.ceil((max_y - min_y) / quadrat_width) + 1, min_num)
    xs = np.linspace(min_x, max_x, num=n_x)
    ys = np.linspace(min_y, max_y, num=n_y)

    # build the quadrat grid: vertical lines at each x, then horizontal
    # lines at each y, all spanning the full bounding box
    grid_lines = [LineString([(x, ys[0]), (x, ys[-1])]) for x in xs]
    grid_lines += [LineString([(xs[0], y), (xs[-1], y)]) for y in ys]

    # buffer each line by a tiny fraction of the quadrat width, union them
    # into one grid geometry, then subtract that grid from the input to
    # produce the sub-polygons
    pad = quadrat_width * buffer_amount
    quadrat_grid = unary_union([line.buffer(pad) for line in grid_lines])
    multipoly = geometry.difference(quadrat_grid)
    return multipoly
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a single-row GeoDataFrame so the actual
    # projection can be delegated to project_gdf
    frame = gpd.GeoDataFrame()
    frame.crs = crs
    frame.name = 'geometry to project'
    frame['geometry'] = None
    frame.loc[0, 'geometry'] = geometry

    projected_frame = project_gdf(frame, to_latlong=to_latlong)

    # unwrap the single projected geometry and return it with its new crs
    return projected_frame['geometry'].iloc[0], projected_frame.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.
    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log the name of the projected GeoDataFrame and how long
        the projection took
    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        # 'name' is not a standard GeoDataFrame attribute; fall back to a
        # placeholder so the log message below cannot raise AttributeError
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project
        # (UTM zones are 6 degrees of longitude wide, numbered from -180)
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # carry the original frame's name over to the projected result
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Convert one OSM node element into a flat record dict suitable for a
    Pandas DataFrame row.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    record = {'id': e['id'],
              'lat': e['lat'],
              'lon': e['lon']}
    # copy over only the whitelisted OSM tags, if any tags are present
    if 'tags' in e:
        tags = e['tags']
        if tags is not np.nan:
            for key, value in list(tags.items()):
                if key in config.settings.keep_osm_tags:
                    record[key] = value
    return record
def parse_network_osm_query(data):
    """
    Convert OSM query results into DataFrames of nodes, ways and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame

    Raises
    ------
    RuntimeError
        If the query result contains no elements.
    """
    elements = data['elements']
    if len(elements) == 0:
        raise RuntimeError('OSM query results contain no data.')
    node_records = []
    way_records = []
    waynode_records = []
    # dispatch each raw element to the appropriate record builder
    for element in elements:
        kind = element['type']
        if kind == 'node':
            node_records.append(process_node(element))
        elif kind == 'way':
            way, links = process_way(element)
            way_records.append(way)
            waynode_records.extend(links)
    return (pd.DataFrame.from_records(node_records, index='id'),
            pd.DataFrame.from_records(way_records, index='id'),
            pd.DataFrame.from_records(waynode_records, index='way_id'))
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # thin wrapper: download the raw OSM json for the bbox via the Overpass
    # API, then parse it into node, way and way-node DataFrames
    return parse_network_osm_query(
        osm_net_download(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min,
                         lng_max=lng_max, network_type=network_type,
                         timeout=timeout, memory=memory,
                         max_query_area_size=max_query_area_size,
                         custom_osm_filter=custom_osm_filter))
def intersection_nodes(waynodes):
    """
    Return the set of node IDs shared by two or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # count how many ways reference each node; any node referenced more
    # than once is an intersection
    occurrences = waynodes.node_id.value_counts()
    shared = occurrences[occurrences > 1]
    return set(shared.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.
    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.
    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()

    def pairwise(l):
        # consecutive (current, next) pairs of a sequence
        return zip(islice(l, 0, len(l)), islice(l, 1, None))

    def _edge_record(from_node, to_node, distance, way_row):
        # build one edge record: endpoints, length, plus any whitelisted
        # OSM tags present on the parent way (previously duplicated inline
        # for the forward and reverse edges)
        rec = {'from_id': from_node,
               'to_id': to_node,
               'distance': distance}
        for tag in config.settings.keep_osm_tags:
            try:
                rec.update({tag: way_row[tag]})
            except KeyError:
                pass
        return rec

    intersections = intersection_nodes(waynodes)
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for way_id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(way_id).node_id.values
        # edges run between intersection nodes only
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance in meters, rounded for stability
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                pairs.append(_edge_record(from_node, to_node, distance, row))
                if not two_way:
                    # one-way networks get an explicit reverse edge
                    pairs.append(_edge_record(to_node, from_node, distance,
                                              row))
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    # the bbox tuple is mutually exclusive with the four individual
    # lat/lng arguments; unpack it into them if supplied
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        lng_max, lat_min, lng_min, lat_max = bbox
    # NOTE(review): these asserts are stripped under `python -O`; raising
    # ValueError would be more robust input validation - confirm callers
    # do not rely on AssertionError before changing
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    # download OSM data for the bbox and parse into node/way DataFrames
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    # build the edge table of connected intersection-node pairs
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)
    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # rename columns to the x/y and from/to schema Pandana expects
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
parse_network_osm_query
|
python
|
def parse_network_osm_query(data):
if len(data['elements']) == 0:
raise RuntimeError('OSM query results contain no data.')
nodes = []
ways = []
waynodes = []
for e in data['elements']:
if e['type'] == 'node':
nodes.append(process_node(e))
elif e['type'] == 'way':
w, wn = process_way(e)
ways.append(w)
waynodes.extend(wn)
nodes = pd.DataFrame.from_records(nodes, index='id')
ways = pd.DataFrame.from_records(ways, index='id')
waynodes = pd.DataFrame.from_records(waynodes, index='way_id')
return (nodes, ways, waynodes)
|
Convert OSM query data to DataFrames of ways and way-nodes.
Parameters
----------
data : dict
Result of an OSM query.
Returns
-------
nodes, ways, waynodes : pandas.DataFrame
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L575-L608
|
[
"def process_node(e):\n \"\"\"\n Process a node element entry into a dict suitable for going into a\n Pandas DataFrame.\n\n Parameters\n ----------\n e : dict\n individual node element in downloaded OSM json\n\n Returns\n -------\n node : dict\n\n \"\"\"\n node = {'id': e['id'],\n 'lat': e['lat'],\n 'lon': e['lon']}\n\n if 'tags' in e:\n if e['tags'] is not np.nan:\n for t, v in list(e['tags'].items()):\n if t in config.settings.keep_osm_tags:\n node[t] = v\n\n return node\n",
"def process_way(e):\n \"\"\"\n Process a way element entry into a list of dicts suitable for going into\n a Pandas DataFrame.\n\n Parameters\n ----------\n e : dict\n individual way element in downloaded OSM json\n\n Returns\n -------\n way : dict\n waynodes : list of dict\n\n \"\"\"\n way = {'id': e['id']}\n\n if 'tags' in e:\n if e['tags'] is not np.nan:\n for t, v in list(e['tags'].items()):\n if t in config.settings.keep_osm_tags:\n way[t] = v\n\n # nodes that make up a way\n waynodes = []\n\n for n in e['nodes']:\n waynodes.append({'way_id': e['id'], 'node_id': n})\n\n return way, waynodes\n"
] |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Build the Overpass API way filter for the requested OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        If network_type is not a recognized key.
    """
    filters = {
        # drive: only roads drivable by normal 2-wheel-drive passenger
        # vehicles; exclude un-drivable ways and service roads tagged as
        # parking, driveway, or emergency access
        'drive': ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                  '|track|proposed|construction|bridleway|abandoned'
                  '|platform|raceway|service"]'
                  '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                  '["service"!~"parking|parking_aisle|driveway'
                  '|emergency_access"]'),
        # walk: roads and pathways that allow pedestrian access; exclude
        # limited-access roadways but allow service roads
        'walk': ('["highway"!~"motor|proposed|construction|abandoned'
                 '|platform|raceway"]["foot"!~"no"]'
                 '["pedestrians"!~"no"]'),
    }
    try:
        return filters[network_type]
    except KeyError:
        raise ValueError('unknown network_type "{}"'.format(network_type))
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter
    response_jsons_list = []
    response_jsons = []
    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)
    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.
    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                              crs={'init': 'epsg:4326'})
    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()
    # loop through each polygon in the geometry, issuing one Overpass
    # request per sub-polygon
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                        timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
    # stitch together individual json results
    # NOTE(review): the loop variable 'json' shadows any json module
    # imported at file level; rename if the stdlib json is ever needed here
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass
    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        # dedupe nodes and ways separately by OSM id, keeping the first
        # occurrence of each
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))
    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response
    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error
    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'
    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)
    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    # the global inline flag (?s) must come at the start of the pattern:
    # mid-pattern placement is deprecated and an error since Python 3.11
    domain = re.findall(r'(?s)//(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))
    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: level must be a kwarg of log(), not of str.format()
            # (format() silently ignored it, so the WARNING level was lost)
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                            pause_duration=pause_duration,
                                            timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))
    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait until
    next slot is available.
    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value
    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration
    try:
        # a numeric first token indicates slots are available - no wait
        # needed (the actual count is irrelevant, so it is not kept)
        int(status_first_token)
        pause_duration = 0
    except ValueError:
        # narrowed from `except Exception`: int() on a str can only raise
        # ValueError, and broader catches would hide bugs in the branches
        # below
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until the slot frees up, but at least 1 second
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # quadrat edge length is the square root of the max area, so each
    # quadrat cell covers at most max_query_area_size
    quadrat_width = math.sqrt(max_query_area_size)
    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')
    # take the convex hull for any MultiPolygon, or for a single Polygon
    # whose area exceeds the maximum
    oversized = geometry.area > max_query_area_size
    if isinstance(geometry, MultiPolygon) or oversized:
        geometry = geometry.convex_hull
    # if still too large, slice the hull into quadrat-sized sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)
    # always hand back a MultiPolygon so callers can iterate uniformly
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])
    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # shapely .bounds is (minx, miny, maxx, maxy); name accordingly
    minx, miny, maxx, maxy = geometry.bounds
    # enough grid coordinates that cells are at most quadrat_width wide,
    # but never fewer than min_num lines per axis
    x_count = math.ceil((maxx - minx) / quadrat_width) + 1
    y_count = math.ceil((maxy - miny) / quadrat_width) + 1
    x_points = np.linspace(minx, maxx, num=max(x_count, min_num))
    y_points = np.linspace(miny, maxy, num=max(y_count, min_num))
    # one grid line per coordinate, spanning the geometry's full extent
    verticals = [LineString([(x, y_points[0]), (x, y_points[-1])])
                 for x in x_points]
    horizontals = [LineString([(x_points[0], y), (x_points[-1], y)])
                   for y in y_points]
    grid_lines = verticals + horizontals
    # buffer each grid line by a tiny fraction of the quadrat width, union
    # the buffers, and subtract them to slice the geometry into pieces
    buffer_size = quadrat_width * buffer_amount
    buffered_lines = [line.buffer(buffer_size) for line in grid_lines]
    cutters = unary_union(buffered_lines)
    return geometry.difference(cutters)
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a single shapely Polygon or MultiPolygon between WGS84 and UTM.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : dict or int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84; if False, project from crs to
        the local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a one-row GeoDataFrame so we can reuse
    # project_gdf for the actual CRS transformation
    wrapper = gpd.GeoDataFrame()
    wrapper.crs = crs
    wrapper.name = 'geometry to project'
    wrapper['geometry'] = None
    wrapper.loc[0, 'geometry'] = geometry
    projected = project_gdf(wrapper, to_latlong=to_latlong)
    return projected['geometry'].iloc[0], projected.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.
    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log the name of the projected GeoDataFrame and how long
        the projection took
    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        # 'name' is not a standard GeoDataFrame attribute; fall back to a
        # placeholder so the log message below cannot raise AttributeError
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project
        # (UTM zones are 6 degrees of longitude wide, numbered from -180)
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # carry the original frame's name over to the projected result
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Convert one OSM node element into a flat record dict suitable for a
    Pandas DataFrame row.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    record = {'id': e['id'],
              'lat': e['lat'],
              'lon': e['lon']}
    # copy over only the whitelisted OSM tags, if any tags are present
    if 'tags' in e:
        tags = e['tags']
        if tags is not np.nan:
            for key, value in list(tags.items()):
                if key in config.settings.keep_osm_tags:
                    record[key] = value
    return record
def process_way(e):
    """
    Convert one OSM way element into a metadata dict plus its list of
    way-to-node link records.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}
    # copy over only the whitelisted OSM tags, if any tags are present
    if 'tags' in e:
        tags = e['tags']
        if tags is not np.nan:
            for key, value in list(tags.items()):
                if key in config.settings.keep_osm_tags:
                    way[key] = value
    # one link record per node that makes up the way, preserving order
    waynodes = [{'way_id': e['id'], 'node_id': n} for n in e['nodes']]
    return way, waynodes
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # thin wrapper: download the raw OSM json for the bbox via the Overpass
    # API, then parse it into node, way and way-node DataFrames
    return parse_network_osm_query(
        osm_net_download(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min,
                         lng_max=lng_max, network_type=network_type,
                         timeout=timeout, memory=memory,
                         max_query_area_size=max_query_area_size,
                         custom_osm_filter=custom_osm_filter))
def intersection_nodes(waynodes):
    """
    Return the set of node IDs that are shared by two or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by ``ways_in_bbox``;
        must have a ``node_id`` column.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    appearance_counts = waynodes.node_id.value_counts()
    shared = appearance_counts[appearance_counts > 1]
    return set(shared.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Only nodes that are intersections (shared by 2+ ways) become pair
    endpoints; intermediate shape points are dropped, so each pair spans
    one edge of the simplified graph.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.

    Raises
    ------
    Exception
        If no connected node pairs result from the query.
    """
    start_time = time.time()
    def pairwise(l):
        # consecutive (current, next) pairs from a sequence
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    intersections = intersection_nodes(waynodes)
    # group once so each way's member nodes can be looked up cheaply
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(id).node_id.values
        # keep only intersection nodes; shape-only nodes are collapsed away
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance in meters, rounded to 6 decimals
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # copy configured OSM tags from the way onto the edge;
                # missing tags are simply skipped
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                # one-way networks also record the reverse direction as a
                # separate edge record
                if not two_way:
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
        nodesfinal has columns 'x', 'y', 'id'; edgesfinal has 'from', 'to',
        'distance' plus any retained OSM tag columns.
    """
    start_time = time.time()
    # NOTE(review): input validation uses assert, which is stripped under
    # python -O; callers relying on these checks should be aware
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        # unpack the tuple into the four scalar coordinates
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    # download and parse OSM data for the requested bbox
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    # build the edge table of intersection-to-intersection pairs
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)
    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    # reshape to the Pandana column conventions: x/y coordinates plus id
    nodesfinal = nodesfinal[['lon', 'lat']]
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
ways_in_bbox
|
python
|
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
timeout=180, memory=None,
max_query_area_size=50*1000*50*1000,
custom_osm_filter=None):
return parse_network_osm_query(
osm_net_download(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min,
lng_max=lng_max, network_type=network_type,
timeout=timeout, memory=memory,
max_query_area_size=max_query_area_size,
custom_osm_filter=custom_osm_filter))
|
Get DataFrames of OSM data in a bounding box.
Parameters
----------
lat_min : float
southern latitude of bounding box
lng_min : float
eastern longitude of bounding box
lat_max : float
northern latitude of bounding box
lng_max : float
western longitude of bounding box
network_type : {'walk', 'drive'}, optional
Specify the network type where value of 'walk' includes roadways
where pedestrians are allowed and pedestrian pathways and 'drive'
includes driveable roadways.
timeout : int
the timeout interval for requests and to pass to Overpass API
memory : int
server memory allocation size for the query, in bytes. If none,
server will use its default allocation size
max_query_area_size : float
max area for any part of the geometry, in the units the geometry is
in: any polygon bigger will get divided up for multiple queries to
Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
area, if units are meters))
custom_osm_filter : string, optional
specify custom arguments for the way["highway"] query to OSM. Must
follow Overpass API schema. For
example to request highway ways that are service roads use:
'["highway"="service"]'
Returns
-------
nodes, ways, waynodes : pandas.DataFrame
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L611-L658
|
[
"def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,\n network_type='walk', timeout=180, memory=None,\n max_query_area_size=50*1000*50*1000,\n custom_osm_filter=None):\n \"\"\"\n Download OSM ways and nodes within a bounding box from the Overpass API.\n\n Parameters\n ----------\n lat_min : float\n southern latitude of bounding box\n lng_min : float\n eastern longitude of bounding box\n lat_max : float\n northern latitude of bounding box\n lng_max : float\n western longitude of bounding box\n network_type : string\n Specify the network type where value of 'walk' includes roadways\n where pedestrians are allowed and pedestrian\n pathways and 'drive' includes driveable roadways.\n timeout : int\n the timeout interval for requests and to pass to Overpass API\n memory : int\n server memory allocation size for the query, in bytes. If none,\n server will use its default allocation size\n max_query_area_size : float\n max area for any part of the geometry, in the units the geometry is\n in: any polygon bigger will get divided up for multiple queries to\n Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in\n area, if units are meters))\n custom_osm_filter : string, optional\n specify custom arguments for the way[\"highway\"] query to OSM. Must\n follow Overpass API schema. 
For\n example to request highway ways that are service roads use:\n '[\"highway\"=\"service\"]'\n\n Returns\n -------\n response_json : dict\n \"\"\"\n\n # create a filter to exclude certain kinds of ways based on the requested\n # network_type\n if custom_osm_filter is None:\n request_filter = osm_filter(network_type)\n else:\n request_filter = custom_osm_filter\n\n response_jsons_list = []\n response_jsons = []\n\n # server memory allocation in bytes formatted for Overpass API query\n if memory is None:\n maxsize = ''\n else:\n maxsize = '[maxsize:{}]'.format(memory)\n\n # define the Overpass API query\n # way[\"highway\"] denotes ways with highway keys and {filters} returns\n # ways with the requested key/value. the '>' makes it recurse so we get\n # ways and way nodes. maxsize is in bytes.\n\n # turn bbox into a polygon and project to local UTM\n polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),\n (lng_min, lat_max), (lng_max, lat_max)])\n geometry_proj, crs_proj = project_geometry(polygon,\n crs={'init': 'epsg:4326'})\n\n # subdivide the bbox area poly if it exceeds the max area size\n # (in meters), then project back to WGS84\n geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(\n geometry_proj, max_query_area_size=max_query_area_size)\n geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,\n crs=crs_proj, to_latlong=True)\n log('Requesting network data within bounding box from Overpass API '\n 'in {:,} request(s)'.format(len(geometry)))\n start_time = time.time()\n\n # loop through each polygon in the geometry\n for poly in geometry:\n # represent bbox as lng_max, lat_min, lng_min, lat_max and round\n # lat-longs to 8 decimal places to create\n # consistent URL strings\n lng_max, lat_min, lng_min, lat_max = poly.bounds\n query_template = '[out:json][timeout:{timeout}]{maxsize};' \\\n '(way[\"highway\"]' \\\n '{filters}({lat_min:.8f},{lng_max:.8f},' \\\n '{lat_max:.8f},{lng_min:.8f});>;);out;'\n query_str = 
query_template.format(lat_max=lat_max, lat_min=lat_min,\n lng_min=lng_min, lng_max=lng_max,\n filters=request_filter,\n timeout=timeout, maxsize=maxsize)\n response_json = overpass_request(data={'data': query_str},\n timeout=timeout)\n\n response_jsons_list.append(response_json)\n\n log('Downloaded OSM network data within bounding box from Overpass '\n 'API in {:,} request(s) and'\n ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))\n\n # stitch together individual json results\n for json in response_jsons_list:\n try:\n response_jsons.extend(json['elements'])\n except KeyError:\n pass\n\n # remove duplicate records resulting from the json stitching\n start_time = time.time()\n record_count = len(response_jsons)\n\n if record_count == 0:\n raise Exception('Query resulted in no data. Check your query '\n 'parameters: {}'.format(query_str))\n else:\n response_jsons_df = pd.DataFrame.from_records(response_jsons,\n index='id')\n nodes = response_jsons_df[response_jsons_df['type'] == 'node']\n nodes = nodes[~nodes.index.duplicated(keep='first')]\n ways = response_jsons_df[response_jsons_df['type'] == 'way']\n ways = ways[~ways.index.duplicated(keep='first')]\n response_jsons_df = pd.concat([nodes, ways], axis=0)\n response_jsons_df.reset_index(inplace=True)\n response_jsons = response_jsons_df.to_dict(orient='records')\n if record_count - len(response_jsons) > 0:\n log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(\n record_count - len(response_jsons), time.time() - start_time))\n\n return {'elements': response_jsons}\n",
"def parse_network_osm_query(data):\n \"\"\"\n Convert OSM query data to DataFrames of ways and way-nodes.\n\n Parameters\n ----------\n data : dict\n Result of an OSM query.\n\n Returns\n -------\n nodes, ways, waynodes : pandas.DataFrame\n\n \"\"\"\n if len(data['elements']) == 0:\n raise RuntimeError('OSM query results contain no data.')\n\n nodes = []\n ways = []\n waynodes = []\n\n for e in data['elements']:\n if e['type'] == 'node':\n nodes.append(process_node(e))\n elif e['type'] == 'way':\n w, wn = process_way(e)\n ways.append(w)\n waynodes.extend(wn)\n\n nodes = pd.DataFrame.from_records(nodes, index='id')\n ways = pd.DataFrame.from_records(ways, index='id')\n waynodes = pd.DataFrame.from_records(waynodes, index='way_id')\n\n return (nodes, ways, waynodes)\n"
] |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Build the Overpass API way filter for the requested OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'}
        type of street network to extract

    Returns
    -------
    osm_filter : string
        filter clause to append to the Overpass way["highway"] query

    Raises
    ------
    ValueError
        If ``network_type`` is not a recognized network type.
    """
    # drive: roads drivable by normal 2-wheel-drive passenger vehicles,
    # excluding un-drivable ways and parking/driveway/emergency service roads
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: roads and pathways that allow pedestrian access, excluding
    # limited-access roadways but allowing service roads
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')
    filters = {'drive': drive_filter, 'walk': walk_filter}
    if network_type not in filters:
        raise ValueError('unknown network_type "{}"'.format(network_type))
    return filters[network_type]
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Large bounding boxes are subdivided into multiple Overpass requests and
    the JSON results are stitched back together with duplicates removed.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    response_json : dict
        dict with a single 'elements' key holding deduplicated node and
        way records

    Raises
    ------
    Exception
        If the query returned no data at all.
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter
    response_jsons_list = []
    response_jsons = []
    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)
    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.
    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})
    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()
    # loop through each polygon in the geometry, issuing one Overpass
    # request per sub-polygon
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
    # stitch together individual json results
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass
    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        # NOTE(review): query_str here is the query from the final loop
        # iteration only, not the whole set of subdivided queries
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # deduplicate by OSM id within each element type via a DataFrame
        # round-trip, keeping the first occurrence
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
        if record_count - len(response_jsons) > 0:
            log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
                record_count - len(response_jsons), time.time() - start_time))
    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    On 429 (too many requests) or 504 (gateway timeout) responses the
    request is retried recursively after a pause.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict

    Raises
    ------
    Exception
        If the server returns no JSON and the status code is not retryable.
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'
    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)
    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))
    try:
        response_json = response.json()
        if 'remark' in response_json:
            # BUG FIX: level=lg.WARNING was previously passed inside
            # str.format() (where extra keyword args are silently ignored)
            # instead of to log(), so server remarks never logged as warnings
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))
    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
        number of seconds to pause before issuing the next request
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the fourth line of the status page reports slot availability
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration
    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required (the parsed value itself is not
        # needed, so the previous unused local was removed)
        int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            # never return a non-positive pause
            pause_duration = max(pause_duration, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()
        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration
    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds the max size (in the
    geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in; any bigger polygon gets divided up for multiple Overpass API
        queries (default is 50,000 * 50,000 units, i.e. 50km x 50km in area
        if units are meters)

    Returns
    -------
    geometry : MultiPolygon

    Raises
    ------
    ValueError
        If ``geometry`` is not a shapely Polygon or MultiPolygon.
    """
    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')
    # quadrat side length = square root of the max allowed area
    quadrat_width = math.sqrt(max_query_area_size)
    # a MultiPolygon, or a single Polygon that is too large, is first
    # replaced by its convex hull
    oversized_polygon = (isinstance(geometry, Polygon)
                         and geometry.area > max_query_area_size)
    if isinstance(geometry, MultiPolygon) or oversized_polygon:
        geometry = geometry.convex_hull
    # still too large? cut it into quadrat-sized sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)
    # always hand back a MultiPolygon so callers can iterate uniformly
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])
    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified
    size, using a grid of quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # evenly spaced grid coordinates spanning the geometry's bounds
    # (bounds are (min x, min y, max x, max y))
    min_x, min_y, max_x, max_y = geometry.bounds
    x_count = math.ceil((max_x - min_x) / quadrat_width) + 1
    y_count = math.ceil((max_y - min_y) / quadrat_width) + 1
    x_points = np.linspace(min_x, max_x, num=max(x_count, min_num))
    y_points = np.linspace(min_y, max_y, num=max(y_count, min_num))
    # grid lines at each of the evenly spaced coordinates
    vertical_lines = [LineString([(x, y_points[0]), (x, y_points[-1])])
                      for x in x_points]
    horizontal_lines = [LineString([(x_points[0], y), (x_points[-1], y)])
                        for y in y_points]
    grid_lines = vertical_lines + horizontal_lines
    # buffer each grid line by a sliver of the quadrat width, union them,
    # and subtract the union to slice the geometry into quadrat pieces
    sliver = quadrat_width * buffer_amount
    quadrats = unary_union([line.buffer(sliver) for line in grid_lines])
    return geometry.difference(quadrats)
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84; if False, project from crs to
        the local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple
        (projected shapely geometry, crs of the projected geometry)
    """
    # wrap the bare geometry in a single-row GeoDataFrame so the existing
    # GeoDataFrame projection helper can be reused
    container = gpd.GeoDataFrame()
    container.crs = crs
    container.name = 'geometry to project'
    container['geometry'] = None
    container.loc[0, 'geometry'] = geometry
    projected = project_gdf(container, to_latlong=to_latlong)
    return projected['geometry'].iloc[0], projected.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took

    Returns
    -------
    gdf : GeoDataFrame
    """
    # NOTE(review): validation via assert is stripped under python -O
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        # side effect: attaches a 'name' attribute to the caller's gdf when
        # one is missing, so the log message below has something to print
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it
        # (assumes crs is a proj4-style dict -- TODO confirm for newer
        # geopandas versions where crs may be a pyproj CRS object)
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # carry the source gdf's name over onto the projected copy
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Convert one downloaded OSM node element into a flat dict suitable for
    a row of a pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
        contains 'id', 'lat', 'lon' plus any configured OSM tags present
        on the element
    """
    node = {'id': e['id'], 'lat': e['lat'], 'lon': e['lon']}
    if 'tags' in e:
        tags = e['tags']
        # tags may be NaN after a DataFrame round-trip; skip in that case
        if tags is not np.nan:
            for key, value in list(tags.items()):
                # keep only the tags the package is configured to retain
                if key in config.settings.keep_osm_tags:
                    node[key] = value
    return node
def process_way(e):
    """
    Convert one downloaded OSM way element into a way record plus its
    way-node link records, suitable for pandas DataFrames.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
        contains 'id' plus any configured OSM tags present on the element
    waynodes : list of dict
        one {'way_id', 'node_id'} record per node that makes up the way
    """
    way = {'id': e['id']}
    if 'tags' in e:
        tags = e['tags']
        # tags may be NaN after a DataFrame round-trip; skip in that case
        if tags is not np.nan:
            for key, value in list(tags.items()):
                # keep only the tags the package is configured to retain
                if key in config.settings.keep_osm_tags:
                    way[key] = value
    # link records for the nodes that make up this way, in order
    waynodes = [{'way_id': e['id'], 'node_id': node_id}
                for node_id in e['nodes']]
    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert raw OSM query results into DataFrames of nodes, ways, and
    way-node links.

    Parameters
    ----------
    data : dict
        Result of an OSM query, with an 'elements' list of node and way
        records.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame

    Raises
    ------
    RuntimeError
        If the query results contain no elements.
    """
    elements = data['elements']
    if len(elements) == 0:
        raise RuntimeError('OSM query results contain no data.')
    node_records = []
    way_records = []
    waynode_records = []
    # split the raw elements by type, flattening each into row dicts
    for element in elements:
        element_type = element['type']
        if element_type == 'node':
            node_records.append(process_node(element))
        elif element_type == 'way':
            way, waynodes = process_way(element)
            way_records.append(way)
            waynode_records.extend(waynodes)
    nodes = pd.DataFrame.from_records(node_records, index='id')
    ways = pd.DataFrame.from_records(way_records, index='id')
    waynodes = pd.DataFrame.from_records(waynode_records, index='way_id')
    return (nodes, ways, waynodes)
def intersection_nodes(waynodes):
    """
    Find the node IDs shared by two or more ways (i.e. intersections).

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    appearances = waynodes.node_id.value_counts()
    shared = appearances[appearances > 1]
    return set(shared.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.
    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.
    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()
    # yield consecutive (current, next) element pairs from a sequence
    def pairwise(l):
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    intersections = intersection_nodes(waynodes)
    # group way-node rows by way ID (the index) for fast per-way lookup
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(id).node_id.values
        # keep only intersection nodes so each edge spans a whole segment
        # between intersections rather than every OSM vertex
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance (meters), rounded to 6 decimals
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # carry whitelisted OSM tags onto the edge when present
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                if not two_way:
                    # one-way network: also emit the reversed edge record
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # index edges by (from_id, to_id) for fast pair lookup
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    # NOTE(review): input validation via assert is stripped when Python
    # runs with -O; explicit raises would be more robust
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        # unpack in the documented (lng_max, lat_min, lng_min, lat_max) order
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    # download raw OSM data and parse into node / way / way-node tables
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    # build the edge table of intersection-to-intersection distances
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)
    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # rename to the 'x'/'y' and 'from'/'to' column names Pandana expects
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
intersection_nodes
|
python
|
def intersection_nodes(waynodes):
counts = waynodes.node_id.value_counts()
return set(counts[counts > 1].index.values)
|
Returns a set of all the nodes that appear in 2 or more ways.
Parameters
----------
waynodes : pandas.DataFrame
Mapping of way IDs to node IDs as returned by `ways_in_bbox`.
Returns
-------
intersections : set
Node IDs that appear in 2 or more ways.
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L661-L677
| null |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Create a filter to query Overpass API for the specified OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string

    Raises
    ------
    ValueError
        If network_type is not one of the supported types.
    """
    # drive: only roads drivable by normal 2-wheel-drive passenger vehicles,
    # both private and public. Filter out un-drivable roads and service
    # roads tagged as parking, driveway, or emergency-access
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: roads and pathways that allow pedestrian access, both private
    # and public. Filter out limited-access roadways; allow service roads
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')
    filters = {'drive': drive_filter, 'walk': walk_filter}
    if network_type not in filters:
        raise ValueError('unknown network_type "{}"'.format(network_type))
    return filters[network_type]
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter
    response_jsons_list = []
    response_jsons = []
    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)
    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.
    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})
    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()
    # loop through each polygon in the geometry
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        # NOTE(review): poly.bounds is (minx, miny, maxx, maxy), so the
        # names below are swapped relative to convention (lng_max holds
        # minx, etc.); the template still emits the Overpass bbox in the
        # correct (south, west, north, east) order -- verify before renaming
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
    # stitch together individual json results
    # (the loop variable 'json' does not shadow the json module, which is
    # not imported in this file)
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass
    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        # query_str here is the last query built in the loop above
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))
    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'
    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)
    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))
    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: level= was previously passed to str.format (where
            # extra kwargs are silently ignored) instead of to log(), so
            # server remarks were never logged at WARNING level
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))
    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the fourth line of the status page describes slot availability
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration
    try:
        # a numeric first token indicates slots are available - no wait
        # required (previously the parsed value was stored in an unused
        # local; only the success/failure of the conversion matters)
        int(status_first_token)
        return 0
    except ValueError:
        pass
    if status_first_token == 'Slot':
        # 'Slot available after <UTC time>': wait until then (at least 1s)
        utc_time_str = status.split(' ')[3]
        utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
        pause_duration = math.ceil(
            (utc_time - dt.datetime.utcnow()).total_seconds())
        return max(pause_duration, 1)
    if status_first_token == 'Currently':
        # server is currently running a query, so check back in
        # recursive_delay seconds (recursion intentionally uses defaults,
        # preserving the original behavior)
        time.sleep(recursive_delay)
        return get_pause_duration()
    # any other status is unrecognized - log an error and return
    # default duration
    log('Unrecognized server status: "{}"'.format(status),
        level=lg.ERROR)
    return default_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry
        is in: any polygon bigger will get divided up for multiple queries
        to the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # quadrat edge length is the square root of the max allowed area
    quadrat_width = math.sqrt(max_query_area_size)
    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')
    # consolidate a MultiPolygon, or an oversized single Polygon, down to
    # its convex hull before deciding whether to subdivide
    is_oversized_polygon = (isinstance(geometry, Polygon)
                            and geometry.area > max_query_area_size)
    if isinstance(geometry, MultiPolygon) or is_oversized_polygon:
        geometry = geometry.convex_hull
    # if the (possibly consolidated) geometry still exceeds the max size,
    # cut it into quadrat-sized sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)
    # always hand back a MultiPolygon so callers can iterate uniformly
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])
    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified
    size, using a grid of quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # geometry.bounds is (minx, miny, maxx, maxy)
    x_min, y_min, x_max, y_max = geometry.bounds
    # evenly spaced grid coordinates, at least min_num per axis
    x_count = math.ceil((x_max - x_min) / quadrat_width) + 1
    y_count = math.ceil((y_max - y_min) / quadrat_width) + 1
    x_points = np.linspace(x_min, x_max, num=max(x_count, min_num))
    y_points = np.linspace(y_min, y_max, num=max(y_count, min_num))
    # one vertical grid line per x coordinate, one horizontal per y
    vertical = [LineString([(x, y_points[0]), (x, y_points[-1])])
                for x in x_points]
    horizontal = [LineString([(x_points[0], y), (x_points[-1], y)])
                  for y in y_points]
    # buffer each grid line by a tiny fraction of the quadrat width, union
    # them, and subtract from the geometry to cut it into pieces
    buffer_size = quadrat_width * buffer_amount
    grid = unary_union([line.buffer(buffer_size)
                        for line in vertical + horizontal])
    multipoly = geometry.difference(grid)
    return multipoly
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84; if False, project from crs to
        the local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a one-row GeoDataFrame so project_gdf can
    # do the projection work, then unwrap the result
    wrapper = gpd.GeoDataFrame()
    wrapper.crs = crs
    wrapper.name = 'geometry to project'
    wrapper['geometry'] = None
    wrapper.loc[0, 'geometry'] = geometry
    projected = project_gdf(wrapper, to_latlong=to_latlong)
    return projected['geometry'].iloc[0], projected.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.
    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took
    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    # NOTE(review): this function attaches a .name attribute to the input
    # gdf (used only for log messages), mutating the caller's object
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project (zones are 6 degrees wide, numbered from 180W)
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # carry the (possibly just-assigned) name over to the projected copy
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Flatten one OSM node element into a record dict for a DataFrame row.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    node = {'id': e['id'], 'lat': e['lat'], 'lon': e['lon']}
    if 'tags' in e:
        tags = e['tags']
        # skip ways whose tags field is NaN (missing in the source data)
        if tags is not np.nan:
            for key, value in tags.items():
                if key in config.settings.keep_osm_tags:
                    node[key] = value
    return node
def process_way(e):
    """
    Flatten one OSM way element into a metadata record plus its ordered
    way-to-node membership records.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}
    if 'tags' in e:
        tags = e['tags']
        # skip ways whose tags field is NaN (missing in the source data)
        if tags is not np.nan:
            for key, value in tags.items():
                if key in config.settings.keep_osm_tags:
                    way[key] = value
    # one membership record per node, preserving the way's node order
    waynodes = [{'way_id': e['id'], 'node_id': node_id}
                for node_id in e['nodes']]
    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of nodes, ways and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame

    Raises
    ------
    RuntimeError
        If the query result contains no elements.
    """
    if not data['elements']:
        raise RuntimeError('OSM query results contain no data.')
    nodes, ways, waynodes = [], [], []
    # partition the elements into node records and way (+ membership)
    # records; any other element types are ignored
    for element in data['elements']:
        if element['type'] == 'node':
            nodes.append(process_node(element))
        elif element['type'] == 'way':
            way_record, membership = process_way(element)
            ways.append(way_record)
            waynodes.extend(membership)
    nodes_df = pd.DataFrame.from_records(nodes, index='id')
    ways_df = pd.DataFrame.from_records(ways, index='id')
    waynodes_df = pd.DataFrame.from_records(waynodes, index='way_id')
    return (nodes_df, ways_df, waynodes_df)
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry
        is in: any polygon bigger will get divided up for multiple queries
        to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km
        in area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For example to request highway ways
        that are service roads use: '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download raw OSM json for the bbox, then parse it into the three
    # node / way / way-node DataFrames
    raw = osm_net_download(lat_min=lat_min, lat_max=lat_max,
                           lng_min=lng_min, lng_max=lng_max,
                           network_type=network_type, timeout=timeout,
                           memory=memory,
                           max_query_area_size=max_query_area_size,
                           custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.
    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.
    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()
    # yield consecutive (current, next) element pairs from a sequence
    def pairwise(l):
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    intersections = intersection_nodes(waynodes)
    # group way-node rows by way ID (the index) for fast per-way lookup
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(id).node_id.values
        # keep only intersection nodes so each edge spans a whole segment
        # between intersections rather than every OSM vertex
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance (meters), rounded to 6 decimals
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # carry whitelisted OSM tags onto the edge when present
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)
                if not two_way:
                    # one-way network: also emit the reversed edge record
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)
    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # index edges by (from_id, to_id) for fast pair lookup
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.
    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'
    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    # NOTE(review): input validation via assert is stripped when Python
    # runs with -O; explicit raises would be more robust
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        # unpack in the documented (lng_max, lat_min, lng_min, lat_max) order
        lng_max, lat_min, lng_min, lat_max = bbox
    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'
    # download raw OSM data and parse into node / way / way-node tables
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))
    # build the edge table of intersection-to-intersection distances
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)
    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    nodesfinal = nodesfinal[['lon', 'lat']]
    # rename to the 'x'/'y' and 'from'/'to' column names Pandana expects
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
node_pairs
|
python
|
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata, indexed by way ID.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_id'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()

    def pairwise(l):
        # yield consecutive (current, next) pairs: [a, b, c] -> (a,b), (b,c)
        return zip(islice(l, 0, len(l)), islice(l, 1, None))

    # only nodes shared by 2+ ways (intersections) become graph vertices
    intersections = intersection_nodes(waynodes)

    # group by way ID (the index) so each way's ordered node list is cheap
    # to fetch inside the loop
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []
    for id, row in ways.iterrows():
        nodes_in_way = waymap.get_group(id).node_id.values
        # drop non-intersection nodes: edges run intersection-to-intersection
        nodes_in_way = [x for x in nodes_in_way if x in intersections]
        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue
        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]
                # great-circle distance in meters, rounded to 6 decimals
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)
                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}
                # copy configured OSM tags (e.g. 'highway') onto the edge;
                # ways missing a tag simply omit that column
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass
                pairs.append(col_dict)

                if not two_way:
                    # one-way networks store an explicit reverse edge
                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}
                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass
                    pairs.append(col_dict)

    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))
    return pairs
|
Create a table of node pairs with the distances between them.
Parameters
----------
nodes : pandas.DataFrame
Must have 'lat' and 'lon' columns.
ways : pandas.DataFrame
Table of way metadata.
waynodes : pandas.DataFrame
Table linking way IDs to node IDs. Way IDs should be in the index,
with a column called 'node_ids'.
two_way : bool, optional
Whether the routes are two-way. If True, node pairs will only
occur once. Default is True.
Returns
-------
pairs : pandas.DataFrame
Will have columns of 'from_id', 'to_id', and 'distance'.
The index will be a MultiIndex of (from id, to id).
The distance metric is in meters.
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L680-L764
|
[
"def log(message, level=None, name=None, filename=None):\n \"\"\"\n Write a message to the log file and/or print to the console.\n\n Parameters\n ----------\n message : string\n the content of the message to log\n level : int\n one of the logger.level constants\n name : string\n name of the logger\n filename : string\n name of the log file\n\n Returns\n -------\n None\n \"\"\"\n\n if level is None:\n level = lg.INFO\n if name is None:\n name = config.settings.log_name\n if filename is None:\n filename = config.settings.log_filename\n\n if config.settings.log_file:\n # get the current logger or create a new one then log message at\n # requested level\n logger = get_logger(level=level, name=name, filename=filename)\n if level == lg.DEBUG:\n logger.debug(message)\n elif level == lg.INFO:\n logger.info(message)\n elif level == lg.WARNING:\n logger.warning(message)\n elif level == lg.ERROR:\n logger.error(message)\n\n # if logging to console is turned on, convert message to ascii and print\n # to the console only\n if config.settings.log_console: # pragma: no cover\n # capture current stdout, then switch it to the console, print the\n # message, then switch back to what had been the stdout\n # this prevents logging to notebook - instead, it goes to console\n standard_out = sys.stdout\n sys.stdout = sys.__stdout__\n\n # convert message to ascii for proper console display in windows\n # terminals\n message = unicodedata.normalize(\n 'NFKD', str(message)).encode('ascii', errors='replace').decode()\n print(message)\n sys.stdout = standard_out\n # otherwise print out standard statement\n else:\n print(message)\n",
"def great_circle_dist(lat1, lon1, lat2, lon2):\n \"\"\"\n Get the distance (in meters) between two lat/lon points\n via the Haversine formula.\n\n Parameters\n ----------\n lat1, lon1, lat2, lon2 : float\n Latitude and longitude in degrees.\n\n Returns\n -------\n dist : float\n Distance in meters.\n\n \"\"\"\n radius = 6372795 # meters\n\n lat1 = math.radians(lat1)\n lon1 = math.radians(lon1)\n lat2 = math.radians(lat2)\n lon2 = math.radians(lon2)\n\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n\n # formula from:\n # http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula\n a = math.pow(math.sin(dlat / 2), 2)\n b = math.cos(lat1) * math.cos(lat2) * math.pow(math.sin(dlon / 2), 2)\n d = 2 * radius * math.asin(math.sqrt(a + b))\n\n return d\n",
"def intersection_nodes(waynodes):\n \"\"\"\n Returns a set of all the nodes that appear in 2 or more ways.\n\n Parameters\n ----------\n waynodes : pandas.DataFrame\n Mapping of way IDs to node IDs as returned by `ways_in_bbox`.\n\n Returns\n -------\n intersections : set\n Node IDs that appear in 2 or more ways.\n\n \"\"\"\n counts = waynodes.node_id.value_counts()\n return set(counts[counts > 1].index.values)\n",
"def pairwise(l):\n return zip(islice(l, 0, len(l)), islice(l, 1, None))\n"
] |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Build the Overpass API way-filter string for the requested OSM
    network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string
    """
    # drive: only roads drivable by a normal 2-wheel-drive passenger
    # vehicle, public or private. Excludes non-drivable ways and service
    # roads tagged as parking, driveway, or emergency access.
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: roads and pathways open to pedestrians, public or private.
    # Excludes limited-access roadways but allows service roads.
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')

    filters = {'drive': drive_filter, 'walk': walk_filter}
    try:
        return filters[network_type]
    except KeyError:
        raise ValueError('unknown network_type "{}"'.format(network_type))
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter

    response_jsons_list = []
    response_jsons = []

    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.

    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})

    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()

    # loop through each polygon in the (possibly subdivided) geometry and
    # issue one Overpass request per sub-polygon
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))

    # stitch together individual json results
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            # response had no 'elements' key; skip it
            pass

    # remove duplicate records resulting from the json stitching
    # (sub-polygon queries overlap slightly, so nodes/ways near the seams
    # can appear in more than one response)
    start_time = time.time()
    record_count = len(response_jsons)

    if record_count == 0:
        # NOTE: query_str here is the last query issued in the loop above
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
        if record_count - len(response_jsons) > 0:
            log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
                record_count - len(response_jsons), time.time() - start_time))

    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: previously level=lg.WARNING was passed to
            # str.format() (where it was silently ignored) instead of
            # log(), so server remarks were logged at the default level
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))

    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait
    until the next request slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # the fourth line of the status page describes slot availability
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration

    try:
        # a numeric first token indicates the number of slots available -
        # no wait required. int() is called only as a probe: its ValueError
        # routes non-numeric statuses to the except branch (the previously
        # assigned-but-unused 'available_slots' local has been removed)
        int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until the advertised free time, at least 1 second
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)

        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()

        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration

    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # quadrat edge length (for subdividing) is the square root of the
    # maximum allowed area
    quadrat_width = math.sqrt(max_query_area_size)

    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # collapse to a convex hull when the geometry is a MultiPolygon, or a
    # single Polygon that already exceeds the size limit
    is_multi = isinstance(geometry, MultiPolygon)
    oversized_poly = (isinstance(geometry, Polygon)
                      and geometry.area > max_query_area_size)
    if is_multi or oversized_poly:
        geometry = geometry.convex_hull

    # if the (possibly hulled) geometry is still too big, cut it into
    # quadrat-sized sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    # normalize the return type: always hand back a MultiPolygon
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified
    size, using a grid of quadrat lines.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # evenly spaced coordinates across the geometry's bounds. NOTE: .bounds
    # returns (minx, miny, maxx, maxy); the lng_max/lng_min naming here
    # follows this module's (inverted-looking) bbox convention.
    lng_max, lat_min, lng_min, lat_max = geometry.bounds
    n_x = math.ceil((lng_min - lng_max) / quadrat_width) + 1
    n_y = math.ceil((lat_max - lat_min) / quadrat_width) + 1
    x_points = np.linspace(lng_max, lng_min, num=max(n_x, min_num))
    y_points = np.linspace(lat_min, lat_max, num=max(n_y, min_num))

    # one full-extent grid line per coordinate: verticals then horizontals
    grid_lines = [LineString([(x, y_points[0]), (x, y_points[-1])])
                  for x in x_points]
    grid_lines += [LineString([(x_points[0], y), (x_points[-1], y)])
                   for y in y_points]

    # buffer each line by a sliver (quadrat_width / ~1 billion) so the
    # difference operation cleanly separates the pieces, union them, then
    # cut the geometry into quadrat-sized parts
    pad = quadrat_width * buffer_amount
    quadrats = unary_union([line.buffer(pad) for line in grid_lines])
    return geometry.difference(quadrats)
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a one-row GeoDataFrame so project_gdf can
    # handle the CRS transformation
    wrapper = gpd.GeoDataFrame()
    wrapper.crs = crs
    wrapper.name = 'geometry to project'
    wrapper['geometry'] = None
    wrapper.loc[0, 'geometry'] = geometry

    projected = project_gdf(wrapper, to_latlong=to_latlong)
    # unwrap: hand back the single projected geometry plus its new CRS
    return projected['geometry'].iloc[0], projected.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log the projection step and how long it took

    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project (UTM zones are 6 degrees of longitude wide)
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # carry the (possibly defaulted) name over to the projected frame
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Process a node element entry into a dict suitable for going into a
    Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
    """
    node = {key: e[key] for key in ('id', 'lat', 'lon')}

    # copy only the OSM tags configured to be kept; tags may be absent
    # entirely or present as NaN in stitched responses
    if 'tags' in e and e['tags'] is not np.nan:
        for tag_key, tag_value in list(e['tags'].items()):
            if tag_key in config.settings.keep_osm_tags:
                node[tag_key] = tag_value

    return node
def process_way(e):
    """
    Process a way element entry into a list of dicts suitable for going
    into a Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
    waynodes : list of dict
    """
    way = {'id': e['id']}

    # copy only the OSM tags configured to be kept; tags may be absent
    # entirely or present as NaN in stitched responses
    if 'tags' in e and e['tags'] is not np.nan:
        for tag_key, tag_value in list(e['tags'].items()):
            if tag_key in config.settings.keep_osm_tags:
                way[tag_key] = tag_value

    # one record per node in the way, preserving the node ordering
    waynodes = [{'way_id': e['id'], 'node_id': node_id}
                for node_id in e['nodes']]

    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of nodes, ways, and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    if len(data['elements']) == 0:
        raise RuntimeError('OSM query results contain no data.')

    node_records = []
    way_records = []
    waynode_records = []

    # dispatch each element by its OSM type; anything else is ignored
    for element in data['elements']:
        element_type = element['type']
        if element_type == 'node':
            node_records.append(process_node(element))
        elif element_type == 'way':
            way, way_nodes = process_way(element)
            way_records.append(way)
            waynode_records.extend(way_nodes)

    nodes = pd.DataFrame.from_records(node_records, index='id')
    ways = pd.DataFrame.from_records(way_records, index='id')
    waynodes = pd.DataFrame.from_records(waynode_records, index='way_id')

    return (nodes, ways, waynodes)
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download raw OSM json for the bbox, then parse it into DataFrames
    raw_osm = osm_net_download(
        lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max,
        network_type=network_type, timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm)
def intersection_nodes(waynodes):
    """
    Returns a set of all the nodes that appear in 2 or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # a node shared by multiple ways appears multiple times in the mapping
    occurrences = waynodes.node_id.value_counts()
    shared = occurrences[occurrences > 1]
    return set(shared.index.values)
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network from a bounding lat/lon box composed of nodes and
    edges for use in Pandana street network accessibility calculations.
    You may either enter a lat/long box via the four lat_min,
    lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_min : float
        eastern longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lat_max : float
        northern latitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    lng_max : float
        western longitude of bounding box, if this parameter is used the bbox
        parameter should be None.
    bbox : tuple
        Bounding box formatted as a 4 element tuple:
        (lng_max, lat_min, lng_min, lat_max)
        example: (-122.304611,37.798933,-122.263412,37.822802)
        a bbox can be extracted for an area using: the CSV format bbox from
        http://boundingbox.klokantech.com/. If this parameter is used the
        lat_min, lng_min, lat_max, lng_max parameters in this function
        should be None.
    network_type : {'walk', 'drive'}, optional
        Specify the network type where value of 'walk' includes roadways where
        pedestrians are allowed and pedestrian pathways and 'drive' includes
        driveable roadways. To use a custom definition see the
        custom_osm_filter parameter. Default is walk.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once.
    timeout : int, optional
        the timeout interval for requests and to pass to Overpass API
    memory : int, optional
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float, optional
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()

    # the two bbox input styles are mutually exclusive: either a 4-tuple or
    # the four individual coordinates, never both
    if bbox is not None:
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'

        lng_max, lat_min, lng_min, lat_max = bbox

    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'

    # download and parse OSM data, then build edges between intersections
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))

    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)

    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    # rename to the x/y, from/to column names Pandana expects
    nodesfinal = nodesfinal[['lon', 'lat']]
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))

    return nodesfinal, edgesfinal
|
UDST/osmnet
|
osmnet/load.py
|
network_from_bbox
|
python
|
def network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                      bbox=None, network_type='walk', two_way=True,
                      timeout=180, memory=None,
                      max_query_area_size=50*1000*50*1000,
                      custom_osm_filter=None):
    """
    Make a graph network (Pandana-style node and edge tables) from a
    bounding lat/lon box.

    Supply either the four lat_min/lng_min/lat_max/lng_max floats or the
    single ``bbox`` tuple of (lng_max, lat_min, lng_min, lat_max) -- not
    both.

    Parameters
    ----------
    lat_min, lng_min, lat_max, lng_max : float
        bounding box coordinates; must be None when ``bbox`` is given
    bbox : tuple
        4-element (lng_max, lat_min, lng_min, lat_max) tuple
    network_type : {'walk', 'drive'}, optional
        which OSM way filter to apply (see osm_filter)
    two_way : bool, optional
        whether routes are two-way; if True each node pair occurs once
    timeout : int, optional
        timeout for requests and the Overpass API
    memory : int, optional
        Overpass server memory allocation in bytes; None uses the default
    max_query_area_size : float, optional
        max area per sub-query polygon (geometry units)
    custom_osm_filter : string, optional
        custom Overpass way["highway"] filter clause

    Returns
    -------
    nodesfinal, edgesfinal : pandas.DataFrame
    """
    start_time = time.time()
    if bbox is not None:
        # bbox mode: require a 4-tuple and forbid the individual
        # coordinate arguments at the same time
        assert isinstance(bbox, tuple) \
            and len(bbox) == 4, 'bbox must be a 4 element tuple'
        assert (lat_min is None) and (lng_min is None) and \
            (lat_max is None) and (lng_max is None), \
            'lat_min, lng_min, lat_max and lng_max must be None ' \
            'if you are using bbox'
        # unpack in the (lng_max, lat_min, lng_min, lat_max) order used
        # consistently throughout this module
        lng_max, lat_min, lng_min, lat_max = bbox

    assert lat_min is not None, 'lat_min cannot be None'
    assert lng_min is not None, 'lng_min cannot be None'
    assert lat_max is not None, 'lat_max cannot be None'
    assert lng_max is not None, 'lng_max cannot be None'
    assert isinstance(lat_min, float) and isinstance(lng_min, float) and \
        isinstance(lat_max, float) and isinstance(lng_max, float), \
        'lat_min, lng_min, lat_max, and lng_max must be floats'

    # download OSM data and parse it into node/way/way-node tables
    nodes, ways, waynodes = ways_in_bbox(
        lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
        network_type=network_type, timeout=timeout,
        memory=memory, max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    log('Returning OSM data with {:,} nodes and {:,} ways...'
        .format(len(nodes), len(ways)))

    # build the edge table of connected intersection-node pairs
    edgesfinal = node_pairs(nodes, ways, waynodes, two_way=two_way)

    # make the unique set of nodes that ended up in pairs
    node_ids = sorted(set(edgesfinal['from_id'].unique())
                      .union(set(edgesfinal['to_id'].unique())))
    nodesfinal = nodes.loc[node_ids]
    # Pandana expects 'x'/'y' coordinate columns and an 'id' column
    nodesfinal = nodesfinal[['lon', 'lat']]
    nodesfinal.rename(columns={'lon': 'x', 'lat': 'y'}, inplace=True)
    nodesfinal['id'] = nodesfinal.index
    edgesfinal.rename(columns={'from_id': 'from', 'to_id': 'to'}, inplace=True)
    log('Returning processed graph with {:,} nodes and {:,} edges...'
        .format(len(nodesfinal), len(edgesfinal)))
    log('Completed OSM data download and Pandana node and edge table '
        'creation in {:,.2f} seconds'.format(time.time()-start_time))
    return nodesfinal, edgesfinal
|
Make a graph network from a bounding lat/lon box composed of nodes and
edges for use in Pandana street network accessibility calculations.
You may either enter a lat/long box via the four lat_min,
lng_min, lat_max, lng_max parameters or the bbox parameter as a tuple.
Parameters
----------
lat_min : float
southern latitude of bounding box, if this parameter is used the bbox
parameter should be None.
lng_min : float
eastern latitude of bounding box, if this parameter is used the bbox
parameter should be None.
lat_max : float
northern longitude of bounding box, if this parameter is used the bbox
parameter should be None.
lng_max : float
western longitude of bounding box, if this parameter is used the bbox
parameter should be None.
bbox : tuple
Bounding box formatted as a 4 element tuple:
(lng_max, lat_min, lng_min, lat_max)
example: (-122.304611,37.798933,-122.263412,37.822802)
a bbox can be extracted for an area using: the CSV format bbox from
http://boundingbox.klokantech.com/. If this parameter is used the
lat_min, lng_min, lat_max, lng_max parameters in this function
should be None.
network_type : {'walk', 'drive'}, optional
Specify the network type where value of 'walk' includes roadways where
pedestrians are allowed and pedestrian pathways and 'drive' includes
driveable roadways. To use a custom definition see the
custom_osm_filter parameter. Default is walk.
two_way : bool, optional
Whether the routes are two-way. If True, node pairs will only
occur once.
timeout : int, optional
the timeout interval for requests and to pass to Overpass API
memory : int, optional
server memory allocation size for the query, in bytes. If none,
server will use its default allocation size
max_query_area_size : float, optional
max area for any part of the geometry, in the units the geometry is
in: any polygon bigger will get divided up for multiple queries to
Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
area, if units are meters))
custom_osm_filter : string, optional
specify custom arguments for the way["highway"] query to OSM. Must
follow Overpass API schema. For
example to request highway ways that are service roads use:
'["highway"="service"]'
Returns
-------
nodesfinal, edgesfinal : pandas.DataFrame
|
train
|
https://github.com/UDST/osmnet/blob/155110a8e38d3646b9dbc3ec729063930cab3d5f/osmnet/load.py#L767-L873
|
[
"def log(message, level=None, name=None, filename=None):\n \"\"\"\n Write a message to the log file and/or print to the console.\n\n Parameters\n ----------\n message : string\n the content of the message to log\n level : int\n one of the logger.level constants\n name : string\n name of the logger\n filename : string\n name of the log file\n\n Returns\n -------\n None\n \"\"\"\n\n if level is None:\n level = lg.INFO\n if name is None:\n name = config.settings.log_name\n if filename is None:\n filename = config.settings.log_filename\n\n if config.settings.log_file:\n # get the current logger or create a new one then log message at\n # requested level\n logger = get_logger(level=level, name=name, filename=filename)\n if level == lg.DEBUG:\n logger.debug(message)\n elif level == lg.INFO:\n logger.info(message)\n elif level == lg.WARNING:\n logger.warning(message)\n elif level == lg.ERROR:\n logger.error(message)\n\n # if logging to console is turned on, convert message to ascii and print\n # to the console only\n if config.settings.log_console: # pragma: no cover\n # capture current stdout, then switch it to the console, print the\n # message, then switch back to what had been the stdout\n # this prevents logging to notebook - instead, it goes to console\n standard_out = sys.stdout\n sys.stdout = sys.__stdout__\n\n # convert message to ascii for proper console display in windows\n # terminals\n message = unicodedata.normalize(\n 'NFKD', str(message)).encode('ascii', errors='replace').decode()\n print(message)\n sys.stdout = standard_out\n # otherwise print out standard statement\n else:\n print(message)\n",
"def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,\n timeout=180, memory=None,\n max_query_area_size=50*1000*50*1000,\n custom_osm_filter=None):\n \"\"\"\n Get DataFrames of OSM data in a bounding box.\n\n Parameters\n ----------\n lat_min : float\n southern latitude of bounding box\n lng_min : float\n eastern longitude of bounding box\n lat_max : float\n northern latitude of bounding box\n lng_max : float\n western longitude of bounding box\n network_type : {'walk', 'drive'}, optional\n Specify the network type where value of 'walk' includes roadways\n where pedestrians are allowed and pedestrian pathways and 'drive'\n includes driveable roadways.\n timeout : int\n the timeout interval for requests and to pass to Overpass API\n memory : int\n server memory allocation size for the query, in bytes. If none,\n server will use its default allocation size\n max_query_area_size : float\n max area for any part of the geometry, in the units the geometry is\n in: any polygon bigger will get divided up for multiple queries to\n Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in\n area, if units are meters))\n custom_osm_filter : string, optional\n specify custom arguments for the way[\"highway\"] query to OSM. Must\n follow Overpass API schema. For\n example to request highway ways that are service roads use:\n '[\"highway\"=\"service\"]'\n\n Returns\n -------\n nodes, ways, waynodes : pandas.DataFrame\n\n \"\"\"\n return parse_network_osm_query(\n osm_net_download(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min,\n lng_max=lng_max, network_type=network_type,\n timeout=timeout, memory=memory,\n max_query_area_size=max_query_area_size,\n custom_osm_filter=custom_osm_filter))\n",
"def node_pairs(nodes, ways, waynodes, two_way=True):\n \"\"\"\n Create a table of node pairs with the distances between them.\n\n Parameters\n ----------\n nodes : pandas.DataFrame\n Must have 'lat' and 'lon' columns.\n ways : pandas.DataFrame\n Table of way metadata.\n waynodes : pandas.DataFrame\n Table linking way IDs to node IDs. Way IDs should be in the index,\n with a column called 'node_ids'.\n two_way : bool, optional\n Whether the routes are two-way. If True, node pairs will only\n occur once. Default is True.\n\n Returns\n -------\n pairs : pandas.DataFrame\n Will have columns of 'from_id', 'to_id', and 'distance'.\n The index will be a MultiIndex of (from id, to id).\n The distance metric is in meters.\n\n \"\"\"\n start_time = time.time()\n\n def pairwise(l):\n return zip(islice(l, 0, len(l)), islice(l, 1, None))\n intersections = intersection_nodes(waynodes)\n waymap = waynodes.groupby(level=0, sort=False)\n pairs = []\n\n for id, row in ways.iterrows():\n nodes_in_way = waymap.get_group(id).node_id.values\n nodes_in_way = [x for x in nodes_in_way if x in intersections]\n\n if len(nodes_in_way) < 2:\n # no nodes to connect in this way\n continue\n\n for from_node, to_node in pairwise(nodes_in_way):\n if from_node != to_node:\n fn = nodes.loc[from_node]\n tn = nodes.loc[to_node]\n\n distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)\n\n col_dict = {'from_id': from_node,\n 'to_id': to_node,\n 'distance': distance}\n\n for tag in config.settings.keep_osm_tags:\n try:\n col_dict.update({tag: row[tag]})\n except KeyError:\n pass\n\n pairs.append(col_dict)\n\n if not two_way:\n\n col_dict = {'from_id': to_node,\n 'to_id': from_node,\n 'distance': distance}\n\n for tag in config.settings.keep_osm_tags:\n try:\n col_dict.update({tag: row[tag]})\n except KeyError:\n pass\n\n pairs.append(col_dict)\n\n pairs = pd.DataFrame.from_records(pairs)\n if pairs.empty:\n raise Exception('Query resulted in no connected node pairs. 
Check '\n 'your query parameters or bounding box')\n else:\n pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,\n pairs['to_id'].values])\n log('Edge node pairs completed. Took {:,.2f} seconds'\n .format(time.time()-start_time))\n\n return pairs\n"
] |
# The following functions to download osm data, setup a recursive api request
# and subdivide bbox queries into smaller bboxes were modified from the
# osmnx library and used with permission from the author Geoff Boeing
# osm_net_download, overpass_request, get_pause_duration,
# consolidate_subdivide_geometry, quadrat_cut_geometry:
# https://github.com/gboeing/osmnx/blob/master/osmnx/core.py
# project_geometry, project_gdf:
# https://github.com/gboeing/osmnx/blob/master/osmnx/projection.py
from __future__ import division
from itertools import islice
import re
import pandas as pd
import requests
import math
import time
import logging as lg
import numpy as np
from shapely.geometry import LineString, Polygon, MultiPolygon
from shapely.ops import unary_union
from dateutil import parser as date_parser
import datetime as dt
import geopandas as gpd
from osmnet import config
from osmnet.utils import log, great_circle_dist as gcd
def osm_filter(network_type):
    """
    Create a filter to query Overpass API for the specified OSM network type.

    Parameters
    ----------
    network_type : {'walk', 'drive'}
        the type of street network to extract

    Returns
    -------
    osm_filter : string
        Overpass API tag-filter clause for way["highway"] queries

    Raises
    ------
    ValueError
        if ``network_type`` is not a supported value
    """
    filters = {}

    # drive: select only roads that are drivable by normal 2 wheel drive
    # passenger vehicles both private and public
    # roads. Filter out un-drivable roads and service roads tagged as parking,
    # driveway, or emergency-access
    filters['drive'] = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                        '|track|proposed|construction|bridleway|abandoned'
                        '|platform|raceway|service"]'
                        '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                        '["service"!~"parking|parking_aisle|driveway'
                        '|emergency_access"]')

    # walk: select only roads and pathways that allow pedestrian access both
    # private and public pathways and roads.
    # Filter out limited access roadways and allow service roads
    filters['walk'] = ('["highway"!~"motor|proposed|construction|abandoned'
                       '|platform|raceway"]["foot"!~"no"]'
                       '["pedestrians"!~"no"]')

    # return directly instead of assigning to a local that shadowed this
    # function's own name (the previous `osm_filter = filters[...]` pattern)
    try:
        return filters[network_type]
    except KeyError:
        raise ValueError('unknown network_type "{}"'.format(network_type))
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    response_json : dict
        single dict with an 'elements' list combining all sub-query results,
        with duplicate node/way records removed
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter

    response_jsons_list = []
    response_jsons = []

    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.

    # turn bbox into a polygon and project to local UTM so the area check in
    # consolidate_subdivide_geometry is done in meters
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})

    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()

    # loop through each polygon in the geometry, issuing one Overpass
    # request per sub-polygon
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))

    # stitch together individual json results into one flat element list
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass

    # remove duplicate records resulting from the json stitching (adjacent
    # sub-queries can return the same nodes/ways along shared edges)
    start_time = time.time()
    record_count = len(response_jsons)

    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # de-duplicate nodes and ways separately by their OSM id, keeping
        # the first occurrence of each
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))

    return {'elements': response_jsons}
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response.

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            # bug fix: level=lg.WARNING was previously passed inside
            # str.format() (where unused kwargs are silently ignored)
            # instead of to log(), so remarks were logged at the default
            # level; pass it to log() so remarks surface as warnings
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)
        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason, response.text))

    return response_json
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait until
    next slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
        seconds to wait before the next request (0 if a slot is free)
    """
    try:
        # the status endpoint is plain text; line 4 describes slot
        # availability (assumes the endpoint's current output format --
        # TODO confirm if Overpass changes its status page layout)
        response = requests.get('http://overpass-api.de/api/status')
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration

    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required. int() raising is the branch signal;
        # the parsed value itself is intentionally unused.
        available_slots = int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until that UTC timestamp, but at least 1 second
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)

        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()

        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration

    return pause_duration
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # quadrat edge length: square root of the max allowed quadrat area
    quadrat_width = math.sqrt(max_query_area_size)

    # take the convex hull whenever the geometry is a MultiPolygon or a
    # single Polygon larger than the limit (since the type was validated
    # above, this is equivalent to the original Polygon/MultiPolygon test)
    if isinstance(geometry, MultiPolygon) or \
            geometry.area > max_query_area_size:
        geometry = geometry.convex_hull

    # cut the (possibly hulled) geometry into quadrats if still too large
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    # always hand back a MultiPolygon so callers can iterate uniformly
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # shapely bounds are (min_x, min_y, max_x, max_y)
    min_x, min_y, max_x, max_y = geometry.bounds

    # number of grid lines per axis: enough to keep cells <= quadrat_width,
    # but never fewer than min_num
    x_count = math.ceil((max_x - min_x) / quadrat_width) + 1
    y_count = math.ceil((max_y - min_y) / quadrat_width) + 1
    x_points = np.linspace(min_x, max_x, num=max(x_count, min_num))
    y_points = np.linspace(min_y, max_y, num=max(y_count, min_num))

    # build the full quadrat grid from vertical and horizontal lines
    vertical = [LineString([(x, y_points[0]), (x, y_points[-1])])
                for x in x_points]
    horizontal = [LineString([(x_points[0], y), (x_points[-1], y)])
                  for y in y_points]

    # buffer each line by a tiny fraction of the quadrat width so their
    # union has area, then subtract it to slice the geometry into pieces
    buffer_size = quadrat_width * buffer_amount
    buffered = [line.buffer(buffer_size) for line in vertical + horizontal]
    quadrats = unary_union(buffered)
    return geometry.difference(quadrats)
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the single geometry in a one-row GeoDataFrame so project_gdf
    # can do the CRS work, then unwrap the projected result
    wrapper = gpd.GeoDataFrame()
    wrapper.crs = crs
    wrapper.name = 'geometry to project'
    wrapper['geometry'] = None
    wrapper.loc[0, 'geometry'] = geometry

    projected = project_gdf(wrapper, to_latlong=to_latlong)
    return projected['geometry'].iloc[0], projected.crs
def project_gdf(gdf, to_latlong=False, verbose=False):
    """
    Project a GeoDataFrame to the UTM zone appropriate for its geometries'
    centroid. The calculation works well for most latitudes,
    however it will not work well for some far northern locations.

    Parameters
    ----------
    gdf : GeoDataFrame
        the gdf to be projected to UTM
    to_latlong : bool
        if True, projects to WGS84 instead of to UTM
    verbose : bool
        if True, log how long the projection took

    Returns
    -------
    gdf : GeoDataFrame
    """
    assert len(gdf) > 0, 'You cannot project an empty GeoDataFrame.'
    start_time = time.time()
    if to_latlong:
        # if to_latlong is True, project the gdf to WGS84
        latlong_crs = {'init': 'epsg:4326'}
        projected_gdf = gdf.to_crs(latlong_crs)
        # gdf.name is read at the end; make sure it exists
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to EPSG 4326 in {:,.2f} '
                'seconds'.format(gdf.name, time.time()-start_time))
    else:
        # else, project the gdf to UTM
        # if GeoDataFrame is already in UTM, return it unchanged
        if (gdf.crs is not None) and ('proj' in gdf.crs) \
                and (gdf.crs['proj'] == 'utm'):
            return gdf
        # calculate the centroid of the union of all the geometries in the
        # GeoDataFrame
        avg_longitude = gdf['geometry'].unary_union.centroid.x
        # calculate the UTM zone from this avg longitude and define the
        # UTM CRS to project (6-degree-wide zones, zone 1 starts at -180)
        utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
        utm_crs = {'datum': 'NAD83',
                   'ellps': 'GRS80',
                   'proj': 'utm',
                   'zone': utm_zone,
                   'units': 'm'}
        # project the GeoDataFrame to the UTM CRS
        projected_gdf = gdf.to_crs(utm_crs)
        if not hasattr(gdf, 'name'):
            gdf.name = 'unnamed'
        if verbose:
            log('Projected the GeoDataFrame "{}" to UTM-{} in {:,.2f} '
                'seconds'.format(gdf.name, utm_zone, time.time()-start_time))
    # carry the (possibly just-defaulted) name over to the result
    projected_gdf.name = gdf.name
    return projected_gdf
def process_node(e):
    """
    Process a node element entry into a dict suitable for going into a
    Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual node element in downloaded OSM json

    Returns
    -------
    node : dict
        contains 'id', 'lat', 'lon' plus any whitelisted OSM tags
    """
    node = {key: e[key] for key in ('id', 'lat', 'lon')}

    # copy across only the configured subset of OSM tags; a NaN tags
    # value (from pandas round-tripping) means "no tags"
    if 'tags' in e and e['tags'] is not np.nan:
        for tag_key, tag_value in list(e['tags'].items()):
            if tag_key in config.settings.keep_osm_tags:
                node[tag_key] = tag_value

    return node
def process_way(e):
    """
    Process a way element entry into a list of dicts suitable for going into
    a Pandas DataFrame.

    Parameters
    ----------
    e : dict
        individual way element in downloaded OSM json

    Returns
    -------
    way : dict
        the way's 'id' plus any whitelisted OSM tags
    waynodes : list of dict
        one {'way_id', 'node_id'} record per node in the way
    """
    way = {'id': e['id']}

    # copy across only the configured subset of OSM tags; a NaN tags
    # value (from pandas round-tripping) means "no tags"
    if 'tags' in e and e['tags'] is not np.nan:
        for tag_key, tag_value in list(e['tags'].items()):
            if tag_key in config.settings.keep_osm_tags:
                way[tag_key] = tag_value

    # one record per constituent node, preserving the way's node order
    waynodes = [{'way_id': e['id'], 'node_id': node_id}
                for node_id in e['nodes']]

    return way, waynodes
def parse_network_osm_query(data):
    """
    Convert OSM query data to DataFrames of ways and way-nodes.

    Parameters
    ----------
    data : dict
        Result of an OSM query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    elements = data['elements']
    if len(elements) == 0:
        raise RuntimeError('OSM query results contain no data.')

    node_records = []
    way_records = []
    waynode_records = []

    # partition elements by type, flattening each way into one way record
    # plus one record per constituent node
    for element in elements:
        if element['type'] == 'node':
            node_records.append(process_node(element))
        elif element['type'] == 'way':
            way, way_nodes = process_way(element)
            way_records.append(way)
            waynode_records.extend(way_nodes)

    return (pd.DataFrame.from_records(node_records, index='id'),
            pd.DataFrame.from_records(way_records, index='id'),
            pd.DataFrame.from_records(waynode_records, index='way_id'))
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """
    Get DataFrames of OSM data in a bounding box.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : {'walk', 'drive'}
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema, e.g. '["highway"="service"]'

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    # download the raw Overpass JSON, then parse it into the three tables
    raw_osm = osm_net_download(lat_max=lat_max, lat_min=lat_min,
                               lng_min=lng_min, lng_max=lng_max,
                               network_type=network_type,
                               timeout=timeout, memory=memory,
                               max_query_area_size=max_query_area_size,
                               custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm)
def intersection_nodes(waynodes):
    """
    Returns a set of all the nodes that appear in 2 or more ways.

    Parameters
    ----------
    waynodes : pandas.DataFrame
        Mapping of way IDs to node IDs as returned by `ways_in_bbox`.

    Returns
    -------
    intersections : set
        Node IDs that appear in 2 or more ways.
    """
    # count how often each node id occurs across all ways, then keep
    # only the ids seen more than once
    occurrences = waynodes['node_id'].value_counts()
    repeated = occurrences[occurrences > 1]
    return set(repeated.index.values)
def node_pairs(nodes, ways, waynodes, two_way=True):
    """
    Create a table of node pairs with the distances between them.

    Parameters
    ----------
    nodes : pandas.DataFrame
        Must have 'lat' and 'lon' columns.
    ways : pandas.DataFrame
        Table of way metadata.
    waynodes : pandas.DataFrame
        Table linking way IDs to node IDs. Way IDs should be in the index,
        with a column called 'node_ids'.
    two_way : bool, optional
        Whether the routes are two-way. If True, node pairs will only
        occur once. Default is True.

    Returns
    -------
    pairs : pandas.DataFrame
        Will have columns of 'from_id', 'to_id', and 'distance'.
        The index will be a MultiIndex of (from id, to id).
        The distance metric is in meters.
    """
    start_time = time.time()

    # sliding window of consecutive (a, b) node pairs along a way
    def pairwise(l):
        return zip(islice(l, 0, len(l)), islice(l, 1, None))
    intersections = intersection_nodes(waynodes)
    waymap = waynodes.groupby(level=0, sort=False)
    pairs = []

    for id, row in ways.iterrows():
        # keep only this way's nodes that are intersections, preserving
        # their order along the way; edges connect consecutive survivors
        nodes_in_way = waymap.get_group(id).node_id.values
        nodes_in_way = [x for x in nodes_in_way if x in intersections]

        if len(nodes_in_way) < 2:
            # no nodes to connect in this way
            continue

        for from_node, to_node in pairwise(nodes_in_way):
            if from_node != to_node:
                fn = nodes.loc[from_node]
                tn = nodes.loc[to_node]

                # great-circle distance in meters, rounded to micrometers
                distance = round(gcd(fn.lat, fn.lon, tn.lat, tn.lon), 6)

                col_dict = {'from_id': from_node,
                            'to_id': to_node,
                            'distance': distance}

                # copy whitelisted way tags onto the edge record; ways
                # without a given tag are simply skipped
                for tag in config.settings.keep_osm_tags:
                    try:
                        col_dict.update({tag: row[tag]})
                    except KeyError:
                        pass

                pairs.append(col_dict)

                if not two_way:
                    # one-way handling: also emit the reverse direction as
                    # its own edge record

                    col_dict = {'from_id': to_node,
                                'to_id': from_node,
                                'distance': distance}

                    for tag in config.settings.keep_osm_tags:
                        try:
                            col_dict.update({tag: row[tag]})
                        except KeyError:
                            pass

                    pairs.append(col_dict)

    pairs = pd.DataFrame.from_records(pairs)
    if pairs.empty:
        raise Exception('Query resulted in no connected node pairs. Check '
                        'your query parameters or bounding box')
    else:
        # index edges by (from_id, to_id) for fast pair lookup
        pairs.index = pd.MultiIndex.from_arrays([pairs['from_id'].values,
                                                 pairs['to_id'].values])
    log('Edge node pairs completed. Took {:,.2f} seconds'
        .format(time.time()-start_time))

    return pairs
|
fictorial/pygameui
|
pygameui/kvc.py
|
value_for_keypath
|
python
|
def value_for_keypath(obj, path):
val = obj
for part in path.split('.'):
match = re.match(list_index_re, part)
if match is not None:
val = _extract(val, match.group(1))
if not isinstance(val, list) and not isinstance(val, tuple):
raise TypeError('expected list/tuple')
index = int(match.group(2))
val = val[index]
else:
val = _extract(val, part)
if val is None:
return None
return val
|
Get value from walking key path with start object obj.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/kvc.py#L58-L74
|
[
"def _extract(val, key):\n if isinstance(val, dict):\n return val[key]\n return getattr(val, key, None)\n"
] |
"""This module lets you set/get attribute values by walking
a "key path" from a root or start object.
A key path is a string with path part specs delimited by period '.'.
Multiple path part specs are concatenated together to form the
entire path spec.
Each path part spec takes one of two forms:
- identifier
- identifier[integer]
Walks proceed by evaluating each path part spec against the
current object, starting with the given object.
Path part specs work against objects, lists, tuples, and dicts.
Note that a KeyError or IndexError encountered while walking a
key path part spec is not caught. You have to know that the a
walk of the given key path on the given object will work.
An example walk:
class A(object):
def __init__(self):
self.x = dict(y=['hello', 'world'])
class B(object):
def __init__(self):
self.a = A()
b = B()
print value_for_keypath(b, 'a.x.y[1]') # prints 'world'
# part spec context
# --------- -------
# 'a' b.a
# 'x' b.a.x
# 'y[1]' b.a.x.y[1]
"""
AUTHOR = 'Brian Hammond <brian@fictorial.com>'
LICENSE = 'MIT'
__version__ = '0.1.0'
import re
list_index_re = re.compile(r'([^\[]+)\[(\d+)\]')
def _extract(val, key):
if isinstance(val, dict):
return val[key]
return getattr(val, key, None)
def set_value_for_keypath(obj, path, new_value, preserve_child = False):
"""Set attribute value new_value at key path of start object obj.
"""
parts = path.split('.')
last_part = len(parts) - 1
dst = obj
for i, part in enumerate(parts):
match = re.match(list_index_re, part)
if match is not None:
dst = _extract(dst, match.group(1))
if not isinstance(dst, list) and not isinstance(dst, tuple):
raise TypeError('expected list/tuple')
index = int(match.group(2))
if i == last_part:
dst[index] = new_value
else:
dst = dst[index]
else:
if i != last_part:
dst = _extract(dst, part)
else:
if isinstance(dst, dict):
dst[part] = new_value
else:
if not preserve_child:
setattr(dst, part, new_value)
else:
try:
v = getattr(dst, part)
except AttributeError:
setattr(dst, part, new_value)
if __name__ == '__main__':
class A(object):
def __init__(self):
self.x = dict(y=['hello', 'world'])
class B(object):
def __init__(self):
self.a = A()
b = B()
assert value_for_keypath(b, 'a.x.y[1]') == 'world'
set_value_for_keypath(b, 'a.x.y[1]', 2)
assert value_for_keypath(b, 'a.x.y[1]') == 2
|
fictorial/pygameui
|
pygameui/kvc.py
|
set_value_for_keypath
|
python
|
def set_value_for_keypath(obj, path, new_value, preserve_child = False):
parts = path.split('.')
last_part = len(parts) - 1
dst = obj
for i, part in enumerate(parts):
match = re.match(list_index_re, part)
if match is not None:
dst = _extract(dst, match.group(1))
if not isinstance(dst, list) and not isinstance(dst, tuple):
raise TypeError('expected list/tuple')
index = int(match.group(2))
if i == last_part:
dst[index] = new_value
else:
dst = dst[index]
else:
if i != last_part:
dst = _extract(dst, part)
else:
if isinstance(dst, dict):
dst[part] = new_value
else:
if not preserve_child:
setattr(dst, part, new_value)
else:
try:
v = getattr(dst, part)
except AttributeError:
setattr(dst, part, new_value)
|
Set attribute value new_value at key path of start object obj.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/kvc.py#L77-L107
|
[
"def _extract(val, key):\n if isinstance(val, dict):\n return val[key]\n return getattr(val, key, None)\n"
] |
"""This module lets you set/get attribute values by walking
a "key path" from a root or start object.
A key path is a string with path part specs delimited by period '.'.
Multiple path part specs are concatenated together to form the
entire path spec.
Each path part spec takes one of two forms:
- identifier
- identifier[integer]
Walks proceed by evaluating each path part spec against the
current object, starting with the given object.
Path part specs work against objects, lists, tuples, and dicts.
Note that a KeyError or IndexError encountered while walking a
key path part spec is not caught. You have to know that the a
walk of the given key path on the given object will work.
An example walk:
class A(object):
def __init__(self):
self.x = dict(y=['hello', 'world'])
class B(object):
def __init__(self):
self.a = A()
b = B()
print value_for_keypath(b, 'a.x.y[1]') # prints 'world'
# part spec context
# --------- -------
# 'a' b.a
# 'x' b.a.x
# 'y[1]' b.a.x.y[1]
"""
AUTHOR = 'Brian Hammond <brian@fictorial.com>'
LICENSE = 'MIT'
__version__ = '0.1.0'
import re
list_index_re = re.compile(r'([^\[]+)\[(\d+)\]')
def _extract(val, key):
if isinstance(val, dict):
return val[key]
return getattr(val, key, None)
def value_for_keypath(obj, path):
"""Get value from walking key path with start object obj.
"""
val = obj
for part in path.split('.'):
match = re.match(list_index_re, part)
if match is not None:
val = _extract(val, match.group(1))
if not isinstance(val, list) and not isinstance(val, tuple):
raise TypeError('expected list/tuple')
index = int(match.group(2))
val = val[index]
else:
val = _extract(val, part)
if val is None:
return None
return val
if __name__ == '__main__':
class A(object):
def __init__(self):
self.x = dict(y=['hello', 'world'])
class B(object):
def __init__(self):
self.a = A()
b = B()
assert value_for_keypath(b, 'a.x.y[1]') == 'world'
set_value_for_keypath(b, 'a.x.y[1]', 2)
assert value_for_keypath(b, 'a.x.y[1]') == 2
|
fictorial/pygameui
|
pygameui/imageview.py
|
view_for_image_named
|
python
|
def view_for_image_named(image_name):
image = resource.get_image(image_name)
if not image:
return None
return ImageView(pygame.Rect(0, 0, 0, 0), image)
|
Create an ImageView for the given image.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/imageview.py#L64-L72
|
[
"def get_image(name):\n try:\n img = image_cache[name]\n except KeyError:\n path = 'resources/images/%s.png' % name\n path = pkg_resources.resource_filename(package_name, path)\n try:\n logger.debug('loading image %s' % path)\n img = pygame.image.load(path)\n except pygame.error, e:\n logger.warn('failed to load image: %s: %s' % (path, e))\n img = None\n else:\n img = img.convert_alpha()\n image_cache[path] = img\n return img\n"
] |
import pygame
import view
import resource
SCALE_TO_FILL = 0
class ImageView(view.View):
"""A view for displaying an image.
The only 'content scaling mode' currently supported is 'scale-to-fill'.
"""
def __init__(self, frame, img, content_mode=SCALE_TO_FILL):
"""Create an image view from an image.
frame.topleft
where to position the view.
frame.size
if (0, 0) the frame.size is set to the image's size;
otherwise, the image is scaled to this size.
"""
assert img is not None
if frame is None:
frame = pygame.Rect((0, 0), img.get_size())
elif frame.w == 0 and frame.h == 0:
frame.size = img.get_size()
view.View.__init__(self, frame)
self._enabled = False
self.content_mode = content_mode
self.image = img
@property
def image(self):
return self._image
@image.setter
def image(self, new_image):
self._image = new_image
def layout(self):
assert self.padding[0] == 0 and self.padding[1] == 0
if self.content_mode == SCALE_TO_FILL:
self._image = resource.scale_image(self._image, self.frame.size)
else:
assert False, "Unknown content_mode"
view.View.layout(self)
def draw(self):
self.surface = self._image
|
fictorial/pygameui
|
distribute_setup.py
|
main
|
python
|
def main(argv, version=DEFAULT_VERSION):
tarball = download_setuptools()
_install(tarball, _build_install_args(argv))
|
Install or upgrade setuptools and EasyInstall
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/distribute_setup.py#L487-L490
|
[
"def _install(tarball, install_args=()):\n # extracting the tarball\n tmpdir = tempfile.mkdtemp()\n log.warn('Extracting in %s', tmpdir)\n old_wd = os.getcwd()\n try:\n os.chdir(tmpdir)\n tar = tarfile.open(tarball)\n _extractall(tar)\n tar.close()\n\n # going in the directory\n subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])\n os.chdir(subdir)\n log.warn('Now working in %s', subdir)\n\n # installing\n log.warn('Installing Distribute')\n if not _python_cmd('setup.py', 'install', *install_args):\n log.warn('Something went wrong during the installation.')\n log.warn('See the error message above.')\n finally:\n os.chdir(old_wd)\n",
"def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,\n to_dir=os.curdir, delay=15):\n \"\"\"Download distribute from a specified location and return its filename\n\n `version` should be a valid distribute version number that is available\n as an egg for download under the `download_base` URL (which should end\n with a '/'). `to_dir` is the directory where the egg will be downloaded.\n `delay` is the number of seconds to pause before an actual download\n attempt.\n \"\"\"\n # making sure we use the absolute path\n to_dir = os.path.abspath(to_dir)\n try:\n from urllib.request import urlopen\n except ImportError:\n from urllib2 import urlopen\n tgz_name = \"distribute-%s.tar.gz\" % version\n url = download_base + tgz_name\n saveto = os.path.join(to_dir, tgz_name)\n src = dst = None\n if not os.path.exists(saveto): # Avoid repeated downloads\n try:\n log.warn(\"Downloading %s\", url)\n src = urlopen(url)\n # Read/write all in one block, so we don't create a corrupt file\n # if the download is interrupted.\n data = src.read()\n dst = open(saveto, \"wb\")\n dst.write(data)\n finally:\n if src:\n src.close()\n if dst:\n dst.close()\n return os.path.realpath(saveto)\n",
"def _build_install_args(argv):\n install_args = []\n user_install = '--user' in argv\n if user_install and sys.version_info < (2,6):\n log.warn(\"--user requires Python 2.6 or later\")\n raise SystemExit(1)\n if user_install:\n install_args.append('--user')\n return install_args\n"
] |
#!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
import subprocess
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
except ImportError:
# will be used for python 2.3
def _python_cmd(*args):
args = (sys.executable,) + args
# quoting arguments if windows
if sys.platform == 'win32':
def quote(arg):
if ' ' in arg:
return '"%s"' % arg
return arg
args = [quote(arg) for arg in args]
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
DEFAULT_VERSION = "0.6.25"
DEFAULT_URL = "https://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_FAKED_VERSION = "0.6c11"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball, install_args=()):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Distribute')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
finally:
os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Distribute egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15, no_fake=True):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
if not no_fake:
_fake_setuptools()
raise ImportError
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("distribute>="+version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of distribute (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U distribute'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
finally:
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
`version` should be a valid distribute version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "distribute-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto)
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
from setuptools.sandbox import DirectorySandbox
if not hasattr(DirectorySandbox, '_old'):
def violation(*args):
pass
DirectorySandbox._old = DirectorySandbox._violation
DirectorySandbox._violation = violation
patched = True
else:
patched = False
except ImportError:
patched = False
try:
return function(*args, **kw)
finally:
if patched:
DirectorySandbox._violation = DirectorySandbox._old
del DirectorySandbox._old
return __no_sandbox
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
if existing_content == content:
# already patched
log.warn('Already patched.')
return False
log.warn('Patching...')
_rename_path(path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
return True
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s into %s', path, new_name)
os.rename(path, new_name)
return new_name
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
log.warn('Unkown installation at %s', placeholder)
return False
found = False
for file in os.listdir(placeholder):
if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
found = True
break
if not found:
log.warn('Could not locate setuptools*.egg-info')
return
log.warn('Removing elements out of the way...')
pkg_info = os.path.join(placeholder, file)
if os.path.isdir(pkg_info):
patched = _patch_egg_dir(pkg_info)
else:
patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
if not patched:
log.warn('%s already patched.', pkg_info)
return False
# now let's move the files out of the way
for element in ('setuptools', 'pkg_resources.py', 'site.py'):
element = os.path.join(placeholder, element)
if os.path.exists(element):
_rename_path(element)
else:
log.warn('Could not find the %s element of the '
'Setuptools distribution', element)
return True
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
log.warn('After install bootstrap.')
placeholder = dist.get_command_obj('install').install_purelib
_create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
return
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
setuptools_file = 'setuptools-%s-py%s.egg-info' % \
(SETUPTOOLS_FAKED_VERSION, pyver)
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
return
log.warn('Creating %s', pkg_info)
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
pth_file = os.path.join(placeholder, 'setuptools.pth')
log.warn('Creating %s', pth_file)
f = open(pth_file, 'w')
try:
f.write(os.path.join(os.curdir, setuptools_file))
finally:
f.close()
_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
# let's check if it's already patched
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
if os.path.exists(pkg_info):
if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
log.warn('%s already patched.', pkg_info)
return False
_rename_path(path)
os.mkdir(path)
os.mkdir(os.path.join(path, 'EGG-INFO'))
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
return True
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
log.warn('Before install bootstrap.')
_fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
top_dir = arg.split('root=')[-1]
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index+1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
log.warn('Scanning installed packages')
try:
import pkg_resources
except ImportError:
# we're cool
log.warn('Setuptools or Distribute does not seem to be installed.')
return
ws = pkg_resources.working_set
try:
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
replacement=False))
except TypeError:
# old distribute API
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
if setuptools_dist is None:
log.warn('No setuptools distribution found')
return
# detecting if it was already faked
setuptools_location = setuptools_dist.location
log.warn('Setuptools installation detected at %s', setuptools_location)
# if --root or --preix was provided, and if
# setuptools is not located in them, we don't patch it
if not _under_prefix(setuptools_location):
log.warn('Not patching, --root or --prefix is installing Distribute'
' in another location')
return
# let's see if its an egg
if not setuptools_location.endswith('.egg'):
log.warn('Non-egg installation')
res = _remove_flat_installation(setuptools_location)
if not res:
return
else:
log.warn('Egg installation')
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
if (os.path.exists(pkg_info) and
_same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
log.warn('Already patched.')
return
log.warn('Patching...')
# let's create a fake egg replacing setuptools one
res = _patch_egg_dir(setuptools_location)
if not res:
return
log.warn('Patched done.')
_relaunch()
def _relaunch():
log.warn('Relaunching...')
# we have to relaunch the process
# pip marker to avoid a relaunch bug
if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
sys.argv[0] = 'setup.py'
args = [sys.executable] + sys.argv
sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def _build_install_args(argv):
install_args = []
user_install = '--user' in argv
if user_install and sys.version_info < (2,6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
if user_install:
install_args.append('--user')
return install_args
if __name__ == '__main__':
main(sys.argv[1:])
|
fictorial/pygameui
|
pygameui/render.py
|
fill_gradient
|
python
|
def fill_gradient(surface, color, gradient,
rect=None, vertical=True, forward=True):
if rect is None:
rect = surface.get_rect()
x1, x2 = rect.left, rect.right
y1, y2 = rect.top, rect.bottom
if vertical:
h = y2 - y1
else:
h = x2 - x1
assert h > 0
if forward:
a, b = color, gradient
else:
b, a = color, gradient
rate = (float(b[0] - a[0]) / h,
float(b[1] - a[1]) / h,
float(b[2] - a[2]) / h)
fn_line = pygame.draw.line
if vertical:
for line in range(y1, y2):
color = (min(max(a[0] + (rate[0] * (line - y1)), 0), 255),
min(max(a[1] + (rate[1] * (line - y1)), 0), 255),
min(max(a[2] + (rate[2] * (line - y1)), 0), 255))
fn_line(surface, color, (x1, line), (x2, line))
else:
for col in range(x1, x2):
color = (min(max(a[0] + (rate[0] * (col - x1)), 0), 255),
min(max(a[1] + (rate[1] * (col - x1)), 0), 255),
min(max(a[2] + (rate[2] * (col - x1)), 0), 255))
fn_line(surface, color, (col, y1), (col, y2))
|
Fill a surface with a linear gradient pattern.
color
starting color
gradient
final color
rect
area to fill; default is surface's rect
vertical
True=vertical; False=horizontal
forward
True=forward; False=reverse
See http://www.pygame.org/wiki/GradientCode
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/render.py#L4-L66
| null |
import pygame
def fillrect(surface, color, rect, vertical=True):
if len(color) == 2: # gradient
fill_gradient(surface, color[0], color[1],
rect=rect, vertical=vertical)
else:
surface.fill(color, rect)
|
fictorial/pygameui
|
pygameui/label.py
|
Label.shrink_wrap
|
python
|
def shrink_wrap(self):
self.frame.size = (self.text_size[0] + self.padding[0] * 2,
self.text_size[1] + self.padding[1] * 2)
|
Tightly bound the current text respecting current padding.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/label.py#L187-L191
| null |
class Label(view.View):
"""Multi-line, word-wrappable, uneditable text view.
Attributes:
halign
CENTER, LEFT, or RIGHT. Horizontal alignment of
text.
valign
CENTER, TOP, or BOTTOM. Vertical alignment of text.
wrap_mode
WORD_WRAP or CLIP. Determines how text is wrapped to
fit within the label's frame width-wise. Text that
is wrapped to multiple rendered lines is clipped at
the bottom of the frame. After setting the text
attribute, the text_size attribute may be used to
resize the label's frame; also see shrink_wrap.
Changing wrap_mode forces a redraw of the label.
text
The text to render.
Changing the text forces a redraw of the label.
Style attributes:
Changing a style attribute does not automatically redraw
text in the new style given that you will likely change
a number of style attributes. Call 'layout' when you
have finished changing style attributes.
Using a new theme automatically restylizes and thus redraws
the text using the new theme's style attributes.
text_color
The color of the text.
text_shadow_color
The color of the fake text-shadow.
text_shadow_offset
The offset of the fake text-shadow in the form
(dx, dy).
font
The font used for rendering the text.
padding
Horizontal and vertical spacing from the label's
interior edges where text is rendered.
"""
def __init__(self, frame, text,
halign=CENTER, valign=CENTER,
wrap=CLIP):
view.View.__init__(self, frame)
self.halign = halign
self.valign = valign
self._wrap_mode = wrap
self._text = text
self._enabled = False
@property
def text(self):
return self._text
@text.setter
def text(self, text):
self._text = text
self.render()
@property
def wrap_mode(self):
return self._wrap_mode
@property
def wrap_mode(self, mode):
self._wrap_mode = mode
self.render()
def layout(self):
self.render()
view.View.layout(self)
def render(self):
"""Force (re)draw the text to cached surfaces.
"""
self._render(self._text)
def _render(self, text):
self.text_surfaces, self.text_shadow_surfaces = [], []
if text is None or len(text) == 0:
self._text = None
self.text_size = (0, 0)
return
text = text.replace("\r\n", "\n").replace("\r", "\n")
wants_shadows = (self.text_shadow_color is not None and
self.text_shadow_offset is not None)
if self._wrap_mode == CLIP:
self._text = re.sub(r'[\n\t]{2, }', ' ', text)
self.text_size = self._render_line(self._text, wants_shadows)
elif self._wrap_mode == WORD_WRAP:
self._render_word_wrapped(text, wants_shadows)
def _render_line(self, line_text, wants_shadows):
line_text = line_text.strip()
text_surface = self.font.render(line_text, True, self.text_color)
self.text_surfaces.append(text_surface)
if wants_shadows:
text_shadow_surface = self.font.render(
line_text, True, self.text_shadow_color)
self.text_shadow_surfaces.append(text_shadow_surface)
return text_surface.get_size()
def _render_word_wrapped(self, text, wants_shadows):
self._text = text
self.text_size = [0, 0]
line_width = 0
max_line_width = self.frame.w - self.padding[0] * 2
line_tokens = []
tokens = re.split(r'(\s)', self._text)
token_widths = {}
for token in tokens:
if len(token) == 0:
continue
token_width, _ = token_widths.setdefault(token,
self.font.size(token))
if token == '\n' or token_width + line_width >= max_line_width:
line_size = self._render_line(''.join(line_tokens),
wants_shadows)
self.text_size[0] = max(self.text_size[0], line_size[0])
self.text_size[1] += line_size[1]
if token == '\n':
line_tokens, line_width = [], 0
else:
line_tokens, line_width = [token], token_width
else:
line_width += token_width
line_tokens.append(token)
if len(line_tokens) > 0:
line_size = self._render_line(''.join(line_tokens),
wants_shadows)
self.text_size[0] = max(self.text_size[0], line_size[0])
self.text_size[1] += line_size[1]
def _determine_top(self):
if self.valign == TOP:
y = self.padding[1]
elif self.valign == CENTER:
y = self.frame.h // 2 - self.text_size[1] // 2
elif self.valign == BOTTOM:
y = self.frame.h - self.padding[1] - self.text_size[1]
return y
def _determine_left(self, text_surface):
w = text_surface.get_size()[0]
if self.halign == LEFT:
x = self.padding[0]
elif self.halign == CENTER:
x = self.frame.w // 2 - w // 2
elif self.halign == RIGHT:
x = self.frame.w - 1 - self.padding[0] - w
return x
def draw(self):
if not view.View.draw(self) or not self._text:
return False
wants_shadows = (self.text_shadow_color is not None and
self.text_shadow_offset is not None)
y = self._determine_top()
for index, text_surface in enumerate(self.text_surfaces):
x = self._determine_left(text_surface)
if wants_shadows:
text_shadow_surface = self.text_shadow_surfaces[index]
top_left = (x + self.text_shadow_offset[0],
y + self.text_shadow_offset[1])
self.surface.blit(text_shadow_surface, top_left)
self.surface.blit(text_surface, (x, y))
y += text_surface.get_size()[1]
return True
def __repr__(self):
if self._text is None:
return ''
return self._text
|
fictorial/pygameui
|
pygameui/view.py
|
View.layout
|
python
|
def layout(self):
if self.shadowed:
shadow_size = theme.current.shadow_size
shadowed_frame_size = (self.frame.w + shadow_size,
self.frame.h + shadow_size)
self.surface = pygame.Surface(
shadowed_frame_size, pygame.SRCALPHA, 32)
shadow_image = resource.get_image('shadow')
self.shadow_image = resource.scale_image(shadow_image,
shadowed_frame_size)
else:
self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)
self.shadow_image = None
|
Call to have the view layout itself.
Subclasses should invoke this after laying out child
views and/or updating its own frame.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/view.py#L74-L91
|
[
"def scale_image(image, size):\n return pygame.transform.smoothscale(image, size)\n",
"def get_image(name):\n try:\n img = image_cache[name]\n except KeyError:\n path = 'resources/images/%s.png' % name\n path = pkg_resources.resource_filename(package_name, path)\n try:\n logger.debug('loading image %s' % path)\n img = pygame.image.load(path)\n except pygame.error, e:\n logger.warn('failed to load image: %s: %s' % (path, e))\n img = None\n else:\n img = img.convert_alpha()\n image_cache[path] = img\n return img\n"
] |
class View(object):
"""A rectangular portion of the window.
Views may have zero or more child views contained within it.
Signals
on_mouse_down(view, button, point)
on_mouse_up(view, button, point)
on_mouse_motion(view, point)
on_mouse_drag(view, point, delta)
on_key_down(view, key, code)
on_key_up(view, key)
on_parented(view)
on_orphaned(view) (from parent view)
on_focused(view)
on_blurred(view)
on_selected(view)
on_enabled(view)
on_disabled(view)
on_state_changed(view)
All mouse points passed to event methods and to slots are in local
view coordinates. Use `to_parent` and `to_window` to convert.
"""
def __init__(self, frame=None):
self.frame = frame
self.parent = None
self.children = [] # back->front
self._state = 'normal'
self._enabled = True
self.hidden = False
self.draggable = False
self.shadow_image = None
self.on_focused = callback.Signal()
self.on_blurred = callback.Signal()
self.on_selected = callback.Signal()
self.on_enabled = callback.Signal()
self.on_disabled = callback.Signal()
self.on_state_changed = callback.Signal()
self.on_mouse_up = callback.Signal()
self.on_mouse_down = callback.Signal()
self.on_mouse_motion = callback.Signal()
self.on_mouse_drag = callback.Signal()
self.on_key_down = callback.Signal()
self.on_key_up = callback.Signal()
self.on_parented = callback.Signal()
self.on_orphaned = callback.Signal()
def size_to_fit(self):
rect = self.frame
for child in self.children:
rect = rect.union(child.frame)
self.frame = rect
self.layout()
def update(self, dt):
for child in self.children:
child.update(dt)
def to_parent(self, point):
return (point[0] + self.frame.topleft[0],
point[1] + self.frame.topleft[1])
def from_parent(self, point):
return (point[0] - self.frame.topleft[0],
point[1] - self.frame.topleft[1])
def from_window(self, point):
curr = self
ancestors = [curr]
while curr.parent:
ancestors.append(curr.parent)
curr = curr.parent
for a in reversed(ancestors):
point = a.from_parent(point)
return point
def to_window(self, point):
curr = self
while curr:
point = curr.to_parent(point)
curr = curr.parent
return point
def mouse_up(self, button, point):
self.on_mouse_up(self, button, point)
def mouse_down(self, button, point):
self.on_mouse_down(self, button, point)
def mouse_motion(self, point):
self.on_mouse_motion(self, point)
# only called on drag event if .draggable is True
def mouse_drag(self, point, delta):
self.on_mouse_drag(self, point, delta)
self.frame.topleft = (self.frame.topleft[0] + delta[0],
self.frame.topleft[1] + delta[1])
if self.parent:
self.parent._child_dragged(self)
def key_down(self, key, code):
self.on_key_down(self, key, code)
def key_up(self, key):
self.on_key_up(self, key)
@property
def state(self):
"""The state of the view.
Potential values are 'normal', 'focused', 'selected', 'disabled'.
"""
return self._state
@state.setter
def state(self, new_state):
if self._state != new_state:
self._state = new_state
self.stylize()
self.on_state_changed()
def focus(self):
focus.set(self)
def has_focus(self):
return focus.view == self
def focused(self):
self.state = 'focused'
self.on_focused()
def blurred(self):
self.state = 'normal'
self.on_blurred()
def selected(self):
self.state = 'selected'
self.on_selected()
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, yesno):
if self._enabled != yesno:
self._enabled = yesno
if yesno:
self.enabled()
else:
self.disabled()
def enabled(self):
self.state = 'normal'
self.on_enabled()
def disabled(self):
self.state = 'disabled'
self.on_disabled()
def stylize(self):
"""Apply theme style attributes to this instance and its children.
This also causes a relayout to occur so that any changes in padding
or other stylistic attributes may be handled.
"""
# do children first in case parent needs to override their style
for child in self.children:
child.stylize()
style = theme.current.get_dict(self)
preserve_child = False
try:
preserve_child = getattr(theme.current, 'preserve_child')
except:
preserve_child = False
for key, val in style.iteritems():
kvc.set_value_for_keypath(self, key, val, preserve_child)
self.layout()
def draw(self):
"""Do not call directly."""
if self.hidden:
return False
if self.background_color is not None:
render.fillrect(self.surface, self.background_color,
rect=pygame.Rect((0, 0), self.frame.size))
for child in self.children:
if not child.hidden:
child.draw()
topleft = child.frame.topleft
if child.shadowed:
shadow_size = theme.current.shadow_size
shadow_topleft = (topleft[0] - shadow_size // 2,
topleft[1] - shadow_size // 2)
self.surface.blit(child.shadow_image, shadow_topleft)
self.surface.blit(child.surface, topleft)
if child.border_color and child.border_widths is not None:
if (type(child.border_widths) is int and
child.border_widths > 0):
pygame.draw.rect(self.surface, child.border_color,
child.frame, child.border_widths)
else:
tw, lw, bw, rw = child.get_border_widths()
tl = (child.frame.left, child.frame.top)
tr = (child.frame.right - 1, child.frame.top)
bl = (child.frame.left, child.frame.bottom - 1)
br = (child.frame.right - 1, child.frame.bottom - 1)
if tw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, tr, tw)
if lw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, bl, lw)
if bw > 0:
pygame.draw.line(self.surface, child.border_color,
bl, br, bw)
if rw > 0:
pygame.draw.line(self.surface, child.border_color,
tr, br, rw)
return True
def get_border_widths(self):
"""Return border width for each side top, left, bottom, right."""
if type(self.border_widths) is int: # uniform size
return [self.border_widths] * 4
return self.border_widths
def hit(self, pt):
"""Find the view (self, child, or None) under the point `pt`."""
if self.hidden or not self._enabled:
return None
if not self.frame.collidepoint(pt):
return None
local_pt = (pt[0] - self.frame.topleft[0],
pt[1] - self.frame.topleft[1])
for child in reversed(self.children): # front to back
hit_view = child.hit(local_pt)
if hit_view is not None:
return hit_view
return self
def center(self):
if self.parent is not None:
self.frame.center = (self.parent.frame.w // 2,
self.parent.frame.h // 2)
def add_child(self, child):
assert child is not None
self.rm_child(child)
self.children.append(child)
child.parent = self
child.parented()
import scene
if scene.current is not None:
child.stylize()
def rm_child(self, child):
for index, ch in enumerate(self.children):
if ch == child:
ch.orphaned()
del self.children[index]
break
def rm(self):
if self.parent:
self.parent.rm_child(self)
def parented(self):
self.on_parented()
def orphaned(self):
self.on_orphaned()
def iter_ancestors(self):
curr = self
while curr.parent:
yield curr.parent
curr = curr.parent
def iter_children(self):
for child in self.children:
yield child
def bring_to_front(self):
"""TODO: explain depth sorting"""
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[-1], ch[index] = ch[index], ch[-1]
def move_to_back(self):
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[0], ch[index] = ch[index], ch[0]
|
fictorial/pygameui
|
pygameui/view.py
|
View.stylize
|
python
|
def stylize(self):
# do children first in case parent needs to override their style
for child in self.children:
child.stylize()
style = theme.current.get_dict(self)
preserve_child = False
try:
preserve_child = getattr(theme.current, 'preserve_child')
except:
preserve_child = False
for key, val in style.iteritems():
kvc.set_value_for_keypath(self, key, val, preserve_child)
self.layout()
|
Apply theme style attributes to this instance and its children.
This also causes a relayout to occur so that any changes in padding
or other stylistic attributes may be handled.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/view.py#L209-L227
|
[
"def set_value_for_keypath(obj, path, new_value, preserve_child = False):\n \"\"\"Set attribute value new_value at key path of start object obj.\n \"\"\"\n parts = path.split('.')\n last_part = len(parts) - 1\n dst = obj\n for i, part in enumerate(parts):\n match = re.match(list_index_re, part)\n if match is not None:\n dst = _extract(dst, match.group(1))\n if not isinstance(dst, list) and not isinstance(dst, tuple):\n raise TypeError('expected list/tuple')\n index = int(match.group(2))\n if i == last_part:\n dst[index] = new_value\n else:\n dst = dst[index]\n else:\n if i != last_part:\n dst = _extract(dst, part)\n else:\n if isinstance(dst, dict):\n dst[part] = new_value\n else:\n if not preserve_child:\n setattr(dst, part, new_value)\n else:\n try:\n v = getattr(dst, part)\n except AttributeError:\n setattr(dst, part, new_value)\n",
"def layout(self):\n assert self.get_border_widths()[0] == 0 # top; check for animations\n assert self.padding[0] == 0 and self.padding[1] == 0\n self.message_label.shrink_wrap()\n self.message_label.frame.w = self.frame.w\n self.frame.h = self.message_label.frame.h\n dialog.DialogView.layout(self)\n",
"def layout(self):\n \"\"\"Call to have the view layout itself.\n\n Subclasses should invoke this after laying out child\n views and/or updating its own frame.\n \"\"\"\n if self.shadowed:\n shadow_size = theme.current.shadow_size\n shadowed_frame_size = (self.frame.w + shadow_size,\n self.frame.h + shadow_size)\n self.surface = pygame.Surface(\n shadowed_frame_size, pygame.SRCALPHA, 32)\n shadow_image = resource.get_image('shadow')\n self.shadow_image = resource.scale_image(shadow_image,\n shadowed_frame_size)\n else:\n self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)\n self.shadow_image = None\n"
] |
class View(object):
"""A rectangular portion of the window.
Views may have zero or more child views contained within it.
Signals
on_mouse_down(view, button, point)
on_mouse_up(view, button, point)
on_mouse_motion(view, point)
on_mouse_drag(view, point, delta)
on_key_down(view, key, code)
on_key_up(view, key)
on_parented(view)
on_orphaned(view) (from parent view)
on_focused(view)
on_blurred(view)
on_selected(view)
on_enabled(view)
on_disabled(view)
on_state_changed(view)
All mouse points passed to event methods and to slots are in local
view coordinates. Use `to_parent` and `to_window` to convert.
"""
def __init__(self, frame=None):
self.frame = frame
self.parent = None
self.children = [] # back->front
self._state = 'normal'
self._enabled = True
self.hidden = False
self.draggable = False
self.shadow_image = None
self.on_focused = callback.Signal()
self.on_blurred = callback.Signal()
self.on_selected = callback.Signal()
self.on_enabled = callback.Signal()
self.on_disabled = callback.Signal()
self.on_state_changed = callback.Signal()
self.on_mouse_up = callback.Signal()
self.on_mouse_down = callback.Signal()
self.on_mouse_motion = callback.Signal()
self.on_mouse_drag = callback.Signal()
self.on_key_down = callback.Signal()
self.on_key_up = callback.Signal()
self.on_parented = callback.Signal()
self.on_orphaned = callback.Signal()
def layout(self):
"""Call to have the view layout itself.
Subclasses should invoke this after laying out child
views and/or updating its own frame.
"""
if self.shadowed:
shadow_size = theme.current.shadow_size
shadowed_frame_size = (self.frame.w + shadow_size,
self.frame.h + shadow_size)
self.surface = pygame.Surface(
shadowed_frame_size, pygame.SRCALPHA, 32)
shadow_image = resource.get_image('shadow')
self.shadow_image = resource.scale_image(shadow_image,
shadowed_frame_size)
else:
self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)
self.shadow_image = None
def size_to_fit(self):
rect = self.frame
for child in self.children:
rect = rect.union(child.frame)
self.frame = rect
self.layout()
def update(self, dt):
for child in self.children:
child.update(dt)
def to_parent(self, point):
return (point[0] + self.frame.topleft[0],
point[1] + self.frame.topleft[1])
def from_parent(self, point):
return (point[0] - self.frame.topleft[0],
point[1] - self.frame.topleft[1])
def from_window(self, point):
curr = self
ancestors = [curr]
while curr.parent:
ancestors.append(curr.parent)
curr = curr.parent
for a in reversed(ancestors):
point = a.from_parent(point)
return point
def to_window(self, point):
curr = self
while curr:
point = curr.to_parent(point)
curr = curr.parent
return point
def mouse_up(self, button, point):
self.on_mouse_up(self, button, point)
def mouse_down(self, button, point):
self.on_mouse_down(self, button, point)
def mouse_motion(self, point):
self.on_mouse_motion(self, point)
# only called on drag event if .draggable is True
def mouse_drag(self, point, delta):
self.on_mouse_drag(self, point, delta)
self.frame.topleft = (self.frame.topleft[0] + delta[0],
self.frame.topleft[1] + delta[1])
if self.parent:
self.parent._child_dragged(self)
def key_down(self, key, code):
self.on_key_down(self, key, code)
def key_up(self, key):
self.on_key_up(self, key)
@property
def state(self):
"""The state of the view.
Potential values are 'normal', 'focused', 'selected', 'disabled'.
"""
return self._state
@state.setter
def state(self, new_state):
if self._state != new_state:
self._state = new_state
self.stylize()
self.on_state_changed()
def focus(self):
focus.set(self)
def has_focus(self):
return focus.view == self
def focused(self):
self.state = 'focused'
self.on_focused()
def blurred(self):
self.state = 'normal'
self.on_blurred()
def selected(self):
self.state = 'selected'
self.on_selected()
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, yesno):
if self._enabled != yesno:
self._enabled = yesno
if yesno:
self.enabled()
else:
self.disabled()
def enabled(self):
self.state = 'normal'
self.on_enabled()
def disabled(self):
self.state = 'disabled'
self.on_disabled()
def draw(self):
"""Do not call directly."""
if self.hidden:
return False
if self.background_color is not None:
render.fillrect(self.surface, self.background_color,
rect=pygame.Rect((0, 0), self.frame.size))
for child in self.children:
if not child.hidden:
child.draw()
topleft = child.frame.topleft
if child.shadowed:
shadow_size = theme.current.shadow_size
shadow_topleft = (topleft[0] - shadow_size // 2,
topleft[1] - shadow_size // 2)
self.surface.blit(child.shadow_image, shadow_topleft)
self.surface.blit(child.surface, topleft)
if child.border_color and child.border_widths is not None:
if (type(child.border_widths) is int and
child.border_widths > 0):
pygame.draw.rect(self.surface, child.border_color,
child.frame, child.border_widths)
else:
tw, lw, bw, rw = child.get_border_widths()
tl = (child.frame.left, child.frame.top)
tr = (child.frame.right - 1, child.frame.top)
bl = (child.frame.left, child.frame.bottom - 1)
br = (child.frame.right - 1, child.frame.bottom - 1)
if tw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, tr, tw)
if lw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, bl, lw)
if bw > 0:
pygame.draw.line(self.surface, child.border_color,
bl, br, bw)
if rw > 0:
pygame.draw.line(self.surface, child.border_color,
tr, br, rw)
return True
def get_border_widths(self):
"""Return border width for each side top, left, bottom, right."""
if type(self.border_widths) is int: # uniform size
return [self.border_widths] * 4
return self.border_widths
def hit(self, pt):
"""Find the view (self, child, or None) under the point `pt`."""
if self.hidden or not self._enabled:
return None
if not self.frame.collidepoint(pt):
return None
local_pt = (pt[0] - self.frame.topleft[0],
pt[1] - self.frame.topleft[1])
for child in reversed(self.children): # front to back
hit_view = child.hit(local_pt)
if hit_view is not None:
return hit_view
return self
def center(self):
if self.parent is not None:
self.frame.center = (self.parent.frame.w // 2,
self.parent.frame.h // 2)
def add_child(self, child):
assert child is not None
self.rm_child(child)
self.children.append(child)
child.parent = self
child.parented()
import scene
if scene.current is not None:
child.stylize()
def rm_child(self, child):
for index, ch in enumerate(self.children):
if ch == child:
ch.orphaned()
del self.children[index]
break
def rm(self):
if self.parent:
self.parent.rm_child(self)
def parented(self):
self.on_parented()
def orphaned(self):
self.on_orphaned()
def iter_ancestors(self):
curr = self
while curr.parent:
yield curr.parent
curr = curr.parent
def iter_children(self):
for child in self.children:
yield child
def bring_to_front(self):
"""TODO: explain depth sorting"""
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[-1], ch[index] = ch[index], ch[-1]
def move_to_back(self):
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[0], ch[index] = ch[index], ch[0]
|
fictorial/pygameui
|
pygameui/view.py
|
View.draw
|
python
|
def draw(self):
if self.hidden:
return False
if self.background_color is not None:
render.fillrect(self.surface, self.background_color,
rect=pygame.Rect((0, 0), self.frame.size))
for child in self.children:
if not child.hidden:
child.draw()
topleft = child.frame.topleft
if child.shadowed:
shadow_size = theme.current.shadow_size
shadow_topleft = (topleft[0] - shadow_size // 2,
topleft[1] - shadow_size // 2)
self.surface.blit(child.shadow_image, shadow_topleft)
self.surface.blit(child.surface, topleft)
if child.border_color and child.border_widths is not None:
if (type(child.border_widths) is int and
child.border_widths > 0):
pygame.draw.rect(self.surface, child.border_color,
child.frame, child.border_widths)
else:
tw, lw, bw, rw = child.get_border_widths()
tl = (child.frame.left, child.frame.top)
tr = (child.frame.right - 1, child.frame.top)
bl = (child.frame.left, child.frame.bottom - 1)
br = (child.frame.right - 1, child.frame.bottom - 1)
if tw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, tr, tw)
if lw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, bl, lw)
if bw > 0:
pygame.draw.line(self.surface, child.border_color,
bl, br, bw)
if rw > 0:
pygame.draw.line(self.surface, child.border_color,
tr, br, rw)
return True
|
Do not call directly.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/view.py#L229-L278
|
[
"def fillrect(surface, color, rect, vertical=True):\n if len(color) == 2: # gradient\n fill_gradient(surface, color[0], color[1],\n rect=rect, vertical=vertical)\n else:\n surface.fill(color, rect)\n"
] |
class View(object):
"""A rectangular portion of the window.
Views may have zero or more child views contained within it.
Signals
on_mouse_down(view, button, point)
on_mouse_up(view, button, point)
on_mouse_motion(view, point)
on_mouse_drag(view, point, delta)
on_key_down(view, key, code)
on_key_up(view, key)
on_parented(view)
on_orphaned(view) (from parent view)
on_focused(view)
on_blurred(view)
on_selected(view)
on_enabled(view)
on_disabled(view)
on_state_changed(view)
All mouse points passed to event methods and to slots are in local
view coordinates. Use `to_parent` and `to_window` to convert.
"""
def __init__(self, frame=None):
self.frame = frame
self.parent = None
self.children = [] # back->front
self._state = 'normal'
self._enabled = True
self.hidden = False
self.draggable = False
self.shadow_image = None
self.on_focused = callback.Signal()
self.on_blurred = callback.Signal()
self.on_selected = callback.Signal()
self.on_enabled = callback.Signal()
self.on_disabled = callback.Signal()
self.on_state_changed = callback.Signal()
self.on_mouse_up = callback.Signal()
self.on_mouse_down = callback.Signal()
self.on_mouse_motion = callback.Signal()
self.on_mouse_drag = callback.Signal()
self.on_key_down = callback.Signal()
self.on_key_up = callback.Signal()
self.on_parented = callback.Signal()
self.on_orphaned = callback.Signal()
def layout(self):
"""Call to have the view layout itself.
Subclasses should invoke this after laying out child
views and/or updating its own frame.
"""
if self.shadowed:
shadow_size = theme.current.shadow_size
shadowed_frame_size = (self.frame.w + shadow_size,
self.frame.h + shadow_size)
self.surface = pygame.Surface(
shadowed_frame_size, pygame.SRCALPHA, 32)
shadow_image = resource.get_image('shadow')
self.shadow_image = resource.scale_image(shadow_image,
shadowed_frame_size)
else:
self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)
self.shadow_image = None
def size_to_fit(self):
rect = self.frame
for child in self.children:
rect = rect.union(child.frame)
self.frame = rect
self.layout()
def update(self, dt):
for child in self.children:
child.update(dt)
def to_parent(self, point):
return (point[0] + self.frame.topleft[0],
point[1] + self.frame.topleft[1])
def from_parent(self, point):
return (point[0] - self.frame.topleft[0],
point[1] - self.frame.topleft[1])
def from_window(self, point):
curr = self
ancestors = [curr]
while curr.parent:
ancestors.append(curr.parent)
curr = curr.parent
for a in reversed(ancestors):
point = a.from_parent(point)
return point
def to_window(self, point):
curr = self
while curr:
point = curr.to_parent(point)
curr = curr.parent
return point
def mouse_up(self, button, point):
self.on_mouse_up(self, button, point)
def mouse_down(self, button, point):
self.on_mouse_down(self, button, point)
def mouse_motion(self, point):
self.on_mouse_motion(self, point)
# only called on drag event if .draggable is True
def mouse_drag(self, point, delta):
self.on_mouse_drag(self, point, delta)
self.frame.topleft = (self.frame.topleft[0] + delta[0],
self.frame.topleft[1] + delta[1])
if self.parent:
self.parent._child_dragged(self)
def key_down(self, key, code):
self.on_key_down(self, key, code)
def key_up(self, key):
self.on_key_up(self, key)
@property
def state(self):
"""The state of the view.
Potential values are 'normal', 'focused', 'selected', 'disabled'.
"""
return self._state
@state.setter
def state(self, new_state):
if self._state != new_state:
self._state = new_state
self.stylize()
self.on_state_changed()
def focus(self):
focus.set(self)
def has_focus(self):
return focus.view == self
def focused(self):
self.state = 'focused'
self.on_focused()
def blurred(self):
self.state = 'normal'
self.on_blurred()
def selected(self):
self.state = 'selected'
self.on_selected()
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, yesno):
if self._enabled != yesno:
self._enabled = yesno
if yesno:
self.enabled()
else:
self.disabled()
def enabled(self):
self.state = 'normal'
self.on_enabled()
def disabled(self):
self.state = 'disabled'
self.on_disabled()
def stylize(self):
"""Apply theme style attributes to this instance and its children.
This also causes a relayout to occur so that any changes in padding
or other stylistic attributes may be handled.
"""
# do children first in case parent needs to override their style
for child in self.children:
child.stylize()
style = theme.current.get_dict(self)
preserve_child = False
try:
preserve_child = getattr(theme.current, 'preserve_child')
except:
preserve_child = False
for key, val in style.iteritems():
kvc.set_value_for_keypath(self, key, val, preserve_child)
self.layout()
def get_border_widths(self):
"""Return border width for each side top, left, bottom, right."""
if type(self.border_widths) is int: # uniform size
return [self.border_widths] * 4
return self.border_widths
def hit(self, pt):
"""Find the view (self, child, or None) under the point `pt`."""
if self.hidden or not self._enabled:
return None
if not self.frame.collidepoint(pt):
return None
local_pt = (pt[0] - self.frame.topleft[0],
pt[1] - self.frame.topleft[1])
for child in reversed(self.children): # front to back
hit_view = child.hit(local_pt)
if hit_view is not None:
return hit_view
return self
def center(self):
if self.parent is not None:
self.frame.center = (self.parent.frame.w // 2,
self.parent.frame.h // 2)
def add_child(self, child):
assert child is not None
self.rm_child(child)
self.children.append(child)
child.parent = self
child.parented()
import scene
if scene.current is not None:
child.stylize()
def rm_child(self, child):
for index, ch in enumerate(self.children):
if ch == child:
ch.orphaned()
del self.children[index]
break
def rm(self):
if self.parent:
self.parent.rm_child(self)
def parented(self):
self.on_parented()
def orphaned(self):
self.on_orphaned()
def iter_ancestors(self):
curr = self
while curr.parent:
yield curr.parent
curr = curr.parent
def iter_children(self):
for child in self.children:
yield child
def bring_to_front(self):
"""TODO: explain depth sorting"""
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[-1], ch[index] = ch[index], ch[-1]
def move_to_back(self):
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[0], ch[index] = ch[index], ch[0]
|
fictorial/pygameui
|
pygameui/view.py
|
View.get_border_widths
|
python
|
def get_border_widths(self):
if type(self.border_widths) is int: # uniform size
return [self.border_widths] * 4
return self.border_widths
|
Return border width for each side top, left, bottom, right.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/view.py#L280-L284
| null |
class View(object):
"""A rectangular portion of the window.
Views may have zero or more child views contained within it.
Signals
on_mouse_down(view, button, point)
on_mouse_up(view, button, point)
on_mouse_motion(view, point)
on_mouse_drag(view, point, delta)
on_key_down(view, key, code)
on_key_up(view, key)
on_parented(view)
on_orphaned(view) (from parent view)
on_focused(view)
on_blurred(view)
on_selected(view)
on_enabled(view)
on_disabled(view)
on_state_changed(view)
All mouse points passed to event methods and to slots are in local
view coordinates. Use `to_parent` and `to_window` to convert.
"""
def __init__(self, frame=None):
self.frame = frame
self.parent = None
self.children = [] # back->front
self._state = 'normal'
self._enabled = True
self.hidden = False
self.draggable = False
self.shadow_image = None
self.on_focused = callback.Signal()
self.on_blurred = callback.Signal()
self.on_selected = callback.Signal()
self.on_enabled = callback.Signal()
self.on_disabled = callback.Signal()
self.on_state_changed = callback.Signal()
self.on_mouse_up = callback.Signal()
self.on_mouse_down = callback.Signal()
self.on_mouse_motion = callback.Signal()
self.on_mouse_drag = callback.Signal()
self.on_key_down = callback.Signal()
self.on_key_up = callback.Signal()
self.on_parented = callback.Signal()
self.on_orphaned = callback.Signal()
def layout(self):
"""Call to have the view layout itself.
Subclasses should invoke this after laying out child
views and/or updating its own frame.
"""
if self.shadowed:
shadow_size = theme.current.shadow_size
shadowed_frame_size = (self.frame.w + shadow_size,
self.frame.h + shadow_size)
self.surface = pygame.Surface(
shadowed_frame_size, pygame.SRCALPHA, 32)
shadow_image = resource.get_image('shadow')
self.shadow_image = resource.scale_image(shadow_image,
shadowed_frame_size)
else:
self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)
self.shadow_image = None
def size_to_fit(self):
rect = self.frame
for child in self.children:
rect = rect.union(child.frame)
self.frame = rect
self.layout()
def update(self, dt):
for child in self.children:
child.update(dt)
def to_parent(self, point):
return (point[0] + self.frame.topleft[0],
point[1] + self.frame.topleft[1])
def from_parent(self, point):
return (point[0] - self.frame.topleft[0],
point[1] - self.frame.topleft[1])
def from_window(self, point):
curr = self
ancestors = [curr]
while curr.parent:
ancestors.append(curr.parent)
curr = curr.parent
for a in reversed(ancestors):
point = a.from_parent(point)
return point
def to_window(self, point):
curr = self
while curr:
point = curr.to_parent(point)
curr = curr.parent
return point
def mouse_up(self, button, point):
self.on_mouse_up(self, button, point)
def mouse_down(self, button, point):
self.on_mouse_down(self, button, point)
def mouse_motion(self, point):
self.on_mouse_motion(self, point)
# only called on drag event if .draggable is True
def mouse_drag(self, point, delta):
self.on_mouse_drag(self, point, delta)
self.frame.topleft = (self.frame.topleft[0] + delta[0],
self.frame.topleft[1] + delta[1])
if self.parent:
self.parent._child_dragged(self)
def key_down(self, key, code):
self.on_key_down(self, key, code)
def key_up(self, key):
self.on_key_up(self, key)
@property
def state(self):
"""The state of the view.
Potential values are 'normal', 'focused', 'selected', 'disabled'.
"""
return self._state
@state.setter
def state(self, new_state):
if self._state != new_state:
self._state = new_state
self.stylize()
self.on_state_changed()
def focus(self):
focus.set(self)
def has_focus(self):
return focus.view == self
def focused(self):
self.state = 'focused'
self.on_focused()
def blurred(self):
self.state = 'normal'
self.on_blurred()
def selected(self):
self.state = 'selected'
self.on_selected()
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, yesno):
if self._enabled != yesno:
self._enabled = yesno
if yesno:
self.enabled()
else:
self.disabled()
def enabled(self):
self.state = 'normal'
self.on_enabled()
def disabled(self):
self.state = 'disabled'
self.on_disabled()
def stylize(self):
"""Apply theme style attributes to this instance and its children.
This also causes a relayout to occur so that any changes in padding
or other stylistic attributes may be handled.
"""
# do children first in case parent needs to override their style
for child in self.children:
child.stylize()
style = theme.current.get_dict(self)
preserve_child = False
try:
preserve_child = getattr(theme.current, 'preserve_child')
except:
preserve_child = False
for key, val in style.iteritems():
kvc.set_value_for_keypath(self, key, val, preserve_child)
self.layout()
def draw(self):
"""Do not call directly."""
if self.hidden:
return False
if self.background_color is not None:
render.fillrect(self.surface, self.background_color,
rect=pygame.Rect((0, 0), self.frame.size))
for child in self.children:
if not child.hidden:
child.draw()
topleft = child.frame.topleft
if child.shadowed:
shadow_size = theme.current.shadow_size
shadow_topleft = (topleft[0] - shadow_size // 2,
topleft[1] - shadow_size // 2)
self.surface.blit(child.shadow_image, shadow_topleft)
self.surface.blit(child.surface, topleft)
if child.border_color and child.border_widths is not None:
if (type(child.border_widths) is int and
child.border_widths > 0):
pygame.draw.rect(self.surface, child.border_color,
child.frame, child.border_widths)
else:
tw, lw, bw, rw = child.get_border_widths()
tl = (child.frame.left, child.frame.top)
tr = (child.frame.right - 1, child.frame.top)
bl = (child.frame.left, child.frame.bottom - 1)
br = (child.frame.right - 1, child.frame.bottom - 1)
if tw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, tr, tw)
if lw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, bl, lw)
if bw > 0:
pygame.draw.line(self.surface, child.border_color,
bl, br, bw)
if rw > 0:
pygame.draw.line(self.surface, child.border_color,
tr, br, rw)
return True
def hit(self, pt):
"""Find the view (self, child, or None) under the point `pt`."""
if self.hidden or not self._enabled:
return None
if not self.frame.collidepoint(pt):
return None
local_pt = (pt[0] - self.frame.topleft[0],
pt[1] - self.frame.topleft[1])
for child in reversed(self.children): # front to back
hit_view = child.hit(local_pt)
if hit_view is not None:
return hit_view
return self
def center(self):
if self.parent is not None:
self.frame.center = (self.parent.frame.w // 2,
self.parent.frame.h // 2)
def add_child(self, child):
assert child is not None
self.rm_child(child)
self.children.append(child)
child.parent = self
child.parented()
import scene
if scene.current is not None:
child.stylize()
def rm_child(self, child):
for index, ch in enumerate(self.children):
if ch == child:
ch.orphaned()
del self.children[index]
break
def rm(self):
if self.parent:
self.parent.rm_child(self)
def parented(self):
self.on_parented()
def orphaned(self):
self.on_orphaned()
def iter_ancestors(self):
curr = self
while curr.parent:
yield curr.parent
curr = curr.parent
def iter_children(self):
for child in self.children:
yield child
def bring_to_front(self):
"""TODO: explain depth sorting"""
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[-1], ch[index] = ch[index], ch[-1]
def move_to_back(self):
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[0], ch[index] = ch[index], ch[0]
|
fictorial/pygameui
|
pygameui/view.py
|
View.hit
|
python
|
def hit(self, pt):
if self.hidden or not self._enabled:
return None
if not self.frame.collidepoint(pt):
return None
local_pt = (pt[0] - self.frame.topleft[0],
pt[1] - self.frame.topleft[1])
for child in reversed(self.children): # front to back
hit_view = child.hit(local_pt)
if hit_view is not None:
return hit_view
return self
|
Find the view (self, child, or None) under the point `pt`.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/view.py#L286-L303
| null |
class View(object):
"""A rectangular portion of the window.
Views may have zero or more child views contained within it.
Signals
on_mouse_down(view, button, point)
on_mouse_up(view, button, point)
on_mouse_motion(view, point)
on_mouse_drag(view, point, delta)
on_key_down(view, key, code)
on_key_up(view, key)
on_parented(view)
on_orphaned(view) (from parent view)
on_focused(view)
on_blurred(view)
on_selected(view)
on_enabled(view)
on_disabled(view)
on_state_changed(view)
All mouse points passed to event methods and to slots are in local
view coordinates. Use `to_parent` and `to_window` to convert.
"""
def __init__(self, frame=None):
self.frame = frame
self.parent = None
self.children = [] # back->front
self._state = 'normal'
self._enabled = True
self.hidden = False
self.draggable = False
self.shadow_image = None
self.on_focused = callback.Signal()
self.on_blurred = callback.Signal()
self.on_selected = callback.Signal()
self.on_enabled = callback.Signal()
self.on_disabled = callback.Signal()
self.on_state_changed = callback.Signal()
self.on_mouse_up = callback.Signal()
self.on_mouse_down = callback.Signal()
self.on_mouse_motion = callback.Signal()
self.on_mouse_drag = callback.Signal()
self.on_key_down = callback.Signal()
self.on_key_up = callback.Signal()
self.on_parented = callback.Signal()
self.on_orphaned = callback.Signal()
def layout(self):
"""Call to have the view layout itself.
Subclasses should invoke this after laying out child
views and/or updating its own frame.
"""
if self.shadowed:
shadow_size = theme.current.shadow_size
shadowed_frame_size = (self.frame.w + shadow_size,
self.frame.h + shadow_size)
self.surface = pygame.Surface(
shadowed_frame_size, pygame.SRCALPHA, 32)
shadow_image = resource.get_image('shadow')
self.shadow_image = resource.scale_image(shadow_image,
shadowed_frame_size)
else:
self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)
self.shadow_image = None
def size_to_fit(self):
rect = self.frame
for child in self.children:
rect = rect.union(child.frame)
self.frame = rect
self.layout()
def update(self, dt):
for child in self.children:
child.update(dt)
def to_parent(self, point):
return (point[0] + self.frame.topleft[0],
point[1] + self.frame.topleft[1])
def from_parent(self, point):
return (point[0] - self.frame.topleft[0],
point[1] - self.frame.topleft[1])
def from_window(self, point):
curr = self
ancestors = [curr]
while curr.parent:
ancestors.append(curr.parent)
curr = curr.parent
for a in reversed(ancestors):
point = a.from_parent(point)
return point
def to_window(self, point):
curr = self
while curr:
point = curr.to_parent(point)
curr = curr.parent
return point
def mouse_up(self, button, point):
self.on_mouse_up(self, button, point)
def mouse_down(self, button, point):
self.on_mouse_down(self, button, point)
def mouse_motion(self, point):
self.on_mouse_motion(self, point)
# only called on drag event if .draggable is True
def mouse_drag(self, point, delta):
self.on_mouse_drag(self, point, delta)
self.frame.topleft = (self.frame.topleft[0] + delta[0],
self.frame.topleft[1] + delta[1])
if self.parent:
self.parent._child_dragged(self)
def key_down(self, key, code):
self.on_key_down(self, key, code)
def key_up(self, key):
self.on_key_up(self, key)
@property
def state(self):
"""The state of the view.
Potential values are 'normal', 'focused', 'selected', 'disabled'.
"""
return self._state
@state.setter
def state(self, new_state):
if self._state != new_state:
self._state = new_state
self.stylize()
self.on_state_changed()
def focus(self):
focus.set(self)
def has_focus(self):
return focus.view == self
def focused(self):
self.state = 'focused'
self.on_focused()
def blurred(self):
self.state = 'normal'
self.on_blurred()
def selected(self):
self.state = 'selected'
self.on_selected()
@property
def enabled(self):
    """Whether this view responds to input (checked by `hit`)."""
    return self._enabled

@enabled.setter
def enabled(self, yesno):
    # BUG FIX: the original additionally defined a plain method
    # `def enabled(self)` after this property, which rebound the class
    # attribute `enabled` to that method and silently discarded the
    # property and its setter.  The enable transition is inlined here
    # so the property survives; `disabled()` is kept as a method for
    # backward compatibility.
    if self._enabled != yesno:
        self._enabled = yesno
        if yesno:
            self.state = 'normal'
            self.on_enabled()
        else:
            self.disabled()

def disabled(self):
    """Transition to the 'disabled' state and fire `on_disabled`."""
    self.state = 'disabled'
    self.on_disabled()
def stylize(self):
"""Apply theme style attributes to this instance and its children.
This also causes a relayout to occur so that any changes in padding
or other stylistic attributes may be handled.
"""
# do children first in case parent needs to override their style
for child in self.children:
child.stylize()
style = theme.current.get_dict(self)
preserve_child = False
try:
preserve_child = getattr(theme.current, 'preserve_child')
except:
preserve_child = False
for key, val in style.iteritems():
kvc.set_value_for_keypath(self, key, val, preserve_child)
self.layout()
def draw(self):
"""Do not call directly."""
if self.hidden:
return False
if self.background_color is not None:
render.fillrect(self.surface, self.background_color,
rect=pygame.Rect((0, 0), self.frame.size))
for child in self.children:
if not child.hidden:
child.draw()
topleft = child.frame.topleft
if child.shadowed:
shadow_size = theme.current.shadow_size
shadow_topleft = (topleft[0] - shadow_size // 2,
topleft[1] - shadow_size // 2)
self.surface.blit(child.shadow_image, shadow_topleft)
self.surface.blit(child.surface, topleft)
if child.border_color and child.border_widths is not None:
if (type(child.border_widths) is int and
child.border_widths > 0):
pygame.draw.rect(self.surface, child.border_color,
child.frame, child.border_widths)
else:
tw, lw, bw, rw = child.get_border_widths()
tl = (child.frame.left, child.frame.top)
tr = (child.frame.right - 1, child.frame.top)
bl = (child.frame.left, child.frame.bottom - 1)
br = (child.frame.right - 1, child.frame.bottom - 1)
if tw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, tr, tw)
if lw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, bl, lw)
if bw > 0:
pygame.draw.line(self.surface, child.border_color,
bl, br, bw)
if rw > 0:
pygame.draw.line(self.surface, child.border_color,
tr, br, rw)
return True
def get_border_widths(self):
"""Return border width for each side top, left, bottom, right."""
if type(self.border_widths) is int: # uniform size
return [self.border_widths] * 4
return self.border_widths
def center(self):
if self.parent is not None:
self.frame.center = (self.parent.frame.w // 2,
self.parent.frame.h // 2)
def add_child(self, child):
assert child is not None
self.rm_child(child)
self.children.append(child)
child.parent = self
child.parented()
import scene
if scene.current is not None:
child.stylize()
def rm_child(self, child):
for index, ch in enumerate(self.children):
if ch == child:
ch.orphaned()
del self.children[index]
break
def rm(self):
if self.parent:
self.parent.rm_child(self)
def parented(self):
self.on_parented()
def orphaned(self):
self.on_orphaned()
def iter_ancestors(self):
curr = self
while curr.parent:
yield curr.parent
curr = curr.parent
def iter_children(self):
for child in self.children:
yield child
def bring_to_front(self):
"""TODO: explain depth sorting"""
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[-1], ch[index] = ch[index], ch[-1]
def move_to_back(self):
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[0], ch[index] = ch[index], ch[0]
|
fictorial/pygameui
|
pygameui/view.py
|
View.bring_to_front
|
python
|
def bring_to_front(self):
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[-1], ch[index] = ch[index], ch[-1]
|
TODO: explain depth sorting
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/view.py#L347-L352
| null |
class View(object):
"""A rectangular portion of the window.
Views may have zero or more child views contained within it.
Signals
on_mouse_down(view, button, point)
on_mouse_up(view, button, point)
on_mouse_motion(view, point)
on_mouse_drag(view, point, delta)
on_key_down(view, key, code)
on_key_up(view, key)
on_parented(view)
on_orphaned(view) (from parent view)
on_focused(view)
on_blurred(view)
on_selected(view)
on_enabled(view)
on_disabled(view)
on_state_changed(view)
All mouse points passed to event methods and to slots are in local
view coordinates. Use `to_parent` and `to_window` to convert.
"""
def __init__(self, frame=None):
self.frame = frame
self.parent = None
self.children = [] # back->front
self._state = 'normal'
self._enabled = True
self.hidden = False
self.draggable = False
self.shadow_image = None
self.on_focused = callback.Signal()
self.on_blurred = callback.Signal()
self.on_selected = callback.Signal()
self.on_enabled = callback.Signal()
self.on_disabled = callback.Signal()
self.on_state_changed = callback.Signal()
self.on_mouse_up = callback.Signal()
self.on_mouse_down = callback.Signal()
self.on_mouse_motion = callback.Signal()
self.on_mouse_drag = callback.Signal()
self.on_key_down = callback.Signal()
self.on_key_up = callback.Signal()
self.on_parented = callback.Signal()
self.on_orphaned = callback.Signal()
def layout(self):
"""Call to have the view layout itself.
Subclasses should invoke this after laying out child
views and/or updating its own frame.
"""
if self.shadowed:
shadow_size = theme.current.shadow_size
shadowed_frame_size = (self.frame.w + shadow_size,
self.frame.h + shadow_size)
self.surface = pygame.Surface(
shadowed_frame_size, pygame.SRCALPHA, 32)
shadow_image = resource.get_image('shadow')
self.shadow_image = resource.scale_image(shadow_image,
shadowed_frame_size)
else:
self.surface = pygame.Surface(self.frame.size, pygame.SRCALPHA, 32)
self.shadow_image = None
def size_to_fit(self):
rect = self.frame
for child in self.children:
rect = rect.union(child.frame)
self.frame = rect
self.layout()
def update(self, dt):
for child in self.children:
child.update(dt)
def to_parent(self, point):
return (point[0] + self.frame.topleft[0],
point[1] + self.frame.topleft[1])
def from_parent(self, point):
return (point[0] - self.frame.topleft[0],
point[1] - self.frame.topleft[1])
def from_window(self, point):
curr = self
ancestors = [curr]
while curr.parent:
ancestors.append(curr.parent)
curr = curr.parent
for a in reversed(ancestors):
point = a.from_parent(point)
return point
def to_window(self, point):
curr = self
while curr:
point = curr.to_parent(point)
curr = curr.parent
return point
def mouse_up(self, button, point):
self.on_mouse_up(self, button, point)
def mouse_down(self, button, point):
self.on_mouse_down(self, button, point)
def mouse_motion(self, point):
self.on_mouse_motion(self, point)
# only called on drag event if .draggable is True
def mouse_drag(self, point, delta):
self.on_mouse_drag(self, point, delta)
self.frame.topleft = (self.frame.topleft[0] + delta[0],
self.frame.topleft[1] + delta[1])
if self.parent:
self.parent._child_dragged(self)
def key_down(self, key, code):
self.on_key_down(self, key, code)
def key_up(self, key):
self.on_key_up(self, key)
@property
def state(self):
"""The state of the view.
Potential values are 'normal', 'focused', 'selected', 'disabled'.
"""
return self._state
@state.setter
def state(self, new_state):
if self._state != new_state:
self._state = new_state
self.stylize()
self.on_state_changed()
def focus(self):
focus.set(self)
def has_focus(self):
return focus.view == self
def focused(self):
self.state = 'focused'
self.on_focused()
def blurred(self):
self.state = 'normal'
self.on_blurred()
def selected(self):
self.state = 'selected'
self.on_selected()
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, yesno):
if self._enabled != yesno:
self._enabled = yesno
if yesno:
self.enabled()
else:
self.disabled()
def enabled(self):
self.state = 'normal'
self.on_enabled()
def disabled(self):
self.state = 'disabled'
self.on_disabled()
def stylize(self):
"""Apply theme style attributes to this instance and its children.
This also causes a relayout to occur so that any changes in padding
or other stylistic attributes may be handled.
"""
# do children first in case parent needs to override their style
for child in self.children:
child.stylize()
style = theme.current.get_dict(self)
preserve_child = False
try:
preserve_child = getattr(theme.current, 'preserve_child')
except:
preserve_child = False
for key, val in style.iteritems():
kvc.set_value_for_keypath(self, key, val, preserve_child)
self.layout()
def draw(self):
"""Do not call directly."""
if self.hidden:
return False
if self.background_color is not None:
render.fillrect(self.surface, self.background_color,
rect=pygame.Rect((0, 0), self.frame.size))
for child in self.children:
if not child.hidden:
child.draw()
topleft = child.frame.topleft
if child.shadowed:
shadow_size = theme.current.shadow_size
shadow_topleft = (topleft[0] - shadow_size // 2,
topleft[1] - shadow_size // 2)
self.surface.blit(child.shadow_image, shadow_topleft)
self.surface.blit(child.surface, topleft)
if child.border_color and child.border_widths is not None:
if (type(child.border_widths) is int and
child.border_widths > 0):
pygame.draw.rect(self.surface, child.border_color,
child.frame, child.border_widths)
else:
tw, lw, bw, rw = child.get_border_widths()
tl = (child.frame.left, child.frame.top)
tr = (child.frame.right - 1, child.frame.top)
bl = (child.frame.left, child.frame.bottom - 1)
br = (child.frame.right - 1, child.frame.bottom - 1)
if tw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, tr, tw)
if lw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, bl, lw)
if bw > 0:
pygame.draw.line(self.surface, child.border_color,
bl, br, bw)
if rw > 0:
pygame.draw.line(self.surface, child.border_color,
tr, br, rw)
return True
def get_border_widths(self):
"""Return border width for each side top, left, bottom, right."""
if type(self.border_widths) is int: # uniform size
return [self.border_widths] * 4
return self.border_widths
def hit(self, pt):
"""Find the view (self, child, or None) under the point `pt`."""
if self.hidden or not self._enabled:
return None
if not self.frame.collidepoint(pt):
return None
local_pt = (pt[0] - self.frame.topleft[0],
pt[1] - self.frame.topleft[1])
for child in reversed(self.children): # front to back
hit_view = child.hit(local_pt)
if hit_view is not None:
return hit_view
return self
def center(self):
if self.parent is not None:
self.frame.center = (self.parent.frame.w // 2,
self.parent.frame.h // 2)
def add_child(self, child):
assert child is not None
self.rm_child(child)
self.children.append(child)
child.parent = self
child.parented()
import scene
if scene.current is not None:
child.stylize()
def rm_child(self, child):
for index, ch in enumerate(self.children):
if ch == child:
ch.orphaned()
del self.children[index]
break
def rm(self):
if self.parent:
self.parent.rm_child(self)
def parented(self):
self.on_parented()
def orphaned(self):
self.on_orphaned()
def iter_ancestors(self):
curr = self
while curr.parent:
yield curr.parent
curr = curr.parent
def iter_children(self):
for child in self.children:
yield child
def move_to_back(self):
if self.parent is not None:
ch = self.parent.children
index = ch.index(self)
ch[0], ch[index] = ch[index], ch[0]
|
fictorial/pygameui
|
pygameui/theme.py
|
use_theme
|
python
|
def use_theme(theme):
global current
current = theme
import scene
if scene.current is not None:
scene.current.stylize()
|
Make the given theme current.
There are two included themes: light_theme, dark_theme.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/theme.py#L176-L185
| null |
from itertools import chain
import resource
from colors import *
class Theme(object):
"""A theme is a hierarchical set of view style attributes.
Each view may have a set of attributes that control its
visual style when rendered. These style attributes are stored
in a Theme.
Style attributes are hierarchical in that a view class
may override the style attribute of a parent view class.
Also, a view class need not override all style attributes
of a parent view class.
For instance, let's say we define the default view background
color to be gray and the border color to be black.
a_theme.set(class_name='View',
state='normal',
key='background_color',
value=(128, 128, 128))
a_theme.set(class_name='View',
state='normal',
key='border_color',
value=(0, 0, 0))
Let's assume this is the only style information defined for View.
Now, let's override the background color for a Button, add a
style attribute for the text color, and leave the border color.
a_theme.set(class_name='Button',
state='normal',
key='background_color',
value=(64, 64, 64))
a_theme.set(class_name='Button',
state='normal',
key='text_color',
value=(128, 0, 0))
When a view is stylized (see View.stylize), style attributes and
values are queried in the current Theme and set on the view instance.
b = Button()
b.state = 'normal'
b.stylize()
The style attributes set on 'b' would be:
background_color: (64, 64, 64) from Button
border_color: (0, 0, 0) from View
text_color: (128, 0, 0) from Button
Note that the 'key' is really a 'key path' which would allow you
to style views contained in other views. For instance, an AlertView
has a `title_label` which is a Label. You may wish to style
AlertView titles differently than other labels, and you can. See
the `light_theme` entry for `title_label`. Also see the `kvc` module
for a simple form of Apple's Key-Value Coding for Python.
"""
def __init__(self):
self._styles = {}
def set(self, class_name, state, key, value):
"""Set a single style value for a view class and state.
class_name
The name of the class to be styled; do not
include the package name; e.g. 'Button'.
state
The name of the state to be stylized. One of the
following: 'normal', 'focused', 'selected', 'disabled'
is common.
key
The style attribute name; e.g. 'background_color'.
value
The value of the style attribute; colors are either
a 3-tuple for RGB, a 4-tuple for RGBA, or a pair
thereof for a linear gradient.
"""
# Ensure the nested {class_name: {state: {...}}} dicts exist,
# then record the style value under `key`.
self._styles.setdefault(class_name, {}).setdefault(state, {})
self._styles[class_name][state][key] = value
def get_dict_for_class(self, class_name, state=None, base_name='View'):
"""The style dict for a given class and state.
This collects the style attributes from parent classes
and the class of the given object and gives precedence
to values thereof to the children.
The state attribute of the view instance is taken as
the current state if state is None.
If the state is not 'normal' then the style definitions
for the 'normal' state are mixed-in from the given state
style definitions, giving precedence to the non-'normal'
style definitions.
"""
# Despite its name, `class_name` is a class object here; walk the
# single-inheritance chain from the class up to `base_name`.
classes = []
klass = class_name
while True:
classes.append(klass)
if klass.__name__ == base_name:
break
klass = klass.__bases__[0]
if state is None:
state = 'normal'
style = {}
# Iterate subclass-first; dict(chain(child, accumulated)) keeps the
# value seen earliest (i.e. the most-derived class) for each key.
for klass in classes:
# NOTE(review): the `class_name` parameter is rebound to a string
# here — harmless, but shadows the original argument.
class_name = klass.__name__
try:
state_styles = self._styles[class_name][state]
except KeyError:
state_styles = {}
if state != 'normal':
# Mix 'normal' styles underneath the state-specific ones so
# non-'normal' definitions take precedence.
try:
normal_styles = self._styles[class_name]['normal']
except KeyError:
normal_styles = {}
state_styles = dict(chain(normal_styles.iteritems(),
state_styles.iteritems()))
style = dict(chain(state_styles.iteritems(),
style.iteritems()))
return style
def get_dict(self, obj, state=None, base_name='View'):
    """Return the style dict for a view instance.

    obj
        The view whose class (and, by default, state) selects styles.
    state
        Optional state override; falls back to `obj.state` when None.
        BUG FIX: this argument was previously accepted but silently
        ignored — `obj.state` was always used.
    base_name
        Name of the root class at which the inheritance walk stops.
    """
    if state is None:
        state = obj.state
    return self.get_dict_for_class(class_name=obj.__class__,
                                   state=state,
                                   base_name=base_name)
def get_value(self, class_name, attr, default_value=None,
              state='normal', base_name='View'):
    """Look up one style attribute for `class_name`, or a default."""
    styles = self.get_dict_for_class(class_name, state, base_name)
    return styles.get(attr, default_value)
current = None
light_theme = Theme()
dark_theme = Theme()
def init_light_theme():
color1 = (227, 227, 159) # a light yellow
color2 = (173, 222, 78) # a light green
color3 = (77, 148, 83) # a dark green
color4 = white_color
color5 = near_white_color
color6 = light_gray_color
color7 = gray_color
color8 = dark_gray_color
color9 = black_color
light_theme.set(class_name='View',
state='normal',
key='background_color',
value=(color4, color5))
light_theme.set(class_name='View',
state='focused',
key='background_color',
value=(color1, color2))
light_theme.set(class_name='View',
state='selected',
key='background_color',
value=(color1, color2))
light_theme.set(class_name='View',
state='normal',
key='border_color',
value=color6)
light_theme.set(class_name='View',
state='normal',
key='border_widths',
value=0)
light_theme.set(class_name='View',
state='normal',
key='margin',
value=(6, 6))
light_theme.set(class_name='View',
state='normal',
key='padding',
value=(0, 0))
light_theme.set(class_name='View',
state='normal',
key='shadowed',
value=False)
light_theme.set(class_name='Scene',
state='normal',
key='background_color',
value=(color5, color4))
light_theme.set(class_name='Label',
state='normal',
key='text_color',
value=color8)
light_theme.set(class_name='Label',
state='selected',
key='text_color',
value=color3)
light_theme.set(class_name='Label',
state='normal',
key='text_shadow_color',
value=color4)
light_theme.set(class_name='Label',
state='normal',
key='text_shadow_offset',
value=(0, 1))
light_theme.set(class_name='Label',
state='normal',
key='padding',
value=(6, 6))
light_theme.set(class_name='Label',
state='normal',
key='border_widths',
value=None)
light_theme.set(class_name='Label',
state='normal',
key='font',
value=resource.get_font(16))
light_theme.label_height = 16 + 6 * 2 # font size + padding above/below
light_theme.set(class_name='Button',
state='normal',
key='background_color',
value=(color4, color6))
light_theme.set(class_name='Button',
state='focused',
key='background_color',
value=color1)
light_theme.set(class_name='Button',
state='normal',
key='text_color',
value=color8)
light_theme.set(class_name='Button',
state='normal',
key='font',
value=resource.get_font(16, use_bold=True))
light_theme.set(class_name='Button',
state='normal',
key='border_widths',
value=1)
light_theme.set(class_name='Button',
state='normal',
key='border_color',
value=color6)
light_theme.button_height = 16 + 6 * 2 # font size + padding above/below
light_theme.set(class_name='ImageButton',
state='normal',
key='background_color',
value=(color4, color6))
light_theme.set(class_name='ImageButton',
state='focused',
key='background_color',
value=color1)
light_theme.set(class_name='ImageButton',
state='normal',
key='border_color',
value=color6)
light_theme.set(class_name='ImageButton',
state='normal',
key='border_widths',
value=1)
light_theme.set(class_name='ImageButton',
state='normal',
key='padding',
value=(6, 6))
light_theme.set(class_name='ScrollbarThumbView',
state='normal',
key='background_color',
value=(color4, color6))
light_theme.set(class_name='ScrollbarThumbView',
state='focused',
key='background_color',
value=(color1, color2))
light_theme.set(class_name='ScrollbarThumbView',
state='normal',
key='border_widths',
value=1)
light_theme.set(class_name='ScrollbarView',
state='normal',
key='background_color',
value=color5)
light_theme.set(class_name='ScrollbarView',
state='normal',
key='border_widths',
value=(1, 1, 0, 0)) # t l b r
light_theme.set(class_name='ScrollView',
state='normal',
key='hole_color',
value=whites_twin_color)
light_theme.set(class_name='ScrollView',
state='normal',
key='border_widths',
value=1)
light_theme.set(class_name='SliderTrackView',
state='normal',
key='background_color',
value=(color5, color4))
light_theme.set(class_name='SliderTrackView',
state='normal',
key='value_color',
value=(color1, color2))
light_theme.set(class_name='SliderTrackView',
state='normal',
key='border_widths',
value=1)
light_theme.set(class_name='SliderView',
state='normal',
key='background_color',
value=clear_color)
light_theme.set(class_name='SliderView',
state='normal',
key='border_widths',
value=None)
light_theme.set(class_name='ImageView',
state='normal',
key='background_color',
value=None)
light_theme.set(class_name='ImageView',
state='normal',
key='padding',
value=(0, 0))
light_theme.set(class_name='Checkbox',
state='normal',
key='background_color',
value=clear_color)
light_theme.set(class_name='Checkbox',
state='normal',
key='padding',
value=(0, 0))
light_theme.set(class_name='Checkbox',
state='focused',
key='check_label.background_color',
value=(color1, color2))
light_theme.set(class_name='Checkbox',
state='normal',
key='check_label.border_widths',
value=1)
light_theme.set(class_name='Checkbox',
state='normal',
key='label.background_color',
value=clear_color)
light_theme.set(class_name='SpinnerView',
state='normal',
key='border_widths',
value=None)
light_theme.set(class_name='DialogView',
state='normal',
key='background_color',
value=(color4, color6))
light_theme.set(class_name='DialogView',
state='normal',
key='shadowed',
value=True)
light_theme.shadow_size = 140
light_theme.set(class_name='AlertView',
state='normal',
key='title_label.background_color',
value=color7)
light_theme.set(class_name='AlertView',
state='normal',
key='title_label.text_color',
value=color4)
light_theme.set(class_name='AlertView',
state='normal',
key='title_label.text_shadow_offset',
value=None)
light_theme.set(class_name='AlertView',
state='normal',
key='message_label.background_color',
value=clear_color)
light_theme.set(class_name='AlertView',
state='normal',
key='font',
value=resource.get_font(16))
light_theme.set(class_name='AlertView',
state='normal',
key='padding',
value=(6, 6))
light_theme.set(class_name='NotificationView',
state='normal',
key='background_color',
value=(color1, color2))
light_theme.set(class_name='NotificationView',
state='normal',
key='border_color',
value=color3)
light_theme.set(class_name='NotificationView',
state='normal',
key='border_widths',
value=(0, 2, 2, 2))
light_theme.set(class_name='NotificationView',
state='normal',
key='padding',
value=(0, 0))
light_theme.set(class_name='NotificationView',
state='normal',
key='message_label.background_color',
value=clear_color)
light_theme.set(class_name='SelectView',
state='normal',
key='disclosure_triangle_color',
value=color8)
light_theme.set(class_name='SelectView',
state='normal',
key='border_widths',
value=1)
light_theme.set(class_name='SelectView',
state='normal',
key='top_label.focusable',
value=False)
light_theme.set(class_name='TextField',
state='focused',
key='label.background_color',
value=(color1, color2))
light_theme.set(class_name='TextField',
state='normal',
key='placeholder_text_color',
value=color6)
light_theme.set(class_name='TextField',
state='normal',
key='border_widths',
value=1)
light_theme.set(class_name='TextField',
state='normal',
key='text_color',
value=color9)
light_theme.set(class_name='TextField',
state='disabled',
key='text_color',
value=color6)
light_theme.set(class_name='TextField',
state='normal',
key='blink_cursor',
value=True)
light_theme.set(class_name='TextField',
state='normal',
key='cursor_blink_duration',
value=450)
light_theme.set(class_name='GridView',
state='normal',
key='background_color',
value=color4)
light_theme.set(class_name='GridView',
state='normal',
key='line_color',
value=color6)
def init_dark_theme():
# TODO
pass
def init():
"""Initialize theme support."""
init_light_theme()
init_dark_theme()
use_theme(light_theme)
|
fictorial/pygameui
|
pygameui/theme.py
|
Theme.set
|
python
|
def set(self, class_name, state, key, value):
self._styles.setdefault(class_name, {}).setdefault(state, {})
self._styles[class_name][state][key] = value
|
Set a single style value for a view class and state.
class_name
The name of the class to be styled; do not
include the package name; e.g. 'Button'.
state
The name of the state to be stylized. One of the
following: 'normal', 'focused', 'selected', 'disabled'
is common.
key
The style attribute name; e.g. 'background_color'.
value
The value of the style attribute; colors are either
a 3-tuple for RGB, a 4-tuple for RGBA, or a pair
thereof for a linear gradient.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/theme.py#L71-L97
| null |
class Theme(object):
"""A theme is a hierarchical set of view style attributes.
Each view may have a set of attributes that control its
visual style when rendered. These style attributes are stored
in a Theme.
Style attributes are hierarchical in that a view class
may override the style attribute of a parent view class.
Also, a view class need not override all style attributes
of a parent view class.
For instance, let's say we define the default view background
color to be gray and the border color to be black.
a_theme.set(class_name='View',
state='normal',
key='background_color',
value=(128, 128, 128))
a_theme.set(class_name='View',
state='normal',
key='border_color',
value=(0, 0, 0))
Let's assume this is the only style information defined for View.
Now, let's override the background color for a Button, add a
style attribute for the text color, and leave the border color.
a_theme.set(class_name='Button',
state='normal',
key='background_color',
value=(64, 64, 64))
a_theme.set(class_name='Button',
state='normal',
key='text_color',
value=(128, 0, 0))
When a view is stylized (see View.stylize), style attributes and
values are queried in the current Theme and set on the view instance.
b = Button()
b.state = 'normal'
b.stylize()
The style attributes set on 'b' would be:
background_color: (64, 64, 64) from Button
border_color: (0, 0, 0) from View
text_color: (128, 0, 0) from Button
Note that the 'key' is really a 'key path' which would allow you
to style views contained in other views. For instance, an AlertView
has a `title_label` which is a Label. You may wish to style
AlertView titles differently than other labels, and you can. See
the `light_theme` entry for `title_label`. Also see the `kvc` module
for a simple form of Apple's Key-Value Coding for Python.
"""
def __init__(self):
self._styles = {}
def get_dict_for_class(self, class_name, state=None, base_name='View'):
"""The style dict for a given class and state.
This collects the style attributes from parent classes
and the class of the given object and gives precedence
to values thereof to the children.
The state attribute of the view instance is taken as
the current state if state is None.
If the state is not 'normal' then the style definitions
for the 'normal' state are mixed-in from the given state
style definitions, giving precedence to the non-'normal'
style definitions.
"""
classes = []
klass = class_name
while True:
classes.append(klass)
if klass.__name__ == base_name:
break
klass = klass.__bases__[0]
if state is None:
state = 'normal'
style = {}
for klass in classes:
class_name = klass.__name__
try:
state_styles = self._styles[class_name][state]
except KeyError:
state_styles = {}
if state != 'normal':
try:
normal_styles = self._styles[class_name]['normal']
except KeyError:
normal_styles = {}
state_styles = dict(chain(normal_styles.iteritems(),
state_styles.iteritems()))
style = dict(chain(state_styles.iteritems(),
style.iteritems()))
return style
def get_dict(self, obj, state=None, base_name='View'):
"""The style dict for a view instance.
"""
return self.get_dict_for_class(class_name=obj.__class__,
state=obj.state,
base_name=base_name)
def get_value(self, class_name, attr, default_value=None,
state='normal', base_name='View'):
"""Get a single style attribute value for the given class.
"""
styles = self.get_dict_for_class(class_name, state, base_name)
try:
return styles[attr]
except KeyError:
return default_value
|
fictorial/pygameui
|
pygameui/theme.py
|
Theme.get_dict_for_class
|
python
|
def get_dict_for_class(self, class_name, state=None, base_name='View'):
classes = []
klass = class_name
while True:
classes.append(klass)
if klass.__name__ == base_name:
break
klass = klass.__bases__[0]
if state is None:
state = 'normal'
style = {}
for klass in classes:
class_name = klass.__name__
try:
state_styles = self._styles[class_name][state]
except KeyError:
state_styles = {}
if state != 'normal':
try:
normal_styles = self._styles[class_name]['normal']
except KeyError:
normal_styles = {}
state_styles = dict(chain(normal_styles.iteritems(),
state_styles.iteritems()))
style = dict(chain(state_styles.iteritems(),
style.iteritems()))
return style
|
The style dict for a given class and state.
This collects the style attributes from parent classes
and the class of the given object and gives precedence
to values thereof to the children.
The state attribute of the view instance is taken as
the current state if state is None.
If the state is not 'normal' then the style definitions
for the 'normal' state are mixed-in from the given state
style definitions, giving precedence to the non-'normal'
style definitions.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/theme.py#L99-L149
| null |
class Theme(object):
"""A theme is a hierarchical set of view style attributes.
Each view may have a set of attributes that control its
visual style when rendered. These style attributes are stored
in a Theme.
Style attributes are hierarchical in that a view class
may override the style attribute of a parent view class.
Also, a view class need not override all style attributes
of a parent view class.
For instance, let's say we define the default view background
color to be gray and the border color to be black.
a_theme.set(class_name='View',
state='normal',
key='background_color',
value=(128, 128, 128))
a_theme.set(class_name='View',
state='normal',
key='border_color',
value=(0, 0, 0))
Let's assume this is the only style information defined for View.
Now, let's override the background color for a Button, add a
style attribute for the text color, and leave the border color.
a_theme.set(class_name='Button',
state='normal',
key='background_color',
value=(64, 64, 64))
a_theme.set(class_name='Button',
state='normal',
key='text_color',
value=(128, 0, 0))
When a view is stylized (see View.stylize), style attributes and
values are queried in the current Theme and set on the view instance.
b = Button()
b.state = 'normal'
b.stylize()
The style attributes set on 'b' would be:
background_color: (64, 64, 64) from Button
border_color: (0, 0, 0) from View
text_color: (128, 0, 0) from Button
Note that the 'key' is really a 'key path' which would allow you
to style views contained in other views. For instance, an AlertView
has a `title_label` which is a Label. You may wish to style
AlertView titles differently than other labels, and you can. See
the `light_theme` entry for `title_label`. Also see the `kvc` module
for a simple form of Apple's Key-Value Coding for Python.
"""
def __init__(self):
self._styles = {}
def set(self, class_name, state, key, value):
"""Set a single style value for a view class and state.
class_name
The name of the class to be styled; do not
include the package name; e.g. 'Button'.
state
The name of the state to be stylized. One of the
following: 'normal', 'focused', 'selected', 'disabled'
is common.
key
The style attribute name; e.g. 'background_color'.
value
The value of the style attribute; colors are either
a 3-tuple for RGB, a 4-tuple for RGBA, or a pair
thereof for a linear gradient.
"""
self._styles.setdefault(class_name, {}).setdefault(state, {})
self._styles[class_name][state][key] = value
def get_dict(self, obj, state=None, base_name='View'):
"""The style dict for a view instance.
"""
return self.get_dict_for_class(class_name=obj.__class__,
state=obj.state,
base_name=base_name)
def get_value(self, class_name, attr, default_value=None,
state='normal', base_name='View'):
"""Get a single style attribute value for the given class.
"""
styles = self.get_dict_for_class(class_name, state, base_name)
try:
return styles[attr]
except KeyError:
return default_value
|
fictorial/pygameui
|
pygameui/theme.py
|
Theme.get_dict
|
python
|
def get_dict(self, obj, state=None, base_name='View'):
return self.get_dict_for_class(class_name=obj.__class__,
state=obj.state,
base_name=base_name)
|
The style dict for a view instance.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/theme.py#L151-L157
|
[
"def get_dict_for_class(self, class_name, state=None, base_name='View'):\n \"\"\"The style dict for a given class and state.\n\n This collects the style attributes from parent classes\n and the class of the given object and gives precedence\n to values thereof to the children.\n\n The state attribute of the view instance is taken as\n the current state if state is None.\n\n If the state is not 'normal' then the style definitions\n for the 'normal' state are mixed-in from the given state\n style definitions, giving precedence to the non-'normal'\n style definitions.\n\n \"\"\"\n classes = []\n klass = class_name\n\n while True:\n classes.append(klass)\n if klass.__name__ == base_name:\n break\n klass = klass.__bases__[0]\n\n if state is None:\n state = 'normal'\n\n style = {}\n\n for klass in classes:\n class_name = klass.__name__\n\n try:\n state_styles = self._styles[class_name][state]\n except KeyError:\n state_styles = {}\n\n if state != 'normal':\n try:\n normal_styles = self._styles[class_name]['normal']\n except KeyError:\n normal_styles = {}\n\n state_styles = dict(chain(normal_styles.iteritems(),\n state_styles.iteritems()))\n\n style = dict(chain(state_styles.iteritems(),\n style.iteritems()))\n\n return style\n"
] |
class Theme(object):
"""A theme is a hierarchical set of view style attributes.
Each view may have a set of attributes that control its
visual style when rendered. These style attributes are stored
in a Theme.
Style attributes are hierarchical in that a view class
may override the style attribute of a parent view class.
Also, a view class need not override all style attributes
of a parent view class.
For instance, let's say we define the default view background
color to be gray and the border color to be black.
a_theme.set(class_name='View',
state='normal',
key='background_color',
value=(128, 128, 128))
a_theme.set(class_name='View',
state='normal',
key='border_color',
value=(0, 0, 0))
Let's assume this is the only style information defined for View.
Now, let's override the background color for a Button, add a
style attribute for the text color, and leave the border color.
a_theme.set(class_name='Button',
state='normal',
key='background_color',
value=(64, 64, 64))
a_theme.set(class_name='Button',
state='normal',
key='text_color',
value=(128, 0, 0))
When a view is stylized (see View.stylize), style attributes and
values are queried in the current Theme and set on the view instance.
b = Button()
b.state = 'normal'
b.stylize()
The style attributes set on 'b' would be:
background_color: (64, 64, 64) from Button
border_color: (0, 0, 0) from View
text_color: (128, 0, 0) from Button
Note that the 'key' is really a 'key path' which would allow you
to style views contained in other views. For instance, an AlertView
has a `title_label` which is a Label. You may wish to style
AlertView titles differently than other labels, and you can. See
the `light_theme` entry for `title_label`. Also see the `kvc` module
for a simple form of Apple's Key-Value Coding for Python.
"""
def __init__(self):
self._styles = {}
def set(self, class_name, state, key, value):
"""Set a single style value for a view class and state.
class_name
The name of the class to be styled; do not
include the package name; e.g. 'Button'.
state
The name of the state to be stylized. One of the
following: 'normal', 'focused', 'selected', 'disabled'
is common.
key
The style attribute name; e.g. 'background_color'.
value
The value of the style attribute; colors are either
a 3-tuple for RGB, a 4-tuple for RGBA, or a pair
thereof for a linear gradient.
"""
self._styles.setdefault(class_name, {}).setdefault(state, {})
self._styles[class_name][state][key] = value
def get_dict_for_class(self, class_name, state=None, base_name='View'):
"""The style dict for a given class and state.
This collects the style attributes from parent classes
and the class of the given object and gives precedence
to values thereof to the children.
The state attribute of the view instance is taken as
the current state if state is None.
If the state is not 'normal' then the style definitions
for the 'normal' state are mixed-in from the given state
style definitions, giving precedence to the non-'normal'
style definitions.
"""
classes = []
klass = class_name
while True:
classes.append(klass)
if klass.__name__ == base_name:
break
klass = klass.__bases__[0]
if state is None:
state = 'normal'
style = {}
for klass in classes:
class_name = klass.__name__
try:
state_styles = self._styles[class_name][state]
except KeyError:
state_styles = {}
if state != 'normal':
try:
normal_styles = self._styles[class_name]['normal']
except KeyError:
normal_styles = {}
state_styles = dict(chain(normal_styles.iteritems(),
state_styles.iteritems()))
style = dict(chain(state_styles.iteritems(),
style.iteritems()))
return style
def get_value(self, class_name, attr, default_value=None,
state='normal', base_name='View'):
"""Get a single style attribute value for the given class.
"""
styles = self.get_dict_for_class(class_name, state, base_name)
try:
return styles[attr]
except KeyError:
return default_value
|
fictorial/pygameui
|
pygameui/theme.py
|
Theme.get_value
|
python
|
def get_value(self, class_name, attr, default_value=None,
state='normal', base_name='View'):
styles = self.get_dict_for_class(class_name, state, base_name)
try:
return styles[attr]
except KeyError:
return default_value
|
Get a single style attribute value for the given class.
|
train
|
https://github.com/fictorial/pygameui/blob/af6a35f347d6fafa66c4255bbbe38736d842ff65/pygameui/theme.py#L159-L168
|
[
"def get_dict_for_class(self, class_name, state=None, base_name='View'):\n \"\"\"The style dict for a given class and state.\n\n This collects the style attributes from parent classes\n and the class of the given object and gives precedence\n to values thereof to the children.\n\n The state attribute of the view instance is taken as\n the current state if state is None.\n\n If the state is not 'normal' then the style definitions\n for the 'normal' state are mixed-in from the given state\n style definitions, giving precedence to the non-'normal'\n style definitions.\n\n \"\"\"\n classes = []\n klass = class_name\n\n while True:\n classes.append(klass)\n if klass.__name__ == base_name:\n break\n klass = klass.__bases__[0]\n\n if state is None:\n state = 'normal'\n\n style = {}\n\n for klass in classes:\n class_name = klass.__name__\n\n try:\n state_styles = self._styles[class_name][state]\n except KeyError:\n state_styles = {}\n\n if state != 'normal':\n try:\n normal_styles = self._styles[class_name]['normal']\n except KeyError:\n normal_styles = {}\n\n state_styles = dict(chain(normal_styles.iteritems(),\n state_styles.iteritems()))\n\n style = dict(chain(state_styles.iteritems(),\n style.iteritems()))\n\n return style\n"
] |
class Theme(object):
"""A theme is a hierarchical set of view style attributes.
Each view may have a set of attributes that control its
visual style when rendered. These style attributes are stored
in a Theme.
Style attributes are hierarchical in that a view class
may override the style attribute of a parent view class.
Also, a view class need not override all style attributes
of a parent view class.
For instance, let's say we define the default view background
color to be gray and the border color to be black.
a_theme.set(class_name='View',
state='normal',
key='background_color',
value=(128, 128, 128))
a_theme.set(class_name='View',
state='normal',
key='border_color',
value=(0, 0, 0))
Let's assume this is the only style information defined for View.
Now, let's override the background color for a Button, add a
style attribute for the text color, and leave the border color.
a_theme.set(class_name='Button',
state='normal',
key='background_color',
value=(64, 64, 64))
a_theme.set(class_name='Button',
state='normal',
key='text_color',
value=(128, 0, 0))
When a view is stylized (see View.stylize), style attributes and
values are queried in the current Theme and set on the view instance.
b = Button()
b.state = 'normal'
b.stylize()
The style attributes set on 'b' would be:
background_color: (64, 64, 64) from Button
border_color: (0, 0, 0) from View
text_color: (128, 0, 0) from Button
Note that the 'key' is really a 'key path' which would allow you
to style views contained in other views. For instance, an AlertView
has a `title_label` which is a Label. You may wish to style
AlertView titles differently than other labels, and you can. See
the `light_theme` entry for `title_label`. Also see the `kvc` module
for a simple form of Apple's Key-Value Coding for Python.
"""
def __init__(self):
self._styles = {}
def set(self, class_name, state, key, value):
"""Set a single style value for a view class and state.
class_name
The name of the class to be styled; do not
include the package name; e.g. 'Button'.
state
The name of the state to be stylized. One of the
following: 'normal', 'focused', 'selected', 'disabled'
is common.
key
The style attribute name; e.g. 'background_color'.
value
The value of the style attribute; colors are either
a 3-tuple for RGB, a 4-tuple for RGBA, or a pair
thereof for a linear gradient.
"""
self._styles.setdefault(class_name, {}).setdefault(state, {})
self._styles[class_name][state][key] = value
def get_dict_for_class(self, class_name, state=None, base_name='View'):
"""The style dict for a given class and state.
This collects the style attributes from parent classes
and the class of the given object and gives precedence
to values thereof to the children.
The state attribute of the view instance is taken as
the current state if state is None.
If the state is not 'normal' then the style definitions
for the 'normal' state are mixed-in from the given state
style definitions, giving precedence to the non-'normal'
style definitions.
"""
classes = []
klass = class_name
while True:
classes.append(klass)
if klass.__name__ == base_name:
break
klass = klass.__bases__[0]
if state is None:
state = 'normal'
style = {}
for klass in classes:
class_name = klass.__name__
try:
state_styles = self._styles[class_name][state]
except KeyError:
state_styles = {}
if state != 'normal':
try:
normal_styles = self._styles[class_name]['normal']
except KeyError:
normal_styles = {}
state_styles = dict(chain(normal_styles.iteritems(),
state_styles.iteritems()))
style = dict(chain(state_styles.iteritems(),
style.iteritems()))
return style
def get_dict(self, obj, state=None, base_name='View'):
"""The style dict for a view instance.
"""
return self.get_dict_for_class(class_name=obj.__class__,
state=obj.state,
base_name=base_name)
|
brianhie/scanorama
|
bin/unsupervised.py
|
silhouette_score
|
python
|
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
                     random_state=None, **kwds):
    """Return the mean Silhouette Coefficient over all samples.

    Optionally evaluates on a random subset of ``sample_size`` rows;
    extra keyword arguments are forwarded to the pairwise-distance
    computation inside ``silhouette_samples``.
    """
    if sample_size is not None:
        # Input validation happens only on the subsampling path; the
        # full-data path relies on silhouette_samples' own check_X_y.
        X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
        random_state = check_random_state(random_state)
        indices = random_state.permutation(X.shape[0])[:sample_size]
        if metric == "precomputed":
            # X is a square distance matrix: subset rows AND columns.
            X, labels = X[indices].T[indices].T, labels[indices]
        else:
            X, labels = X[indices], labels[indices]
    return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
|
Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, optional (default=None)
The generator used to randomly select a subset of samples. If int,
random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If
None, the random number generator is the RandomState instance used by
`np.random`. Used when ``sample_size is not None``.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/bin/unsupervised.py#L27-L106
|
[
"def silhouette_samples(X, labels, metric='euclidean', **kwds):\n \"\"\"Compute the Silhouette Coefficient for each sample.\n\n The Silhouette Coefficient is a measure of how well samples are clustered\n with samples that are similar to themselves. Clustering models with a high\n Silhouette Coefficient are said to be dense, where samples in the same\n cluster are similar to each other, and well separated, where samples in\n different clusters are not very similar to each other.\n\n The Silhouette Coefficient is calculated using the mean intra-cluster\n distance (``a``) and the mean nearest-cluster distance (``b``) for each\n sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,\n b)``.\n Note that Silhouette Coefficient is only defined if number of labels\n is 2 <= n_labels <= n_samples - 1.\n\n This function returns the Silhouette Coefficient for each sample.\n\n The best value is 1 and the worst value is -1. Values near 0 indicate\n overlapping clusters.\n\n Read more in the :ref:`User Guide <silhouette_coefficient>`.\n\n Parameters\n ----------\n X : array [n_samples_a, n_samples_a] if metric == \"precomputed\", or, \\\n [n_samples_a, n_features] otherwise\n Array of pairwise distances between samples, or a feature array.\n\n labels : array, shape = [n_samples]\n label values for each sample\n\n metric : string, or callable\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is\n the distance array itself, use \"precomputed\" as the metric.\n\n **kwds : optional keyword parameters\n Any further parameters are passed directly to the distance function.\n If using a ``scipy.spatial.distance`` metric, the parameters are still\n metric dependent. 
See the scipy docs for usage examples.\n\n Returns\n -------\n silhouette : array, shape = [n_samples]\n Silhouette Coefficient for each samples.\n\n References\n ----------\n\n .. [1] `Peter J. Rousseeuw (1987). \"Silhouettes: a Graphical Aid to the\n Interpretation and Validation of Cluster Analysis\". Computational\n and Applied Mathematics 20: 53-65.\n <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_\n\n .. [2] `Wikipedia entry on the Silhouette Coefficient\n <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_\n\n \"\"\"\n X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])\n le = LabelEncoder()\n labels = le.fit_transform(labels)\n check_number_of_labels(len(le.classes_), X.shape[0])\n\n distances = pairwise_distances(X, metric=metric, **kwds)\n unique_labels = le.classes_\n n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))\n\n # For sample i, store the mean distance of the cluster to which\n # it belongs in intra_clust_dists[i]\n intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)\n\n # For sample i, store the mean distance of the second closest\n # cluster in inter_clust_dists[i]\n inter_clust_dists = np.inf + intra_clust_dists\n\n for curr_label in range(len(unique_labels)):\n\n # Find inter_clust_dist for all samples belonging to the same\n # label.\n mask = labels == curr_label\n current_distances = distances[mask]\n\n # Leave out current sample.\n n_samples_curr_lab = n_samples_per_label[curr_label] - 1\n if n_samples_curr_lab != 0:\n intra_clust_dists[mask] = np.sum(\n current_distances[:, mask], axis=1) / n_samples_curr_lab\n\n # Now iterate over all other labels, finding the mean\n # cluster distance that is closest to every sample.\n for other_label in range(len(unique_labels)):\n if other_label != curr_label:\n other_mask = labels == other_label\n other_distances = np.mean(\n current_distances[:, other_mask], axis=1)\n inter_clust_dists[mask] = np.minimum(\n 
inter_clust_dists[mask], other_distances)\n\n sil_samples = inter_clust_dists - intra_clust_dists\n sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)\n # score 0 for clusters of size 1, according to the paper\n sil_samples[n_samples_per_label.take(labels) == 1] = 0\n return sil_samples\n"
] |
"""Unsupervised evaluation metrics."""
# Modified by Brian Hie <brianhie@mit.edu> to allow for multicore
# pairwise distance matrix computation.
# Original source code available at:
# https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/metrics/cluster/unsupervised.py
# Authors: Robert Layton <robertlayton@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from sklearn.utils import check_random_state
from sklearn.utils import check_X_y
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
    """Raise ValueError unless 2 <= n_labels <= n_samples - 1."""
    # Clustering scores are undefined for a single cluster or for one
    # cluster per sample.
    if n_labels <= 1 or n_labels >= n_samples:
        raise ValueError("Number of labels is %d. Valid values are 2 "
                         "to n_samples - 1 (inclusive)" % n_labels)
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
check_number_of_labels(len(le.classes_), X.shape[0])
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf + intra_clust_dists
for curr_label in range(len(unique_labels)):
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = n_samples_per_label[curr_label] - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in range(len(unique_labels)):
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# score 0 for clusters of size 1, according to the paper
sil_samples[n_samples_per_label.take(labels) == 1] = 0
return sil_samples
def calinski_harabaz_score(X, labels):
    """Compute the Calinski and Harabaz score.
    The score is defined as ratio between the within-cluster dispersion and
    the between-cluster dispersion.
    Read more in the :ref:`User Guide <calinski_harabaz_index>`.
    Parameters
    ----------
    X : array-like, shape (``n_samples``, ``n_features``)
        List of ``n_features``-dimensional data points. Each row corresponds
        to a single data point.
    labels : array-like, shape (``n_samples``,)
        Predicted labels for each sample.
    Returns
    -------
    score : float
        The resulting Calinski-Harabaz score.
    References
    ----------
    .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
       analysis". Communications in Statistics
       <http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
    """
    X, labels = check_X_y(X, labels)
    # Re-encode labels as contiguous integers 0..n_labels-1.
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples, _ = X.shape
    n_labels = len(le.classes_)
    check_number_of_labels(n_labels, n_samples)
    # extra_disp: between-cluster dispersion; intra_disp: within-cluster.
    extra_disp, intra_disp = 0., 0.
    mean = np.mean(X, axis=0)
    for k in range(n_labels):
        cluster_k = X[labels == k]
        mean_k = np.mean(cluster_k, axis=0)
        # Between-group term is weighted by the cluster's size.
        extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
        intra_disp += np.sum((cluster_k - mean_k) ** 2)
    # Score 1.0 by convention when there is no within-cluster dispersion.
    return (1. if intra_disp == 0. else
            extra_disp * (n_samples - n_labels) /
            (intra_disp * (n_labels - 1.)))
|
brianhie/scanorama
|
bin/unsupervised.py
|
silhouette_samples
|
python
|
def silhouette_samples(X, labels, metric='euclidean', **kwds):
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
check_number_of_labels(len(le.classes_), X.shape[0])
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf + intra_clust_dists
for curr_label in range(len(unique_labels)):
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = n_samples_per_label[curr_label] - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in range(len(unique_labels)):
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# score 0 for clusters of size 1, according to the paper
sil_samples[n_samples_per_label.take(labels) == 1] = 0
return sil_samples
|
Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/bin/unsupervised.py#L109-L213
|
[
"def check_number_of_labels(n_labels, n_samples):\n if not 1 < n_labels < n_samples:\n raise ValueError(\"Number of labels is %d. Valid values are 2 \"\n \"to n_samples - 1 (inclusive)\" % n_labels)\n"
] |
"""Unsupervised evaluation metrics."""
# Modified by Brian Hie <brianhie@mit.edu> to allow for multicore
# pairwise distance matrix computation.
# Original source code available at:
# https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/metrics/cluster/unsupervised.py
# Authors: Robert Layton <robertlayton@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from sklearn.utils import check_random_state
from sklearn.utils import check_X_y
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, optional (default=None)
The generator used to randomly select a subset of samples. If int,
random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If
None, the random number generator is the RandomState instance used by
`np.random`. Used when ``sample_size is not None``.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def calinski_harabaz_score(X, labels):
"""Compute the Calinski and Harabaz score.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabaz_index>`.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabaz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
|
brianhie/scanorama
|
bin/unsupervised.py
|
calinski_harabaz_score
|
python
|
def calinski_harabaz_score(X, labels):
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
|
Compute the Calinski and Harabaz score.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabaz_index>`.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabaz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/bin/unsupervised.py#L216-L263
|
[
"def check_number_of_labels(n_labels, n_samples):\n if not 1 < n_labels < n_samples:\n raise ValueError(\"Number of labels is %d. Valid values are 2 \"\n \"to n_samples - 1 (inclusive)\" % n_labels)\n"
] |
"""Unsupervised evaluation metrics."""
# Modified by Brian Hie <brianhie@mit.edu> to allow for multicore
# pairwise distance matrix computation.
# Original source code available at:
# https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/metrics/cluster/unsupervised.py
# Authors: Robert Layton <robertlayton@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from sklearn.utils import check_random_state
from sklearn.utils import check_X_y
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, optional (default=None)
The generator used to randomly select a subset of samples. If int,
random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If
None, the random number generator is the RandomState instance used by
`np.random`. Used when ``sample_size is not None``.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
check_number_of_labels(len(le.classes_), X.shape[0])
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf + intra_clust_dists
for curr_label in range(len(unique_labels)):
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = n_samples_per_label[curr_label] - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in range(len(unique_labels)):
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# score 0 for clusters of size 1, according to the paper
sil_samples[n_samples_per_label.take(labels) == 1] = 0
return sil_samples
|
brianhie/scanorama
|
scanorama/utils.py
|
handle_zeros_in_scale
|
python
|
def handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.
Adapted from sklearn.preprocessing.data'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
|
Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.
Adapted from sklearn.preprocessing.data
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/utils.py#L124-L139
| null |
import errno
from fbpca import pca
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = [10.0, 9.0]
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import os
import sys
np.random.seed(0)
def dispersion(X):
mean = X.mean(0)
dispersion = np.zeros(mean.shape)
nonzero_idx = np.nonzero(mean > 1e-10)[1]
X_nonzero = X[:, nonzero_idx]
nonzero_mean = X_nonzero.mean(0)
nonzero_var = (X_nonzero.multiply(X_nonzero)).mean(0)
temp = (nonzero_var / nonzero_mean)
dispersion[mean > 1e-10] = temp.A1
dispersion[mean <= 1e-10] = float('-inf')
return dispersion
def reduce_dimensionality(X, dim_red_k=100):
k = min((dim_red_k, X.shape[0], X.shape[1]))
U, s, Vt = pca(X, k=k) # Automatically centers.
return U[:, range(k)] * s[range(k)]
def visualize_cluster(coords, cluster, cluster_labels,
cluster_name=None, size=1, viz_prefix='vc',
image_suffix='.svg'):
if not cluster_name:
cluster_name = cluster
labels = [ 1 if c_i == cluster else 0
for c_i in cluster_labels ]
c_idx = [ i for i in range(len(labels)) if labels[i] == 1 ]
nc_idx = [ i for i in range(len(labels)) if labels[i] == 0 ]
colors = np.array([ '#cccccc', '#377eb8' ])
image_fname = '{}_cluster{}{}'.format(
viz_prefix, cluster, image_suffix
)
plt.figure()
plt.scatter(coords[nc_idx, 0], coords[nc_idx, 1],
c=colors[0], s=size)
plt.scatter(coords[c_idx, 0], coords[c_idx, 1],
c=colors[1], s=size)
plt.title(str(cluster_name))
plt.savefig(image_fname, dpi=500)
def visualize_expr(X, coords, genes, viz_gene, image_suffix='.svg',
new_fig=True, size=1, viz_prefix='ve'):
genes = [ gene.upper() for gene in genes ]
viz_gene = viz_gene.upper()
if not viz_gene.upper() in genes:
sys.stderr.write('Warning: Could not find gene {}\n'.format(viz_gene))
return
image_fname = '{}_{}{}'.format(
viz_prefix, viz_gene, image_suffix
)
# Color based on percentiles.
x_gene = X[:, list(genes).index(viz_gene)].toarray()
colors = np.zeros(x_gene.shape)
n_tiles = 100
prev_percentile = min(x_gene)
for i in range(n_tiles):
q = (i+1) / float(n_tiles) * 100.
percentile = np.percentile(x_gene, q)
idx = np.logical_and(prev_percentile <= x_gene,
x_gene <= percentile)
colors[idx] = i
prev_percentile = percentile
colors = colors.flatten()
if new_fig:
plt.figure()
plt.title(viz_gene)
plt.scatter(coords[:, 0], coords[:, 1],
c=colors, cmap=cm.get_cmap('Reds'), s=size)
plt.savefig(image_fname, dpi=500)
def visualize_dropout(X, coords, image_suffix='.svg',
new_fig=True, size=1, viz_prefix='dropout'):
image_fname = '{}{}'.format(
viz_prefix, image_suffix
)
# Color based on percentiles.
x_gene = np.array(np.sum(X != 0, axis=1))
colors = np.zeros(x_gene.shape)
n_tiles = 100
prev_percentile = min(x_gene)
for i in range(n_tiles):
q = (i+1) / float(n_tiles) * 100.
percentile = np.percentile(x_gene, q)
idx = np.logical_and(prev_percentile <= x_gene,
x_gene <= percentile)
colors[idx] = i
prev_percentile = percentile
colors = colors.flatten()
if new_fig:
plt.figure()
plt.title(viz_prefix)
plt.scatter(coords[:, 0], coords[:, 1],
c=colors, cmap=cm.get_cmap('Reds'), s=size)
plt.savefig(image_fname, dpi=500)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
|
brianhie/scanorama
|
scanorama/t_sne_approx.py
|
_joint_probabilities
|
python
|
def _joint_probabilities(distances, desired_perplexity, verbose):
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
|
Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L39-L68
| null |
# Modified by Brian Hie <brianhie@mit.edu> to use an approximate nearest
# neighbors search.
# Original source code available at:
# https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/manifold/t_sne.py
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import _utils
from sklearn.manifold import _barnes_hut_tsne
from sklearn.externals.six import string_types
from sklearn.utils import deprecated
from annoy import AnnoyIndex
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
is O(N), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples, k)
Distances of samples to its k nearest neighbors.
neighbors : array, shape (n_samples, k)
Indices of the k nearest-neighbors for each samples.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : csr sparse matrix, shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors.
"""
t0 = time()
# Compute conditional probabilities such that they approximately match
# the desired perplexity
n_samples, k = neighbors.shape
distances = distances.astype(np.float32, copy=False)
neighbors = neighbors.astype(np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
assert np.all(np.isfinite(conditional_P)), \
"All probabilities should be finite"
# Symmetrize the joint probability distribution using sparse operations
P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
range(0, n_samples * k + 1, k)),
shape=(n_samples, n_samples))
P = P + P.T
# Normalize the joint probability distribution
sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
P /= sum_P
assert np.all(np.abs(P.data) <= 1.0)
if verbose >= 2:
duration = time() - t0
print("[t-SNE] Computed conditional probabilities in {:.3f}s"
.format(duration))
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist += 1.
dist /= degrees_of_freedom
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
# Gradient: dC/dY
# pdist always returns double precision distances. Thus we need to take
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order='K'),
X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
angle=0.5, skip_num_points=0, verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : csr sparse matrix, shape (n_samples, n_sample)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
class TSNEApprox(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.

    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.

    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].

    Read more in the :ref:`User Guide <t_sne>`.

    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.

    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.

    early_exaggeration : float, optional (default: 12.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.

    learning_rate : float, optional (default: 200.0)
        The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
        the learning rate is too high, the data may look like a 'ball' with any
        point approximately equidistant from its nearest neighbours. If the
        learning rate is too low, most points may look compressed in a dense
        cloud with few outliers. If the cost function gets stuck in a bad local
        minimum increasing the learning rate may help.

    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 250.

    n_iter_without_progress : int, optional (default: 300)
        Maximum number of iterations without progress before we abort the
        optimization, used after 250 initial iterations with early
        exaggeration. Note that progress is only checked every 50 iterations so
        this value is rounded to the next multiple of 50.

        .. versionadded:: 0.17
           parameter *n_iter_without_progress* to control stopping criteria.

    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be stopped.

    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them. The default is "euclidean" which is
        interpreted as squared euclidean distance.

    init : string or numpy array, optional (default: "random")
        Initialization of embedding. Possible options are 'random', 'pca',
        and a numpy array of shape (n_samples, n_components).
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.

    verbose : int, optional (default: 0)
        Verbosity level.

    random_state : int, RandomState instance or None, optional (default: None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Note that different initializations might result in
        different local minima of the cost function.

    method : string (default: 'barnes_hut')
        By default the gradient calculation algorithm uses Barnes-Hut
        approximation running in O(NlogN) time. method='exact'
        will run on the slower, but exact, algorithm in O(N^2) time. The
        exact algorithm should be used when nearest-neighbor errors need
        to be better than 3%. However, the exact method cannot scale to
        millions of examples.

        .. versionadded:: 0.17
           Approximate optimization *method* via the Barnes-Hut.

    angle : float (default: 0.5)
        Only used if method='barnes_hut'
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.

    kl_divergence_ : float
        Kullback-Leibler divergence after optimization.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------

    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> X_embedded = TSNE(n_components=2).fit_transform(X)
    >>> X_embedded.shape
    (4, 2)

    References
    ----------

    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.

    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html

    [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
        Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
        http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
    """
    # Control the number of exploration iterations with early_exaggeration on
    _EXPLORATION_N_ITER = 250

    # Control the number of iterations between progress checks
    _N_ITER_CHECK = 50

    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
                 n_iter_without_progress=300, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5):
        # Plain parameter storage only; validation happens lazily in _fit,
        # following the scikit-learn estimator convention.
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle

    def _fit(self, X, skip_num_points=0):
        """Fit the model using X as training data.

        Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that this
            when method='barnes_hut', X cannot be a sparse array and if need be
            will be converted to a 32 bit float array. Method='exact' allows
            sparse arrays and 64bit floating point inputs.

        skip_num_points : int (optional, default:0)
            This does not compute the gradient for points with indices below
            `skip_num_points`. This is useful when computing transforms of new
            data where you'd like to keep the old data fixed.
        """
        # --- parameter / input validation -------------------------------
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.metric == "precomputed":
            if isinstance(self.init, string_types) and self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be "
                                 "used with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            if np.any(X < 0):
                raise ValueError("All distances should be positive, the "
                                 "precomputed distances given as X is not "
                                 "correct")
        if self.method == 'barnes_hut' and sp.issparse(X):
            raise TypeError('A sparse matrix was passed, but dense '
                            'data is required for method="barnes_hut". Use '
                            'X.toarray() to convert to a dense numpy array if '
                            'the array is small enough for it to fit in '
                            'memory. Otherwise consider dimensionality '
                            'reduction techniques (e.g. TruncatedSVD)')
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                            dtype=[np.float32, np.float64])
        if self.method == 'barnes_hut' and self.n_components > 3:
            raise ValueError("'n_components' should be inferior to 4 for the "
                             "barnes_hut algorithm as it relies on "
                             "quad-tree or oct-tree.")
        random_state = check_random_state(self.random_state)

        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is {}"
                             .format(self.early_exaggeration))

        if self.n_iter < 250:
            raise ValueError("n_iter should be at least 250")

        n_samples = X.shape[0]

        neighbors_nn = None
        if self.method == "exact":
            # Retrieve the distance matrix, either using the precomputed one or
            # computing it.
            if self.metric == "precomputed":
                distances = X
            else:
                if self.verbose:
                    print("[t-SNE] Computing pairwise distances...")

                if self.metric == "euclidean":
                    # "euclidean" is interpreted as SQUARED euclidean here,
                    # matching the original t-SNE formulation.
                    distances = pairwise_distances(X, metric=self.metric,
                                                   squared=True)
                else:
                    distances = pairwise_distances(X, metric=self.metric)

                if np.any(distances < 0):
                    raise ValueError("All distances should be positive, the "
                                     "metric given is not correct")

            # compute the joint probability distribution for the input space
            P = _joint_probabilities(distances, self.perplexity, self.verbose)
            assert np.all(np.isfinite(P)), "All probabilities should be finite"
            assert np.all(P >= 0), "All probabilities should be non-negative"
            assert np.all(P <= 1), ("All probabilities should be less "
                                    "or then equal to one")

        else:
            # Compute the number of nearest neighbors to find.
            # LvdM uses 3 * perplexity as the number of neighbors.
            # In the event that we have very small # of points
            # set the neighbors to n - 1.
            k = min(n_samples - 1, int(3. * self.perplexity + 1))

            if self.verbose:
                print("[t-SNE] Computing {} nearest neighbors...".format(k))

            # Find the nearest neighbors for every point
            # NOTE(review): neighbors_method is computed but unused below;
            # this block replaces sklearn's BallTree with an approximate
            # Annoy index (the scanorama modification).
            neighbors_method = 'ball_tree'
            if (self.metric == 'precomputed'):
                neighbors_method = 'brute'
            knn = AnnoyIndex(X.shape[1], metric='euclidean')
            t0 = time()
            for i in range(n_samples):
                knn.add_item(i, X[i, :])
            # 50 random-projection trees; more trees = better recall,
            # slower build.
            knn.build(50)
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
                    n_samples, duration))

            t0 = time()
            neighbors_nn = np.zeros((n_samples, k), dtype=int)
            distances_nn = np.zeros((n_samples, k))
            for i in range(n_samples):
                (neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector(
                    X[i, :], k, include_distances=True
                )
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
                      .format(n_samples, duration))

            # Free the memory used by the ball_tree
            del knn

            if self.metric == "euclidean":
                # knn return the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # the method was derived using the euclidean method as in the
                # input space. Not sure of the implication of using a different
                # metric.
                distances_nn **= 2

            # compute the joint probability distribution for the input space
            P = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                        self.perplexity, self.verbose)

        # --- embedding initialization -----------------------------------
        if isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'pca':
            pca = PCA(n_components=self.n_components, svd_solver='randomized',
                      random_state=random_state)
            X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
        elif self.init == 'random':
            # The embedding is initialized with iid samples from Gaussians with
            # standard deviation 1e-4.
            X_embedded = 1e-4 * random_state.randn(
                n_samples, self.n_components).astype(np.float32)
        else:
            raise ValueError("'init' must be 'pca', 'random', or "
                             "a numpy array")

        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1.0, 1)

        return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)

    @property
    @deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
                "will be removed in 0.21. Use ``n_iter_`` instead")
    def n_iter_final(self):
        # Backward-compatibility alias for n_iter_ (kept from sklearn 0.19).
        return self.n_iter_

    def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
              neighbors=None, skip_num_points=0):
        """Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm that
        # we use is batch gradient descent with two stages:
        # * initial optimization with early exaggeration and momentum at 0.5
        # * final optimization with momentum at 0.8
        params = X_embedded.ravel()

        opt_args = {
            "it": 0,
            "n_iter_check": self._N_ITER_CHECK,
            "min_grad_norm": self.min_grad_norm,
            "learning_rate": self.learning_rate,
            "verbose": self.verbose,
            "kwargs": dict(skip_num_points=skip_num_points),
            "args": [P, degrees_of_freedom, n_samples, self.n_components],
            "n_iter_without_progress": self._EXPLORATION_N_ITER,
            "n_iter": self._EXPLORATION_N_ITER,
            "momentum": 0.5,
        }
        if self.method == 'barnes_hut':
            obj_func = _kl_divergence_bh
            opt_args['kwargs']['angle'] = self.angle
            # Repeat verbose argument for _kl_divergence_bh
            opt_args['kwargs']['verbose'] = self.verbose
        else:
            obj_func = _kl_divergence

        # Learning schedule (part 1): do 250 iteration with lower momentum but
        # higher learning rate controlled via the early exageration parameter
        P *= self.early_exaggeration
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
        if self.verbose:
            print("[t-SNE] KL divergence after %d iterations with early "
                  "exaggeration: %f" % (it + 1, kl_divergence))

        # Learning schedule (part 2): disable early exaggeration and finish
        # optimization with a higher momentum at 0.8
        P /= self.early_exaggeration
        remaining = self.n_iter - self._EXPLORATION_N_ITER
        if it < self._EXPLORATION_N_ITER or remaining > 0:
            opt_args['n_iter'] = self.n_iter
            opt_args['it'] = it + 1
            opt_args['momentum'] = 0.8
            opt_args['n_iter_without_progress'] = self.n_iter_without_progress
            params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                          **opt_args)

        # Save the final number of iterations
        self.n_iter_ = it

        if self.verbose:
            print("[t-SNE] Error after %d iterations: %f"
                  % (it + 1, kl_divergence))

        X_embedded = params.reshape(n_samples, self.n_components)
        self.kl_divergence_ = kl_divergence

        return X_embedded

    def fit_transform(self, X, y=None):
        """Fit X into an embedded space and return that transformed
        output.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        embedding = self._fit(X)
        self.embedding_ = embedding
        return self.embedding_

    def fit(self, X, y=None):
        """Fit X into an embedded space.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. If the method
            is 'exact', X may be a sparse matrix of type 'csr', 'csc'
            or 'coo'.
        """
        self.fit_transform(X)
        return self
|
brianhie/scanorama
|
scanorama/t_sne_approx.py
|
_joint_probabilities_nn
|
python
|
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances using just nearest
    neighbors.

    Approximately equivalent to the dense ``_joint_probabilities`` but only
    considers each sample's k nearest neighbors, returning a sparse matrix.

    Parameters
    ----------
    distances : array, shape (n_samples, k)
        Distances of samples to their k nearest neighbors.

    neighbors : array, shape (n_samples, k)
        Indices of the k nearest neighbors for each sample.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : csr sparse matrix, shape (n_samples, n_samples)
        Condensed joint probability matrix with only nearest neighbors.
    """
    t0 = time()
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity
    n_samples, k = neighbors.shape
    distances = distances.astype(np.float32, copy=False)
    neighbors = neighbors.astype(np.int64, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, neighbors, desired_perplexity, verbose)
    assert np.all(np.isfinite(conditional_P)), \
        "All probabilities should be finite"

    # Symmetrize the joint probability distribution using sparse operations.
    # Row i of conditional_P holds p_{j|i} for its k neighbors, so
    # (data, indices, indptr) with a uniform row stride of k builds the CSR
    # matrix directly.
    P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
                    range(0, n_samples * k + 1, k)),
                   shape=(n_samples, n_samples))
    P = P + P.T

    # Normalize the joint probability distribution
    sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
    P /= sum_P

    assert np.all(np.abs(P.data) <= 1.0)
    if verbose >= 2:
        duration = time() - t0
        print("[t-SNE] Computed conditional probabilities in {:.3f}s"
              .format(duration))
    return P
|
Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
is O(N), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples, k)
Distances of samples to its k nearest neighbors.
neighbors : array, shape (n_samples, k)
Indices of the k nearest-neighbors for each samples.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : csr sparse matrix, shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L71-L124
| null |
# Modified by Brian Hie <brianhie@mit.edu> to use an approximate nearest
# neighbors search.
# Original source code available at:
# https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/manifold/t_sne.py
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import _utils
from sklearn.manifold import _barnes_hut_tsne
from sklearn.externals.six import string_types
from sklearn.utils import deprecated
from annoy import AnnoyIndex
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute the symmetrized joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Pairwise distances in condensed form (diagonal and duplicates
        omitted, flattened to one dimension).

    desired_perplexity : float
        Target perplexity of the conditional probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Binary-search each sample's bandwidth so the conditional
    # distributions approximately match the requested perplexity.
    distances = distances.astype(np.float32, copy=False)
    cond_probs = _utils._binary_search_perplexity(
        distances, None, desired_perplexity, verbose)

    # Symmetrize p_{j|i} into p_ij, normalize, and clamp at machine
    # epsilon so downstream log()/division never sees an exact zero.
    symmetric = cond_probs + cond_probs.T
    total = np.maximum(np.sum(symmetric), MACHINE_EPSILON)
    return np.maximum(squareform(symmetric) / total, MACHINE_EPSILON)
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0):
    """Exact t-SNE objective: KL(P || Q) and its gradient.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.

    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    skip_num_points : int (optional, default:0)
        Gradients for points with indices below `skip_num_points` are not
        computed; useful for transforming new data with old data fixed.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.

    grad : array, shape (n_params,)
        Unraveled gradient of the KL divergence w.r.t. the embedding.
    """
    Y = params.reshape(n_samples, n_components)

    # Heavy-tailed Student's t kernel over embedded pairwise distances.
    # In-place ops keep the float semantics identical across steps.
    kernel = pdist(Y, "sqeuclidean")
    kernel += 1.
    kernel /= degrees_of_freedom
    kernel **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(kernel / (2.0 * np.sum(kernel)), MACHINE_EPSILON)

    # np.dot instead of np.sum(x * y): same result, BLAS-backed.
    kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))

    # Gradient dC/dY; pdist yields double precision, so allocate with the
    # caller's dtype explicitly.
    grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
    weighted = squareform((P - Q) * kernel)
    for row in range(skip_num_points, n_samples):
        grad[row] = np.dot(np.ravel(weighted[row], order='K'),
                           Y[row] - Y)
    grad = grad.ravel()
    grad *= 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    return kl_divergence, grad
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
                      angle=0.5, skip_num_points=0, verbose=False):
    """Barnes-Hut t-SNE objective: KL(P || Q) and its gradient in O(NlogN).

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : csr sparse matrix, shape (n_samples, n_sample)
        Sparse approximate joint probability matrix, computed only for the
        k nearest-neighbors and symmetrized.

    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    angle : float (default: 0.5)
        Speed/accuracy trade-off (theta in the Barnes-Hut literature): a
        distant cell whose angular size falls below `angle` is summarized
        by a single node. Sensible values lie in roughly 0.2 - 0.8.

    skip_num_points : int (optional, default:0)
        Gradients for points with indices below `skip_num_points` are not
        computed; useful for transforming new data with old data fixed.

    verbose : int
        Verbosity level.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.

    grad : array, shape (n_params,)
        Unraveled gradient of the KL divergence w.r.t. the embedding.
    """
    # The compiled kernel requires float32 data and int64 CSR indices.
    params = params.astype(np.float32, copy=False)
    Y = params.reshape(n_samples, n_components)

    sparse_probs = P.data.astype(np.float32, copy=False)
    col_indices = P.indices.astype(np.int64, copy=False)
    row_ptr = P.indptr.astype(np.int64, copy=False)

    grad = np.zeros(Y.shape, dtype=np.float32)
    error = _barnes_hut_tsne.gradient(sparse_probs, Y, col_indices, row_ptr,
                                      grad, angle, n_components, verbose,
                                      dof=degrees_of_freedom)
    scale = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    return error, scale * grad.ravel()
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    # NOTE: the docstring must be a raw string -- it contains LaTeX escapes
    # (\frac, \sum) where "\f" would otherwise be a form-feed character and
    # "\s" an invalid escape sequence (SyntaxWarning on Python >= 3.12).
    r"""Expresses to what extent the local structure is retained.

    The trustworthiness is within [0, 1]. It is defined as

    .. math::

        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)

    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.

    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study"
      J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.

    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.

    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.

    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    # Squared distances are enough: ranking is invariant under squaring.
    if precomputed:
        dist_X = X
    else:
        dist_X = pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    ind_X = np.argsort(dist_X, axis=1)
    # Column 0 is each point itself (distance 0), so skip it and keep the
    # k nearest neighbors in the embedded space.
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]

    n_samples = X.shape[0]
    t = 0.0
    ranks = np.zeros(n_neighbors)
    for i in range(n_samples):
        for j in range(n_neighbors):
            # Rank of the j-th embedded neighbor within the original-space
            # ordering of sample i.
            ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
        ranks -= n_neighbors
        # Only neighbors that were NOT among the original k nearest ones
        # (positive excess rank) contribute a penalty.
        t += np.sum(ranks[ranks > 0])
    t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
                          (2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
    return t
class TSNEApprox(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.
    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.
    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].
    Read more in the :ref:`User Guide <t_sne>`.
    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.
    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.
    early_exaggeration : float, optional (default: 12.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.
    learning_rate : float, optional (default: 200.0)
        The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
        the learning rate is too high, the data may look like a 'ball' with any
        point approximately equidistant from its nearest neighbours. If the
        learning rate is too low, most points may look compressed in a dense
        cloud with few outliers. If the cost function gets stuck in a bad local
        minimum increasing the learning rate may help.
    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 250.
    n_iter_without_progress : int, optional (default: 300)
        Maximum number of iterations without progress before we abort the
        optimization, used after 250 initial iterations with early
        exaggeration. Note that progress is only checked every 50 iterations so
        this value is rounded to the next multiple of 50.
        .. versionadded:: 0.17
           parameter *n_iter_without_progress* to control stopping criteria.
    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be stopped.
    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them. The default is "euclidean" which is
        interpreted as squared euclidean distance.
    init : string or numpy array, optional (default: "random")
        Initialization of embedding. Possible options are 'random', 'pca',
        and a numpy array of shape (n_samples, n_components).
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.
    verbose : int, optional (default: 0)
        Verbosity level.
    random_state : int, RandomState instance or None, optional (default: None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Note that different initializations might result in
        different local minima of the cost function.
    method : string (default: 'barnes_hut')
        By default the gradient calculation algorithm uses Barnes-Hut
        approximation running in O(NlogN) time. method='exact'
        will run on the slower, but exact, algorithm in O(N^2) time. The
        exact algorithm should be used when nearest-neighbor errors need
        to be better than 3%. However, the exact method cannot scale to
        millions of examples.
        .. versionadded:: 0.17
           Approximate optimization *method* via the Barnes-Hut.
    angle : float (default: 0.5)
        Only used if method='barnes_hut'
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.
    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    kl_divergence_ : float
        Kullback-Leibler divergence after optimization.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> X_embedded = TSNE(n_components=2).fit_transform(X)
    >>> X_embedded.shape
    (4, 2)
    References
    ----------
    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html
    [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
        Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
        http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
    """
    # Control the number of exploration iterations with early_exaggeration on
    _EXPLORATION_N_ITER = 250
    # Control the number of iterations between progress checks
    _N_ITER_CHECK = 50

    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
                 n_iter_without_progress=300, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5):
        # Store parameters verbatim; validation is deferred to _fit, per
        # scikit-learn estimator convention.
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle

    def _fit(self, X, skip_num_points=0):
        """Fit the model using X as training data.
        Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that this
            when method='barnes_hut', X cannot be a sparse array and if need be
            will be converted to a 32 bit float array. Method='exact' allows
            sparse arrays and 64bit floating point inputs.
        skip_num_points : int (optional, default:0)
            This does not compute the gradient for points with indices below
            `skip_num_points`. This is useful when computing transforms of new
            data where you'd like to keep the old data fixed.
        """
        # --- Parameter validation -------------------------------------------
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.metric == "precomputed":
            if isinstance(self.init, string_types) and self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be "
                                 "used with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            if np.any(X < 0):
                raise ValueError("All distances should be positive, the "
                                 "precomputed distances given as X is not "
                                 "correct")
        if self.method == 'barnes_hut' and sp.issparse(X):
            raise TypeError('A sparse matrix was passed, but dense '
                            'data is required for method="barnes_hut". Use '
                            'X.toarray() to convert to a dense numpy array if '
                            'the array is small enough for it to fit in '
                            'memory. Otherwise consider dimensionality '
                            'reduction techniques (e.g. TruncatedSVD)')
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                            dtype=[np.float32, np.float64])
        if self.method == 'barnes_hut' and self.n_components > 3:
            raise ValueError("'n_components' should be inferior to 4 for the "
                             "barnes_hut algorithm as it relies on "
                             "quad-tree or oct-tree.")
        random_state = check_random_state(self.random_state)
        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is {}"
                             .format(self.early_exaggeration))
        if self.n_iter < 250:
            raise ValueError("n_iter should be at least 250")

        # --- Input-space joint probabilities P ------------------------------
        n_samples = X.shape[0]
        neighbors_nn = None
        if self.method == "exact":
            # Retrieve the distance matrix, either using the precomputed one or
            # computing it.
            if self.metric == "precomputed":
                distances = X
            else:
                if self.verbose:
                    print("[t-SNE] Computing pairwise distances...")
                if self.metric == "euclidean":
                    distances = pairwise_distances(X, metric=self.metric,
                                                   squared=True)
                else:
                    distances = pairwise_distances(X, metric=self.metric)
                if np.any(distances < 0):
                    raise ValueError("All distances should be positive, the "
                                     "metric given is not correct")
            # compute the joint probability distribution for the input space
            P = _joint_probabilities(distances, self.perplexity, self.verbose)
            assert np.all(np.isfinite(P)), "All probabilities should be finite"
            assert np.all(P >= 0), "All probabilities should be non-negative"
            assert np.all(P <= 1), ("All probabilities should be less "
                                    "or then equal to one")
        else:
            # Compute the number of nearest neighbors to find.
            # LvdM uses 3 * perplexity as the number of neighbors.
            # In the event that we have very small # of points
            # set the neighbors to n - 1.
            k = min(n_samples - 1, int(3. * self.perplexity + 1))
            if self.verbose:
                print("[t-SNE] Computing {} nearest neighbors...".format(k))
            # Find the nearest neighbors for every point
            # NOTE(review): neighbors_method is assigned but never used — the
            # Annoy index below is always built on the raw rows of X with a
            # euclidean metric, so metric='precomputed' does not appear to be
            # honored on this (barnes_hut) path. TODO confirm with callers.
            neighbors_method = 'ball_tree'
            if (self.metric == 'precomputed'):
                neighbors_method = 'brute'
            # Approximate nearest neighbors via Annoy — this is the scanorama
            # modification relative to scikit-learn's exact neighbor search.
            knn = AnnoyIndex(X.shape[1], metric='euclidean')
            t0 = time()
            for i in range(n_samples):
                knn.add_item(i, X[i, :])
            # 50 trees; more trees give higher precision at higher build cost.
            knn.build(50)
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
                    n_samples, duration))
            t0 = time()
            neighbors_nn = np.zeros((n_samples, k), dtype=int)
            distances_nn = np.zeros((n_samples, k))
            for i in range(n_samples):
                (neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector(
                    X[i, :], k, include_distances=True
                )
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
                      .format(n_samples, duration))
            # Free the memory used by the ball_tree
            del knn
            if self.metric == "euclidean":
                # knn return the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # the method was derived using the euclidean method as in the
                # input space. Not sure of the implication of using a different
                # metric.
                distances_nn **= 2
            # compute the joint probability distribution for the input space
            P = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                        self.perplexity, self.verbose)

        # --- Initial embedding ----------------------------------------------
        if isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'pca':
            pca = PCA(n_components=self.n_components, svd_solver='randomized',
                      random_state=random_state)
            X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
        elif self.init == 'random':
            # The embedding is initialized with iid samples from Gaussians with
            # standard deviation 1e-4.
            X_embedded = 1e-4 * random_state.randn(
                n_samples, self.n_components).astype(np.float32)
        else:
            raise ValueError("'init' must be 'pca', 'random', or "
                             "a numpy array")

        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1.0, 1)

        return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)

    @property
    @deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
                "will be removed in 0.21. Use ``n_iter_`` instead")
    def n_iter_final(self):
        # Backwards-compatible alias for n_iter_.
        return self.n_iter_

    def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
              neighbors=None, skip_num_points=0):
        """Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm that
        # we use is batch gradient descent with two stages:
        # * initial optimization with early exaggeration and momentum at 0.5
        # * final optimization with momentum at 0.8
        params = X_embedded.ravel()
        opt_args = {
            "it": 0,
            "n_iter_check": self._N_ITER_CHECK,
            "min_grad_norm": self.min_grad_norm,
            "learning_rate": self.learning_rate,
            "verbose": self.verbose,
            "kwargs": dict(skip_num_points=skip_num_points),
            "args": [P, degrees_of_freedom, n_samples, self.n_components],
            "n_iter_without_progress": self._EXPLORATION_N_ITER,
            "n_iter": self._EXPLORATION_N_ITER,
            "momentum": 0.5,
        }
        if self.method == 'barnes_hut':
            obj_func = _kl_divergence_bh
            opt_args['kwargs']['angle'] = self.angle
            # Repeat verbose argument for _kl_divergence_bh
            opt_args['kwargs']['verbose'] = self.verbose
        else:
            obj_func = _kl_divergence
        # Learning schedule (part 1): do 250 iteration with lower momentum but
        # higher learning rate controlled via the early exageration parameter
        P *= self.early_exaggeration
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
        if self.verbose:
            print("[t-SNE] KL divergence after %d iterations with early "
                  "exaggeration: %f" % (it + 1, kl_divergence))
        # Learning schedule (part 2): disable early exaggeration and finish
        # optimization with a higher momentum at 0.8
        P /= self.early_exaggeration
        remaining = self.n_iter - self._EXPLORATION_N_ITER
        if it < self._EXPLORATION_N_ITER or remaining > 0:
            opt_args['n_iter'] = self.n_iter
            opt_args['it'] = it + 1
            opt_args['momentum'] = 0.8
            opt_args['n_iter_without_progress'] = self.n_iter_without_progress
            params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                          **opt_args)
        # Save the final number of iterations
        self.n_iter_ = it
        if self.verbose:
            print("[t-SNE] Error after %d iterations: %f"
                  % (it + 1, kl_divergence))
        X_embedded = params.reshape(n_samples, self.n_components)
        self.kl_divergence_ = kl_divergence
        return X_embedded

    def fit_transform(self, X, y=None):
        """Fit X into an embedded space and return that transformed
        output.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        embedding = self._fit(X)
        self.embedding_ = embedding
        return self.embedding_

    def fit(self, X, y=None):
        """Fit X into an embedded space.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. If the method
            is 'exact', X may be a sparse matrix of type 'csr', 'csc'
            or 'coo'.
        """
        self.fit_transform(X)
        return self
|
brianhie/scanorama
|
scanorama/t_sne_approx.py
|
_kl_divergence
|
python
|
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0):
    """t-SNE objective: KL divergence of P and Q plus its gradient.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    skip_num_points : int (optional, default:0)
        No gradient is computed for points with indices below this value;
        their rows of the gradient buffer are left uninitialized.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the KL divergence w.r.t. the embedding.
    """
    embedding = params.reshape(n_samples, n_components)

    # Q is a heavy-tailed distribution: Student's t kernel applied to the
    # condensed pairwise squared distances, normalized over all pairs.
    kernel = pdist(embedding, "sqeuclidean")
    kernel += 1.
    kernel /= degrees_of_freedom
    kernel **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(kernel / (2.0 * np.sum(kernel)), MACHINE_EPSILON)

    # Objective C: KL(P || Q). np.dot is preferred over np.sum(x * y)
    # because it dispatches to BLAS.
    kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))

    # Gradient dC/dY: for each kept point, an attraction/repulsion sum
    # weighted by (P - Q) * kernel.
    grad = np.empty((n_samples, n_components), dtype=params.dtype)
    weights = squareform((P - Q) * kernel)
    for row in range(skip_num_points, n_samples):
        grad[row] = np.dot(np.ravel(weights[row], order='K'),
                           embedding[row] - embedding)
    grad = grad.ravel()
    grad *= 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    return kl_divergence, grad
|
t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L127-L189
| null |
# Modified by Brian Hie <brianhie@mit.edu> to use an approximate nearest
# neighbors search.
# Original source code available at:
# https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/manifold/t_sne.py
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import _utils
from sklearn.manifold import _barnes_hut_tsne
from sklearn.externals.six import string_types
from sklearn.utils import deprecated
from annoy import AnnoyIndex
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Convert a condensed distance matrix into joint probabilities p_ij.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Pairwise distances in condensed form (diagonal and duplicate
        entries omitted, stored as a one-dimensional array).
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Binary-search per-point bandwidths so the conditional distributions
    # approximately match the desired perplexity; the helper is given
    # float32 distances.
    distances = distances.astype(np.float32, copy=False)
    cond_P = _utils._binary_search_perplexity(
        distances, None, desired_perplexity, verbose)

    # Symmetrize the conditionals, renormalize, and floor at machine
    # epsilon to keep downstream logs finite.
    sym = cond_P + cond_P.T
    total = np.maximum(np.sum(sym), MACHINE_EPSILON)
    return np.maximum(squareform(sym) / total, MACHINE_EPSILON)
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances using just nearest
    neighbors.
    This method is approximately equal to _joint_probabilities. The latter
    is O(N), but limiting the joint probability to nearest neighbors improves
    this substantially to O(uN).
    Parameters
    ----------
    distances : array, shape (n_samples, k)
        Distances of samples to its k nearest neighbors.
    neighbors : array, shape (n_samples, k)
        Indices of the k nearest-neighbors for each samples.
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.
    Returns
    -------
    P : csr sparse matrix, shape (n_samples, n_samples)
        Condensed joint probability matrix with only nearest neighbors.
    """
    t0 = time()
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity
    n_samples, k = neighbors.shape
    # Cast to the dtypes passed to _utils._binary_search_perplexity.
    distances = distances.astype(np.float32, copy=False)
    neighbors = neighbors.astype(np.int64, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, neighbors, desired_perplexity, verbose)
    assert np.all(np.isfinite(conditional_P)), \
        "All probabilities should be finite"
    # Symmetrize the joint probability distribution using sparse operations.
    # Every row holds exactly k entries, hence the regular indptr of step k.
    P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
                    range(0, n_samples * k + 1, k)),
                   shape=(n_samples, n_samples))
    P = P + P.T
    # Normalize the joint probability distribution (floored at machine
    # epsilon so the division is always well-defined).
    sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
    P /= sum_P
    assert np.all(np.abs(P.data) <= 1.0)
    if verbose >= 2:
        duration = time() - t0
        print("[t-SNE] Computed conditional probabilities in {:.3f}s"
              .format(duration))
    return P
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
                      angle=0.5, skip_num_points=0, verbose=False):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs.
    Uses Barnes-Hut tree methods to calculate the gradient that
    runs in O(NlogN) instead of O(N^2)
    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : csr sparse matrix, shape (n_samples, n_sample)
        Sparse approximate joint probability matrix, computed only for the
        k nearest-neighbors and symmetrized.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    angle : float (default: 0.5)
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.
    skip_num_points : int (optional, default:0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.
    verbose : int
        Verbosity level.
    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    # Cast the embedding and the CSR pieces of P to the dtypes handed to the
    # compiled _barnes_hut_tsne.gradient routine (float32 data, int64 indices
    # — presumably what the Cython signature requires; confirm there).
    params = params.astype(np.float32, copy=False)
    X_embedded = params.reshape(n_samples, n_components)
    val_P = P.data.astype(np.float32, copy=False)
    neighbors = P.indices.astype(np.int64, copy=False)
    indptr = P.indptr.astype(np.int64, copy=False)
    # The gradient is written in place into `grad`; the returned value is the
    # (approximate) KL divergence.
    grad = np.zeros(X_embedded.shape, dtype=np.float32)
    error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
                                      grad, angle, n_components, verbose,
                                      dof=degrees_of_freedom)
    # Common scale factor of the t-SNE gradient.
    c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    grad = grad.ravel()
    grad *= c
    return error, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    r"""Expresses to what extent the local structure is retained.
    The trustworthiness is within [0, 1]. It is defined as
    .. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.
    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study"
      J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten
    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.
    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.
    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.
    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    # (Docstring made a raw string: it contains \frac, \sum, \in which would
    # otherwise be parsed as string escapes.)
    if precomputed:
        dist_X = X
    else:
        dist_X = pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    # Full neighbor ordering in the original space; in the embedding keep only
    # the k nearest neighbors, skipping column 0 (each point is its own
    # nearest neighbor at distance 0).
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
    n_samples = X.shape[0]
    t = 0.0
    ranks = np.zeros(n_neighbors)
    for i in range(n_samples):
        for j in range(n_neighbors):
            # Rank of the j-th embedded-space neighbor in the original-space
            # ordering of point i.
            ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
        # Only neighbors that fall outside the true k-NN set (rank > k)
        # contribute to the penalty.
        ranks -= n_neighbors
        t += np.sum(ranks[ranks > 0])
    t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
                          (2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
    return t
class TSNEApprox(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.
    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.
    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].
    Read more in the :ref:`User Guide <t_sne>`.
    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.
    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.
    early_exaggeration : float, optional (default: 12.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.
    learning_rate : float, optional (default: 200.0)
        The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
        the learning rate is too high, the data may look like a 'ball' with any
        point approximately equidistant from its nearest neighbours. If the
        learning rate is too low, most points may look compressed in a dense
        cloud with few outliers. If the cost function gets stuck in a bad local
        minimum increasing the learning rate may help.
    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 250.
    n_iter_without_progress : int, optional (default: 300)
        Maximum number of iterations without progress before we abort the
        optimization, used after 250 initial iterations with early
        exaggeration. Note that progress is only checked every 50 iterations so
        this value is rounded to the next multiple of 50.
        .. versionadded:: 0.17
           parameter *n_iter_without_progress* to control stopping criteria.
    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be stopped.
    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them. The default is "euclidean" which is
        interpreted as squared euclidean distance.
    init : string or numpy array, optional (default: "random")
        Initialization of embedding. Possible options are 'random', 'pca',
        and a numpy array of shape (n_samples, n_components).
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.
    verbose : int, optional (default: 0)
        Verbosity level.
    random_state : int, RandomState instance or None, optional (default: None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Note that different initializations might result in
        different local minima of the cost function.
    method : string (default: 'barnes_hut')
        By default the gradient calculation algorithm uses Barnes-Hut
        approximation running in O(NlogN) time. method='exact'
        will run on the slower, but exact, algorithm in O(N^2) time. The
        exact algorithm should be used when nearest-neighbor errors need
        to be better than 3%. However, the exact method cannot scale to
        millions of examples.
        .. versionadded:: 0.17
           Approximate optimization *method* via the Barnes-Hut.
    angle : float (default: 0.5)
        Only used if method='barnes_hut'
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.
    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    kl_divergence_ : float
        Kullback-Leibler divergence after optimization.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> X_embedded = TSNE(n_components=2).fit_transform(X)
    >>> X_embedded.shape
    (4, 2)
    References
    ----------
    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html
    [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
        Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
        http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
    """
    # Control the number of exploration iterations with early_exaggeration on
    _EXPLORATION_N_ITER = 250
    # Control the number of iterations between progress checks
    _N_ITER_CHECK = 50
    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
                 n_iter_without_progress=300, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5):
        # Only store the hyper-parameters here; all validation is deferred
        # to _fit(), as is sklearn convention.
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle
    def _fit(self, X, skip_num_points=0):
        """Fit the model using X as training data.
        Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that this
            when method='barnes_hut', X cannot be a sparse array and if need be
            will be converted to a 32 bit float array. Method='exact' allows
            sparse arrays and 64bit floating point inputs.
        skip_num_points : int (optional, default:0)
            This does not compute the gradient for points with indices below
            `skip_num_points`. This is useful when computing transforms of new
            data where you'd like to keep the old data fixed.
        """
        # --- Parameter validation -------------------------------------
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.metric == "precomputed":
            if isinstance(self.init, string_types) and self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be "
                                 "used with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            if np.any(X < 0):
                raise ValueError("All distances should be positive, the "
                                 "precomputed distances given as X is not "
                                 "correct")
        if self.method == 'barnes_hut' and sp.issparse(X):
            raise TypeError('A sparse matrix was passed, but dense '
                            'data is required for method="barnes_hut". Use '
                            'X.toarray() to convert to a dense numpy array if '
                            'the array is small enough for it to fit in '
                            'memory. Otherwise consider dimensionality '
                            'reduction techniques (e.g. TruncatedSVD)')
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                            dtype=[np.float32, np.float64])
        if self.method == 'barnes_hut' and self.n_components > 3:
            raise ValueError("'n_components' should be inferior to 4 for the "
                             "barnes_hut algorithm as it relies on "
                             "quad-tree or oct-tree.")
        random_state = check_random_state(self.random_state)
        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is {}"
                             .format(self.early_exaggeration))
        if self.n_iter < 250:
            raise ValueError("n_iter should be at least 250")
        # --- Input-space joint probabilities P ------------------------
        n_samples = X.shape[0]
        neighbors_nn = None
        if self.method == "exact":
            # Retrieve the distance matrix, either using the precomputed one or
            # computing it.
            if self.metric == "precomputed":
                distances = X
            else:
                if self.verbose:
                    print("[t-SNE] Computing pairwise distances...")
                if self.metric == "euclidean":
                    distances = pairwise_distances(X, metric=self.metric,
                                                   squared=True)
                else:
                    distances = pairwise_distances(X, metric=self.metric)
                if np.any(distances < 0):
                    raise ValueError("All distances should be positive, the "
                                     "metric given is not correct")
            # compute the joint probability distribution for the input space
            P = _joint_probabilities(distances, self.perplexity, self.verbose)
            assert np.all(np.isfinite(P)), "All probabilities should be finite"
            assert np.all(P >= 0), "All probabilities should be non-negative"
            assert np.all(P <= 1), ("All probabilities should be less "
                                    "or then equal to one")
        else:
            # Cpmpute the number of nearest neighbors to find.
            # LvdM uses 3 * perplexity as the number of neighbors.
            # In the event that we have very small # of points
            # set the neighbors to n - 1.
            k = min(n_samples - 1, int(3. * self.perplexity + 1))
            if self.verbose:
                print("[t-SNE] Computing {} nearest neighbors...".format(k))
            # Find the nearest neighbors for every point
            neighbors_method = 'ball_tree'
            if (self.metric == 'precomputed'):
                neighbors_method = 'brute'
            # NOTE(review): neighbors_method is assigned but never used
            # below -- the Annoy index is always built with the euclidean
            # metric on the rows of X, even when self.metric is
            # 'precomputed'. Confirm this is intended.
            # Approximate nearest-neighbor search via Annoy (this is the
            # scanorama modification replacing sklearn's exact search).
            knn = AnnoyIndex(X.shape[1], metric='euclidean')
            t0 = time()
            for i in range(n_samples):
                knn.add_item(i, X[i, :])
            knn.build(50)
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
                    n_samples, duration))
            t0 = time()
            neighbors_nn = np.zeros((n_samples, k), dtype=int)
            distances_nn = np.zeros((n_samples, k))
            for i in range(n_samples):
                (neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector(
                    X[i, :], k, include_distances=True
                )
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
                      .format(n_samples, duration))
            # Free the memory used by the ball_tree
            del knn
            if self.metric == "euclidean":
                # knn return the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # the method was derived using the euclidean method as in the
                # input space. Not sure of the implication of using a different
                # metric.
                distances_nn **= 2
            # compute the joint probability distribution for the input space
            P = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                        self.perplexity, self.verbose)
        # --- Initial embedding ----------------------------------------
        if isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'pca':
            pca = PCA(n_components=self.n_components, svd_solver='randomized',
                      random_state=random_state)
            X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
        elif self.init == 'random':
            # The embedding is initialized with iid samples from Gaussians with
            # standard deviation 1e-4.
            X_embedded = 1e-4 * random_state.randn(
                n_samples, self.n_components).astype(np.float32)
        else:
            raise ValueError("'init' must be 'pca', 'random', or "
                             "a numpy array")
        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1.0, 1)
        return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)
    @property
    @deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
                "will be removed in 0.21. Use ``n_iter_`` instead")
    def n_iter_final(self):
        # Deprecated alias kept for backward compatibility with sklearn<0.19.
        return self.n_iter_
    def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
              neighbors=None, skip_num_points=0):
        """Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm that
        # we use is batch gradient descent with two stages:
        # * initial optimization with early exaggeration and momentum at 0.5
        # * final optimization with momentum at 0.8
        params = X_embedded.ravel()
        opt_args = {
            "it": 0,
            "n_iter_check": self._N_ITER_CHECK,
            "min_grad_norm": self.min_grad_norm,
            "learning_rate": self.learning_rate,
            "verbose": self.verbose,
            "kwargs": dict(skip_num_points=skip_num_points),
            "args": [P, degrees_of_freedom, n_samples, self.n_components],
            "n_iter_without_progress": self._EXPLORATION_N_ITER,
            "n_iter": self._EXPLORATION_N_ITER,
            "momentum": 0.5,
        }
        if self.method == 'barnes_hut':
            obj_func = _kl_divergence_bh
            opt_args['kwargs']['angle'] = self.angle
            # Repeat verbose argument for _kl_divergence_bh
            opt_args['kwargs']['verbose'] = self.verbose
        else:
            obj_func = _kl_divergence
        # Learning schedule (part 1): do 250 iteration with lower momentum but
        # higher learning rate controlled via the early exageration parameter
        P *= self.early_exaggeration
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
        if self.verbose:
            print("[t-SNE] KL divergence after %d iterations with early "
                  "exaggeration: %f" % (it + 1, kl_divergence))
        # Learning schedule (part 2): disable early exaggeration and finish
        # optimization with a higher momentum at 0.8
        P /= self.early_exaggeration
        remaining = self.n_iter - self._EXPLORATION_N_ITER
        if it < self._EXPLORATION_N_ITER or remaining > 0:
            opt_args['n_iter'] = self.n_iter
            opt_args['it'] = it + 1
            opt_args['momentum'] = 0.8
            opt_args['n_iter_without_progress'] = self.n_iter_without_progress
            params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                          **opt_args)
        # Save the final number of iterations
        self.n_iter_ = it
        if self.verbose:
            print("[t-SNE] Error after %d iterations: %f"
                  % (it + 1, kl_divergence))
        X_embedded = params.reshape(n_samples, self.n_components)
        self.kl_divergence_ = kl_divergence
        return X_embedded
    def fit_transform(self, X, y=None):
        """Fit X into an embedded space and return that transformed
        output.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        embedding = self._fit(X)
        self.embedding_ = embedding
        return self.embedding_
    def fit(self, X, y=None):
        """Fit X into an embedded space.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. If the method
            is 'exact', X may be a sparse matrix of type 'csr', 'csc'
            or 'coo'.
        """
        self.fit_transform(X)
        return self
|
brianhie/scanorama
|
scanorama/t_sne_approx.py
|
_kl_divergence_bh
|
python
|
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
                      angle=0.5, skip_num_points=0, verbose=False):
    """Return the KL divergence of p_ij and q_ij and its gradient.

    Delegates the heavy lifting to the compiled Barnes-Hut routine
    ``_barnes_hut_tsne.gradient`` and rescales the result.
    """
    # The compiled kernel works on float32 data and int64 CSR indices.
    embedding = params.astype(np.float32, copy=False).reshape(
        n_samples, n_components)
    sparse_p = P.data.astype(np.float32, copy=False)
    col_indices = P.indices.astype(np.int64, copy=False)
    row_ptr = P.indptr.astype(np.int64, copy=False)
    gradient = np.zeros(embedding.shape, dtype=np.float32)
    kl_divergence = _barnes_hut_tsne.gradient(
        sparse_p, embedding, col_indices, row_ptr, gradient, angle,
        n_components, verbose, dof=degrees_of_freedom)
    # Constant factor from differentiating the Student's t kernel.
    scale = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    gradient = gradient.ravel()
    gradient *= scale
    return kl_divergence, gradient
|
t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : csr sparse matrix, shape (n_samples, n_samples)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L192-L258
| null |
# Modified by Brian Hie <brianhie@mit.edu> to use an approximate nearest
# neighbors search.
# Original source code available at:
# https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/manifold/t_sne.py
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import _utils
from sklearn.manifold import _barnes_hut_tsne
from sklearn.externals.six import string_types
from sklearn.utils import deprecated
from annoy import AnnoyIndex
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute the symmetrized joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed pairwise distance matrix (diagonal and duplicates
        omitted, stored as a 1-D array).
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Binary-search per-sample bandwidths so the conditional distributions
    # approximately match the requested perplexity.
    distances = distances.astype(np.float32, copy=False)
    cond_p = _utils._binary_search_perplexity(
        distances, None, desired_perplexity, verbose)
    # Symmetrize and normalize, guarding against division by ~zero.
    sym = cond_p + cond_p.T
    total = np.maximum(np.sum(sym), MACHINE_EPSILON)
    return np.maximum(squareform(sym) / total, MACHINE_EPSILON)
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
    """Compute joint probabilities p_ij restricted to nearest neighbors.

    Approximates _joint_probabilities: restricting each sample's
    distribution to its k nearest neighbors means only n_samples * k
    entries are computed instead of the full pairwise matrix.

    Parameters
    ----------
    distances : array, shape (n_samples, k)
        Distances of samples to their k nearest neighbors.
    neighbors : array, shape (n_samples, k)
        Indices of the k nearest neighbors for each sample.
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : csr sparse matrix, shape (n_samples, n_samples)
        Condensed joint probability matrix with only nearest neighbors.
    """
    start = time()
    n_samples, k = neighbors.shape
    # The perplexity search operates on float32 / int64 buffers.
    distances = distances.astype(np.float32, copy=False)
    neighbors = neighbors.astype(np.int64, copy=False)
    cond_p = _utils._binary_search_perplexity(
        distances, neighbors, desired_perplexity, verbose)
    assert np.all(np.isfinite(cond_p)), \
        "All probabilities should be finite"
    # Build the sparse conditional matrix: row i holds its k neighbors.
    P = csr_matrix((cond_p.ravel(), neighbors.ravel(),
                    range(0, n_samples * k + 1, k)),
                   shape=(n_samples, n_samples))
    # Symmetrize, then normalize to a joint distribution.
    P = P + P.T
    P /= np.maximum(P.sum(), MACHINE_EPSILON)
    assert np.all(np.abs(P.data) <= 1.0)
    if verbose >= 2:
        print("[t-SNE] Computed conditional probabilities in {:.3f}s"
              .format(time() - start))
    return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0):
    """Exact t-SNE objective: KL divergence of p_ij and q_ij plus gradient.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    skip_num_points : int (optional, default:0)
        Gradients for points with indices below `skip_num_points` are not
        computed (useful when transforming new data with old data fixed).

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the KL divergence w.r.t. the embedding.
    """
    embedding = params.reshape(n_samples, n_components)
    # Student's t kernel over squared pairwise distances (heavy-tailed Q).
    weights = pdist(embedding, "sqeuclidean")
    weights += 1.
    weights /= degrees_of_freedom
    weights **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(weights / (2.0 * np.sum(weights)), MACHINE_EPSILON)
    # np.dot is used instead of np.sum(x * y) because it calls BLAS.
    kl_divergence = 2.0 * np.dot(
        P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
    # Gradient dC/dY; pdist returns double precision, so allocate with the
    # caller's dtype. Rows below skip_num_points are left unset on purpose.
    grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
    weighted_diff = squareform((P - Q) * weights)
    for i in range(skip_num_points, n_samples):
        grad[i] = np.dot(np.ravel(weighted_diff[i], order='K'),
                         embedding[i] - embedding)
    scale = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    grad = grad.ravel()
    grad *= scale
    return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    r"""Expresses to what extent the local structure is retained.
    The trustworthiness is within [0, 1]. It is defined as
    .. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.
    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study"
      J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten
    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.
    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.
    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.
    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    # Squared pairwise distances in the input space (skipped when the
    # caller supplies a precomputed square distance matrix).
    if precomputed:
        dist_X = X
    else:
        dist_X = pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    # Full neighbor ordering per sample in the input space.
    ind_X = np.argsort(dist_X, axis=1)
    # k nearest neighbors in the embedding; column 0 is dropped (it is
    # normally the point itself, at distance zero).
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
    n_samples = X.shape[0]
    t = 0.0
    ranks = np.zeros(n_neighbors)
    for i in range(n_samples):
        for j in range(n_neighbors):
            # Input-space rank of embedded neighbor j of sample i.
            ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
        # Only neighbors ranked worse than k in the input space count.
        ranks -= n_neighbors
        t += np.sum(ranks[ranks > 0])
    t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
                          (2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
    return t
class TSNEApprox(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 12.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 250.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2).fit_transform(X)
>>> X_embedded.shape
(4, 2)
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_N_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
                 n_iter_without_progress=300, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5):
        # Only store the hyper-parameters here; all validation is deferred
        # to _fit() so invalid settings raise at fit time (sklearn style).
        # Parameter semantics are documented in the class docstring.
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle
    def _fit(self, X, skip_num_points=0):
        """Fit the model using X as training data.

        Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that
            when method='barnes_hut', X cannot be a sparse array and if need be
            will be converted to a 32 bit float array. Method='exact' allows
            sparse arrays and 64bit floating point inputs.

        skip_num_points : int (optional, default:0)
            This does not compute the gradient for points with indices below
            `skip_num_points`. This is useful when computing transforms of new
            data where you'd like to keep the old data fixed.

        Returns
        -------
        X_embedded : array, shape (n_samples, n_components)
            The optimized embedding (returned by _tsne).
        """
        # --- Hyper-parameter and input validation ---------------------------
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.metric == "precomputed":
            # PCA init needs feature vectors, which a distance matrix lacks.
            if isinstance(self.init, string_types) and self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be "
                                 "used with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            if np.any(X < 0):
                raise ValueError("All distances should be positive, the "
                                 "precomputed distances given as X is not "
                                 "correct")
        if self.method == 'barnes_hut' and sp.issparse(X):
            raise TypeError('A sparse matrix was passed, but dense '
                            'data is required for method="barnes_hut". Use '
                            'X.toarray() to convert to a dense numpy array if '
                            'the array is small enough for it to fit in '
                            'memory. Otherwise consider dimensionality '
                            'reduction techniques (e.g. TruncatedSVD)')
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                            dtype=[np.float32, np.float64])
        if self.method == 'barnes_hut' and self.n_components > 3:
            raise ValueError("'n_components' should be inferior to 4 for the "
                             "barnes_hut algorithm as it relies on "
                             "quad-tree or oct-tree.")
        random_state = check_random_state(self.random_state)

        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is {}"
                             .format(self.early_exaggeration))

        if self.n_iter < 250:
            raise ValueError("n_iter should be at least 250")

        n_samples = X.shape[0]

        neighbors_nn = None
        if self.method == "exact":
            # Retrieve the distance matrix, either using the precomputed one
            # or computing it from the samples.
            if self.metric == "precomputed":
                distances = X
            else:
                if self.verbose:
                    print("[t-SNE] Computing pairwise distances...")

                if self.metric == "euclidean":
                    # Squared distances are used directly by the perplexity
                    # search for the euclidean metric.
                    distances = pairwise_distances(X, metric=self.metric,
                                                   squared=True)
                else:
                    distances = pairwise_distances(X, metric=self.metric)

                if np.any(distances < 0):
                    raise ValueError("All distances should be positive, the "
                                     "metric given is not correct")

            # Compute the joint probability distribution for the input space.
            P = _joint_probabilities(distances, self.perplexity, self.verbose)
            assert np.all(np.isfinite(P)), "All probabilities should be finite"
            assert np.all(P >= 0), "All probabilities should be non-negative"
            assert np.all(P <= 1), ("All probabilities should be less "
                                    "or then equal to one")

        else:
            # Compute the number of nearest neighbors to find.
            # LvdM uses 3 * perplexity as the number of neighbors.
            # In the event that we have very small # of points
            # set the neighbors to n - 1.
            k = min(n_samples - 1, int(3. * self.perplexity + 1))

            if self.verbose:
                print("[t-SNE] Computing {} nearest neighbors...".format(k))

            # Find the nearest neighbors for every point.
            # NOTE(review): neighbors_method is computed but never used below;
            # the Annoy index replaces sklearn's NearestNeighbors entirely.
            neighbors_method = 'ball_tree'
            if (self.metric == 'precomputed'):
                neighbors_method = 'brute'

            # Approximate nearest neighbors via Annoy (scanorama's
            # modification); the build() argument is the number of trees.
            knn = AnnoyIndex(X.shape[1], metric='euclidean')
            t0 = time()
            for i in range(n_samples):
                knn.add_item(i, X[i, :])
            knn.build(50)
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
                    n_samples, duration))

            t0 = time()
            neighbors_nn = np.zeros((n_samples, k), dtype=int)
            distances_nn = np.zeros((n_samples, k))
            for i in range(n_samples):
                (neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector(
                    X[i, :], k, include_distances=True
                )
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
                      .format(n_samples, duration))

            # Free the memory used by the index.
            del knn

            if self.metric == "euclidean":
                # knn return the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # the method was derived using the euclidean method as in the
                # input space. Not sure of the implication of using a different
                # metric.
                distances_nn **= 2

            # Compute the joint probability distribution for the input space
            # restricted to the k nearest neighbors (sparse CSR matrix).
            P = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                        self.perplexity, self.verbose)

        # --- Initialize the embedding ---------------------------------------
        if isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'pca':
            pca = PCA(n_components=self.n_components, svd_solver='randomized',
                      random_state=random_state)
            X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
        elif self.init == 'random':
            # The embedding is initialized with iid samples from Gaussians with
            # standard deviation 1e-4.
            X_embedded = 1e-4 * random_state.randn(
                n_samples, self.n_components).astype(np.float32)
        else:
            raise ValueError("'init' must be 'pca', 'random', or "
                             "a numpy array")

        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1.0, 1)

        return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)
    @property
    @deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
                "will be removed in 0.21. Use ``n_iter_`` instead")
    def n_iter_final(self):
        # Backwards-compatibility alias for the post-fit iteration count.
        return self.n_iter_
    def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
              neighbors=None, skip_num_points=0):
        """Runs t-SNE.

        Minimizes the Kullback-Leibler divergence between the input
        distribution P and the Student-t distribution Q of the embedding,
        using batch gradient descent in two stages:

        * initial optimization with early exaggeration and momentum at 0.5
        * final optimization with momentum at 0.8

        Returns the optimized embedding of shape (n_samples, n_components)
        and, as side effects, sets ``self.n_iter_`` and
        ``self.kl_divergence_``.
        """
        params = X_embedded.ravel()

        # Common optimizer settings; n_iter/momentum are overridden between
        # the two stages below.
        opt_args = {
            "it": 0,
            "n_iter_check": self._N_ITER_CHECK,
            "min_grad_norm": self.min_grad_norm,
            "learning_rate": self.learning_rate,
            "verbose": self.verbose,
            "kwargs": dict(skip_num_points=skip_num_points),
            "args": [P, degrees_of_freedom, n_samples, self.n_components],
            "n_iter_without_progress": self._EXPLORATION_N_ITER,
            "n_iter": self._EXPLORATION_N_ITER,
            "momentum": 0.5,
        }
        if self.method == 'barnes_hut':
            obj_func = _kl_divergence_bh
            opt_args['kwargs']['angle'] = self.angle
            # Repeat verbose argument for _kl_divergence_bh
            opt_args['kwargs']['verbose'] = self.verbose
        else:
            obj_func = _kl_divergence

        # Learning schedule (part 1): do 250 iteration with lower momentum but
        # higher learning rate controlled via the early exageration parameter.
        # P is scaled in place; it is restored below.
        P *= self.early_exaggeration
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
        if self.verbose:
            print("[t-SNE] KL divergence after %d iterations with early "
                  "exaggeration: %f" % (it + 1, kl_divergence))

        # Learning schedule (part 2): disable early exaggeration and finish
        # optimization with a higher momentum at 0.8
        P /= self.early_exaggeration
        remaining = self.n_iter - self._EXPLORATION_N_ITER
        if it < self._EXPLORATION_N_ITER or remaining > 0:
            opt_args['n_iter'] = self.n_iter
            opt_args['it'] = it + 1
            opt_args['momentum'] = 0.8
            opt_args['n_iter_without_progress'] = self.n_iter_without_progress
            params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                          **opt_args)

        # Save the final number of iterations
        self.n_iter_ = it

        if self.verbose:
            print("[t-SNE] Error after %d iterations: %f"
                  % (it + 1, kl_divergence))

        X_embedded = params.reshape(n_samples, self.n_components)
        self.kl_divergence_ = kl_divergence

        return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
    def fit(self, X, y=None):
        """Fit X into an embedded space.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. If the method
            is 'exact', X may be a sparse matrix of type 'csr', 'csc'
            or 'coo'.
        y : Ignored
            Present only for scikit-learn API compatibility.

        Returns
        -------
        self : TSNEApprox
            The fitted estimator (embedding available as ``embedding_``).
        """
        # Delegates to fit_transform, which stores the embedding.
        self.fit_transform(X)
        return self
|
brianhie/scanorama
|
scanorama/t_sne_approx.py
|
_gradient_descent
|
python
|
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
|
Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Value of the objective function at the returned parameters ``p``
(from the last evaluated iteration).
i : int
Last iteration.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L261-L383
| null |
# Modified by Brian Hie <brianhie@mit.edu> to use an approximate nearest
# neighbors search.
# Original source code available at:
# https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/manifold/t_sne.py
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import _utils
from sklearn.manifold import _barnes_hut_tsne
from sklearn.externals.six import string_types
from sklearn.utils import deprecated
from annoy import AnnoyIndex
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Distances of samples stored as a condensed matrix (diagonal and
        duplicate entries omitted, flattened to one dimension).
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Binary-search per-sample bandwidths so the conditional distributions
    # approximately match the desired perplexity.
    distances = distances.astype(np.float32, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, None, desired_perplexity, verbose)

    # Symmetrize, then normalize into a valid joint distribution; clamp at
    # machine epsilon to avoid zeros in later logarithms.
    sym_P = conditional_P + conditional_P.T
    normalizer = np.maximum(sym_P.sum(), MACHINE_EPSILON)
    return np.maximum(squareform(sym_P) / normalizer, MACHINE_EPSILON)
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances using just nearest
    neighbors.

    This method is approximately equal to _joint_probabilities. The latter
    is O(N), but limiting the joint probability to nearest neighbors improves
    this substantially to O(uN).

    Parameters
    ----------
    distances : array, shape (n_samples, k)
        Distances of samples to its k nearest neighbors.
    neighbors : array, shape (n_samples, k)
        Indices of the k nearest-neighbors for each samples.
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : csr sparse matrix, shape (n_samples, n_samples)
        Condensed joint probability matrix with only nearest neighbors.
    """
    t0 = time()
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity (per-row bandwidth binary search, in Cython).
    n_samples, k = neighbors.shape
    distances = distances.astype(np.float32, copy=False)
    neighbors = neighbors.astype(np.int64, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, neighbors, desired_perplexity, verbose)
    assert np.all(np.isfinite(conditional_P)), \
        "All probabilities should be finite"

    # Symmetrize the joint probability distribution using sparse operations.
    # Row i's k entries live at indptr slice [i*k, (i+1)*k), hence the
    # range(0, n_samples * k + 1, k) index pointer.
    P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
                    range(0, n_samples * k + 1, k)),
                   shape=(n_samples, n_samples))
    P = P + P.T

    # Normalize the joint probability distribution.
    sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
    P /= sum_P

    assert np.all(np.abs(P.data) <= 1.0)
    if verbose >= 2:
        duration = time() - t0
        print("[t-SNE] Computed conditional probabilities in {:.3f}s"
              .format(duration))
    return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0):
    """t-SNE objective: KL divergence of p_ijs and q_ijs plus its gradient.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    skip_num_points : int (optional, default:0)
        Gradients are not computed for points with indices below this
        value; useful when transforming new data with old data held fixed.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the KL divergence w.r.t. the embedding.
    """
    embedded = params.reshape(n_samples, n_components)

    # Heavy-tailed Student-t kernel over embedded pairwise distances, kept
    # in condensed (pdist) form throughout.
    kernel = pdist(embedded, "sqeuclidean")
    kernel += 1.
    kernel /= degrees_of_freedom
    kernel **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(kernel / (2.0 * np.sum(kernel)), MACHINE_EPSILON)

    # Objective C = KL(P || Q).  np.dot is used rather than sum(x * y)
    # because it dispatches to BLAS.
    kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))

    # Gradient dC/dY.  pdist returns double precision, so allocate grad
    # with the dtype of params explicitly.  Rows below skip_num_points are
    # intentionally left uninitialized, as in the reference implementation.
    grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
    PQd = squareform((P - Q) * kernel)
    for i in range(skip_num_points, n_samples):
        grad[i] = np.dot(np.ravel(PQd[i], order='K'),
                         embedded[i] - embedded)
    grad = grad.ravel()
    grad *= 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom

    return kl_divergence, grad
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
                      angle=0.5, skip_num_points=0, verbose=False):
    """t-SNE objective via Barnes-Hut: KL divergence of p_ijs and q_ijs.

    Uses Barnes-Hut tree methods to compute the gradient in O(NlogN)
    instead of O(N^2).

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : csr sparse matrix, shape (n_samples, n_sample)
        Sparse approximate joint probability matrix, computed only for the
        k nearest-neighbors and symmetrized.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    angle : float (default: 0.5)
        Speed/accuracy trade-off: angular size below which a distant tree
        node is summarized as a single point. Typical useful range is
        roughly 0.2 - 0.8.
    skip_num_points : int (optional, default:0)
        Gradients are not computed for points with indices below this value.
    verbose : int
        Verbosity level.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the KL divergence w.r.t. the embedding.
    """
    params = params.astype(np.float32, copy=False)
    X_embedded = params.reshape(n_samples, n_components)

    # Unpack the CSR joint-probability matrix into the raw arrays the
    # Cython kernel expects.
    val_P = P.data.astype(np.float32, copy=False)
    neighbors = P.indices.astype(np.int64, copy=False)
    indptr = P.indptr.astype(np.int64, copy=False)

    grad = np.zeros(X_embedded.shape, dtype=np.float32)
    error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
                                      grad, angle, n_components, verbose,
                                      dof=degrees_of_freedom)

    # Scale by the Student-t gradient constant and flatten.
    student_t_const = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    return error, grad.ravel() * student_t_const
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    """Expresses to what extent the local structure is retained.

    The trustworthiness score lies in [0, 1].  It penalizes points that
    appear among the ``n_neighbors`` nearest neighbors in the embedded
    space but not in the original space, weighted by how far down the
    original-space ranking those intruders sit.

    References: "Neighborhood Preservation in Nonlinear Projection
    Methods: An Experimental Study" (Venna & Kaski) and "Learning a
    Parametric Embedding by Preserving Local Structure" (van der Maaten).

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If ``precomputed`` is True, X must be a square distance matrix.
        Otherwise it contains one sample per row.
    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.
    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    dist_X = X if precomputed else pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)

    # Neighbor orderings: full ranking in the original space, and the k
    # nearest neighbors (excluding self at column 0) in the embedding.
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]

    n_samples = X.shape[0]
    penalty = 0.0
    for i in range(n_samples):
        # Original-space rank of each embedded-space neighbor of sample i.
        embedded_ranks = np.array(
            [np.flatnonzero(ind_X[i] == neighbor)[0]
             for neighbor in ind_X_embedded[i]], dtype=float)
        # Only neighbors ranked beyond k in the original space contribute.
        embedded_ranks -= n_neighbors
        penalty += embedded_ranks[embedded_ranks > 0].sum()

    normalizer = 2.0 / (n_samples * n_neighbors *
                        (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
    return 1.0 - penalty * normalizer
class TSNEApprox(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.

    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.

    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].

    Read more in the :ref:`User Guide <t_sne>`.

    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.
    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.
    early_exaggeration : float, optional (default: 12.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.
    learning_rate : float, optional (default: 200.0)
        The learning rate for t-SNE is usually in the range [10.0, 1000.0].
        If the learning rate is too high, the data may look like a 'ball'
        with any point approximately equidistant from its nearest
        neighbours. If the learning rate is too low, most points may look
        compressed in a dense cloud with few outliers. If the cost function
        gets stuck in a bad local minimum increasing the learning rate may
        help.
    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 250.
    n_iter_without_progress : int, optional (default: 300)
        Maximum number of iterations without progress before we abort the
        optimization, used after 250 initial iterations with early
        exaggeration. Note that progress is only checked every 50
        iterations so this value is rounded to the next multiple of 50.

        .. versionadded:: 0.17
           parameter *n_iter_without_progress* to control stopping criteria.
    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be stopped.
    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on
        each pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them. The default is "euclidean"
        which is interpreted as squared euclidean distance.
    init : string or numpy array, optional (default: "random")
        Initialization of embedding. Possible options are 'random', 'pca',
        and a numpy array of shape (n_samples, n_components).
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.
    verbose : int, optional (default: 0)
        Verbosity level.
    random_state : int, RandomState instance or None, optional (default: None)
        If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random
        number generator; If None, the random number generator is the
        RandomState instance used by `np.random`. Note that different
        initializations might result in different local minima of the cost
        function.
    method : string (default: 'barnes_hut')
        By default the gradient calculation algorithm uses Barnes-Hut
        approximation running in O(NlogN) time. method='exact'
        will run on the slower, but exact, algorithm in O(N^2) time. The
        exact algorithm should be used when nearest-neighbor errors need
        to be better than 3%. However, the exact method cannot scale to
        millions of examples.

        .. versionadded:: 0.17
           Approximate optimization *method* via the Barnes-Hut.
    angle : float (default: 0.5)
        Only used if method='barnes_hut'.
        This is the trade-off between speed and accuracy for Barnes-Hut
        T-SNE. 'angle' is the angular size (referred to as theta in [3]) of
        a distant node as measured from a point. If this size is below
        'angle' then it is used as a summary node of all points contained
        within it. This method is not very sensitive to changes in this
        parameter in the range of 0.2 - 0.8. Angle less than 0.2 has
        quickly increasing computation time and angle greater 0.8 has
        quickly increasing error.

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    kl_divergence_ : float
        Kullback-Leibler divergence after optimization.
    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> X_embedded = TSNE(n_components=2).fit_transform(X)
    >>> X_embedded.shape
    (4, 2)

    References
    ----------
    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional
        Data Using t-SNE. Journal of Machine Learning Research
        9:2579-2605, 2008.
    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html
    [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based
        Algorithms. Journal of Machine Learning Research
        15(Oct):3221-3245, 2014.
        http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
    """
    # Control the number of exploration iterations with early_exaggeration on
    _EXPLORATION_N_ITER = 250

    # Control the number of iterations between progress checks
    _N_ITER_CHECK = 50

    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
                 n_iter_without_progress=300, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5):
        # Only stores hyper-parameters; no validation happens until _fit.
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle

    def _fit(self, X, skip_num_points=0):
        """Fit the model using X as training data.

        Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that this
            when method='barnes_hut', X cannot be a sparse array and if
            need be will be converted to a 32 bit float array.
            Method='exact' allows sparse arrays and 64bit floating point
            inputs.
        skip_num_points : int (optional, default:0)
            This does not compute the gradient for points with indices
            below `skip_num_points`. This is useful when computing
            transforms of new data where you'd like to keep the old data
            fixed.
        """
        # --- Hyper-parameter / input validation -------------------------
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.metric == "precomputed":
            if isinstance(self.init, string_types) and self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be "
                                 "used with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            if np.any(X < 0):
                raise ValueError("All distances should be positive, the "
                                 "precomputed distances given as X is not "
                                 "correct")
        if self.method == 'barnes_hut' and sp.issparse(X):
            raise TypeError('A sparse matrix was passed, but dense '
                            'data is required for method="barnes_hut". Use '
                            'X.toarray() to convert to a dense numpy array if '
                            'the array is small enough for it to fit in '
                            'memory. Otherwise consider dimensionality '
                            'reduction techniques (e.g. TruncatedSVD)')
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                            dtype=[np.float32, np.float64])
        if self.method == 'barnes_hut' and self.n_components > 3:
            raise ValueError("'n_components' should be inferior to 4 for the "
                             "barnes_hut algorithm as it relies on "
                             "quad-tree or oct-tree.")
        random_state = check_random_state(self.random_state)

        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is {}"
                             .format(self.early_exaggeration))

        if self.n_iter < 250:
            raise ValueError("n_iter should be at least 250")

        n_samples = X.shape[0]

        neighbors_nn = None
        if self.method == "exact":
            # --- Dense path: full pairwise P ----------------------------
            # Retrieve the distance matrix, either using the precomputed one
            # or computing it.
            if self.metric == "precomputed":
                distances = X
            else:
                if self.verbose:
                    print("[t-SNE] Computing pairwise distances...")

                if self.metric == "euclidean":
                    distances = pairwise_distances(X, metric=self.metric,
                                                   squared=True)
                else:
                    distances = pairwise_distances(X, metric=self.metric)

                if np.any(distances < 0):
                    raise ValueError("All distances should be positive, the "
                                     "metric given is not correct")

            # compute the joint probability distribution for the input space
            P = _joint_probabilities(distances, self.perplexity, self.verbose)
            assert np.all(np.isfinite(P)), "All probabilities should be finite"
            assert np.all(P >= 0), "All probabilities should be non-negative"
            assert np.all(P <= 1), ("All probabilities should be less "
                                    "or then equal to one")
        else:
            # --- Barnes-Hut path: sparse P over approximate k-NN --------
            # Compute the number of nearest neighbors to find.
            # LvdM uses 3 * perplexity as the number of neighbors.
            # In the event that we have very small # of points
            # set the neighbors to n - 1.
            k = min(n_samples - 1, int(3. * self.perplexity + 1))

            if self.verbose:
                print("[t-SNE] Computing {} nearest neighbors...".format(k))

            # NOTE(review): neighbors_method is computed but never used below;
            # the Annoy index replaced the sklearn NearestNeighbors search.
            neighbors_method = 'ball_tree'
            if (self.metric == 'precomputed'):
                neighbors_method = 'brute'

            # NOTE(review): the Annoy index is always built with the
            # euclidean metric, regardless of self.metric -- confirm this is
            # intended for non-euclidean metrics.
            knn = AnnoyIndex(X.shape[1], metric='euclidean')
            t0 = time()
            for i in range(n_samples):
                knn.add_item(i, X[i, :])
            knn.build(50)  # number of random-projection trees
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
                    n_samples, duration))

            t0 = time()
            neighbors_nn = np.zeros((n_samples, k), dtype=int)
            distances_nn = np.zeros((n_samples, k))
            for i in range(n_samples):
                (neighbors_nn[i, :],
                 distances_nn[i, :]) = knn.get_nns_by_vector(
                    X[i, :], k, include_distances=True
                )
            duration = time() - t0
            if self.verbose:
                print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
                      .format(n_samples, duration))

            # Free the memory used by the Annoy index
            del knn

            if self.metric == "euclidean":
                # knn return the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # method was derived using the euclidean method as in the
                # input space. Not sure of the implication of using a
                # different metric.
                distances_nn **= 2

            # compute the joint probability distribution for the input space
            P = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                        self.perplexity, self.verbose)

        # --- Initialize the embedding -----------------------------------
        if isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'pca':
            pca = PCA(n_components=self.n_components, svd_solver='randomized',
                      random_state=random_state)
            X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
        elif self.init == 'random':
            # The embedding is initialized with iid samples from Gaussians
            # with standard deviation 1e-4.
            X_embedded = 1e-4 * random_state.randn(
                n_samples, self.n_components).astype(np.float32)
        else:
            raise ValueError("'init' must be 'pca', 'random', or "
                             "a numpy array")

        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1.0, 1)

        return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)

    @property
    @deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
                "will be removed in 0.21. Use ``n_iter_`` instead")
    def n_iter_final(self):
        # Deprecated alias kept for backward compatibility.
        return self.n_iter_

    def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
              neighbors=None, skip_num_points=0):
        """Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm
        # that we use is batch gradient descent with two stages:
        # * initial optimization with early exaggeration and momentum at 0.5
        # * final optimization with momentum at 0.8
        params = X_embedded.ravel()

        opt_args = {
            "it": 0,
            "n_iter_check": self._N_ITER_CHECK,
            "min_grad_norm": self.min_grad_norm,
            "learning_rate": self.learning_rate,
            "verbose": self.verbose,
            "kwargs": dict(skip_num_points=skip_num_points),
            "args": [P, degrees_of_freedom, n_samples, self.n_components],
            "n_iter_without_progress": self._EXPLORATION_N_ITER,
            "n_iter": self._EXPLORATION_N_ITER,
            "momentum": 0.5,
        }
        if self.method == 'barnes_hut':
            obj_func = _kl_divergence_bh
            opt_args['kwargs']['angle'] = self.angle
            # Repeat verbose argument for _kl_divergence_bh
            opt_args['kwargs']['verbose'] = self.verbose
        else:
            obj_func = _kl_divergence

        # Learning schedule (part 1): do 250 iterations with lower momentum
        # but higher learning rate controlled via the early exaggeration
        # parameter
        P *= self.early_exaggeration
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
        if self.verbose:
            print("[t-SNE] KL divergence after %d iterations with early "
                  "exaggeration: %f" % (it + 1, kl_divergence))

        # Learning schedule (part 2): disable early exaggeration and finish
        # optimization with a higher momentum at 0.8
        P /= self.early_exaggeration
        remaining = self.n_iter - self._EXPLORATION_N_ITER
        if it < self._EXPLORATION_N_ITER or remaining > 0:
            opt_args['n_iter'] = self.n_iter
            opt_args['it'] = it + 1
            opt_args['momentum'] = 0.8
            opt_args['n_iter_without_progress'] = self.n_iter_without_progress
            params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                          **opt_args)

        # Save the final number of iterations
        self.n_iter_ = it

        if self.verbose:
            print("[t-SNE] Error after %d iterations: %f"
                  % (it + 1, kl_divergence))

        X_embedded = params.reshape(n_samples, self.n_components)
        self.kl_divergence_ = kl_divergence

        return X_embedded

    def fit_transform(self, X, y=None):
        """Fit X into an embedded space and return that transformed
        output.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        embedding = self._fit(X)
        self.embedding_ = embedding
        return self.embedding_

    def fit(self, X, y=None):
        """Fit X into an embedded space.

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. If the method
            is 'exact', X may be a sparse matrix of type 'csr', 'csc'
            or 'coo'.
        """
        self.fit_transform(X)
        return self
|
brianhie/scanorama
|
scanorama/t_sne_approx.py
|
trustworthiness
|
python
|
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
|
Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L386-L445
| null |
# Modified by Brian Hie <brianhie@mit.edu> to use an approximate nearest
# neighbors search.
# Original source code available at:
# https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/manifold/t_sne.py
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import _utils
from sklearn.manifold import _barnes_hut_tsne
from sklearn.externals.six import string_types
from sklearn.utils import deprecated
from annoy import AnnoyIndex
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Distances of samples are stored as condensed matrices, i.e.
        we omit the diagonal and duplicate entries and store everything
        in a one-dimensional array.
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Binary-search per-point bandwidths so each conditional distribution
    # approximately matches the desired perplexity.
    cond_P = _utils._binary_search_perplexity(
        distances.astype(np.float32, copy=False), None,
        desired_perplexity, verbose)
    # Symmetrize the conditionals, then normalize into a joint distribution;
    # clamp at machine epsilon to keep later log() calls finite.
    symmetric = cond_P + cond_P.T
    total = np.maximum(np.sum(symmetric), MACHINE_EPSILON)
    return np.maximum(squareform(symmetric) / total, MACHINE_EPSILON)
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances using just nearest
    neighbors.

    This method is approximately equal to _joint_probabilities. The latter
    is O(N), but limiting the joint probability to nearest neighbors improves
    this substantially to O(uN).

    Parameters
    ----------
    distances : array, shape (n_samples, k)
        Distances of samples to its k nearest neighbors.
    neighbors : array, shape (n_samples, k)
        Indices of the k nearest-neighbors for each samples.
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : csr sparse matrix, shape (n_samples, n_samples)
        Condensed joint probability matrix with only nearest neighbors.
    """
    t0 = time()
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity
    n_samples, k = neighbors.shape
    # Cast to the dtypes the Cython binary-search routine expects.
    distances = distances.astype(np.float32, copy=False)
    neighbors = neighbors.astype(np.int64, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, neighbors, desired_perplexity, verbose)
    assert np.all(np.isfinite(conditional_P)), \
        "All probabilities should be finite"

    # Symmetrize the joint probability distribution using sparse operations.
    # The CSR triple (data, indices, indptr) is built directly: row i owns
    # entries [i*k, (i+1)*k) of the flattened conditional matrix.
    P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
                    range(0, n_samples * k + 1, k)),
                   shape=(n_samples, n_samples))
    P = P + P.T

    # Normalize the joint probability distribution
    sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
    P /= sum_P

    assert np.all(np.abs(P.data) <= 1.0)
    if verbose >= 2:
        duration = time() - t0
        print("[t-SNE] Computed conditional probabilities in {:.3f}s"
              .format(duration))
    return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0):
    """t-SNE objective function: gradient of the KL divergence
    of p_ijs and q_ijs and the absolute error.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    skip_num_points : int (optional, default:0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect
        to the embedding.
    """
    Y = params.reshape(n_samples, n_components)

    # Q is a heavy-tailed distribution: Student's t-distribution, evaluated
    # in-place on the condensed pairwise squared distances.
    kernel = pdist(Y, "sqeuclidean")
    kernel += 1.
    kernel /= degrees_of_freedom
    kernel **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(kernel / (2.0 * np.sum(kernel)), MACHINE_EPSILON)

    # Objective: C (Kullback-Leibler divergence of P and Q).
    # np.dot(x, y) is used instead of np.sum(x * y) because it calls BLAS.
    cost = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))

    # Gradient: dC/dY. Rows below skip_num_points are intentionally left
    # uncomputed (their memory is uninitialized, matching the original).
    grad = np.empty((n_samples, n_components), dtype=params.dtype)
    weights = squareform((P - Q) * kernel)
    for i in range(skip_num_points, n_samples):
        grad[i] = np.dot(np.ravel(weights[i], order='K'), Y[i] - Y)

    scale = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    return cost, grad.ravel() * scale
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
                      angle=0.5, skip_num_points=0, verbose=False):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs.

    Uses Barnes-Hut tree methods to calculate the gradient that
    runs in O(NlogN) instead of O(N^2)

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : csr sparse matrix, shape (n_samples, n_sample)
        Sparse approximate joint probability matrix, computed only for the
        k nearest-neighbors and symmetrized.
    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    angle : float (default: 0.5)
        This is the trade-off between speed and accuracy for Barnes-Hut
        T-SNE. 'angle' is the angular size (referred to as theta in [3]) of
        a distant node as measured from a point. If this size is below
        'angle' then it is used as a summary node of all points contained
        within it. This method is not very sensitive to changes in this
        parameter in the range of 0.2 - 0.8. Angle less than 0.2 has
        quickly increasing computation time and angle greater 0.8 has
        quickly increasing error.
    skip_num_points : int (optional, default:0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.
    verbose : int
        Verbosity level.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect
        to the embedding.
    """
    # The Cython gradient kernel requires float32 positions and int64
    # CSR index arrays.
    params = params.astype(np.float32, copy=False)
    X_embedded = params.reshape(n_samples, n_components)

    val_P = P.data.astype(np.float32, copy=False)
    neighbors = P.indices.astype(np.int64, copy=False)
    indptr = P.indptr.astype(np.int64, copy=False)

    grad = np.zeros(X_embedded.shape, dtype=np.float32)
    # The compiled routine fills `grad` in place and returns the KL error.
    error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
                                      grad, angle, n_components, verbose,
                                      dof=degrees_of_freedom)
    c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    grad = grad.ravel()
    grad *= c

    return error, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
class TSNEApprox(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 12.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 250.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2).fit_transform(X)
>>> X_embedded.shape
(4, 2)
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_N_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
n_iter_without_progress=300, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that this
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.metric == "precomputed":
if isinstance(self.init, string_types) and self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
if np.any(X < 0):
raise ValueError("All distances should be positive, the "
"precomputed distances given as X is not "
"correct")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float32, np.float64])
if self.method == 'barnes_hut' and self.n_components > 3:
raise ValueError("'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree.")
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is {}"
.format(self.early_exaggeration))
if self.n_iter < 250:
raise ValueError("n_iter should be at least 250")
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if np.any(distances < 0):
raise ValueError("All distances should be positive, the "
"metric given is not correct")
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
else:
# Cpmpute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
k = min(n_samples - 1, int(3. * self.perplexity + 1))
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(k))
# Find the nearest neighbors for every point
neighbors_method = 'ball_tree'
if (self.metric == 'precomputed'):
neighbors_method = 'brute'
knn = AnnoyIndex(X.shape[1], metric='euclidean')
t0 = time()
for i in range(n_samples):
knn.add_item(i, X[i, :])
knn.build(50)
duration = time() - t0
if self.verbose:
print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration))
t0 = time()
neighbors_nn = np.zeros((n_samples, k), dtype=int)
distances_nn = np.zeros((n_samples, k))
for i in range(n_samples):
(neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector(
X[i, :], k, include_distances=True
)
duration = time() - t0
if self.verbose:
print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
.format(n_samples, duration))
# Free the memory used by the ball_tree
del knn
if self.metric == "euclidean":
# knn return the euclidean distance but we need it squared
# to be consistent with the 'exact' method. Note that the
# the method was derived using the euclidean method as in the
# input space. Not sure of the implication of using a different
# metric.
distances_nn **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, neighbors_nn,
self.perplexity, self.verbose)
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'pca':
pca = PCA(n_components=self.n_components, svd_solver='randomized',
random_state=random_state)
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
elif self.init == 'random':
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.randn(
n_samples, self.n_components).astype(np.float32)
else:
raise ValueError("'init' must be 'pca', 'random', or "
"a numpy array")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
"will be removed in 0.21. Use ``n_iter_`` instead")
def n_iter_final(self):
return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self.learning_rate,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_N_ITER,
"n_iter": self._EXPLORATION_N_ITER,
"momentum": 0.5,
}
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
# Learning schedule (part 1): do 250 iteration with lower momentum but
# higher learning rate controlled via the early exageration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
|
brianhie/scanorama
|
scanorama/t_sne_approx.py
|
TSNEApprox._fit
|
python
|
def _fit(self, X, skip_num_points=0):
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.metric == "precomputed":
if isinstance(self.init, string_types) and self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
if np.any(X < 0):
raise ValueError("All distances should be positive, the "
"precomputed distances given as X is not "
"correct")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float32, np.float64])
if self.method == 'barnes_hut' and self.n_components > 3:
raise ValueError("'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree.")
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is {}"
.format(self.early_exaggeration))
if self.n_iter < 250:
raise ValueError("n_iter should be at least 250")
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if np.any(distances < 0):
raise ValueError("All distances should be positive, the "
"metric given is not correct")
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
else:
# Cpmpute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
k = min(n_samples - 1, int(3. * self.perplexity + 1))
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(k))
# Find the nearest neighbors for every point
neighbors_method = 'ball_tree'
if (self.metric == 'precomputed'):
neighbors_method = 'brute'
knn = AnnoyIndex(X.shape[1], metric='euclidean')
t0 = time()
for i in range(n_samples):
knn.add_item(i, X[i, :])
knn.build(50)
duration = time() - t0
if self.verbose:
print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration))
t0 = time()
neighbors_nn = np.zeros((n_samples, k), dtype=int)
distances_nn = np.zeros((n_samples, k))
for i in range(n_samples):
(neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector(
X[i, :], k, include_distances=True
)
duration = time() - t0
if self.verbose:
print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
.format(n_samples, duration))
# Free the memory used by the ball_tree
del knn
if self.metric == "euclidean":
# knn return the euclidean distance but we need it squared
# to be consistent with the 'exact' method. Note that the
# the method was derived using the euclidean method as in the
# input space. Not sure of the implication of using a different
# metric.
distances_nn **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, neighbors_nn,
self.perplexity, self.verbose)
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'pca':
pca = PCA(n_components=self.n_components, svd_solver='randomized',
random_state=random_state)
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
elif self.init == 'random':
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.randn(
n_samples, self.n_components).astype(np.float32)
else:
raise ValueError("'init' must be 'pca', 'random', or "
"a numpy array")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
|
Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that this
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L621-L784
| null |
class TSNEApprox(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 12.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 250.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2).fit_transform(X)
>>> X_embedded.shape
(4, 2)
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_N_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
n_iter_without_progress=300, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
"will be removed in 0.21. Use ``n_iter_`` instead")
def n_iter_final(self):
return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self.learning_rate,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_N_ITER,
"n_iter": self._EXPLORATION_N_ITER,
"momentum": 0.5,
}
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
# Learning schedule (part 1): do 250 iteration with lower momentum but
# higher learning rate controlled via the early exageration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
    """Fit X into an embedded space.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row. If the method
        is 'exact', X may be a sparse matrix of type 'csr', 'csc'
        or 'coo'.
    """
    # fit_transform both computes and stores embedding_; its return value
    # is deliberately dropped here, per the scikit-learn fit() convention.
    self.fit_transform(X)
    return self
|
brianhie/scanorama
|
scanorama/t_sne_approx.py
|
TSNEApprox._tsne
|
python
|
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
          neighbors=None, skip_num_points=0):
    """Runs t-SNE.

    Performs the two-stage gradient-descent optimization: an exploration
    phase with early exaggeration and momentum 0.5, then a final phase
    with the exaggeration removed and momentum 0.8.

    Parameters
    ----------
    P : array
        Joint probabilities of the input space (condensed form).
    degrees_of_freedom : float
        Degrees of freedom of the Student's t-distribution.
    n_samples : int
        Number of samples being embedded.
    random_state : RandomState
        Unused in this routine; kept for call-signature compatibility.
    X_embedded : array, shape (n_samples, n_components)
        Initial embedding; flattened and optimized.
    neighbors : array or None, optional
        Unused in this routine; kept for call-signature compatibility.
    skip_num_points : int, optional
        No gradient is computed for points with indices below this value.

    Returns
    -------
    X_embedded : array, shape (n_samples, n_components)
        The optimized embedding. Also sets ``self.n_iter_`` and
        ``self.kl_divergence_`` as side effects.
    """
    # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
    # and the Student's t-distributions Q. The optimization algorithm that
    # we use is batch gradient descent with two stages:
    # * initial optimization with early exaggeration and momentum at 0.5
    # * final optimization with momentum at 0.8
    params = X_embedded.ravel()
    opt_args = {
        "it": 0,
        "n_iter_check": self._N_ITER_CHECK,
        "min_grad_norm": self.min_grad_norm,
        "learning_rate": self.learning_rate,
        "verbose": self.verbose,
        "kwargs": dict(skip_num_points=skip_num_points),
        "args": [P, degrees_of_freedom, n_samples, self.n_components],
        "n_iter_without_progress": self._EXPLORATION_N_ITER,
        "n_iter": self._EXPLORATION_N_ITER,
        "momentum": 0.5,
    }
    if self.method == 'barnes_hut':
        obj_func = _kl_divergence_bh
        opt_args['kwargs']['angle'] = self.angle
        # Repeat verbose argument for _kl_divergence_bh
        opt_args['kwargs']['verbose'] = self.verbose
    else:
        obj_func = _kl_divergence
    # Learning schedule (part 1): do 250 iterations with lower momentum but
    # higher learning rate controlled via the early exaggeration parameter
    P *= self.early_exaggeration
    params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                  **opt_args)
    if self.verbose:
        print("[t-SNE] KL divergence after %d iterations with early "
              "exaggeration: %f" % (it + 1, kl_divergence))
    # Learning schedule (part 2): disable early exaggeration and finish
    # optimization with a higher momentum at 0.8
    P /= self.early_exaggeration
    remaining = self.n_iter - self._EXPLORATION_N_ITER
    if it < self._EXPLORATION_N_ITER or remaining > 0:
        opt_args['n_iter'] = self.n_iter
        # Resume the iteration counter where the exploration phase stopped.
        opt_args['it'] = it + 1
        opt_args['momentum'] = 0.8
        opt_args['n_iter_without_progress'] = self.n_iter_without_progress
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
    # Save the final number of iterations
    self.n_iter_ = it
    if self.verbose:
        print("[t-SNE] Error after %d iterations: %f"
              % (it + 1, kl_divergence))
    X_embedded = params.reshape(n_samples, self.n_components)
    self.kl_divergence_ = kl_divergence
    return X_embedded
|
Runs t-SNE.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L792-L853
| null |
class TSNEApprox(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.

    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.

    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].

    Read more in the :ref:`User Guide <t_sne>`.

    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.

    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.

    early_exaggeration : float, optional (default: 12.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.

    learning_rate : float, optional (default: 200.0)
        The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
        the learning rate is too high, the data may look like a 'ball' with any
        point approximately equidistant from its nearest neighbours. If the
        learning rate is too low, most points may look compressed in a dense
        cloud with few outliers. If the cost function gets stuck in a bad local
        minimum increasing the learning rate may help.

    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 250.

    n_iter_without_progress : int, optional (default: 300)
        Maximum number of iterations without progress before we abort the
        optimization, used after 250 initial iterations with early
        exaggeration. Note that progress is only checked every 50 iterations so
        this value is rounded to the next multiple of 50.

        .. versionadded:: 0.17
           parameter *n_iter_without_progress* to control stopping criteria.

    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be stopped.

    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them. The default is "euclidean" which is
        interpreted as squared euclidean distance.

    init : string or numpy array, optional (default: "random")
        Initialization of embedding. Possible options are 'random', 'pca',
        and a numpy array of shape (n_samples, n_components).
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.

    verbose : int, optional (default: 0)
        Verbosity level.

    random_state : int, RandomState instance or None, optional (default: None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Note that different initializations might result in
        different local minima of the cost function.

    method : string (default: 'barnes_hut')
        By default the gradient calculation algorithm uses Barnes-Hut
        approximation running in O(NlogN) time. method='exact'
        will run on the slower, but exact, algorithm in O(N^2) time. The
        exact algorithm should be used when nearest-neighbor errors need
        to be better than 3%. However, the exact method cannot scale to
        millions of examples.

        .. versionadded:: 0.17
           Approximate optimization *method* via the Barnes-Hut.

    angle : float (default: 0.5)
        Only used if method='barnes_hut'
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.

    kl_divergence_ : float
        Kullback-Leibler divergence after optimization.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> X_embedded = TSNE(n_components=2).fit_transform(X)
    >>> X_embedded.shape
    (4, 2)

    References
    ----------
    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.

    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html

    [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
        Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
        http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
    """

    # Control the number of exploration iterations with early_exaggeration on
    _EXPLORATION_N_ITER = 250

    # Control the number of iterations between progress checks
    _N_ITER_CHECK = 50

    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
                 n_iter_without_progress=300, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5):
        # Hyperparameters are stored verbatim; all validation is deferred to
        # _fit() so that scikit-learn's get_params/set_params work cleanly.
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle
def _fit(self, X, skip_num_points=0):
    """Fit the model using X as training data.

    Note that sparse arrays can only be handled by method='exact'.
    It is recommended that you convert your sparse array to dense
    (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
    dimensionality reduction technique (e.g. TruncatedSVD).

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row. Note that this
        when method='barnes_hut', X cannot be a sparse array and if need be
        will be converted to a 32 bit float array. Method='exact' allows
        sparse arrays and 64bit floating point inputs.
    skip_num_points : int (optional, default:0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.
    """
    # --- Parameter validation -------------------------------------------
    if self.method not in ['barnes_hut', 'exact']:
        raise ValueError("'method' must be 'barnes_hut' or 'exact'")
    if self.angle < 0.0 or self.angle > 1.0:
        raise ValueError("'angle' must be between 0.0 - 1.0")
    if self.metric == "precomputed":
        if isinstance(self.init, string_types) and self.init == 'pca':
            raise ValueError("The parameter init=\"pca\" cannot be "
                             "used with metric=\"precomputed\".")
        if X.shape[0] != X.shape[1]:
            raise ValueError("X should be a square distance matrix")
        if np.any(X < 0):
            raise ValueError("All distances should be positive, the "
                             "precomputed distances given as X is not "
                             "correct")
    if self.method == 'barnes_hut' and sp.issparse(X):
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required for method="barnes_hut". Use '
                        'X.toarray() to convert to a dense numpy array if '
                        'the array is small enough for it to fit in '
                        'memory. Otherwise consider dimensionality '
                        'reduction techniques (e.g. TruncatedSVD)')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=[np.float32, np.float64])
    if self.method == 'barnes_hut' and self.n_components > 3:
        raise ValueError("'n_components' should be inferior to 4 for the "
                         "barnes_hut algorithm as it relies on "
                         "quad-tree or oct-tree.")
    random_state = check_random_state(self.random_state)
    if self.early_exaggeration < 1.0:
        raise ValueError("early_exaggeration must be at least 1, but is {}"
                         .format(self.early_exaggeration))
    if self.n_iter < 250:
        raise ValueError("n_iter should be at least 250")

    n_samples = X.shape[0]
    neighbors_nn = None
    if self.method == "exact":
        # Retrieve the distance matrix, either using the precomputed one or
        # computing it.
        if self.metric == "precomputed":
            distances = X
        else:
            if self.verbose:
                print("[t-SNE] Computing pairwise distances...")
            if self.metric == "euclidean":
                distances = pairwise_distances(X, metric=self.metric,
                                               squared=True)
            else:
                distances = pairwise_distances(X, metric=self.metric)
            if np.any(distances < 0):
                raise ValueError("All distances should be positive, the "
                                 "metric given is not correct")
        # compute the joint probability distribution for the input space
        P = _joint_probabilities(distances, self.perplexity, self.verbose)
        assert np.all(np.isfinite(P)), "All probabilities should be finite"
        assert np.all(P >= 0), "All probabilities should be non-negative"
        assert np.all(P <= 1), ("All probabilities should be less "
                                "or then equal to one")
    else:
        # Compute the number of nearest neighbors to find.
        # LvdM uses 3 * perplexity as the number of neighbors.
        # In the event that we have very small # of points
        # set the neighbors to n - 1.
        k = min(n_samples - 1, int(3. * self.perplexity + 1))
        if self.verbose:
            print("[t-SNE] Computing {} nearest neighbors...".format(k))
        # Find the nearest neighbors for every point
        neighbors_method = 'ball_tree'
        if (self.metric == 'precomputed'):
            neighbors_method = 'brute'
        # NOTE(review): unlike stock scikit-learn t-SNE, this class builds an
        # approximate Annoy index for neighbor search (hence "TSNEApprox").
        knn = AnnoyIndex(X.shape[1], metric='euclidean')
        t0 = time()
        for i in range(n_samples):
            knn.add_item(i, X[i, :])
        knn.build(50)
        duration = time() - t0
        if self.verbose:
            print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
                n_samples, duration))
        t0 = time()
        neighbors_nn = np.zeros((n_samples, k), dtype=int)
        distances_nn = np.zeros((n_samples, k))
        for i in range(n_samples):
            (neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector(
                X[i, :], k, include_distances=True
            )
        duration = time() - t0
        if self.verbose:
            print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
                  .format(n_samples, duration))
        # Free the memory used by the ball_tree
        del knn
        if self.metric == "euclidean":
            # knn return the euclidean distance but we need it squared
            # to be consistent with the 'exact' method. Note that the
            # the method was derived using the euclidean method as in the
            # input space. Not sure of the implication of using a different
            # metric.
            distances_nn **= 2
        # compute the joint probability distribution for the input space
        P = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                    self.perplexity, self.verbose)

    # --- Initialize the embedding ---------------------------------------
    if isinstance(self.init, np.ndarray):
        X_embedded = self.init
    elif self.init == 'pca':
        pca = PCA(n_components=self.n_components, svd_solver='randomized',
                  random_state=random_state)
        X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
    elif self.init == 'random':
        # The embedding is initialized with iid samples from Gaussians with
        # standard deviation 1e-4.
        X_embedded = 1e-4 * random_state.randn(
            n_samples, self.n_components).astype(np.float32)
    else:
        raise ValueError("'init' must be 'pca', 'random', or "
                         "a numpy array")

    # Degrees of freedom of the Student's t-distribution. The suggestion
    # degrees_of_freedom = n_components - 1 comes from
    # "Learning a Parametric Embedding by Preserving Local Structure"
    # Laurens van der Maaten, 2009.
    degrees_of_freedom = max(self.n_components - 1.0, 1)

    return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                      X_embedded=X_embedded,
                      neighbors=neighbors_nn,
                      skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
            "will be removed in 0.21. Use ``n_iter_`` instead")
def n_iter_final(self):
    # Backwards-compatible alias for ``n_iter_`` (set by _tsne after fitting).
    return self.n_iter_
def fit_transform(self, X, y=None):
    """Fit X into an embedded space and return the embedding.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.

    Returns
    -------
    X_new : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    """
    # Delegate the optimization to _fit, then cache the result on the
    # estimator so it is exposed as the fitted ``embedding_`` attribute.
    self.embedding_ = self._fit(X)
    return self.embedding_
def fit(self, X, y=None):
    """Fit X into an embedded space.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row. If the method
        is 'exact', X may be a sparse matrix of type 'csr', 'csc'
        or 'coo'.
    """
    # fit_transform both computes and stores embedding_; its return value
    # is deliberately dropped here, per the scikit-learn fit() convention.
    self.fit_transform(X)
    return self
|
brianhie/scanorama
|
scanorama/t_sne_approx.py
|
TSNEApprox.fit_transform
|
python
|
def fit_transform(self, X, y=None):
    """Fit X into an embedded space and return the resulting embedding,
    also stored on the estimator as ``embedding_``."""
    self.embedding_ = self._fit(X)
    return self.embedding_
|
Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L855-L872
| null |
class TSNEApprox(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.

    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.

    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].

    Read more in the :ref:`User Guide <t_sne>`.

    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.

    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.

    early_exaggeration : float, optional (default: 12.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.

    learning_rate : float, optional (default: 200.0)
        The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
        the learning rate is too high, the data may look like a 'ball' with any
        point approximately equidistant from its nearest neighbours. If the
        learning rate is too low, most points may look compressed in a dense
        cloud with few outliers. If the cost function gets stuck in a bad local
        minimum increasing the learning rate may help.

    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 250.

    n_iter_without_progress : int, optional (default: 300)
        Maximum number of iterations without progress before we abort the
        optimization, used after 250 initial iterations with early
        exaggeration. Note that progress is only checked every 50 iterations so
        this value is rounded to the next multiple of 50.

        .. versionadded:: 0.17
           parameter *n_iter_without_progress* to control stopping criteria.

    min_grad_norm : float, optional (default: 1e-7)
        If the gradient norm is below this threshold, the optimization will
        be stopped.

    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them. The default is "euclidean" which is
        interpreted as squared euclidean distance.

    init : string or numpy array, optional (default: "random")
        Initialization of embedding. Possible options are 'random', 'pca',
        and a numpy array of shape (n_samples, n_components).
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.

    verbose : int, optional (default: 0)
        Verbosity level.

    random_state : int, RandomState instance or None, optional (default: None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Note that different initializations might result in
        different local minima of the cost function.

    method : string (default: 'barnes_hut')
        By default the gradient calculation algorithm uses Barnes-Hut
        approximation running in O(NlogN) time. method='exact'
        will run on the slower, but exact, algorithm in O(N^2) time. The
        exact algorithm should be used when nearest-neighbor errors need
        to be better than 3%. However, the exact method cannot scale to
        millions of examples.

        .. versionadded:: 0.17
           Approximate optimization *method* via the Barnes-Hut.

    angle : float (default: 0.5)
        Only used if method='barnes_hut'
        This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
        'angle' is the angular size (referred to as theta in [3]) of a distant
        node as measured from a point. If this size is below 'angle' then it is
        used as a summary node of all points contained within it.
        This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
        computation time and angle greater 0.8 has quickly increasing error.

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.

    kl_divergence_ : float
        Kullback-Leibler divergence after optimization.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> X_embedded = TSNE(n_components=2).fit_transform(X)
    >>> X_embedded.shape
    (4, 2)

    References
    ----------
    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.

    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html

    [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
        Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
        http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
    """

    # Control the number of exploration iterations with early_exaggeration on
    _EXPLORATION_N_ITER = 250

    # Control the number of iterations between progress checks
    _N_ITER_CHECK = 50

    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
                 n_iter_without_progress=300, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None, method='barnes_hut', angle=0.5):
        # Hyperparameters are stored verbatim; all validation is deferred to
        # _fit() so that scikit-learn's get_params/set_params work cleanly.
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
        self.method = method
        self.angle = angle
def _fit(self, X, skip_num_points=0):
    """Fit the model using X as training data.

    Note that sparse arrays can only be handled by method='exact'.
    It is recommended that you convert your sparse array to dense
    (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
    dimensionality reduction technique (e.g. TruncatedSVD).

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row. Note that this
        when method='barnes_hut', X cannot be a sparse array and if need be
        will be converted to a 32 bit float array. Method='exact' allows
        sparse arrays and 64bit floating point inputs.
    skip_num_points : int (optional, default:0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.
    """
    # --- Parameter validation -------------------------------------------
    if self.method not in ['barnes_hut', 'exact']:
        raise ValueError("'method' must be 'barnes_hut' or 'exact'")
    if self.angle < 0.0 or self.angle > 1.0:
        raise ValueError("'angle' must be between 0.0 - 1.0")
    if self.metric == "precomputed":
        if isinstance(self.init, string_types) and self.init == 'pca':
            raise ValueError("The parameter init=\"pca\" cannot be "
                             "used with metric=\"precomputed\".")
        if X.shape[0] != X.shape[1]:
            raise ValueError("X should be a square distance matrix")
        if np.any(X < 0):
            raise ValueError("All distances should be positive, the "
                             "precomputed distances given as X is not "
                             "correct")
    if self.method == 'barnes_hut' and sp.issparse(X):
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required for method="barnes_hut". Use '
                        'X.toarray() to convert to a dense numpy array if '
                        'the array is small enough for it to fit in '
                        'memory. Otherwise consider dimensionality '
                        'reduction techniques (e.g. TruncatedSVD)')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=[np.float32, np.float64])
    if self.method == 'barnes_hut' and self.n_components > 3:
        raise ValueError("'n_components' should be inferior to 4 for the "
                         "barnes_hut algorithm as it relies on "
                         "quad-tree or oct-tree.")
    random_state = check_random_state(self.random_state)
    if self.early_exaggeration < 1.0:
        raise ValueError("early_exaggeration must be at least 1, but is {}"
                         .format(self.early_exaggeration))
    if self.n_iter < 250:
        raise ValueError("n_iter should be at least 250")

    n_samples = X.shape[0]
    neighbors_nn = None
    if self.method == "exact":
        # Retrieve the distance matrix, either using the precomputed one or
        # computing it.
        if self.metric == "precomputed":
            distances = X
        else:
            if self.verbose:
                print("[t-SNE] Computing pairwise distances...")
            if self.metric == "euclidean":
                distances = pairwise_distances(X, metric=self.metric,
                                               squared=True)
            else:
                distances = pairwise_distances(X, metric=self.metric)
            if np.any(distances < 0):
                raise ValueError("All distances should be positive, the "
                                 "metric given is not correct")
        # compute the joint probability distribution for the input space
        P = _joint_probabilities(distances, self.perplexity, self.verbose)
        assert np.all(np.isfinite(P)), "All probabilities should be finite"
        assert np.all(P >= 0), "All probabilities should be non-negative"
        assert np.all(P <= 1), ("All probabilities should be less "
                                "or then equal to one")
    else:
        # Compute the number of nearest neighbors to find.
        # LvdM uses 3 * perplexity as the number of neighbors.
        # In the event that we have very small # of points
        # set the neighbors to n - 1.
        k = min(n_samples - 1, int(3. * self.perplexity + 1))
        if self.verbose:
            print("[t-SNE] Computing {} nearest neighbors...".format(k))
        # Find the nearest neighbors for every point
        neighbors_method = 'ball_tree'
        if (self.metric == 'precomputed'):
            neighbors_method = 'brute'
        # NOTE(review): unlike stock scikit-learn t-SNE, this class builds an
        # approximate Annoy index for neighbor search (hence "TSNEApprox").
        knn = AnnoyIndex(X.shape[1], metric='euclidean')
        t0 = time()
        for i in range(n_samples):
            knn.add_item(i, X[i, :])
        knn.build(50)
        duration = time() - t0
        if self.verbose:
            print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
                n_samples, duration))
        t0 = time()
        neighbors_nn = np.zeros((n_samples, k), dtype=int)
        distances_nn = np.zeros((n_samples, k))
        for i in range(n_samples):
            (neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector(
                X[i, :], k, include_distances=True
            )
        duration = time() - t0
        if self.verbose:
            print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
                  .format(n_samples, duration))
        # Free the memory used by the ball_tree
        del knn
        if self.metric == "euclidean":
            # knn return the euclidean distance but we need it squared
            # to be consistent with the 'exact' method. Note that the
            # the method was derived using the euclidean method as in the
            # input space. Not sure of the implication of using a different
            # metric.
            distances_nn **= 2
        # compute the joint probability distribution for the input space
        P = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                    self.perplexity, self.verbose)

    # --- Initialize the embedding ---------------------------------------
    if isinstance(self.init, np.ndarray):
        X_embedded = self.init
    elif self.init == 'pca':
        pca = PCA(n_components=self.n_components, svd_solver='randomized',
                  random_state=random_state)
        X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
    elif self.init == 'random':
        # The embedding is initialized with iid samples from Gaussians with
        # standard deviation 1e-4.
        X_embedded = 1e-4 * random_state.randn(
            n_samples, self.n_components).astype(np.float32)
    else:
        raise ValueError("'init' must be 'pca', 'random', or "
                         "a numpy array")

    # Degrees of freedom of the Student's t-distribution. The suggestion
    # degrees_of_freedom = n_components - 1 comes from
    # "Learning a Parametric Embedding by Preserving Local Structure"
    # Laurens van der Maaten, 2009.
    degrees_of_freedom = max(self.n_components - 1.0, 1)

    return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                      X_embedded=X_embedded,
                      neighbors=neighbors_nn,
                      skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
            "will be removed in 0.21. Use ``n_iter_`` instead")
def n_iter_final(self):
    # Backwards-compatible alias for ``n_iter_`` (set by _tsne after fitting).
    return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
          neighbors=None, skip_num_points=0):
    """Runs t-SNE.

    Performs the two-stage gradient-descent optimization: an exploration
    phase with early exaggeration and momentum 0.5, then a final phase
    with the exaggeration removed and momentum 0.8.

    Parameters
    ----------
    P : array
        Joint probabilities of the input space (condensed form).
    degrees_of_freedom : float
        Degrees of freedom of the Student's t-distribution.
    n_samples : int
        Number of samples being embedded.
    random_state : RandomState
        Unused in this routine; kept for call-signature compatibility.
    X_embedded : array, shape (n_samples, n_components)
        Initial embedding; flattened and optimized.
    neighbors : array or None, optional
        Unused in this routine; kept for call-signature compatibility.
    skip_num_points : int, optional
        No gradient is computed for points with indices below this value.

    Returns
    -------
    X_embedded : array, shape (n_samples, n_components)
        The optimized embedding. Also sets ``self.n_iter_`` and
        ``self.kl_divergence_`` as side effects.
    """
    # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
    # and the Student's t-distributions Q. The optimization algorithm that
    # we use is batch gradient descent with two stages:
    # * initial optimization with early exaggeration and momentum at 0.5
    # * final optimization with momentum at 0.8
    params = X_embedded.ravel()
    opt_args = {
        "it": 0,
        "n_iter_check": self._N_ITER_CHECK,
        "min_grad_norm": self.min_grad_norm,
        "learning_rate": self.learning_rate,
        "verbose": self.verbose,
        "kwargs": dict(skip_num_points=skip_num_points),
        "args": [P, degrees_of_freedom, n_samples, self.n_components],
        "n_iter_without_progress": self._EXPLORATION_N_ITER,
        "n_iter": self._EXPLORATION_N_ITER,
        "momentum": 0.5,
    }
    if self.method == 'barnes_hut':
        obj_func = _kl_divergence_bh
        opt_args['kwargs']['angle'] = self.angle
        # Repeat verbose argument for _kl_divergence_bh
        opt_args['kwargs']['verbose'] = self.verbose
    else:
        obj_func = _kl_divergence
    # Learning schedule (part 1): do 250 iterations with lower momentum but
    # higher learning rate controlled via the early exaggeration parameter
    P *= self.early_exaggeration
    params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                  **opt_args)
    if self.verbose:
        print("[t-SNE] KL divergence after %d iterations with early "
              "exaggeration: %f" % (it + 1, kl_divergence))
    # Learning schedule (part 2): disable early exaggeration and finish
    # optimization with a higher momentum at 0.8
    P /= self.early_exaggeration
    remaining = self.n_iter - self._EXPLORATION_N_ITER
    if it < self._EXPLORATION_N_ITER or remaining > 0:
        opt_args['n_iter'] = self.n_iter
        # Resume the iteration counter where the exploration phase stopped.
        opt_args['it'] = it + 1
        opt_args['momentum'] = 0.8
        opt_args['n_iter_without_progress'] = self.n_iter_without_progress
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
    # Save the final number of iterations
    self.n_iter_ = it
    if self.verbose:
        print("[t-SNE] Error after %d iterations: %f"
              % (it + 1, kl_divergence))
    X_embedded = params.reshape(n_samples, self.n_components)
    self.kl_divergence_ = kl_divergence
    return X_embedded
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
|
brianhie/scanorama
|
scanorama/scanorama.py
|
correct
|
python
|
def correct(datasets_full, genes_list, return_dimred=False,
batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None,
dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN,
return_dense=False, hvg=None, union=False,
geosketch=False, geosketch_max=20000):
datasets_full = check_datasets(datasets_full)
datasets, genes = merge_datasets(datasets_full, genes_list,
ds_names=ds_names, union=union)
datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
dimred=dimred)
datasets_dimred = assemble(
datasets_dimred, # Assemble in low dimensional space.
expr_datasets=datasets, # Modified in place.
verbose=verbose, knn=knn, sigma=sigma, approx=approx,
alpha=alpha, ds_names=ds_names, batch_size=batch_size,
geosketch=geosketch, geosketch_max=geosketch_max,
)
if return_dense:
datasets = [ ds.toarray() for ds in datasets ]
if return_dimred:
return datasets_dimred, datasets, genes
return datasets, genes
|
Integrate and batch correct a list of data sets.
Parameters
----------
datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
Data sets to integrate and correct.
genes_list: `list` of `list` of `string`
List of genes for each data set.
return_dimred: `bool`, optional (default: `False`)
In addition to returning batch corrected matrices, also returns
integrated low-dimesional embeddings.
batch_size: `int`, optional (default: `5000`)
The batch size used in the alignment vector computation. Useful when
correcting very large (>100k samples) data sets. Set to large value
that runs within available memory.
verbose: `bool` or `int`, optional (default: 2)
When `True` or not equal to 0, prints logging output.
ds_names: `list` of `string`, optional
When `verbose=True`, reports data set names in logging output.
dimred: `int`, optional (default: 100)
Dimensionality of integrated embedding.
approx: `bool`, optional (default: `True`)
Use approximate nearest neighbors, greatly speeds up matching runtime.
sigma: `float`, optional (default: 15)
Correction smoothing parameter on Gaussian kernel.
alpha: `float`, optional (default: 0.10)
Alignment score minimum cutoff.
knn: `int`, optional (default: 20)
Number of nearest neighbors to use for matching.
return_dense: `bool`, optional (default: `False`)
Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`.
hvg: `int`, optional (default: None)
Use this number of top highly variable genes based on dispersion.
Returns
-------
corrected, genes
By default (`return_dimred=False`), returns a two-tuple containing a
list of `scipy.sparse.csr_matrix` each with batch corrected values,
and a single list of genes containing the intersection of inputted
genes.
integrated, corrected, genes
When `return_dimred=False`, returns a three-tuple containing a list
of `numpy.ndarray` with integrated low dimensional embeddings, a list
of `scipy.sparse.csr_matrix` each with batch corrected values, and a
a single list of genes containing the intersection of inputted genes.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/scanorama.py#L37-L111
|
[
"def merge_datasets(datasets, genes, ds_names=None, verbose=True,\n union=False):\n if union:\n sys.stderr.write(\n 'WARNING: Integrating based on the union of genes is '\n 'highly discouraged, consider taking the intersection '\n 'or requantifying gene expression.\\n'\n )\n\n # Find genes in common.\n keep_genes = set()\n for idx, gene_list in enumerate(genes):\n if len(keep_genes) == 0:\n keep_genes = set(gene_list)\n elif union:\n keep_genes |= set(gene_list)\n else:\n keep_genes &= set(gene_list)\n if not union and not ds_names is None and verbose:\n print('After {}: {} genes'.format(ds_names[idx], len(keep_genes)))\n if len(keep_genes) == 0:\n print('Error: No genes found in all datasets, exiting...')\n exit(1)\n if verbose:\n print('Found {} genes among all datasets'\n .format(len(keep_genes)))\n\n if union:\n union_genes = sorted(keep_genes)\n for i in range(len(datasets)):\n if verbose:\n print('Processing data set {}'.format(i))\n X_new = np.zeros((datasets[i].shape[0], len(union_genes)))\n X_old = csc_matrix(datasets[i])\n gene_to_idx = { gene: idx for idx, gene in enumerate(genes[i]) }\n for j, gene in enumerate(union_genes):\n if gene in gene_to_idx:\n X_new[:, j] = X_old[:, gene_to_idx[gene]].toarray().flatten()\n datasets[i] = csr_matrix(X_new)\n ret_genes = np.array(union_genes)\n else:\n # Only keep genes in common.\n ret_genes = np.array(sorted(keep_genes))\n for i in range(len(datasets)):\n # Remove duplicate genes.\n uniq_genes, uniq_idx = np.unique(genes[i], return_index=True)\n datasets[i] = datasets[i][:, uniq_idx]\n\n # Do gene filtering.\n gene_sort_idx = np.argsort(uniq_genes)\n gene_idx = [ idx for idx in gene_sort_idx\n if uniq_genes[idx] in keep_genes ]\n datasets[i] = datasets[i][:, gene_idx]\n assert(np.array_equal(uniq_genes[gene_idx], ret_genes))\n\n return datasets, ret_genes\n",
"def process_data(datasets, genes, hvg=HVG, dimred=DIMRED, verbose=False):\n # Only keep highly variable genes\n if not hvg is None and hvg > 0 and hvg < len(genes):\n if verbose:\n print('Highly variable filter...')\n X = vstack(datasets)\n disp = dispersion(X)\n highest_disp_idx = np.argsort(disp[0])[::-1]\n top_genes = set(genes[highest_disp_idx[range(hvg)]])\n for i in range(len(datasets)):\n gene_idx = [ idx for idx, g_i in enumerate(genes)\n if g_i in top_genes ]\n datasets[i] = datasets[i][:, gene_idx]\n genes = np.array(sorted(top_genes))\n\n # Normalize.\n if verbose:\n print('Normalizing...')\n for i, ds in enumerate(datasets):\n datasets[i] = normalize(ds, axis=1)\n\n # Compute compressed embedding.\n if dimred > 0:\n if verbose:\n print('Reducing dimension...')\n datasets_dimred = dimensionality_reduce(datasets, dimred=dimred)\n if verbose:\n print('Done processing.')\n return datasets_dimred, genes\n\n if verbose:\n print('Done processing.')\n\n return datasets, genes\n",
"def assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,\n sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,\n ds_names=None, batch_size=None,\n geosketch=False, geosketch_max=20000, alignments=None, matches=None):\n if len(datasets) == 1:\n return datasets\n\n if alignments is None and matches is None:\n alignments, matches = find_alignments(\n datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,\n geosketch=geosketch, geosketch_max=geosketch_max\n )\n\n ds_assembled = {}\n panoramas = []\n for i, j in alignments:\n if verbose:\n if ds_names is None:\n print('Processing datasets {}'.format((i, j)))\n else:\n print('Processing datasets {} <=> {}'.\n format(ds_names[i], ds_names[j]))\n\n # Only consider a dataset a fixed amount of times.\n if not i in ds_assembled:\n ds_assembled[i] = 0\n ds_assembled[i] += 1\n if not j in ds_assembled:\n ds_assembled[j] = 0\n ds_assembled[j] += 1\n if ds_assembled[i] > 3 and ds_assembled[j] > 3:\n continue\n\n # See if datasets are involved in any current panoramas.\n panoramas_i = [ panoramas[p] for p in range(len(panoramas))\n if i in panoramas[p] ]\n assert(len(panoramas_i) <= 1)\n panoramas_j = [ panoramas[p] for p in range(len(panoramas))\n if j in panoramas[p] ]\n assert(len(panoramas_j) <= 1)\n\n if len(panoramas_i) == 0 and len(panoramas_j) == 0:\n if datasets[i].shape[0] < datasets[j].shape[0]:\n i, j = j, i\n panoramas.append([ i ])\n panoramas_i = [ panoramas[-1] ]\n\n # Map dataset i to panorama j.\n if len(panoramas_i) == 0:\n curr_ds = datasets[i]\n curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n match = []\n base = 0\n for p in panoramas_j[0]:\n if i < p and (i, p) in matches:\n match.extend([ (a, b + base) for a, b in matches[(i, p)] ])\n elif i > p and (p, i) in matches:\n match.extend([ (b, a + base) for a, b in matches[(p, i)] ])\n base += datasets[p].shape[0]\n\n ds_ind = [ a for a, _ in match ]\n ref_ind = [ b for _, b in match ]\n\n bias = 
transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n batch_size=batch_size)\n datasets[i] = curr_ds + bias\n\n if expr_datasets:\n curr_ds = expr_datasets[i]\n curr_ref = vstack([ expr_datasets[p]\n for p in panoramas_j[0] ])\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n sigma=sigma, cn=True, batch_size=batch_size)\n expr_datasets[i] = curr_ds + bias\n\n panoramas_j[0].append(i)\n\n # Map dataset j to panorama i.\n elif len(panoramas_j) == 0:\n curr_ds = datasets[j]\n curr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n\n match = []\n base = 0\n for p in panoramas_i[0]:\n if j < p and (j, p) in matches:\n match.extend([ (a, b + base) for a, b in matches[(j, p)] ])\n elif j > p and (p, j) in matches:\n match.extend([ (b, a + base) for a, b in matches[(p, j)] ])\n base += datasets[p].shape[0]\n\n ds_ind = [ a for a, _ in match ]\n ref_ind = [ b for _, b in match ]\n\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n batch_size=batch_size)\n datasets[j] = curr_ds + bias\n\n if expr_datasets:\n curr_ds = expr_datasets[j]\n curr_ref = vstack([ expr_datasets[p]\n for p in panoramas_i[0] ])\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n cn=True, batch_size=batch_size)\n expr_datasets[j] = curr_ds + bias\n\n panoramas_i[0].append(j)\n\n # Merge two panoramas together.\n else:\n curr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n # Find base indices into each panorama.\n base_i = 0\n for p in panoramas_i[0]:\n if p == i: break\n base_i += datasets[p].shape[0]\n base_j = 0\n for p in panoramas_j[0]:\n if p == j: break\n base_j += datasets[p].shape[0]\n\n # Find matching indices.\n match = []\n base = 0\n for p in panoramas_i[0]:\n if p == i and j < p and (j, p) in matches:\n match.extend([ (b + base, a + base_j)\n for a, b in matches[(j, p)] ])\n elif p == i and j > p and (p, j) in matches:\n match.extend([ (a + 
base, b + base_j)\n for a, b in matches[(p, j)] ])\n base += datasets[p].shape[0]\n base = 0\n for p in panoramas_j[0]:\n if p == j and i < p and (i, p) in matches:\n match.extend([ (a + base_i, b + base)\n for a, b in matches[(i, p)] ])\n elif p == j and i > p and (p, i) in matches:\n match.extend([ (b + base_i, a + base)\n for a, b in matches[(p, i)] ])\n base += datasets[p].shape[0]\n\n ds_ind = [ a for a, _ in match ]\n ref_ind = [ b for _, b in match ]\n\n # Apply transformation to entire panorama.\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n batch_size=batch_size)\n curr_ds += bias\n base = 0\n for p in panoramas_i[0]:\n n_cells = datasets[p].shape[0]\n datasets[p] = curr_ds[base:(base + n_cells), :]\n base += n_cells\n\n if not expr_datasets is None:\n curr_ds = vstack([ expr_datasets[p]\n for p in panoramas_i[0] ])\n curr_ref = vstack([ expr_datasets[p]\n for p in panoramas_j[0] ])\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n sigma=sigma, cn=True, batch_size=batch_size)\n curr_ds += bias\n base = 0\n for p in panoramas_i[0]:\n n_cells = expr_datasets[p].shape[0]\n expr_datasets[p] = curr_ds[base:(base + n_cells), :]\n base += n_cells\n\n # Merge panoramas i and j and delete one.\n if panoramas_i[0] != panoramas_j[0]:\n panoramas_i[0] += panoramas_j[0]\n panoramas.remove(panoramas_j[0])\n\n # Visualize.\n if view_match:\n plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)\n\n return datasets\n",
"def check_datasets(datasets_full):\n datasets_new = []\n for i, ds in enumerate(datasets_full):\n if issubclass(type(ds), np.ndarray):\n datasets_new.append(csr_matrix(ds))\n elif issubclass(type(ds), scipy.sparse.csr.csr_matrix):\n datasets_new.append(ds)\n else:\n sys.stderr.write('ERROR: Data sets must be numpy array or '\n 'scipy.sparse.csr_matrix, received type '\n '{}.\\n'.format(type(ds)))\n exit(1)\n return datasets_new\n"
] |
from annoy import AnnoyIndex
from intervaltree import IntervalTree
from itertools import cycle, islice
import numpy as np
import operator
import random
import scipy
from scipy.sparse import csc_matrix, csr_matrix, vstack
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import rbf_kernel, euclidean_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
import sys
import warnings
from .t_sne_approx import TSNEApprox
from .utils import plt, dispersion, reduce_dimensionality
from .utils import visualize_cluster, visualize_expr, visualize_dropout
from .utils import handle_zeros_in_scale
# Seed the global RNGs so matching and visualization are reproducible
# across runs.
np.random.seed(0)
random.seed(0)
# Default parameters.
ALPHA = 0.10        # Minimum alignment score for a dataset pair to be used.
APPROX = True       # Use approximate (Annoy) nearest neighbors by default.
BATCH_SIZE = 5000   # Batch size for the alignment-vector computation.
DIMRED = 100        # Dimensionality of the integrated embedding.
HVG = None          # Number of highly variable genes to keep (None = all).
KNN = 20            # Number of nearest neighbors used when matching cells.
N_ITER = 500        # t-SNE iterations (visualization only).
PERPLEXITY = 1200   # t-SNE perplexity (visualization only).
SIGMA = 15          # Gaussian kernel bandwidth for correction smoothing.
VERBOSE = 2         # Default logging verbosity.
# Do batch correction on a list of data sets.
def correct(datasets_full, genes_list, return_dimred=False,
            batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None,
            dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN,
            return_dense=False, hvg=None, union=False,
            geosketch=False, geosketch_max=20000):
    """Integrate and batch correct a list of data sets.

    Parameters
    ----------
    datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
        Data sets to integrate and correct.
    genes_list: `list` of `list` of `string`
        List of genes for each data set.
    return_dimred: `bool`, optional (default: `False`)
        In addition to returning batch corrected matrices, also returns
        integrated low-dimensional embeddings.
    batch_size: `int`, optional (default: `5000`)
        The batch size used in the alignment vector computation. Useful when
        correcting very large (>100k samples) data sets. Set to large value
        that runs within available memory.
    verbose: `bool` or `int`, optional (default: 2)
        When `True` or not equal to 0, prints logging output.
    ds_names: `list` of `string`, optional
        When `verbose=True`, reports data set names in logging output.
    dimred: `int`, optional (default: 100)
        Dimensionality of integrated embedding.
    approx: `bool`, optional (default: `True`)
        Use approximate nearest neighbors, greatly speeds up matching runtime.
    sigma: `float`, optional (default: 15)
        Correction smoothing parameter on Gaussian kernel.
    alpha: `float`, optional (default: 0.10)
        Alignment score minimum cutoff.
    knn: `int`, optional (default: 20)
        Number of nearest neighbors to use for matching.
    return_dense: `bool`, optional (default: `False`)
        Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`.
    hvg: `int`, optional (default: None)
        Use this number of top highly variable genes based on dispersion.
    union: `bool`, optional (default: `False`)
        Integrate on the union (rather than the intersection) of genes;
        discouraged, see `merge_datasets`.
    geosketch: `bool`, optional (default: `False`)
        Only match cells within geometric sketches of each data set.
    geosketch_max: `int`, optional (default: 20000)
        Maximum sketch size per data set when `geosketch=True`.

    Returns
    -------
    corrected, genes
        By default (`return_dimred=False`), returns a two-tuple containing a
        list of `scipy.sparse.csr_matrix` each with batch corrected values,
        and a single list of genes containing the intersection of inputted
        genes.
    integrated, corrected, genes
        When `return_dimred=True`, returns a three-tuple containing a list
        of `numpy.ndarray` with integrated low dimensional embeddings, a list
        of `scipy.sparse.csr_matrix` each with batch corrected values, and
        a single list of genes containing the intersection of inputted genes.
    """
    datasets_full = check_datasets(datasets_full)
    datasets, genes = merge_datasets(datasets_full, genes_list,
                                     ds_names=ds_names, union=union)
    datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
                                          dimred=dimred)
    datasets_dimred = assemble(
        datasets_dimred,  # Assemble in low dimensional space.
        expr_datasets=datasets,  # Modified in place.
        verbose=verbose, knn=knn, sigma=sigma, approx=approx,
        alpha=alpha, ds_names=ds_names, batch_size=batch_size,
        geosketch=geosketch, geosketch_max=geosketch_max,
    )
    if return_dense:
        datasets = [ ds.toarray() for ds in datasets ]
    if return_dimred:
        return datasets_dimred, datasets, genes
    return datasets, genes
# Integrate a list of data sets.
def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE,
              verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX,
              sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False,
              geosketch_max=20000, n_iter=1, union=False, hvg=None):
    """Integrate a list of data sets.

    Parameters
    ----------
    datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
        Data sets to integrate and correct.
    genes_list: `list` of `list` of `string`
        List of genes for each data set.
    batch_size: `int`, optional (default: `5000`)
        The batch size used in the alignment vector computation. Useful when
        correcting very large (>100k samples) data sets. Set to large value
        that runs within available memory.
    verbose: `bool` or `int`, optional (default: 2)
        When `True` or not equal to 0, prints logging output.
    ds_names: `list` of `string`, optional
        When `verbose=True`, reports data set names in logging output.
    dimred: `int`, optional (default: 100)
        Dimensionality of integrated embedding.
    approx: `bool`, optional (default: `True`)
        Use approximate nearest neighbors, greatly speeds up matching runtime.
    sigma: `float`, optional (default: 15)
        Correction smoothing parameter on Gaussian kernel.
    alpha: `float`, optional (default: 0.10)
        Alignment score minimum cutoff.
    knn: `int`, optional (default: 20)
        Number of nearest neighbors to use for matching.
    geosketch: `bool`, optional (default: `False`)
        Only match cells within geometric sketches of each data set.
    geosketch_max: `int`, optional (default: 20000)
        Maximum sketch size per data set when `geosketch=True`.
    n_iter: `int`, optional (default: 1)
        Number of assembly passes over the panorama.
    union: `bool`, optional (default: `False`)
        Integrate on the union (rather than the intersection) of genes;
        discouraged, see `merge_datasets`.
    hvg: `int`, optional (default: None)
        Use this number of top highly variable genes based on dispersion.

    Returns
    -------
    integrated, genes
        Returns a two-tuple containing a list of `numpy.ndarray` with
        integrated low dimensional embeddings and a single list of genes
        containing the intersection of inputted genes.
    """
    datasets_full = check_datasets(datasets_full)
    datasets, genes = merge_datasets(datasets_full, genes_list,
                                     ds_names=ds_names, union=union)
    datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
                                          dimred=dimred)
    # Repeated assembly can tighten the integration; default is one pass.
    for _ in range(n_iter):
        datasets_dimred = assemble(
            datasets_dimred,  # Assemble in low dimensional space.
            verbose=verbose, knn=knn, sigma=sigma, approx=approx,
            alpha=alpha, ds_names=ds_names, batch_size=batch_size,
            geosketch=geosketch, geosketch_max=geosketch_max,
        )
    return datasets_dimred, genes
# Batch correction with scanpy's AnnData object.
def correct_scanpy(adatas, **kwargs):
    """Batch correct a list of `scanpy.api.AnnData`.

    Parameters
    ----------
    adatas : `list` of `scanpy.api.AnnData`
        Data sets to integrate and/or correct.
    kwargs : `dict`
        See documentation for the `correct()` method for a full list of
        parameters to use for batch correction.

    Returns
    -------
    corrected
        By default (`return_dimred=False`), returns a list of
        `scanpy.api.AnnData` with batch corrected values in the `.X` field.
    integrated, corrected
        When `return_dimred=True`, returns a two-tuple containing a list of
        `np.ndarray` with integrated low-dimensional embeddings and a list
        of `scanpy.api.AnnData` with batch corrected values in the `.X`
        field.
    """
    # Evaluate the flag once instead of repeating the membership test;
    # kwargs is still forwarded unmodified so correct() sees the same value.
    return_dimred = kwargs.get('return_dimred', False)

    if return_dimred:
        datasets_dimred, datasets, genes = correct(
            [adata.X for adata in adatas],
            [adata.var_names.values for adata in adatas],
            **kwargs
        )
    else:
        datasets, genes = correct(
            [adata.X for adata in adatas],
            [adata.var_names.values for adata in adatas],
            **kwargs
        )

    # Write the corrected expression back into the AnnData objects in place.
    new_adatas = []
    for i, adata in enumerate(adatas):
        adata.X = datasets[i]
        new_adatas.append(adata)

    if return_dimred:
        return datasets_dimred, new_adatas
    return new_adatas
# Integration with scanpy's AnnData object.
def integrate_scanpy(adatas, **kwargs):
    """Integrate a list of `scanpy.api.AnnData`.

    Parameters
    ----------
    adatas : `list` of `scanpy.api.AnnData`
        Data sets to integrate.
    kwargs : `dict`
        See documentation for the `integrate()` method for a full list of
        parameters to use for batch correction.

    Returns
    -------
    integrated
        Returns a list of `np.ndarray` with integrated low-dimensional
        embeddings.
    """
    # Unpack the AnnData objects into the raw inputs integrate() expects.
    expr_matrices = [ adata.X for adata in adatas ]
    gene_lists = [ adata.var_names.values for adata in adatas ]
    datasets_dimred, genes = integrate(expr_matrices, gene_lists, **kwargs)
    return datasets_dimred
# Visualize a scatter plot with cluster labels in the
# `cluster' variable.
def plot_clusters(coords, clusters, s=1):
    """Scatter-plot 2-D coordinates colored by integer cluster label.

    Exits the process if the number of coordinates and labels disagree.
    """
    n_cells = coords.shape[0]
    n_labels = clusters.shape[0]
    if n_cells != n_labels:
        sys.stderr.write(
            'Error: mismatch, {} cells, {} labels\n'
            .format(n_cells, n_labels)
        )
        exit(1)

    # Fixed palette, cycled so any number of clusters gets a color.
    palette = [
        '#377eb8', '#ff7f00', '#4daf4a',
        '#f781bf', '#a65628', '#984ea3',
        '#999999', '#e41a1c', '#dede00',
        '#ffe119', '#e6194b', '#ffbea3',
        '#911eb4', '#46f0f0', '#f032e6',
        '#d2f53c', '#008080', '#e6beff',
        '#aa6e28', '#800000', '#aaffc3',
        '#808000', '#ffd8b1', '#000080',
        '#808080', '#fabebe', '#a3f4ff'
    ]
    n_colors = int(max(clusters) + 1)
    colors = np.array(list(islice(cycle(palette), n_colors)))

    plt.figure()
    plt.scatter(coords[:, 0], coords[:, 1],
                c=colors[clusters], s=s)
# Put datasets into a single matrix with the intersection of all genes.
def merge_datasets(datasets, genes, ds_names=None, verbose=True,
                   union=False):
    """Restrict all data sets to a shared, sorted gene set (in place).

    By default keeps the intersection of genes across data sets; with
    `union=True` keeps the union, zero-filling genes a data set lacks.
    Returns the (modified) datasets and the sorted array of kept genes.
    Exits the process if the intersection ever becomes empty.
    """
    if union:
        sys.stderr.write(
            'WARNING: Integrating based on the union of genes is '
            'highly discouraged, consider taking the intersection '
            'or requantifying gene expression.\n'
        )
    # Find genes in common.
    keep_genes = set()
    for idx, gene_list in enumerate(genes):
        if len(keep_genes) == 0:
            # First data set seeds the running gene set.
            keep_genes = set(gene_list)
        elif union:
            keep_genes |= set(gene_list)
        else:
            keep_genes &= set(gene_list)
        if not union and not ds_names is None and verbose:
            print('After {}: {} genes'.format(ds_names[idx], len(keep_genes)))
        if len(keep_genes) == 0:
            print('Error: No genes found in all datasets, exiting...')
            exit(1)
    if verbose:
        print('Found {} genes among all datasets'
              .format(len(keep_genes)))
    if union:
        union_genes = sorted(keep_genes)
        for i in range(len(datasets)):
            if verbose:
                print('Processing data set {}'.format(i))
            # Build a dense matrix over the union, copying columns the data
            # set actually has; CSC makes the column extraction efficient.
            X_new = np.zeros((datasets[i].shape[0], len(union_genes)))
            X_old = csc_matrix(datasets[i])
            gene_to_idx = { gene: idx for idx, gene in enumerate(genes[i]) }
            for j, gene in enumerate(union_genes):
                if gene in gene_to_idx:
                    X_new[:, j] = X_old[:, gene_to_idx[gene]].toarray().flatten()
            datasets[i] = csr_matrix(X_new)
        ret_genes = np.array(union_genes)
    else:
        # Only keep genes in common.
        ret_genes = np.array(sorted(keep_genes))
        for i in range(len(datasets)):
            # Remove duplicate genes.
            uniq_genes, uniq_idx = np.unique(genes[i], return_index=True)
            datasets[i] = datasets[i][:, uniq_idx]
            # Do gene filtering: select kept genes in sorted order so every
            # data set's columns line up with ret_genes.
            gene_sort_idx = np.argsort(uniq_genes)
            gene_idx = [ idx for idx in gene_sort_idx
                         if uniq_genes[idx] in keep_genes ]
            datasets[i] = datasets[i][:, gene_idx]
            assert(np.array_equal(uniq_genes[gene_idx], ret_genes))
    return datasets, ret_genes
def check_datasets(datasets_full):
    """Validate input data sets and coerce dense arrays to CSR sparse.

    Parameters
    ----------
    datasets_full : `list` of `numpy.ndarray` or `scipy.sparse.csr_matrix`
        Raw input data sets.

    Returns
    -------
    `list` of `scipy.sparse.csr_matrix`
        Dense arrays converted to CSR; CSR inputs passed through unchanged.
        Exits the process (SystemExit) on any other type, matching the
        original CLI-style error handling.
    """
    datasets_new = []
    for i, ds in enumerate(datasets_full):
        # isinstance replaces issubclass(type(ds), ...); csr_matrix is the
        # public class (scipy.sparse.csr.csr_matrix is a removed private
        # path on modern SciPy).
        if isinstance(ds, np.ndarray):
            datasets_new.append(csr_matrix(ds))
        elif isinstance(ds, csr_matrix):
            datasets_new.append(ds)
        else:
            sys.stderr.write('ERROR: Data sets must be numpy array or '
                             'scipy.sparse.csr_matrix, received type '
                             '{}.\n'.format(type(ds)))
            exit(1)
    return datasets_new
# Randomized SVD.
def dimensionality_reduce(datasets, dimred=DIMRED):
    """Embed all data sets jointly with randomized SVD.

    Stacks the data sets, reduces to `dimred` dimensions, and returns the
    reduced matrix split back into one block per input data set.
    """
    stacked = vstack(datasets)
    reduced = reduce_dimensionality(stacked, dim_red_k=dimred)
    # Row offsets of each data set within the stacked matrix.
    offsets = [0]
    for ds in datasets:
        offsets.append(offsets[-1] + ds.shape[0])
    return [ reduced[offsets[k]:offsets[k + 1], :]
             for k in range(len(datasets)) ]
# Normalize and reduce dimensionality.
def process_data(datasets, genes, hvg=HVG, dimred=DIMRED, verbose=False):
    """Optionally filter to highly variable genes, L1-normalize each cell,
    and reduce dimensionality.

    Parameters
    ----------
    datasets : `list` of sparse matrices (cells x genes); modified in place.
    genes : array of gene names aligned with the data set columns.
    hvg : keep this many top genes by dispersion (None/0 disables).
    dimred : target embedding dimension (0 disables reduction).
    verbose : print progress messages.

    Returns
    -------
    (reduced_or_normalized_datasets, genes)
    """
    # Only keep highly variable genes
    if not hvg is None and hvg > 0 and hvg < len(genes):
        if verbose:
            print('Highly variable filter...')
        X = vstack(datasets)
        disp = dispersion(X)
        highest_disp_idx = np.argsort(disp[0])[::-1]
        top_genes = set(genes[highest_disp_idx[range(hvg)]])
        # The retained column indices depend only on `genes` and
        # `top_genes`, so compute them once instead of per data set.
        gene_idx = [ idx for idx, g_i in enumerate(genes)
                     if g_i in top_genes ]
        for i in range(len(datasets)):
            datasets[i] = datasets[i][:, gene_idx]
        genes = np.array(sorted(top_genes))

    # Normalize each cell (row) to unit L1 norm.
    if verbose:
        print('Normalizing...')
    for i, ds in enumerate(datasets):
        datasets[i] = normalize(ds, axis=1)

    # Compute compressed embedding.
    if dimred > 0:
        if verbose:
            print('Reducing dimension...')
        datasets_dimred = dimensionality_reduce(datasets, dimred=dimred)
        if verbose:
            print('Done processing.')
        return datasets_dimred, genes

    if verbose:
        print('Done processing.')

    return datasets, genes
# Plot t-SNE visualization.
def visualize(assembled, labels, namespace, data_names,
              gene_names=None, gene_expr=None, genes=None,
              n_iter=N_ITER, perplexity=PERPLEXITY, verbose=VERBOSE,
              learn_rate=200., early_exag=12., embedding=None,
              shuffle_ds=False, size=1, multicore_tsne=True,
              image_suffix='.svg', viz_cluster=False):
    """t-SNE visualization of the integrated embeddings.

    Fits t-SNE on the concatenated `assembled` embeddings (unless a
    precomputed `embedding` is given), saves a cluster-colored scatter plot
    to `namespace + image_suffix`, and optionally plots per-data-set
    clusters and per-gene expression. Returns the 2-D embedding.
    """
    # Fit t-SNE.
    if embedding is None:
        try:
            from MulticoreTSNE import MulticoreTSNE
            tsne = MulticoreTSNE(
                n_iter=n_iter, perplexity=perplexity,
                verbose=verbose, random_state=69,
                learning_rate=learn_rate,
                early_exaggeration=early_exag,
                n_jobs=40
            )
        except ImportError:
            multicore_tsne = False
        if not multicore_tsne:
            tsne = TSNEApprox(
                n_iter=n_iter, perplexity=perplexity,
                verbose=verbose, random_state=69,
                learning_rate=learn_rate,
                early_exaggeration=early_exag
            )
        tsne.fit(np.concatenate(assembled))
        embedding = tsne.embedding_

    if shuffle_ds:
        # BUG FIX: the original shuffled a throwaway list(range(...)) copy,
        # leaving rand_idx an unshuffled range, so shuffle_ds was a no-op.
        rand_idx = list(range(embedding.shape[0]))
        random.shuffle(rand_idx)
        embedding = embedding[rand_idx, :]
        labels = labels[rand_idx]

    # Plot clusters together.
    plot_clusters(embedding, labels, s=size)
    plt.title(('Panorama ({} iter, perplexity: {}, sigma: {}, ' +
               'knn: {}, hvg: {}, dimred: {}, approx: {})')
              .format(n_iter, perplexity, SIGMA, KNN, HVG,
                      DIMRED, APPROX))
    plt.savefig(namespace + image_suffix, dpi=500)

    # Plot clusters individually.
    if viz_cluster and not shuffle_ds:
        for i in range(len(data_names)):
            visualize_cluster(embedding, i, labels,
                              cluster_name=data_names[i], size=size,
                              viz_prefix=namespace,
                              image_suffix=image_suffix)

    # Plot gene expression levels.
    if (not gene_names is None) and \
       (not gene_expr is None) and \
       (not genes is None):
        if shuffle_ds:
            # Keep expression rows aligned with the shuffled embedding.
            gene_expr = gene_expr[rand_idx, :]
        for gene_name in gene_names:
            visualize_expr(gene_expr, embedding,
                           genes, gene_name, size=size,
                           viz_prefix=namespace,
                           image_suffix=image_suffix)

    return embedding
# Exact nearest neighbors search.
def nn(ds1, ds2, knn=KNN, metric_p=2):
    """Exact k-nearest-neighbor matching from ds1 into ds2.

    Returns a set of (ds1_row, ds2_row) index pairs, one per neighbor.
    """
    # Find nearest neighbors of first dataset.
    neigh = NearestNeighbors(knn, p=metric_p)
    neigh.fit(ds2)
    neighbor_idx = neigh.kneighbors(ds1, return_distance=False)

    return { (row, col)
             for row, cols in zip(range(ds1.shape[0]), neighbor_idx)
             for col in cols }
# Approximate nearest neighbors using locality sensitive hashing.
def nn_approx(ds1, ds2, knn=KNN, metric='manhattan', n_trees=10):
    """Approximate k-nearest-neighbor matching via Annoy (LSH-style trees).

    Returns a set of (ds1_row, ds2_row) index pairs, one per neighbor.
    """
    # Build index over the reference data set.
    index = AnnoyIndex(ds2.shape[1], metric=metric)
    for item in range(ds2.shape[0]):
        index.add_item(item, ds2[item, :])
    index.build(n_trees)

    # Query the index with every row of the first data set.
    neighbor_rows = [
        index.get_nns_by_vector(ds1[row, :], knn, search_k=-1)
        for row in range(ds1.shape[0])
    ]
    ind = np.array(neighbor_rows)

    # Collect (query_row, neighbor_row) pairs.
    return { (row, col)
             for row, cols in zip(range(ds1.shape[0]), ind)
             for col in cols }
# Find mutual nearest neighbors.
def mnn(ds1, ds2, knn=KNN, approx=APPROX):
    """Mutual nearest neighbors between two data sets.

    A pair (a, b) is kept only if b is among a's neighbors in ds2 AND a is
    among b's neighbors in ds1.
    """
    finder = nn_approx if approx else nn

    # Match in both directions.
    forward = finder(ds1, ds2, knn=knn)
    backward = finder(ds2, ds1, knn=knn)

    # Keep only the pairs found in both directions.
    return forward & { (b, a) for a, b in backward }
# Visualize alignment between two datasets.
def plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind):
    """Visualize the alignment between a data set and its reference.

    t-SNE embeds both point clouds (offsetting the first vertically) and
    draws a translucent line for every matched pair.
    """
    tsne = TSNE(n_iter=400, verbose=VERBOSE, random_state=69)

    tsne.fit(curr_ds)
    plt.figure()
    coords_ds = tsne.embedding_[:, :]
    # Shift the data set's cloud upward so the two clouds do not overlap.
    coords_ds[:, 1] += 100
    plt.scatter(coords_ds[:, 0], coords_ds[:, 1])

    tsne.fit(curr_ref)
    coords_ref = tsne.embedding_[:, :]
    plt.scatter(coords_ref[:, 0], coords_ref[:, 1])

    # One polyline per match, separated by None so matplotlib breaks lines.
    x_list, y_list = [], []
    for ds_i, ref_i in zip(ds_ind, ref_ind):
        x_list.extend([ coords_ds[ds_i, 0], coords_ref[ref_i, 0], None ])
        y_list.extend([ coords_ds[ds_i, 1], coords_ref[ref_i, 1], None ])
    plt.plot(x_list, y_list, 'b-', alpha=0.3)

    plt.show()
# Populate a table (in place) that stores mutual nearest neighbors
# between datasets.
def fill_table(table, i, curr_ds, datasets, base_ds=0,
               knn=KNN, approx=APPROX):
    """Record nearest-neighbor matches from data set `i` into `datasets`.

    `datasets` is a slice of the global list starting at index `base_ds`.
    Matches are stored in `table` (mutated in place) keyed by (i, j) with
    values as sets of (row_in_i, row_in_j) pairs, where row_in_j is local
    to data set j.
    """
    curr_ref = np.concatenate(datasets)
    if approx:
        match = nn_approx(curr_ds, curr_ref, knn=knn)
    else:
        match = nn(curr_ds, curr_ref, knn=knn, metric_p=1)

    # Build interval tree mapping a row in the concatenated reference back
    # to (a) which data set it came from and (b) that data set's row offset.
    itree_ds_idx = IntervalTree()
    itree_pos_base = IntervalTree()
    pos = 0
    for j in range(len(datasets)):
        n_cells = datasets[j].shape[0]
        itree_ds_idx[pos:(pos + n_cells)] = base_ds + j
        itree_pos_base[pos:(pos + n_cells)] = pos
        pos += n_cells

    # Store all mutual nearest neighbors between datasets.
    for d, r in match:
        interval = itree_ds_idx[r]
        assert(len(interval) == 1)  # Intervals are disjoint: exactly one hit.
        j = interval.pop().data
        interval = itree_pos_base[r]
        assert(len(interval) == 1)
        base = interval.pop().data
        if not (i, j) in table:
            table[(i, j)] = set()
        # Subtract the offset so the stored index is local to data set j.
        table[(i, j)].add((d, r - base))
        assert(r - base >= 0)
# Cached geometric-sketch indices per data set, shared across calls when
# geosketch matching is enabled.
gs_idxs = None

# Fill table of alignment scores.
def find_alignments_table(datasets, knn=KNN, approx=APPROX, verbose=VERBOSE,
                          prenormalized=False, geosketch=False,
                          geosketch_max=20000):
    """Compute pairwise alignment scores and mutual-neighbor matches.

    Returns (table1, table_print, matches): alignment score per (i, j) pair
    with i < j, an optional score matrix for logging (None unless
    verbose > 1), and the mutual nearest-neighbor pairs per (i, j).
    """
    if not prenormalized:
        datasets = [ normalize(ds, axis=1) for ds in datasets ]

    if geosketch:
        # Only match cells in geometric sketches.
        from ample import gs, uniform
        global gs_idxs
        if gs_idxs is None:
            # Sketch each oversized data set once; reuse on later calls.
            gs_idxs = [ uniform(X, geosketch_max, replace=False)
                        if X.shape[0] > geosketch_max else range(X.shape[0])
                        for X in datasets ]
        datasets = [ datasets[i][gs_idx, :] for i, gs_idx in enumerate(gs_idxs) ]

    # Match each data set against all others (before and after it in the
    # list), accumulating directed neighbor sets into `table`.
    table = {}
    for i in range(len(datasets)):
        if len(datasets[:i]) > 0:
            fill_table(table, i, datasets[i], datasets[:i], knn=knn,
                       approx=approx)
        if len(datasets[i+1:]) > 0:
            fill_table(table, i, datasets[i], datasets[i+1:],
                       knn=knn, base_ds=i+1, approx=approx)
    # Count all mutual nearest neighbors between datasets.
    matches = {}
    table1 = {}
    if verbose > 1:
        table_print = np.zeros((len(datasets), len(datasets)))
    for i in range(len(datasets)):
        for j in range(len(datasets)):
            if i >= j:
                continue
            if not (i, j) in table or not (j, i) in table:
                continue
            # Mutual pairs: matched in both directions.
            match_ij = table[(i, j)]
            match_ji = set([ (b, a) for a, b in table[(j, i)] ])
            matches[(i, j)] = match_ij & match_ji

            # Alignment score: larger fraction of either data set's cells
            # involved in a mutual match.
            table1[(i, j)] = (max(
                float(len(set([ idx for idx, _ in matches[(i, j)] ]))) /
                datasets[i].shape[0],
                float(len(set([ idx for _, idx in matches[(i, j)] ]))) /
                datasets[j].shape[0]
            ))
            if verbose > 1:
                table_print[i, j] += table1[(i, j)]

            if geosketch:
                # Translate matches within geometric sketches to original indices.
                matches_mnn = matches[(i, j)]
                matches[(i, j)] = [
                    (gs_idxs[i][a], gs_idxs[j][b]) for a, b in matches_mnn
                ]

    if verbose > 1:
        print(table_print)
        return table1, table_print, matches
    else:
        return table1, None, matches
# Find the matching pairs of cells between datasets.
def find_alignments(datasets, knn=KNN, approx=APPROX, verbose=VERBOSE,
                    alpha=ALPHA, prenormalized=False,
                    geosketch=False, geosketch_max=20000):
    """Return data set pairs scoring above `alpha`, best first, plus their
    mutual nearest-neighbor matches."""
    table1, _, matches = find_alignments_table(
        datasets, knn=knn, approx=approx, verbose=verbose,
        prenormalized=prenormalized,
        geosketch=geosketch, geosketch_max=geosketch_max
    )

    # reversed(sorted(...)) (rather than reverse=True) is kept deliberately:
    # it preserves the original tie-breaking order of equal scores.
    ranked = reversed(sorted(table1.items(), key=operator.itemgetter(1)))
    alignments = []
    for pair, score in ranked:
        if score > alpha:
            alignments.append(pair)

    return alignments, matches
# Find connections between datasets to identify panoramas.
def connect(datasets, knn=KNN, approx=APPROX, alpha=ALPHA,
            verbose=VERBOSE):
    """Group data sets into panoramas (connected components of alignments).

    Returns a list of panoramas, each a list of data set indices; unaligned
    data sets end up as singleton panoramas.
    """
    # Find alignments.
    alignments, _ = find_alignments(
        datasets, knn=knn, approx=approx, alpha=alpha,
        verbose=verbose
    )
    if verbose:
        print(alignments)

    panoramas = []
    connected = set()
    for i, j in alignments:
        # See if datasets are involved in any current panoramas.
        panoramas_i = [ panoramas[p] for p in range(len(panoramas))
                        if i in panoramas[p] ]
        assert(len(panoramas_i) <= 1)  # A data set belongs to <= 1 panorama.
        panoramas_j = [ panoramas[p] for p in range(len(panoramas))
                        if j in panoramas[p] ]
        assert(len(panoramas_j) <= 1)

        if len(panoramas_i) == 0 and len(panoramas_j) == 0:
            # Neither is placed yet: start a new panorama seeded with i.
            panoramas.append([ i ])
            panoramas_i = [ panoramas[-1] ]

        if len(panoramas_i) == 0:
            # Only j is placed: add i to j's panorama.
            panoramas_j[0].append(i)
        elif len(panoramas_j) == 0:
            # Only i is placed: add j to i's panorama.
            panoramas_i[0].append(j)
        elif panoramas_i[0] != panoramas_j[0]:
            # Both placed in different panoramas: merge them.
            panoramas_i[0] += panoramas_j[0]
            panoramas.remove(panoramas_j[0])

        connected.add(i)
        connected.add(j)

    # Any data set never aligned becomes its own panorama.
    for i in range(len(datasets)):
        if not i in connected:
            panoramas.append([ i ])

    return panoramas
# To reduce memory usage, split bias computation into batches.
def batch_bias(curr_ds, match_ds, bias, batch_size=None, sigma=SIGMA):
    """Kernel-weighted average of ``bias`` vectors for each row of
    ``curr_ds``, optionally streaming over ``match_ds`` in batches so
    the full kernel matrix never has to be materialized.
    """
    if batch_size is None:
        # Small enough to do in one shot with row-normalized weights.
        weights = rbf_kernel(curr_ds, match_ds, gamma=0.5*sigma)
        weights = normalize(weights, axis=1, norm='l1')
        return np.dot(weights, bias)
    avg_bias = np.zeros(curr_ds.shape)
    denom = np.zeros(curr_ds.shape[0])
    n_match = match_ds.shape[0]
    start = 0
    while start < n_match:
        stop = min(start + batch_size, n_match)
        batch_idx = range(start, stop)
        weights = rbf_kernel(curr_ds, match_ds[batch_idx, :],
                             gamma=0.5*sigma)
        avg_bias += np.dot(weights, bias[batch_idx, :])
        denom += np.sum(weights, axis=1)
        start += batch_size
    # Normalize by accumulated weight mass, guarding against zeros.
    denom = handle_zeros_in_scale(denom, copy=False)
    avg_bias /= denom[:, np.newaxis]
    return avg_bias
# Compute nonlinear translation vectors between dataset
# and a reference.
def transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=SIGMA, cn=False,
              batch_size=None):
    """Return smoothed correction vectors that move ``curr_ds`` toward
    ``curr_ref``, derived from the matched index pairs
    ``ds_ind``/``ref_ind``.  With ``cn=True`` operates on sparse
    expression matrices and returns a sparse result.
    """
    # Bias vectors at the matched cells.
    match_ds = curr_ds[ds_ind, :]
    match_ref = curr_ref[ref_ind, :]
    bias = match_ref - match_ds
    if cn:
        match_ds = match_ds.toarray()
        curr_ds = curr_ds.toarray()
        bias = bias.toarray()
    # Promote numerical warnings to errors so oversmoothing is caught.
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            avg_bias = batch_bias(curr_ds, match_ds, bias, sigma=sigma,
                                  batch_size=batch_size)
        except RuntimeWarning:
            sys.stderr.write('WARNING: Oversmoothing detected, refusing to batch '
                             'correct, consider lowering sigma value.\n')
            return csr_matrix(curr_ds.shape, dtype=float)
        except MemoryError:
            if batch_size is None:
                sys.stderr.write('WARNING: Out of memory, consider turning on '
                                 'batched computation with batch_size parameter.\n')
            else:
                sys.stderr.write('WARNING: Out of memory, consider lowering '
                                 'the batch_size parameter.\n')
            return csr_matrix(curr_ds.shape, dtype=float)
    return csr_matrix(avg_bias) if cn else avg_bias
# Finds alignments between datasets and uses them to construct
# panoramas. "Merges" datasets by correcting gene expression
# values.
def assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,
             sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,
             ds_names=None, batch_size=None,
             geosketch=False, geosketch_max=20000, alignments=None, matches=None):
    """Stitch datasets into panoramas, correcting them toward each other.

    Aligned pairs (i, j) are processed in score order: a dataset not yet
    in any panorama is warped onto the panorama containing its partner;
    when both already sit in (different) panoramas, the two panoramas
    are merged wholesale.  `datasets` holds the low-dimensional
    embeddings that drive matching and is modified in place;
    `expr_datasets`, if given, receives the same corrections in
    gene-expression space (also in place).  Returns the corrected
    `datasets` list.
    """
    if len(datasets) == 1:
        return datasets
    if alignments is None and matches is None:
        alignments, matches = find_alignments(
            datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,
            geosketch=geosketch, geosketch_max=geosketch_max
        )
    # ds_assembled counts corrections per dataset so no dataset is
    # warped indefinitely; panoramas is a list of lists of dataset
    # indices already merged into a common space.
    ds_assembled = {}
    panoramas = []
    for i, j in alignments:
        if verbose:
            if ds_names is None:
                print('Processing datasets {}'.format((i, j)))
            else:
                print('Processing datasets {} <=> {}'.
                      format(ds_names[i], ds_names[j]))
        # Only consider a dataset a fixed amount of times.
        if not i in ds_assembled:
            ds_assembled[i] = 0
        ds_assembled[i] += 1
        if not j in ds_assembled:
            ds_assembled[j] = 0
        ds_assembled[j] += 1
        if ds_assembled[i] > 3 and ds_assembled[j] > 3:
            continue
        # See if datasets are involved in any current panoramas.
        panoramas_i = [ panoramas[p] for p in range(len(panoramas))
                        if i in panoramas[p] ]
        assert(len(panoramas_i) <= 1)
        panoramas_j = [ panoramas[p] for p in range(len(panoramas))
                        if j in panoramas[p] ]
        assert(len(panoramas_j) <= 1)
        if len(panoramas_i) == 0 and len(panoramas_j) == 0:
            # Neither dataset placed yet: seed a panorama with the
            # larger of the two so the smaller is warped onto it.
            if datasets[i].shape[0] < datasets[j].shape[0]:
                i, j = j, i
            panoramas.append([ i ])
            panoramas_i = [ panoramas[-1] ]
        # Map dataset i to panorama j.
        if len(panoramas_i) == 0:
            curr_ds = datasets[i]
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])
            # Collect matches from i to each panorama member, offsetting
            # reference indices by that member's row base in curr_ref.
            match = []
            base = 0
            for p in panoramas_j[0]:
                if i < p and (i, p) in matches:
                    match.extend([ (a, b + base) for a, b in matches[(i, p)] ])
                elif i > p and (p, i) in matches:
                    match.extend([ (b, a + base) for a, b in matches[(p, i)] ])
                base += datasets[p].shape[0]
            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            datasets[i] = curr_ds + bias
            if expr_datasets:
                # Apply the same matched correction in expression space.
                curr_ds = expr_datasets[i]
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_j[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
                                 sigma=sigma, cn=True, batch_size=batch_size)
                expr_datasets[i] = curr_ds + bias
            panoramas_j[0].append(i)
        # Map dataset j to panorama i.
        elif len(panoramas_j) == 0:
            curr_ds = datasets[j]
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])
            match = []
            base = 0
            for p in panoramas_i[0]:
                if j < p and (j, p) in matches:
                    match.extend([ (a, b + base) for a, b in matches[(j, p)] ])
                elif j > p and (p, j) in matches:
                    match.extend([ (b, a + base) for a, b in matches[(p, j)] ])
                base += datasets[p].shape[0]
            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            datasets[j] = curr_ds + bias
            if expr_datasets:
                curr_ds = expr_datasets[j]
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_i[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                                 cn=True, batch_size=batch_size)
                expr_datasets[j] = curr_ds + bias
            panoramas_i[0].append(j)
        # Merge two panoramas together.
        else:
            curr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])
            # Find base indices into each panorama.
            base_i = 0
            for p in panoramas_i[0]:
                if p == i: break
                base_i += datasets[p].shape[0]
            base_j = 0
            for p in panoramas_j[0]:
                if p == j: break
                base_j += datasets[p].shape[0]
            # Find matching indices.
            match = []
            base = 0
            for p in panoramas_i[0]:
                if p == i and j < p and (j, p) in matches:
                    match.extend([ (b + base, a + base_j)
                                   for a, b in matches[(j, p)] ])
                elif p == i and j > p and (p, j) in matches:
                    match.extend([ (a + base, b + base_j)
                                   for a, b in matches[(p, j)] ])
                base += datasets[p].shape[0]
            base = 0
            for p in panoramas_j[0]:
                if p == j and i < p and (i, p) in matches:
                    match.extend([ (a + base_i, b + base)
                                   for a, b in matches[(i, p)] ])
                elif p == j and i > p and (p, i) in matches:
                    match.extend([ (b + base_i, a + base)
                                   for a, b in matches[(p, i)] ])
                base += datasets[p].shape[0]
            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]
            # Apply transformation to entire panorama.
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            curr_ds += bias
            # Scatter the corrected rows back into the member datasets.
            base = 0
            for p in panoramas_i[0]:
                n_cells = datasets[p].shape[0]
                datasets[p] = curr_ds[base:(base + n_cells), :]
                base += n_cells
            if not expr_datasets is None:
                curr_ds = vstack([ expr_datasets[p]
                                   for p in panoramas_i[0] ])
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_j[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
                                 sigma=sigma, cn=True, batch_size=batch_size)
                curr_ds += bias
                base = 0
                for p in panoramas_i[0]:
                    n_cells = expr_datasets[p].shape[0]
                    expr_datasets[p] = curr_ds[base:(base + n_cells), :]
                    base += n_cells
            # Merge panoramas i and j and delete one.
            if panoramas_i[0] != panoramas_j[0]:
                panoramas_i[0] += panoramas_j[0]
                panoramas.remove(panoramas_j[0])
        # Visualize.
        if view_match:
            plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)
    return datasets
# Non-optimal dataset assembly. Simply accumulate datasets into a
# reference.
def assemble_accum(datasets, verbose=VERBOSE, knn=KNN, sigma=SIGMA,
                   approx=APPROX, batch_size=None):
    """Greedy assembly: fold each dataset, in order, into the reference
    formed by all previously processed datasets (in place)."""
    if len(datasets) == 1:
        return datasets
    for j in range(1, len(datasets)):
        i = j - 1
        if verbose:
            print('Processing datasets {}'.format((i, j)))
        query = datasets[j]
        reference = np.concatenate(datasets[:j])
        # Mutual nearest neighbors anchor the correction.
        pairs = mnn(query, reference, knn=knn, approx=approx)
        ds_ind = [ a for a, _ in pairs ]
        ref_ind = [ b for _, b in pairs ]
        correction = transform(query, reference, ds_ind, ref_ind,
                               sigma=sigma, batch_size=batch_size)
        datasets[j] = query + correction
    return datasets
def interpret_alignments(datasets, expr_datasets, genes,
                         verbose=VERBOSE, knn=KNN, approx=APPROX,
                         alpha=ALPHA, n_permutations=None):
    """Permutation test for genes driving each dataset alignment.

    For every aligned pair of datasets, compares the mean absolute
    expression shift across matched cells against a null distribution of
    shifts between randomly paired cells, and prints a per-gene p-value.
    `n_permutations` defaults to 30x the number of genes.
    """
    if n_permutations is None:
        n_permutations = len(genes) * 30
    # np.random.randint(size=...) and np.tile repetition counts require
    # an integer; the previous float(...) value raised a TypeError on
    # modern NumPy.
    n_permutations = int(n_permutations)
    alignments, matches = find_alignments(
        datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose
    )
    for i, j in alignments:
        # Compute average bias vector that aligns two datasets together.
        ds_i = expr_datasets[i]
        ds_j = expr_datasets[j]
        if i < j:
            match = matches[(i, j)]
        else:
            match = matches[(j, i)]
        i_ind = [ a for a, _ in match ]
        j_ind = [ b for _, b in match ]
        avg_bias = np.absolute(
            np.mean(ds_j[j_ind, :] - ds_i[i_ind, :], axis=0)
        )
        # Construct null distribution and compute p-value.
        null_bias = (
            ds_j[np.random.randint(ds_j.shape[0], size=n_permutations), :] -
            ds_i[np.random.randint(ds_i.shape[0], size=n_permutations), :]
        )
        # Pseudocount of 1 keeps p-values strictly positive.
        p = ((np.sum(np.greater_equal(
            np.absolute(np.tile(avg_bias, (n_permutations, 1))),
            np.absolute(null_bias)
        ), axis=0, dtype=float) + 1) / (n_permutations + 1))
        print('>>>> Stats for alignment {}'.format((i, j)))
        for k in range(len(p)):
            print('{}\t{}'.format(genes[k], p[k]))
|
brianhie/scanorama
|
scanorama/scanorama.py
|
integrate
|
python
|
def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE,
verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX,
sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False,
geosketch_max=20000, n_iter=1, union=False, hvg=None):
datasets_full = check_datasets(datasets_full)
datasets, genes = merge_datasets(datasets_full, genes_list,
ds_names=ds_names, union=union)
datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
dimred=dimred)
for _ in range(n_iter):
datasets_dimred = assemble(
datasets_dimred, # Assemble in low dimensional space.
verbose=verbose, knn=knn, sigma=sigma, approx=approx,
alpha=alpha, ds_names=ds_names, batch_size=batch_size,
geosketch=geosketch, geosketch_max=geosketch_max,
)
return datasets_dimred, genes
|
Integrate a list of data sets.
Parameters
----------
datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
Data sets to integrate and correct.
genes_list: `list` of `list` of `string`
List of genes for each data set.
batch_size: `int`, optional (default: `5000`)
The batch size used in the alignment vector computation. Useful when
correcting very large (>100k samples) data sets. Set to the largest
value that fits within available memory.
verbose: `bool` or `int`, optional (default: 2)
When `True` or not equal to 0, prints logging output.
ds_names: `list` of `string`, optional
When `verbose=True`, reports data set names in logging output.
dimred: `int`, optional (default: 100)
Dimensionality of integrated embedding.
approx: `bool`, optional (default: `True`)
Use approximate nearest neighbors, greatly speeds up matching runtime.
sigma: `float`, optional (default: 15)
Correction smoothing parameter on Gaussian kernel.
alpha: `float`, optional (default: 0.10)
Alignment score minimum cutoff.
knn: `int`, optional (default: 20)
Number of nearest neighbors to use for matching.
hvg: `int`, optional (default: None)
Use this number of top highly variable genes based on dispersion.
Returns
-------
integrated, genes
Returns a two-tuple containing a list of `numpy.ndarray` with
integrated low dimensional embeddings and a single list of genes
containing the intersection of inputted genes.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/scanorama.py#L114-L169
|
[
"def merge_datasets(datasets, genes, ds_names=None, verbose=True,\n union=False):\n if union:\n sys.stderr.write(\n 'WARNING: Integrating based on the union of genes is '\n 'highly discouraged, consider taking the intersection '\n 'or requantifying gene expression.\\n'\n )\n\n # Find genes in common.\n keep_genes = set()\n for idx, gene_list in enumerate(genes):\n if len(keep_genes) == 0:\n keep_genes = set(gene_list)\n elif union:\n keep_genes |= set(gene_list)\n else:\n keep_genes &= set(gene_list)\n if not union and not ds_names is None and verbose:\n print('After {}: {} genes'.format(ds_names[idx], len(keep_genes)))\n if len(keep_genes) == 0:\n print('Error: No genes found in all datasets, exiting...')\n exit(1)\n if verbose:\n print('Found {} genes among all datasets'\n .format(len(keep_genes)))\n\n if union:\n union_genes = sorted(keep_genes)\n for i in range(len(datasets)):\n if verbose:\n print('Processing data set {}'.format(i))\n X_new = np.zeros((datasets[i].shape[0], len(union_genes)))\n X_old = csc_matrix(datasets[i])\n gene_to_idx = { gene: idx for idx, gene in enumerate(genes[i]) }\n for j, gene in enumerate(union_genes):\n if gene in gene_to_idx:\n X_new[:, j] = X_old[:, gene_to_idx[gene]].toarray().flatten()\n datasets[i] = csr_matrix(X_new)\n ret_genes = np.array(union_genes)\n else:\n # Only keep genes in common.\n ret_genes = np.array(sorted(keep_genes))\n for i in range(len(datasets)):\n # Remove duplicate genes.\n uniq_genes, uniq_idx = np.unique(genes[i], return_index=True)\n datasets[i] = datasets[i][:, uniq_idx]\n\n # Do gene filtering.\n gene_sort_idx = np.argsort(uniq_genes)\n gene_idx = [ idx for idx in gene_sort_idx\n if uniq_genes[idx] in keep_genes ]\n datasets[i] = datasets[i][:, gene_idx]\n assert(np.array_equal(uniq_genes[gene_idx], ret_genes))\n\n return datasets, ret_genes\n",
"def process_data(datasets, genes, hvg=HVG, dimred=DIMRED, verbose=False):\n # Only keep highly variable genes\n if not hvg is None and hvg > 0 and hvg < len(genes):\n if verbose:\n print('Highly variable filter...')\n X = vstack(datasets)\n disp = dispersion(X)\n highest_disp_idx = np.argsort(disp[0])[::-1]\n top_genes = set(genes[highest_disp_idx[range(hvg)]])\n for i in range(len(datasets)):\n gene_idx = [ idx for idx, g_i in enumerate(genes)\n if g_i in top_genes ]\n datasets[i] = datasets[i][:, gene_idx]\n genes = np.array(sorted(top_genes))\n\n # Normalize.\n if verbose:\n print('Normalizing...')\n for i, ds in enumerate(datasets):\n datasets[i] = normalize(ds, axis=1)\n\n # Compute compressed embedding.\n if dimred > 0:\n if verbose:\n print('Reducing dimension...')\n datasets_dimred = dimensionality_reduce(datasets, dimred=dimred)\n if verbose:\n print('Done processing.')\n return datasets_dimred, genes\n\n if verbose:\n print('Done processing.')\n\n return datasets, genes\n",
"def assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,\n sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,\n ds_names=None, batch_size=None,\n geosketch=False, geosketch_max=20000, alignments=None, matches=None):\n if len(datasets) == 1:\n return datasets\n\n if alignments is None and matches is None:\n alignments, matches = find_alignments(\n datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,\n geosketch=geosketch, geosketch_max=geosketch_max\n )\n\n ds_assembled = {}\n panoramas = []\n for i, j in alignments:\n if verbose:\n if ds_names is None:\n print('Processing datasets {}'.format((i, j)))\n else:\n print('Processing datasets {} <=> {}'.\n format(ds_names[i], ds_names[j]))\n\n # Only consider a dataset a fixed amount of times.\n if not i in ds_assembled:\n ds_assembled[i] = 0\n ds_assembled[i] += 1\n if not j in ds_assembled:\n ds_assembled[j] = 0\n ds_assembled[j] += 1\n if ds_assembled[i] > 3 and ds_assembled[j] > 3:\n continue\n\n # See if datasets are involved in any current panoramas.\n panoramas_i = [ panoramas[p] for p in range(len(panoramas))\n if i in panoramas[p] ]\n assert(len(panoramas_i) <= 1)\n panoramas_j = [ panoramas[p] for p in range(len(panoramas))\n if j in panoramas[p] ]\n assert(len(panoramas_j) <= 1)\n\n if len(panoramas_i) == 0 and len(panoramas_j) == 0:\n if datasets[i].shape[0] < datasets[j].shape[0]:\n i, j = j, i\n panoramas.append([ i ])\n panoramas_i = [ panoramas[-1] ]\n\n # Map dataset i to panorama j.\n if len(panoramas_i) == 0:\n curr_ds = datasets[i]\n curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n match = []\n base = 0\n for p in panoramas_j[0]:\n if i < p and (i, p) in matches:\n match.extend([ (a, b + base) for a, b in matches[(i, p)] ])\n elif i > p and (p, i) in matches:\n match.extend([ (b, a + base) for a, b in matches[(p, i)] ])\n base += datasets[p].shape[0]\n\n ds_ind = [ a for a, _ in match ]\n ref_ind = [ b for _, b in match ]\n\n bias = 
transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n batch_size=batch_size)\n datasets[i] = curr_ds + bias\n\n if expr_datasets:\n curr_ds = expr_datasets[i]\n curr_ref = vstack([ expr_datasets[p]\n for p in panoramas_j[0] ])\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n sigma=sigma, cn=True, batch_size=batch_size)\n expr_datasets[i] = curr_ds + bias\n\n panoramas_j[0].append(i)\n\n # Map dataset j to panorama i.\n elif len(panoramas_j) == 0:\n curr_ds = datasets[j]\n curr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n\n match = []\n base = 0\n for p in panoramas_i[0]:\n if j < p and (j, p) in matches:\n match.extend([ (a, b + base) for a, b in matches[(j, p)] ])\n elif j > p and (p, j) in matches:\n match.extend([ (b, a + base) for a, b in matches[(p, j)] ])\n base += datasets[p].shape[0]\n\n ds_ind = [ a for a, _ in match ]\n ref_ind = [ b for _, b in match ]\n\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n batch_size=batch_size)\n datasets[j] = curr_ds + bias\n\n if expr_datasets:\n curr_ds = expr_datasets[j]\n curr_ref = vstack([ expr_datasets[p]\n for p in panoramas_i[0] ])\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n cn=True, batch_size=batch_size)\n expr_datasets[j] = curr_ds + bias\n\n panoramas_i[0].append(j)\n\n # Merge two panoramas together.\n else:\n curr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])\n curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])\n\n # Find base indices into each panorama.\n base_i = 0\n for p in panoramas_i[0]:\n if p == i: break\n base_i += datasets[p].shape[0]\n base_j = 0\n for p in panoramas_j[0]:\n if p == j: break\n base_j += datasets[p].shape[0]\n\n # Find matching indices.\n match = []\n base = 0\n for p in panoramas_i[0]:\n if p == i and j < p and (j, p) in matches:\n match.extend([ (b + base, a + base_j)\n for a, b in matches[(j, p)] ])\n elif p == i and j > p and (p, j) in matches:\n match.extend([ (a + 
base, b + base_j)\n for a, b in matches[(p, j)] ])\n base += datasets[p].shape[0]\n base = 0\n for p in panoramas_j[0]:\n if p == j and i < p and (i, p) in matches:\n match.extend([ (a + base_i, b + base)\n for a, b in matches[(i, p)] ])\n elif p == j and i > p and (p, i) in matches:\n match.extend([ (b + base_i, a + base)\n for a, b in matches[(p, i)] ])\n base += datasets[p].shape[0]\n\n ds_ind = [ a for a, _ in match ]\n ref_ind = [ b for _, b in match ]\n\n # Apply transformation to entire panorama.\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,\n batch_size=batch_size)\n curr_ds += bias\n base = 0\n for p in panoramas_i[0]:\n n_cells = datasets[p].shape[0]\n datasets[p] = curr_ds[base:(base + n_cells), :]\n base += n_cells\n\n if not expr_datasets is None:\n curr_ds = vstack([ expr_datasets[p]\n for p in panoramas_i[0] ])\n curr_ref = vstack([ expr_datasets[p]\n for p in panoramas_j[0] ])\n bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,\n sigma=sigma, cn=True, batch_size=batch_size)\n curr_ds += bias\n base = 0\n for p in panoramas_i[0]:\n n_cells = expr_datasets[p].shape[0]\n expr_datasets[p] = curr_ds[base:(base + n_cells), :]\n base += n_cells\n\n # Merge panoramas i and j and delete one.\n if panoramas_i[0] != panoramas_j[0]:\n panoramas_i[0] += panoramas_j[0]\n panoramas.remove(panoramas_j[0])\n\n # Visualize.\n if view_match:\n plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)\n\n return datasets\n",
"def check_datasets(datasets_full):\n datasets_new = []\n for i, ds in enumerate(datasets_full):\n if issubclass(type(ds), np.ndarray):\n datasets_new.append(csr_matrix(ds))\n elif issubclass(type(ds), scipy.sparse.csr.csr_matrix):\n datasets_new.append(ds)\n else:\n sys.stderr.write('ERROR: Data sets must be numpy array or '\n 'scipy.sparse.csr_matrix, received type '\n '{}.\\n'.format(type(ds)))\n exit(1)\n return datasets_new\n"
] |
from annoy import AnnoyIndex
from intervaltree import IntervalTree
from itertools import cycle, islice
import numpy as np
import operator
import random
import scipy
from scipy.sparse import csc_matrix, csr_matrix, vstack
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import rbf_kernel, euclidean_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
import sys
import warnings
from .t_sne_approx import TSNEApprox
from .utils import plt, dispersion, reduce_dimensionality
from .utils import visualize_cluster, visualize_expr, visualize_dropout
from .utils import handle_zeros_in_scale
# Seed the global RNGs so results are reproducible across runs.
np.random.seed(0)
random.seed(0)
# Default parameters.
ALPHA = 0.10         # Minimum alignment score to accept a dataset pair.
APPROX = True        # Use approximate (Annoy) nearest-neighbor search.
BATCH_SIZE = 5000    # Batch size for alignment-vector computation.
DIMRED = 100         # Dimensionality of the integrated embedding.
HVG = None           # Optional count of highly variable genes to keep.
KNN = 20             # Neighbors used when matching cells.
N_ITER = 500         # t-SNE iterations for visualization.
PERPLEXITY = 1200    # t-SNE perplexity for visualization.
SIGMA = 15           # Gaussian kernel smoothing for correction.
VERBOSE = 2          # Default logging verbosity.
# Do batch correction on a list of data sets.
def correct(datasets_full, genes_list, return_dimred=False,
            batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None,
            dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN,
            return_dense=False, hvg=None, union=False,
            geosketch=False, geosketch_max=20000):
    """Integrate and batch correct a list of data sets.

    Parameters
    ----------
    datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
        Data sets to integrate and correct.
    genes_list: `list` of `list` of `string`
        List of genes for each data set.
    return_dimred: `bool`, optional (default: `False`)
        In addition to returning batch corrected matrices, also returns
        integrated low-dimesional embeddings.
    batch_size: `int`, optional (default: `5000`)
        The batch size used in the alignment vector computation. Useful
        when correcting very large (>100k samples) data sets. Set to the
        largest value that fits within available memory.
    verbose: `bool` or `int`, optional (default: 2)
        When `True` or not equal to 0, prints logging output.
    ds_names: `list` of `string`, optional
        When `verbose=True`, reports data set names in logging output.
    dimred: `int`, optional (default: 100)
        Dimensionality of integrated embedding.
    approx: `bool`, optional (default: `True`)
        Use approximate nearest neighbors, greatly speeds up matching runtime.
    sigma: `float`, optional (default: 15)
        Correction smoothing parameter on Gaussian kernel.
    alpha: `float`, optional (default: 0.10)
        Alignment score minimum cutoff.
    knn: `int`, optional (default: 20)
        Number of nearest neighbors to use for matching.
    return_dense: `bool`, optional (default: `False`)
        Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`.
    hvg: `int`, optional (default: None)
        Use this number of top highly variable genes based on dispersion.
    union: `bool`, optional (default: `False`)
        Merge on the union (rather than intersection) of genes, with
        missing values zero-filled; strongly discouraged.
    geosketch: `bool`, optional (default: `False`)
        Match on geometric sketches of large data sets to speed up
        alignment.
    geosketch_max: `int`, optional (default: 20000)
        Only sketch data sets with more samples than this value.

    Returns
    -------
    corrected, genes
        By default (`return_dimred=False`), returns a two-tuple containing a
        list of `scipy.sparse.csr_matrix` each with batch corrected values,
        and a single list of genes containing the intersection of inputted
        genes.
    integrated, corrected, genes
        When `return_dimred=True`, returns a three-tuple containing a list
        of `numpy.ndarray` with integrated low dimensional embeddings, a
        list of `scipy.sparse.csr_matrix` each with batch corrected values,
        and a single list of genes containing the intersection of inputted
        genes.
    """
    datasets_full = check_datasets(datasets_full)
    datasets, genes = merge_datasets(datasets_full, genes_list,
                                     ds_names=ds_names, union=union)
    datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
                                          dimred=dimred)
    datasets_dimred = assemble(
        datasets_dimred, # Assemble in low dimensional space.
        expr_datasets=datasets, # Modified in place.
        verbose=verbose, knn=knn, sigma=sigma, approx=approx,
        alpha=alpha, ds_names=ds_names, batch_size=batch_size,
        geosketch=geosketch, geosketch_max=geosketch_max,
    )
    if return_dense:
        datasets = [ ds.toarray() for ds in datasets ]
    if return_dimred:
        return datasets_dimred, datasets, genes
    return datasets, genes
# Integrate a list of data sets.
# Batch correction with scanpy's AnnData object.
def correct_scanpy(adatas, **kwargs):
    """Batch correct a list of `scanpy.api.AnnData`.

    Parameters
    ----------
    adatas : `list` of `scanpy.api.AnnData`
        Data sets to integrate and/or correct.
    kwargs : `dict`
        See documentation for the `correct()` method for a full list of
        parameters to use for batch correction.

    Returns
    -------
    corrected
        By default (`return_dimred=False`), returns a list of
        `scanpy.api.AnnData` with batch corrected values in the `.X` field.
    integrated, corrected
        When `return_dimred=True`, returns a two-tuple containing a list of
        `np.ndarray` with integrated low-dimensional embeddings and a list
        of `scanpy.api.AnnData` with batch corrected values in the `.X`
        field.
    """
    if 'return_dimred' in kwargs and kwargs['return_dimred']:
        datasets_dimred, datasets, genes = correct(
            [adata.X for adata in adatas],
            [adata.var_names.values for adata in adatas],
            **kwargs
        )
    else:
        datasets, genes = correct(
            [adata.X for adata in adatas],
            [adata.var_names.values for adata in adatas],
            **kwargs
        )
    # Write the corrected matrices back onto the (shared) AnnData objects.
    new_adatas = []
    for i, adata in enumerate(adatas):
        adata.X = datasets[i]
        new_adatas.append(adata)
    if 'return_dimred' in kwargs and kwargs['return_dimred']:
        return datasets_dimred, new_adatas
    else:
        return new_adatas
# Integration with scanpy's AnnData object.
def integrate_scanpy(adatas, **kwargs):
    """Integrate a list of `scanpy.api.AnnData`.

    Parameters
    ----------
    adatas : `list` of `scanpy.api.AnnData`
        Data sets to integrate.
    kwargs : `dict`
        See documentation for the `integrate()` method for a full list of
        parameters to use for batch correction.

    Returns
    -------
    integrated
        Returns a list of `np.ndarray` with integrated low-dimensional
        embeddings.
    """
    expr_matrices = [ adata.X for adata in adatas ]
    gene_lists = [ adata.var_names.values for adata in adatas ]
    # The common gene list is discarded; callers only need embeddings.
    datasets_dimred, _ = integrate(expr_matrices, gene_lists, **kwargs)
    return datasets_dimred
# Visualize a scatter plot with cluster labels in the
# `cluster' variable.
def plot_clusters(coords, clusters, s=1):
    """Scatter-plot 2-D ``coords`` colored by integer ``clusters``."""
    if coords.shape[0] != clusters.shape[0]:
        sys.stderr.write(
            'Error: mismatch, {} cells, {} labels\n'
            .format(coords.shape[0], clusters.shape[0])
        )
        exit(1)
    # Cycle the fixed palette until every cluster label has a color.
    palette = [
        '#377eb8', '#ff7f00', '#4daf4a',
        '#f781bf', '#a65628', '#984ea3',
        '#999999', '#e41a1c', '#dede00',
        '#ffe119', '#e6194b', '#ffbea3',
        '#911eb4', '#46f0f0', '#f032e6',
        '#d2f53c', '#008080', '#e6beff',
        '#aa6e28', '#800000', '#aaffc3',
        '#808000', '#ffd8b1', '#000080',
        '#808080', '#fabebe', '#a3f4ff'
    ]
    n_labels = int(max(clusters) + 1)
    colors = np.array(list(islice(cycle(palette), n_labels)))
    plt.figure()
    plt.scatter(coords[:, 0], coords[:, 1],
                c=colors[clusters], s=s)
# Put datasets into a single matrix with the intersection of all genes.
def merge_datasets(datasets, genes, ds_names=None, verbose=True,
                   union=False):
    """Restrict every dataset to a common, sorted gene set (in place).

    With `union=False` (recommended) the intersection of all gene lists
    is kept; with `union=True` every gene is kept and missing genes are
    zero-filled.  Returns the modified `datasets` together with the
    ordered gene array.
    """
    if union:
        sys.stderr.write(
            'WARNING: Integrating based on the union of genes is '
            'highly discouraged, consider taking the intersection '
            'or requantifying gene expression.\n'
        )
    # Find genes in common.
    keep_genes = set()
    for idx, gene_list in enumerate(genes):
        if len(keep_genes) == 0:
            keep_genes = set(gene_list)
        elif union:
            keep_genes |= set(gene_list)
        else:
            keep_genes &= set(gene_list)
        if not union and not ds_names is None and verbose:
            print('After {}: {} genes'.format(ds_names[idx], len(keep_genes)))
        if len(keep_genes) == 0:
            print('Error: No genes found in all datasets, exiting...')
            exit(1)
    if verbose:
        print('Found {} genes among all datasets'
              .format(len(keep_genes)))
    if union:
        union_genes = sorted(keep_genes)
        for i in range(len(datasets)):
            if verbose:
                print('Processing data set {}'.format(i))
            # Rebuild each dataset column-by-column over the union gene
            # order, zero-filling genes this dataset did not measure.
            X_new = np.zeros((datasets[i].shape[0], len(union_genes)))
            X_old = csc_matrix(datasets[i])
            gene_to_idx = { gene: idx for idx, gene in enumerate(genes[i]) }
            for j, gene in enumerate(union_genes):
                if gene in gene_to_idx:
                    X_new[:, j] = X_old[:, gene_to_idx[gene]].toarray().flatten()
            datasets[i] = csr_matrix(X_new)
        ret_genes = np.array(union_genes)
    else:
        # Only keep genes in common.
        ret_genes = np.array(sorted(keep_genes))
        for i in range(len(datasets)):
            # Remove duplicate genes.
            uniq_genes, uniq_idx = np.unique(genes[i], return_index=True)
            datasets[i] = datasets[i][:, uniq_idx]
            # Do gene filtering.
            gene_sort_idx = np.argsort(uniq_genes)
            gene_idx = [ idx for idx in gene_sort_idx
                         if uniq_genes[idx] in keep_genes ]
            datasets[i] = datasets[i][:, gene_idx]
            # Column order must now agree with the shared gene array.
            assert(np.array_equal(uniq_genes[gene_idx], ret_genes))
    return datasets, ret_genes
def check_datasets(datasets_full):
    """Validate inputs and coerce dense arrays to `scipy.sparse.csr_matrix`.

    Dense `numpy.ndarray` inputs are converted to CSR; CSR inputs pass
    through unchanged.  Any other type aborts with an error message.
    """
    datasets_new = []
    for i, ds in enumerate(datasets_full):
        if isinstance(ds, np.ndarray):
            datasets_new.append(csr_matrix(ds))
        # Use the public csr_matrix class; the private
        # scipy.sparse.csr.csr_matrix path was removed in SciPy >= 1.14.
        elif isinstance(ds, csr_matrix):
            datasets_new.append(ds)
        else:
            sys.stderr.write('ERROR: Data sets must be numpy array or '
                             'scipy.sparse.csr_matrix, received type '
                             '{}.\n'.format(type(ds)))
            exit(1)
    return datasets_new
# Randomized SVD.
def dimensionality_reduce(datasets, dimred=DIMRED):
    """Embed all datasets jointly with randomized SVD, then split the
    result back into one matrix per dataset (same row partitioning)."""
    stacked = vstack(datasets)
    embedded = reduce_dimensionality(stacked, dim_red_k=dimred)
    datasets_dimred = []
    offset = 0
    for ds in datasets:
        n_rows = ds.shape[0]
        datasets_dimred.append(embedded[offset:(offset + n_rows), :])
        offset += n_rows
    return datasets_dimred
# Normalize and reduce dimensionality.
def process_data(datasets, genes, hvg=HVG, dimred=DIMRED, verbose=False):
    """L2-normalize each dataset (in place) and optionally restrict to
    highly variable genes and reduce to a `dimred`-dimensional embedding.

    Returns the processed datasets and the (possibly filtered) gene
    array.
    """
    # Only keep highly variable genes
    if not hvg is None and hvg > 0 and hvg < len(genes):
        if verbose:
            print('Highly variable filter...')
        X = vstack(datasets)
        disp = dispersion(X)
        highest_disp_idx = np.argsort(disp[0])[::-1]
        top_genes = set(genes[highest_disp_idx[:hvg]])
        # The retained columns are the same for every dataset, so build
        # the index list once instead of once per dataset.
        gene_idx = [ idx for idx, g_i in enumerate(genes)
                     if g_i in top_genes ]
        for i in range(len(datasets)):
            datasets[i] = datasets[i][:, gene_idx]
        genes = np.array(sorted(top_genes))
    # Normalize.
    if verbose:
        print('Normalizing...')
    for i, ds in enumerate(datasets):
        datasets[i] = normalize(ds, axis=1)
    # Compute compressed embedding.
    if dimred > 0:
        if verbose:
            print('Reducing dimension...')
        datasets_dimred = dimensionality_reduce(datasets, dimred=dimred)
        if verbose:
            print('Done processing.')
        return datasets_dimred, genes
    if verbose:
        print('Done processing.')
    return datasets, genes
# Plot t-SNE visualization.
def visualize(assembled, labels, namespace, data_names,
              gene_names=None, gene_expr=None, genes=None,
              n_iter=N_ITER, perplexity=PERPLEXITY, verbose=VERBOSE,
              learn_rate=200., early_exag=12., embedding=None,
              shuffle_ds=False, size=1, multicore_tsne=True,
              image_suffix='.svg', viz_cluster=False):
    """Compute (or reuse) a 2-D t-SNE embedding of the assembled data
    and save cluster and gene-expression plots under the `namespace`
    file prefix.  Returns the embedding so callers can reuse it.
    """
    # Fit t-SNE.
    if embedding is None:
        try:
            from MulticoreTSNE import MulticoreTSNE
            tsne = MulticoreTSNE(
                n_iter=n_iter, perplexity=perplexity,
                verbose=verbose, random_state=69,
                learning_rate=learn_rate,
                early_exaggeration=early_exag,
                n_jobs=40
            )
        except ImportError:
            multicore_tsne = False
        if not multicore_tsne:
            tsne = TSNEApprox(
                n_iter=n_iter, perplexity=perplexity,
                verbose=verbose, random_state=69,
                learning_rate=learn_rate,
                early_exaggeration=early_exag
            )
        tsne.fit(np.concatenate(assembled))
        embedding = tsne.embedding_
    if shuffle_ds:
        # BUG FIX: the old code shuffled a throwaway copy
        # (random.shuffle(list(range_obj))) and then indexed with the
        # untouched range, so shuffling silently never happened.
        rand_idx = list(range(embedding.shape[0]))
        random.shuffle(rand_idx)
        embedding = embedding[rand_idx, :]
        labels = labels[rand_idx]
    # Plot clusters together.
    plot_clusters(embedding, labels, s=size)
    plt.title(('Panorama ({} iter, perplexity: {}, sigma: {}, ' +
               'knn: {}, hvg: {}, dimred: {}, approx: {})')
              .format(n_iter, perplexity, SIGMA, KNN, HVG,
                      DIMRED, APPROX))
    plt.savefig(namespace + image_suffix, dpi=500)
    # Plot clusters individually.
    if viz_cluster and not shuffle_ds:
        for i in range(len(data_names)):
            visualize_cluster(embedding, i, labels,
                              cluster_name=data_names[i], size=size,
                              viz_prefix=namespace,
                              image_suffix=image_suffix)
    # Plot gene expression levels.
    if (not gene_names is None) and \
       (not gene_expr is None) and \
       (not genes is None):
        if shuffle_ds:
            gene_expr = gene_expr[rand_idx, :]
        for gene_name in gene_names:
            visualize_expr(gene_expr, embedding,
                           genes, gene_name, size=size,
                           viz_prefix=namespace,
                           image_suffix=image_suffix)
    return embedding
# Exact nearest neighbors search.
def nn(ds1, ds2, knn=KNN, metric_p=2):
    """Exact k-nearest-neighbors of each row of ``ds1`` within ``ds2``.

    Returns a set of (index into ds1, index into ds2) pairs.
    """
    # Find nearest neighbors of first dataset.
    # n_neighbors must be passed by keyword on scikit-learn >= 1.0;
    # the old positional form raises a TypeError there.
    nn_ = NearestNeighbors(n_neighbors=knn, p=metric_p)
    nn_.fit(ds2)
    ind = nn_.kneighbors(ds1, return_distance=False)
    match = set()
    for a, b in zip(range(ds1.shape[0]), ind):
        for b_i in b:
            match.add((a, b_i))
    return match
# Approximate nearest neighbors using locality sensitive hashing.
def nn_approx(ds1, ds2, knn=KNN, metric='manhattan', n_trees=10):
    """Approximate k-nearest-neighbors search from ds1 into ds2.

    Builds an Annoy index over the rows of ds2 and returns a set of
    (i, j) pairs, one per query row i of ds1 and neighbor row j of ds2.
    """
    # Index every row of the reference dataset.
    index = AnnoyIndex(ds2.shape[1], metric=metric)
    for j in range(ds2.shape[0]):
        index.add_item(j, ds2[j, :])
    index.build(n_trees)
    # Query the index with each row of ds1.
    neighbors = [
        index.get_nns_by_vector(ds1[i, :], knn, search_k=-1)
        for i in range(ds1.shape[0])
    ]
    neighbors = np.array(neighbors)
    # Flatten the neighbor lists into (query, neighbor) pairs.
    match = set()
    for i, row in enumerate(neighbors):
        for j in row:
            match.add((i, j))
    return match
# Find mutual nearest neighbors.
def mnn(ds1, ds2, knn=KNN, approx=APPROX):
    """Return the set of mutual nearest-neighbor (i, j) pairs between
    rows of ds1 and rows of ds2."""
    # Choose exact or approximate search once, use it both ways.
    search = nn_approx if approx else nn
    forward = search(ds1, ds2, knn=knn)
    backward = search(ds2, ds1, knn=knn)
    # A pair is mutual when it appears in both directions.
    return forward & { (b, a) for a, b in backward }
# Visualize alignment between two datasets.
def plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind):
    """Scatter t-SNE embeddings of a dataset and its reference (offset
    vertically) and draw a line segment for every matched cell pair."""
    tsne = TSNE(n_iter=400, verbose=VERBOSE, random_state=69)
    # Embed the query dataset and shift it upward so the two clouds
    # do not overlap in the figure.
    tsne.fit(curr_ds)
    plt.figure()
    coords_ds = tsne.embedding_[:, :]
    coords_ds[:, 1] += 100
    plt.scatter(coords_ds[:, 0], coords_ds[:, 1])
    # Embed the reference dataset below it.
    tsne.fit(curr_ref)
    coords_ref = tsne.embedding_[:, :]
    plt.scatter(coords_ref[:, 0], coords_ref[:, 1])
    # None entries break the polyline so one plot() call draws all
    # match segments at once.
    x_list, y_list = [], []
    for a, b in zip(ds_ind, ref_ind):
        x_list.extend((coords_ds[a, 0], coords_ref[b, 0], None))
        y_list.extend((coords_ds[a, 1], coords_ref[b, 1], None))
    plt.plot(x_list, y_list, 'b-', alpha=0.3)
    plt.show()
# Populate a table (in place) that stores mutual nearest neighbors
# between datasets.
def fill_table(table, i, curr_ds, datasets, base_ds=0,
               knn=KNN, approx=APPROX):
    """Record nearest-neighbor matches from dataset i into each dataset
    in `datasets`, mutating `table` in place.

    `table` maps (i, j) dataset-index pairs to sets of (cell_in_i,
    cell_in_j) index pairs; `base_ds` is the global index of
    datasets[0] so that j is reported in global dataset numbering.
    """
    # Match dataset i against the concatenation of all reference datasets.
    curr_ref = np.concatenate(datasets)
    if approx:
        match = nn_approx(curr_ds, curr_ref, knn=knn)
    else:
        # metric_p=1: Manhattan distance, matching the approximate branch.
        match = nn(curr_ds, curr_ref, knn=knn, metric_p=1)
    # Build interval tree.
    # Maps each row range of the concatenated reference back to its
    # source dataset index and to that dataset's row offset.
    itree_ds_idx = IntervalTree()
    itree_pos_base = IntervalTree()
    pos = 0
    for j in range(len(datasets)):
        n_cells = datasets[j].shape[0]
        itree_ds_idx[pos:(pos + n_cells)] = base_ds + j
        itree_pos_base[pos:(pos + n_cells)] = pos
        pos += n_cells
    # Store all mutual nearest neighbors between datasets.
    for d, r in match:
        interval = itree_ds_idx[r]
        assert(len(interval) == 1)
        j = interval.pop().data
        interval = itree_pos_base[r]
        assert(len(interval) == 1)
        base = interval.pop().data
        if not (i, j) in table:
            table[(i, j)] = set()
        # Store the match with r translated to dataset-local indexing.
        table[(i, j)].add((d, r - base))
        assert(r - base >= 0)
gs_idxs = None
# Fill table of alignment scores.
def find_alignments_table(datasets, knn=KNN, approx=APPROX, verbose=VERBOSE,
                          prenormalized=False, geosketch=False,
                          geosketch_max=20000):
    """Score all dataset pairs by their fraction of mutually matched cells.

    Returns (table1, table_print, matches): table1 maps (i, j) with
    i < j to an alignment score in [0, 1]; table_print is the score
    matrix when verbose > 1, else None; matches maps (i, j) to the
    mutual nearest-neighbor cell pairs.
    """
    if not prenormalized:
        datasets = [ normalize(ds, axis=1) for ds in datasets ]
    if geosketch:
        # Only match cells in geometric sketches.
        from ample import gs, uniform
        # NOTE: mutates module-level gs_idxs so repeated calls reuse the
        # same sketch.
        global gs_idxs
        if gs_idxs is None:
            gs_idxs = [ uniform(X, geosketch_max, replace=False)
                        if X.shape[0] > geosketch_max else range(X.shape[0])
                        for X in datasets ]
        datasets = [ datasets[i][gs_idx, :] for i, gs_idx in enumerate(gs_idxs) ]
    # One-directional matches from each dataset into all others.
    table = {}
    for i in range(len(datasets)):
        if len(datasets[:i]) > 0:
            fill_table(table, i, datasets[i], datasets[:i], knn=knn,
                       approx=approx)
        if len(datasets[i+1:]) > 0:
            fill_table(table, i, datasets[i], datasets[i+1:],
                       knn=knn, base_ds=i+1, approx=approx)
    # Count all mutual nearest neighbors between datasets.
    matches = {}
    table1 = {}
    if verbose > 1:
        table_print = np.zeros((len(datasets), len(datasets)))
    for i in range(len(datasets)):
        for j in range(len(datasets)):
            if i >= j:
                continue
            if not (i, j) in table or not (j, i) in table:
                continue
            match_ij = table[(i, j)]
            match_ji = set([ (b, a) for a, b in table[(j, i)] ])
            matches[(i, j)] = match_ij & match_ji
            # Score: largest fraction of distinct matched cells on
            # either side of the pair.
            table1[(i, j)] = (max(
                float(len(set([ idx for idx, _ in matches[(i, j)] ]))) /
                datasets[i].shape[0],
                float(len(set([ idx for _, idx in matches[(i, j)] ]))) /
                datasets[j].shape[0]
            ))
            if verbose > 1:
                table_print[i, j] += table1[(i, j)]
            if geosketch:
                # Translate matches within geometric sketches to original indices.
                matches_mnn = matches[(i, j)]
                matches[(i, j)] = [
                    (gs_idxs[i][a], gs_idxs[j][b]) for a, b in matches_mnn
                ]
    if verbose > 1:
        print(table_print)
        return table1, table_print, matches
    else:
        return table1, None, matches
# Find the matching pairs of cells between datasets.
def find_alignments(datasets, knn=KNN, approx=APPROX, verbose=VERBOSE,
                    alpha=ALPHA, prenormalized=False,
                    geosketch=False, geosketch_max=20000):
    """Return dataset pairs whose alignment score exceeds alpha
    (best-scoring first) along with the matched cell pairs."""
    scores, _, matches = find_alignments_table(
        datasets, knn=knn, approx=approx, verbose=verbose,
        prenormalized=prenormalized,
        geosketch=geosketch, geosketch_max=geosketch_max
    )
    # Sort ascending by score, then walk backwards for descending order.
    ranked = reversed(sorted(scores.items(), key=operator.itemgetter(1)))
    alignments = [ pair for pair, score in ranked if score > alpha ]
    return alignments, matches
# Find connections between datasets to identify panoramas.
def connect(datasets, knn=KNN, approx=APPROX, alpha=ALPHA,
            verbose=VERBOSE):
    """Group datasets into panoramas (connected components of the
    alignment graph).

    Returns a list of lists of dataset indices; unaligned datasets each
    form their own singleton panorama.
    """
    # Find alignments.
    alignments, _ = find_alignments(
        datasets, knn=knn, approx=approx, alpha=alpha,
        verbose=verbose
    )
    if verbose:
        print(alignments)
    panoramas = []
    connected = set()
    for i, j in alignments:
        # See if datasets are involved in any current panoramas.
        # Each dataset can belong to at most one panorama.
        panoramas_i = [ panoramas[p] for p in range(len(panoramas))
                        if i in panoramas[p] ]
        assert(len(panoramas_i) <= 1)
        panoramas_j = [ panoramas[p] for p in range(len(panoramas))
                        if j in panoramas[p] ]
        assert(len(panoramas_j) <= 1)
        if len(panoramas_i) == 0 and len(panoramas_j) == 0:
            # Neither is assigned yet: start a new panorama seeded with i;
            # j is added by the elif branch below.
            panoramas.append([ i ])
            panoramas_i = [ panoramas[-1] ]
        if len(panoramas_i) == 0:
            panoramas_j[0].append(i)
        elif len(panoramas_j) == 0:
            panoramas_i[0].append(j)
        elif panoramas_i[0] != panoramas_j[0]:
            # Both assigned to different panoramas: merge them.
            panoramas_i[0] += panoramas_j[0]
            panoramas.remove(panoramas_j[0])
        connected.add(i)
        connected.add(j)
    # Any dataset never aligned becomes its own panorama.
    for i in range(len(datasets)):
        if not i in connected:
            panoramas.append([ i ])
    return panoramas
# To reduce memory usage, split bias computation into batches.
def batch_bias(curr_ds, match_ds, bias, batch_size=None, sigma=SIGMA):
    """Smooth the matched bias vectors over every cell of curr_ds with
    an RBF kernel, optionally processing match_ds in batches."""
    # Unbatched case: one dense kernel, row-normalized so each cell
    # receives a weighted average of the bias vectors.
    if batch_size is None:
        weights = rbf_kernel(curr_ds, match_ds, gamma=0.5*sigma)
        weights = normalize(weights, axis=1, norm='l1')
        return np.dot(weights, bias)
    # Batched case: accumulate unnormalized weighted sums plus the
    # per-cell weight totals, then divide once at the end.
    avg_bias = np.zeros(curr_ds.shape)
    denom = np.zeros(curr_ds.shape[0])
    n_match = match_ds.shape[0]
    for start in range(0, n_match, batch_size):
        chunk = range(start, min(start + batch_size, n_match))
        weights = rbf_kernel(curr_ds, match_ds[chunk, :],
                             gamma=0.5*sigma)
        avg_bias += np.dot(weights, bias[chunk, :])
        denom += np.sum(weights, axis=1)
    # Guard against cells with zero total weight before dividing.
    denom = handle_zeros_in_scale(denom, copy=False)
    avg_bias /= denom[:, np.newaxis]
    return avg_bias
# Compute nonlinear translation vectors between dataset
# and a reference.
def transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=SIGMA, cn=False,
              batch_size=None):
    """Compute smoothed translation vectors moving curr_ds toward
    curr_ref, based on the matched index pairs (ds_ind, ref_ind).

    When `cn` is True the inputs are sparse expression matrices and the
    result is returned as a csr_matrix. On oversmoothing or memory
    exhaustion a zero correction is returned instead of failing.
    """
    # Compute the matching.
    match_ds = curr_ds[ds_ind, :]
    match_ref = curr_ref[ref_ind, :]
    bias = match_ref - match_ds
    if cn:
        # Densify for the kernel computation below.
        match_ds = match_ds.toarray()
        curr_ds = curr_ds.toarray()
        bias = bias.toarray()
    # Promote RuntimeWarnings to exceptions so a degenerate kernel
    # (oversmoothing) is caught rather than silently producing NaNs.
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            avg_bias = batch_bias(curr_ds, match_ds, bias, sigma=sigma,
                                  batch_size=batch_size)
        except RuntimeWarning:
            sys.stderr.write('WARNING: Oversmoothing detected, refusing to batch '
                             'correct, consider lowering sigma value.\n')
            # Zero correction: leave the dataset unchanged.
            return csr_matrix(curr_ds.shape, dtype=float)
        except MemoryError:
            if batch_size is None:
                sys.stderr.write('WARNING: Out of memory, consider turning on '
                                 'batched computation with batch_size parameter.\n')
            else:
                sys.stderr.write('WARNING: Out of memory, consider lowering '
                                 'the batch_size parameter.\n')
            return csr_matrix(curr_ds.shape, dtype=float)
    if cn:
        avg_bias = csr_matrix(avg_bias)
    return avg_bias
# Finds alignments between datasets and uses them to construct
# panoramas. "Merges" datasets by correcting gene expression
# values.
def assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,
             sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,
             ds_names=None, batch_size=None,
             geosketch=False, geosketch_max=20000, alignments=None, matches=None):
    """Merge datasets into panoramas by correcting them along alignments.

    Mutates `datasets` (and `expr_datasets`, when given) in place: for
    each alignment, either adds a dataset to an existing panorama or
    merges two panoramas, applying the smoothed correction from
    transform(). Returns the (corrected) `datasets` list.
    """
    if len(datasets) == 1:
        return datasets
    if alignments is None and matches is None:
        alignments, matches = find_alignments(
            datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,
            geosketch=geosketch, geosketch_max=geosketch_max
        )
    ds_assembled = {}
    panoramas = []
    for i, j in alignments:
        if verbose:
            if ds_names is None:
                print('Processing datasets {}'.format((i, j)))
            else:
                print('Processing datasets {} <=> {}'.
                      format(ds_names[i], ds_names[j]))
        # Only consider a dataset a fixed amount of times.
        if not i in ds_assembled:
            ds_assembled[i] = 0
        ds_assembled[i] += 1
        if not j in ds_assembled:
            ds_assembled[j] = 0
        ds_assembled[j] += 1
        if ds_assembled[i] > 3 and ds_assembled[j] > 3:
            continue
        # See if datasets are involved in any current panoramas.
        panoramas_i = [ panoramas[p] for p in range(len(panoramas))
                        if i in panoramas[p] ]
        assert(len(panoramas_i) <= 1)
        panoramas_j = [ panoramas[p] for p in range(len(panoramas))
                        if j in panoramas[p] ]
        assert(len(panoramas_j) <= 1)
        if len(panoramas_i) == 0 and len(panoramas_j) == 0:
            # Neither dataset assigned yet: seed a new panorama with the
            # larger of the two (the smaller is then mapped onto it).
            if datasets[i].shape[0] < datasets[j].shape[0]:
                i, j = j, i
            panoramas.append([ i ])
            panoramas_i = [ panoramas[-1] ]
        # Map dataset i to panorama j.
        if len(panoramas_i) == 0:
            curr_ds = datasets[i]
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])
            # Collect matched pairs, offsetting reference indices by the
            # position of each dataset within the concatenation.
            match = []
            base = 0
            for p in panoramas_j[0]:
                if i < p and (i, p) in matches:
                    match.extend([ (a, b + base) for a, b in matches[(i, p)] ])
                elif i > p and (p, i) in matches:
                    match.extend([ (b, a + base) for a, b in matches[(p, i)] ])
                base += datasets[p].shape[0]
            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            datasets[i] = curr_ds + bias
            if expr_datasets:
                # Apply the same matching to the expression-space matrices.
                curr_ds = expr_datasets[i]
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_j[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
                                 sigma=sigma, cn=True, batch_size=batch_size)
                expr_datasets[i] = curr_ds + bias
            panoramas_j[0].append(i)
        # Map dataset j to panorama i.
        elif len(panoramas_j) == 0:
            curr_ds = datasets[j]
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])
            match = []
            base = 0
            for p in panoramas_i[0]:
                if j < p and (j, p) in matches:
                    match.extend([ (a, b + base) for a, b in matches[(j, p)] ])
                elif j > p and (p, j) in matches:
                    match.extend([ (b, a + base) for a, b in matches[(p, j)] ])
                base += datasets[p].shape[0]
            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            datasets[j] = curr_ds + bias
            if expr_datasets:
                curr_ds = expr_datasets[j]
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_i[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                                 cn=True, batch_size=batch_size)
                expr_datasets[j] = curr_ds + bias
            panoramas_i[0].append(j)
        # Merge two panoramas together.
        else:
            curr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])
            # Find base indices into each panorama.
            base_i = 0
            for p in panoramas_i[0]:
                if p == i: break
                base_i += datasets[p].shape[0]
            base_j = 0
            for p in panoramas_j[0]:
                if p == j: break
                base_j += datasets[p].shape[0]
            # Find matching indices.
            match = []
            base = 0
            for p in panoramas_i[0]:
                if p == i and j < p and (j, p) in matches:
                    match.extend([ (b + base, a + base_j)
                                   for a, b in matches[(j, p)] ])
                elif p == i and j > p and (p, j) in matches:
                    match.extend([ (a + base, b + base_j)
                                   for a, b in matches[(p, j)] ])
                base += datasets[p].shape[0]
            base = 0
            for p in panoramas_j[0]:
                if p == j and i < p and (i, p) in matches:
                    match.extend([ (a + base_i, b + base)
                                   for a, b in matches[(i, p)] ])
                elif p == j and i > p and (p, i) in matches:
                    match.extend([ (b + base_i, a + base)
                                   for a, b in matches[(p, i)] ])
                base += datasets[p].shape[0]
            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]
            # Apply transformation to entire panorama.
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            curr_ds += bias
            # Write the corrected rows back into the member datasets.
            base = 0
            for p in panoramas_i[0]:
                n_cells = datasets[p].shape[0]
                datasets[p] = curr_ds[base:(base + n_cells), :]
                base += n_cells
            if not expr_datasets is None:
                curr_ds = vstack([ expr_datasets[p]
                                   for p in panoramas_i[0] ])
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_j[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
                                 sigma=sigma, cn=True, batch_size=batch_size)
                curr_ds += bias
                base = 0
                for p in panoramas_i[0]:
                    n_cells = expr_datasets[p].shape[0]
                    expr_datasets[p] = curr_ds[base:(base + n_cells), :]
                    base += n_cells
            # Merge panoramas i and j and delete one.
            if panoramas_i[0] != panoramas_j[0]:
                panoramas_i[0] += panoramas_j[0]
                panoramas.remove(panoramas_j[0])
        # Visualize.
        if view_match:
            plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)
    return datasets
# Non-optimal dataset assembly. Simply accumulate datasets into a
# reference.
def assemble_accum(datasets, verbose=VERBOSE, knn=KNN, sigma=SIGMA,
                   approx=APPROX, batch_size=None):
    """Naive sequential assembly: correct each dataset against the
    concatenation of all previously processed datasets (in place)."""
    if len(datasets) == 1:
        return datasets
    for j in range(1, len(datasets)):
        i = j - 1
        if verbose:
            print('Processing datasets {}'.format((i, j)))
        curr = datasets[j]
        reference = np.concatenate(datasets[:j])
        # Mutual nearest neighbors drive the correction vectors.
        pairs = mnn(curr, reference, knn=knn, approx=approx)
        curr_idx = [ a for a, _ in pairs ]
        ref_idx = [ b for _, b in pairs ]
        bias = transform(curr, reference, curr_idx, ref_idx, sigma=sigma,
                         batch_size=batch_size)
        datasets[j] = curr + bias
    return datasets
def interpret_alignments(datasets, expr_datasets, genes,
                         verbose=VERBOSE, knn=KNN, approx=APPROX,
                         alpha=ALPHA, n_permutations=None):
    """Permutation test for which genes drive each pairwise alignment.

    For every aligned dataset pair, compares the mean absolute
    expression shift over matched cells against a null distribution
    built from randomly paired cells, and prints a per-gene p-value.
    """
    if n_permutations is None:
        n_permutations = len(genes) * 30
    # np.random.randint's `size` and np.tile's `reps` must be integers;
    # the original passed a float, which modern NumPy rejects.
    n_permutations = int(n_permutations)
    alignments, matches = find_alignments(
        datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose
    )
    for i, j in alignments:
        # Compute average bias vector that aligns two datasets together.
        ds_i = expr_datasets[i]
        ds_j = expr_datasets[j]
        # matches is keyed with the smaller dataset index first.
        if i < j:
            match = matches[(i, j)]
        else:
            match = matches[(j, i)]
        i_ind = [ a for a, _ in match ]
        j_ind = [ b for _, b in match ]
        avg_bias = np.absolute(
            np.mean(ds_j[j_ind, :] - ds_i[i_ind, :], axis=0)
        )
        # Construct null distribution and compute p-value.
        null_bias = (
            ds_j[np.random.randint(ds_j.shape[0], size=n_permutations), :] -
            ds_i[np.random.randint(ds_i.shape[0], size=n_permutations), :]
        )
        # Add-one smoothing keeps p-values strictly positive.
        p = ((np.sum(np.greater_equal(
            np.absolute(np.tile(avg_bias, (n_permutations, 1))),
            np.absolute(null_bias)
        ), axis=0, dtype=float) + 1) / (n_permutations + 1))
        print('>>>> Stats for alignment {}'.format((i, j)))
        for k in range(len(p)):
            print('{}\t{}'.format(genes[k], p[k]))
|
brianhie/scanorama
|
scanorama/scanorama.py
|
correct_scanpy
|
python
|
def correct_scanpy(adatas, **kwargs):
    """Batch correct a list of `scanpy.api.AnnData` via `correct()`,
    writing corrected values into each object's `.X` field; optionally
    also returns the integrated low-dimensional embeddings."""
    # Truthy check mirrors "'return_dimred' in kwargs and kwargs[...]".
    want_dimred = bool(kwargs.get('return_dimred'))
    exprs = [ adata.X for adata in adatas ]
    gene_lists = [ adata.var_names.values for adata in adatas ]
    if want_dimred:
        datasets_dimred, datasets, _ = correct(exprs, gene_lists, **kwargs)
    else:
        datasets, _ = correct(exprs, gene_lists, **kwargs)
    new_adatas = []
    for ds, adata in zip(datasets, adatas):
        adata.X = ds
        new_adatas.append(adata)
    if want_dimred:
        return datasets_dimred, new_adatas
    return new_adatas
|
Batch correct a list of `scanpy.api.AnnData`.
Parameters
----------
adatas : `list` of `scanpy.api.AnnData`
Data sets to integrate and/or correct.
kwargs : `dict`
See documentation for the `correct()` method for a full list of
parameters to use for batch correction.
Returns
-------
corrected
By default (`return_dimred=False`), returns a list of
`scanpy.api.AnnData` with batch corrected values in the `.X` field.
corrected, integrated
    When `return_dimred=True`, returns a two-tuple containing a list of
`np.ndarray` with integrated low-dimensional embeddings and a list
of `scanpy.api.AnnData` with batch corrected values in the `.X`
field.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/scanorama.py#L172-L216
|
[
"def correct(datasets_full, genes_list, return_dimred=False,\n batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None,\n dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN,\n return_dense=False, hvg=None, union=False,\n geosketch=False, geosketch_max=20000):\n \"\"\"Integrate and batch correct a list of data sets.\n\n Parameters\n ----------\n datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`\n Data sets to integrate and correct.\n genes_list: `list` of `list` of `string`\n List of genes for each data set.\n return_dimred: `bool`, optional (default: `False`)\n In addition to returning batch corrected matrices, also returns\n integrated low-dimesional embeddings.\n batch_size: `int`, optional (default: `5000`)\n The batch size used in the alignment vector computation. Useful when\n correcting very large (>100k samples) data sets. Set to large value\n that runs within available memory.\n verbose: `bool` or `int`, optional (default: 2)\n When `True` or not equal to 0, prints logging output.\n ds_names: `list` of `string`, optional\n When `verbose=True`, reports data set names in logging output.\n dimred: `int`, optional (default: 100)\n Dimensionality of integrated embedding.\n approx: `bool`, optional (default: `True`)\n Use approximate nearest neighbors, greatly speeds up matching runtime.\n sigma: `float`, optional (default: 15)\n Correction smoothing parameter on Gaussian kernel.\n alpha: `float`, optional (default: 0.10)\n Alignment score minimum cutoff.\n knn: `int`, optional (default: 20)\n Number of nearest neighbors to use for matching.\n return_dense: `bool`, optional (default: `False`)\n Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`.\n hvg: `int`, optional (default: None)\n Use this number of top highly variable genes based on dispersion.\n\n Returns\n -------\n corrected, genes\n By default (`return_dimred=False`), returns a two-tuple containing a\n list of `scipy.sparse.csr_matrix` each with batch 
corrected values,\n and a single list of genes containing the intersection of inputted\n genes.\n\n integrated, corrected, genes\n When `return_dimred=False`, returns a three-tuple containing a list\n of `numpy.ndarray` with integrated low dimensional embeddings, a list\n of `scipy.sparse.csr_matrix` each with batch corrected values, and a\n a single list of genes containing the intersection of inputted genes.\n \"\"\"\n datasets_full = check_datasets(datasets_full)\n\n datasets, genes = merge_datasets(datasets_full, genes_list,\n ds_names=ds_names, union=union)\n datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,\n dimred=dimred)\n\n datasets_dimred = assemble(\n datasets_dimred, # Assemble in low dimensional space.\n expr_datasets=datasets, # Modified in place.\n verbose=verbose, knn=knn, sigma=sigma, approx=approx,\n alpha=alpha, ds_names=ds_names, batch_size=batch_size,\n geosketch=geosketch, geosketch_max=geosketch_max,\n )\n\n if return_dense:\n datasets = [ ds.toarray() for ds in datasets ]\n\n if return_dimred:\n return datasets_dimred, datasets, genes\n\n return datasets, genes\n"
] |
from annoy import AnnoyIndex
from intervaltree import IntervalTree
from itertools import cycle, islice
import numpy as np
import operator
import random
import scipy
from scipy.sparse import csc_matrix, csr_matrix, vstack
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import rbf_kernel, euclidean_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
import sys
import warnings
from .t_sne_approx import TSNEApprox
from .utils import plt, dispersion, reduce_dimensionality
from .utils import visualize_cluster, visualize_expr, visualize_dropout
from .utils import handle_zeros_in_scale
np.random.seed(0)
random.seed(0)
# Default parameters.
ALPHA = 0.10       # Alignment score minimum cutoff.
APPROX = True      # Use approximate (Annoy) nearest-neighbor search.
BATCH_SIZE = 5000  # Batch size for the alignment vector computation.
DIMRED = 100       # Dimensionality of the integrated embedding.
HVG = None         # Highly variable genes to keep (None = keep all).
KNN = 20           # Nearest neighbors used when matching datasets.
N_ITER = 500       # t-SNE iterations for visualization.
PERPLEXITY = 1200  # t-SNE perplexity for visualization.
SIGMA = 15         # Gaussian kernel bandwidth for correction smoothing.
VERBOSE = 2        # Default logging verbosity.
# Do batch correction on a list of data sets.
def correct(datasets_full, genes_list, return_dimred=False,
            batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None,
            dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN,
            return_dense=False, hvg=None, union=False,
            geosketch=False, geosketch_max=20000):
    """Integrate and batch correct a list of data sets.
    Parameters
    ----------
    datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
        Data sets to integrate and correct.
    genes_list: `list` of `list` of `string`
        List of genes for each data set.
    return_dimred: `bool`, optional (default: `False`)
        In addition to returning batch corrected matrices, also returns
        integrated low-dimesional embeddings.
    batch_size: `int`, optional (default: `5000`)
        The batch size used in the alignment vector computation. Useful when
        correcting very large (>100k samples) data sets. Set to large value
        that runs within available memory.
    verbose: `bool` or `int`, optional (default: 2)
        When `True` or not equal to 0, prints logging output.
    ds_names: `list` of `string`, optional
        When `verbose=True`, reports data set names in logging output.
    dimred: `int`, optional (default: 100)
        Dimensionality of integrated embedding.
    approx: `bool`, optional (default: `True`)
        Use approximate nearest neighbors, greatly speeds up matching runtime.
    sigma: `float`, optional (default: 15)
        Correction smoothing parameter on Gaussian kernel.
    alpha: `float`, optional (default: 0.10)
        Alignment score minimum cutoff.
    knn: `int`, optional (default: 20)
        Number of nearest neighbors to use for matching.
    return_dense: `bool`, optional (default: `False`)
        Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`.
    hvg: `int`, optional (default: None)
        Use this number of top highly variable genes based on dispersion.
    union: `bool`, optional (default: `False`)
        Integrate on the union (rather than intersection) of genes.
    Returns
    -------
    corrected, genes
        By default (`return_dimred=False`), returns a two-tuple containing a
        list of `scipy.sparse.csr_matrix` each with batch corrected values,
        and a single list of genes containing the intersection of inputted
        genes.
    integrated, corrected, genes
        When `return_dimred=True`, returns a three-tuple containing a list
        of `numpy.ndarray` with integrated low dimensional embeddings, a list
        of `scipy.sparse.csr_matrix` each with batch corrected values, and
        a single list of genes containing the intersection of inputted genes.
    """
    # Validate input and coerce everything to CSR.
    datasets_full = check_datasets(datasets_full)
    # Align datasets onto a common gene set, then normalize and embed.
    datasets, genes = merge_datasets(datasets_full, genes_list,
                                     ds_names=ds_names, union=union)
    datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
                                          dimred=dimred)
    datasets_dimred = assemble(
        datasets_dimred, # Assemble in low dimensional space.
        expr_datasets=datasets, # Modified in place.
        verbose=verbose, knn=knn, sigma=sigma, approx=approx,
        alpha=alpha, ds_names=ds_names, batch_size=batch_size,
        geosketch=geosketch, geosketch_max=geosketch_max,
    )
    if return_dense:
        datasets = [ ds.toarray() for ds in datasets ]
    if return_dimred:
        return datasets_dimred, datasets, genes
    return datasets, genes
# Integrate a list of data sets.
def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE,
              verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX,
              sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False,
              geosketch_max=20000, n_iter=1, union=False, hvg=None):
    """Integrate a list of data sets.
    Parameters
    ----------
    datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
        Data sets to integrate and correct.
    genes_list: `list` of `list` of `string`
        List of genes for each data set.
    batch_size: `int`, optional (default: `5000`)
        The batch size used in the alignment vector computation. Useful when
        correcting very large (>100k samples) data sets. Set to large value
        that runs within available memory.
    verbose: `bool` or `int`, optional (default: 2)
        When `True` or not equal to 0, prints logging output.
    ds_names: `list` of `string`, optional
        When `verbose=True`, reports data set names in logging output.
    dimred: `int`, optional (default: 100)
        Dimensionality of integrated embedding.
    approx: `bool`, optional (default: `True`)
        Use approximate nearest neighbors, greatly speeds up matching runtime.
    sigma: `float`, optional (default: 15)
        Correction smoothing parameter on Gaussian kernel.
    alpha: `float`, optional (default: 0.10)
        Alignment score minimum cutoff.
    knn: `int`, optional (default: 20)
        Number of nearest neighbors to use for matching.
    n_iter: `int`, optional (default: 1)
        Number of times to repeat the assembly step.
    union: `bool`, optional (default: `False`)
        Integrate on the union (rather than intersection) of genes.
    hvg: `int`, optional (default: None)
        Use this number of top highly variable genes based on dispersion.
    Returns
    -------
    integrated, genes
        Returns a two-tuple containing a list of `numpy.ndarray` with
        integrated low dimensional embeddings and a single list of genes
        containing the intersection of inputted genes.
    """
    datasets_full = check_datasets(datasets_full)
    datasets, genes = merge_datasets(datasets_full, genes_list,
                                     ds_names=ds_names, union=union)
    datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
                                          dimred=dimred)
    # Repeated assembly can further tighten the integration.
    for _ in range(n_iter):
        datasets_dimred = assemble(
            datasets_dimred, # Assemble in low dimensional space.
            verbose=verbose, knn=knn, sigma=sigma, approx=approx,
            alpha=alpha, ds_names=ds_names, batch_size=batch_size,
            geosketch=geosketch, geosketch_max=geosketch_max,
        )
    return datasets_dimred, genes
# Batch correction with scanpy's AnnData object.
# Integration with scanpy's AnnData object.
def integrate_scanpy(adatas, **kwargs):
    """Integrate a list of `scanpy.api.AnnData`.
    Parameters
    ----------
    adatas : `list` of `scanpy.api.AnnData`
        Data sets to integrate.
    kwargs : `dict`
        See documentation for the `integrate()` method for a full list of
        parameters to use for batch correction.
    Returns
    -------
    integrated
        Returns a list of `np.ndarray` with integrated low-dimensional
        embeddings.
    """
    # Pull raw expression and gene lists out of the AnnData objects,
    # then delegate to the matrix-level integrate().
    exprs = [ adata.X for adata in adatas ]
    gene_lists = [ adata.var_names.values for adata in adatas ]
    datasets_dimred, _ = integrate(exprs, gene_lists, **kwargs)
    return datasets_dimred
# Visualize a scatter plot with cluster labels in the
# `cluster' variable.
def plot_clusters(coords, clusters, s=1):
    """Scatter 2-D coordinates colored by integer cluster label."""
    # Coordinates and labels must correspond one-to-one.
    if coords.shape[0] != clusters.shape[0]:
        sys.stderr.write(
            'Error: mismatch, {} cells, {} labels\n'
            .format(coords.shape[0], clusters.shape[0])
        )
        exit(1)
    # Fixed palette, cycled so any number of clusters gets a color.
    palette = [
        '#377eb8', '#ff7f00', '#4daf4a',
        '#f781bf', '#a65628', '#984ea3',
        '#999999', '#e41a1c', '#dede00',
        '#ffe119', '#e6194b', '#ffbea3',
        '#911eb4', '#46f0f0', '#f032e6',
        '#d2f53c', '#008080', '#e6beff',
        '#aa6e28', '#800000', '#aaffc3',
        '#808000', '#ffd8b1', '#000080',
        '#808080', '#fabebe', '#a3f4ff'
    ]
    n_colors = int(max(clusters) + 1)
    colors = np.array(list(islice(cycle(palette), n_colors)))
    plt.figure()
    plt.scatter(coords[:, 0], coords[:, 1],
                c=colors[clusters], s=s)
# Put datasets into a single matrix with the intersection of all genes.
def merge_datasets(datasets, genes, ds_names=None, verbose=True,
                   union=False):
    """Restrict all datasets to a common gene set (intersection by
    default, union when `union=True`), mutating `datasets` in place.

    Returns (datasets, ret_genes) where ret_genes is the sorted array
    of retained gene names; each dataset's columns follow that order.
    """
    if union:
        sys.stderr.write(
            'WARNING: Integrating based on the union of genes is '
            'highly discouraged, consider taking the intersection '
            'or requantifying gene expression.\n'
        )
    # Find genes in common.
    keep_genes = set()
    for idx, gene_list in enumerate(genes):
        if len(keep_genes) == 0:
            keep_genes = set(gene_list)
        elif union:
            keep_genes |= set(gene_list)
        else:
            keep_genes &= set(gene_list)
        if not union and not ds_names is None and verbose:
            print('After {}: {} genes'.format(ds_names[idx], len(keep_genes)))
        if len(keep_genes) == 0:
            print('Error: No genes found in all datasets, exiting...')
            exit(1)
    if verbose:
        print('Found {} genes among all datasets'
              .format(len(keep_genes)))
    if union:
        union_genes = sorted(keep_genes)
        for i in range(len(datasets)):
            if verbose:
                print('Processing data set {}'.format(i))
            # Rebuild each matrix column-by-column over the union,
            # leaving zeros for genes a dataset does not measure.
            X_new = np.zeros((datasets[i].shape[0], len(union_genes)))
            X_old = csc_matrix(datasets[i])
            gene_to_idx = { gene: idx for idx, gene in enumerate(genes[i]) }
            for j, gene in enumerate(union_genes):
                if gene in gene_to_idx:
                    X_new[:, j] = X_old[:, gene_to_idx[gene]].toarray().flatten()
            datasets[i] = csr_matrix(X_new)
        ret_genes = np.array(union_genes)
    else:
        # Only keep genes in common.
        ret_genes = np.array(sorted(keep_genes))
        for i in range(len(datasets)):
            # Remove duplicate genes.
            uniq_genes, uniq_idx = np.unique(genes[i], return_index=True)
            datasets[i] = datasets[i][:, uniq_idx]
            # Do gene filtering.
            # Sorting gene indices makes column order match ret_genes.
            gene_sort_idx = np.argsort(uniq_genes)
            gene_idx = [ idx for idx in gene_sort_idx
                         if uniq_genes[idx] in keep_genes ]
            datasets[i] = datasets[i][:, gene_idx]
            assert(np.array_equal(uniq_genes[gene_idx], ret_genes))
    return datasets, ret_genes
def check_datasets(datasets_full):
    """Validate input data sets and coerce them to CSR format.

    Dense `numpy.ndarray` inputs are converted to
    `scipy.sparse.csr_matrix`; CSR inputs pass through unchanged; any
    other type is reported on stderr and aborts the process.
    """
    datasets_new = []
    for ds in datasets_full:
        if isinstance(ds, np.ndarray):
            datasets_new.append(csr_matrix(ds))
        elif isinstance(ds, csr_matrix):
            # Check against the public class; the private path
            # `scipy.sparse.csr.csr_matrix` was removed in modern SciPy.
            datasets_new.append(ds)
        else:
            sys.stderr.write('ERROR: Data sets must be numpy array or '
                             'scipy.sparse.csr_matrix, received type '
                             '{}.\n'.format(type(ds)))
            exit(1)
    return datasets_new
# Randomized SVD.
def dimensionality_reduce(datasets, dimred=DIMRED):
    """Stack all datasets, reduce with randomized SVD, and split the
    embedding back into one matrix per dataset."""
    stacked = reduce_dimensionality(vstack(datasets), dim_red_k=dimred)
    datasets_dimred = []
    offset = 0
    for ds in datasets:
        n_cells = ds.shape[0]
        datasets_dimred.append(stacked[offset:(offset + n_cells), :])
        offset += n_cells
    return datasets_dimred
# Normalize and reduce dimensionality.
def process_data(datasets, genes, hvg=HVG, dimred=DIMRED, verbose=False):
    """Optionally filter to highly variable genes, L2-normalize each
    cell, and (when dimred > 0) reduce dimensionality.

    Mutates `datasets` in place; returns (embedded_or_normalized
    datasets, genes).
    """
    # Only keep highly variable genes
    if not hvg is None and hvg > 0 and hvg < len(genes):
        if verbose:
            print('Highly variable filter...')
        X = vstack(datasets)
        disp = dispersion(X)
        # Genes with the largest dispersion first.
        highest_disp_idx = np.argsort(disp[0])[::-1]
        top_genes = set(genes[highest_disp_idx[range(hvg)]])
        for i in range(len(datasets)):
            # NOTE(review): gene_idx is identical for every dataset and
            # could be hoisted out of this loop.
            gene_idx = [ idx for idx, g_i in enumerate(genes)
                         if g_i in top_genes ]
            datasets[i] = datasets[i][:, gene_idx]
        # Assumes the incoming `genes` array is already sorted (as
        # produced by merge_datasets) so columns stay aligned with labels.
        genes = np.array(sorted(top_genes))
    # Normalize.
    if verbose:
        print('Normalizing...')
    for i, ds in enumerate(datasets):
        datasets[i] = normalize(ds, axis=1)
    # Compute compressed embedding.
    if dimred > 0:
        if verbose:
            print('Reducing dimension...')
        datasets_dimred = dimensionality_reduce(datasets, dimred=dimred)
        if verbose:
            print('Done processing.')
        return datasets_dimred, genes
    if verbose:
        print('Done processing.')
    return datasets, genes
# Plot t-SNE visualization.
def visualize(assembled, labels, namespace, data_names,
              gene_names=None, gene_expr=None, genes=None,
              n_iter=N_ITER, perplexity=PERPLEXITY, verbose=VERBOSE,
              learn_rate=200., early_exag=12., embedding=None,
              shuffle_ds=False, size=1, multicore_tsne=True,
              image_suffix='.svg', viz_cluster=False):
    """Embed the assembled panorama with t-SNE and save cluster (and,
    optionally, per-cluster and per-gene expression) plots.

    Returns the 2-D embedding so callers can reuse it.
    """
    # Fit t-SNE.
    if embedding is None:
        try:
            from MulticoreTSNE import MulticoreTSNE
            tsne = MulticoreTSNE(
                n_iter=n_iter, perplexity=perplexity,
                verbose=verbose, random_state=69,
                learning_rate=learn_rate,
                early_exaggeration=early_exag,
                n_jobs=40
            )
        except ImportError:
            multicore_tsne = False
        if not multicore_tsne:
            tsne = TSNEApprox(
                n_iter=n_iter, perplexity=perplexity,
                verbose=verbose, random_state=69,
                learning_rate=learn_rate,
                early_exaggeration=early_exag
            )
        tsne.fit(np.concatenate(assembled))
        embedding = tsne.embedding_
    if shuffle_ds:
        # Shuffle the index list itself; the original shuffled a
        # throwaway copy (random.shuffle(list(range(...)))), so the
        # embedding was never actually permuted.
        rand_idx = list(range(embedding.shape[0]))
        random.shuffle(rand_idx)
        embedding = embedding[rand_idx, :]
        labels = labels[rand_idx]
    # Plot clusters together.
    plot_clusters(embedding, labels, s=size)
    plt.title(('Panorama ({} iter, perplexity: {}, sigma: {}, ' +
               'knn: {}, hvg: {}, dimred: {}, approx: {})')
              .format(n_iter, perplexity, SIGMA, KNN, HVG,
                      DIMRED, APPROX))
    plt.savefig(namespace + image_suffix, dpi=500)
    # Plot clusters individually.
    if viz_cluster and not shuffle_ds:
        for i in range(len(data_names)):
            visualize_cluster(embedding, i, labels,
                              cluster_name=data_names[i], size=size,
                              viz_prefix=namespace,
                              image_suffix=image_suffix)
    # Plot gene expression levels.
    if (not gene_names is None) and \
       (not gene_expr is None) and \
       (not genes is None):
        if shuffle_ds:
            # Keep expression rows aligned with the shuffled embedding.
            gene_expr = gene_expr[rand_idx, :]
        for gene_name in gene_names:
            visualize_expr(gene_expr, embedding,
                           genes, gene_name, size=size,
                           viz_prefix=namespace,
                           image_suffix=image_suffix)
    return embedding
# Exact nearest neighbors search.
def nn(ds1, ds2, knn=KNN, metric_p=2):
    """Return the set of (i, j) pairs linking each row i of ds1 to the
    indices j of its `knn` nearest rows in ds2 (Minkowski p-metric)."""
    index = NearestNeighbors(knn, p=metric_p)
    index.fit(ds2)
    neighbors = index.kneighbors(ds1, return_distance=False)
    return set(
        (row, col)
        for row, cols in enumerate(neighbors)
        for col in cols
    )
# Approximate nearest neighbors using locality sensitive hashing.
def nn_approx(ds1, ds2, knn=KNN, metric='manhattan', n_trees=10):
    """Approximate variant of nn() backed by an Annoy index over ds2."""
    # Index the reference dataset.
    index = AnnoyIndex(ds2.shape[1], metric=metric)
    for item in range(ds2.shape[0]):
        index.add_item(item, ds2[item, :])
    index.build(n_trees)

    # Query with every row of ds1.
    ind = np.array([
        index.get_nns_by_vector(ds1[row, :], knn, search_k=-1)
        for row in range(ds1.shape[0])
    ])

    # Pair each query row with its neighbor indices.
    return set(
        (row, col)
        for row, cols in enumerate(ind)
        for col in cols
    )
# Find mutual nearest neighbors.
def mnn(ds1, ds2, knn=KNN, approx=APPROX):
    """Return (i, j) pairs where row i of ds1 and row j of ds2 are each
    within the other's `knn` nearest neighbors."""
    finder = nn_approx if approx else nn
    # Nearest neighbors in both directions.
    forward = finder(ds1, ds2, knn=knn)
    backward = finder(ds2, ds1, knn=knn)
    # Keep only pairs present in both directions.
    return forward & { (j, i) for i, j in backward }
# Visualize alignment between two datasets.
def plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind):
    """Scatter t-SNE embeddings of both datasets (dataset shifted up by
    100 units) and draw one line per matched cell pair."""
    tsne = TSNE(n_iter=400, verbose=VERBOSE, random_state=69)

    tsne.fit(curr_ds)
    plt.figure()
    coords_ds = tsne.embedding_[:, :]
    coords_ds[:, 1] += 100  # offset so the two point clouds don't overlap
    plt.scatter(coords_ds[:, 0], coords_ds[:, 1])

    tsne.fit(curr_ref)
    coords_ref = tsne.embedding_[:, :]
    plt.scatter(coords_ref[:, 0], coords_ref[:, 1])

    # Interleave endpoints with None breaks so one plot() call draws all
    # match segments.
    x_list, y_list = [], []
    for a, b in zip(ds_ind, ref_ind):
        x_list += [ coords_ds[a, 0], coords_ref[b, 0], None ]
        y_list += [ coords_ds[a, 1], coords_ref[b, 1], None ]
    plt.plot(x_list, y_list, 'b-', alpha=0.3)
    plt.show()
# Populate a table (in place) that stores mutual nearest neighbors
# between datasets.
def fill_table(table, i, curr_ds, datasets, base_ds=0,
               knn=KNN, approx=APPROX):
    """Add neighbor pairs from dataset `i` to each dataset in `datasets`.

    `table` maps (i, j) -> set of (row_in_i, row_in_j) pairs, where j is
    the reference dataset's global index (`base_ds` + position in
    `datasets`) and row_in_j is local to dataset j.
    """
    # One kNN query against all references stacked into a single matrix.
    curr_ref = np.concatenate(datasets)
    if approx:
        match = nn_approx(curr_ds, curr_ref, knn=knn)
    else:
        match = nn(curr_ds, curr_ref, knn=knn, metric_p=1)

    # Build interval tree.
    # Maps a row position in the stacked reference back to the dataset it
    # came from and to that dataset's starting row offset.
    itree_ds_idx = IntervalTree()
    itree_pos_base = IntervalTree()
    pos = 0
    for j in range(len(datasets)):
        n_cells = datasets[j].shape[0]
        itree_ds_idx[pos:(pos + n_cells)] = base_ds + j
        itree_pos_base[pos:(pos + n_cells)] = pos
        pos += n_cells

    # Store all mutual nearest neighbors between datasets.
    for d, r in match:
        interval = itree_ds_idx[r]
        assert(len(interval) == 1)
        j = interval.pop().data
        interval = itree_pos_base[r]
        assert(len(interval) == 1)
        base = interval.pop().data
        if not (i, j) in table:
            table[(i, j)] = set()
        # Convert the stacked row index r back to a dataset-local index.
        table[(i, j)].add((d, r - base))
        assert(r - base >= 0)
gs_idxs = None
# Fill table of alignment scores.
def find_alignments_table(datasets, knn=KNN, approx=APPROX, verbose=VERBOSE,
                          prenormalized=False, geosketch=False,
                          geosketch_max=20000):
    """Score every dataset pair by mutual-nearest-neighbor overlap.

    Returns (table1, table_print, matches) where table1 maps (i, j) with
    i < j to an alignment score in [0, 1], table_print is a dense score
    matrix (only when verbose > 1, else None), and matches maps (i, j) to
    the mutual nearest neighbor pairs.
    """
    if not prenormalized:
        datasets = [ normalize(ds, axis=1) for ds in datasets ]

    if geosketch:
        # Only match cells in geometric sketches.
        from ample import gs, uniform

        # Sketch indices are cached at module level so repeated calls
        # reuse the same subsample.
        global gs_idxs
        if gs_idxs is None:
            gs_idxs = [ uniform(X, geosketch_max, replace=False)
                        if X.shape[0] > geosketch_max else range(X.shape[0])
                        for X in datasets ]
        datasets = [ datasets[i][gs_idx, :] for i, gs_idx in enumerate(gs_idxs) ]

    # Record kNN pairs from each dataset to all others.
    table = {}
    for i in range(len(datasets)):
        if len(datasets[:i]) > 0:
            fill_table(table, i, datasets[i], datasets[:i], knn=knn,
                       approx=approx)
        if len(datasets[i+1:]) > 0:
            fill_table(table, i, datasets[i], datasets[i+1:],
                       knn=knn, base_ds=i+1, approx=approx)

    # Count all mutual nearest neighbors between datasets.
    matches = {}
    table1 = {}
    if verbose > 1:
        table_print = np.zeros((len(datasets), len(datasets)))
    for i in range(len(datasets)):
        for j in range(len(datasets)):
            if i >= j:
                continue
            if not (i, j) in table or not (j, i) in table:
                continue
            match_ij = table[(i, j)]
            match_ji = set([ (b, a) for a, b in table[(j, i)] ])
            matches[(i, j)] = match_ij & match_ji

            # Score is the larger fraction of matched cells in either
            # dataset.
            table1[(i, j)] = (max(
                float(len(set([ idx for idx, _ in matches[(i, j)] ]))) /
                datasets[i].shape[0],
                float(len(set([ idx for _, idx in matches[(i, j)] ]))) /
                datasets[j].shape[0]
            ))
            if verbose > 1:
                table_print[i, j] += table1[(i, j)]

            if geosketch:
                # Translate matches within geometric sketches to original indices.
                matches_mnn = matches[(i, j)]
                matches[(i, j)] = [
                    (gs_idxs[i][a], gs_idxs[j][b]) for a, b in matches_mnn
                ]

    if verbose > 1:
        print(table_print)
        return table1, table_print, matches
    else:
        return table1, None, matches
# Find the matching pairs of cells between datasets.
def find_alignments(datasets, knn=KNN, approx=APPROX, verbose=VERBOSE,
                    alpha=ALPHA, prenormalized=False,
                    geosketch=False, geosketch_max=20000):
    """Return dataset pairs whose alignment score exceeds `alpha`, sorted
    by descending score, together with their matched cell pairs."""
    scores, _, matches = find_alignments_table(
        datasets, knn=knn, approx=approx, verbose=verbose,
        prenormalized=prenormalized,
        geosketch=geosketch, geosketch_max=geosketch_max
    )

    ranked = reversed(sorted(scores.items(), key=operator.itemgetter(1)))
    alignments = [ pair for pair, score in ranked if score > alpha ]

    return alignments, matches
# Find connections between datasets to identify panoramas.
def connect(datasets, knn=KNN, approx=APPROX, alpha=ALPHA,
            verbose=VERBOSE):
    """Group dataset indices into panoramas (connected components of the
    alignment graph) and return them as a list of lists of indices."""
    # Find alignments.
    alignments, _ = find_alignments(
        datasets, knn=knn, approx=approx, alpha=alpha,
        verbose=verbose
    )
    if verbose:
        print(alignments)

    panoramas = []
    connected = set()
    for i, j in alignments:
        # See if datasets are involved in any current panoramas.
        # A dataset can appear in at most one panorama at a time.
        panoramas_i = [ panoramas[p] for p in range(len(panoramas))
                        if i in panoramas[p] ]
        assert(len(panoramas_i) <= 1)
        panoramas_j = [ panoramas[p] for p in range(len(panoramas))
                        if j in panoramas[p] ]
        assert(len(panoramas_j) <= 1)

        if len(panoramas_i) == 0 and len(panoramas_j) == 0:
            # Neither dataset seen yet: seed a new panorama with i.
            panoramas.append([ i ])
            panoramas_i = [ panoramas[-1] ]

        if len(panoramas_i) == 0:
            panoramas_j[0].append(i)
        elif len(panoramas_j) == 0:
            panoramas_i[0].append(j)
        elif panoramas_i[0] != panoramas_j[0]:
            # Both already placed in different panoramas: merge them.
            panoramas_i[0] += panoramas_j[0]
            panoramas.remove(panoramas_j[0])

        connected.add(i)
        connected.add(j)

    for i in range(len(datasets)):
        if not i in connected:
            # Unaligned datasets become singleton panoramas.
            panoramas.append([ i ])

    return panoramas
# To reduce memory usage, split bias computation into batches.
def batch_bias(curr_ds, match_ds, bias, batch_size=None, sigma=SIGMA):
    """Gaussian-kernel-smooth the `bias` vectors over all of `curr_ds`.

    Each output row is the RBF-weighted average of `bias` rows, weighted
    by the kernel between that row of `curr_ds` and the matched rows in
    `match_ds`. `batch_size=None` materializes the full kernel at once;
    otherwise `match_ds` is processed in chunks with the normalization
    accumulated incrementally.
    """
    if batch_size is None:
        # NOTE(review): gamma=0.5*sigma treats sigma as an inverse
        # bandwidth (larger sigma = narrower kernel) — confirm intended.
        weights = rbf_kernel(curr_ds, match_ds, gamma=0.5*sigma)
        # Row-normalize so the result is a convex combination of biases.
        weights = normalize(weights, axis=1, norm='l1')
        avg_bias = np.dot(weights, bias)
        return avg_bias

    base = 0
    avg_bias = np.zeros(curr_ds.shape)
    denom = np.zeros(curr_ds.shape[0])
    while base < match_ds.shape[0]:
        batch_idx = range(
            base, min(base + batch_size, match_ds.shape[0])
        )
        weights = rbf_kernel(curr_ds, match_ds[batch_idx, :],
                             gamma=0.5*sigma)
        avg_bias += np.dot(weights, bias[batch_idx, :])
        denom += np.sum(weights, axis=1)
        base += batch_size

    # Guard against rows whose total kernel weight is (near) zero.
    denom = handle_zeros_in_scale(denom, copy=False)
    avg_bias /= denom[:, np.newaxis]

    return avg_bias
# Compute nonlinear translation vectors between dataset
# and a reference.
def transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=SIGMA, cn=False,
              batch_size=None):
    """Return per-cell correction vectors moving `curr_ds` toward `curr_ref`.

    `ds_ind` / `ref_ind` are parallel lists of matched row indices. With
    `cn=True` the inputs are sparse expression matrices and the result is
    returned sparse. On oversmoothing (a RuntimeWarning from the kernel
    math) or MemoryError, a zero correction is returned rather than
    raising.
    """
    # Compute the matching.
    match_ds = curr_ds[ds_ind, :]
    match_ref = curr_ref[ref_ind, :]
    bias = match_ref - match_ds
    if cn:
        match_ds = match_ds.toarray()
        curr_ds = curr_ds.toarray()
        bias = bias.toarray()

    with warnings.catch_warnings():
        # Promote numeric warnings to errors so oversmoothing (vanishing
        # kernel weights) is caught below instead of silently corrupting
        # the correction.
        warnings.filterwarnings('error')
        try:
            avg_bias = batch_bias(curr_ds, match_ds, bias, sigma=sigma,
                                  batch_size=batch_size)
        except RuntimeWarning:
            sys.stderr.write('WARNING: Oversmoothing detected, refusing to batch '
                             'correct, consider lowering sigma value.\n')
            return csr_matrix(curr_ds.shape, dtype=float)
        except MemoryError:
            if batch_size is None:
                sys.stderr.write('WARNING: Out of memory, consider turning on '
                                 'batched computation with batch_size parameter.\n')
            else:
                sys.stderr.write('WARNING: Out of memory, consider lowering '
                                 'the batch_size parameter.\n')
            return csr_matrix(curr_ds.shape, dtype=float)

    if cn:
        avg_bias = csr_matrix(avg_bias)

    return avg_bias
# Finds alignments between datasets and uses them to construct
# panoramas. "Merges" datasets by correcting gene expression
# values.
def assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,
             sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,
             ds_names=None, batch_size=None,
             geosketch=False, geosketch_max=20000, alignments=None, matches=None):
    """Merge `datasets` (modified in place) along their alignments.

    Aligned pairs are processed in descending score order, growing
    "panoramas" (connected groups of already-corrected datasets). When
    `expr_datasets` is provided, identical corrections are applied to the
    full expression matrices (also in place). Returns `datasets`.
    """
    if len(datasets) == 1:
        return datasets

    if alignments is None and matches is None:
        alignments, matches = find_alignments(
            datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,
            geosketch=geosketch, geosketch_max=geosketch_max
        )

    ds_assembled = {}
    panoramas = []
    for i, j in alignments:
        if verbose:
            if ds_names is None:
                print('Processing datasets {}'.format((i, j)))
            else:
                print('Processing datasets {} <=> {}'.
                      format(ds_names[i], ds_names[j]))

        # Only consider a dataset a fixed amount of times.
        # Caps repeated re-correction of the same dataset at 3 passes.
        if not i in ds_assembled:
            ds_assembled[i] = 0
        ds_assembled[i] += 1
        if not j in ds_assembled:
            ds_assembled[j] = 0
        ds_assembled[j] += 1
        if ds_assembled[i] > 3 and ds_assembled[j] > 3:
            continue

        # See if datasets are involved in any current panoramas.
        panoramas_i = [ panoramas[p] for p in range(len(panoramas))
                        if i in panoramas[p] ]
        assert(len(panoramas_i) <= 1)
        panoramas_j = [ panoramas[p] for p in range(len(panoramas))
                        if j in panoramas[p] ]
        assert(len(panoramas_j) <= 1)

        if len(panoramas_i) == 0 and len(panoramas_j) == 0:
            # Seed a new panorama with the larger dataset as its anchor.
            if datasets[i].shape[0] < datasets[j].shape[0]:
                i, j = j, i
            panoramas.append([ i ])
            panoramas_i = [ panoramas[-1] ]

        # Map dataset i to panorama j.
        if len(panoramas_i) == 0:
            curr_ds = datasets[i]
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])

            # Collect matched pairs, translating per-dataset row indices
            # into positions within the concatenated reference.
            match = []
            base = 0
            for p in panoramas_j[0]:
                if i < p and (i, p) in matches:
                    match.extend([ (a, b + base) for a, b in matches[(i, p)] ])
                elif i > p and (p, i) in matches:
                    match.extend([ (b, a + base) for a, b in matches[(p, i)] ])
                base += datasets[p].shape[0]

            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]

            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            datasets[i] = curr_ds + bias

            if expr_datasets:
                curr_ds = expr_datasets[i]
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_j[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
                                 sigma=sigma, cn=True, batch_size=batch_size)
                expr_datasets[i] = curr_ds + bias

            panoramas_j[0].append(i)

        # Map dataset j to panorama i.
        elif len(panoramas_j) == 0:
            curr_ds = datasets[j]
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])

            match = []
            base = 0
            for p in panoramas_i[0]:
                if j < p and (j, p) in matches:
                    match.extend([ (a, b + base) for a, b in matches[(j, p)] ])
                elif j > p and (p, j) in matches:
                    match.extend([ (b, a + base) for a, b in matches[(p, j)] ])
                base += datasets[p].shape[0]

            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]

            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            datasets[j] = curr_ds + bias

            if expr_datasets:
                curr_ds = expr_datasets[j]
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_i[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                                 cn=True, batch_size=batch_size)
                expr_datasets[j] = curr_ds + bias

            panoramas_i[0].append(j)

        # Merge two panoramas together.
        else:
            curr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])

            # Find base indices into each panorama.
            base_i = 0
            for p in panoramas_i[0]:
                if p == i: break
                base_i += datasets[p].shape[0]
            base_j = 0
            for p in panoramas_j[0]:
                if p == j: break
                base_j += datasets[p].shape[0]

            # Find matching indices.
            # Matches between i and j are expressed in both panoramas'
            # concatenated coordinates.
            match = []
            base = 0
            for p in panoramas_i[0]:
                if p == i and j < p and (j, p) in matches:
                    match.extend([ (b + base, a + base_j)
                                   for a, b in matches[(j, p)] ])
                elif p == i and j > p and (p, j) in matches:
                    match.extend([ (a + base, b + base_j)
                                   for a, b in matches[(p, j)] ])
                base += datasets[p].shape[0]
            base = 0
            for p in panoramas_j[0]:
                if p == j and i < p and (i, p) in matches:
                    match.extend([ (a + base_i, b + base)
                                   for a, b in matches[(i, p)] ])
                elif p == j and i > p and (p, i) in matches:
                    match.extend([ (b + base_i, a + base)
                                   for a, b in matches[(p, i)] ])
                base += datasets[p].shape[0]

            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]

            # Apply transformation to entire panorama.
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            curr_ds += bias

            # Write the corrected rows back into the individual datasets.
            base = 0
            for p in panoramas_i[0]:
                n_cells = datasets[p].shape[0]
                datasets[p] = curr_ds[base:(base + n_cells), :]
                base += n_cells

            if not expr_datasets is None:
                curr_ds = vstack([ expr_datasets[p]
                                   for p in panoramas_i[0] ])
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_j[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
                                 sigma=sigma, cn=True, batch_size=batch_size)
                curr_ds += bias
                base = 0
                for p in panoramas_i[0]:
                    n_cells = expr_datasets[p].shape[0]
                    expr_datasets[p] = curr_ds[base:(base + n_cells), :]
                    base += n_cells

            # Merge panoramas i and j and delete one.
            if panoramas_i[0] != panoramas_j[0]:
                panoramas_i[0] += panoramas_j[0]
                panoramas.remove(panoramas_j[0])

        # Visualize.
        if view_match:
            plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)

    return datasets
# Non-optimal dataset assembly. Simply accumulate datasets into a
# reference.
def assemble_accum(datasets, verbose=VERBOSE, knn=KNN, sigma=SIGMA,
                   approx=APPROX, batch_size=None):
    """Sequentially fold each dataset into the accumulated (already
    corrected) reference formed by all preceding datasets (in place)."""
    if len(datasets) == 1:
        return datasets

    for j in range(1, len(datasets)):
        i = j - 1
        if verbose:
            print('Processing datasets {}'.format((i, j)))

        target = datasets[j]
        # Reference includes every previously corrected dataset.
        reference = np.concatenate(datasets[:j])

        pairs = mnn(target, reference, knn=knn, approx=approx)
        ds_ind = [ a for a, _ in pairs ]
        ref_ind = [ b for _, b in pairs ]

        correction = transform(target, reference, ds_ind, ref_ind,
                               sigma=sigma, batch_size=batch_size)
        datasets[j] = target + correction

    return datasets
def interpret_alignments(datasets, expr_datasets, genes,
                         verbose=VERBOSE, knn=KNN, approx=APPROX,
                         alpha=ALPHA, n_permutations=None):
    """Print per-gene permutation p-values for each dataset alignment.

    For every aligned pair, the mean absolute expression shift across
    matched cells is compared to a null built from randomly paired cells;
    one (gene, p-value) line is printed per gene.
    """
    if n_permutations is None:
        n_permutations = len(genes) * 30
    # BUG FIX: n_permutations was a float (`float(len(genes) * 30)`), but
    # numpy requires an integer for `size=` in randint() and for tile()
    # repetition counts — modern numpy raises TypeError on floats.
    n_permutations = int(n_permutations)

    alignments, matches = find_alignments(
        datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose
    )

    for i, j in alignments:
        # Compute average bias vector that aligns two datasets together.
        ds_i = expr_datasets[i]
        ds_j = expr_datasets[j]
        # NOTE(review): the i > j branch looks inverted (it would swap the
        # roles of i_ind/j_ind), but find_alignments only emits pairs with
        # i < j, so it is unreachable in practice.
        if i < j:
            match = matches[(i, j)]
        else:
            match = matches[(j, i)]
        i_ind = [ a for a, _ in match ]
        j_ind = [ b for _, b in match ]
        avg_bias = np.absolute(
            np.mean(ds_j[j_ind, :] - ds_i[i_ind, :], axis=0)
        )

        # Construct null distribution and compute p-value.
        null_bias = (
            ds_j[np.random.randint(ds_j.shape[0], size=n_permutations), :] -
            ds_i[np.random.randint(ds_i.shape[0], size=n_permutations), :]
        )
        # Add-one smoothing keeps p strictly positive.
        p = ((np.sum(np.greater_equal(
            np.absolute(np.tile(avg_bias, (n_permutations, 1))),
            np.absolute(null_bias)
        ), axis=0, dtype=float) + 1) / (n_permutations + 1))

        print('>>>> Stats for alignment {}'.format((i, j)))
        for k in range(len(p)):
            print('{}\t{}'.format(genes[k], p[k]))
|
brianhie/scanorama
|
scanorama/scanorama.py
|
integrate_scanpy
|
python
|
def integrate_scanpy(adatas, **kwargs):
datasets_dimred, genes = integrate(
[adata.X for adata in adatas],
[adata.var_names.values for adata in adatas],
**kwargs
)
return datasets_dimred
|
Integrate a list of `scanpy.api.AnnData`.
Parameters
----------
adatas : `list` of `scanpy.api.AnnData`
Data sets to integrate.
kwargs : `dict`
See documentation for the `integrate()` method for a full list of
parameters to use for batch correction.
Returns
-------
integrated
Returns a list of `np.ndarray` with integrated low-dimensional
embeddings.
|
train
|
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/scanorama.py#L219-L242
|
[
"def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE,\n verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX,\n sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False,\n geosketch_max=20000, n_iter=1, union=False, hvg=None):\n \"\"\"Integrate a list of data sets.\n\n Parameters\n ----------\n datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`\n Data sets to integrate and correct.\n genes_list: `list` of `list` of `string`\n List of genes for each data set.\n batch_size: `int`, optional (default: `5000`)\n The batch size used in the alignment vector computation. Useful when\n correcting very large (>100k samples) data sets. Set to large value\n that runs within available memory.\n verbose: `bool` or `int`, optional (default: 2)\n When `True` or not equal to 0, prints logging output.\n ds_names: `list` of `string`, optional\n When `verbose=True`, reports data set names in logging output.\n dimred: `int`, optional (default: 100)\n Dimensionality of integrated embedding.\n approx: `bool`, optional (default: `True`)\n Use approximate nearest neighbors, greatly speeds up matching runtime.\n sigma: `float`, optional (default: 15)\n Correction smoothing parameter on Gaussian kernel.\n alpha: `float`, optional (default: 0.10)\n Alignment score minimum cutoff.\n knn: `int`, optional (default: 20)\n Number of nearest neighbors to use for matching.\n hvg: `int`, optional (default: None)\n Use this number of top highly variable genes based on dispersion.\n\n Returns\n -------\n integrated, genes\n Returns a two-tuple containing a list of `numpy.ndarray` with\n integrated low dimensional embeddings and a single list of genes\n containing the intersection of inputted genes.\n \"\"\"\n datasets_full = check_datasets(datasets_full)\n\n datasets, genes = merge_datasets(datasets_full, genes_list,\n ds_names=ds_names, union=union)\n datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,\n dimred=dimred)\n\n for _ in range(n_iter):\n 
datasets_dimred = assemble(\n datasets_dimred, # Assemble in low dimensional space.\n verbose=verbose, knn=knn, sigma=sigma, approx=approx,\n alpha=alpha, ds_names=ds_names, batch_size=batch_size,\n geosketch=geosketch, geosketch_max=geosketch_max,\n )\n\n return datasets_dimred, genes\n"
] |
from annoy import AnnoyIndex
from intervaltree import IntervalTree
from itertools import cycle, islice
import numpy as np
import operator
import random
import scipy
from scipy.sparse import csc_matrix, csr_matrix, vstack
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import rbf_kernel, euclidean_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
import sys
import warnings
from .t_sne_approx import TSNEApprox
from .utils import plt, dispersion, reduce_dimensionality
from .utils import visualize_cluster, visualize_expr, visualize_dropout
from .utils import handle_zeros_in_scale
# Seed RNGs at import so repeated runs produce identical matchings.
np.random.seed(0)
random.seed(0)

# Default parameters.
ALPHA = 0.10          # minimum alignment score to accept a dataset pair
APPROX = True         # use approximate (Annoy) nearest neighbors
BATCH_SIZE = 5000     # chunk size for batched bias computation
DIMRED = 100          # dimensionality of the integrated embedding
HVG = None            # number of highly variable genes to keep (None = all)
KNN = 20              # neighbors per cell when matching datasets
N_ITER = 500          # t-SNE iterations (visualization only)
PERPLEXITY = 1200     # t-SNE perplexity (visualization only)
SIGMA = 15            # Gaussian kernel smoothing parameter for correction
VERBOSE = 2           # default logging verbosity
# Do batch correction on a list of data sets.
def correct(datasets_full, genes_list, return_dimred=False,
            batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None,
            dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN,
            return_dense=False, hvg=None, union=False,
            geosketch=False, geosketch_max=20000):
    """Integrate and batch correct a list of data sets.

    Parameters
    ----------
    datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
        Data sets to integrate and correct.
    genes_list: `list` of `list` of `string`
        List of genes for each data set.
    return_dimred: `bool`, optional (default: `False`)
        In addition to returning batch corrected matrices, also returns
        integrated low-dimesional embeddings.
    batch_size: `int`, optional (default: `5000`)
        The batch size used in the alignment vector computation. Useful when
        correcting very large (>100k samples) data sets. Set to large value
        that runs within available memory.
    verbose: `bool` or `int`, optional (default: 2)
        When `True` or not equal to 0, prints logging output.
    ds_names: `list` of `string`, optional
        When `verbose=True`, reports data set names in logging output.
    dimred: `int`, optional (default: 100)
        Dimensionality of integrated embedding.
    approx: `bool`, optional (default: `True`)
        Use approximate nearest neighbors, greatly speeds up matching runtime.
    sigma: `float`, optional (default: 15)
        Correction smoothing parameter on Gaussian kernel.
    alpha: `float`, optional (default: 0.10)
        Alignment score minimum cutoff.
    knn: `int`, optional (default: 20)
        Number of nearest neighbors to use for matching.
    return_dense: `bool`, optional (default: `False`)
        Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`.
    hvg: `int`, optional (default: None)
        Use this number of top highly variable genes based on dispersion.

    Returns
    -------
    corrected, genes
        By default (`return_dimred=False`), returns a two-tuple containing a
        list of `scipy.sparse.csr_matrix` each with batch corrected values,
        and a single list of genes containing the intersection of inputted
        genes.
    integrated, corrected, genes
        When `return_dimred=True`, returns a three-tuple containing a list
        of `numpy.ndarray` with integrated low dimensional embeddings, a list
        of `scipy.sparse.csr_matrix` each with batch corrected values, and
        a single list of genes containing the intersection of inputted genes.
    """
    # Coerce inputs to CSR and subset to the shared gene space.
    datasets_full = check_datasets(datasets_full)

    datasets, genes = merge_datasets(datasets_full, genes_list,
                                     ds_names=ds_names, union=union)
    datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
                                          dimred=dimred)

    datasets_dimred = assemble(
        datasets_dimred, # Assemble in low dimensional space.
        expr_datasets=datasets, # Modified in place.
        verbose=verbose, knn=knn, sigma=sigma, approx=approx,
        alpha=alpha, ds_names=ds_names, batch_size=batch_size,
        geosketch=geosketch, geosketch_max=geosketch_max,
    )

    if return_dense:
        datasets = [ ds.toarray() for ds in datasets ]

    if return_dimred:
        return datasets_dimred, datasets, genes

    return datasets, genes
# Integrate a list of data sets.
def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE,
              verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX,
              sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False,
              geosketch_max=20000, n_iter=1, union=False, hvg=None):
    """Integrate a list of data sets.

    Parameters
    ----------
    datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
        Data sets to integrate and correct.
    genes_list: `list` of `list` of `string`
        List of genes for each data set.
    batch_size: `int`, optional (default: `5000`)
        The batch size used in the alignment vector computation. Useful when
        correcting very large (>100k samples) data sets. Set to large value
        that runs within available memory.
    verbose: `bool` or `int`, optional (default: 2)
        When `True` or not equal to 0, prints logging output.
    ds_names: `list` of `string`, optional
        When `verbose=True`, reports data set names in logging output.
    dimred: `int`, optional (default: 100)
        Dimensionality of integrated embedding.
    approx: `bool`, optional (default: `True`)
        Use approximate nearest neighbors, greatly speeds up matching runtime.
    sigma: `float`, optional (default: 15)
        Correction smoothing parameter on Gaussian kernel.
    alpha: `float`, optional (default: 0.10)
        Alignment score minimum cutoff.
    knn: `int`, optional (default: 20)
        Number of nearest neighbors to use for matching.
    hvg: `int`, optional (default: None)
        Use this number of top highly variable genes based on dispersion.

    Returns
    -------
    integrated, genes
        Returns a two-tuple containing a list of `numpy.ndarray` with
        integrated low dimensional embeddings and a single list of genes
        containing the intersection of inputted genes.
    """
    # Coerce inputs to CSR and subset to the shared gene space.
    datasets_full = check_datasets(datasets_full)

    datasets, genes = merge_datasets(datasets_full, genes_list,
                                     ds_names=ds_names, union=union)
    datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
                                          dimred=dimred)

    # Optionally repeat assembly to refine the integration.
    for _ in range(n_iter):
        datasets_dimred = assemble(
            datasets_dimred, # Assemble in low dimensional space.
            verbose=verbose, knn=knn, sigma=sigma, approx=approx,
            alpha=alpha, ds_names=ds_names, batch_size=batch_size,
            geosketch=geosketch, geosketch_max=geosketch_max,
        )

    return datasets_dimred, genes
# Batch correction with scanpy's AnnData object.
def correct_scanpy(adatas, **kwargs):
    """Batch correct a list of `scanpy.api.AnnData`.

    Parameters
    ----------
    adatas : `list` of `scanpy.api.AnnData`
        Data sets to integrate and/or correct.
    kwargs : `dict`
        See documentation for the `correct()` method for a full list of
        parameters to use for batch correction.

    Returns
    -------
    corrected
        By default (`return_dimred=False`), returns a list of
        `scanpy.api.AnnData` with batch corrected values in the `.X` field.
    integrated, corrected
        When `return_dimred=True`, returns a two-tuple containing a list of
        `np.ndarray` with integrated low-dimensional embeddings and a list
        of `scanpy.api.AnnData` with batch corrected values in the `.X`
        field.
    """
    # Evaluate the flag once; the original re-tested kwargs in two places.
    return_dimred = 'return_dimred' in kwargs and kwargs['return_dimred']

    if return_dimred:
        datasets_dimred, datasets, genes = correct(
            [adata.X for adata in adatas],
            [adata.var_names.values for adata in adatas],
            **kwargs
        )
    else:
        datasets, genes = correct(
            [adata.X for adata in adatas],
            [adata.var_names.values for adata in adatas],
            **kwargs
        )

    new_adatas = []
    for i, adata in enumerate(adatas):
        # NOTE: the input AnnData objects are mutated in place; the
        # returned list holds the same objects.
        adata.X = datasets[i]
        new_adatas.append(adata)

    if return_dimred:
        return datasets_dimred, new_adatas
    else:
        return new_adatas
# Integration with scanpy's AnnData object.
# Visualize a scatter plot with cluster labels in the
# `cluster' variable.
def plot_clusters(coords, clusters, s=1):
    """Scatter-plot 2-D `coords` colored by integer cluster label."""
    if coords.shape[0] != clusters.shape[0]:
        sys.stderr.write(
            'Error: mismatch, {} cells, {} labels\n'
            .format(coords.shape[0], clusters.shape[0])
        )
        exit(1)

    # Repeat the palette until it covers every cluster id.
    palette = [
        '#377eb8', '#ff7f00', '#4daf4a',
        '#f781bf', '#a65628', '#984ea3',
        '#999999', '#e41a1c', '#dede00',
        '#ffe119', '#e6194b', '#ffbea3',
        '#911eb4', '#46f0f0', '#f032e6',
        '#d2f53c', '#008080', '#e6beff',
        '#aa6e28', '#800000', '#aaffc3',
        '#808000', '#ffd8b1', '#000080',
        '#808080', '#fabebe', '#a3f4ff'
    ]
    n_colors = int(max(clusters) + 1)
    colors = np.array(list(islice(cycle(palette), n_colors)))

    plt.figure()
    plt.scatter(coords[:, 0], coords[:, 1],
                c=colors[clusters], s=s)
# Put datasets into a single matrix with the intersection of all genes.
def merge_datasets(datasets, genes, ds_names=None, verbose=True,
                   union=False):
    """Subset each dataset (in place) to a shared, sorted gene list.

    Returns (datasets, ret_genes) where every dataset's columns follow
    `ret_genes` — the intersection of all gene lists by default, or the
    union (zero-filled for missing genes) when `union=True`.
    """
    if union:
        sys.stderr.write(
            'WARNING: Integrating based on the union of genes is '
            'highly discouraged, consider taking the intersection '
            'or requantifying gene expression.\n'
        )

    # Find genes in common.
    keep_genes = set()
    for idx, gene_list in enumerate(genes):
        if len(keep_genes) == 0:
            keep_genes = set(gene_list)
        elif union:
            keep_genes |= set(gene_list)
        else:
            keep_genes &= set(gene_list)
        if not union and not ds_names is None and verbose:
            print('After {}: {} genes'.format(ds_names[idx], len(keep_genes)))
        if len(keep_genes) == 0:
            print('Error: No genes found in all datasets, exiting...')
            exit(1)
    if verbose:
        print('Found {} genes among all datasets'
              .format(len(keep_genes)))

    if union:
        union_genes = sorted(keep_genes)
        for i in range(len(datasets)):
            if verbose:
                print('Processing data set {}'.format(i))
            # Expand to the full union, zero-filling columns for genes
            # this dataset does not measure (CSC for fast column slicing).
            X_new = np.zeros((datasets[i].shape[0], len(union_genes)))
            X_old = csc_matrix(datasets[i])
            gene_to_idx = { gene: idx for idx, gene in enumerate(genes[i]) }
            for j, gene in enumerate(union_genes):
                if gene in gene_to_idx:
                    X_new[:, j] = X_old[:, gene_to_idx[gene]].toarray().flatten()
            datasets[i] = csr_matrix(X_new)
        ret_genes = np.array(union_genes)
    else:
        # Only keep genes in common.
        ret_genes = np.array(sorted(keep_genes))
        for i in range(len(datasets)):
            # Remove duplicate genes.
            uniq_genes, uniq_idx = np.unique(genes[i], return_index=True)
            datasets[i] = datasets[i][:, uniq_idx]
            # Do gene filtering.
            # Sort columns so every dataset ends up in ret_genes order.
            gene_sort_idx = np.argsort(uniq_genes)
            gene_idx = [ idx for idx in gene_sort_idx
                         if uniq_genes[idx] in keep_genes ]
            datasets[i] = datasets[i][:, gene_idx]
            assert(np.array_equal(uniq_genes[gene_idx], ret_genes))

    return datasets, ret_genes
def check_datasets(datasets_full):
    """Validate input matrices and coerce dense arrays to CSR sparse.

    Parameters
    ----------
    datasets_full: `list` of `numpy.ndarray` or `scipy.sparse.csr_matrix`

    Returns
    -------
    `list` of `scipy.sparse.csr_matrix` (dense inputs converted; sparse
    inputs passed through unchanged). Exits the process on any other type.
    """
    datasets_new = []
    for ds in datasets_full:
        # isinstance replaces issubclass(type(ds), ...) and, crucially,
        # the private `scipy.sparse.csr.csr_matrix` path, which was
        # removed in SciPy >= 1.11 and raised AttributeError here.
        if isinstance(ds, np.ndarray):
            datasets_new.append(csr_matrix(ds))
        elif isinstance(ds, csr_matrix):
            datasets_new.append(ds)
        else:
            sys.stderr.write('ERROR: Data sets must be numpy array or '
                             'scipy.sparse.csr_matrix, received type '
                             '{}.\n'.format(type(ds)))
            exit(1)
    return datasets_new
# Randomized SVD.
def dimensionality_reduce(datasets, dimred=DIMRED):
    """Stack all datasets, reduce to `dimred` dimensions, and split the
    result back into per-dataset row slices."""
    X = reduce_dimensionality(vstack(datasets), dim_red_k=dimred)
    # Recover each dataset's rows from the stacked embedding.
    datasets_dimred = []
    offset = 0
    for ds in datasets:
        n_rows = ds.shape[0]
        datasets_dimred.append(X[offset:offset + n_rows, :])
        offset += n_rows
    return datasets_dimred
# Normalize and reduce dimensionality.
def process_data(datasets, genes, hvg=HVG, dimred=DIMRED, verbose=False):
    """L2-normalize each dataset (optionally keeping only the `hvg` most
    variable genes) and, when `dimred > 0`, project to `dimred` dims.

    Modifies `datasets` in place; returns (processed_datasets, genes).
    """
    # Only keep highly variable genes
    if not hvg is None and hvg > 0 and hvg < len(genes):
        if verbose:
            print('Highly variable filter...')
        X = vstack(datasets)
        # NOTE(review): dispersion() appears to return a structure whose
        # first row holds per-gene dispersions — confirm against
        # utils.dispersion.
        disp = dispersion(X)
        highest_disp_idx = np.argsort(disp[0])[::-1]
        top_genes = set(genes[highest_disp_idx[:hvg]])
        # PERF: the column index list is identical for every dataset, so
        # compute it once instead of once per dataset (was recomputed
        # inside the loop).
        gene_idx = [ idx for idx, g_i in enumerate(genes)
                     if g_i in top_genes ]
        for i in range(len(datasets)):
            datasets[i] = datasets[i][:, gene_idx]
        genes = np.array(sorted(top_genes))

    # Normalize.
    if verbose:
        print('Normalizing...')
    for i, ds in enumerate(datasets):
        datasets[i] = normalize(ds, axis=1)

    # Compute compressed embedding.
    if dimred > 0:
        if verbose:
            print('Reducing dimension...')
        datasets_dimred = dimensionality_reduce(datasets, dimred=dimred)
        if verbose:
            print('Done processing.')
        return datasets_dimred, genes

    if verbose:
        print('Done processing.')
    return datasets, genes
# Plot t-SNE visualization.
def visualize(assembled, labels, namespace, data_names,
              gene_names=None, gene_expr=None, genes=None,
              n_iter=N_ITER, perplexity=PERPLEXITY, verbose=VERBOSE,
              learn_rate=200., early_exag=12., embedding=None,
              shuffle_ds=False, size=1, multicore_tsne=True,
              image_suffix='.svg', viz_cluster=False):
    """Embed `assembled` with t-SNE and plot clusters (and optionally genes).

    Saves the main cluster plot to ``namespace + image_suffix`` and returns
    the 2-D embedding; pass it back in via `embedding` to skip refitting.
    """
    # Fit t-SNE.
    if embedding is None:
        try:
            from MulticoreTSNE import MulticoreTSNE
            tsne = MulticoreTSNE(
                n_iter=n_iter, perplexity=perplexity,
                verbose=verbose, random_state=69,
                learning_rate=learn_rate,
                early_exaggeration=early_exag,
                n_jobs=40
            )
        except ImportError:
            # Fall back to the bundled approximate t-SNE implementation.
            multicore_tsne = False

        if not multicore_tsne:
            tsne = TSNEApprox(
                n_iter=n_iter, perplexity=perplexity,
                verbose=verbose, random_state=69,
                learning_rate=learn_rate,
                early_exaggeration=early_exag
            )

        tsne.fit(np.concatenate(assembled))
        embedding = tsne.embedding_

    if shuffle_ds:
        # BUG FIX: previously `random.shuffle(list(rand_idx))` shuffled a
        # throwaway copy of a range object, so the plotting order was
        # never actually randomized. Shuffle a real list in place.
        rand_idx = list(range(embedding.shape[0]))
        random.shuffle(rand_idx)
        embedding = embedding[rand_idx, :]
        labels = labels[rand_idx]

    # Plot clusters together.
    plot_clusters(embedding, labels, s=size)
    plt.title(('Panorama ({} iter, perplexity: {}, sigma: {}, ' +
               'knn: {}, hvg: {}, dimred: {}, approx: {})')
              .format(n_iter, perplexity, SIGMA, KNN, HVG,
                      DIMRED, APPROX))
    plt.savefig(namespace + image_suffix, dpi=500)

    # Plot clusters individually.
    if viz_cluster and not shuffle_ds:
        for i in range(len(data_names)):
            visualize_cluster(embedding, i, labels,
                              cluster_name=data_names[i], size=size,
                              viz_prefix=namespace,
                              image_suffix=image_suffix)

    # Plot gene expression levels.
    if (not gene_names is None) and \
       (not gene_expr is None) and \
       (not genes is None):
        if shuffle_ds:
            gene_expr = gene_expr[rand_idx, :]
        for gene_name in gene_names:
            visualize_expr(gene_expr, embedding,
                           genes, gene_name, size=size,
                           viz_prefix=namespace,
                           image_suffix=image_suffix)

    return embedding
# Exact nearest neighbors search.
def nn(ds1, ds2, knn=KNN, metric_p=2):
    """Return the set of (i, j) pairs linking each row i of ds1 to the
    indices j of its `knn` nearest rows in ds2 (Minkowski p-metric)."""
    index = NearestNeighbors(knn, p=metric_p)
    index.fit(ds2)
    neighbors = index.kneighbors(ds1, return_distance=False)
    return set(
        (row, col)
        for row, cols in enumerate(neighbors)
        for col in cols
    )
# Approximate nearest neighbors using locality sensitive hashing.
def nn_approx(ds1, ds2, knn=KNN, metric='manhattan', n_trees=10):
    """Approximate variant of nn() backed by an Annoy index over ds2."""
    # Index the reference dataset.
    index = AnnoyIndex(ds2.shape[1], metric=metric)
    for item in range(ds2.shape[0]):
        index.add_item(item, ds2[item, :])
    index.build(n_trees)

    # Query with every row of ds1.
    ind = np.array([
        index.get_nns_by_vector(ds1[row, :], knn, search_k=-1)
        for row in range(ds1.shape[0])
    ])

    # Pair each query row with its neighbor indices.
    return set(
        (row, col)
        for row, cols in enumerate(ind)
        for col in cols
    )
# Find mutual nearest neighbors.
def mnn(ds1, ds2, knn=KNN, approx=APPROX):
    """Return (i, j) pairs where row i of ds1 and row j of ds2 are each
    within the other's `knn` nearest neighbors."""
    finder = nn_approx if approx else nn
    # Nearest neighbors in both directions.
    forward = finder(ds1, ds2, knn=knn)
    backward = finder(ds2, ds1, knn=knn)
    # Keep only pairs present in both directions.
    return forward & { (j, i) for i, j in backward }
# Visualize alignment between two datasets.
def plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind):
    """Draw two t-SNE embeddings on one figure and connect matched cells.

    Args:
        curr_ds: (cells x features) array, the dataset being aligned.
        curr_ref: (cells x features) array, the reference dataset.
        ds_ind: row indices into curr_ds of matched pairs.
        ref_ind: row indices into curr_ref of matched pairs (parallel to ds_ind).
    """
    tsne = TSNE(n_iter=400, verbose=VERBOSE, random_state=69)
    tsne.fit(curr_ds)
    plt.figure()
    coords_ds = tsne.embedding_[:, :]
    # Shift the first embedding upward so the two point clouds do not overlap.
    coords_ds[:, 1] += 100
    plt.scatter(coords_ds[:, 0], coords_ds[:, 1])
    tsne.fit(curr_ref)
    coords_ref = tsne.embedding_[:, :]
    plt.scatter(coords_ref[:, 0], coords_ref[:, 1])
    x_list, y_list = [], []
    # None entries break the polyline so matplotlib draws disjoint segments
    # in a single plot() call.
    for r_i, c_i in zip(ds_ind, ref_ind):
        x_list.append(coords_ds[r_i, 0])
        x_list.append(coords_ref[c_i, 0])
        x_list.append(None)
        y_list.append(coords_ds[r_i, 1])
        y_list.append(coords_ref[c_i, 1])
        y_list.append(None)
    plt.plot(x_list, y_list, 'b-', alpha=0.3)
    plt.show()
# Populate a table (in place) that stores mutual nearest neighbors
# between datasets.
def fill_table(table, i, curr_ds, datasets, base_ds=0,
               knn=KNN, approx=APPROX):
    """Find neighbors of curr_ds rows in the concatenation of `datasets`
    and record them in `table` (mutated in place).

    Args:
        table: dict mapping (i, j) dataset-index pairs to sets of
            (row_in_i, row_in_j) matches; updated in place.
        i: index of curr_ds among all datasets.
        curr_ds: query dataset, (cells x features).
        datasets: list of reference datasets, concatenated for the search.
        base_ds: global index of datasets[0] among all datasets.
        knn: number of neighbors per query row.
        approx: use approximate (Annoy) search instead of exact.
    """
    curr_ref = np.concatenate(datasets)
    if approx:
        match = nn_approx(curr_ds, curr_ref, knn=knn)
    else:
        match = nn(curr_ds, curr_ref, knn=knn, metric_p=1)
    # Build interval trees mapping a row position in the concatenated
    # reference back to (a) which dataset it came from and (b) that
    # dataset's starting offset, so matches can be re-localized.
    itree_ds_idx = IntervalTree()
    itree_pos_base = IntervalTree()
    pos = 0
    for j in range(len(datasets)):
        n_cells = datasets[j].shape[0]
        itree_ds_idx[pos:(pos + n_cells)] = base_ds + j
        itree_pos_base[pos:(pos + n_cells)] = pos
        pos += n_cells
    # Store all mutual nearest neighbors between datasets.
    for d, r in match:
        interval = itree_ds_idx[r]
        assert(len(interval) == 1)
        j = interval.pop().data
        interval = itree_pos_base[r]
        assert(len(interval) == 1)
        base = interval.pop().data
        if not (i, j) in table:
            table[(i, j)] = set()
        # Translate the concatenated row index r back into a row index
        # local to dataset j.
        table[(i, j)].add((d, r - base))
        assert(r - base >= 0)
# Cached geometric-sketch indices, one entry per dataset; populated lazily
# on the first geosketch call so repeated calls reuse the same sample.
gs_idxs = None
# Fill table of alignment scores.
def find_alignments_table(datasets, knn=KNN, approx=APPROX, verbose=VERBOSE,
                          prenormalized=False, geosketch=False,
                          geosketch_max=20000):
    """Compute pairwise alignment scores and MNN matches between datasets.

    Returns:
        (table1, table_print, matches) where table1 maps (i, j) with i < j
        to the alignment score (fraction of matched cells in the better-
        covered dataset), table_print is the score matrix when verbose > 1
        else None, and matches maps (i, j) to the set of matched row pairs.
    """
    if not prenormalized:
        datasets = [ normalize(ds, axis=1) for ds in datasets ]
    if geosketch:
        # Only match cells in geometric sketches.
        from ample import gs, uniform
        global gs_idxs
        if gs_idxs is None:
            # Subsample datasets larger than geosketch_max; smaller ones
            # keep all their rows.
            gs_idxs = [ uniform(X, geosketch_max, replace=False)
                        if X.shape[0] > geosketch_max else range(X.shape[0])
                        for X in datasets ]
        datasets = [ datasets[i][gs_idx, :] for i, gs_idx in enumerate(gs_idxs) ]
    table = {}
    # Search each dataset against all the others (both directions).
    for i in range(len(datasets)):
        if len(datasets[:i]) > 0:
            fill_table(table, i, datasets[i], datasets[:i], knn=knn,
                       approx=approx)
        if len(datasets[i+1:]) > 0:
            fill_table(table, i, datasets[i], datasets[i+1:],
                       knn=knn, base_ds=i+1, approx=approx)
    # Count all mutual nearest neighbors between datasets.
    matches = {}
    table1 = {}
    if verbose > 1:
        table_print = np.zeros((len(datasets), len(datasets)))
    for i in range(len(datasets)):
        for j in range(len(datasets)):
            if i >= j:
                continue
            if not (i, j) in table or not (j, i) in table:
                continue
            match_ij = table[(i, j)]
            match_ji = set([ (b, a) for a, b in table[(j, i)] ])
            # Mutual neighbors: pairs found in both directions.
            matches[(i, j)] = match_ij & match_ji
            # Score: best coverage fraction over the two datasets.
            table1[(i, j)] = (max(
                float(len(set([ idx for idx, _ in matches[(i, j)] ]))) /
                datasets[i].shape[0],
                float(len(set([ idx for _, idx in matches[(i, j)] ]))) /
                datasets[j].shape[0]
            ))
            if verbose > 1:
                table_print[i, j] += table1[(i, j)]
            if geosketch:
                # Translate matches within geometric sketches to original indices.
                matches_mnn = matches[(i, j)]
                matches[(i, j)] = [
                    (gs_idxs[i][a], gs_idxs[j][b]) for a, b in matches_mnn
                ]
    if verbose > 1:
        print(table_print)
        return table1, table_print, matches
    else:
        return table1, None, matches
# Find the matching pairs of cells between datasets.
def find_alignments(datasets, knn=KNN, approx=APPROX, verbose=VERBOSE,
                    alpha=ALPHA, prenormalized=False,
                    geosketch=False, geosketch_max=20000):
    """Return (alignments, matches): dataset pairs whose alignment score
    exceeds alpha, ordered by descending score, plus the matched cells."""
    scores, _, matches = find_alignments_table(
        datasets, knn=knn, approx=approx, verbose=verbose,
        prenormalized=prenormalized,
        geosketch=geosketch, geosketch_max=geosketch_max
    )
    # Walk pairs from highest score to lowest, keeping those above alpha.
    ranked = reversed(sorted(scores.items(), key=operator.itemgetter(1)))
    alignments = []
    for pair, score in ranked:
        if score > alpha:
            alignments.append(pair)
    return alignments, matches
# Find connections between datasets to identify panoramas.
def connect(datasets, knn=KNN, approx=APPROX, alpha=ALPHA,
            verbose=VERBOSE):
    """Group datasets into connected components ("panoramas") based on
    which pairs align above the alpha threshold.

    Returns:
        List of panoramas, each a list of dataset indices; unaligned
        datasets end up in singleton panoramas.
    """
    # Find alignments.
    alignments, _ = find_alignments(
        datasets, knn=knn, approx=approx, alpha=alpha,
        verbose=verbose
    )
    if verbose:
        print(alignments)
    panoramas = []
    connected = set()
    for i, j in alignments:
        # See if datasets are involved in any current panoramas.
        panoramas_i = [ panoramas[p] for p in range(len(panoramas))
                        if i in panoramas[p] ]
        assert(len(panoramas_i) <= 1)
        panoramas_j = [ panoramas[p] for p in range(len(panoramas))
                        if j in panoramas[p] ]
        assert(len(panoramas_j) <= 1)
        if len(panoramas_i) == 0 and len(panoramas_j) == 0:
            # Neither dataset seen yet: start a new panorama seeded with i.
            panoramas.append([ i ])
            panoramas_i = [ panoramas[-1] ]
        if len(panoramas_i) == 0:
            panoramas_j[0].append(i)
        elif len(panoramas_j) == 0:
            panoramas_i[0].append(j)
        elif panoramas_i[0] != panoramas_j[0]:
            # Both already belong to different panoramas: merge them.
            panoramas_i[0] += panoramas_j[0]
            panoramas.remove(panoramas_j[0])
        connected.add(i)
        connected.add(j)
    # Any dataset never aligned becomes its own panorama.
    for i in range(len(datasets)):
        if not i in connected:
            panoramas.append([ i ])
    return panoramas
# To reduce memory usage, split bias computation into batches.
def batch_bias(curr_ds, match_ds, bias, batch_size=None, sigma=SIGMA):
    """Compute the Gaussian-kernel-weighted average of `bias` vectors for
    every row of curr_ds, optionally in batches over match_ds rows to
    bound peak memory."""
    if batch_size is None:
        # Single-shot path: full kernel matrix, row-normalized to weights.
        weights = rbf_kernel(curr_ds, match_ds, gamma=0.5 * sigma)
        weights = normalize(weights, axis=1, norm='l1')
        return np.dot(weights, bias)
    # Batched path: accumulate the unnormalized weighted sum plus the
    # per-row weight totals, then divide once at the end.
    n_match = match_ds.shape[0]
    avg_bias = np.zeros(curr_ds.shape)
    denom = np.zeros(curr_ds.shape[0])
    for start in range(0, n_match, batch_size):
        batch_idx = range(start, min(start + batch_size, n_match))
        weights = rbf_kernel(curr_ds, match_ds[batch_idx, :],
                             gamma=0.5 * sigma)
        avg_bias += np.dot(weights, bias[batch_idx, :])
        denom += np.sum(weights, axis=1)
    # Guard against rows with zero total weight before normalizing.
    denom = handle_zeros_in_scale(denom, copy=False)
    avg_bias /= denom[:, np.newaxis]
    return avg_bias
# Compute nonlinear translation vectors between dataset
# and a reference.
def transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=SIGMA, cn=False,
              batch_size=None):
    """Return per-cell correction vectors moving curr_ds toward curr_ref.

    Args:
        curr_ds: dataset to correct.
        curr_ref: reference dataset.
        ds_ind / ref_ind: parallel row indices of matched pairs.
        sigma: kernel bandwidth for smoothing the bias vectors.
        cn: inputs are sparse (gene-expression) matrices; densify for the
            kernel computation and re-sparsify the result.
        batch_size: forwarded to batch_bias to bound memory use.

    Returns:
        Array (or csr_matrix when cn) of shape curr_ds.shape; an all-zero
        sparse matrix on oversmoothing or out-of-memory failures.
    """
    # Compute the matching.
    match_ds = curr_ds[ds_ind, :]
    match_ref = curr_ref[ref_ind, :]
    bias = match_ref - match_ds
    if cn:
        match_ds = match_ds.toarray()
        curr_ds = curr_ds.toarray()
        bias = bias.toarray()
    # Promote RuntimeWarnings (e.g. divide-by-zero from degenerate
    # weights, a symptom of oversmoothing) to exceptions so they can be
    # caught and reported instead of silently producing NaNs.
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            avg_bias = batch_bias(curr_ds, match_ds, bias, sigma=sigma,
                                  batch_size=batch_size)
        except RuntimeWarning:
            sys.stderr.write('WARNING: Oversmoothing detected, refusing to batch '
                             'correct, consider lowering sigma value.\n')
            return csr_matrix(curr_ds.shape, dtype=float)
        except MemoryError:
            if batch_size is None:
                sys.stderr.write('WARNING: Out of memory, consider turning on '
                                 'batched computation with batch_size parameter.\n')
            else:
                sys.stderr.write('WARNING: Out of memory, consider lowering '
                                 'the batch_size parameter.\n')
            return csr_matrix(curr_ds.shape, dtype=float)
    if cn:
        avg_bias = csr_matrix(avg_bias)
    return avg_bias
# Finds alignments between datasets and uses them to construct
# panoramas. "Merges" datasets by correcting gene expression
# values.
def assemble(datasets, verbose=VERBOSE, view_match=False, knn=KNN,
             sigma=SIGMA, approx=APPROX, alpha=ALPHA, expr_datasets=None,
             ds_names=None, batch_size=None,
             geosketch=False, geosketch_max=20000, alignments=None, matches=None):
    """Batch-correct `datasets` in place by stitching them into panoramas.

    Processes alignments in descending score order. For each aligned pair,
    either attaches one dataset to the other's panorama or merges two
    panoramas, applying smoothed MNN correction vectors. When
    expr_datasets (sparse expression matrices) is given, the same
    corrections are applied to it as well.

    Returns:
        The corrected `datasets` list (also mutated in place).
    """
    if len(datasets) == 1:
        return datasets
    if alignments is None and matches is None:
        alignments, matches = find_alignments(
            datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose,
            geosketch=geosketch, geosketch_max=geosketch_max
        )
    ds_assembled = {}
    panoramas = []
    for i, j in alignments:
        if verbose:
            if ds_names is None:
                print('Processing datasets {}'.format((i, j)))
            else:
                print('Processing datasets {} <=> {}'.
                      format(ds_names[i], ds_names[j]))
        # Only consider a dataset a fixed amount of times.
        if not i in ds_assembled:
            ds_assembled[i] = 0
        ds_assembled[i] += 1
        if not j in ds_assembled:
            ds_assembled[j] = 0
        ds_assembled[j] += 1
        if ds_assembled[i] > 3 and ds_assembled[j] > 3:
            continue
        # See if datasets are involved in any current panoramas.
        panoramas_i = [ panoramas[p] for p in range(len(panoramas))
                        if i in panoramas[p] ]
        assert(len(panoramas_i) <= 1)
        panoramas_j = [ panoramas[p] for p in range(len(panoramas))
                        if j in panoramas[p] ]
        assert(len(panoramas_j) <= 1)
        if len(panoramas_i) == 0 and len(panoramas_j) == 0:
            # Neither is in a panorama yet: seed one with the larger
            # dataset so the smaller one gets corrected toward it.
            if datasets[i].shape[0] < datasets[j].shape[0]:
                i, j = j, i
            panoramas.append([ i ])
            panoramas_i = [ panoramas[-1] ]
        # Map dataset i to panorama j.
        if len(panoramas_i) == 0:
            curr_ds = datasets[i]
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])
            # Collect matches against every member of the panorama,
            # offsetting reference indices by each member's base position.
            match = []
            base = 0
            for p in panoramas_j[0]:
                if i < p and (i, p) in matches:
                    match.extend([ (a, b + base) for a, b in matches[(i, p)] ])
                elif i > p and (p, i) in matches:
                    match.extend([ (b, a + base) for a, b in matches[(p, i)] ])
                base += datasets[p].shape[0]
            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            datasets[i] = curr_ds + bias
            if expr_datasets:
                # Mirror the correction in expression space (sparse).
                curr_ds = expr_datasets[i]
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_j[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
                                 sigma=sigma, cn=True, batch_size=batch_size)
                expr_datasets[i] = curr_ds + bias
            panoramas_j[0].append(i)
        # Map dataset j to panorama i.
        elif len(panoramas_j) == 0:
            curr_ds = datasets[j]
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_i[0] ])
            match = []
            base = 0
            for p in panoramas_i[0]:
                if j < p and (j, p) in matches:
                    match.extend([ (a, b + base) for a, b in matches[(j, p)] ])
                elif j > p and (p, j) in matches:
                    match.extend([ (b, a + base) for a, b in matches[(p, j)] ])
                base += datasets[p].shape[0]
            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            datasets[j] = curr_ds + bias
            if expr_datasets:
                curr_ds = expr_datasets[j]
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_i[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                                 cn=True, batch_size=batch_size)
                expr_datasets[j] = curr_ds + bias
            panoramas_i[0].append(j)
        # Merge two panoramas together.
        else:
            curr_ds = np.concatenate([ datasets[p] for p in panoramas_i[0] ])
            curr_ref = np.concatenate([ datasets[p] for p in panoramas_j[0] ])
            # Find base indices into each panorama.
            base_i = 0
            for p in panoramas_i[0]:
                if p == i: break
                base_i += datasets[p].shape[0]
            base_j = 0
            for p in panoramas_j[0]:
                if p == j: break
                base_j += datasets[p].shape[0]
            # Find matching indices.
            match = []
            base = 0
            for p in panoramas_i[0]:
                if p == i and j < p and (j, p) in matches:
                    match.extend([ (b + base, a + base_j)
                                   for a, b in matches[(j, p)] ])
                elif p == i and j > p and (p, j) in matches:
                    match.extend([ (a + base, b + base_j)
                                   for a, b in matches[(p, j)] ])
                base += datasets[p].shape[0]
            base = 0
            for p in panoramas_j[0]:
                if p == j and i < p and (i, p) in matches:
                    match.extend([ (a + base_i, b + base)
                                   for a, b in matches[(i, p)] ])
                elif p == j and i > p and (p, i) in matches:
                    match.extend([ (b + base_i, a + base)
                                   for a, b in matches[(p, i)] ])
                base += datasets[p].shape[0]
            ds_ind = [ a for a, _ in match ]
            ref_ind = [ b for _, b in match ]
            # Apply transformation to entire panorama.
            bias = transform(curr_ds, curr_ref, ds_ind, ref_ind, sigma=sigma,
                             batch_size=batch_size)
            curr_ds += bias
            # Scatter the corrected concatenation back into its members.
            base = 0
            for p in panoramas_i[0]:
                n_cells = datasets[p].shape[0]
                datasets[p] = curr_ds[base:(base + n_cells), :]
                base += n_cells
            if not expr_datasets is None:
                curr_ds = vstack([ expr_datasets[p]
                                   for p in panoramas_i[0] ])
                curr_ref = vstack([ expr_datasets[p]
                                    for p in panoramas_j[0] ])
                bias = transform(curr_ds, curr_ref, ds_ind, ref_ind,
                                 sigma=sigma, cn=True, batch_size=batch_size)
                curr_ds += bias
                base = 0
                for p in panoramas_i[0]:
                    n_cells = expr_datasets[p].shape[0]
                    expr_datasets[p] = curr_ds[base:(base + n_cells), :]
                    base += n_cells
            # Merge panoramas i and j and delete one.
            if panoramas_i[0] != panoramas_j[0]:
                panoramas_i[0] += panoramas_j[0]
                panoramas.remove(panoramas_j[0])
        # Visualize.
        if view_match:
            plot_mapping(curr_ds, curr_ref, ds_ind, ref_ind)
    return datasets
# Non-optimal dataset assembly. Simply accumulate datasets into a
# reference.
def assemble_accum(datasets, verbose=VERBOSE, knn=KNN, sigma=SIGMA,
                   approx=APPROX, batch_size=None):
    """Sequentially correct each dataset toward the concatenation of all
    previously processed datasets (mutates and returns `datasets`)."""
    if len(datasets) == 1:
        return datasets
    for i in range(len(datasets) - 1):
        j = i + 1
        if verbose:
            print('Processing datasets {}'.format((i, j)))
        target = datasets[j]
        reference = np.concatenate(datasets[:j])
        pairs = mnn(target, reference, knn=knn, approx=approx)
        target_idx = [ a for a, _ in pairs ]
        ref_idx = [ b for _, b in pairs ]
        correction = transform(target, reference, target_idx, ref_idx,
                               sigma=sigma, batch_size=batch_size)
        datasets[j] = target + correction
    return datasets
def interpret_alignments(datasets, expr_datasets, genes,
                         verbose=VERBOSE, knn=KNN, approx=APPROX,
                         alpha=ALPHA, n_permutations=None):
    """Print per-gene permutation p-values for each dataset alignment.

    For every aligned pair, compares the mean absolute expression shift
    across matched cells against a null built from random cell pairings.

    Args:
        datasets: dimensionality-reduced datasets used for alignment.
        expr_datasets: matching expression matrices (cells x genes).
        genes: gene names, parallel to the expression columns.
        n_permutations: size of the null sample; defaults to 30 * n_genes.
    """
    if n_permutations is None:
        n_permutations = len(genes) * 30
    # np.random.randint(size=...) and np.tile repetition counts must be
    # integers; the previous float() value raises TypeError on modern NumPy.
    n_permutations = int(n_permutations)
    alignments, matches = find_alignments(
        datasets, knn=knn, approx=approx, alpha=alpha, verbose=verbose
    )
    for i, j in alignments:
        # Compute average bias vector that aligns two datasets together.
        ds_i = expr_datasets[i]
        ds_j = expr_datasets[j]
        if i < j:
            match = matches[(i, j)]
        else:
            match = matches[(j, i)]
        i_ind = [ a for a, _ in match ]
        j_ind = [ b for _, b in match ]
        avg_bias = np.absolute(
            np.mean(ds_j[j_ind, :] - ds_i[i_ind, :], axis=0)
        )
        # Construct null distribution and compute p-value.
        null_bias = (
            ds_j[np.random.randint(ds_j.shape[0], size=n_permutations), :] -
            ds_i[np.random.randint(ds_i.shape[0], size=n_permutations), :]
        )
        p = ((np.sum(np.greater_equal(
            np.absolute(np.tile(avg_bias, (n_permutations, 1))),
            np.absolute(null_bias)
        ), axis=0, dtype=float) + 1) / (n_permutations + 1))
        print('>>>> Stats for alignment {}'.format((i, j)))
        for k in range(len(p)):
            print('{}\t{}'.format(genes[k], p[k]))
|
chrisspen/weka
|
weka/arff.py
|
convert_weka_to_py_date_pattern
|
python
|
def convert_weka_to_py_date_pattern(p):
    """Translate a Weka/Java SimpleDateFormat pattern into the equivalent
    Python strftime/strptime pattern.

    See https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
    and https://www.cs.waikato.ac.nz/ml/weka/arff.html for both syntaxes.
    """
    # Order matters: 'MM' (month) must be replaced before 'mm' (minute).
    translations = (
        ('yyyy', r'%Y'),
        ('MM', r'%m'),
        ('dd', r'%d'),
        ('HH', r'%H'),
        ('mm', r'%M'),
        ('ss', r'%S'),
    )
    for weka_token, py_token in translations:
        p = p.replace(weka_token, py_token)
    return p
|
Converts the date format pattern used by Weka to the date format pattern used by Python's datetime.strftime().
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L87-L99
| null |
# Copyright (c) 2008, Mikio L. Braun, Cheng Soon Ong, Soeren Sonnenburg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
2011.3.6 CKS
Fixed regular expression to handle special characters in attribute names.
Added options for parsing and copying only the schema.
2012.11.4 CKS
Added support for streaming and sparse data.
"""
from __future__ import print_function
import os
import sys
import re
import copy
import unittest
import tempfile
from datetime import date, datetime
from decimal import Decimal
from six import StringIO
from six import string_types as basestring # pylint: disable=redefined-builtin
import dateutil.parser
MISSING = '?'
def is_numeric(v):
    """Return True if v can be converted to a float, else False."""
    try:
        float(v)
    except (TypeError, ValueError):
        return False
    return True
# Serialization formats for @data rows.
DENSE = 'dense'
SPARSE = 'sparse'
FORMATS = (DENSE, SPARSE)
# ARFF attribute type names.
TYPE_INTEGER = 'integer'
TYPE_NUMERIC = 'numeric' # float or integer
TYPE_REAL = 'real'
TYPE_STRING = 'string'
TYPE_NOMINAL = 'nominal'
TYPE_DATE = 'date'
# Types accepted by define_attribute().
# NOTE(review): TYPE_REAL is absent here although docstrings mention
# 'real' -- confirm whether defining a 'real' attribute should pass.
TYPES = (
    TYPE_INTEGER,
    TYPE_NUMERIC,
    TYPE_STRING,
    TYPE_NOMINAL,
    TYPE_DATE,
)
# Types whose values are treated as numbers when reading/writing.
NUMERIC_TYPES = (
    TYPE_INTEGER,
    TYPE_NUMERIC,
    TYPE_REAL,
)
# Strips a single leading and/or trailing quote (single or double).
STRIP_QUOTES_REGEX = re.compile('^[\'\"]|[\'\"]$')
#DEFAULT_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss" # Weka docs say this is the default, but using this causes Weka to throw an java.io.IOException: unparseable date
# Default Weka date pattern used when a date attribute declares none.
DEFAULT_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss"
#DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
def cmp(a, b): # pylint: disable=redefined-builtin
    """Three-way comparison: -1 if a < b, 1 if a > b, else 0 (Py2 cmp)."""
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
class Value(object):
    """
    Base helper class for tagging units of data with an explicit schema type.

    Attributes:
        value: the wrapped raw value.
        cls: True when this value is the class (target) attribute.
    """
    __slots__ = ('value', 'cls')

    def __init__(self, v, cls=False):
        self.value = v
        self.cls = cls

    def __hash__(self):
        # Hash only on the value so equal values collide regardless of cls.
        return hash(self.value)

    def __eq__(self, other):
        if not isinstance(other, Value):
            return NotImplemented
        return self.value == other.value

    def __cmp__(self, other):
        if not isinstance(other, Value):
            return NotImplemented
        return cmp(self.value, other.value)

    def __repr__(self):
        return repr(self.value)
class Integer(Value):
    """Integer-typed ARFF value; non-missing inputs are coerced with int()."""
    c_type = TYPE_INTEGER

    def __init__(self, v, *args, **kwargs):
        if v != MISSING:
            v = int(v)
        super(Integer, self).__init__(v, *args, **kwargs)

    def __add__(self, other):
        # Supports Integer + Integer and Integer + plain number.
        if isinstance(other, Integer):
            rhs = other.value
        elif isinstance(other, (int, float, bool)):
            rhs = other
        else:
            return NotImplemented
        return Integer(v=self.value + rhs, cls=self.cls)

    def __iadd__(self, other):
        # In-place add mutates self.value and returns self.
        if isinstance(other, Integer):
            rhs = other.value
        elif isinstance(other, (int, float, bool)):
            rhs = other
        else:
            return NotImplemented
        self.value += rhs
        return self

# Short alias.
Int = Integer
class Numeric(Value):
    """Float-typed ARFF value ('numeric'/'real'); supports + and / with
    other Numeric instances and plain numbers."""
    c_type = TYPE_NUMERIC
    def __init__(self, v, *args, **kwargs):
        # TODO: float() conversion may cause loss of precision?
        if v != MISSING:
            v = float(v)
        super(Numeric, self).__init__(v, *args, **kwargs)
    def __add__(self, other):
        # Numeric + Numeric or Numeric + plain number -> new Numeric.
        if isinstance(other, Numeric):
            return Numeric(v=self.value + other.value, cls=self.cls)
        elif isinstance(other, (int, float, bool)):
            return Numeric(v=self.value + other, cls=self.cls)
        return NotImplemented
    def __iadd__(self, other):
        # In-place add mutates self.value.
        if isinstance(other, Numeric):
            self.value += other.value
            return self
        elif isinstance(other, (int, float, bool)):
            self.value += other
            return self
        return NotImplemented
    def __div__(self, other):
        # Python 2 division operator.
        if isinstance(other, Numeric):
            return Numeric(v=self.value / other.value, cls=self.cls)
        elif isinstance(other, (int, float, bool)):
            return Numeric(v=self.value / other, cls=self.cls)
        return NotImplemented
    def __truediv__(self, other):
        # Python 3 division operator (same semantics as __div__ above).
        if isinstance(other, Numeric):
            return Numeric(v=self.value / other.value, cls=self.cls)
        elif isinstance(other, (int, float, bool)):
            return Numeric(v=self.value / other, cls=self.cls)
        return NotImplemented
    def __idiv__(self, other):
        # Python 2 in-place division.
        if isinstance(other, Numeric):
            self.value /= other.value
            return self
        elif isinstance(other, (int, float, bool)):
            self.value /= other
            return self
        return NotImplemented
    def __itruediv__(self, other):
        # Python 3 in-place division.
        if isinstance(other, Numeric):
            self.value /= other.value
            return self
        elif isinstance(other, (int, float, bool)):
            self.value /= other
            return self
        return NotImplemented
# Short alias.
Num = Numeric
class String(Value):
    """String-typed ARFF value; input is coerced with str()."""
    c_type = TYPE_STRING

    def __init__(self, v, *args, **kwargs):
        super(String, self).__init__(str(v), *args, **kwargs)

# Short alias.
Str = String
class Nominal(Value):
    """Nominal (categorical) ARFF value; stores the raw value unchanged."""
    c_type = TYPE_NOMINAL
# Short alias.
Nom = Nominal
class Date(Value):
    """Date-typed ARFF value; formatting is applied at write time."""
    c_type = TYPE_DATE
# Short alias.
Dt = Date
# Maps an ARFF type name to the Value subclass used to wrap raw values.
TYPE_TO_CLASS = {
    TYPE_INTEGER: Integer,
    TYPE_NUMERIC: Numeric,
    TYPE_STRING: String,
    TYPE_NOMINAL: Nominal,
    TYPE_REAL: Numeric,
    TYPE_DATE: Date,
}
def wrap_value(v):
    """Wrap a raw value in a best-guess Value subclass.

    Order of preference: already-wrapped values pass through; missing
    markers and strings become Str; anything float()-able becomes Num;
    remaining values fall through to Int and finally Date.
    """
    if isinstance(v, Value):
        return v
    if v == MISSING:
        return Str(v)
    if isinstance(v, basestring):
        return Str(v)
    try:
        return Num(v)
    except ValueError:
        pass
    # NOTE(review): this branch looks unreachable -- any value int()
    # accepts is also accepted by float() above; confirm intended order.
    try:
        return Int(v)
    except ValueError:
        pass
    # NOTE(review): Date() never raises ValueError here, so when reached
    # this always returns a Date; the implicit None fall-through below
    # appears dead. Confirm this is intentional.
    try:
        return Date(v)
    except ValueError:
        pass
class ArffFile(object):
    """An ARFF file object describing a data set.

    A data set ('relation') consists of data points made up of attributes.
    Supported attribute types are:

    - 'integer'/'numeric'/'real': numbers
    - 'string': strings
    - 'nominal': one of a fixed set of possible values
    - 'date': dates, formatted with a Weka date pattern

    Not all ARFF features are supported yet; most notably relational
    attributes are missing, and parsing of strings may still be brittle.

    Instances can be loaded from / saved to files (load/save), built from
    or rendered to strings (parse/write), or constructed empty and filled
    by hand via define_attribute.

    Attributes:
        relation: name of the relation.
        attributes: ordered list of attribute names.
        attribute_types: {attribute name: type string}.
        attribute_data: extra per-attribute data, e.g. the value set for
            nominal attributes or the format for date attributes.
        comment: the initial comment in the file, typically describing
            the data set.
        data: the actual data rows.
    """
def __init__(self, relation='', schema=None):
    """Construct an empty ARFF structure.

    Args:
        relation: name of the relation.
        schema: optional iterable of (name, data) pairs; data is either a
            list/tuple of nominal values or a type string (e.g. 'numeric').
    """
    self.relation = relation
    self.clear()
    # Load schema.
    if schema:
        for name, data in schema:
            # Attribute names may arrive quoted; store them unquoted.
            name = STRIP_QUOTES_REGEX.sub('', name)
            self.attributes.append(name)
            if isinstance(data, (tuple, list)):
                # A sequence of values means a nominal attribute.
                self.attribute_types[name] = TYPE_NOMINAL
                self.attribute_data[name] = set(data)
            else:
                self.attribute_types[name] = data
                self.attribute_data[name] = None
def clear(self):
    """Reset all schema, data, parsing, and streaming state."""
    self.attributes = [] # [attr_name, attr_name, ...]
    self.attribute_types = dict() # {attr_name:type}
    self.attribute_data = dict() # {attr_name:[nominal values]}
    self._filename = None
    self.comment = []
    self.data = []
    self.lineno = 0
    self.fout = None # open stream handle (see open_stream)
    self.class_attr_name = None
def get_attribute_value(self, name, index):
    """
    Returns the value associated with the given value index
    of the attribute with the given name.

    For numeric attributes, `index` is the raw value (returned as int or
    Decimal). For nominal attributes, `index` is expected in Weka's
    "clsindex:clsvalue" form and the value part is returned.
    Returns None for the missing marker.
    """
    if index == MISSING:
        return
    elif self.attribute_types[name] in NUMERIC_TYPES:
        at = self.attribute_types[name]
        if at == TYPE_INTEGER:
            return int(index)
        # Decimal(str(...)) avoids binary-float representation artifacts.
        return Decimal(str(index))
    else:
        assert self.attribute_types[name] == TYPE_NOMINAL
        # cls_index (position in the nominal set) is currently unused.
        cls_index, cls_value = index.split(':')
        #return self.attribute_data[name][index-1]
        if cls_value != MISSING:
            assert cls_value in self.attribute_data[name], \
                'Predicted value "%s" but only values %s are allowed.' \
                % (cls_value, ', '.join(self.attribute_data[name]))
        return cls_value
def __len__(self):
    """Return the number of data rows."""
    return len(self.data)
def __iter__(self):
    """Iterate over data rows as dicts keyed by unquoted attribute name.

    Yields:
        dict mapping each attribute name (surrounding quotes stripped)
        to the corresponding value of the row.
    """
    # Strip quotes once up front instead of per row, reusing the
    # module-level pattern (identical to the inline regex it replaces).
    names = [STRIP_QUOTES_REGEX.sub('', attr) for attr in self.attributes]
    for d in self.data:
        assert len(d) == len(self.attributes)
        named = dict(zip(names, d))
        # Duplicate stripped names would silently drop columns.
        assert len(named) == len(d)
        yield named
@classmethod
def load(cls, filename, schema_only=False):
    """
    Load an ARFF File from a file.

    Args:
        filename: path of the ARFF file to read.
        schema_only: stop parsing at the @data marker.

    Returns:
        A new instance; _filename is recorded unless schema_only.
    """
    # Context manager guarantees the handle is closed even if
    # parsing raises (the previous explicit close() leaked it then).
    with open(filename) as fh:
        contents = fh.read()
    a = cls.parse(contents, schema_only=schema_only)
    if not schema_only:
        a._filename = filename
    return a
@classmethod
def parse(cls, s, schema_only=False):
    """
    Parse an ARFF File already loaded into a string.

    Args:
        s: full ARFF text.
        schema_only: stop as soon as the parser enters the data section.

    Returns:
        A new populated instance.
    """
    a = cls()
    # Parser state machine starts in the leading comment block.
    a.state = 'comment'
    a.lineno = 1
    for l in s.splitlines():
        a.parseline(l)
        a.lineno += 1
        if schema_only and a.state == 'data':
            # Don't parse data if we're only loading the schema.
            break
    return a
def copy(self, schema_only=False):
    """
    Return a deep copy of this instance.

    When schema_only is True the comment and data are omitted, leaving
    only the relation name and attribute schema.
    """
    duplicate = type(self)()
    duplicate.relation = self.relation
    duplicate.attributes = list(self.attributes)
    duplicate.attribute_types = self.attribute_types.copy()
    duplicate.attribute_data = self.attribute_data.copy()
    if schema_only:
        return duplicate
    duplicate.comment = list(self.comment)
    duplicate.data = copy.deepcopy(self.data)
    return duplicate
def flush(self):
    """Flush the streaming output file, if one is open (see open_stream)."""
    if self.fout:
        self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
    """
    Save an arff structure to a file, leaving the file object
    open for writing of new data samples.

    This prevents you from directly accessing the data via Python,
    but when generating a huge file, this prevents all your data
    from being stored in memory.

    Args:
        class_attr_name: optional name of the class (target) attribute.
        fn: output path; a temporary file is created when omitted.
    """
    if fn:
        self.fout_fn = fn
    else:
        # mkstemp returns an open OS-level descriptor; close it since we
        # reopen the path with a regular file object below.
        fd, self.fout_fn = tempfile.mkstemp()
        os.close(fd)
    self.fout = open(self.fout_fn, 'w')
    if class_attr_name:
        self.class_attr_name = class_attr_name
    # Emit the header now; rows are appended as they are parsed/added.
    self.write(fout=self.fout, schema_only=True)
    self.write(fout=self.fout, data_only=True)
    self.fout.flush()
def close_stream(self):
    """
    Terminates an open stream and returns the filename
    of the file containing the streamed data.

    Returns None when no stream is open.
    """
    if self.fout:
        # Capture the handle/path before clearing the instance state.
        fout = self.fout
        fout_fn = self.fout_fn
        self.fout.flush()
        self.fout.close()
        self.fout = None
        self.fout_fn = None
        return fout_fn
def save(self, filename=None):
    """
    Save an arff structure to a file.

    Args:
        filename: output path; defaults to the path the instance was
            loaded from (self._filename).
    """
    filename = filename or self._filename
    # Context manager guarantees the handle is closed even if
    # serialization raises mid-write.
    with open(filename, 'w') as fh:
        fh.write(self.write())
def write_line(self, d, fmt=SPARSE):
    """
    Converts a single data line to a string.

    Args:
        d: a row, either a flat sequence parallel to self.attributes or
            (sparse format only) a dict keyed by attribute name.
        fmt: DENSE ("v1,v2,...") or SPARSE ("{idx value, ...}").

    Returns:
        The serialized line, or None for lines that should be skipped
        (empty, or containing nothing but a missing class value).
    """
    def smart_quote(s):
        # Quote values containing spaces unless already double-quoted.
        if isinstance(s, basestring) and ' ' in s and s[0] != '"':
            s = '"%s"' % s
        return s
    if fmt == DENSE:
        #TODO:fix
        assert not isinstance(d, dict), NotImplemented
        line = []
        for e, a in zip(d, self.attributes):
            at = self.attribute_types[a]
            if at in NUMERIC_TYPES:
                line.append(str(e))
            elif at == TYPE_STRING:
                line.append(self.esc(e))
            elif at == TYPE_NOMINAL:
                line.append(e)
            else:
                raise Exception("Type " + at + " not supported for writing!")
        s = ','.join(map(str, line))
        return s
    elif fmt == SPARSE:
        line = []
        # Convert flat row into dictionary.
        if isinstance(d, (list, tuple)):
            d = dict(zip(self.attributes, d))
        # First pass: wrap every raw value in its Value subclass.
        # (Only values are replaced; the key set is unchanged, so
        # mutating d while iterating its keys is safe here.)
        for k in d:
            at = self.attribute_types.get(k)
            if isinstance(d[k], Value):
                continue
            elif d[k] == MISSING:
                d[k] = Str(d[k])
            elif at in (TYPE_NUMERIC, TYPE_REAL):
                d[k] = Num(d[k])
            elif at == TYPE_STRING:
                d[k] = Str(d[k])
            elif at == TYPE_INTEGER:
                d[k] = Int(d[k])
            elif at == TYPE_NOMINAL:
                d[k] = Nom(d[k])
            elif at == TYPE_DATE:
                d[k] = Date(d[k])
            else:
                raise Exception('Unknown type: %s' % at)
        # Second pass: render "index value" entries in schema order.
        for i, name in enumerate(self.attributes):
            v = d.get(name)
            if v is None:
                # print 'Skipping attribute with None value:', name
                continue
            elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
                v = MISSING
            elif isinstance(v, String):
                v = '"%s"' % v.value
            elif isinstance(v, Date):
                # Format dates with the attribute's Weka pattern,
                # translated to strftime syntax.
                date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
                date_format = convert_weka_to_py_date_pattern(date_format)
                if isinstance(v.value, basestring):
                    _value = dateutil.parser.parse(v.value)
                else:
                    assert isinstance(v.value, (date, datetime))
                    _value = v.value
                v.value = v = _value.strftime(date_format)
            elif isinstance(v, Value):
                v = v.value
            # NOTE(review): nominal values not in the declared value set
            # are silently dropped here rather than raising -- confirm
            # this is the intended behavior.
            if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
                pass
            else:
                line.append('%i %s' % (i, smart_quote(v)))
        if len(line) == 1 and MISSING in line[-1]:
            # Skip lines with nothing other than a missing class.
            return
        elif not line:
            # Don't write blank lines.
            return
        return '{' + (', '.join(line)) + '}'
    else:
        raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
    """Serialize the @attribute declarations.

    Args:
        fout: file-like object to write to; when omitted, declarations
            are written to an internal buffer and returned as a string.
    """
    close = False
    if fout is None:
        close = True
        fout = StringIO()
    for a in self.attributes:
        at = self.attribute_types[a]
        if at == TYPE_INTEGER:
            print("@attribute " + self.esc(a) + " integer", file=fout)
        elif at in (TYPE_NUMERIC, TYPE_REAL):
            print("@attribute " + self.esc(a) + " numeric", file=fout)
        elif at == TYPE_STRING:
            print("@attribute " + self.esc(a) + " string", file=fout)
        elif at == TYPE_NOMINAL:
            # Nominal value sets are written sorted, missing marker excluded.
            nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
            nom_vals = sorted(nom_vals)
            print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
        elif at == TYPE_DATE:
            # https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
            # NOTE(review): if attribute_data[a] exists but is None, this
            # writes the literal string "None" as the format -- confirm.
            print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
        else:
            raise Exception("Type " + at + " not supported for writing!")
    if isinstance(fout, StringIO) and close:
        return fout.getvalue()
def write(self,
          fout=None,
          fmt=SPARSE,
          schema_only=False,
          data_only=False):
    """
    Write an arff structure to a string.

    Args:
        fout: file-like target; when omitted an internal buffer is used
            and the rendered text is returned.
        fmt: row serialization format, DENSE or SPARSE.
        schema_only: emit only the comment/relation/attribute header.
        data_only: emit only the @data section.
    """
    assert not (schema_only and data_only), 'Make up your mind.'
    assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
    close = False
    if fout is None:
        close = True
        fout = StringIO()
    if not data_only:
        # Re-prefix every comment line with the ARFF comment marker.
        print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
        print("@relation " + self.relation, file=fout)
        self.write_attributes(fout=fout)
    if not schema_only:
        print("@data", file=fout)
        for d in self.data:
            line_str = self.write_line(d, fmt=fmt)
            # write_line returns None for rows that should be skipped.
            if line_str:
                print(line_str, file=fout)
    if isinstance(fout, StringIO) and close:
        return fout.getvalue()
def esc(self, s):
    """
    Escape a string if it contains spaces.

    Wraps s in single quotes and collapses any resulting doubled quotes.
    """
    quoted = "'" + s + "'"
    return quoted.replace("''", "'")
def define_attribute(self, name, atype, data=None):
    """
    Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
    For nominal attributes, pass the possible values as data.
    For date attributes, pass the format as data.

    NOTE(review): TYPES does not include 'real', so passing atype='real'
    fails the assert below despite the docstring -- confirm intent.
    """
    self.attributes.append(name)
    assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
    self.attribute_types[name] = atype
    self.attribute_data[name] = data
def parseline(self, l):
    """Feed one line into the parser state machine.

    States: 'comment' (leading % comment block) -> 'in_header'
    (@relation/@attribute declarations) -> 'data' (data rows).
    """
    if self.state == 'comment':
        if l and l[0] == '%':
            self.comment.append(l[2:])
        else:
            # First non-comment line: freeze the comment and reprocess
            # this line as a header line.
            self.comment = '\n'.join(self.comment)
            self.state = 'in_header'
            self.parseline(l)
    elif self.state == 'in_header':
        ll = l.lower()
        if ll.startswith('@relation '):
            self.__parse_relation(l)
        if ll.startswith('@attribute '):
            self.__parse_attribute(l)
        if ll.startswith('@data'):
            self.state = 'data'
    elif self.state == 'data':
        # % lines inside the data section are comments and are skipped.
        if l and l[0] != '%':
            self._parse_data(l)
def __parse_relation(self, l):
    """Record the relation name from an '@relation <name>' line."""
    tokens = l.split()
    self.relation = tokens[1]
def __parse_attribute(self, l):
    """Parse one '@attribute ...' line and register the attribute.

    Handles bare identifiers, quoted names, {nominal,value,sets}, and an
    optional quoted format for date attributes.
    """
    # Tokens: identifiers (incl. -, [, ]), {...} nominal sets, or
    # single-/double-quoted strings.
    p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
    l = [s.strip() for s in p.findall(l)]
    name = l[1]
    name = STRIP_QUOTES_REGEX.sub('', name)
    atype = l[2]#.lower()
    if atype == TYPE_INTEGER:
        self.define_attribute(name, TYPE_INTEGER)
    elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
        # 'real' is normalized to 'numeric' internally.
        self.define_attribute(name, TYPE_NUMERIC)
    elif atype == TYPE_STRING:
        self.define_attribute(name, TYPE_STRING)
    elif atype == TYPE_DATE:
        # Optional fourth token is the (quoted) Weka date format.
        data = None
        if len(l) >= 4:
            data = STRIP_QUOTES_REGEX.sub('', l[3])
        self.define_attribute(name, TYPE_DATE, data=data)
    elif atype[0] == '{' and atype[-1] == '}':
        values = [s.strip() for s in atype[1:-1].split(',')]
        self.define_attribute(name, TYPE_NOMINAL, values)
    else:
        raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
    """Parse or ingest one data row.

    Accepts a raw ARFF line (dense "v1,v2,..." or sparse "{i v, ...}"),
    a dict keyed by attribute name, or an already-split sequence. Rows
    are appended to self.data, or streamed to disk when a stream is open.
    """
    if isinstance(l, basestring):
        l = l.strip()
        if l.startswith('{'):
            # Sparse row: "{index value, index value, ...}".
            assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
            assert not self.fout, NotImplemented
            dline = {}
            # Split on commas not preceded by a backslash escape.
            parts = re.split(r'(?<!\\),', l[1:-1])
            for part in parts:
                index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
                index = int(index)
                if value[0] == value[-1] and value[0] in ('"', "'"):
                    # Strip quotes.
                    value = value[1:-1]
                # TODO:0 or 1-indexed? Weka uses 0-indexing?
                #name = self.attributes[index-1]
                name = self.attributes[index]
                ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
                if value == MISSING:
                    dline[name] = Str(value)
                else:
                    dline[name] = ValueClass(value)
            self.data.append(dline)
            return
        else:
            # Convert string to list.
            l = [s.strip() for s in l.split(',')]
    elif isinstance(l, dict):
        assert len(l) == len(self.attributes), \
            "Sparse data not supported."
        # Convert dict to list.
        #l = dict((k,v) for k,v in l.iteritems())
        # Confirm complete feature name overlap.
        assert set(self.esc(a) for a in l) == \
            set(self.esc(a) for a in self.attributes)
        l = [l[name] for name in self.attributes]
    else:
        # Otherwise, confirm list.
        assert isinstance(l, (tuple, list))
    if len(l) != len(self.attributes):
        print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
        return
    # Coerce each value according to the attribute's declared type.
    # NOTE(review): TYPE_DATE values match none of the branches below,
    # so date values are silently dropped from datum -- confirm.
    datum = []
    for n, v in zip(self.attributes, l):
        at = self.attribute_types[n]
        if v == MISSING:
            datum.append(v)
        elif at == TYPE_INTEGER:
            datum.append(int(v))
        elif at in (TYPE_NUMERIC, TYPE_REAL):
            datum.append(Decimal(str(v)))
        elif at == TYPE_STRING:
            datum.append(v)
        elif at == TYPE_NOMINAL:
            if v in self.attribute_data[n]:
                datum.append(v)
            else:
                raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
    if self.fout:
        # If we're streaming out data, then don't even bother saving it to
        # memory and just flush it out to disk instead.
        line_str = self.write_line(datum)
        if line_str:
            print(line_str, file=self.fout)
        self.fout.flush()
    else:
        self.data.append(datum)
def __print_warning(self, msg):
    """Print *msg* prefixed with the current parser line number."""
    print('Warning (line %d): %s' % (self.lineno, msg))
def dump(self):
    """Print an overview of the ARFF file."""
    print("Relation " + self.relation)
    print(" With attributes")
    for name in self.attributes:
        attr_type = self.attribute_types[name]
        if attr_type == TYPE_NOMINAL:
            print(" %s of type nominal with values %s" % (name, ', '.join(self.attribute_data[name])))
        else:
            print(" %s of type %s" % (name, attr_type))
    for row in self.data:
        print(row)
def set_class(self, name):
    """Move *name* to the end of the attribute list (the class slot)."""
    assert name in self.attributes
    position = self.attributes.index(name)
    self.attributes.append(self.attributes.pop(position))
def set_nominal_values(self, name, values):
    """Merge *values* into the allowed value set of nominal attribute *name*."""
    assert name in self.attributes
    assert self.attribute_types[name] == TYPE_NOMINAL
    current = self.attribute_data.setdefault(name, set())
    merged = set(current)
    merged.update(values)
    self.attribute_data[name] = merged
def alphabetize_attributes(self):
    """
    Orders attribute names alphabetically, except for the class
    attribute, which is kept last.
    """
    def sort_key(attr_name):
        # (False, ...) sorts before (True, ...), so the class goes last.
        return (attr_name == self.class_attr_name, attr_name)
    self.attributes.sort(key=sort_key)
def append(self, line, schema_only=False, update_schema=True):
    """Add a data row to the file.

    *line* may be a dict keyed by attribute name (values optionally
    wrapped in Value instances) or a flat list/tuple whose order matches
    self.attributes. With schema_only=True only the schema is updated
    and the row is not stored. With update_schema=False, dict rows skip
    schema validation/extension entirely.

    Fix: removed a leftover debug print that wrote
    "prior_type: ..." to stdout for every raw (non-Value) value.
    """
    schema_change = False
    if isinstance(line, dict):
        # Validate line types against schema.
        if update_schema:
            for k, v in list(line.items()):
                prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
                if not isinstance(v, Value):
                    # Wrap raw values in the Value class of their known type.
                    if v == MISSING:
                        v = Str(v)
                    else:
                        v = TYPE_TO_CLASS[prior_type](v)
                if v.value != MISSING:
                    assert prior_type == v.c_type, \
                        ('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
                if k not in self.attribute_types:
                    if self.fout:
                        # Remove feature that violates the schema
                        # during streaming.
                        if k in line:
                            del line[k]
                    else:
                        self.attribute_types[k] = v.c_type
                        self.attributes.append(k)
                        schema_change = True
                if isinstance(v, Nominal):
                    if self.fout:
                        # Remove feature that violates the schema
                        # during streaming.
                        if k not in self.attributes:
                            if k in line:
                                del line[k]
                        elif v.value not in self.attribute_data[k]:
                            if k in line:
                                del line[k]
                    else:
                        self.attribute_data.setdefault(k, set())
                        if v.value not in self.attribute_data[k]:
                            self.attribute_data[k].add(v.value)
                            schema_change = True
                if v.cls:
                    # Track which attribute is the class; only one allowed.
                    if self.class_attr_name is None:
                        self.class_attr_name = k
                    else:
                        assert self.class_attr_name == k, \
                            ('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
        # Ensure the class attribute is the last one listed,
        # as that's assumed to be the class unless otherwise specified.
        if self.class_attr_name:
            try:
                self.attributes.remove(self.class_attr_name)
                self.attributes.append(self.class_attr_name)
            except ValueError:
                pass
        if schema_change:
            assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
        if not schema_only:
            # Append line to data set.
            if self.fout:
                line_str = self.write_line(line)
                if line_str:
                    print(line_str, file=self.fout)
            else:
                self.data.append(line)
    else:
        assert len(line) == len(self.attributes)
        self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.get_attribute_value
|
python
|
def get_attribute_value(self, name, index):
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
|
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L320-L342
| null |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
    """
    Returns the value associated with the given value index
    of the attribute with the given name.

    Numeric attributes return *index* itself coerced to int/Decimal.
    Nominal attributes expect an "index:value" string (as emitted by
    Weka predictions) and return the value part, or None when the value
    is the missing marker.
    """
    if index == MISSING:
        return
    elif self.attribute_types[name] in NUMERIC_TYPES:
        at = self.attribute_types[name]
        if at == TYPE_INTEGER:
            return int(index)
        return Decimal(str(index))
    else:
        assert self.attribute_types[name] == TYPE_NOMINAL
        # cls_index is parsed but unused; only the value part matters here.
        cls_index, cls_value = index.split(':')
        #return self.attribute_data[name][index-1]
        if cls_value != MISSING:
            assert cls_value in self.attribute_data[name], \
                'Predicted value "%s" but only values %s are allowed.' \
                % (cls_value, ', '.join(self.attribute_data[name]))
            return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
    """
    Load an ARFF File from a file.

    If schema_only is True, only the header is parsed and the filename
    is not recorded on the returned instance.

    Fix: use a context manager so the file handle is closed even when
    reading or parsing raises (the original leaked the handle on error).
    """
    with open(filename) as fin:
        s = fin.read()
    a = cls.parse(s, schema_only=schema_only)
    if not schema_only:
        a._filename = filename
    return a
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def copy(self, schema_only=False):
    """
    Return a deep copy of this instance.

    When schema_only is True, the comment and data are left at their
    empty defaults in the returned object.
    """
    clone = type(self)()
    clone.relation = self.relation
    clone.attributes = list(self.attributes)
    clone.attribute_types = dict(self.attribute_types)
    clone.attribute_data = dict(self.attribute_data)
    if not schema_only:
        clone.comment = list(self.comment)
        clone.data = copy.deepcopy(self.data)
    return clone
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
"""
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
"""
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
def close_stream(self):
"""
Terminates an open stream and returns the filename
of the file containing the streamed data.
"""
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
def save(self, filename=None):
    """
    Save the ARFF structure to a file.

    Defaults to the filename this instance was loaded from.

    Fix: use a context manager so the handle is closed even when
    self.write() or the disk write raises.
    """
    filename = filename or self._filename
    with open(filename, 'w') as fout:
        fout.write(self.write())
def write_line(self, d, fmt=SPARSE):
    """
    Convert a single data row *d* (list/tuple or dict) to an ARFF line.

    Returns None when the line should be skipped (nothing to write, or
    only a missing class value in sparse format).

    Fixes: (1) a date attribute declared without a format stores None in
    attribute_data, so dict.get's default never applied — fall back with
    `or`; (2) "Uknown" typo in the unknown-format error message.
    """
    def smart_quote(s):
        # Quote values containing spaces, unless already double-quoted.
        if isinstance(s, basestring) and ' ' in s and s[0] != '"':
            s = '"%s"' % s
        return s
    if fmt == DENSE:
        #TODO:fix
        assert not isinstance(d, dict), NotImplemented
        line = []
        for e, a in zip(d, self.attributes):
            at = self.attribute_types[a]
            if at in NUMERIC_TYPES:
                line.append(str(e))
            elif at == TYPE_STRING:
                line.append(self.esc(e))
            elif at == TYPE_NOMINAL:
                line.append(e)
            else:
                raise Exception("Type " + at + " not supported for writing!")
        s = ','.join(map(str, line))
        return s
    elif fmt == SPARSE:
        line = []
        # Convert flat row into dictionary.
        if isinstance(d, (list, tuple)):
            d = dict(zip(self.attributes, d))
        # Wrap raw values in the Value class matching their attribute type.
        for k in d:
            at = self.attribute_types.get(k)
            if isinstance(d[k], Value):
                continue
            elif d[k] == MISSING:
                d[k] = Str(d[k])
            elif at in (TYPE_NUMERIC, TYPE_REAL):
                d[k] = Num(d[k])
            elif at == TYPE_STRING:
                d[k] = Str(d[k])
            elif at == TYPE_INTEGER:
                d[k] = Int(d[k])
            elif at == TYPE_NOMINAL:
                d[k] = Nom(d[k])
            elif at == TYPE_DATE:
                d[k] = Date(d[k])
            else:
                raise Exception('Unknown type: %s' % at)
        for i, name in enumerate(self.attributes):
            v = d.get(name)
            if v is None:
                # Absent attribute: sparse format simply omits it.
                continue
            elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
                v = MISSING
            elif isinstance(v, String):
                v = '"%s"' % v.value
            elif isinstance(v, Date):
                # BUGFIX: attribute_data may hold None for a date attribute
                # declared without an explicit format string.
                date_format = self.attribute_data.get(name) or DEFAULT_DATE_FORMAT
                date_format = convert_weka_to_py_date_pattern(date_format)
                if isinstance(v.value, basestring):
                    _value = dateutil.parser.parse(v.value)
                else:
                    assert isinstance(v.value, (date, datetime))
                    _value = v.value
                v.value = v = _value.strftime(date_format)
            elif isinstance(v, Value):
                v = v.value
            if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
                # Silently drop nominal values outside the declared set.
                pass
            else:
                line.append('%i %s' % (i, smart_quote(v)))
        if len(line) == 1 and MISSING in line[-1]:
            # Skip lines with nothing other than a missing class.
            return
        elif not line:
            # Don't write blank lines.
            return
        return '{' + (', '.join(line)) + '}'
    else:
        raise Exception('Unknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
    """
    Write the "@attribute" declarations to *fout*; when *fout* is None,
    return them as a string instead.

    Fix: a date attribute declared without a format stores None in
    attribute_data, so dict.get's default argument never applied and
    the header emitted the literal string "None" as the format. Fall
    back with `or` instead.
    """
    close = False
    if fout is None:
        close = True
        fout = StringIO()
    for a in self.attributes:
        at = self.attribute_types[a]
        if at == TYPE_INTEGER:
            print("@attribute " + self.esc(a) + " integer", file=fout)
        elif at in (TYPE_NUMERIC, TYPE_REAL):
            print("@attribute " + self.esc(a) + " numeric", file=fout)
        elif at == TYPE_STRING:
            print("@attribute " + self.esc(a) + " string", file=fout)
        elif at == TYPE_NOMINAL:
            # Sort declared values (minus the missing marker) so the
            # emitted header is stable.
            nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
            nom_vals = sorted(nom_vals)
            print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
        elif at == TYPE_DATE:
            # https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
            print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a) or DEFAULT_DATE_FORMAT), file=fout)
        else:
            raise Exception("Type " + at + " not supported for writing!")
    if isinstance(fout, StringIO) and close:
        return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
    """
    Quote a string for ARFF output.

    NOTE(review): despite the historical description ("if it contains
    spaces"), this always wraps *s* in single quotes, collapsing a
    doubled quote at either boundary so already-quoted strings are not
    double-wrapped.
    """
    return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
    """
    Define a new attribute. atype has to be one of 'integer', 'real',
    'numeric', 'string', 'date' or 'nominal'.

    For nominal attributes, pass the possible values as data.
    For date attributes, pass the format as data.

    Fix: validate the type before mutating, so a bad atype no longer
    leaves a dangling name in self.attributes with no type/data entry.
    """
    assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
    self.attributes.append(name)
    self.attribute_types[name] = atype
    self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def dump(self):
"""Print an overview of the ARFF file."""
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.load
|
python
|
def load(cls, filename, schema_only=False):
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
|
Load an ARFF File from a file.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L357-L367
|
[
"def parse(cls, s, schema_only=False):\n \"\"\"\n Parse an ARFF File already loaded into a string.\n \"\"\"\n a = cls()\n a.state = 'comment'\n a.lineno = 1\n for l in s.splitlines():\n a.parseline(l)\n a.lineno += 1\n if schema_only and a.state == 'data':\n # Don't parse data if we're only loading the schema.\n break\n return a\n"
] |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def copy(self, schema_only=False):
"""
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
"""
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
"""
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
"""
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
def close_stream(self):
"""
Terminates an open stream and returns the filename
of the file containing the streamed data.
"""
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
def save(self, filename=None):
"""
Save an arff structure to a file.
"""
filename = filename or self._filename
o = open(filename, 'w')
o.write(self.write())
o.close()
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
close = False
if fout is None:
close = True
fout = StringIO()
for a in self.attributes:
at = self.attribute_types[a]
if at == TYPE_INTEGER:
print("@attribute " + self.esc(a) + " integer", file=fout)
elif at in (TYPE_NUMERIC, TYPE_REAL):
print("@attribute " + self.esc(a) + " numeric", file=fout)
elif at == TYPE_STRING:
print("@attribute " + self.esc(a) + " string", file=fout)
elif at == TYPE_NOMINAL:
nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
nom_vals = sorted(nom_vals)
print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
elif at == TYPE_DATE:
# https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
else:
raise Exception("Type " + at + " not supported for writing!")
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def dump(self):
"""Print an overview of the ARFF file."""
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.parse
|
python
|
def parse(cls, s, schema_only=False):
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
|
Parse an ARFF File already loaded into a string.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L370-L383
|
[
"def parseline(self, l):\n if self.state == 'comment':\n if l and l[0] == '%':\n self.comment.append(l[2:])\n else:\n self.comment = '\\n'.join(self.comment)\n self.state = 'in_header'\n self.parseline(l)\n elif self.state == 'in_header':\n ll = l.lower()\n if ll.startswith('@relation '):\n self.__parse_relation(l)\n if ll.startswith('@attribute '):\n self.__parse_attribute(l)\n if ll.startswith('@data'):\n self.state = 'data'\n elif self.state == 'data':\n if l and l[0] != '%':\n self._parse_data(l)\n"
] |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def copy(self, schema_only=False):
"""
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
"""
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
"""
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
"""
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
def close_stream(self):
"""
Terminates an open stream and returns the filename
of the file containing the streamed data.
"""
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
def save(self, filename=None):
"""
Save an arff structure to a file.
"""
filename = filename or self._filename
o = open(filename, 'w')
o.write(self.write())
o.close()
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
close = False
if fout is None:
close = True
fout = StringIO()
for a in self.attributes:
at = self.attribute_types[a]
if at == TYPE_INTEGER:
print("@attribute " + self.esc(a) + " integer", file=fout)
elif at in (TYPE_NUMERIC, TYPE_REAL):
print("@attribute " + self.esc(a) + " numeric", file=fout)
elif at == TYPE_STRING:
print("@attribute " + self.esc(a) + " string", file=fout)
elif at == TYPE_NOMINAL:
nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
nom_vals = sorted(nom_vals)
print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
elif at == TYPE_DATE:
# https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
else:
raise Exception("Type " + at + " not supported for writing!")
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def dump(self):
"""Print an overview of the ARFF file."""
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.copy
|
python
|
def copy(self, schema_only=False):
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
|
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L385-L398
| null |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
"""
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
"""
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
def close_stream(self):
"""
Terminates an open stream and returns the filename
of the file containing the streamed data.
"""
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
def save(self, filename=None):
"""
Save an arff structure to a file.
"""
filename = filename or self._filename
o = open(filename, 'w')
o.write(self.write())
o.close()
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
close = False
if fout is None:
close = True
fout = StringIO()
for a in self.attributes:
at = self.attribute_types[a]
if at == TYPE_INTEGER:
print("@attribute " + self.esc(a) + " integer", file=fout)
elif at in (TYPE_NUMERIC, TYPE_REAL):
print("@attribute " + self.esc(a) + " numeric", file=fout)
elif at == TYPE_STRING:
print("@attribute " + self.esc(a) + " string", file=fout)
elif at == TYPE_NOMINAL:
nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
nom_vals = sorted(nom_vals)
print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
elif at == TYPE_DATE:
# https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
else:
raise Exception("Type " + at + " not supported for writing!")
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def dump(self):
"""Print an overview of the ARFF file."""
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.open_stream
|
python
|
def open_stream(self, class_attr_name=None, fn=None):
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
|
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L404-L422
|
[
"def write(self,\n fout=None,\n fmt=SPARSE,\n schema_only=False,\n data_only=False):\n \"\"\"\n Write an arff structure to a string.\n \"\"\"\n assert not (schema_only and data_only), 'Make up your mind.'\n assert fmt in FORMATS, 'Invalid format \"%s\". Should be one of: %s' % (fmt, ', '.join(FORMATS))\n close = False\n if fout is None:\n close = True\n fout = StringIO()\n if not data_only:\n print('% ' + re.sub(\"\\n\", \"\\n% \", '\\n'.join(self.comment)), file=fout)\n print(\"@relation \" + self.relation, file=fout)\n self.write_attributes(fout=fout)\n if not schema_only:\n print(\"@data\", file=fout)\n for d in self.data:\n line_str = self.write_line(d, fmt=fmt)\n if line_str:\n print(line_str, file=fout)\n if isinstance(fout, StringIO) and close:\n return fout.getvalue()\n"
] |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def copy(self, schema_only=False):
"""
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
"""
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
def flush(self):
if self.fout:
self.fout.flush()
def close_stream(self):
"""
Terminates an open stream and returns the filename
of the file containing the streamed data.
"""
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
def save(self, filename=None):
"""
Save an arff structure to a file.
"""
filename = filename or self._filename
o = open(filename, 'w')
o.write(self.write())
o.close()
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
close = False
if fout is None:
close = True
fout = StringIO()
for a in self.attributes:
at = self.attribute_types[a]
if at == TYPE_INTEGER:
print("@attribute " + self.esc(a) + " integer", file=fout)
elif at in (TYPE_NUMERIC, TYPE_REAL):
print("@attribute " + self.esc(a) + " numeric", file=fout)
elif at == TYPE_STRING:
print("@attribute " + self.esc(a) + " string", file=fout)
elif at == TYPE_NOMINAL:
nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
nom_vals = sorted(nom_vals)
print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
elif at == TYPE_DATE:
# https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
else:
raise Exception("Type " + at + " not supported for writing!")
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def dump(self):
"""Print an overview of the ARFF file."""
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.close_stream
|
python
|
def close_stream(self):
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
|
Terminates an open stream and returns the filename
of the file containing the streamed data.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L424-L436
| null |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def copy(self, schema_only=False):
"""
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
"""
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
"""
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
"""
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
def save(self, filename=None):
"""
Save an arff structure to a file.
"""
filename = filename or self._filename
o = open(filename, 'w')
o.write(self.write())
o.close()
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
close = False
if fout is None:
close = True
fout = StringIO()
for a in self.attributes:
at = self.attribute_types[a]
if at == TYPE_INTEGER:
print("@attribute " + self.esc(a) + " integer", file=fout)
elif at in (TYPE_NUMERIC, TYPE_REAL):
print("@attribute " + self.esc(a) + " numeric", file=fout)
elif at == TYPE_STRING:
print("@attribute " + self.esc(a) + " string", file=fout)
elif at == TYPE_NOMINAL:
nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
nom_vals = sorted(nom_vals)
print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
elif at == TYPE_DATE:
# https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
else:
raise Exception("Type " + at + " not supported for writing!")
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
    """
    Parse an "@attribute <name> <type>" declaration line.

    Supported types: integer, real/numeric, string, date (with an
    optional quoted format string) and nominal ("{a,b,c}" value sets).
    """
    # Tokenize into bare identifiers, {...} nominal sets, or quoted names.
    p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
    l = [s.strip() for s in p.findall(l)]
    # l[0] is the "attribute" keyword, l[1] the (possibly quoted) name.
    name = l[1]
    name = STRIP_QUOTES_REGEX.sub('', name)
    atype = l[2]#.lower()
    if atype == TYPE_INTEGER:
        self.define_attribute(name, TYPE_INTEGER)
    elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
        # 'real' and 'numeric' are treated identically.
        self.define_attribute(name, TYPE_NUMERIC)
    elif atype == TYPE_STRING:
        self.define_attribute(name, TYPE_STRING)
    elif atype == TYPE_DATE:
        data = None
        if len(l) >= 4:
            # Optional date format, e.g. @attribute d date "yyyy-MM-dd".
            data = STRIP_QUOTES_REGEX.sub('', l[3])
        self.define_attribute(name, TYPE_DATE, data=data)
    elif atype[0] == '{' and atype[-1] == '}':
        # Nominal: the brace-wrapped, comma-separated value set.
        values = [s.strip() for s in atype[1:-1].split(',')]
        self.define_attribute(name, TYPE_NOMINAL, values)
    else:
        raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
    """
    Parse one data row, given as a raw string, a dict keyed by
    attribute name, or a flat list/tuple of values.

    Sparse string rows ("{index value, ...}") are stored as dicts of
    Value wrappers; dense rows are validated/coerced per attribute
    type and stored as lists. When streaming (self.fout is set),
    dense rows are written straight to disk instead of kept in memory.
    """
    if isinstance(l, basestring):
        l = l.strip()
        if l.startswith('{'):
            # Sparse ARFF row: "{<attr index> <value>, ...}".
            assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
            assert not self.fout, NotImplemented
            dline = {}
            # Split on commas that are not backslash-escaped.
            parts = re.split(r'(?<!\\),', l[1:-1])
            for part in parts:
                index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
                index = int(index)
                if value[0] == value[-1] and value[0] in ('"', "'"):
                    # Strip quotes.
                    value = value[1:-1]
                # TODO:0 or 1-indexed? Weka uses 0-indexing?
                #name = self.attributes[index-1]
                name = self.attributes[index]
                # Wrap in the Value subclass matching the declared type;
                # missing markers always go through the string wrapper.
                ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
                if value == MISSING:
                    dline[name] = Str(value)
                else:
                    dline[name] = ValueClass(value)
            self.data.append(dline)
            return
        else:
            # Convert string to list.
            l = [s.strip() for s in l.split(',')]
    elif isinstance(l, dict):
        assert len(l) == len(self.attributes), \
            "Sparse data not supported."
        # Convert dict to list.
        #l = dict((k,v) for k,v in l.iteritems())
        # Confirm complete feature name overlap.
        assert set(self.esc(a) for a in l) == \
            set(self.esc(a) for a in self.attributes)
        l = [l[name] for name in self.attributes]
    else:
        # Otherwise, confirm list.
        assert isinstance(l, (tuple, list))
    if len(l) != len(self.attributes):
        # Malformed row: warn and drop it rather than raising.
        print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
        return
    datum = []
    for n, v in zip(self.attributes, l):
        at = self.attribute_types[n]
        if v == MISSING:
            # Missing values pass through uncoerced.
            datum.append(v)
        elif at == TYPE_INTEGER:
            datum.append(int(v))
        elif at in (TYPE_NUMERIC, TYPE_REAL):
            # Decimal(str(v)) avoids float repr artifacts.
            datum.append(Decimal(str(v)))
        elif at == TYPE_STRING:
            datum.append(v)
        elif at == TYPE_NOMINAL:
            if v in self.attribute_data[n]:
                datum.append(v)
            else:
                raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
    if self.fout:
        # If we're streaming out data, then don't even bother saving it to
        # memory and just flush it out to disk instead.
        line_str = self.write_line(datum)
        if line_str:
            print(line_str, file=self.fout)
        self.fout.flush()
    else:
        self.data.append(datum)
def __print_warning(self, msg):
    # Prefix the message with the parser's current line number.
    prefix = 'Warning (line %d): ' % self.lineno
    print(prefix + msg)
def dump(self):
    """Print an overview of the ARFF file."""
    print("Relation " + self.relation)
    print(" With attributes")
    for attr in self.attributes:
        atype = self.attribute_types[attr]
        if atype == TYPE_NOMINAL:
            print(" " + attr + " of type nominal with values " + ', '.join(self.attribute_data[attr]))
        else:
            print(" %s of type %s" % (attr, atype))
    for row in self.data:
        print(row)
def set_class(self, name):
    """Make *name* the class attribute by moving it to the end of the list."""
    attrs = self.attributes
    assert name in attrs
    # Weka treats the final attribute as the class by default.
    attrs.remove(name)
    attrs.append(name)
def set_nominal_values(self, name, values):
    """Merge *values* into the allowed value set of nominal attribute *name*."""
    assert name in self.attributes
    assert self.attribute_types[name] == TYPE_NOMINAL
    # Normalize whatever container is stored (or create one) to a set,
    # then merge in the new values.
    merged = set(self.attribute_data.setdefault(name, set()))
    merged.update(values)
    self.attribute_data[name] = merged
def alphabetize_attributes(self):
    """
    Orders attributes names alphabetically, except for the class attribute, which is kept last.
    """
    cls_name = self.class_attr_name
    # Sorting on (is_class, name) pushes the class attribute to the end.
    self.attributes.sort(key=lambda attr: (attr == cls_name, attr))
def append(self, line, schema_only=False, update_schema=True):
    """
    Append one row to the data set.

    `line` may be a dict of {attribute name: value or Value wrapper}
    (schema is validated and, if `update_schema`, extended), or a
    flat list/tuple which is delegated to _parse_data. With
    `schema_only`, only the schema is updated and the row is dropped.
    When streaming (self.fout set), values that would require a schema
    change are silently removed from the row instead.
    """
    schema_change = False
    if isinstance(line, dict):
        # Validate line types against schema.
        if update_schema:
            for k, v in list(line.items()):
                # Type previously recorded for this attribute, falling
                # back to the wrapper's own declared type if new.
                prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
                if not isinstance(v, Value):
                    if v == MISSING:
                        v = Str(v)
                    else:
                        # NOTE(review): leftover debug print? Writes to
                        # stdout on every raw (non-Value) cell -- TODO
                        # confirm whether this should be removed.
                        print('prior_type:', prior_type, k, v)
                        v = TYPE_TO_CLASS[prior_type](v)
                if v.value != MISSING:
                    assert prior_type == v.c_type, \
                        ('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
                if k not in self.attribute_types:
                    if self.fout:
                        # Remove feature that violates the schema
                        # during streaming.
                        if k in line:
                            del line[k]
                    else:
                        self.attribute_types[k] = v.c_type
                        self.attributes.append(k)
                        schema_change = True
                if isinstance(v, Nominal):
                    if self.fout:
                        # Remove feature that violates the schema
                        # during streaming.
                        if k not in self.attributes:
                            if k in line:
                                del line[k]
                        elif v.value not in self.attribute_data[k]:
                            if k in line:
                                del line[k]
                    else:
                        # Grow the nominal value set as new values appear.
                        self.attribute_data.setdefault(k, set())
                        if v.value not in self.attribute_data[k]:
                            self.attribute_data[k].add(v.value)
                            schema_change = True
                if v.cls:
                    if self.class_attr_name is None:
                        self.class_attr_name = k
                    else:
                        assert self.class_attr_name == k, \
                            ('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
        # Ensure the class attribute is the last one listed,
        # as that's assumed to be the class unless otherwise specified.
        if self.class_attr_name:
            try:
                self.attributes.remove(self.class_attr_name)
                self.attributes.append(self.class_attr_name)
            except ValueError:
                pass
        if schema_change:
            assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
        if not schema_only:
            # Append line to data set.
            if self.fout:
                line_str = self.write_line(line)
                if line_str:
                    print(line_str, file=self.fout)
            else:
                self.data.append(line)
    else:
        # Non-dict rows must be full-width lists/tuples.
        assert len(line) == len(self.attributes)
        self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.save
|
python
|
def save(self, filename=None):
filename = filename or self._filename
o = open(filename, 'w')
o.write(self.write())
o.close()
|
Save an arff structure to a file.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L438-L445
|
[
"def write(self,\n fout=None,\n fmt=SPARSE,\n schema_only=False,\n data_only=False):\n \"\"\"\n Write an arff structure to a string.\n \"\"\"\n assert not (schema_only and data_only), 'Make up your mind.'\n assert fmt in FORMATS, 'Invalid format \"%s\". Should be one of: %s' % (fmt, ', '.join(FORMATS))\n close = False\n if fout is None:\n close = True\n fout = StringIO()\n if not data_only:\n print('% ' + re.sub(\"\\n\", \"\\n% \", '\\n'.join(self.comment)), file=fout)\n print(\"@relation \" + self.relation, file=fout)\n self.write_attributes(fout=fout)\n if not schema_only:\n print(\"@data\", file=fout)\n for d in self.data:\n line_str = self.write_line(d, fmt=fmt)\n if line_str:\n print(line_str, file=fout)\n if isinstance(fout, StringIO) and close:\n return fout.getvalue()\n"
] |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
    """Construct an empty ARFF structure.

    `schema`, if given, is an iterable of (name, data) pairs where
    `data` is either a list/tuple of nominal values or a type name.
    """
    self.relation = relation
    self.clear()
    # Load schema.
    if schema:
        for name, data in schema:
            # Drop surrounding quotes from the attribute name.
            name = STRIP_QUOTES_REGEX.sub('', name)
            self.attributes.append(name)
            if isinstance(data, (tuple, list)):
                # A sequence is taken as a nominal attribute's value set.
                self.attribute_types[name] = TYPE_NOMINAL
                self.attribute_data[name] = set(data)
            else:
                # Otherwise `data` is the type name itself.
                self.attribute_types[name] = data
                self.attribute_data[name] = None
def clear(self):
    """Reset the instance to an empty, freshly-constructed state."""
    self.attributes = []          # ordered attribute names
    self.attribute_types = {}     # name -> type string
    self.attribute_data = {}      # name -> nominal values / date format
    self._filename = None
    self.comment = []
    self.data = []
    self.lineno = 0
    self.fout = None
    self.class_attr_name = None
def get_attribute_value(self, name, index):
    """
    Returns the value associated with the given value index
    of the attribute with the given name.
    This is only applicable for nominal and string types.
    """
    if index == MISSING:
        # Missing values have no concrete representation.
        return
    elif self.attribute_types[name] in NUMERIC_TYPES:
        at = self.attribute_types[name]
        if at == TYPE_INTEGER:
            return int(index)
        # Decimal(str(...)) avoids binary-float repr artifacts.
        return Decimal(str(index))
    else:
        assert self.attribute_types[name] == TYPE_NOMINAL
        # Nominal predictions arrive as "<class index>:<class value>";
        # only the value part is used.
        cls_index, cls_value = index.split(':')
        #return self.attribute_data[name][index-1]
        if cls_value != MISSING:
            assert cls_value in self.attribute_data[name], \
                'Predicted value "%s" but only values %s are allowed.' \
                % (cls_value, ', '.join(self.attribute_data[name]))
        return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def parse(cls, s, schema_only=False):
    """
    Parse an ARFF File already loaded into a string.
    """
    a = cls()
    # The line parser is a state machine starting in the leading
    # %-comment block; lineno is 1-based for warning messages.
    a.state = 'comment'
    a.lineno = 1
    for l in s.splitlines():
        a.parseline(l)
        a.lineno += 1
        if schema_only and a.state == 'data':
            # Don't parse data if we're only loading the schema.
            break
    return a
def copy(self, schema_only=False):
    """
    Creates a deepcopy of the instance.
    If schema_only is True, the data will be excluded from the copy.
    """
    o = type(self)()
    o.relation = self.relation
    # Schema containers are shallow-copied: nested nominal value sets
    # are shared between the original and the copy.
    o.attributes = list(self.attributes)
    o.attribute_types = self.attribute_types.copy()
    o.attribute_data = self.attribute_data.copy()
    if not schema_only:
        o.comment = list(self.comment)
        # Rows themselves are deep-copied.
        o.data = copy.deepcopy(self.data)
    return o
def flush(self):
    """Flush the underlying output stream, if one is open."""
    stream = self.fout
    if stream:
        stream.flush()
def open_stream(self, class_attr_name=None, fn=None):
    """
    Save an arff structure to a file, leaving the file object
    open for writing of new data samples.
    This prevents you from directly accessing the data via Python,
    but when generating a huge file, this prevents all your data
    from being stored in memory.
    """
    if fn:
        self.fout_fn = fn
    else:
        # No target given: stream into a fresh temp file; close the
        # raw descriptor since we reopen by name below.
        fd, self.fout_fn = tempfile.mkstemp()
        os.close(fd)
    self.fout = open(self.fout_fn, 'w')
    if class_attr_name:
        self.class_attr_name = class_attr_name
    # Emit the schema immediately, then any rows already in memory.
    self.write(fout=self.fout, schema_only=True)
    self.write(fout=self.fout, data_only=True)
    self.fout.flush()
def close_stream(self):
    """
    Terminates an open stream and returns the filename
    of the file containing the streamed data.

    Returns None if no stream is open.
    """
    if not self.fout:
        return
    stream, streamed_fn = self.fout, self.fout_fn
    stream.flush()
    stream.close()
    self.fout = None
    self.fout_fn = None
    return streamed_fn
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
    """
    Write the @attribute declarations to *fout*.

    If *fout* is None, a StringIO buffer is created and its final
    contents are returned as a string; otherwise returns None.
    """
    close = False
    if fout is None:
        close = True
        fout = StringIO()
    for a in self.attributes:
        at = self.attribute_types[a]
        if at == TYPE_INTEGER:
            print("@attribute " + self.esc(a) + " integer", file=fout)
        elif at in (TYPE_NUMERIC, TYPE_REAL):
            print("@attribute " + self.esc(a) + " numeric", file=fout)
        elif at == TYPE_STRING:
            print("@attribute " + self.esc(a) + " string", file=fout)
        elif at == TYPE_NOMINAL:
            # Missing markers are not legal members of a nominal set;
            # sort for a deterministic declaration order.
            nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
            nom_vals = sorted(nom_vals)
            print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
        elif at == TYPE_DATE:
            # https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
            # BUG FIX: dates declared without a format store an explicit
            # None in attribute_data, and dict.get(key, default) returns
            # that stored None (default only applies to missing keys),
            # which used to render as: date "None".
            date_format = self.attribute_data.get(a) or DEFAULT_DATE_FORMAT
            print('@attribute %s date "%s"' % (self.esc(a), date_format), file=fout)
        else:
            raise Exception("Type " + at + " not supported for writing!")
    if isinstance(fout, StringIO) and close:
        return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def dump(self):
"""Print an overview of the ARFF file."""
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.write_line
|
python
|
def write_line(self, d, fmt=SPARSE):
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
|
Converts a single data line to a string.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L447-L532
|
[
"def convert_weka_to_py_date_pattern(p):\n \"\"\"\n Converts the date format pattern used by Weka to the date format pattern used by Python's datetime.strftime().\n \"\"\"\n # https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior\n # https://www.cs.waikato.ac.nz/ml/weka/arff.html\n p = p.replace('yyyy', r'%Y')\n p = p.replace('MM', r'%m')\n p = p.replace('dd', r'%d')\n p = p.replace('HH', r'%H')\n p = p.replace('mm', r'%M')\n p = p.replace('ss', r'%S')\n return p\n",
"def esc(self, s):\n \"\"\"\n Escape a string if it contains spaces.\n \"\"\"\n return (\"\\'\" + s + \"\\'\").replace(\"''\", \"'\")\n",
"def smart_quote(s):\n if isinstance(s, basestring) and ' ' in s and s[0] != '\"':\n s = '\"%s\"' % s\n return s\n"
] |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def copy(self, schema_only=False):
"""
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
"""
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
"""
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
"""
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
def close_stream(self):
"""
Terminates an open stream and returns the filename
of the file containing the streamed data.
"""
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
def save(self, filename=None):
    """
    Save an arff structure to a file.

    Parameters:
        filename: destination path; defaults to the path the
            structure was originally loaded from (self._filename).
    """
    filename = filename or self._filename
    # BUG FIX: use a context manager so the handle is closed even if
    # self.write() raises (the original leaked it on error).
    with open(filename, 'w') as fh:
        fh.write(self.write())
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
close = False
if fout is None:
close = True
fout = StringIO()
for a in self.attributes:
at = self.attribute_types[a]
if at == TYPE_INTEGER:
print("@attribute " + self.esc(a) + " integer", file=fout)
elif at in (TYPE_NUMERIC, TYPE_REAL):
print("@attribute " + self.esc(a) + " numeric", file=fout)
elif at == TYPE_STRING:
print("@attribute " + self.esc(a) + " string", file=fout)
elif at == TYPE_NOMINAL:
nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
nom_vals = sorted(nom_vals)
print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
elif at == TYPE_DATE:
# https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
else:
raise Exception("Type " + at + " not supported for writing!")
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def dump(self):
"""Print an overview of the ARFF file."""
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.write
|
python
|
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
|
Write an arff structure to a string.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L559-L584
|
[
" def write_line(self, d, fmt=SPARSE):\n \"\"\"\n Converts a single data line to a string.\n \"\"\"\n\n def smart_quote(s):\n if isinstance(s, basestring) and ' ' in s and s[0] != '\"':\n s = '\"%s\"' % s\n return s\n\n if fmt == DENSE:\n #TODO:fix\n assert not isinstance(d, dict), NotImplemented\n line = []\n for e, a in zip(d, self.attributes):\n at = self.attribute_types[a]\n if at in NUMERIC_TYPES:\n line.append(str(e))\n elif at == TYPE_STRING:\n line.append(self.esc(e))\n elif at == TYPE_NOMINAL:\n line.append(e)\n else:\n raise Exception(\"Type \" + at + \" not supported for writing!\")\n s = ','.join(map(str, line))\n return s\n elif fmt == SPARSE:\n line = []\n\n # Convert flat row into dictionary.\n if isinstance(d, (list, tuple)):\n d = dict(zip(self.attributes, d))\n for k in d:\n at = self.attribute_types.get(k)\n if isinstance(d[k], Value):\n continue\n elif d[k] == MISSING:\n d[k] = Str(d[k])\n elif at in (TYPE_NUMERIC, TYPE_REAL):\n d[k] = Num(d[k])\n elif at == TYPE_STRING:\n d[k] = Str(d[k])\n elif at == TYPE_INTEGER:\n d[k] = Int(d[k])\n elif at == TYPE_NOMINAL:\n d[k] = Nom(d[k])\n elif at == TYPE_DATE:\n d[k] = Date(d[k])\n else:\n raise Exception('Unknown type: %s' % at)\n\n for i, name in enumerate(self.attributes):\n v = d.get(name)\n if v is None:\n# print 'Skipping attribute with None value:', name\n continue\n elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):\n v = MISSING\n elif isinstance(v, String):\n v = '\"%s\"' % v.value\n elif isinstance(v, Date):\n date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)\n date_format = convert_weka_to_py_date_pattern(date_format)\n if isinstance(v.value, basestring):\n _value = dateutil.parser.parse(v.value)\n else:\n assert isinstance(v.value, (date, datetime))\n _value = v.value\n v.value = v = _value.strftime(date_format)\n elif isinstance(v, Value):\n v = v.value\n\n if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, 
self.attribute_data[name]):\n pass\n else:\n line.append('%i %s' % (i, smart_quote(v)))\n\n if len(line) == 1 and MISSING in line[-1]:\n # Skip lines with nothing other than a missing class.\n return\n elif not line:\n # Don't write blank lines.\n return\n return '{' + (', '.join(line)) + '}'\n else:\n raise Exception('Uknown format: %s' % (fmt,))\n",
"def write_attributes(self, fout=None):\n close = False\n if fout is None:\n close = True\n fout = StringIO()\n for a in self.attributes:\n at = self.attribute_types[a]\n if at == TYPE_INTEGER:\n print(\"@attribute \" + self.esc(a) + \" integer\", file=fout)\n elif at in (TYPE_NUMERIC, TYPE_REAL):\n print(\"@attribute \" + self.esc(a) + \" numeric\", file=fout)\n elif at == TYPE_STRING:\n print(\"@attribute \" + self.esc(a) + \" string\", file=fout)\n elif at == TYPE_NOMINAL:\n nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]\n nom_vals = sorted(nom_vals)\n print(\"@attribute \" + self.esc(a) + \" {\" + ','.join(map(str, nom_vals)) + \"}\", file=fout)\n elif at == TYPE_DATE:\n # https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes\n print('@attribute %s date \"%s\"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)\n else:\n raise Exception(\"Type \" + at + \" not supported for writing!\")\n if isinstance(fout, StringIO) and close:\n return fout.getvalue()\n"
] |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def copy(self, schema_only=False):
"""
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
"""
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
"""
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
"""
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
def close_stream(self):
"""
Terminates an open stream and returns the filename
of the file containing the streamed data.
"""
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
def save(self, filename=None):
"""
Save an arff structure to a file.
"""
filename = filename or self._filename
o = open(filename, 'w')
o.write(self.write())
o.close()
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
close = False
if fout is None:
close = True
fout = StringIO()
for a in self.attributes:
at = self.attribute_types[a]
if at == TYPE_INTEGER:
print("@attribute " + self.esc(a) + " integer", file=fout)
elif at in (TYPE_NUMERIC, TYPE_REAL):
print("@attribute " + self.esc(a) + " numeric", file=fout)
elif at == TYPE_STRING:
print("@attribute " + self.esc(a) + " string", file=fout)
elif at == TYPE_NOMINAL:
nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
nom_vals = sorted(nom_vals)
print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
elif at == TYPE_DATE:
# https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
else:
raise Exception("Type " + at + " not supported for writing!")
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def dump(self):
"""Print an overview of the ARFF file."""
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.define_attribute
|
python
|
def define_attribute(self, name, atype, data=None):
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
|
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L592-L601
| null |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def copy(self, schema_only=False):
"""
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
"""
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
"""
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
"""
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
def close_stream(self):
"""
Terminates an open stream and returns the filename
of the file containing the streamed data.
"""
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
def save(self, filename=None):
"""
Save an arff structure to a file.
"""
filename = filename or self._filename
o = open(filename, 'w')
o.write(self.write())
o.close()
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
close = False
if fout is None:
close = True
fout = StringIO()
for a in self.attributes:
at = self.attribute_types[a]
if at == TYPE_INTEGER:
print("@attribute " + self.esc(a) + " integer", file=fout)
elif at in (TYPE_NUMERIC, TYPE_REAL):
print("@attribute " + self.esc(a) + " numeric", file=fout)
elif at == TYPE_STRING:
print("@attribute " + self.esc(a) + " string", file=fout)
elif at == TYPE_NOMINAL:
nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
nom_vals = sorted(nom_vals)
print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
elif at == TYPE_DATE:
# https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
else:
raise Exception("Type " + at + " not supported for writing!")
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def dump(self):
"""Print an overview of the ARFF file."""
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.dump
|
python
|
def dump(self):
print("Relation " + self.relation)
print(" With attributes")
for n in self.attributes:
if self.attribute_types[n] != TYPE_NOMINAL:
print(" %s of type %s" % (n, self.attribute_types[n]))
else:
print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
for d in self.data:
print(d)
|
Print an overview of the ARFF file.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L722-L732
| null |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def copy(self, schema_only=False):
"""
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
"""
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
"""
Save an arff structure to a file, leaving the file object
open for writing of new data samples.
This prevents you from directly accessing the data via Python,
but when generating a huge file, this prevents all your data
from being stored in memory.
"""
if fn:
self.fout_fn = fn
else:
fd, self.fout_fn = tempfile.mkstemp()
os.close(fd)
self.fout = open(self.fout_fn, 'w')
if class_attr_name:
self.class_attr_name = class_attr_name
self.write(fout=self.fout, schema_only=True)
self.write(fout=self.fout, data_only=True)
self.fout.flush()
def close_stream(self):
"""
Terminates an open stream and returns the filename
of the file containing the streamed data.
"""
if self.fout:
fout = self.fout
fout_fn = self.fout_fn
self.fout.flush()
self.fout.close()
self.fout = None
self.fout_fn = None
return fout_fn
def save(self, filename=None):
"""
Save an arff structure to a file.
"""
filename = filename or self._filename
o = open(filename, 'w')
o.write(self.write())
o.close()
def write_line(self, d, fmt=SPARSE):
"""
Converts a single data line to a string.
"""
def smart_quote(s):
if isinstance(s, basestring) and ' ' in s and s[0] != '"':
s = '"%s"' % s
return s
if fmt == DENSE:
#TODO:fix
assert not isinstance(d, dict), NotImplemented
line = []
for e, a in zip(d, self.attributes):
at = self.attribute_types[a]
if at in NUMERIC_TYPES:
line.append(str(e))
elif at == TYPE_STRING:
line.append(self.esc(e))
elif at == TYPE_NOMINAL:
line.append(e)
else:
raise Exception("Type " + at + " not supported for writing!")
s = ','.join(map(str, line))
return s
elif fmt == SPARSE:
line = []
# Convert flat row into dictionary.
if isinstance(d, (list, tuple)):
d = dict(zip(self.attributes, d))
for k in d:
at = self.attribute_types.get(k)
if isinstance(d[k], Value):
continue
elif d[k] == MISSING:
d[k] = Str(d[k])
elif at in (TYPE_NUMERIC, TYPE_REAL):
d[k] = Num(d[k])
elif at == TYPE_STRING:
d[k] = Str(d[k])
elif at == TYPE_INTEGER:
d[k] = Int(d[k])
elif at == TYPE_NOMINAL:
d[k] = Nom(d[k])
elif at == TYPE_DATE:
d[k] = Date(d[k])
else:
raise Exception('Unknown type: %s' % at)
for i, name in enumerate(self.attributes):
v = d.get(name)
if v is None:
# print 'Skipping attribute with None value:', name
continue
elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
v = MISSING
elif isinstance(v, String):
v = '"%s"' % v.value
elif isinstance(v, Date):
date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
date_format = convert_weka_to_py_date_pattern(date_format)
if isinstance(v.value, basestring):
_value = dateutil.parser.parse(v.value)
else:
assert isinstance(v.value, (date, datetime))
_value = v.value
v.value = v = _value.strftime(date_format)
elif isinstance(v, Value):
v = v.value
if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
pass
else:
line.append('%i %s' % (i, smart_quote(v)))
if len(line) == 1 and MISSING in line[-1]:
# Skip lines with nothing other than a missing class.
return
elif not line:
# Don't write blank lines.
return
return '{' + (', '.join(line)) + '}'
else:
raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
close = False
if fout is None:
close = True
fout = StringIO()
for a in self.attributes:
at = self.attribute_types[a]
if at == TYPE_INTEGER:
print("@attribute " + self.esc(a) + " integer", file=fout)
elif at in (TYPE_NUMERIC, TYPE_REAL):
print("@attribute " + self.esc(a) + " numeric", file=fout)
elif at == TYPE_STRING:
print("@attribute " + self.esc(a) + " string", file=fout)
elif at == TYPE_NOMINAL:
nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
nom_vals = sorted(nom_vals)
print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
elif at == TYPE_DATE:
# https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
else:
raise Exception("Type " + at + " not supported for writing!")
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue()
def esc(self, s):
"""
Escape a string if it contains spaces.
"""
return ("\'" + s + "\'").replace("''", "'")
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data
def parseline(self, l):
if self.state == 'comment':
if l and l[0] == '%':
self.comment.append(l[2:])
else:
self.comment = '\n'.join(self.comment)
self.state = 'in_header'
self.parseline(l)
elif self.state == 'in_header':
ll = l.lower()
if ll.startswith('@relation '):
self.__parse_relation(l)
if ll.startswith('@attribute '):
self.__parse_attribute(l)
if ll.startswith('@data'):
self.state = 'data'
elif self.state == 'data':
if l and l[0] != '%':
self._parse_data(l)
def __parse_relation(self, l):
l = l.split()
self.relation = l[1]
def __parse_attribute(self, l):
p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
l = [s.strip() for s in p.findall(l)]
name = l[1]
name = STRIP_QUOTES_REGEX.sub('', name)
atype = l[2]#.lower()
if atype == TYPE_INTEGER:
self.define_attribute(name, TYPE_INTEGER)
elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
self.define_attribute(name, TYPE_NUMERIC)
elif atype == TYPE_STRING:
self.define_attribute(name, TYPE_STRING)
elif atype == TYPE_DATE:
data = None
if len(l) >= 4:
data = STRIP_QUOTES_REGEX.sub('', l[3])
self.define_attribute(name, TYPE_DATE, data=data)
elif atype[0] == '{' and atype[-1] == '}':
values = [s.strip() for s in atype[1:-1].split(',')]
self.define_attribute(name, TYPE_NOMINAL, values)
else:
raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
if isinstance(l, basestring):
l = l.strip()
if l.startswith('{'):
assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
assert not self.fout, NotImplemented
dline = {}
parts = re.split(r'(?<!\\),', l[1:-1])
for part in parts:
index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
index = int(index)
if value[0] == value[-1] and value[0] in ('"', "'"):
# Strip quotes.
value = value[1:-1]
# TODO:0 or 1-indexed? Weka uses 0-indexing?
#name = self.attributes[index-1]
name = self.attributes[index]
ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
if value == MISSING:
dline[name] = Str(value)
else:
dline[name] = ValueClass(value)
self.data.append(dline)
return
else:
# Convert string to list.
l = [s.strip() for s in l.split(',')]
elif isinstance(l, dict):
assert len(l) == len(self.attributes), \
"Sparse data not supported."
# Convert dict to list.
#l = dict((k,v) for k,v in l.iteritems())
# Confirm complete feature name overlap.
assert set(self.esc(a) for a in l) == \
set(self.esc(a) for a in self.attributes)
l = [l[name] for name in self.attributes]
else:
# Otherwise, confirm list.
assert isinstance(l, (tuple, list))
if len(l) != len(self.attributes):
print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
return
datum = []
for n, v in zip(self.attributes, l):
at = self.attribute_types[n]
if v == MISSING:
datum.append(v)
elif at == TYPE_INTEGER:
datum.append(int(v))
elif at in (TYPE_NUMERIC, TYPE_REAL):
datum.append(Decimal(str(v)))
elif at == TYPE_STRING:
datum.append(v)
elif at == TYPE_NOMINAL:
if v in self.attribute_data[n]:
datum.append(v)
else:
raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
if self.fout:
# If we're streaming out data, then don't even bother saving it to
# memory and just flush it out to disk instead.
line_str = self.write_line(datum)
if line_str:
print(line_str, file=self.fout)
self.fout.flush()
else:
self.data.append(datum)
def __print_warning(self, msg):
print(('Warning (line %d): ' % self.lineno) + msg)
def set_class(self, name):
assert name in self.attributes
self.attributes.remove(name)
self.attributes.append(name)
def set_nominal_values(self, name, values):
assert name in self.attributes
assert self.attribute_types[name] == TYPE_NOMINAL
self.attribute_data.setdefault(name, set())
self.attribute_data[name] = set(self.attribute_data[name])
self.attribute_data[name].update(values)
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
def append(self, line, schema_only=False, update_schema=True):
schema_change = False
if isinstance(line, dict):
# Validate line types against schema.
if update_schema:
for k, v in list(line.items()):
prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
if not isinstance(v, Value):
if v == MISSING:
v = Str(v)
else:
print('prior_type:', prior_type, k, v)
v = TYPE_TO_CLASS[prior_type](v)
if v.value != MISSING:
assert prior_type == v.c_type, \
('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
if k not in self.attribute_types:
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k in line:
del line[k]
else:
self.attribute_types[k] = v.c_type
self.attributes.append(k)
schema_change = True
if isinstance(v, Nominal):
if self.fout:
# Remove feature that violates the schema
# during streaming.
if k not in self.attributes:
if k in line:
del line[k]
elif v.value not in self.attribute_data[k]:
if k in line:
del line[k]
else:
self.attribute_data.setdefault(k, set())
if v.value not in self.attribute_data[k]:
self.attribute_data[k].add(v.value)
schema_change = True
if v.cls:
if self.class_attr_name is None:
self.class_attr_name = k
else:
assert self.class_attr_name == k, \
('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
# Ensure the class attribute is the last one listed,
# as that's assumed to be the class unless otherwise specified.
if self.class_attr_name:
try:
self.attributes.remove(self.class_attr_name)
self.attributes.append(self.class_attr_name)
except ValueError:
pass
if schema_change:
assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
if not schema_only:
# Append line to data set.
if self.fout:
line_str = self.write_line(line)
if line_str:
print(line_str, file=self.fout)
else:
self.data.append(line)
else:
assert len(line) == len(self.attributes)
self._parse_data(line)
|
chrisspen/weka
|
weka/arff.py
|
ArffFile.alphabetize_attributes
|
python
|
def alphabetize_attributes(self):
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
|
Orders attributes names alphabetically, except for the class attribute, which is kept last.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L746-L750
| null |
class ArffFile(object):
"""An ARFF File object describes a data set consisting of a number
of data points made up of attributes. The whole data set is called
a 'relation'. Supported attributes are:
- 'numeric': floating point numbers
- 'string': strings
- 'nominal': taking one of a number of possible values
Not all features of ARFF files are supported yet. The most notable
exceptions are:
- no sparse data
- no support for date and relational attributes
Also, parsing of strings might still be a bit brittle.
You can either load or save from files, or write and parse from a
string.
You can also construct an empty ARFF file and then fill in your
data by hand. To define attributes use the define_attribute method.
Attributes are:
- 'relation': name of the relation
- 'attributes': names of the attributes
- 'attribute_types': types of the attributes
- 'attribute_data': additional data, for example for nominal attributes.
- 'comment': the initial comment in the file. Typically contains some
information on the data set.
- 'data': the actual data, by data points.
"""
def __init__(self, relation='', schema=None):
"""Construct an empty ARFF structure."""
self.relation = relation
self.clear()
# Load schema.
if schema:
for name, data in schema:
name = STRIP_QUOTES_REGEX.sub('', name)
self.attributes.append(name)
if isinstance(data, (tuple, list)):
self.attribute_types[name] = TYPE_NOMINAL
self.attribute_data[name] = set(data)
else:
self.attribute_types[name] = data
self.attribute_data[name] = None
def clear(self):
self.attributes = [] # [attr_name, attr_name, ...]
self.attribute_types = dict() # {attr_name:type}
self.attribute_data = dict() # {attr_name:[nominal values]}
self._filename = None
self.comment = []
self.data = []
self.lineno = 0
self.fout = None
self.class_attr_name = None
def get_attribute_value(self, name, index):
"""
Returns the value associated with the given value index
of the attribute with the given name.
This is only applicable for nominal and string types.
"""
if index == MISSING:
return
elif self.attribute_types[name] in NUMERIC_TYPES:
at = self.attribute_types[name]
if at == TYPE_INTEGER:
return int(index)
return Decimal(str(index))
else:
assert self.attribute_types[name] == TYPE_NOMINAL
cls_index, cls_value = index.split(':')
#return self.attribute_data[name][index-1]
if cls_value != MISSING:
assert cls_value in self.attribute_data[name], \
'Predicted value "%s" but only values %s are allowed.' \
% (cls_value, ', '.join(self.attribute_data[name]))
return cls_value
def __len__(self):
return len(self.data)
def __iter__(self):
for d in self.data:
named = dict(zip(
[re.sub(r'^[\'\"]|[\'\"]$', '', _) for _ in self.attributes],
d))
assert len(d) == len(self.attributes)
assert len(d) == len(named)
yield named
@classmethod
def load(cls, filename, schema_only=False):
"""
Load an ARFF File from a file.
"""
o = open(filename)
s = o.read()
a = cls.parse(s, schema_only=schema_only)
if not schema_only:
a._filename = filename
o.close()
return a
@classmethod
def parse(cls, s, schema_only=False):
"""
Parse an ARFF File already loaded into a string.
"""
a = cls()
a.state = 'comment'
a.lineno = 1
for l in s.splitlines():
a.parseline(l)
a.lineno += 1
if schema_only and a.state == 'data':
# Don't parse data if we're only loading the schema.
break
return a
def copy(self, schema_only=False):
"""
Creates a deepcopy of the instance.
If schema_only is True, the data will be excluded from the copy.
"""
o = type(self)()
o.relation = self.relation
o.attributes = list(self.attributes)
o.attribute_types = self.attribute_types.copy()
o.attribute_data = self.attribute_data.copy()
if not schema_only:
o.comment = list(self.comment)
o.data = copy.deepcopy(self.data)
return o
def flush(self):
if self.fout:
self.fout.flush()
def open_stream(self, class_attr_name=None, fn=None):
    """
    Save an arff structure to a file, leaving the file object
    open for writing of new data samples.
    This prevents you from directly accessing the data via Python,
    but when generating a huge file, this prevents all your data
    from being stored in memory.
    """
    if fn:
        self.fout_fn = fn
    else:
        # No target given: stream into a fresh temp file.
        fd, self.fout_fn = tempfile.mkstemp()
        os.close(fd)
    self.fout = open(self.fout_fn, 'w')
    if class_attr_name:
        self.class_attr_name = class_attr_name
    # Emit the schema header once, then the current rows; subsequent
    # rows are appended directly to self.fout by append()/_parse_data().
    self.write(fout=self.fout, schema_only=True)
    self.write(fout=self.fout, data_only=True)
    self.fout.flush()
def close_stream(self):
    """
    Terminate an open stream and return the filename of the file
    containing the streamed data (returns None when no stream is open).
    """
    if not self.fout:
        return None
    stream, path = self.fout, self.fout_fn
    stream.flush()
    stream.close()
    self.fout = None
    self.fout_fn = None
    return path
def save(self, filename=None):
    """
    Serialize the ARFF structure to *filename*, defaulting to the file
    the instance was loaded from.
    """
    filename = filename or self._filename
    # Context manager closes the handle even if self.write() raises
    # mid-serialization (the original leaked it in that case).
    with open(filename, 'w') as fout:
        fout.write(self.write())
def write_line(self, d, fmt=SPARSE):
    """
    Converts a single data line to a string.

    *d* is either a flat row (list/tuple aligned with self.attributes)
    or a dict keyed by attribute name. Returns None when the line
    should be skipped (blank, or only a missing class value).
    """
    def smart_quote(s):
        # Quote values containing spaces, unless already quoted.
        if isinstance(s, basestring) and ' ' in s and s[0] != '"':
            s = '"%s"' % s
        return s
    if fmt == DENSE:
        #TODO:fix
        assert not isinstance(d, dict), NotImplemented
        line = []
        for e, a in zip(d, self.attributes):
            at = self.attribute_types[a]
            if at in NUMERIC_TYPES:
                line.append(str(e))
            elif at == TYPE_STRING:
                line.append(self.esc(e))
            elif at == TYPE_NOMINAL:
                line.append(e)
            else:
                raise Exception("Type " + at + " not supported for writing!")
        s = ','.join(map(str, line))
        return s
    elif fmt == SPARSE:
        line = []
        # Convert flat row into dictionary.
        if isinstance(d, (list, tuple)):
            d = dict(zip(self.attributes, d))
        # Wrap every raw value in its Value subclass per the schema.
        for k in d:
            at = self.attribute_types.get(k)
            if isinstance(d[k], Value):
                continue
            elif d[k] == MISSING:
                d[k] = Str(d[k])
            elif at in (TYPE_NUMERIC, TYPE_REAL):
                d[k] = Num(d[k])
            elif at == TYPE_STRING:
                d[k] = Str(d[k])
            elif at == TYPE_INTEGER:
                d[k] = Int(d[k])
            elif at == TYPE_NOMINAL:
                d[k] = Nom(d[k])
            elif at == TYPE_DATE:
                d[k] = Date(d[k])
            else:
                raise Exception('Unknown type: %s' % at)
        # Emit "index value" pairs in schema order.
        for i, name in enumerate(self.attributes):
            v = d.get(name)
            if v is None:
                # print 'Skipping attribute with None value:', name
                continue
            elif v == MISSING or (isinstance(v, Value) and v.value == MISSING):
                v = MISSING
            elif isinstance(v, String):
                v = '"%s"' % v.value
            elif isinstance(v, Date):
                # Render dates using the attribute's Weka date pattern.
                date_format = self.attribute_data.get(name, DEFAULT_DATE_FORMAT)
                date_format = convert_weka_to_py_date_pattern(date_format)
                if isinstance(v.value, basestring):
                    _value = dateutil.parser.parse(v.value)
                else:
                    assert isinstance(v.value, (date, datetime))
                    _value = v.value
                v.value = v = _value.strftime(date_format)
            elif isinstance(v, Value):
                v = v.value
            # Silently drop nominal values not declared in the schema.
            if v != MISSING and self.attribute_types[name] == TYPE_NOMINAL and str(v) not in map(str, self.attribute_data[name]):
                pass
            else:
                line.append('%i %s' % (i, smart_quote(v)))
        if len(line) == 1 and MISSING in line[-1]:
            # Skip lines with nothing other than a missing class.
            return
        elif not line:
            # Don't write blank lines.
            return
        return '{' + (', '.join(line)) + '}'
    else:
        raise Exception('Uknown format: %s' % (fmt,))
def write_attributes(self, fout=None):
    """
    Write the @attribute declarations.

    Writes into *fout* when given; otherwise returns the declarations
    as a string.
    """
    close = False
    if fout is None:
        close = True
        fout = StringIO()
    for a in self.attributes:
        at = self.attribute_types[a]
        if at == TYPE_INTEGER:
            print("@attribute " + self.esc(a) + " integer", file=fout)
        elif at in (TYPE_NUMERIC, TYPE_REAL):
            print("@attribute " + self.esc(a) + " numeric", file=fout)
        elif at == TYPE_STRING:
            print("@attribute " + self.esc(a) + " string", file=fout)
        elif at == TYPE_NOMINAL:
            # The missing marker is not a legal nominal value; emit the
            # remaining values sorted for a deterministic header.
            nom_vals = [_ for _ in self.attribute_data[a] if _ != MISSING]
            nom_vals = sorted(nom_vals)
            print("@attribute " + self.esc(a) + " {" + ','.join(map(str, nom_vals)) + "}", file=fout)
        elif at == TYPE_DATE:
            # https://weka.wikispaces.com/ARFF+(stable+version)#Examples-The%20@attribute%20Declarations-Date%20attributes
            print('@attribute %s date "%s"' % (self.esc(a), self.attribute_data.get(a, DEFAULT_DATE_FORMAT)), file=fout)
        else:
            raise Exception("Type " + at + " not supported for writing!")
    if isinstance(fout, StringIO) and close:
        return fout.getvalue()
def write(self,
          fout=None,
          fmt=SPARSE,
          schema_only=False,
          data_only=False):
    """
    Write an arff structure to a string.

    Writes into *fout* when given; otherwise returns the serialized
    content. schema_only/data_only restrict output to the header or
    to the @data section respectively (mutually exclusive).
    """
    assert not (schema_only and data_only), 'Make up your mind.'
    assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
    close = False
    if fout is None:
        close = True
        fout = StringIO()
    if not data_only:
        # Comment banner, relation name, then attribute declarations.
        print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
        print("@relation " + self.relation, file=fout)
        self.write_attributes(fout=fout)
    if not schema_only:
        print("@data", file=fout)
        for d in self.data:
            line_str = self.write_line(d, fmt=fmt)
            if line_str:
                print(line_str, file=fout)
    if isinstance(fout, StringIO) and close:
        return fout.getvalue()
def esc(self, s):
    """
    Wrap *s* in single quotes, collapsing any doubled quotes the
    wrapping produces (so already-quoted input is not double-quoted).
    """
    quoted = "'%s'" % s
    return quoted.replace("''", "'")
def define_attribute(self, name, atype, data=None):
    """
    Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
    For nominal attributes, pass the possible values as data.
    For date attributes, pass the format as data.
    """
    self.attributes.append(name)
    assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
    self.attribute_types[name] = atype
    # Nominal: iterable of allowed values; date: format string; else None.
    self.attribute_data[name] = data
def parseline(self, l):
    """
    Feed one raw line into the parser state machine.

    States: 'comment' (leading % banner) -> 'in_header' (@relation /
    @attribute declarations) -> 'data' (rows after @data).
    """
    if self.state == 'comment':
        if l and l[0] == '%':
            self.comment.append(l[2:])
        else:
            # First non-comment line ends the banner; re-dispatch it.
            self.comment = '\n'.join(self.comment)
            self.state = 'in_header'
            self.parseline(l)
    elif self.state == 'in_header':
        ll = l.lower()
        if ll.startswith('@relation '):
            self.__parse_relation(l)
        if ll.startswith('@attribute '):
            self.__parse_attribute(l)
        if ll.startswith('@data'):
            self.state = 'data'
    elif self.state == 'data':
        # Comment lines inside the data section are ignored.
        if l and l[0] != '%':
            self._parse_data(l)
def __parse_relation(self, l):
    """Record the relation name from an '@relation <name>' line."""
    tokens = l.split()
    self.relation = tokens[1]
def __parse_attribute(self, l):
    """
    Parse one '@attribute <name> <type>' declaration, handling quoted
    names, nominal value lists in {...}, and an optional format token
    for date attributes.
    """
    # Tokens: bare identifiers, {...} value lists, or quoted strings.
    p = re.compile(r'[a-zA-Z_][a-zA-Z0-9_\-\[\]]*|\{[^\}]*\}|\'[^\']+\'|\"[^\"]+\"')
    l = [s.strip() for s in p.findall(l)]
    name = l[1]
    name = STRIP_QUOTES_REGEX.sub('', name)
    atype = l[2]#.lower()
    if atype == TYPE_INTEGER:
        self.define_attribute(name, TYPE_INTEGER)
    elif (atype == TYPE_REAL or atype == TYPE_NUMERIC):
        self.define_attribute(name, TYPE_NUMERIC)
    elif atype == TYPE_STRING:
        self.define_attribute(name, TYPE_STRING)
    elif atype == TYPE_DATE:
        data = None
        if len(l) >= 4:
            # Optional quoted date-format token.
            data = STRIP_QUOTES_REGEX.sub('', l[3])
        self.define_attribute(name, TYPE_DATE, data=data)
    elif atype[0] == '{' and atype[-1] == '}':
        # Nominal: allowed values listed inside the braces.
        values = [s.strip() for s in atype[1:-1].split(',')]
        self.define_attribute(name, TYPE_NOMINAL, values)
    else:
        raise NotImplementedError("Unsupported type " + atype + " for attribute " + name + ".")
def _parse_data(self, l):
    """
    Parse and append one data row.

    Accepts a raw ARFF line (dense 'a,b,c' or sparse '{i v, ...}'),
    a dict keyed by attribute name, or a pre-split list/tuple.
    When a stream is open (self.fout) rows are written straight to
    disk instead of being kept in self.data.
    """
    if isinstance(l, basestring):
        l = l.strip()
        if l.startswith('{'):
            # Sparse format: '{index value, index value, ...}'.
            assert l.endswith('}'), 'Malformed sparse data line: %s' % (l,)
            assert not self.fout, NotImplemented
            dline = {}
            # Split on unescaped commas only.
            parts = re.split(r'(?<!\\),', l[1:-1])
            for part in parts:
                index, value = re.findall(r'(^[0-9]+)\s+(.*)$', part.strip())[0]
                index = int(index)
                if value[0] == value[-1] and value[0] in ('"', "'"):
                    # Strip quotes.
                    value = value[1:-1]
                # TODO:0 or 1-indexed? Weka uses 0-indexing?
                #name = self.attributes[index-1]
                name = self.attributes[index]
                ValueClass = TYPE_TO_CLASS[self.attribute_types[name]]
                if value == MISSING:
                    dline[name] = Str(value)
                else:
                    dline[name] = ValueClass(value)
            self.data.append(dline)
            return
        else:
            # Convert string to list.
            l = [s.strip() for s in l.split(',')]
    elif isinstance(l, dict):
        assert len(l) == len(self.attributes), \
            "Sparse data not supported."
        # Convert dict to list.
        #l = dict((k,v) for k,v in l.iteritems())
        # Confirm complete feature name overlap.
        assert set(self.esc(a) for a in l) == \
            set(self.esc(a) for a in self.attributes)
        l = [l[name] for name in self.attributes]
    else:
        # Otherwise, confirm list.
        assert isinstance(l, (tuple, list))
    # NOTE(review): indentation was mangled in this copy; the length
    # check is placed after normalization so it guards every path.
    if len(l) != len(self.attributes):
        print("Warning: line %d contains %i values but it should contain %i values" % (self.lineno, len(l), len(self.attributes)))
        return
    datum = []
    for n, v in zip(self.attributes, l):
        at = self.attribute_types[n]
        if v == MISSING:
            datum.append(v)
        elif at == TYPE_INTEGER:
            datum.append(int(v))
        elif at in (TYPE_NUMERIC, TYPE_REAL):
            datum.append(Decimal(str(v)))
        elif at == TYPE_STRING:
            datum.append(v)
        elif at == TYPE_NOMINAL:
            if v in self.attribute_data[n]:
                datum.append(v)
            else:
                raise Exception('Incorrect value %s for nominal attribute %s' % (v, n))
    if self.fout:
        # If we're streaming out data, then don't even bother saving it to
        # memory and just flush it out to disk instead.
        line_str = self.write_line(datum)
        if line_str:
            print(line_str, file=self.fout)
        self.fout.flush()
    else:
        self.data.append(datum)
def __print_warning(self, msg):
    """Emit a parser warning prefixed with the current line number."""
    prefix = 'Warning (line %d): ' % self.lineno
    print(prefix + msg)
def dump(self):
    """Print an overview of the ARFF file."""
    print("Relation " + self.relation)
    print(" With attributes")
    for n in self.attributes:
        if self.attribute_types[n] != TYPE_NOMINAL:
            print(" %s of type %s" % (n, self.attribute_types[n]))
        else:
            # Nominal attributes also list their allowed values.
            print(" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n]))
    for d in self.data:
        print(d)
def set_class(self, name):
    """
    Make *name* the class attribute by moving it to the end of the
    attribute list (ARFF convention: the last attribute is the class).
    """
    attrs = self.attributes
    assert name in attrs
    attrs.remove(name)
    attrs.append(name)
def set_nominal_values(self, name, values):
    """
    Add *values* to the allowed set for nominal attribute *name*,
    coercing any existing value collection to a set first.
    """
    assert name in self.attributes
    assert self.attribute_types[name] == TYPE_NOMINAL
    current = self.attribute_data.setdefault(name, set())
    merged = set(current)
    merged.update(values)
    self.attribute_data[name] = merged
def append(self, line, schema_only=False, update_schema=True):
    """
    Add one row to the dataset.

    *line* may be a dict keyed by attribute name (values optionally
    wrapped in Value subclasses) or a flat list/tuple matching
    self.attributes. With schema_only=True only the schema is updated;
    with update_schema=False dict values are not validated/registered.
    When a stream is open (self.fout) rows go straight to disk and any
    feature that would require a schema change is dropped instead.
    """
    schema_change = False
    if isinstance(line, dict):
        # Validate line types against schema.
        if update_schema:
            # Iterate over a snapshot since entries may be deleted below.
            for k, v in list(line.items()):
                prior_type = self.attribute_types.get(k, v.c_type if isinstance(v, Value) else None)
                if not isinstance(v, Value):
                    # Wrap raw values in the Value subclass for their type.
                    if v == MISSING:
                        v = Str(v)
                    else:
                        print('prior_type:', prior_type, k, v)
                        v = TYPE_TO_CLASS[prior_type](v)
                if v.value != MISSING:
                    assert prior_type == v.c_type, \
                        ('Attempting to set attribute %s to type %s but it is already defined as type %s.') % (k, prior_type, v.c_type)
                if k not in self.attribute_types:
                    if self.fout:
                        # Remove feature that violates the schema
                        # during streaming.
                        if k in line:
                            del line[k]
                    else:
                        self.attribute_types[k] = v.c_type
                        self.attributes.append(k)
                        schema_change = True
                if isinstance(v, Nominal):
                    if self.fout:
                        # Remove feature that violates the schema
                        # during streaming.
                        if k not in self.attributes:
                            if k in line:
                                del line[k]
                        elif v.value not in self.attribute_data[k]:
                            if k in line:
                                del line[k]
                    else:
                        self.attribute_data.setdefault(k, set())
                        if v.value not in self.attribute_data[k]:
                            self.attribute_data[k].add(v.value)
                            schema_change = True
                if v.cls:
                    if self.class_attr_name is None:
                        self.class_attr_name = k
                    else:
                        assert self.class_attr_name == k, \
                            ('Attempting to set class to "%s" when it has already been set to "%s"') % (k, self.class_attr_name)
        # Ensure the class attribute is the last one listed,
        # as that's assumed to be the class unless otherwise specified.
        if self.class_attr_name:
            try:
                self.attributes.remove(self.class_attr_name)
                self.attributes.append(self.class_attr_name)
            except ValueError:
                pass
        if schema_change:
            assert not self.fout, 'Attempting to add data that doesn\'t match the schema while streaming.'
        if not schema_only:
            # Append line to data set.
            if self.fout:
                line_str = self.write_line(line)
                if line_str:
                    print(line_str, file=self.fout)
            else:
                self.data.append(line)
    else:
        assert len(line) == len(self.attributes)
        self._parse_data(line)
|
chrisspen/weka
|
weka/classifiers.py
|
Classifier.load_raw
|
python
|
def load_raw(cls, model_fn, schema, *args, **kwargs):
c = cls(*args, **kwargs)
c.schema = schema.copy(schema_only=True)
c._model_data = open(model_fn, 'rb').read()
return c
|
Loads a trained classifier from the raw Weka model format.
Must specify the model schema and classifier name, since
these aren't currently deduced from the model format.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/classifiers.py#L270-L279
| null |
class Classifier(BaseClassifier):
def __init__(self, name, ckargs=None, model_data=None):
self._model_data = model_data
self.name = name # Weka classifier class name.
self.schema = None
self.ckargs = ckargs
self.last_training_stdout = None
self.last_training_stderr = None
def _get_ckargs_str(self):
    """
    Render the classifier's keyword options (self.ckargs) as a Weka
    command-line string, e.g. {'C': 0.25} -> '-C 0.25'. A None value
    emits a bare flag; a missing leading '-' is added automatically.
    """
    # Fix: this was decorated @classmethod while reading self.ckargs,
    # an instance attribute, which raises AttributeError when bound to
    # the class. It must be a plain instance method (the call site
    # self._get_ckargs_str() is unaffected). Also uses dict.items()
    # instead of six.iteritems — behavior is identical.
    ckargs = []
    if self.ckargs:
        for k, v in self.ckargs.items():
            if not k.startswith('-'):
                k = '-' + k
            if v is None:
                ckargs.append('%s' % (k,))
            else:
                ckargs.append('%s %s' % (k, v))
    return ' '.join(ckargs)
@property
def training_correlation_coefficient(self):
s = self.last_training_stdout
if PY3:
s = s.decode('utf-8')
matches = re.findall(r'Correlation coefficient\s+([0-9\.]+)', s)
if matches:
return float(matches[0])
@property
def training_mean_absolute_error(self):
s = self.last_training_stdout
if PY3:
s = s.decode('utf-8')
matches = re.findall(r'Mean absolute error\s+([0-9\.]+)', s)
if matches:
return float(matches[0])
def train(self, training_data, testing_data=None, verbose=False):
"""
Updates the classifier with new data.
"""
model_fn = None
training_fn = None
clean_training = False
testing_fn = None
clean_testing = False
try:
# Validate training data.
if isinstance(training_data, basestring):
assert os.path.isfile(training_data)
training_fn = training_data
else:
assert isinstance(training_data, arff.ArffFile)
fd, training_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(training_fn, 'w') as fout:
fout.write(training_data.write())
clean_training = True
assert training_fn
# Validate testing data.
if testing_data:
if isinstance(testing_data, basestring):
assert os.path.isfile(testing_data)
testing_fn = testing_data
else:
assert isinstance(testing_data, arff.ArffFile)
fd, testing_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(testing_fn, 'w') as fout:
fout.write(testing_data.write())
clean_testing = True
else:
testing_fn = training_fn
assert testing_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
if self._model_data:
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
training_fn=training_fn,
testing_fn=testing_fn,
ckargs=self._get_ckargs_str(),
)
if self._model_data:
# Load existing model.
cmd = (
"java -cp %(CP)s %(classifier_name)s -l \"%(model_fn)s\" "
"-t \"%(training_fn)s\" -T \"%(testing_fn)s\" -d \"%(model_fn)s\"") % args
else:
# Create new model file.
cmd = (
"java -cp %(CP)s %(classifier_name)s -t \"%(training_fn)s\" "
"-T \"%(testing_fn)s\" -d \"%(model_fn)s\" %(ckargs)s") % args
if verbose:
print(cmd)
p = Popen(
cmd,
shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=sys.platform != "win32")
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
self.last_training_stdout = stdout_str
self.last_training_stderr = stderr_str
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
# exclude "Warning" lines not to raise an error for a simple warning
stderr_str = '\n'.join(l for l in stderr_str.decode('utf8').split('\n') if not "Warning" in l)
if stderr_str:
raise TrainingError(stderr_str)
# Save schema.
if not self.schema:
self.schema = arff.ArffFile.load(training_fn, schema_only=True).copy(schema_only=True)
# Save model.
with open(model_fn, 'rb') as fin:
self._model_data = fin.read()
assert self._model_data
finally:
# Cleanup files.
if model_fn:
os.remove(model_fn)
if training_fn and clean_training:
os.remove(training_fn)
if testing_fn and clean_testing:
os.remove(testing_fn)
def predict(self, query_data, verbose=False, distribution=False, cleanup=True):
"""
Iterates over the predicted values and probability (if supported).
Each iteration yields a tuple of the form (prediction, probability).
If the file is a test file (i.e. contains no query variables),
then the tuple will be of the form (prediction, actual).
See http://weka.wikispaces.com/Making+predictions
for further explanation on interpreting Weka prediction output.
"""
model_fn = None
query_fn = None
clean_query = False
stdout = None
try:
# Validate query data.
if isinstance(query_data, basestring):
assert os.path.isfile(query_data)
query_fn = query_data
else:
#assert isinstance(query_data, arff.ArffFile) #TODO: doesn't work in Python 3.*?
assert type(query_data).__name__ == 'ArffFile', 'Must be of type ArffFile, not "%s"' % type(query_data).__name__
fd, query_fn = tempfile.mkstemp(suffix='.arff')
if verbose:
print('writing', query_fn)
os.close(fd)
open(query_fn, 'w').write(query_data.write())
clean_query = True
assert query_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
assert self._model_data, "You must train this classifier before predicting."
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# print(open(model_fn).read()
# print(open(query_fn).read()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
query_fn=query_fn,
#ckargs = self._get_ckargs_str(),
distribution=('-distribution' if distribution else ''),
)
cmd = ("java -cp %(CP)s %(classifier_name)s -p 0 %(distribution)s -l \"%(model_fn)s\" -T \"%(query_fn)s\"") % args
if verbose:
print(cmd)
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
if stderr_str:
raise PredictionError(stderr_str)
if stdout_str:
# inst# actual predicted error prediction
#header = 'inst,actual,predicted,error'.split(',')
query = arff.ArffFile.load(query_fn)
query_variables = [
query.attributes[i]
for i, v in enumerate(query.data[0])
if v == arff.MISSING]
if not query_variables:
query_variables = [query.attributes[-1]]
# assert query_variables, \
# "There must be at least one query variable in the query."
if verbose:
print('query_variables:', query_variables)
header = 'predicted'.split(',')
# sample line: 1 1:? 4:36 + 1
# Expected output without distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 11:Acer_tr + 1
#=== Predictions on test data ===
#
# inst# actual predicted error
# 1 ? 7 ?
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 1:0 0.99
# 2 1:? 1:0 0.99
# 3 1:? 1:0 0.99
# 4 1:? 1:0 0.99
# 5 1:? 1:0 0.99
# Expected output with distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error distribution
# 1 1:? 11:Acer_tr + 0,0,0,0,0,0,0,0,0,0,*1,0,0,0,0,0...
# Expected output with simple format:
# inst# actual predicted error
# 1 ? -3.417 ?
q = re.findall(
r'J48 pruned tree\s+\-+:\s+([0-9]+)\s+',
stdout_str.decode('utf-8'), re.MULTILINE|re.DOTALL)
if q:
class_label = q[0]
prob = 1.0
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
elif re.findall(r'error\s+(?:distribution|prediction)', stdout_str.decode('utf-8')):
# Check for distribution output.
matches = re.findall(
r"^\s*[0-9\.]+\s+[a-zA-Z0-9\.\?\:]+\s+(?P<cls_value>[a-zA-Z0-9_\.\?\:]+)\s+\+?\s+(?P<prob>[a-zA-Z0-9\.\?\,\*]+)",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, ("No results found matching distribution pattern in stdout: %s") % stdout_str
for match in matches:
prediction, prob = match
class_index, class_label = prediction.split(':')
class_index = int(class_index)
if distribution:
# Convert list of probabilities into a hash linking the prob
# to the associated class value.
prob = dict(zip(
query.attribute_data[query.attributes[-1]],
map(float, prob.replace('*', '').split(','))))
else:
prob = float(prob)
class_label = query.attribute_data[query.attributes[-1]][class_index-1]
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
else:
# Otherwise, assume a simple output.
matches = re.findall(
# inst# actual predicted
r"^\s*([0-9\.]+)\s+([a-zA-Z0-9\-\.\?\:]+)\s+([a-zA-Z0-9\-_\.\?\:]+)\s+",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, "No results found matching simple pattern in stdout: %s" % stdout_str
#print('matches:',len(matches)
for match in matches:
inst, actual, predicted = match
class_name = query.attributes[-1]
actual_value = query.get_attribute_value(class_name, actual)
predicted_value = query.get_attribute_value(class_name, predicted)
yield PredictionResult(
actual=actual_value,
predicted=predicted_value,
probability=None,)
finally:
# Cleanup files.
if cleanup:
if model_fn:
self._model_data = open(model_fn, 'rb').read()
os.remove(model_fn)
if query_fn and clean_query:
os.remove(query_fn)
def test(self, test_data, verbose=0):
data = arff.ArffFile.load(test_data)
data_itr = iter(data)
i = 0
correct = 0
total = 0
for result in self.predict(test_data, verbose=verbose):
i += 1
if verbose:
print(i, result)
row = next(data_itr)
total += 1
correct += result.predicted == result.actual
return correct/float(total)
|
chrisspen/weka
|
weka/classifiers.py
|
Classifier.train
|
python
|
def train(self, training_data, testing_data=None, verbose=False):
model_fn = None
training_fn = None
clean_training = False
testing_fn = None
clean_testing = False
try:
# Validate training data.
if isinstance(training_data, basestring):
assert os.path.isfile(training_data)
training_fn = training_data
else:
assert isinstance(training_data, arff.ArffFile)
fd, training_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(training_fn, 'w') as fout:
fout.write(training_data.write())
clean_training = True
assert training_fn
# Validate testing data.
if testing_data:
if isinstance(testing_data, basestring):
assert os.path.isfile(testing_data)
testing_fn = testing_data
else:
assert isinstance(testing_data, arff.ArffFile)
fd, testing_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(testing_fn, 'w') as fout:
fout.write(testing_data.write())
clean_testing = True
else:
testing_fn = training_fn
assert testing_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
if self._model_data:
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
training_fn=training_fn,
testing_fn=testing_fn,
ckargs=self._get_ckargs_str(),
)
if self._model_data:
# Load existing model.
cmd = (
"java -cp %(CP)s %(classifier_name)s -l \"%(model_fn)s\" "
"-t \"%(training_fn)s\" -T \"%(testing_fn)s\" -d \"%(model_fn)s\"") % args
else:
# Create new model file.
cmd = (
"java -cp %(CP)s %(classifier_name)s -t \"%(training_fn)s\" "
"-T \"%(testing_fn)s\" -d \"%(model_fn)s\" %(ckargs)s") % args
if verbose:
print(cmd)
p = Popen(
cmd,
shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=sys.platform != "win32")
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
self.last_training_stdout = stdout_str
self.last_training_stderr = stderr_str
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
# exclude "Warning" lines not to raise an error for a simple warning
stderr_str = '\n'.join(l for l in stderr_str.decode('utf8').split('\n') if not "Warning" in l)
if stderr_str:
raise TrainingError(stderr_str)
# Save schema.
if not self.schema:
self.schema = arff.ArffFile.load(training_fn, schema_only=True).copy(schema_only=True)
# Save model.
with open(model_fn, 'rb') as fin:
self._model_data = fin.read()
assert self._model_data
finally:
# Cleanup files.
if model_fn:
os.remove(model_fn)
if training_fn and clean_training:
os.remove(training_fn)
if testing_fn and clean_testing:
os.remove(testing_fn)
|
Updates the classifier with new data.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/classifiers.py#L312-L417
|
[
"def load(cls, filename, schema_only=False):\n \"\"\"\n Load an ARFF File from a file.\n \"\"\"\n o = open(filename)\n s = o.read()\n a = cls.parse(s, schema_only=schema_only)\n if not schema_only:\n a._filename = filename\n o.close()\n return a\n",
"def _get_ckargs_str(self):\n ckargs = []\n if self.ckargs:\n for k, v in iteritems(self.ckargs):\n if not k.startswith('-'):\n k = '-'+k\n if v is None:\n ckargs.append('%s' % (k,))\n else:\n ckargs.append('%s %s' % (k, v))\n ckargs = ' '.join(ckargs)\n return ckargs\n"
] |
class Classifier(BaseClassifier):
def __init__(self, name, ckargs=None, model_data=None):
self._model_data = model_data
self.name = name # Weka classifier class name.
self.schema = None
self.ckargs = ckargs
self.last_training_stdout = None
self.last_training_stderr = None
@classmethod
def load_raw(cls, model_fn, schema, *args, **kwargs):
"""
Loads a trained classifier from the raw Weka model format.
Must specify the model schema and classifier name, since
these aren't currently deduced from the model format.
"""
c = cls(*args, **kwargs)
c.schema = schema.copy(schema_only=True)
c._model_data = open(model_fn, 'rb').read()
return c
def _get_ckargs_str(self):
ckargs = []
if self.ckargs:
for k, v in iteritems(self.ckargs):
if not k.startswith('-'):
k = '-'+k
if v is None:
ckargs.append('%s' % (k,))
else:
ckargs.append('%s %s' % (k, v))
ckargs = ' '.join(ckargs)
return ckargs
@property
def training_correlation_coefficient(self):
s = self.last_training_stdout
if PY3:
s = s.decode('utf-8')
matches = re.findall(r'Correlation coefficient\s+([0-9\.]+)', s)
if matches:
return float(matches[0])
@property
def training_mean_absolute_error(self):
s = self.last_training_stdout
if PY3:
s = s.decode('utf-8')
matches = re.findall(r'Mean absolute error\s+([0-9\.]+)', s)
if matches:
return float(matches[0])
def train(self, training_data, testing_data=None, verbose=False):
"""
Updates the classifier with new data.
"""
model_fn = None
training_fn = None
clean_training = False
testing_fn = None
clean_testing = False
try:
# Validate training data.
if isinstance(training_data, basestring):
assert os.path.isfile(training_data)
training_fn = training_data
else:
assert isinstance(training_data, arff.ArffFile)
fd, training_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(training_fn, 'w') as fout:
fout.write(training_data.write())
clean_training = True
assert training_fn
# Validate testing data.
if testing_data:
if isinstance(testing_data, basestring):
assert os.path.isfile(testing_data)
testing_fn = testing_data
else:
assert isinstance(testing_data, arff.ArffFile)
fd, testing_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(testing_fn, 'w') as fout:
fout.write(testing_data.write())
clean_testing = True
else:
testing_fn = training_fn
assert testing_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
if self._model_data:
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
training_fn=training_fn,
testing_fn=testing_fn,
ckargs=self._get_ckargs_str(),
)
if self._model_data:
# Load existing model.
cmd = (
"java -cp %(CP)s %(classifier_name)s -l \"%(model_fn)s\" "
"-t \"%(training_fn)s\" -T \"%(testing_fn)s\" -d \"%(model_fn)s\"") % args
else:
# Create new model file.
cmd = (
"java -cp %(CP)s %(classifier_name)s -t \"%(training_fn)s\" "
"-T \"%(testing_fn)s\" -d \"%(model_fn)s\" %(ckargs)s") % args
if verbose:
print(cmd)
p = Popen(
cmd,
shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=sys.platform != "win32")
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
self.last_training_stdout = stdout_str
self.last_training_stderr = stderr_str
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
# exclude "Warning" lines not to raise an error for a simple warning
stderr_str = '\n'.join(l for l in stderr_str.decode('utf8').split('\n') if not "Warning" in l)
if stderr_str:
raise TrainingError(stderr_str)
# Save schema.
if not self.schema:
self.schema = arff.ArffFile.load(training_fn, schema_only=True).copy(schema_only=True)
# Save model.
with open(model_fn, 'rb') as fin:
self._model_data = fin.read()
assert self._model_data
finally:
# Cleanup files.
if model_fn:
os.remove(model_fn)
if training_fn and clean_training:
os.remove(training_fn)
if testing_fn and clean_testing:
os.remove(testing_fn)
def predict(self, query_data, verbose=False, distribution=False, cleanup=True):
"""
Iterates over the predicted values and probability (if supported).
Each iteration yields a tuple of the form (prediction, probability).
If the file is a test file (i.e. contains no query variables),
then the tuple will be of the form (prediction, actual).
See http://weka.wikispaces.com/Making+predictions
for further explanation on interpreting Weka prediction output.
"""
model_fn = None
query_fn = None
clean_query = False
stdout = None
try:
# Validate query data.
if isinstance(query_data, basestring):
assert os.path.isfile(query_data)
query_fn = query_data
else:
#assert isinstance(query_data, arff.ArffFile) #TODO: doesn't work in Python 3.*?
assert type(query_data).__name__ == 'ArffFile', 'Must be of type ArffFile, not "%s"' % type(query_data).__name__
fd, query_fn = tempfile.mkstemp(suffix='.arff')
if verbose:
print('writing', query_fn)
os.close(fd)
open(query_fn, 'w').write(query_data.write())
clean_query = True
assert query_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
assert self._model_data, "You must train this classifier before predicting."
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# print(open(model_fn).read()
# print(open(query_fn).read()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
query_fn=query_fn,
#ckargs = self._get_ckargs_str(),
distribution=('-distribution' if distribution else ''),
)
cmd = ("java -cp %(CP)s %(classifier_name)s -p 0 %(distribution)s -l \"%(model_fn)s\" -T \"%(query_fn)s\"") % args
if verbose:
print(cmd)
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
if stderr_str:
raise PredictionError(stderr_str)
if stdout_str:
# inst# actual predicted error prediction
#header = 'inst,actual,predicted,error'.split(',')
query = arff.ArffFile.load(query_fn)
query_variables = [
query.attributes[i]
for i, v in enumerate(query.data[0])
if v == arff.MISSING]
if not query_variables:
query_variables = [query.attributes[-1]]
# assert query_variables, \
# "There must be at least one query variable in the query."
if verbose:
print('query_variables:', query_variables)
header = 'predicted'.split(',')
# sample line: 1 1:? 4:36 + 1
# Expected output without distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 11:Acer_tr + 1
#=== Predictions on test data ===
#
# inst# actual predicted error
# 1 ? 7 ?
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 1:0 0.99
# 2 1:? 1:0 0.99
# 3 1:? 1:0 0.99
# 4 1:? 1:0 0.99
# 5 1:? 1:0 0.99
# Expected output with distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error distribution
# 1 1:? 11:Acer_tr + 0,0,0,0,0,0,0,0,0,0,*1,0,0,0,0,0...
# Expected output with simple format:
# inst# actual predicted error
# 1 ? -3.417 ?
q = re.findall(
r'J48 pruned tree\s+\-+:\s+([0-9]+)\s+',
stdout_str.decode('utf-8'), re.MULTILINE|re.DOTALL)
if q:
class_label = q[0]
prob = 1.0
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
elif re.findall(r'error\s+(?:distribution|prediction)', stdout_str.decode('utf-8')):
# Check for distribution output.
matches = re.findall(
r"^\s*[0-9\.]+\s+[a-zA-Z0-9\.\?\:]+\s+(?P<cls_value>[a-zA-Z0-9_\.\?\:]+)\s+\+?\s+(?P<prob>[a-zA-Z0-9\.\?\,\*]+)",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, ("No results found matching distribution pattern in stdout: %s") % stdout_str
for match in matches:
prediction, prob = match
class_index, class_label = prediction.split(':')
class_index = int(class_index)
if distribution:
# Convert list of probabilities into a hash linking the prob
# to the associated class value.
prob = dict(zip(
query.attribute_data[query.attributes[-1]],
map(float, prob.replace('*', '').split(','))))
else:
prob = float(prob)
class_label = query.attribute_data[query.attributes[-1]][class_index-1]
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
else:
# Otherwise, assume a simple output.
matches = re.findall(
# inst# actual predicted
r"^\s*([0-9\.]+)\s+([a-zA-Z0-9\-\.\?\:]+)\s+([a-zA-Z0-9\-_\.\?\:]+)\s+",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, "No results found matching simple pattern in stdout: %s" % stdout_str
#print('matches:',len(matches)
for match in matches:
inst, actual, predicted = match
class_name = query.attributes[-1]
actual_value = query.get_attribute_value(class_name, actual)
predicted_value = query.get_attribute_value(class_name, predicted)
yield PredictionResult(
actual=actual_value,
predicted=predicted_value,
probability=None,)
finally:
# Cleanup files.
if cleanup:
if model_fn:
self._model_data = open(model_fn, 'rb').read()
os.remove(model_fn)
if query_fn and clean_query:
os.remove(query_fn)
def test(self, test_data, verbose=0):
data = arff.ArffFile.load(test_data)
data_itr = iter(data)
i = 0
correct = 0
total = 0
for result in self.predict(test_data, verbose=verbose):
i += 1
if verbose:
print(i, result)
row = next(data_itr)
total += 1
correct += result.predicted == result.actual
return correct/float(total)
|
chrisspen/weka
|
weka/classifiers.py
|
Classifier.predict
|
python
|
def predict(self, query_data, verbose=False, distribution=False, cleanup=True):
model_fn = None
query_fn = None
clean_query = False
stdout = None
try:
# Validate query data.
if isinstance(query_data, basestring):
assert os.path.isfile(query_data)
query_fn = query_data
else:
#assert isinstance(query_data, arff.ArffFile) #TODO: doesn't work in Python 3.*?
assert type(query_data).__name__ == 'ArffFile', 'Must be of type ArffFile, not "%s"' % type(query_data).__name__
fd, query_fn = tempfile.mkstemp(suffix='.arff')
if verbose:
print('writing', query_fn)
os.close(fd)
open(query_fn, 'w').write(query_data.write())
clean_query = True
assert query_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
assert self._model_data, "You must train this classifier before predicting."
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# print(open(model_fn).read()
# print(open(query_fn).read()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
query_fn=query_fn,
#ckargs = self._get_ckargs_str(),
distribution=('-distribution' if distribution else ''),
)
cmd = ("java -cp %(CP)s %(classifier_name)s -p 0 %(distribution)s -l \"%(model_fn)s\" -T \"%(query_fn)s\"") % args
if verbose:
print(cmd)
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
if stderr_str:
raise PredictionError(stderr_str)
if stdout_str:
# inst# actual predicted error prediction
#header = 'inst,actual,predicted,error'.split(',')
query = arff.ArffFile.load(query_fn)
query_variables = [
query.attributes[i]
for i, v in enumerate(query.data[0])
if v == arff.MISSING]
if not query_variables:
query_variables = [query.attributes[-1]]
# assert query_variables, \
# "There must be at least one query variable in the query."
if verbose:
print('query_variables:', query_variables)
header = 'predicted'.split(',')
# sample line: 1 1:? 4:36 + 1
# Expected output without distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 11:Acer_tr + 1
#=== Predictions on test data ===
#
# inst# actual predicted error
# 1 ? 7 ?
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 1:0 0.99
# 2 1:? 1:0 0.99
# 3 1:? 1:0 0.99
# 4 1:? 1:0 0.99
# 5 1:? 1:0 0.99
# Expected output with distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error distribution
# 1 1:? 11:Acer_tr + 0,0,0,0,0,0,0,0,0,0,*1,0,0,0,0,0...
# Expected output with simple format:
# inst# actual predicted error
# 1 ? -3.417 ?
q = re.findall(
r'J48 pruned tree\s+\-+:\s+([0-9]+)\s+',
stdout_str.decode('utf-8'), re.MULTILINE|re.DOTALL)
if q:
class_label = q[0]
prob = 1.0
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
elif re.findall(r'error\s+(?:distribution|prediction)', stdout_str.decode('utf-8')):
# Check for distribution output.
matches = re.findall(
r"^\s*[0-9\.]+\s+[a-zA-Z0-9\.\?\:]+\s+(?P<cls_value>[a-zA-Z0-9_\.\?\:]+)\s+\+?\s+(?P<prob>[a-zA-Z0-9\.\?\,\*]+)",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, ("No results found matching distribution pattern in stdout: %s") % stdout_str
for match in matches:
prediction, prob = match
class_index, class_label = prediction.split(':')
class_index = int(class_index)
if distribution:
# Convert list of probabilities into a hash linking the prob
# to the associated class value.
prob = dict(zip(
query.attribute_data[query.attributes[-1]],
map(float, prob.replace('*', '').split(','))))
else:
prob = float(prob)
class_label = query.attribute_data[query.attributes[-1]][class_index-1]
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
else:
# Otherwise, assume a simple output.
matches = re.findall(
# inst# actual predicted
r"^\s*([0-9\.]+)\s+([a-zA-Z0-9\-\.\?\:]+)\s+([a-zA-Z0-9\-_\.\?\:]+)\s+",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, "No results found matching simple pattern in stdout: %s" % stdout_str
#print('matches:',len(matches)
for match in matches:
inst, actual, predicted = match
class_name = query.attributes[-1]
actual_value = query.get_attribute_value(class_name, actual)
predicted_value = query.get_attribute_value(class_name, predicted)
yield PredictionResult(
actual=actual_value,
predicted=predicted_value,
probability=None,)
finally:
# Cleanup files.
if cleanup:
if model_fn:
self._model_data = open(model_fn, 'rb').read()
os.remove(model_fn)
if query_fn and clean_query:
os.remove(query_fn)
|
Iterates over the predicted values and probability (if supported).
Each iteration yields a tuple of the form (prediction, probability).
If the file is a test file (i.e. contains no query variables),
then the tuple will be of the form (prediction, actual).
See http://weka.wikispaces.com/Making+predictions
for further explanation on interpreting Weka prediction output.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/classifiers.py#L419-L592
|
[
"def load(cls, filename, schema_only=False):\n \"\"\"\n Load an ARFF File from a file.\n \"\"\"\n o = open(filename)\n s = o.read()\n a = cls.parse(s, schema_only=schema_only)\n if not schema_only:\n a._filename = filename\n o.close()\n return a\n"
] |
class Classifier(BaseClassifier):
def __init__(self, name, ckargs=None, model_data=None):
self._model_data = model_data
self.name = name # Weka classifier class name.
self.schema = None
self.ckargs = ckargs
self.last_training_stdout = None
self.last_training_stderr = None
@classmethod
def load_raw(cls, model_fn, schema, *args, **kwargs):
"""
Loads a trained classifier from the raw Weka model format.
Must specify the model schema and classifier name, since
these aren't currently deduced from the model format.
"""
c = cls(*args, **kwargs)
c.schema = schema.copy(schema_only=True)
c._model_data = open(model_fn, 'rb').read()
return c
def _get_ckargs_str(self):
ckargs = []
if self.ckargs:
for k, v in iteritems(self.ckargs):
if not k.startswith('-'):
k = '-'+k
if v is None:
ckargs.append('%s' % (k,))
else:
ckargs.append('%s %s' % (k, v))
ckargs = ' '.join(ckargs)
return ckargs
@property
def training_correlation_coefficient(self):
s = self.last_training_stdout
if PY3:
s = s.decode('utf-8')
matches = re.findall(r'Correlation coefficient\s+([0-9\.]+)', s)
if matches:
return float(matches[0])
@property
def training_mean_absolute_error(self):
s = self.last_training_stdout
if PY3:
s = s.decode('utf-8')
matches = re.findall(r'Mean absolute error\s+([0-9\.]+)', s)
if matches:
return float(matches[0])
def train(self, training_data, testing_data=None, verbose=False):
"""
Updates the classifier with new data.
"""
model_fn = None
training_fn = None
clean_training = False
testing_fn = None
clean_testing = False
try:
# Validate training data.
if isinstance(training_data, basestring):
assert os.path.isfile(training_data)
training_fn = training_data
else:
assert isinstance(training_data, arff.ArffFile)
fd, training_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(training_fn, 'w') as fout:
fout.write(training_data.write())
clean_training = True
assert training_fn
# Validate testing data.
if testing_data:
if isinstance(testing_data, basestring):
assert os.path.isfile(testing_data)
testing_fn = testing_data
else:
assert isinstance(testing_data, arff.ArffFile)
fd, testing_fn = tempfile.mkstemp(suffix='.arff')
os.close(fd)
with open(testing_fn, 'w') as fout:
fout.write(testing_data.write())
clean_testing = True
else:
testing_fn = training_fn
assert testing_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
if self._model_data:
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
training_fn=training_fn,
testing_fn=testing_fn,
ckargs=self._get_ckargs_str(),
)
if self._model_data:
# Load existing model.
cmd = (
"java -cp %(CP)s %(classifier_name)s -l \"%(model_fn)s\" "
"-t \"%(training_fn)s\" -T \"%(testing_fn)s\" -d \"%(model_fn)s\"") % args
else:
# Create new model file.
cmd = (
"java -cp %(CP)s %(classifier_name)s -t \"%(training_fn)s\" "
"-T \"%(testing_fn)s\" -d \"%(model_fn)s\" %(ckargs)s") % args
if verbose:
print(cmd)
p = Popen(
cmd,
shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=sys.platform != "win32")
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
self.last_training_stdout = stdout_str
self.last_training_stderr = stderr_str
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
# exclude "Warning" lines not to raise an error for a simple warning
stderr_str = '\n'.join(l for l in stderr_str.decode('utf8').split('\n') if not "Warning" in l)
if stderr_str:
raise TrainingError(stderr_str)
# Save schema.
if not self.schema:
self.schema = arff.ArffFile.load(training_fn, schema_only=True).copy(schema_only=True)
# Save model.
with open(model_fn, 'rb') as fin:
self._model_data = fin.read()
assert self._model_data
finally:
# Cleanup files.
if model_fn:
os.remove(model_fn)
if training_fn and clean_training:
os.remove(training_fn)
if testing_fn and clean_testing:
os.remove(testing_fn)
def predict(self, query_data, verbose=False, distribution=False, cleanup=True):
"""
Iterates over the predicted values and probability (if supported).
Each iteration yields a tuple of the form (prediction, probability).
If the file is a test file (i.e. contains no query variables),
then the tuple will be of the form (prediction, actual).
See http://weka.wikispaces.com/Making+predictions
for further explanation on interpreting Weka prediction output.
"""
model_fn = None
query_fn = None
clean_query = False
stdout = None
try:
# Validate query data.
if isinstance(query_data, basestring):
assert os.path.isfile(query_data)
query_fn = query_data
else:
#assert isinstance(query_data, arff.ArffFile) #TODO: doesn't work in Python 3.*?
assert type(query_data).__name__ == 'ArffFile', 'Must be of type ArffFile, not "%s"' % type(query_data).__name__
fd, query_fn = tempfile.mkstemp(suffix='.arff')
if verbose:
print('writing', query_fn)
os.close(fd)
open(query_fn, 'w').write(query_data.write())
clean_query = True
assert query_fn
# Validate model file.
fd, model_fn = tempfile.mkstemp()
os.close(fd)
assert self._model_data, "You must train this classifier before predicting."
fout = open(model_fn, 'wb')
fout.write(self._model_data)
fout.close()
# print(open(model_fn).read()
# print(open(query_fn).read()
# Call Weka Jar.
args = dict(
CP=CP,
classifier_name=self.name,
model_fn=model_fn,
query_fn=query_fn,
#ckargs = self._get_ckargs_str(),
distribution=('-distribution' if distribution else ''),
)
cmd = ("java -cp %(CP)s %(classifier_name)s -p 0 %(distribution)s -l \"%(model_fn)s\" -T \"%(query_fn)s\"") % args
if verbose:
print(cmd)
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
stdout_str = stdout.read()
stderr_str = stderr.read()
if verbose:
print('stdout:')
print(stdout_str)
print('stderr:')
print(stderr_str)
if stderr_str:
raise PredictionError(stderr_str)
if stdout_str:
# inst# actual predicted error prediction
#header = 'inst,actual,predicted,error'.split(',')
query = arff.ArffFile.load(query_fn)
query_variables = [
query.attributes[i]
for i, v in enumerate(query.data[0])
if v == arff.MISSING]
if not query_variables:
query_variables = [query.attributes[-1]]
# assert query_variables, \
# "There must be at least one query variable in the query."
if verbose:
print('query_variables:', query_variables)
header = 'predicted'.split(',')
# sample line: 1 1:? 4:36 + 1
# Expected output without distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 11:Acer_tr + 1
#=== Predictions on test data ===
#
# inst# actual predicted error
# 1 ? 7 ?
#=== Predictions on test data ===
#
# inst# actual predicted error prediction
# 1 1:? 1:0 0.99
# 2 1:? 1:0 0.99
# 3 1:? 1:0 0.99
# 4 1:? 1:0 0.99
# 5 1:? 1:0 0.99
# Expected output with distribution:
#=== Predictions on test data ===
#
# inst# actual predicted error distribution
# 1 1:? 11:Acer_tr + 0,0,0,0,0,0,0,0,0,0,*1,0,0,0,0,0...
# Expected output with simple format:
# inst# actual predicted error
# 1 ? -3.417 ?
q = re.findall(
r'J48 pruned tree\s+\-+:\s+([0-9]+)\s+',
stdout_str.decode('utf-8'), re.MULTILINE|re.DOTALL)
if q:
class_label = q[0]
prob = 1.0
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
elif re.findall(r'error\s+(?:distribution|prediction)', stdout_str.decode('utf-8')):
# Check for distribution output.
matches = re.findall(
r"^\s*[0-9\.]+\s+[a-zA-Z0-9\.\?\:]+\s+(?P<cls_value>[a-zA-Z0-9_\.\?\:]+)\s+\+?\s+(?P<prob>[a-zA-Z0-9\.\?\,\*]+)",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, ("No results found matching distribution pattern in stdout: %s") % stdout_str
for match in matches:
prediction, prob = match
class_index, class_label = prediction.split(':')
class_index = int(class_index)
if distribution:
# Convert list of probabilities into a hash linking the prob
# to the associated class value.
prob = dict(zip(
query.attribute_data[query.attributes[-1]],
map(float, prob.replace('*', '').split(','))))
else:
prob = float(prob)
class_label = query.attribute_data[query.attributes[-1]][class_index-1]
yield PredictionResult(
actual=None,
predicted=class_label,
probability=prob,)
else:
# Otherwise, assume a simple output.
matches = re.findall(
# inst# actual predicted
r"^\s*([0-9\.]+)\s+([a-zA-Z0-9\-\.\?\:]+)\s+([a-zA-Z0-9\-_\.\?\:]+)\s+",
stdout_str.decode('utf-8'),
re.MULTILINE)
assert matches, "No results found matching simple pattern in stdout: %s" % stdout_str
#print('matches:',len(matches)
for match in matches:
inst, actual, predicted = match
class_name = query.attributes[-1]
actual_value = query.get_attribute_value(class_name, actual)
predicted_value = query.get_attribute_value(class_name, predicted)
yield PredictionResult(
actual=actual_value,
predicted=predicted_value,
probability=None,)
finally:
# Cleanup files.
if cleanup:
if model_fn:
self._model_data = open(model_fn, 'rb').read()
os.remove(model_fn)
if query_fn and clean_query:
os.remove(query_fn)
def test(self, test_data, verbose=0):
data = arff.ArffFile.load(test_data)
data_itr = iter(data)
i = 0
correct = 0
total = 0
for result in self.predict(test_data, verbose=verbose):
i += 1
if verbose:
print(i, result)
row = next(data_itr)
total += 1
correct += result.predicted == result.actual
return correct/float(total)
|
chrisspen/weka
|
weka/classifiers.py
|
EnsembleClassifier.get_training_coverage
|
python
|
def get_training_coverage(self):
total = len(self.training_results)
i = sum(1 for data in self.training_results.values() if not isinstance(data, basestring))
return i/float(total)
|
Returns a ratio of classifiers that were able to be trained successfully.
|
train
|
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/classifiers.py#L640-L646
| null |
class EnsembleClassifier(BaseClassifier):
def __init__(self, classes=None):
self.best = None, None # score, cls
self.training_results = {} # {name: score}
self.trained_classifiers = {} # {name: classifier instance}
self.prediction_results = {} # {name: results}
self.classes = list(classes or WEKA_CLASSIFIERS)
for cls in self.classes:
assert cls in WEKA_CLASSIFIERS, 'Invalid class: %s' % cls
def get_training_best(self):
results = list(self.training_results.items())
results = sorted(results, key=lambda o: o[1])
print('name: <name> <coef> <inv mae>')
for name, data in results:
if isinstance(data, basestring):
continue
(coef, inv_mae) = data
print('name:', name, (coef, inv_mae))
def get_training_errors(self):
results = list(self.training_results.items())
results = sorted(results)
for name, data in results:
if not isinstance(data, basestring):
continue
print('name:', name)
print(data)
def train(self, training_data, testing_data=None, verbose=False):
total = len(self.classes)
i = 0
for name in self.classes:
i += 1
try:
c = Classifier(name=name)
print('Training classifier %i of %i %.02f%% %s...' % (i, total, i/float(total)*100, name))
t0 = time.time()
c.train(training_data=training_data, testing_data=testing_data, verbose=verbose)
self.trained_classifiers[name] = c
td = time.time() - t0
print('Training seconds:', td)
coef = c.training_correlation_coefficient
print('correlation_coefficient:', coef)
mae = c.training_mean_absolute_error
print('mean_absolute_error:', mae)
self.training_results[name] = (coef, 1/(1+float(mae)))
except Exception:
traceback.print_exc()
self.training_results[name] = traceback.format_exc()
def get_best_predictors(self, tolerance, verbose=False):
best_coef = -1e9999999999
best_names = set()
if verbose:
print('Name\tCoef\tInv MAE')
for name, data in sorted(self.training_results.items(), key=lambda o: o[1][0], reverse=True):
if isinstance(data, basestring):
continue
(coef, inv_mae) = data
if verbose:
print('%s\t%s\t%s' % (name, coef, inv_mae))
if coef > best_coef:
best_coef = coef
best_names = set([name])
elif (coef + tolerance) >= best_coef:
best_names.add(name)
return best_names
def predict(self, query_data, tolerance=0, **kwargs):
verbose = kwargs.get('verbose', False)
assert self.training_results, 'Classifier must be trained first!'
best_names = self.get_best_predictors(tolerance=tolerance)
total = len(best_names)
i = 0
for name in best_names:
i += 1
try:
c = self.trained_classifiers[name]
if verbose:
print('Querying classifier %i of %i %.02f%% %s...' % (i, total, i/float(total)*100, name))
t0 = time.time()
results = list(c.predict(query_data=query_data, **kwargs))
td = time.time() - t0
self.prediction_results[name] = results
except Exception:
traceback.print_exc()
self.prediction_results[name] = traceback.format_exc()
results = {} # {index, [results]}
for k, v in self.prediction_results.items():
for i, result in enumerate(v):
if isinstance(v, basestring):
continue
results.setdefault(i, [])
results[i].append(result)
results = [PredictionResult.avg(*data) for i, data in sorted(results.items())]
return results
|
heigeo/climata
|
climata/bin/acis_sites.py
|
load_sites
|
python
|
def load_sites(*basin_ids):
# Resolve basin ids to HUC8s if needed
basins = []
for basin in basin_ids:
if basin.isdigit() and len(basin) == 8:
basins.append(basin)
else:
from climata.huc8 import get_huc8
basins.extend(get_huc8(basin))
# Load sites with data since 1900
sites = StationMetaIO(
basin=basins,
parameter=list(elems.keys()),
start_date='1900-01-01',
end_date=date.today(),
meta=ALL_META_FIELDS,
)
# Load all sites (to get sites without data)
seen_sites = [site.uid for site in sites]
nodata_sites = [
site for site in StationMetaIO(basin=basins)
if site.uid not in seen_sites
]
# Determine the following from the site lists:
seen_auths = set() # Which authority codes are actually used by any site
seen_elems = set() # Which elems actually have data in any site
ranges = {} # The overall period of record for each site
for site in sites:
for auth in site.sids.keys():
seen_auths.add(auth)
start, end = None, None
for elem in site.valid_daterange:
s, e = site.valid_daterange[elem]
seen_elems.add(elem)
if s is None or e is None:
continue
if start is None or s < start:
start = s
if end is None or e > end:
end = e
ranges[site.uid] = [start, end]
# Check for authority codes that might not be in sites with data
for site in nodata_sites:
for auth in site.sids.keys():
seen_auths.add(auth)
# Print CSV headers (FIXME: use CsvFileIO for this?)
seen_auths = sorted(seen_auths)
seen_elems = sorted(seen_elems)
print(",".join(
['ACIS uid', 'name']
+ seen_auths
+ ['latitude', 'longitude', 'start', 'end', 'years']
+ [elems[elem]['desc'] for elem in seen_elems]
))
# Print sites with data
for site in sites:
# Determine if elems are available for entire period or shorter range
start, end = ranges[site.uid]
if start and end:
years = end.year - start.year + 1
elem_ranges = []
for elem in seen_elems:
estart, eend = site.valid_daterange[elem]
if estart is None:
erange = ""
elif estart == start and eend == end:
erange = "period"
else:
erange = "%s to %s" % (estart.date(), eend.date())
elem_ranges.append(erange)
# Output CSV row
print(",".join(map(
str,
[site.uid, site.name]
+ [site.sids.get(auth, "") for auth in seen_auths]
+ [site.latitude, site.longitude]
+ [start.date(), end.date(), years]
+ elem_ranges
)))
# Print CSV rows for sites without data
for site in nodata_sites:
print(",".join(map(
str,
[site.uid, site.name]
+ [site.sids.get(auth, "") for auth in seen_auths]
+ [site.latitude, site.longitude]
+ ["NO DATA"]
)))
|
Load metadata for all sites in given basin codes.
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/bin/acis_sites.py#L16-L118
|
[
"def get_huc8(prefix):\n \"\"\"\n Return all HUC8s matching the given prefix (e.g. 1801) or basin name\n (e.g. Klamath)\n \"\"\"\n if not prefix.isdigit():\n # Look up hucs by name\n name = prefix\n prefix = None\n for row in hucs:\n if row.basin.lower() == name.lower():\n # Use most general huc if two have the same name\n if prefix is None or len(row.huc) < len(prefix):\n prefix = row.huc\n\n if prefix is None:\n return []\n\n huc8s = []\n for row in hucs:\n # Return all 8-digit hucs with given prefix\n if len(row.huc) == 8 and row.huc.startswith(prefix):\n huc8s.append(row.huc)\n return huc8s\n"
] |
#!/usr/bin/env python
from __future__ import print_function
import sys
from datetime import date
from climata.acis import StationMetaIO
from climata.acis.constants import (
ELEMENT_BY_NAME, ELEMENT_BY_ID, ALL_META_FIELDS
)
elems = ELEMENT_BY_NAME.copy()
# Eloement 7 (pan evap) does not have a name, copy from ID listing
elems['7'] = ELEMENT_BY_ID['7']
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: acis_sites.py basin")
exit()
load_sites(*sys.argv[1:])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.