repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/appengine.py | download_command | python | def download_command(args):
latest_two_versions = list(reversed(get_gae_versions()))[:2]
zip = None
version_number = None
for version in latest_two_versions:
if is_existing_up_to_date(args.destination, version[0]):
print(
'App Engine SDK already exists and is up to date '
'at {}.'.format(args.destination))
return
try:
print('Downloading App Engine SDK {}'.format(
'.'.join([str(x) for x in version[0]])))
zip = download_sdk(version[1])
version_number = version[0]
break
except Exception as e:
print('Failed to download: {}'.format(e))
continue
if not zip:
return
print('Extracting SDK to {}'.format(args.destination))
extract_zip(zip, args.destination)
fixup_version(args.destination, version_number)
print('App Engine SDK installed.') | Downloads and extracts the latest App Engine SDK to the given
destination. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/appengine.py#L122-L155 | [
"def get_gae_versions():\n \"\"\"Gets a list of all of the available Python SDK versions, sorted with\n the newest last.\"\"\"\n r = requests.get(SDK_RELEASES_URL)\n r.raise_for_status()\n\n releases = r.json().get('items', {})\n\n # We only care about the Python releases, which all are in the format\n # \"featured/google_appengine_{version}.zip\". We'll extract the version\n # number so we can sort the list by version, and finally get the download\n # URL.\n versions_and_urls = []\n for release in releases:\n match = PYTHON_RELEASE_RE.match(release['name'])\n\n if not match:\n continue\n\n versions_and_urls.append(\n ([int(x) for x in match.groups()], release['mediaLink']))\n\n return sorted(versions_and_urls, key=lambda x: x[0])\n",
"def is_existing_up_to_date(destination, latest_version):\n \"\"\"Returns False if there is no existing install or if the existing install\n is out of date. Otherwise, returns True.\"\"\"\n version_path = os.path.join(\n destination, 'google_appengine', 'VERSION')\n\n if not os.path.exists(version_path):\n return False\n\n with open(version_path, 'r') as f:\n version_line = f.readline()\n\n match = SDK_RELEASE_RE.match(version_line)\n\n if not match:\n print('Unable to parse version from:', version_line)\n return False\n\n version = [int(x) for x in match.groups()]\n\n return version >= latest_version\n",
"def download_sdk(url):\n \"\"\"Downloads the SDK and returns a file-like object for the zip content.\"\"\"\n r = requests.get(url)\n r.raise_for_status()\n return StringIO(r.content)\n",
"def extract_zip(zip, destination):\n zip_contents = zipfile.ZipFile(zip)\n\n if not os.path.exists(destination):\n os.makedirs(destination)\n\n zip_contents.extractall(destination)\n",
"def fixup_version(destination, version):\n \"\"\"Newer releases of the SDK do not have the version number set correctly\n in the VERSION file. Fix it up.\"\"\"\n version_path = os.path.join(\n destination, 'google_appengine', 'VERSION')\n\n with open(version_path, 'r') as f:\n version_data = f.read()\n\n version_data = version_data.replace(\n 'release: \"0.0.0\"',\n 'release: \"{}\"'.format('.'.join(str(x) for x in version)))\n\n with open(version_path, 'w') as f:\n f.write(version_data)\n"
] | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fetches the most recent GAE SDK and extracts it to the given directory."""
from __future__ import print_function
import os
import re
import sys
import zipfile
import requests
if sys.version_info[0] == 2:
from StringIO import StringIO
elif sys.version_info[0] == 3:
from io import StringIO
SDK_RELEASES_URL = (
'https://www.googleapis.com/storage/v1/b/appengine-sdks/o?prefix=featured')
PYTHON_RELEASE_RE = re.compile(
r'featured/google_appengine_(\d+?)\.(\d+?)\.(\d+?)\.zip')
SDK_RELEASE_RE = re.compile(
r'release: \"(\d+?)\.(\d+?)\.(\d+?)\"')
def get_gae_versions():
"""Gets a list of all of the available Python SDK versions, sorted with
the newest last."""
r = requests.get(SDK_RELEASES_URL)
r.raise_for_status()
releases = r.json().get('items', {})
# We only care about the Python releases, which all are in the format
# "featured/google_appengine_{version}.zip". We'll extract the version
# number so we can sort the list by version, and finally get the download
# URL.
versions_and_urls = []
for release in releases:
match = PYTHON_RELEASE_RE.match(release['name'])
if not match:
continue
versions_and_urls.append(
([int(x) for x in match.groups()], release['mediaLink']))
return sorted(versions_and_urls, key=lambda x: x[0])
def is_existing_up_to_date(destination, latest_version):
"""Returns False if there is no existing install or if the existing install
is out of date. Otherwise, returns True."""
version_path = os.path.join(
destination, 'google_appengine', 'VERSION')
if not os.path.exists(version_path):
return False
with open(version_path, 'r') as f:
version_line = f.readline()
match = SDK_RELEASE_RE.match(version_line)
if not match:
print('Unable to parse version from:', version_line)
return False
version = [int(x) for x in match.groups()]
return version >= latest_version
def download_sdk(url):
"""Downloads the SDK and returns a file-like object for the zip content."""
r = requests.get(url)
r.raise_for_status()
return StringIO(r.content)
def extract_zip(zip, destination):
zip_contents = zipfile.ZipFile(zip)
if not os.path.exists(destination):
os.makedirs(destination)
zip_contents.extractall(destination)
def fixup_version(destination, version):
"""Newer releases of the SDK do not have the version number set correctly
in the VERSION file. Fix it up."""
version_path = os.path.join(
destination, 'google_appengine', 'VERSION')
with open(version_path, 'r') as f:
version_data = f.read()
version_data = version_data.replace(
'release: "0.0.0"',
'release: "{}"'.format('.'.join(str(x) for x in version)))
with open(version_path, 'w') as f:
f.write(version_data)
def register_commands(subparsers):
download = subparsers.add_parser(
'download-appengine-sdk',
help=download_command.__doc__)
download.set_defaults(func=download_command)
download.add_argument(
'destination',
help='Path to install the App Engine SDK')
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | get_package_info | python | def get_package_info(package):
url = 'https://pypi.python.org/pypi/{}/json'.format(package)
r = requests.get(url)
r.raise_for_status()
return r.json() | Gets the PyPI information for a given package. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L30-L35 | null | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks and updates dependencies to ensure they are the latest version.
"""
import sys
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
import packaging.version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import requests
def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result
def _get_newest_version(info):
versions = info['releases'].keys()
versions = [packaging.version.parse(version) for version in versions]
versions = [version for version in versions if not version.is_prerelease]
latest = sorted(versions).pop()
return latest
def _is_pinned(req):
"""Returns true if requirements have some version specifiers."""
return (req.specifier is not None) and len(req.specifier) > 0
def _is_version_range(req):
"""Returns true if requirements specify a version range."""
assert len(req.specifier) > 0
specs = list(req.specifier)
if len(specs) == 1:
# "foo > 2.0" or "foo == 2.4.3"
return specs[0].operator != '=='
else:
# "foo > 2.0, < 3.0"
return True
def update_req(req):
"""Updates a given req object with the latest version."""
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None
def write_requirements(reqs_linenum, req_file):
"""Writes a list of req objects out to a given file."""
with open(req_file, 'r') as input:
lines = input.readlines()
for req in reqs_linenum:
line_num = int(req[1])
if hasattr(req[0], 'link'):
lines[line_num - 1] = '{}\n'.format(req[0].link)
else:
lines[line_num - 1] = '{}\n'.format(req[0])
with open(req_file, 'w') as output:
output.writelines(lines)
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version
def update_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
skipped = []
if skip_packages is not None:
skipped = [req for req in reqs if req[0].name in skip_packages]
reqs = [req for req in reqs if req[0].name not in skip_packages]
reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]
updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]
write_requirements(updated_reqs + skipped, req_file)
return [x[1] for x in reqs_info_linenum if x[1]]
def check_requirements_file(req_file, skip_packages):
"""Return list of outdated requirements.
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore.
"""
reqs = read_requirements(req_file)
if skip_packages is not None:
reqs = [req for req in reqs if req.name not in skip_packages]
outdated_reqs = filter(None, [check_req(req) for req in reqs])
return outdated_reqs
def update_command(args):
"""Updates all dependencies the specified requirements file."""
updated = update_requirements_file(
args.requirements_file, args.skip_packages)
if updated:
print('Updated requirements in {}:'.format(args.requirements_file))
for item in updated:
print(' * {} from {} to {}.'.format(*item))
else:
print('All dependencies in {} are up-to-date.'.format(
args.requirements_file))
def check_command(args):
"""Checks that all dependencies in the specified requirements file are
up to date."""
outdated = check_requirements_file(args.requirements_file,
args.skip_packages)
if outdated:
print('Requirements in {} are out of date:'.format(
args.requirements_file))
for item in outdated:
print(' * {} is {} latest is {}.'.format(*item))
sys.exit(1)
else:
print('Requirements in {} are up to date.'.format(
args.requirements_file))
def register_commands(subparsers):
update = subparsers.add_parser(
'update-requirements',
help=update_command.__doc__)
update.set_defaults(func=update_command)
update.add_argument(
'requirements_file',
help='Path the the requirements.txt file to update.')
update.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check")
check = subparsers.add_parser(
'check-requirements',
help=check_command.__doc__)
check.set_defaults(func=check_command)
check.add_argument(
'requirements_file',
help='Path the the requirements.txt file to check.')
check.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check"
)
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | read_requirements | python | def read_requirements(req_file):
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result | Reads a requirements file.
Args:
req_file (str): Filename of requirements file | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L38-L56 | null | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks and updates dependencies to ensure they are the latest version.
"""
import sys
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
import packaging.version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import requests
def get_package_info(package):
"""Gets the PyPI information for a given package."""
url = 'https://pypi.python.org/pypi/{}/json'.format(package)
r = requests.get(url)
r.raise_for_status()
return r.json()
def _get_newest_version(info):
versions = info['releases'].keys()
versions = [packaging.version.parse(version) for version in versions]
versions = [version for version in versions if not version.is_prerelease]
latest = sorted(versions).pop()
return latest
def _is_pinned(req):
"""Returns true if requirements have some version specifiers."""
return (req.specifier is not None) and len(req.specifier) > 0
def _is_version_range(req):
"""Returns true if requirements specify a version range."""
assert len(req.specifier) > 0
specs = list(req.specifier)
if len(specs) == 1:
# "foo > 2.0" or "foo == 2.4.3"
return specs[0].operator != '=='
else:
# "foo > 2.0, < 3.0"
return True
def update_req(req):
"""Updates a given req object with the latest version."""
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None
def write_requirements(reqs_linenum, req_file):
"""Writes a list of req objects out to a given file."""
with open(req_file, 'r') as input:
lines = input.readlines()
for req in reqs_linenum:
line_num = int(req[1])
if hasattr(req[0], 'link'):
lines[line_num - 1] = '{}\n'.format(req[0].link)
else:
lines[line_num - 1] = '{}\n'.format(req[0])
with open(req_file, 'w') as output:
output.writelines(lines)
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version
def update_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
skipped = []
if skip_packages is not None:
skipped = [req for req in reqs if req[0].name in skip_packages]
reqs = [req for req in reqs if req[0].name not in skip_packages]
reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]
updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]
write_requirements(updated_reqs + skipped, req_file)
return [x[1] for x in reqs_info_linenum if x[1]]
def check_requirements_file(req_file, skip_packages):
"""Return list of outdated requirements.
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore.
"""
reqs = read_requirements(req_file)
if skip_packages is not None:
reqs = [req for req in reqs if req.name not in skip_packages]
outdated_reqs = filter(None, [check_req(req) for req in reqs])
return outdated_reqs
def update_command(args):
"""Updates all dependencies the specified requirements file."""
updated = update_requirements_file(
args.requirements_file, args.skip_packages)
if updated:
print('Updated requirements in {}:'.format(args.requirements_file))
for item in updated:
print(' * {} from {} to {}.'.format(*item))
else:
print('All dependencies in {} are up-to-date.'.format(
args.requirements_file))
def check_command(args):
"""Checks that all dependencies in the specified requirements file are
up to date."""
outdated = check_requirements_file(args.requirements_file,
args.skip_packages)
if outdated:
print('Requirements in {} are out of date:'.format(
args.requirements_file))
for item in outdated:
print(' * {} is {} latest is {}.'.format(*item))
sys.exit(1)
else:
print('Requirements in {} are up to date.'.format(
args.requirements_file))
def register_commands(subparsers):
update = subparsers.add_parser(
'update-requirements',
help=update_command.__doc__)
update.set_defaults(func=update_command)
update.add_argument(
'requirements_file',
help='Path the the requirements.txt file to update.')
update.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check")
check = subparsers.add_parser(
'check-requirements',
help=check_command.__doc__)
check.set_defaults(func=check_command)
check.add_argument(
'requirements_file',
help='Path the the requirements.txt file to check.')
check.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check"
)
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | _is_version_range | python | def _is_version_range(req):
assert len(req.specifier) > 0
specs = list(req.specifier)
if len(specs) == 1:
# "foo > 2.0" or "foo == 2.4.3"
return specs[0].operator != '=='
else:
# "foo > 2.0, < 3.0"
return True | Returns true if requirements specify a version range. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L72-L82 | null | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks and updates dependencies to ensure they are the latest version.
"""
import sys
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
import packaging.version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import requests
def get_package_info(package):
"""Gets the PyPI information for a given package."""
url = 'https://pypi.python.org/pypi/{}/json'.format(package)
r = requests.get(url)
r.raise_for_status()
return r.json()
def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result
def _get_newest_version(info):
versions = info['releases'].keys()
versions = [packaging.version.parse(version) for version in versions]
versions = [version for version in versions if not version.is_prerelease]
latest = sorted(versions).pop()
return latest
def _is_pinned(req):
"""Returns true if requirements have some version specifiers."""
return (req.specifier is not None) and len(req.specifier) > 0
def update_req(req):
"""Updates a given req object with the latest version."""
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None
def write_requirements(reqs_linenum, req_file):
"""Writes a list of req objects out to a given file."""
with open(req_file, 'r') as input:
lines = input.readlines()
for req in reqs_linenum:
line_num = int(req[1])
if hasattr(req[0], 'link'):
lines[line_num - 1] = '{}\n'.format(req[0].link)
else:
lines[line_num - 1] = '{}\n'.format(req[0])
with open(req_file, 'w') as output:
output.writelines(lines)
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version
def update_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
skipped = []
if skip_packages is not None:
skipped = [req for req in reqs if req[0].name in skip_packages]
reqs = [req for req in reqs if req[0].name not in skip_packages]
reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]
updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]
write_requirements(updated_reqs + skipped, req_file)
return [x[1] for x in reqs_info_linenum if x[1]]
def check_requirements_file(req_file, skip_packages):
"""Return list of outdated requirements.
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore.
"""
reqs = read_requirements(req_file)
if skip_packages is not None:
reqs = [req for req in reqs if req.name not in skip_packages]
outdated_reqs = filter(None, [check_req(req) for req in reqs])
return outdated_reqs
def update_command(args):
"""Updates all dependencies the specified requirements file."""
updated = update_requirements_file(
args.requirements_file, args.skip_packages)
if updated:
print('Updated requirements in {}:'.format(args.requirements_file))
for item in updated:
print(' * {} from {} to {}.'.format(*item))
else:
print('All dependencies in {} are up-to-date.'.format(
args.requirements_file))
def check_command(args):
"""Checks that all dependencies in the specified requirements file are
up to date."""
outdated = check_requirements_file(args.requirements_file,
args.skip_packages)
if outdated:
print('Requirements in {} are out of date:'.format(
args.requirements_file))
for item in outdated:
print(' * {} is {} latest is {}.'.format(*item))
sys.exit(1)
else:
print('Requirements in {} are up to date.'.format(
args.requirements_file))
def register_commands(subparsers):
update = subparsers.add_parser(
'update-requirements',
help=update_command.__doc__)
update.set_defaults(func=update_command)
update.add_argument(
'requirements_file',
help='Path the the requirements.txt file to update.')
update.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check")
check = subparsers.add_parser(
'check-requirements',
help=check_command.__doc__)
check.set_defaults(func=check_command)
check.add_argument(
'requirements_file',
help='Path the the requirements.txt file to check.')
check.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check"
)
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | update_req | python | def update_req(req):
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None | Updates a given req object with the latest version. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L85-L112 | [
"def get_package_info(package):\n \"\"\"Gets the PyPI information for a given package.\"\"\"\n url = 'https://pypi.python.org/pypi/{}/json'.format(package)\n r = requests.get(url)\n r.raise_for_status()\n return r.json()\n",
"def _is_pinned(req):\n \"\"\"Returns true if requirements have some version specifiers.\"\"\"\n return (req.specifier is not None) and len(req.specifier) > 0\n"
] | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks and updates dependencies to ensure they are the latest version.
"""
import sys
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
import packaging.version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import requests
def get_package_info(package):
"""Gets the PyPI information for a given package."""
url = 'https://pypi.python.org/pypi/{}/json'.format(package)
r = requests.get(url)
r.raise_for_status()
return r.json()
def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result
def _get_newest_version(info):
versions = info['releases'].keys()
versions = [packaging.version.parse(version) for version in versions]
versions = [version for version in versions if not version.is_prerelease]
latest = sorted(versions).pop()
return latest
def _is_pinned(req):
"""Returns true if requirements have some version specifiers."""
return (req.specifier is not None) and len(req.specifier) > 0
def _is_version_range(req):
"""Returns true if requirements specify a version range."""
assert len(req.specifier) > 0
specs = list(req.specifier)
if len(specs) == 1:
# "foo > 2.0" or "foo == 2.4.3"
return specs[0].operator != '=='
else:
# "foo > 2.0, < 3.0"
return True
def write_requirements(reqs_linenum, req_file):
"""Writes a list of req objects out to a given file."""
with open(req_file, 'r') as input:
lines = input.readlines()
for req in reqs_linenum:
line_num = int(req[1])
if hasattr(req[0], 'link'):
lines[line_num - 1] = '{}\n'.format(req[0].link)
else:
lines[line_num - 1] = '{}\n'.format(req[0])
with open(req_file, 'w') as output:
output.writelines(lines)
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version
def update_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
skipped = []
if skip_packages is not None:
skipped = [req for req in reqs if req[0].name in skip_packages]
reqs = [req for req in reqs if req[0].name not in skip_packages]
reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]
updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]
write_requirements(updated_reqs + skipped, req_file)
return [x[1] for x in reqs_info_linenum if x[1]]
def check_requirements_file(req_file, skip_packages):
"""Return list of outdated requirements.
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore.
"""
reqs = read_requirements(req_file)
if skip_packages is not None:
reqs = [req for req in reqs if req.name not in skip_packages]
outdated_reqs = filter(None, [check_req(req) for req in reqs])
return outdated_reqs
def update_command(args):
"""Updates all dependencies the specified requirements file."""
updated = update_requirements_file(
args.requirements_file, args.skip_packages)
if updated:
print('Updated requirements in {}:'.format(args.requirements_file))
for item in updated:
print(' * {} from {} to {}.'.format(*item))
else:
print('All dependencies in {} are up-to-date.'.format(
args.requirements_file))
def check_command(args):
"""Checks that all dependencies in the specified requirements file are
up to date."""
outdated = check_requirements_file(args.requirements_file,
args.skip_packages)
if outdated:
print('Requirements in {} are out of date:'.format(
args.requirements_file))
for item in outdated:
print(' * {} is {} latest is {}.'.format(*item))
sys.exit(1)
else:
print('Requirements in {} are up to date.'.format(
args.requirements_file))
def register_commands(subparsers):
update = subparsers.add_parser(
'update-requirements',
help=update_command.__doc__)
update.set_defaults(func=update_command)
update.add_argument(
'requirements_file',
help='Path the the requirements.txt file to update.')
update.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check")
check = subparsers.add_parser(
'check-requirements',
help=check_command.__doc__)
check.set_defaults(func=check_command)
check.add_argument(
'requirements_file',
help='Path the the requirements.txt file to check.')
check.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check"
)
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | write_requirements | python | def write_requirements(reqs_linenum, req_file):
with open(req_file, 'r') as input:
lines = input.readlines()
for req in reqs_linenum:
line_num = int(req[1])
if hasattr(req[0], 'link'):
lines[line_num - 1] = '{}\n'.format(req[0].link)
else:
lines[line_num - 1] = '{}\n'.format(req[0])
with open(req_file, 'w') as output:
output.writelines(lines) | Writes a list of req objects out to a given file. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L115-L129 | null | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks and updates dependencies to ensure they are the latest version.
"""
import sys
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
import packaging.version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import requests
def get_package_info(package):
"""Gets the PyPI information for a given package."""
url = 'https://pypi.python.org/pypi/{}/json'.format(package)
r = requests.get(url)
r.raise_for_status()
return r.json()
def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result
def _get_newest_version(info):
versions = info['releases'].keys()
versions = [packaging.version.parse(version) for version in versions]
versions = [version for version in versions if not version.is_prerelease]
latest = sorted(versions).pop()
return latest
def _is_pinned(req):
"""Returns true if requirements have some version specifiers."""
return (req.specifier is not None) and len(req.specifier) > 0
def _is_version_range(req):
"""Returns true if requirements specify a version range."""
assert len(req.specifier) > 0
specs = list(req.specifier)
if len(specs) == 1:
# "foo > 2.0" or "foo == 2.4.3"
return specs[0].operator != '=='
else:
# "foo > 2.0, < 3.0"
return True
def update_req(req):
"""Updates a given req object with the latest version."""
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version
def update_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
skipped = []
if skip_packages is not None:
skipped = [req for req in reqs if req[0].name in skip_packages]
reqs = [req for req in reqs if req[0].name not in skip_packages]
reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]
updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]
write_requirements(updated_reqs + skipped, req_file)
return [x[1] for x in reqs_info_linenum if x[1]]
def check_requirements_file(req_file, skip_packages):
"""Return list of outdated requirements.
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore.
"""
reqs = read_requirements(req_file)
if skip_packages is not None:
reqs = [req for req in reqs if req.name not in skip_packages]
outdated_reqs = filter(None, [check_req(req) for req in reqs])
return outdated_reqs
def update_command(args):
"""Updates all dependencies the specified requirements file."""
updated = update_requirements_file(
args.requirements_file, args.skip_packages)
if updated:
print('Updated requirements in {}:'.format(args.requirements_file))
for item in updated:
print(' * {} from {} to {}.'.format(*item))
else:
print('All dependencies in {} are up-to-date.'.format(
args.requirements_file))
def check_command(args):
"""Checks that all dependencies in the specified requirements file are
up to date."""
outdated = check_requirements_file(args.requirements_file,
args.skip_packages)
if outdated:
print('Requirements in {} are out of date:'.format(
args.requirements_file))
for item in outdated:
print(' * {} is {} latest is {}.'.format(*item))
sys.exit(1)
else:
print('Requirements in {} are up to date.'.format(
args.requirements_file))
def register_commands(subparsers):
update = subparsers.add_parser(
'update-requirements',
help=update_command.__doc__)
update.set_defaults(func=update_command)
update.add_argument(
'requirements_file',
help='Path the the requirements.txt file to update.')
update.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check")
check = subparsers.add_parser(
'check-requirements',
help=check_command.__doc__)
check.set_defaults(func=check_command)
check.add_argument(
'requirements_file',
help='Path the the requirements.txt file to check.')
check.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check"
)
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | check_req | python | def check_req(req):
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version | Checks if a given req is the latest version available. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L132-L146 | [
"def get_package_info(package):\n \"\"\"Gets the PyPI information for a given package.\"\"\"\n url = 'https://pypi.python.org/pypi/{}/json'.format(package)\n r = requests.get(url)\n r.raise_for_status()\n return r.json()\n",
"def _get_newest_version(info):\n versions = info['releases'].keys()\n versions = [packaging.version.parse(version) for version in versions]\n versions = [version for version in versions if not version.is_prerelease]\n latest = sorted(versions).pop()\n return latest\n",
"def _is_pinned(req):\n \"\"\"Returns true if requirements have some version specifiers.\"\"\"\n return (req.specifier is not None) and len(req.specifier) > 0\n"
] | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks and updates dependencies to ensure they are the latest version.
"""
import sys
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
import packaging.version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import requests
def get_package_info(package):
"""Gets the PyPI information for a given package."""
url = 'https://pypi.python.org/pypi/{}/json'.format(package)
r = requests.get(url)
r.raise_for_status()
return r.json()
def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result
def _get_newest_version(info):
versions = info['releases'].keys()
versions = [packaging.version.parse(version) for version in versions]
versions = [version for version in versions if not version.is_prerelease]
latest = sorted(versions).pop()
return latest
def _is_pinned(req):
"""Returns true if requirements have some version specifiers."""
return (req.specifier is not None) and len(req.specifier) > 0
def _is_version_range(req):
"""Returns true if requirements specify a version range."""
assert len(req.specifier) > 0
specs = list(req.specifier)
if len(specs) == 1:
# "foo > 2.0" or "foo == 2.4.3"
return specs[0].operator != '=='
else:
# "foo > 2.0, < 3.0"
return True
def update_req(req):
"""Updates a given req object with the latest version."""
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None
def write_requirements(reqs_linenum, req_file):
"""Writes a list of req objects out to a given file."""
with open(req_file, 'r') as input:
lines = input.readlines()
for req in reqs_linenum:
line_num = int(req[1])
if hasattr(req[0], 'link'):
lines[line_num - 1] = '{}\n'.format(req[0].link)
else:
lines[line_num - 1] = '{}\n'.format(req[0])
with open(req_file, 'w') as output:
output.writelines(lines)
def update_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
skipped = []
if skip_packages is not None:
skipped = [req for req in reqs if req[0].name in skip_packages]
reqs = [req for req in reqs if req[0].name not in skip_packages]
reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]
updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]
write_requirements(updated_reqs + skipped, req_file)
return [x[1] for x in reqs_info_linenum if x[1]]
def check_requirements_file(req_file, skip_packages):
"""Return list of outdated requirements.
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore.
"""
reqs = read_requirements(req_file)
if skip_packages is not None:
reqs = [req for req in reqs if req.name not in skip_packages]
outdated_reqs = filter(None, [check_req(req) for req in reqs])
return outdated_reqs
def update_command(args):
"""Updates all dependencies the specified requirements file."""
updated = update_requirements_file(
args.requirements_file, args.skip_packages)
if updated:
print('Updated requirements in {}:'.format(args.requirements_file))
for item in updated:
print(' * {} from {} to {}.'.format(*item))
else:
print('All dependencies in {} are up-to-date.'.format(
args.requirements_file))
def check_command(args):
"""Checks that all dependencies in the specified requirements file are
up to date."""
outdated = check_requirements_file(args.requirements_file,
args.skip_packages)
if outdated:
print('Requirements in {} are out of date:'.format(
args.requirements_file))
for item in outdated:
print(' * {} is {} latest is {}.'.format(*item))
sys.exit(1)
else:
print('Requirements in {} are up to date.'.format(
args.requirements_file))
def register_commands(subparsers):
update = subparsers.add_parser(
'update-requirements',
help=update_command.__doc__)
update.set_defaults(func=update_command)
update.add_argument(
'requirements_file',
help='Path the the requirements.txt file to update.')
update.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check")
check = subparsers.add_parser(
'check-requirements',
help=check_command.__doc__)
check.set_defaults(func=check_command)
check.add_argument(
'requirements_file',
help='Path the the requirements.txt file to check.')
check.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check"
)
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | check_requirements_file | python | def check_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
if skip_packages is not None:
reqs = [req for req in reqs if req.name not in skip_packages]
outdated_reqs = filter(None, [check_req(req) for req in reqs])
return outdated_reqs | Return list of outdated requirements.
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L162-L173 | [
"def read_requirements(req_file):\n \"\"\"Reads a requirements file.\n\n Args:\n req_file (str): Filename of requirements file\n \"\"\"\n items = list(parse_requirements(req_file, session={}))\n result = []\n\n for item in items:\n # Get line number from item\n line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]\n if item.req:\n item.req.marker = item.markers\n result.append((item.req, line_number))\n else:\n result.append((item, line_number))\n\n return result\n"
] | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks and updates dependencies to ensure they are the latest version.
"""
import sys
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
import packaging.version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import requests
def get_package_info(package):
"""Gets the PyPI information for a given package."""
url = 'https://pypi.python.org/pypi/{}/json'.format(package)
r = requests.get(url)
r.raise_for_status()
return r.json()
def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result
def _get_newest_version(info):
versions = info['releases'].keys()
versions = [packaging.version.parse(version) for version in versions]
versions = [version for version in versions if not version.is_prerelease]
latest = sorted(versions).pop()
return latest
def _is_pinned(req):
"""Returns true if requirements have some version specifiers."""
return (req.specifier is not None) and len(req.specifier) > 0
def _is_version_range(req):
"""Returns true if requirements specify a version range."""
assert len(req.specifier) > 0
specs = list(req.specifier)
if len(specs) == 1:
# "foo > 2.0" or "foo == 2.4.3"
return specs[0].operator != '=='
else:
# "foo > 2.0, < 3.0"
return True
def update_req(req):
"""Updates a given req object with the latest version."""
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None
def write_requirements(reqs_linenum, req_file):
"""Writes a list of req objects out to a given file."""
with open(req_file, 'r') as input:
lines = input.readlines()
for req in reqs_linenum:
line_num = int(req[1])
if hasattr(req[0], 'link'):
lines[line_num - 1] = '{}\n'.format(req[0].link)
else:
lines[line_num - 1] = '{}\n'.format(req[0])
with open(req_file, 'w') as output:
output.writelines(lines)
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version
def update_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
skipped = []
if skip_packages is not None:
skipped = [req for req in reqs if req[0].name in skip_packages]
reqs = [req for req in reqs if req[0].name not in skip_packages]
reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]
updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]
write_requirements(updated_reqs + skipped, req_file)
return [x[1] for x in reqs_info_linenum if x[1]]
def update_command(args):
"""Updates all dependencies the specified requirements file."""
updated = update_requirements_file(
args.requirements_file, args.skip_packages)
if updated:
print('Updated requirements in {}:'.format(args.requirements_file))
for item in updated:
print(' * {} from {} to {}.'.format(*item))
else:
print('All dependencies in {} are up-to-date.'.format(
args.requirements_file))
def check_command(args):
"""Checks that all dependencies in the specified requirements file are
up to date."""
outdated = check_requirements_file(args.requirements_file,
args.skip_packages)
if outdated:
print('Requirements in {} are out of date:'.format(
args.requirements_file))
for item in outdated:
print(' * {} is {} latest is {}.'.format(*item))
sys.exit(1)
else:
print('Requirements in {} are up to date.'.format(
args.requirements_file))
def register_commands(subparsers):
update = subparsers.add_parser(
'update-requirements',
help=update_command.__doc__)
update.set_defaults(func=update_command)
update.add_argument(
'requirements_file',
help='Path the the requirements.txt file to update.')
update.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check")
check = subparsers.add_parser(
'check-requirements',
help=check_command.__doc__)
check.set_defaults(func=check_command)
check.add_argument(
'requirements_file',
help='Path the the requirements.txt file to check.')
check.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check"
)
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | update_command | python | def update_command(args):
updated = update_requirements_file(
args.requirements_file, args.skip_packages)
if updated:
print('Updated requirements in {}:'.format(args.requirements_file))
for item in updated:
print(' * {} from {} to {}.'.format(*item))
else:
print('All dependencies in {} are up-to-date.'.format(
args.requirements_file)) | Updates all dependencies the specified requirements file. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L176-L188 | [
"def update_requirements_file(req_file, skip_packages):\n reqs = read_requirements(req_file)\n skipped = []\n if skip_packages is not None:\n skipped = [req for req in reqs if req[0].name in skip_packages]\n reqs = [req for req in reqs if req[0].name not in skip_packages]\n reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]\n\n updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]\n write_requirements(updated_reqs + skipped, req_file)\n return [x[1] for x in reqs_info_linenum if x[1]]\n"
] | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks and updates dependencies to ensure they are the latest version.
"""
import sys
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
import packaging.version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import requests
def get_package_info(package):
"""Gets the PyPI information for a given package."""
url = 'https://pypi.python.org/pypi/{}/json'.format(package)
r = requests.get(url)
r.raise_for_status()
return r.json()
def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result
def _get_newest_version(info):
versions = info['releases'].keys()
versions = [packaging.version.parse(version) for version in versions]
versions = [version for version in versions if not version.is_prerelease]
latest = sorted(versions).pop()
return latest
def _is_pinned(req):
"""Returns true if requirements have some version specifiers."""
return (req.specifier is not None) and len(req.specifier) > 0
def _is_version_range(req):
"""Returns true if requirements specify a version range."""
assert len(req.specifier) > 0
specs = list(req.specifier)
if len(specs) == 1:
# "foo > 2.0" or "foo == 2.4.3"
return specs[0].operator != '=='
else:
# "foo > 2.0, < 3.0"
return True
def update_req(req):
"""Updates a given req object with the latest version."""
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None
def write_requirements(reqs_linenum, req_file):
"""Writes a list of req objects out to a given file."""
with open(req_file, 'r') as input:
lines = input.readlines()
for req in reqs_linenum:
line_num = int(req[1])
if hasattr(req[0], 'link'):
lines[line_num - 1] = '{}\n'.format(req[0].link)
else:
lines[line_num - 1] = '{}\n'.format(req[0])
with open(req_file, 'w') as output:
output.writelines(lines)
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version
def update_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
skipped = []
if skip_packages is not None:
skipped = [req for req in reqs if req[0].name in skip_packages]
reqs = [req for req in reqs if req[0].name not in skip_packages]
reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]
updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]
write_requirements(updated_reqs + skipped, req_file)
return [x[1] for x in reqs_info_linenum if x[1]]
def check_requirements_file(req_file, skip_packages):
"""Return list of outdated requirements.
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore.
"""
reqs = read_requirements(req_file)
if skip_packages is not None:
reqs = [req for req in reqs if req.name not in skip_packages]
outdated_reqs = filter(None, [check_req(req) for req in reqs])
return outdated_reqs
def check_command(args):
"""Checks that all dependencies in the specified requirements file are
up to date."""
outdated = check_requirements_file(args.requirements_file,
args.skip_packages)
if outdated:
print('Requirements in {} are out of date:'.format(
args.requirements_file))
for item in outdated:
print(' * {} is {} latest is {}.'.format(*item))
sys.exit(1)
else:
print('Requirements in {} are up to date.'.format(
args.requirements_file))
def register_commands(subparsers):
update = subparsers.add_parser(
'update-requirements',
help=update_command.__doc__)
update.set_defaults(func=update_command)
update.add_argument(
'requirements_file',
help='Path the the requirements.txt file to update.')
update.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check")
check = subparsers.add_parser(
'check-requirements',
help=check_command.__doc__)
check.set_defaults(func=check_command)
check.add_argument(
'requirements_file',
help='Path the the requirements.txt file to check.')
check.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check"
)
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/requirements.py | check_command | python | def check_command(args):
outdated = check_requirements_file(args.requirements_file,
args.skip_packages)
if outdated:
print('Requirements in {} are out of date:'.format(
args.requirements_file))
for item in outdated:
print(' * {} is {} latest is {}.'.format(*item))
sys.exit(1)
else:
print('Requirements in {} are up to date.'.format(
args.requirements_file)) | Checks that all dependencies in the specified requirements file are
up to date. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/requirements.py#L191-L207 | [
"def check_requirements_file(req_file, skip_packages):\n \"\"\"Return list of outdated requirements.\n\n Args:\n req_file (str): Filename of requirements file\n skip_packages (list): List of package names to ignore.\n \"\"\"\n reqs = read_requirements(req_file)\n if skip_packages is not None:\n reqs = [req for req in reqs if req.name not in skip_packages]\n outdated_reqs = filter(None, [check_req(req) for req in reqs])\n return outdated_reqs\n"
] | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks and updates dependencies to ensure they are the latest version.
"""
import sys
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
import packaging.version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import requests
def get_package_info(package):
"""Gets the PyPI information for a given package."""
url = 'https://pypi.python.org/pypi/{}/json'.format(package)
r = requests.get(url)
r.raise_for_status()
return r.json()
def read_requirements(req_file):
"""Reads a requirements file.
Args:
req_file (str): Filename of requirements file
"""
items = list(parse_requirements(req_file, session={}))
result = []
for item in items:
# Get line number from item
line_number = item.comes_from.split(req_file + ' (line ')[1][:-1]
if item.req:
item.req.marker = item.markers
result.append((item.req, line_number))
else:
result.append((item, line_number))
return result
def _get_newest_version(info):
versions = info['releases'].keys()
versions = [packaging.version.parse(version) for version in versions]
versions = [version for version in versions if not version.is_prerelease]
latest = sorted(versions).pop()
return latest
def _is_pinned(req):
"""Returns true if requirements have some version specifiers."""
return (req.specifier is not None) and len(req.specifier) > 0
def _is_version_range(req):
"""Returns true if requirements specify a version range."""
assert len(req.specifier) > 0
specs = list(req.specifier)
if len(specs) == 1:
# "foo > 2.0" or "foo == 2.4.3"
return specs[0].operator != '=='
else:
# "foo > 2.0, < 3.0"
return True
def update_req(req):
"""Updates a given req object with the latest version."""
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None
def write_requirements(reqs_linenum, req_file):
"""Writes a list of req objects out to a given file."""
with open(req_file, 'r') as input:
lines = input.readlines()
for req in reqs_linenum:
line_num = int(req[1])
if hasattr(req[0], 'link'):
lines[line_num - 1] = '{}\n'.format(req[0].link)
else:
lines[line_num - 1] = '{}\n'.format(req[0])
with open(req_file, 'w') as output:
output.writelines(lines)
def check_req(req):
"""Checks if a given req is the latest version available."""
if not isinstance(req, Requirement):
return None
info = get_package_info(req.name)
newest_version = _get_newest_version(info)
if _is_pinned(req) and _is_version_range(req):
return None
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
if current_version != newest_version:
return req.name, current_version, newest_version
def update_requirements_file(req_file, skip_packages):
reqs = read_requirements(req_file)
skipped = []
if skip_packages is not None:
skipped = [req for req in reqs if req[0].name in skip_packages]
reqs = [req for req in reqs if req[0].name not in skip_packages]
reqs_info_linenum = [update_req(req[0]) + (req[1],) for req in reqs]
updated_reqs = [(x[0], x[2]) for x in reqs_info_linenum]
write_requirements(updated_reqs + skipped, req_file)
return [x[1] for x in reqs_info_linenum if x[1]]
def check_requirements_file(req_file, skip_packages):
"""Return list of outdated requirements.
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore.
"""
reqs = read_requirements(req_file)
if skip_packages is not None:
reqs = [req for req in reqs if req.name not in skip_packages]
outdated_reqs = filter(None, [check_req(req) for req in reqs])
return outdated_reqs
def update_command(args):
"""Updates all dependencies the specified requirements file."""
updated = update_requirements_file(
args.requirements_file, args.skip_packages)
if updated:
print('Updated requirements in {}:'.format(args.requirements_file))
for item in updated:
print(' * {} from {} to {}.'.format(*item))
else:
print('All dependencies in {} are up-to-date.'.format(
args.requirements_file))
def register_commands(subparsers):
update = subparsers.add_parser(
'update-requirements',
help=update_command.__doc__)
update.set_defaults(func=update_command)
update.add_argument(
'requirements_file',
help='Path the the requirements.txt file to update.')
update.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check")
check = subparsers.add_parser(
'check-requirements',
help=check_command.__doc__)
check.set_defaults(func=check_command)
check.add_argument(
'requirements_file',
help='Path the the requirements.txt file to check.')
check.add_argument(
'--skip-packages', nargs='+',
help="List of packages to ignore during the check"
)
|
GoogleCloudPlatform/python-repo-tools | gcp_devrel/tools/__init__.py | main | python | def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
appengine.register_commands(subparsers)
requirements.register_commands(subparsers)
pylint.register_commands(subparsers)
args = parser.parse_args()
args.func(args) | Entrypoint for the console script gcp-devrel-py-tools. | train | https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/__init__.py#L22-L32 | [
"def register_commands(subparsers):\n download = subparsers.add_parser(\n 'download-appengine-sdk',\n help=download_command.__doc__)\n download.set_defaults(func=download_command)\n download.add_argument(\n 'destination',\n help='Path to install the App Engine SDK')\n",
"def register_commands(subparsers):\n parser = subparsers.add_parser(\n 'run-pylint', help=__doc__)\n parser.set_defaults(func=run_command)\n parser.add_argument('--config')\n parser.add_argument('--library-filesets', nargs='+', default=[])\n parser.add_argument('--test-filesets', nargs='+', default=[])\n",
"def register_commands(subparsers):\n update = subparsers.add_parser(\n 'update-requirements',\n help=update_command.__doc__)\n update.set_defaults(func=update_command)\n\n update.add_argument(\n 'requirements_file',\n help='Path the the requirements.txt file to update.')\n update.add_argument(\n '--skip-packages', nargs='+',\n help=\"List of packages to ignore during the check\")\n\n check = subparsers.add_parser(\n 'check-requirements',\n help=check_command.__doc__)\n check.set_defaults(func=check_command)\n check.add_argument(\n 'requirements_file',\n help='Path the the requirements.txt file to check.')\n check.add_argument(\n '--skip-packages', nargs='+',\n help=\"List of packages to ignore during the check\"\n )\n"
] | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from . import appengine
from . import pylint
from . import requirements
|
agoragames/chai | chai/exception.py | pretty_format_args | python | def pretty_format_args(*args, **kwargs):
args = list([repr(a) for a in args])
for key, value in kwargs.items():
args.append("%s=%s" % (key, repr(value)))
return "(%s)" % ", ".join([a for a in args]) | Take the args, and kwargs that are passed them and format in a
prototype style. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/exception.py#L16-L24 | null | '''
Copyright (c) 2011-2017, Agora Games, LLC All rights reserved.
https://github.com/agoragames/chai/blob/master/LICENSE.txt
'''
from __future__ import absolute_import
import sys
import traceback
from ._termcolor import colored
# Refactored from ArgumentsExpectationRule
class ChaiException(RuntimeError):
'''
Base class for an actual error in chai.
'''
class UnsupportedStub(ChaiException):
'''
Can't stub the requested object or attribute.
'''
class UnsupportedModifier(ChaiException):
'''
Can't use the requested modifier.
'''
class ChaiAssertion(AssertionError):
'''
Base class for all assertion errors.
'''
class UnexpectedCall(BaseException):
'''
Raised when a unexpected call occurs to a stub.
'''
def __init__(self, msg=None, prefix=None, suffix=None, call=None,
args=None, kwargs=None, expected_args=None,
expected_kwargs=None):
if msg:
msg = colored('\n\n' + msg.strip(), 'red')
else:
msg = ''
if prefix:
msg = '\n\n' + prefix.strip() + msg
if call:
msg += colored('\n\nNo expectation in place for\n',
'white', attrs=['bold'])
msg += colored(call, 'red')
if args or kwargs:
msg += colored(pretty_format_args(*(args or ()),
**(kwargs or {})), 'red')
if expected_args or expected_kwargs:
msg += colored('\n\nExpected\n', 'white', attrs=['bold'])
msg += colored(call, 'red')
msg += colored(pretty_format_args(
*(expected_args or ()),
**(expected_kwargs or {})), 'red')
# If handling an exception, add printing of it here.
if sys.exc_info()[0]:
msg += colored('\n\nWhile handling\n', 'white', attrs=['bold'])
msg += colored(''.join(
traceback.format_exception(*sys.exc_info())),
'red')
if suffix:
msg = msg + '\n\n' + suffix.strip()
super(UnexpectedCall, self).__init__(msg)
class ExpectationNotSatisfied(ChaiAssertion):
'''
Raised when all expectations are not met
'''
def __init__(self, *expectations):
self._expectations = expectations
def __str__(self):
return str("\n".join([str(e) for e in self._expectations]))
|
agoragames/chai | chai/spy.py | Spy._call_spy | python | def _call_spy(self, *args, **kwargs):
'''
Wrapper to call the spied-on function. Operates similar to
Expectation.test.
'''
if self._spy_side_effect:
if self._spy_side_effect_args or self._spy_side_effect_kwargs:
self._spy_side_effect(
*self._spy_side_effect_args,
**self._spy_side_effect_kwargs)
else:
self._spy_side_effect(*args, **kwargs)
return_value = self._stub.call_orig(*args, **kwargs)
if self._spy_return:
self._spy_return(return_value)
return return_value | Wrapper to call the spied-on function. Operates similar to
Expectation.test. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/spy.py#L24-L41 | null | class Spy(Expectation):
def __init__(self, *args, **kwargs):
super(Spy, self).__init__(*args, **kwargs)
self._side_effect = self._call_spy
# To support side effects within spies
self._spy_side_effect = False
self._spy_side_effect_args = None
self._spy_side_effect_kwargs = None
self._spy_return = False
def side_effect(self, func, *args, **kwargs):
'''
Wrap side effects for spies.
'''
self._spy_side_effect = func
self._spy_side_effect_args = args
self._spy_side_effect_kwargs = kwargs
return self
def spy_return(self, func):
'''
Allow spies to react to return values.
'''
self._spy_return = func
return self
def returns(self, *args):
'''
Disable returns for spies.
'''
raise UnsupportedModifier("Can't use returns on spies")
def raises(self, *args):
'''
Disable raises for spies.
'''
raise UnsupportedModifier("Can't use raises on spies")
|
agoragames/chai | chai/spy.py | Spy.side_effect | python | def side_effect(self, func, *args, **kwargs):
'''
Wrap side effects for spies.
'''
self._spy_side_effect = func
self._spy_side_effect_args = args
self._spy_side_effect_kwargs = kwargs
return self | Wrap side effects for spies. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/spy.py#L43-L50 | null | class Spy(Expectation):
def __init__(self, *args, **kwargs):
super(Spy, self).__init__(*args, **kwargs)
self._side_effect = self._call_spy
# To support side effects within spies
self._spy_side_effect = False
self._spy_side_effect_args = None
self._spy_side_effect_kwargs = None
self._spy_return = False
def _call_spy(self, *args, **kwargs):
'''
Wrapper to call the spied-on function. Operates similar to
Expectation.test.
'''
if self._spy_side_effect:
if self._spy_side_effect_args or self._spy_side_effect_kwargs:
self._spy_side_effect(
*self._spy_side_effect_args,
**self._spy_side_effect_kwargs)
else:
self._spy_side_effect(*args, **kwargs)
return_value = self._stub.call_orig(*args, **kwargs)
if self._spy_return:
self._spy_return(return_value)
return return_value
def spy_return(self, func):
'''
Allow spies to react to return values.
'''
self._spy_return = func
return self
def returns(self, *args):
'''
Disable returns for spies.
'''
raise UnsupportedModifier("Can't use returns on spies")
def raises(self, *args):
'''
Disable raises for spies.
'''
raise UnsupportedModifier("Can't use raises on spies")
|
agoragames/chai | chai/stub.py | _stub_attr | python | def _stub_attr(obj, attr_name):
'''
Stub an attribute of an object. Will return an existing stub if
there already is one.
'''
# Annoying circular reference requires importing here. Would like to see
# this cleaned up. @AW
from .mock import Mock
# Check to see if this a property, this check is only for when dealing
# with an instance. getattr will work for classes.
is_property = False
if not inspect.isclass(obj) and not inspect.ismodule(obj):
# It's possible that the attribute is defined after initialization, and
# so is not on the class itself.
attr = getattr(obj.__class__, attr_name, None)
if isinstance(attr, property):
is_property = True
if not is_property:
attr = getattr(obj, attr_name)
# Return an existing stub
if isinstance(attr, Stub):
return attr
# If a Mock object, stub its __call__
if isinstance(attr, Mock):
return stub(attr.__call__)
if isinstance(attr, property):
return StubProperty(obj, attr_name)
# Sadly, builtin functions and methods have the same type, so we have to
# use the same stub class even though it's a bit ugly
if inspect.ismodule(obj) and isinstance(attr, (types.FunctionType,
types.BuiltinFunctionType,
types.BuiltinMethodType)):
return StubFunction(obj, attr_name)
# In python3 unbound methods are treated as functions with no reference
# back to the parent class and no im_* fields. We can still make unbound
# methods work by passing these through to the stub
if inspect.isclass(obj) and isinstance(attr, types.FunctionType):
return StubUnboundMethod(obj, attr_name)
# I thought that types.UnboundMethodType differentiated these cases but
# apparently not.
if isinstance(attr, types.MethodType):
# Handle differently if unbound because it's an implicit "any instance"
if getattr(attr, 'im_self', None) is None:
# Handle the python3 case and py2 filter
if hasattr(attr, '__self__'):
if attr.__self__ is not None:
return StubMethod(obj, attr_name)
if sys.version_info.major == 2:
return StubUnboundMethod(attr)
else:
return StubMethod(obj, attr_name)
if isinstance(attr, (types.BuiltinFunctionType, types.BuiltinMethodType)):
return StubFunction(obj, attr_name)
# What an absurd type this is ....
if type(attr).__name__ == 'method-wrapper':
return StubMethodWrapper(attr)
# This is also slot_descriptor
if type(attr).__name__ == 'wrapper_descriptor':
return StubWrapperDescriptor(obj, attr_name)
raise UnsupportedStub(
"can't stub %s(%s) of %s", attr_name, type(attr), obj) | Stub an attribute of an object. Will return an existing stub if
there already is one. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L32-L105 | [
"def stub(obj, attr=None):\n '''\n Stub an object. If attr is not None, will attempt to stub that attribute\n on the object. Only required for modules and other rare cases where we\n can't determine the binding from the object.\n '''\n if attr:\n return _stub_attr(obj, attr)\n else:\n return _stub_obj(obj)\n"
] | '''
Copyright (c) 2011-2017, Agora Games, LLC All rights reserved.
https://github.com/agoragames/chai/blob/master/LICENSE.txt
'''
import inspect
import types
import sys
import gc
from .expectation import Expectation
from .spy import Spy
from .exception import *
from ._termcolor import colored
# For clarity here and in tests, could make these class or static methods on
# Stub. Chai base class would hide that.
def stub(obj, attr=None):
'''
Stub an object. If attr is not None, will attempt to stub that attribute
on the object. Only required for modules and other rare cases where we
can't determine the binding from the object.
'''
if attr:
return _stub_attr(obj, attr)
else:
return _stub_obj(obj)
def _stub_obj(obj):
'''
Stub an object directly.
'''
# Annoying circular reference requires importing here. Would like to see
# this cleaned up. @AW
from .mock import Mock
# Return an existing stub
if isinstance(obj, Stub):
return obj
# If a Mock object, stub its __call__
if isinstance(obj, Mock):
return stub(obj.__call__)
# If passed-in a type, assume that we're going to stub out the creation.
# See StubNew for the awesome sauce.
# if isinstance(obj, types.TypeType):
if hasattr(types, 'TypeType') and isinstance(obj, types.TypeType):
return StubNew(obj)
elif hasattr(__builtins__, 'type') and \
isinstance(obj, __builtins__.type):
return StubNew(obj)
elif inspect.isclass(obj):
return StubNew(obj)
# I thought that types.UnboundMethodType differentiated these cases but
# apparently not.
if isinstance(obj, types.MethodType):
# Handle differently if unbound because it's an implicit "any instance"
if getattr(obj, 'im_self', None) is None:
# Handle the python3 case and py2 filter
if hasattr(obj, '__self__'):
if obj.__self__ is not None:
return StubMethod(obj)
if sys.version_info.major == 2:
return StubUnboundMethod(obj)
else:
return StubMethod(obj)
# These aren't in the types library
if type(obj).__name__ == 'method-wrapper':
return StubMethodWrapper(obj)
if type(obj).__name__ == 'wrapper_descriptor':
raise UnsupportedStub(
"must call stub(obj,'%s') for slot wrapper on %s",
obj.__name__, obj.__objclass__.__name__)
# (Mostly) Lastly, look for properties.
# First look for the situation where there's a reference back to the
# property.
prop = obj
if isinstance(getattr(obj, '__self__', None), property):
obj = prop.__self__
# Once we've found a property, we have to figure out how to reference
# back to the owning class. This is a giant pain and we have to use gc
# to find out where it comes from. This code is dense but resolves to
# something like this:
# >>> gc.get_referrers( foo.x )
# [{'__dict__': <attribute '__dict__' of 'foo' objects>,
# 'x': <property object at 0x7f68c99a16d8>,
# '__module__': '__main__',
# '__weakref__': <attribute '__weakref__' of 'foo' objects>,
# '__doc__': None}]
if isinstance(obj, property):
klass, attr = None, None
for ref in gc.get_referrers(obj):
if klass and attr:
break
if isinstance(ref, dict) and ref.get('prop', None) is obj:
klass = getattr(
ref.get('__dict__', None), '__objclass__', None)
for name, val in getattr(klass, '__dict__', {}).items():
if val is obj:
attr = name
break
# In the case of PyPy, we have to check all types that refer to
# the property, and see if any of their attrs are the property
elif isinstance(ref, type):
# Use dir as a means to quickly walk through the class tree
for name in dir(ref):
if getattr(ref, name) == obj:
klass = ref
attr = name
break
if klass and attr:
rval = stub(klass, attr)
if prop != obj:
return stub(rval, prop.__name__)
return rval
# If a function and it has an associated module, we can mock directly.
# Note that this *must* be after properties, otherwise it conflicts with
# stubbing out the deleter methods and such
# Sadly, builtin functions and methods have the same type, so we have to
# use the same stub class even though it's a bit ugly
if isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,
types.BuiltinMethodType)) and hasattr(obj, '__module__'):
return StubFunction(obj)
raise UnsupportedStub("can't stub %s", obj)
class Stub(object):
'''
Base class for all stubs.
'''
def __init__(self, obj, attr=None):
'''
Setup the structs for expectations
'''
self._obj = obj
self._attr = attr
self._expectations = []
self._torn = False
@property
def name(self):
return None # The base class implement this.
@property
def expectations(self):
return self._expectations
def unmet_expectations(self):
'''
Assert that all expectations on the stub have been met.
'''
unmet = []
for exp in self._expectations:
if not exp.closed(with_counts=True):
unmet.append(ExpectationNotSatisfied(exp))
return unmet
def teardown(self):
'''
Clean up all expectations and restore the original attribute of the
mocked object.
'''
if not self._torn:
self._expectations = []
self._torn = True
self._teardown()
def _teardown(self):
'''
Hook for subclasses to teardown their stubs. Called only once.
'''
def expect(self):
'''
Add an expectation to this stub. Return the expectation.
'''
exp = Expectation(self)
self._expectations.append(exp)
return exp
def spy(self):
'''
Add a spy to this stub. Return the spy.
'''
spy = Spy(self)
self._expectations.append(spy)
return spy
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, *args, **kwargs):
for exp in self._expectations:
# If expectation closed skip
if exp.closed():
continue
# If args don't match the expectation but its minimum counts have
# been met, close it and move on, else it's an unexpected call.
# Have to check counts here now due to the looser definitions of
# expectations in 0.3.x If we dont match, the counts aren't met
# and we're not allowing out-of-order, then break out and raise
# an exception.
if not exp.match(*args, **kwargs):
if exp.counts_met():
exp.close(*args, **kwargs)
elif not exp.is_any_order():
break
else:
return exp.test(*args, **kwargs)
raise UnexpectedCall(
call=self.name, suffix=self._format_exception(),
args=args, kwargs=kwargs)
def _format_exception(self):
result = [
colored("All expectations", 'white', attrs=['bold'])
]
for e in self._expectations:
result.append(str(e))
return "\n".join(result)
class StubProperty(Stub, property):
'''
Property stubbing.
'''
def __init__(self, obj, attr):
super(StubProperty, self).__init__(obj, attr)
property.__init__(self, lambda x: self(),
lambda x, val: self.setter(val),
lambda x: self.deleter())
# In order to stub out a property we have ask the class for the
# propery object that was created we python execute class code.
if inspect.isclass(obj):
self._instance = obj
else:
self._instance = obj.__class__
# Use a simple Mock object for the deleter and setter. Use same
# namespace as property type so that it simply works.
# Annoying circular reference requires importing here. Would like to
# see this cleaned up. @AW
from .mock import Mock
self._obj = getattr(self._instance, attr)
self.setter = Mock()
self.deleter = Mock()
setattr(self._instance, self._attr, self)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: this is probably the most complicated one to implement. Will
# figure it out eventually.
raise NotImplementedError("property spies are not supported")
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._instance, self._attr, self._obj)
class StubMethod(Stub):
'''
Stub a method.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object of type MethodType
'''
super(StubMethod, self).__init__(obj, attr)
if not self._attr:
# python3
if sys.version_info.major == 3: # hasattr(obj,'__func__'):
self._attr = obj.__func__.__name__
else:
self._attr = obj.im_func.func_name
if sys.version_info.major == 3: # hasattr(obj, '__self__'):
self._instance = obj.__self__
else:
self._instance = obj.im_self
else:
self._instance = self._obj
self._obj = getattr(self._instance, self._attr)
setattr(self._instance, self._attr, self)
@property
def name(self):
from .mock import Mock # Import here for the same reason as above.
if hasattr(self._obj, 'im_class'):
if issubclass(self._obj.im_class, Mock):
return self._obj.im_self._name
# Always use the class to get the name
klass = self._instance
if not inspect.isclass(self._instance):
klass = self._instance.__class__
return "%s.%s" % (klass.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
return self._obj.__func__(self._instance, *args, **kwargs)
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
return self._obj.im_func(self._instance, *args, **kwargs)
else:
return self._obj(*args, **kwargs)
def _teardown(self):
'''
Put the original method back in place. This will also handle the
special case when it putting back a class method.
The following code snippet best describe why it fails using settar,
the class method would be replaced with a bound method not a class
method.
>>> class Example(object):
... @classmethod
... def a_classmethod(self):
... pass
...
>>> Example.__dict__['a_classmethod']
<classmethod object at 0x7f5e6c298be8>
>>> orig = getattr(Example, 'a_classmethod')
>>> orig
<bound method type.a_classmethod of <class '__main__.Example'>>
>>> setattr(Example, 'a_classmethod', orig)
>>> Example.__dict__['a_classmethod']
<bound method type.a_classmethod of <class '__main__.Example'>>
The only way to figure out if this is a class method is to check and
see if the bound method im_self is a class, if so then we need to wrap
the function object (im_func) with class method before setting it back
on the class.
'''
# Figure out if this is a class method and we're unstubbing it on the
# class to which it belongs. This addresses an edge case where a
# module can expose a method of an instance. e.g gevent.
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
setattr(
self._instance, self._attr, classmethod(self._obj.__func__))
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
# Wrap it and set it back on the class
setattr(self._instance, self._attr, classmethod(self._obj.im_func))
else:
setattr(self._instance, self._attr, self._obj)
class StubFunction(Stub):
'''
Stub a function.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object that is an unbound method
'''
super(StubFunction, self).__init__(obj, attr)
if not self._attr:
if getattr(obj, '__module__', None):
self._instance = sys.modules[obj.__module__]
elif getattr(obj, '__self__', None):
self._instance = obj.__self__
else:
raise UnsupportedStub("Failed to find instance of %s" % (obj))
if getattr(obj, 'func_name', None):
self._attr = obj.func_name
elif getattr(obj, '__name__', None):
self._attr = obj.__name__
else:
raise UnsupportedStub("Failed to find name of %s" % (obj))
else:
self._instance = self._obj
self._obj = getattr(self._instance, self._attr)
# This handles the case where we're stubbing a special method that's
# inherited from object, and so instead of calling setattr on teardown,
# we want to call delattr. This is particularly important for not
# seeing those stupid DeprecationWarnings after StubNew
self._was_object_method = False
if hasattr(self._instance, '__dict__'):
self._was_object_method = \
self._attr not in self._instance.__dict__.keys() and\
self._attr in object.__dict__.keys()
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: Does this change if was_object_method?
return self._obj(*args, **kwargs)
def _teardown(self):
'''
Replace the original method.
'''
if not self._was_object_method:
setattr(self._instance, self._attr, self._obj)
else:
delattr(self._instance, self._attr)
class StubNew(StubFunction):
'''
Stub out the constructor, but hide the fact that we're stubbing "__new__"
and act more like we're stubbing "__init__". Needs to use the logic in
the StubFunction ctor.
'''
_cache = {}
def __new__(self, klass, *args):
'''
Because we're not saving the stub into any attribute, then we have
to do some faking here to return the same handle.
'''
rval = self._cache.get(klass)
if not rval:
rval = self._cache[klass] = super(
StubNew, self).__new__(self, *args)
rval._allow_init = True
else:
rval._allow_init = False
return rval
def __init__(self, obj):
'''
Overload the initialization so that we can hack access to __new__.
'''
if self._allow_init:
self._new = obj.__new__
super(StubNew, self).__init__(obj, '__new__')
self._type = obj
def __call__(self, *args, **kwargs):
'''
When calling the new function, strip out the first arg which is
the type. In this way, the mocker writes their expectation as if it
was an __init__.
'''
return super(StubNew, self).__call__(*(args[1:]), **kwargs)
def call_orig(self, *args, **kwargs):
'''
Calls the original function. Simulates __new__ and __init__ together.
'''
rval = super(StubNew, self).call_orig(self._type)
rval.__init__(*args, **kwargs)
return rval
def _teardown(self):
'''
Overload so that we can clear out the cache after a test run.
'''
# __new__ is a super-special case in that even when stubbing a class
# which implements its own __new__ and subclasses object, the
# "Class.__new__" reference is a staticmethod and not a method (or
# function). That confuses the "was_object_method" logic in
# StubFunction which then fails to delattr and from then on the class
# is corrupted. So skip that teardown and use a __new__-specific case.
setattr(self._instance, self._attr, staticmethod(self._new))
StubNew._cache.pop(self._type)
class StubUnboundMethod(Stub):
'''
Stub an unbound method.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object that is an unbound method
'''
# Note: It doesn't appear that there's any way to support stubbing
# by method in python3 because an unbound method has no reference
# to its parent class, it just looks like a regular function
super(StubUnboundMethod, self).__init__(obj, attr)
if self._attr is None:
self._instance = obj.im_class
self._attr = obj.im_func.func_name
else:
self._obj = getattr(obj, attr)
self._instance = obj
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: Figure out if this can be implemented. The challenge is that
# the context of "self" has to be passed in as an argument, but there's
# not necessarily a generic way of doing that. It may fall out as a
# side-effect of the actual implementation of spies.
raise NotImplementedError("unbound method spies are not supported")
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._instance, self._attr, self._obj)
class StubMethodWrapper(Stub):
'''
Stub a method-wrapper.
'''
def __init__(self, obj):
'''
Initialize with an object that is a method wrapper.
'''
super(StubMethodWrapper, self).__init__(obj)
self._instance = obj.__self__
self._attr = obj.__name__
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__class__.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
return self._obj(*args, **kwargs)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._instance, self._attr, self._obj)
class StubWrapperDescriptor(Stub):
'''
Stub a wrapper-descriptor. Only works when we can fetch it by name. Because
the w-d object doesn't contain both the instance ref and the attribute name
to be able to look it up. Used for mocking object.__init__ and related
builtin methods when subclasses that don't overload those.
'''
def __init__(self, obj, attr_name):
'''
Initialize with an object that is a method wrapper.
'''
super(StubWrapperDescriptor, self).__init__(obj, attr_name)
self._orig = getattr(self._obj, self._attr)
setattr(self._obj, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._obj.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
return self._orig(self._obj, *args, **kwargs)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._obj, self._attr, self._orig)
|
agoragames/chai | chai/stub.py | _stub_obj | python | def _stub_obj(obj):
'''
Stub an object directly.
'''
# Annoying circular reference requires importing here. Would like to see
# this cleaned up. @AW
from .mock import Mock
# Return an existing stub
if isinstance(obj, Stub):
return obj
# If a Mock object, stub its __call__
if isinstance(obj, Mock):
return stub(obj.__call__)
# If passed-in a type, assume that we're going to stub out the creation.
# See StubNew for the awesome sauce.
# if isinstance(obj, types.TypeType):
if hasattr(types, 'TypeType') and isinstance(obj, types.TypeType):
return StubNew(obj)
elif hasattr(__builtins__, 'type') and \
isinstance(obj, __builtins__.type):
return StubNew(obj)
elif inspect.isclass(obj):
return StubNew(obj)
# I thought that types.UnboundMethodType differentiated these cases but
# apparently not.
if isinstance(obj, types.MethodType):
# Handle differently if unbound because it's an implicit "any instance"
if getattr(obj, 'im_self', None) is None:
# Handle the python3 case and py2 filter
if hasattr(obj, '__self__'):
if obj.__self__ is not None:
return StubMethod(obj)
if sys.version_info.major == 2:
return StubUnboundMethod(obj)
else:
return StubMethod(obj)
# These aren't in the types library
if type(obj).__name__ == 'method-wrapper':
return StubMethodWrapper(obj)
if type(obj).__name__ == 'wrapper_descriptor':
raise UnsupportedStub(
"must call stub(obj,'%s') for slot wrapper on %s",
obj.__name__, obj.__objclass__.__name__)
# (Mostly) Lastly, look for properties.
# First look for the situation where there's a reference back to the
# property.
prop = obj
if isinstance(getattr(obj, '__self__', None), property):
obj = prop.__self__
# Once we've found a property, we have to figure out how to reference
# back to the owning class. This is a giant pain and we have to use gc
# to find out where it comes from. This code is dense but resolves to
# something like this:
# >>> gc.get_referrers( foo.x )
# [{'__dict__': <attribute '__dict__' of 'foo' objects>,
# 'x': <property object at 0x7f68c99a16d8>,
# '__module__': '__main__',
# '__weakref__': <attribute '__weakref__' of 'foo' objects>,
# '__doc__': None}]
if isinstance(obj, property):
klass, attr = None, None
for ref in gc.get_referrers(obj):
if klass and attr:
break
if isinstance(ref, dict) and ref.get('prop', None) is obj:
klass = getattr(
ref.get('__dict__', None), '__objclass__', None)
for name, val in getattr(klass, '__dict__', {}).items():
if val is obj:
attr = name
break
# In the case of PyPy, we have to check all types that refer to
# the property, and see if any of their attrs are the property
elif isinstance(ref, type):
# Use dir as a means to quickly walk through the class tree
for name in dir(ref):
if getattr(ref, name) == obj:
klass = ref
attr = name
break
if klass and attr:
rval = stub(klass, attr)
if prop != obj:
return stub(rval, prop.__name__)
return rval
# If a function and it has an associated module, we can mock directly.
# Note that this *must* be after properties, otherwise it conflicts with
# stubbing out the deleter methods and such
# Sadly, builtin functions and methods have the same type, so we have to
# use the same stub class even though it's a bit ugly
if isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,
types.BuiltinMethodType)) and hasattr(obj, '__module__'):
return StubFunction(obj)
raise UnsupportedStub("can't stub %s", obj) | Stub an object directly. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L108-L212 | [
"def stub(obj, attr=None):\n '''\n Stub an object. If attr is not None, will attempt to stub that attribute\n on the object. Only required for modules and other rare cases where we\n can't determine the binding from the object.\n '''\n if attr:\n return _stub_attr(obj, attr)\n else:\n return _stub_obj(obj)\n"
] | '''
Copyright (c) 2011-2017, Agora Games, LLC All rights reserved.
https://github.com/agoragames/chai/blob/master/LICENSE.txt
'''
import inspect
import types
import sys
import gc
from .expectation import Expectation
from .spy import Spy
from .exception import *
from ._termcolor import colored
# For clarity here and in tests, could make these class or static methods on
# Stub. Chai base class would hide that.
def stub(obj, attr=None):
'''
Stub an object. If attr is not None, will attempt to stub that attribute
on the object. Only required for modules and other rare cases where we
can't determine the binding from the object.
'''
if attr:
return _stub_attr(obj, attr)
else:
return _stub_obj(obj)
def _stub_attr(obj, attr_name):
'''
Stub an attribute of an object. Will return an existing stub if
there already is one.
'''
# Annoying circular reference requires importing here. Would like to see
# this cleaned up. @AW
from .mock import Mock
# Check to see if this a property, this check is only for when dealing
# with an instance. getattr will work for classes.
is_property = False
if not inspect.isclass(obj) and not inspect.ismodule(obj):
# It's possible that the attribute is defined after initialization, and
# so is not on the class itself.
attr = getattr(obj.__class__, attr_name, None)
if isinstance(attr, property):
is_property = True
if not is_property:
attr = getattr(obj, attr_name)
# Return an existing stub
if isinstance(attr, Stub):
return attr
# If a Mock object, stub its __call__
if isinstance(attr, Mock):
return stub(attr.__call__)
if isinstance(attr, property):
return StubProperty(obj, attr_name)
# Sadly, builtin functions and methods have the same type, so we have to
# use the same stub class even though it's a bit ugly
if inspect.ismodule(obj) and isinstance(attr, (types.FunctionType,
types.BuiltinFunctionType,
types.BuiltinMethodType)):
return StubFunction(obj, attr_name)
# In python3 unbound methods are treated as functions with no reference
# back to the parent class and no im_* fields. We can still make unbound
# methods work by passing these through to the stub
if inspect.isclass(obj) and isinstance(attr, types.FunctionType):
return StubUnboundMethod(obj, attr_name)
# I thought that types.UnboundMethodType differentiated these cases but
# apparently not.
if isinstance(attr, types.MethodType):
# Handle differently if unbound because it's an implicit "any instance"
if getattr(attr, 'im_self', None) is None:
# Handle the python3 case and py2 filter
if hasattr(attr, '__self__'):
if attr.__self__ is not None:
return StubMethod(obj, attr_name)
if sys.version_info.major == 2:
return StubUnboundMethod(attr)
else:
return StubMethod(obj, attr_name)
if isinstance(attr, (types.BuiltinFunctionType, types.BuiltinMethodType)):
return StubFunction(obj, attr_name)
# What an absurd type this is ....
if type(attr).__name__ == 'method-wrapper':
return StubMethodWrapper(attr)
# This is also slot_descriptor
if type(attr).__name__ == 'wrapper_descriptor':
return StubWrapperDescriptor(obj, attr_name)
raise UnsupportedStub(
"can't stub %s(%s) of %s", attr_name, type(attr), obj)
class Stub(object):
'''
Base class for all stubs.
'''
def __init__(self, obj, attr=None):
'''
Setup the structs for expectations
'''
self._obj = obj
self._attr = attr
self._expectations = []
self._torn = False
@property
def name(self):
return None # The base class implement this.
@property
def expectations(self):
return self._expectations
def unmet_expectations(self):
'''
Assert that all expectations on the stub have been met.
'''
unmet = []
for exp in self._expectations:
if not exp.closed(with_counts=True):
unmet.append(ExpectationNotSatisfied(exp))
return unmet
def teardown(self):
'''
Clean up all expectations and restore the original attribute of the
mocked object.
'''
if not self._torn:
self._expectations = []
self._torn = True
self._teardown()
def _teardown(self):
'''
Hook for subclasses to teardown their stubs. Called only once.
'''
def expect(self):
'''
Add an expectation to this stub. Return the expectation.
'''
exp = Expectation(self)
self._expectations.append(exp)
return exp
def spy(self):
'''
Add a spy to this stub. Return the spy.
'''
spy = Spy(self)
self._expectations.append(spy)
return spy
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, *args, **kwargs):
for exp in self._expectations:
# If expectation closed skip
if exp.closed():
continue
# If args don't match the expectation but its minimum counts have
# been met, close it and move on, else it's an unexpected call.
# Have to check counts here now due to the looser definitions of
# expectations in 0.3.x If we dont match, the counts aren't met
# and we're not allowing out-of-order, then break out and raise
# an exception.
if not exp.match(*args, **kwargs):
if exp.counts_met():
exp.close(*args, **kwargs)
elif not exp.is_any_order():
break
else:
return exp.test(*args, **kwargs)
raise UnexpectedCall(
call=self.name, suffix=self._format_exception(),
args=args, kwargs=kwargs)
def _format_exception(self):
result = [
colored("All expectations", 'white', attrs=['bold'])
]
for e in self._expectations:
result.append(str(e))
return "\n".join(result)
class StubProperty(Stub, property):
'''
Property stubbing.
'''
def __init__(self, obj, attr):
super(StubProperty, self).__init__(obj, attr)
property.__init__(self, lambda x: self(),
lambda x, val: self.setter(val),
lambda x: self.deleter())
# In order to stub out a property we have ask the class for the
# propery object that was created we python execute class code.
if inspect.isclass(obj):
self._instance = obj
else:
self._instance = obj.__class__
# Use a simple Mock object for the deleter and setter. Use same
# namespace as property type so that it simply works.
# Annoying circular reference requires importing here. Would like to
# see this cleaned up. @AW
from .mock import Mock
self._obj = getattr(self._instance, attr)
self.setter = Mock()
self.deleter = Mock()
setattr(self._instance, self._attr, self)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: this is probably the most complicated one to implement. Will
# figure it out eventually.
raise NotImplementedError("property spies are not supported")
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._instance, self._attr, self._obj)
class StubMethod(Stub):
'''
Stub a method.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object of type MethodType
'''
super(StubMethod, self).__init__(obj, attr)
if not self._attr:
# python3
if sys.version_info.major == 3: # hasattr(obj,'__func__'):
self._attr = obj.__func__.__name__
else:
self._attr = obj.im_func.func_name
if sys.version_info.major == 3: # hasattr(obj, '__self__'):
self._instance = obj.__self__
else:
self._instance = obj.im_self
else:
self._instance = self._obj
self._obj = getattr(self._instance, self._attr)
setattr(self._instance, self._attr, self)
@property
def name(self):
from .mock import Mock # Import here for the same reason as above.
if hasattr(self._obj, 'im_class'):
if issubclass(self._obj.im_class, Mock):
return self._obj.im_self._name
# Always use the class to get the name
klass = self._instance
if not inspect.isclass(self._instance):
klass = self._instance.__class__
return "%s.%s" % (klass.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
return self._obj.__func__(self._instance, *args, **kwargs)
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
return self._obj.im_func(self._instance, *args, **kwargs)
else:
return self._obj(*args, **kwargs)
def _teardown(self):
'''
Put the original method back in place. This will also handle the
special case when it putting back a class method.
The following code snippet best describe why it fails using settar,
the class method would be replaced with a bound method not a class
method.
>>> class Example(object):
... @classmethod
... def a_classmethod(self):
... pass
...
>>> Example.__dict__['a_classmethod']
<classmethod object at 0x7f5e6c298be8>
>>> orig = getattr(Example, 'a_classmethod')
>>> orig
<bound method type.a_classmethod of <class '__main__.Example'>>
>>> setattr(Example, 'a_classmethod', orig)
>>> Example.__dict__['a_classmethod']
<bound method type.a_classmethod of <class '__main__.Example'>>
The only way to figure out if this is a class method is to check and
see if the bound method im_self is a class, if so then we need to wrap
the function object (im_func) with class method before setting it back
on the class.
'''
# Figure out if this is a class method and we're unstubbing it on the
# class to which it belongs. This addresses an edge case where a
# module can expose a method of an instance. e.g gevent.
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
setattr(
self._instance, self._attr, classmethod(self._obj.__func__))
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
# Wrap it and set it back on the class
setattr(self._instance, self._attr, classmethod(self._obj.im_func))
else:
setattr(self._instance, self._attr, self._obj)
class StubFunction(Stub):
'''
Stub a function.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object that is an unbound method
'''
super(StubFunction, self).__init__(obj, attr)
if not self._attr:
if getattr(obj, '__module__', None):
self._instance = sys.modules[obj.__module__]
elif getattr(obj, '__self__', None):
self._instance = obj.__self__
else:
raise UnsupportedStub("Failed to find instance of %s" % (obj))
if getattr(obj, 'func_name', None):
self._attr = obj.func_name
elif getattr(obj, '__name__', None):
self._attr = obj.__name__
else:
raise UnsupportedStub("Failed to find name of %s" % (obj))
else:
self._instance = self._obj
self._obj = getattr(self._instance, self._attr)
# This handles the case where we're stubbing a special method that's
# inherited from object, and so instead of calling setattr on teardown,
# we want to call delattr. This is particularly important for not
# seeing those stupid DeprecationWarnings after StubNew
self._was_object_method = False
if hasattr(self._instance, '__dict__'):
self._was_object_method = \
self._attr not in self._instance.__dict__.keys() and\
self._attr in object.__dict__.keys()
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: Does this change if was_object_method?
return self._obj(*args, **kwargs)
def _teardown(self):
'''
Replace the original method.
'''
if not self._was_object_method:
setattr(self._instance, self._attr, self._obj)
else:
delattr(self._instance, self._attr)
class StubNew(StubFunction):
'''
Stub out the constructor, but hide the fact that we're stubbing "__new__"
and act more like we're stubbing "__init__". Needs to use the logic in
the StubFunction ctor.
'''
_cache = {}
def __new__(self, klass, *args):
'''
Because we're not saving the stub into any attribute, then we have
to do some faking here to return the same handle.
'''
rval = self._cache.get(klass)
if not rval:
rval = self._cache[klass] = super(
StubNew, self).__new__(self, *args)
rval._allow_init = True
else:
rval._allow_init = False
return rval
def __init__(self, obj):
'''
Overload the initialization so that we can hack access to __new__.
'''
if self._allow_init:
self._new = obj.__new__
super(StubNew, self).__init__(obj, '__new__')
self._type = obj
def __call__(self, *args, **kwargs):
'''
When calling the new function, strip out the first arg which is
the type. In this way, the mocker writes their expectation as if it
was an __init__.
'''
return super(StubNew, self).__call__(*(args[1:]), **kwargs)
def call_orig(self, *args, **kwargs):
'''
Calls the original function. Simulates __new__ and __init__ together.
'''
rval = super(StubNew, self).call_orig(self._type)
rval.__init__(*args, **kwargs)
return rval
def _teardown(self):
'''
Overload so that we can clear out the cache after a test run.
'''
# __new__ is a super-special case in that even when stubbing a class
# which implements its own __new__ and subclasses object, the
# "Class.__new__" reference is a staticmethod and not a method (or
# function). That confuses the "was_object_method" logic in
# StubFunction which then fails to delattr and from then on the class
# is corrupted. So skip that teardown and use a __new__-specific case.
setattr(self._instance, self._attr, staticmethod(self._new))
StubNew._cache.pop(self._type)
class StubUnboundMethod(Stub):
'''
Stub an unbound method.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object that is an unbound method
'''
# Note: It doesn't appear that there's any way to support stubbing
# by method in python3 because an unbound method has no reference
# to its parent class, it just looks like a regular function
super(StubUnboundMethod, self).__init__(obj, attr)
if self._attr is None:
self._instance = obj.im_class
self._attr = obj.im_func.func_name
else:
self._obj = getattr(obj, attr)
self._instance = obj
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: Figure out if this can be implemented. The challenge is that
# the context of "self" has to be passed in as an argument, but there's
# not necessarily a generic way of doing that. It may fall out as a
# side-effect of the actual implementation of spies.
raise NotImplementedError("unbound method spies are not supported")
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._instance, self._attr, self._obj)
class StubMethodWrapper(Stub):
'''
Stub a method-wrapper.
'''
def __init__(self, obj):
'''
Initialize with an object that is a method wrapper.
'''
super(StubMethodWrapper, self).__init__(obj)
self._instance = obj.__self__
self._attr = obj.__name__
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__class__.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
return self._obj(*args, **kwargs)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._instance, self._attr, self._obj)
class StubWrapperDescriptor(Stub):
'''
Stub a wrapper-descriptor. Only works when we can fetch it by name. Because
the w-d object doesn't contain both the instance ref and the attribute name
to be able to look it up. Used for mocking object.__init__ and related
builtin methods when subclasses that don't overload those.
'''
def __init__(self, obj, attr_name):
'''
Initialize with an object that is a method wrapper.
'''
super(StubWrapperDescriptor, self).__init__(obj, attr_name)
self._orig = getattr(self._obj, self._attr)
setattr(self._obj, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._obj.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
return self._orig(self._obj, *args, **kwargs)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._obj, self._attr, self._orig)
|
agoragames/chai | chai/stub.py | Stub.unmet_expectations | python | def unmet_expectations(self):
'''
Assert that all expectations on the stub have been met.
'''
unmet = []
for exp in self._expectations:
if not exp.closed(with_counts=True):
unmet.append(ExpectationNotSatisfied(exp))
return unmet | Assert that all expectations on the stub have been met. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L238-L246 | null | class Stub(object):
'''
Base class for all stubs.
'''
def __init__(self, obj, attr=None):
'''
Setup the structs for expectations
'''
self._obj = obj
self._attr = attr
self._expectations = []
self._torn = False
@property
def name(self):
return None # The base class implement this.
@property
def expectations(self):
return self._expectations
def teardown(self):
'''
Clean up all expectations and restore the original attribute of the
mocked object.
'''
if not self._torn:
self._expectations = []
self._torn = True
self._teardown()
def _teardown(self):
'''
Hook for subclasses to teardown their stubs. Called only once.
'''
def expect(self):
'''
Add an expectation to this stub. Return the expectation.
'''
exp = Expectation(self)
self._expectations.append(exp)
return exp
def spy(self):
'''
Add a spy to this stub. Return the spy.
'''
spy = Spy(self)
self._expectations.append(spy)
return spy
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, *args, **kwargs):
for exp in self._expectations:
# If expectation closed skip
if exp.closed():
continue
# If args don't match the expectation but its minimum counts have
# been met, close it and move on, else it's an unexpected call.
# Have to check counts here now due to the looser definitions of
# expectations in 0.3.x If we dont match, the counts aren't met
# and we're not allowing out-of-order, then break out and raise
# an exception.
if not exp.match(*args, **kwargs):
if exp.counts_met():
exp.close(*args, **kwargs)
elif not exp.is_any_order():
break
else:
return exp.test(*args, **kwargs)
raise UnexpectedCall(
call=self.name, suffix=self._format_exception(),
args=args, kwargs=kwargs)
def _format_exception(self):
result = [
colored("All expectations", 'white', attrs=['bold'])
]
for e in self._expectations:
result.append(str(e))
return "\n".join(result)
|
agoragames/chai | chai/stub.py | Stub.teardown | python | def teardown(self):
'''
Clean up all expectations and restore the original attribute of the
mocked object.
'''
if not self._torn:
self._expectations = []
self._torn = True
self._teardown() | Clean up all expectations and restore the original attribute of the
mocked object. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L248-L256 | [
"def _teardown(self):\n '''\n Hook for subclasses to teardown their stubs. Called only once.\n '''\n",
"def _teardown(self):\n '''\n Put the original method back in place. This will also handle the\n special case when it putting back a class method.\n\n The following code snippet best describe why it fails using settar,\n the class method would be replaced with a bound method not a class\n method.\n\n >>> class Example(object):\n ... @classmethod\n ... def a_classmethod(self):\n ... pass\n ...\n >>> Example.__dict__['a_classmethod']\n <classmethod object at 0x7f5e6c298be8>\n >>> orig = getattr(Example, 'a_classmethod')\n >>> orig\n <bound method type.a_classmethod of <class '__main__.Example'>>\n >>> setattr(Example, 'a_classmethod', orig)\n >>> Example.__dict__['a_classmethod']\n <bound method type.a_classmethod of <class '__main__.Example'>>\n\n The only way to figure out if this is a class method is to check and\n see if the bound method im_self is a class, if so then we need to wrap\n the function object (im_func) with class method before setting it back\n on the class.\n '''\n # Figure out if this is a class method and we're unstubbing it on the\n # class to which it belongs. This addresses an edge case where a\n # module can expose a method of an instance. e.g gevent.\n if hasattr(self._obj, '__self__') and \\\n inspect.isclass(self._obj.__self__) and \\\n self._obj.__self__ is self._instance:\n setattr(\n self._instance, self._attr, classmethod(self._obj.__func__))\n elif hasattr(self._obj, 'im_self') and \\\n inspect.isclass(self._obj.im_self) and \\\n self._obj.im_self is self._instance:\n # Wrap it and set it back on the class\n setattr(self._instance, self._attr, classmethod(self._obj.im_func))\n else:\n setattr(self._instance, self._attr, self._obj)\n",
"def _teardown(self):\n '''\n Replace the original method.\n '''\n if not self._was_object_method:\n setattr(self._instance, self._attr, self._obj)\n else:\n delattr(self._instance, self._attr)\n",
"def _teardown(self):\n '''\n Replace the original method.\n '''\n setattr(self._instance, self._attr, self._obj)\n",
"def _teardown(self):\n '''\n Replace the original method.\n '''\n setattr(self._instance, self._attr, self._obj)\n",
"def _teardown(self):\n '''\n Replace the original method.\n '''\n setattr(self._obj, self._attr, self._orig)\n"
] | class Stub(object):
'''
Base class for all stubs.
'''
def __init__(self, obj, attr=None):
'''
Setup the structs for expectations
'''
self._obj = obj
self._attr = attr
self._expectations = []
self._torn = False
@property
def name(self):
return None # The base class implement this.
@property
def expectations(self):
return self._expectations
def unmet_expectations(self):
'''
Assert that all expectations on the stub have been met.
'''
unmet = []
for exp in self._expectations:
if not exp.closed(with_counts=True):
unmet.append(ExpectationNotSatisfied(exp))
return unmet
def _teardown(self):
'''
Hook for subclasses to teardown their stubs. Called only once.
'''
def expect(self):
'''
Add an expectation to this stub. Return the expectation.
'''
exp = Expectation(self)
self._expectations.append(exp)
return exp
def spy(self):
'''
Add a spy to this stub. Return the spy.
'''
spy = Spy(self)
self._expectations.append(spy)
return spy
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, *args, **kwargs):
for exp in self._expectations:
# If expectation closed skip
if exp.closed():
continue
# If args don't match the expectation but its minimum counts have
# been met, close it and move on, else it's an unexpected call.
# Have to check counts here now due to the looser definitions of
# expectations in 0.3.x If we dont match, the counts aren't met
# and we're not allowing out-of-order, then break out and raise
# an exception.
if not exp.match(*args, **kwargs):
if exp.counts_met():
exp.close(*args, **kwargs)
elif not exp.is_any_order():
break
else:
return exp.test(*args, **kwargs)
raise UnexpectedCall(
call=self.name, suffix=self._format_exception(),
args=args, kwargs=kwargs)
def _format_exception(self):
result = [
colored("All expectations", 'white', attrs=['bold'])
]
for e in self._expectations:
result.append(str(e))
return "\n".join(result)
|
agoragames/chai | chai/stub.py | Stub.expect | python | def expect(self):
'''
Add an expectation to this stub. Return the expectation.
'''
exp = Expectation(self)
self._expectations.append(exp)
return exp | Add an expectation to this stub. Return the expectation. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L263-L269 | null | class Stub(object):
'''
Base class for all stubs.
'''
def __init__(self, obj, attr=None):
'''
Setup the structs for expectations
'''
self._obj = obj
self._attr = attr
self._expectations = []
self._torn = False
@property
def name(self):
return None # The base class implement this.
@property
def expectations(self):
return self._expectations
def unmet_expectations(self):
'''
Assert that all expectations on the stub have been met.
'''
unmet = []
for exp in self._expectations:
if not exp.closed(with_counts=True):
unmet.append(ExpectationNotSatisfied(exp))
return unmet
def teardown(self):
'''
Clean up all expectations and restore the original attribute of the
mocked object.
'''
if not self._torn:
self._expectations = []
self._torn = True
self._teardown()
def _teardown(self):
'''
Hook for subclasses to teardown their stubs. Called only once.
'''
def spy(self):
'''
Add a spy to this stub. Return the spy.
'''
spy = Spy(self)
self._expectations.append(spy)
return spy
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, *args, **kwargs):
for exp in self._expectations:
# If expectation closed skip
if exp.closed():
continue
# If args don't match the expectation but its minimum counts have
# been met, close it and move on, else it's an unexpected call.
# Have to check counts here now due to the looser definitions of
# expectations in 0.3.x If we dont match, the counts aren't met
# and we're not allowing out-of-order, then break out and raise
# an exception.
if not exp.match(*args, **kwargs):
if exp.counts_met():
exp.close(*args, **kwargs)
elif not exp.is_any_order():
break
else:
return exp.test(*args, **kwargs)
raise UnexpectedCall(
call=self.name, suffix=self._format_exception(),
args=args, kwargs=kwargs)
def _format_exception(self):
result = [
colored("All expectations", 'white', attrs=['bold'])
]
for e in self._expectations:
result.append(str(e))
return "\n".join(result)
|
agoragames/chai | chai/stub.py | Stub.spy | python | def spy(self):
'''
Add a spy to this stub. Return the spy.
'''
spy = Spy(self)
self._expectations.append(spy)
return spy | Add a spy to this stub. Return the spy. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L271-L277 | null | class Stub(object):
'''
Base class for all stubs.
'''
def __init__(self, obj, attr=None):
'''
Setup the structs for expectations
'''
self._obj = obj
self._attr = attr
self._expectations = []
self._torn = False
@property
def name(self):
return None # The base class implement this.
@property
def expectations(self):
return self._expectations
def unmet_expectations(self):
'''
Assert that all expectations on the stub have been met.
'''
unmet = []
for exp in self._expectations:
if not exp.closed(with_counts=True):
unmet.append(ExpectationNotSatisfied(exp))
return unmet
def teardown(self):
'''
Clean up all expectations and restore the original attribute of the
mocked object.
'''
if not self._torn:
self._expectations = []
self._torn = True
self._teardown()
def _teardown(self):
'''
Hook for subclasses to teardown their stubs. Called only once.
'''
def expect(self):
'''
Add an expectation to this stub. Return the expectation.
'''
exp = Expectation(self)
self._expectations.append(exp)
return exp
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, *args, **kwargs):
for exp in self._expectations:
# If expectation closed skip
if exp.closed():
continue
# If args don't match the expectation but its minimum counts have
# been met, close it and move on, else it's an unexpected call.
# Have to check counts here now due to the looser definitions of
# expectations in 0.3.x If we dont match, the counts aren't met
# and we're not allowing out-of-order, then break out and raise
# an exception.
if not exp.match(*args, **kwargs):
if exp.counts_met():
exp.close(*args, **kwargs)
elif not exp.is_any_order():
break
else:
return exp.test(*args, **kwargs)
raise UnexpectedCall(
call=self.name, suffix=self._format_exception(),
args=args, kwargs=kwargs)
def _format_exception(self):
result = [
colored("All expectations", 'white', attrs=['bold'])
]
for e in self._expectations:
result.append(str(e))
return "\n".join(result)
|
agoragames/chai | chai/stub.py | StubMethod.call_orig | python | def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
return self._obj.__func__(self._instance, *args, **kwargs)
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
return self._obj.im_func(self._instance, *args, **kwargs)
else:
return self._obj(*args, **kwargs) | Calls the original function. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L407-L420 | null | class StubMethod(Stub):
'''
Stub a method.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object of type MethodType
'''
super(StubMethod, self).__init__(obj, attr)
if not self._attr:
# python3
if sys.version_info.major == 3: # hasattr(obj,'__func__'):
self._attr = obj.__func__.__name__
else:
self._attr = obj.im_func.func_name
if sys.version_info.major == 3: # hasattr(obj, '__self__'):
self._instance = obj.__self__
else:
self._instance = obj.im_self
else:
self._instance = self._obj
self._obj = getattr(self._instance, self._attr)
setattr(self._instance, self._attr, self)
@property
def name(self):
from .mock import Mock # Import here for the same reason as above.
if hasattr(self._obj, 'im_class'):
if issubclass(self._obj.im_class, Mock):
return self._obj.im_self._name
# Always use the class to get the name
klass = self._instance
if not inspect.isclass(self._instance):
klass = self._instance.__class__
return "%s.%s" % (klass.__name__, self._attr)
def _teardown(self):
'''
Put the original method back in place. This will also handle the
special case when it putting back a class method.
The following code snippet best describe why it fails using settar,
the class method would be replaced with a bound method not a class
method.
>>> class Example(object):
... @classmethod
... def a_classmethod(self):
... pass
...
>>> Example.__dict__['a_classmethod']
<classmethod object at 0x7f5e6c298be8>
>>> orig = getattr(Example, 'a_classmethod')
>>> orig
<bound method type.a_classmethod of <class '__main__.Example'>>
>>> setattr(Example, 'a_classmethod', orig)
>>> Example.__dict__['a_classmethod']
<bound method type.a_classmethod of <class '__main__.Example'>>
The only way to figure out if this is a class method is to check and
see if the bound method im_self is a class, if so then we need to wrap
the function object (im_func) with class method before setting it back
on the class.
'''
# Figure out if this is a class method and we're unstubbing it on the
# class to which it belongs. This addresses an edge case where a
# module can expose a method of an instance. e.g gevent.
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
setattr(
self._instance, self._attr, classmethod(self._obj.__func__))
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
# Wrap it and set it back on the class
setattr(self._instance, self._attr, classmethod(self._obj.im_func))
else:
setattr(self._instance, self._attr, self._obj)
|
agoragames/chai | chai/stub.py | StubMethod._teardown | python | def _teardown(self):
'''
Put the original method back in place. This will also handle the
special case when it putting back a class method.
The following code snippet best describe why it fails using settar,
the class method would be replaced with a bound method not a class
method.
>>> class Example(object):
... @classmethod
... def a_classmethod(self):
... pass
...
>>> Example.__dict__['a_classmethod']
<classmethod object at 0x7f5e6c298be8>
>>> orig = getattr(Example, 'a_classmethod')
>>> orig
<bound method type.a_classmethod of <class '__main__.Example'>>
>>> setattr(Example, 'a_classmethod', orig)
>>> Example.__dict__['a_classmethod']
<bound method type.a_classmethod of <class '__main__.Example'>>
The only way to figure out if this is a class method is to check and
see if the bound method im_self is a class, if so then we need to wrap
the function object (im_func) with class method before setting it back
on the class.
'''
# Figure out if this is a class method and we're unstubbing it on the
# class to which it belongs. This addresses an edge case where a
# module can expose a method of an instance. e.g gevent.
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
setattr(
self._instance, self._attr, classmethod(self._obj.__func__))
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
# Wrap it and set it back on the class
setattr(self._instance, self._attr, classmethod(self._obj.im_func))
else:
setattr(self._instance, self._attr, self._obj) | Put the original method back in place. This will also handle the
special case when it putting back a class method.
The following code snippet best describe why it fails using settar,
the class method would be replaced with a bound method not a class
method.
>>> class Example(object):
... @classmethod
... def a_classmethod(self):
... pass
...
>>> Example.__dict__['a_classmethod']
<classmethod object at 0x7f5e6c298be8>
>>> orig = getattr(Example, 'a_classmethod')
>>> orig
<bound method type.a_classmethod of <class '__main__.Example'>>
>>> setattr(Example, 'a_classmethod', orig)
>>> Example.__dict__['a_classmethod']
<bound method type.a_classmethod of <class '__main__.Example'>>
The only way to figure out if this is a class method is to check and
see if the bound method im_self is a class, if so then we need to wrap
the function object (im_func) with class method before setting it back
on the class. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L422-L464 | null | class StubMethod(Stub):
'''
Stub a method.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object of type MethodType
'''
super(StubMethod, self).__init__(obj, attr)
if not self._attr:
# python3
if sys.version_info.major == 3: # hasattr(obj,'__func__'):
self._attr = obj.__func__.__name__
else:
self._attr = obj.im_func.func_name
if sys.version_info.major == 3: # hasattr(obj, '__self__'):
self._instance = obj.__self__
else:
self._instance = obj.im_self
else:
self._instance = self._obj
self._obj = getattr(self._instance, self._attr)
setattr(self._instance, self._attr, self)
@property
def name(self):
from .mock import Mock # Import here for the same reason as above.
if hasattr(self._obj, 'im_class'):
if issubclass(self._obj.im_class, Mock):
return self._obj.im_self._name
# Always use the class to get the name
klass = self._instance
if not inspect.isclass(self._instance):
klass = self._instance.__class__
return "%s.%s" % (klass.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
return self._obj.__func__(self._instance, *args, **kwargs)
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
return self._obj.im_func(self._instance, *args, **kwargs)
else:
return self._obj(*args, **kwargs)
|
agoragames/chai | chai/stub.py | StubFunction._teardown | python | def _teardown(self):
'''
Replace the original method.
'''
if not self._was_object_method:
setattr(self._instance, self._attr, self._obj)
else:
delattr(self._instance, self._attr) | Replace the original method. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L518-L525 | null | class StubFunction(Stub):
'''
Stub a function.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object that is an unbound method
'''
super(StubFunction, self).__init__(obj, attr)
if not self._attr:
if getattr(obj, '__module__', None):
self._instance = sys.modules[obj.__module__]
elif getattr(obj, '__self__', None):
self._instance = obj.__self__
else:
raise UnsupportedStub("Failed to find instance of %s" % (obj))
if getattr(obj, 'func_name', None):
self._attr = obj.func_name
elif getattr(obj, '__name__', None):
self._attr = obj.__name__
else:
raise UnsupportedStub("Failed to find name of %s" % (obj))
else:
self._instance = self._obj
self._obj = getattr(self._instance, self._attr)
# This handles the case where we're stubbing a special method that's
# inherited from object, and so instead of calling setattr on teardown,
# we want to call delattr. This is particularly important for not
# seeing those stupid DeprecationWarnings after StubNew
self._was_object_method = False
if hasattr(self._instance, '__dict__'):
self._was_object_method = \
self._attr not in self._instance.__dict__.keys() and\
self._attr in object.__dict__.keys()
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: Does this change if was_object_method?
return self._obj(*args, **kwargs)
|
agoragames/chai | chai/stub.py | StubNew.call_orig | python | def call_orig(self, *args, **kwargs):
'''
Calls the original function. Simulates __new__ and __init__ together.
'''
rval = super(StubNew, self).call_orig(self._type)
rval.__init__(*args, **kwargs)
return rval | Calls the original function. Simulates __new__ and __init__ together. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L568-L574 | [
"def call_orig(self, *args, **kwargs):\n '''\n Calls the original function.\n '''\n # TODO: Does this change if was_object_method?\n return self._obj(*args, **kwargs)\n"
] | class StubNew(StubFunction):
'''
Stub out the constructor, but hide the fact that we're stubbing "__new__"
and act more like we're stubbing "__init__". Needs to use the logic in
the StubFunction ctor.
'''
_cache = {}
def __new__(self, klass, *args):
'''
Because we're not saving the stub into any attribute, then we have
to do some faking here to return the same handle.
'''
rval = self._cache.get(klass)
if not rval:
rval = self._cache[klass] = super(
StubNew, self).__new__(self, *args)
rval._allow_init = True
else:
rval._allow_init = False
return rval
def __init__(self, obj):
'''
Overload the initialization so that we can hack access to __new__.
'''
if self._allow_init:
self._new = obj.__new__
super(StubNew, self).__init__(obj, '__new__')
self._type = obj
def __call__(self, *args, **kwargs):
'''
When calling the new function, strip out the first arg which is
the type. In this way, the mocker writes their expectation as if it
was an __init__.
'''
return super(StubNew, self).__call__(*(args[1:]), **kwargs)
def _teardown(self):
'''
Overload so that we can clear out the cache after a test run.
'''
# __new__ is a super-special case in that even when stubbing a class
# which implements its own __new__ and subclasses object, the
# "Class.__new__" reference is a staticmethod and not a method (or
# function). That confuses the "was_object_method" logic in
# StubFunction which then fails to delattr and from then on the class
# is corrupted. So skip that teardown and use a __new__-specific case.
setattr(self._instance, self._attr, staticmethod(self._new))
StubNew._cache.pop(self._type)
|
agoragames/chai | chai/stub.py | StubNew._teardown | python | def _teardown(self):
'''
Overload so that we can clear out the cache after a test run.
'''
# __new__ is a super-special case in that even when stubbing a class
# which implements its own __new__ and subclasses object, the
# "Class.__new__" reference is a staticmethod and not a method (or
# function). That confuses the "was_object_method" logic in
# StubFunction which then fails to delattr and from then on the class
# is corrupted. So skip that teardown and use a __new__-specific case.
setattr(self._instance, self._attr, staticmethod(self._new))
StubNew._cache.pop(self._type) | Overload so that we can clear out the cache after a test run. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L576-L587 | null | class StubNew(StubFunction):
'''
Stub out the constructor, but hide the fact that we're stubbing "__new__"
and act more like we're stubbing "__init__". Needs to use the logic in
the StubFunction ctor.
'''
_cache = {}
def __new__(self, klass, *args):
'''
Because we're not saving the stub into any attribute, then we have
to do some faking here to return the same handle.
'''
rval = self._cache.get(klass)
if not rval:
rval = self._cache[klass] = super(
StubNew, self).__new__(self, *args)
rval._allow_init = True
else:
rval._allow_init = False
return rval
def __init__(self, obj):
'''
Overload the initialization so that we can hack access to __new__.
'''
if self._allow_init:
self._new = obj.__new__
super(StubNew, self).__init__(obj, '__new__')
self._type = obj
def __call__(self, *args, **kwargs):
'''
When calling the new function, strip out the first arg which is
the type. In this way, the mocker writes their expectation as if it
was an __init__.
'''
return super(StubNew, self).__call__(*(args[1:]), **kwargs)
def call_orig(self, *args, **kwargs):
'''
Calls the original function. Simulates __new__ and __init__ together.
'''
rval = super(StubNew, self).call_orig(self._type)
rval.__init__(*args, **kwargs)
return rval
|
agoragames/chai | chai/stub.py | StubWrapperDescriptor.call_orig | python | def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
return self._orig(self._obj, *args, **kwargs) | Calls the original function. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/stub.py#L686-L690 | null | class StubWrapperDescriptor(Stub):
'''
Stub a wrapper-descriptor. Only works when we can fetch it by name. Because
the w-d object doesn't contain both the instance ref and the attribute name
to be able to look it up. Used for mocking object.__init__ and related
builtin methods when subclasses that don't overload those.
'''
def __init__(self, obj, attr_name):
'''
Initialize with an object that is a method wrapper.
'''
super(StubWrapperDescriptor, self).__init__(obj, attr_name)
self._orig = getattr(self._obj, self._attr)
setattr(self._obj, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._obj.__name__, self._attr)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._obj, self._attr, self._orig)
|
agoragames/chai | chai/chai.py | ChaiBase.stub | python | def stub(self, obj, attr=None):
'''
Stub an object. If attr is not None, will attempt to stub that
attribute on the object. Only required for modules and other rare
cases where we can't determine the binding from the object.
'''
s = stub(obj, attr)
if s not in self._stubs:
self._stubs.append(s)
return s | Stub an object. If attr is not None, will attempt to stub that
attribute on the object. Only required for modules and other rare
cases where we can't determine the binding from the object. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/chai.py#L207-L216 | [
"def stub(obj, attr=None):\n '''\n Stub an object. If attr is not None, will attempt to stub that attribute\n on the object. Only required for modules and other rare cases where we\n can't determine the binding from the object.\n '''\n if attr:\n return _stub_attr(obj, attr)\n else:\n return _stub_obj(obj)\n"
] | class ChaiBase(unittest.TestCase):
'''
Base class for all tests
'''
# Load in the comparators
equals = Equals
almost_equals = AlmostEqual
length = Length
is_a = IsA
is_arg = Is
any_of = Any
all_of = All
not_of = Not
matches = Regex
func = Function
ignore_arg = Ignore
ignore = Ignore
in_arg = In
contains = Contains
var = Variable
like = Like
def setUp(self):
super(ChaiBase, self).setUp()
# Setup stub tracking
self._stubs = deque()
# Setup mock tracking
self._mocks = deque()
# Try to load this into the module that the test case is defined in, so
# that 'self.' can be removed. This has to be done at the start of the
# test because we need the reference to be correct at the time of test
# run, not when the class is defined or an instance is created. Walks
# through the method resolution order to set it on every module for
# Chai subclasses to handle when tests are defined in subclasses.
for cls in inspect.getmro(self.__class__):
if cls.__module__.startswith('chai'):
break
mod = sys.modules[cls.__module__]
for attr in dir(cls):
if hasattr(mod, attr):
continue
if attr.startswith('assert'):
setattr(mod, attr, getattr(self, attr))
elif isinstance(getattr(self, attr), type) and \
issubclass(getattr(self, attr), Comparator):
setattr(mod, attr, getattr(self, attr))
if not hasattr(mod, 'stub'):
setattr(mod, 'stub', self.stub)
if not hasattr(mod, 'expect'):
setattr(mod, 'expect', self.expect)
if not hasattr(mod, 'spy'):
setattr(mod, 'spy', self.spy)
if not hasattr(mod, 'mock'):
setattr(mod, 'mock', self.mock)
# Because cAmElCaSe sucks
setup = setUp
def tearDown(self):
super(ChaiBase, self).tearDown()
for cls in inspect.getmro(self.__class__):
if cls.__module__.startswith('chai'):
break
mod = sys.modules[cls.__module__]
if getattr(mod, 'stub', None) == self.stub:
delattr(mod, 'stub')
if getattr(mod, 'expect', None) == self.expect:
delattr(mod, 'expect')
if getattr(mod, 'spy', None) == self.spy:
delattr(mod, 'spy')
if getattr(mod, 'mock', None) == self.mock:
delattr(mod, 'mock')
# Docs insist that this will be called no matter what happens in
# runTest(), so this should be a safe spot to unstub everything.
# Even with teardown at the end of test_wrapper, tear down here in
# case the test was skipped or there was otherwise a problem with
# that test.
while len(self._stubs):
stub = self._stubs.popleft()
stub.teardown() # Teardown the reset of the stub
# Do the mocks in reverse order in the rare case someone called
# mock(obj,attr) twice.
while len(self._mocks):
mock = self._mocks.pop()
if len(mock) == 2:
delattr(mock[0], mock[1])
else:
setattr(mock[0], mock[1], mock[2])
# Clear out any cached variables
Variable.clear()
# Because cAmElCaSe sucks
teardown = tearDown
def expect(self, obj, attr=None):
'''
Open and return an expectation on an object. Will automatically create
a stub for the object. See stub documentation for argument information.
'''
return self.stub(obj, attr).expect()
def spy(self, obj, attr=None):
'''
Open and return a spy on an object. Will automatically create a stub
for the object. See stub documentation for argument information.
'''
return self.stub(obj, attr).spy()
def mock(self, obj=None, attr=None, **kwargs):
'''
Return a mock object.
'''
rval = Mock(**kwargs)
if obj is not None and attr is not None:
rval._object = obj
rval._attr = attr
if hasattr(obj, attr):
orig = getattr(obj, attr)
self._mocks.append((obj, attr, orig))
setattr(obj, attr, rval)
else:
self._mocks.append((obj, attr))
setattr(obj, attr, rval)
return rval
|
agoragames/chai | chai/chai.py | ChaiBase.mock | python | def mock(self, obj=None, attr=None, **kwargs):
'''
Return a mock object.
'''
rval = Mock(**kwargs)
if obj is not None and attr is not None:
rval._object = obj
rval._attr = attr
if hasattr(obj, attr):
orig = getattr(obj, attr)
self._mocks.append((obj, attr, orig))
setattr(obj, attr, rval)
else:
self._mocks.append((obj, attr))
setattr(obj, attr, rval)
return rval | Return a mock object. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/chai.py#L232-L248 | null | class ChaiBase(unittest.TestCase):
'''
Base class for all tests
'''
# Load in the comparators
equals = Equals
almost_equals = AlmostEqual
length = Length
is_a = IsA
is_arg = Is
any_of = Any
all_of = All
not_of = Not
matches = Regex
func = Function
ignore_arg = Ignore
ignore = Ignore
in_arg = In
contains = Contains
var = Variable
like = Like
def setUp(self):
super(ChaiBase, self).setUp()
# Setup stub tracking
self._stubs = deque()
# Setup mock tracking
self._mocks = deque()
# Try to load this into the module that the test case is defined in, so
# that 'self.' can be removed. This has to be done at the start of the
# test because we need the reference to be correct at the time of test
# run, not when the class is defined or an instance is created. Walks
# through the method resolution order to set it on every module for
# Chai subclasses to handle when tests are defined in subclasses.
for cls in inspect.getmro(self.__class__):
if cls.__module__.startswith('chai'):
break
mod = sys.modules[cls.__module__]
for attr in dir(cls):
if hasattr(mod, attr):
continue
if attr.startswith('assert'):
setattr(mod, attr, getattr(self, attr))
elif isinstance(getattr(self, attr), type) and \
issubclass(getattr(self, attr), Comparator):
setattr(mod, attr, getattr(self, attr))
if not hasattr(mod, 'stub'):
setattr(mod, 'stub', self.stub)
if not hasattr(mod, 'expect'):
setattr(mod, 'expect', self.expect)
if not hasattr(mod, 'spy'):
setattr(mod, 'spy', self.spy)
if not hasattr(mod, 'mock'):
setattr(mod, 'mock', self.mock)
# Because cAmElCaSe sucks
setup = setUp
def tearDown(self):
super(ChaiBase, self).tearDown()
for cls in inspect.getmro(self.__class__):
if cls.__module__.startswith('chai'):
break
mod = sys.modules[cls.__module__]
if getattr(mod, 'stub', None) == self.stub:
delattr(mod, 'stub')
if getattr(mod, 'expect', None) == self.expect:
delattr(mod, 'expect')
if getattr(mod, 'spy', None) == self.spy:
delattr(mod, 'spy')
if getattr(mod, 'mock', None) == self.mock:
delattr(mod, 'mock')
# Docs insist that this will be called no matter what happens in
# runTest(), so this should be a safe spot to unstub everything.
# Even with teardown at the end of test_wrapper, tear down here in
# case the test was skipped or there was otherwise a problem with
# that test.
while len(self._stubs):
stub = self._stubs.popleft()
stub.teardown() # Teardown the reset of the stub
# Do the mocks in reverse order in the rare case someone called
# mock(obj,attr) twice.
while len(self._mocks):
mock = self._mocks.pop()
if len(mock) == 2:
delattr(mock[0], mock[1])
else:
setattr(mock[0], mock[1], mock[2])
# Clear out any cached variables
Variable.clear()
# Because cAmElCaSe sucks
teardown = tearDown
def stub(self, obj, attr=None):
'''
Stub an object. If attr is not None, will attempt to stub that
attribute on the object. Only required for modules and other rare
cases where we can't determine the binding from the object.
'''
s = stub(obj, attr)
if s not in self._stubs:
self._stubs.append(s)
return s
def expect(self, obj, attr=None):
'''
Open and return an expectation on an object. Will automatically create
a stub for the object. See stub documentation for argument information.
'''
return self.stub(obj, attr).expect()
def spy(self, obj, attr=None):
'''
Open and return a spy on an object. Will automatically create a stub
for the object. See stub documentation for argument information.
'''
return self.stub(obj, attr).spy()
|
agoragames/chai | chai/comparators.py | build_comparators | python | def build_comparators(*values_or_types):
'''
All of the comparators that can be used for arguments.
'''
comparators = []
for item in values_or_types:
if isinstance(item, Comparator):
comparators.append(item)
elif isinstance(item, type):
# If you are passing around a type you will have to build a Equals
# comparator
comparators.append(Any(IsA(item), Is(item)))
else:
comparators.append(Equals(item))
return comparators | All of the comparators that can be used for arguments. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/comparators.py#L9-L23 | null | '''
Copyright (c) 2011-2017, Agora Games, LLC All rights reserved.
https://github.com/agoragames/chai/blob/master/LICENSE.txt
'''
import re
class Comparator(object):
'''
Base class of all comparators, used for type testing
'''
def __eq__(self, value):
return self.test(value)
class Equals(Comparator):
'''
Simplest comparator.
'''
def __init__(self, value):
self._value = value
def test(self, value):
return self._value == value
def __repr__(self):
return repr(self._value)
__str__ = __repr__
class Length(Comparator):
'''
Compare the length of the argument.
'''
def __init__(self, value):
self._value = value
def test(self, value):
if isinstance(self._value, int):
return len(value) == self._value
return len(value) in self._value
def __repr__(self):
return repr(self._value)
__str__ = __repr__
class IsA(Comparator):
'''
Test to see if a value is an instance of something. Arguments match
isinstance
'''
def __init__(self, types):
self._types = types
def test(self, value):
return isinstance(value, self._types)
def _format_name(self):
if isinstance(self._types, type):
return self._types.__name__
else:
return str([o.__name__ for o in self._types])
def __repr__(self):
return "IsA(%s)" % (self._format_name())
__str__ = __repr__
class Is(Comparator):
'''
Checks for identity not equality
'''
def __init__(self, obj):
self._obj = obj
def test(self, value):
return self._obj is value
def __repr__(self):
return "Is(%s)" % (str(self._obj))
__str__ = __repr__
class AlmostEqual(Comparator):
'''
Compare a float value to n number of palces
'''
def __init__(self, float_value, places=7):
self._float_value = float_value
self._places = places
def test(self, value):
return round(value - self._float_value, self._places) == 0
def __repr__(self):
return "AlmostEqual(value: %s, places: %d)" % (
str(self._float_value), self._places)
__str__ = __repr__
class Regex(Comparator):
'''
Checks to see if a string matches a regex
'''
def __init__(self, pattern, flags=0):
self._pattern = pattern
self._flags = flags
self._regex = re.compile(pattern)
def test(self, value):
return self._regex.search(value) is not None
def __repr__(self):
return "Regex(pattern: %s, flags: %s)" % (self._pattern, self._flags)
__str__ = __repr__
class Any(Comparator):
'''
Test to see if any comparator matches
'''
def __init__(self, *comparators):
self._comparators = build_comparators(*comparators)
def test(self, value):
for comp in self._comparators:
if comp.test(value):
return True
return False
def __repr__(self):
return "Any(%s)" % str(self._comparators)
__str__ = __repr__
class In(Comparator):
'''
Test if a key is in a list or dict
'''
def __init__(self, hay_stack):
self._hay_stack = hay_stack
def test(self, needle):
return needle in self._hay_stack
def __repr__(self):
return "In(%s)" % (str(self._hay_stack))
__str__ = __repr__
class Contains(Comparator):
'''
Test if a key is in a list or dict
'''
def __init__(self, needle):
self._needle = needle
def test(self, hay_stack):
return self._needle in hay_stack
def __repr__(self):
return "Contains('%s')" % (str(self._needle))
__str__ = __repr__
class All(Comparator):
'''
Test to see if all comparators match
'''
def __init__(self, *comparators):
self._comparators = build_comparators(*comparators)
def test(self, value):
for comp in self._comparators:
if not comp.test(value):
return False
return True
def __repr__(self):
return "All(%s)" % (self._comparators)
__str__ = __repr__
class Not(Comparator):
'''
Return the opposite of a comparator
'''
def __init__(self, *comparators):
self._comparators = build_comparators(*comparators)
def test(self, value):
return all([not c.test(value) for c in self._comparators])
def __repr__(self):
return "Not(%s)" % (repr(self._comparators))
__str__ = __repr__
class Function(Comparator):
'''
Call a func to compare the values
'''
def __init__(self, func):
self._func = func
def test(self, value):
return self._func(value)
def __repr__(self):
return "Function(%s)" % (str(self._func))
__str__ = __repr__
class Ignore(Comparator):
'''
Igore this argument
'''
def test(self, value):
return True
def __repr__(self):
return "Ignore()"
__str__ = __repr__
class Variable(Comparator):
'''
A mechanism for tracking variables and their values.
'''
_cache = {}
@classmethod
def clear(self):
'''
Delete all cached values. Should only be used by the test suite.
'''
self._cache.clear()
def __init__(self, name):
self._name = name
@property
def value(self):
try:
return self._cache[self._name]
except KeyError:
raise ValueError("no value '%s'" % (self._name))
def test(self, value):
try:
return self._cache[self._name] == value
except KeyError:
self._cache[self._name] = value
return True
def __repr__(self):
return "Variable('%s')" % (self._name)
__str__ = __repr__
class Like(Comparator):
'''
A comparator that will assert that fields of a container look like
another.
'''
def __init__(self, src):
# This might have to change to support more iterable types
if not isinstance(src, (dict, set, list, tuple)):
raise ValueError(
"Like comparator only implemented for basic container types")
self._src = src
def test(self, value):
# This might need to change so that the ctor arg can be a list, but
# any iterable type can be tested.
if not isinstance(value, type(self._src)):
return False
rval = True
if isinstance(self._src, dict):
for k, v in self._src.items():
rval = rval and value.get(k) == v
else:
for item in self._src:
rval = rval and item in value
return rval
def __repr__(self):
return "Like(%s)" % (str(self._src))
__str__ = __repr__
|
agoragames/chai | chai/expectation.py | Expectation.args | python | def args(self, *args, **kwargs):
self._any_args = False
self._arguments_rule.set_args(*args, **kwargs)
return self | Creates a ArgumentsExpectationRule and adds it to the expectation | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/expectation.py#L139-L145 | [
"def set_args(self, *args, **kwargs):\n self.args = []\n self.kwargs = {}\n\n # Convert all of the arguments to comparators\n self.args = build_comparators(*args)\n self.kwargs = dict([(k, build_comparators(v)[0])\n for k, v in kwargs.items()])\n"
] | class Expectation(object):
'''
Encapsulate an expectation.
'''
def __init__(self, stub):
self._met = False
self._stub = stub
self._arguments_rule = ArgumentsExpectationRule()
self._raises = None
self._returns = None
self._max_count = None
self._min_count = 1
self._counts_defined = False
self._run_count = 0
self._any_order = False
self._side_effect = False
self._side_effect_args = None
self._side_effect_kwargs = None
self._teardown = False
self._any_args = True
# If the last expectation has no counts defined yet, set it to the
# run count if it's already been used, else set it to 1 just like
# the original implementation. This makes iterative testing much
# simpler without needing to know ahead of time exactly how many
# times an expectation will be called.
prev_expect = None if not stub.expectations else stub.expectations[-1]
if prev_expect and not prev_expect._counts_defined:
if prev_expect._run_count:
# Close immediately
prev_expect._met = True
prev_expect._max_count = prev_expect._run_count
else:
prev_expect._max_count = prev_expect._min_count
# Support expectations as context managers. See
# https://github.com/agoragames/chai/issues/1
def __enter__(self):
return self._returns
def __exit__(*args):
pass
def any_args(self):
'''
Accept any arguments passed to this call.
'''
self._any_args = True
return self
def returns(self, value):
"""
What this expectation should return
"""
self._returns = value
return self
def raises(self, exception):
"""
Adds a raises to the expectation, this will be raised when the
expectation is met.
This can be either the exception class or instance of a exception.
"""
self._raises = exception
return self
def times(self, count):
self._min_count = self._max_count = count
self._counts_defined = True
return self
def at_least(self, min_count):
self._min_count = min_count
self._max_count = None
self._counts_defined = True
return self
def at_least_once(self):
self.at_least(1)
self._counts_defined = True
return self
def at_most(self, max_count):
self._max_count = max_count
self._counts_defined = True
return self
def at_most_once(self):
self.at_most(1)
self._counts_defined = True
return self
def once(self):
self._min_count = 1
self._max_count = 1
self._counts_defined = True
return self
def any_order(self):
self._any_order = True
return self
def is_any_order(self):
return self._any_order
def side_effect(self, func, *args, **kwargs):
self._side_effect = func
self._side_effect_args = args
self._side_effect_kwargs = kwargs
return self
def teardown(self):
self._teardown = True
# If counts have not been defined yet, then there's an implied use case
# here where once the expectation has been run, it should be torn down,
# i.e. max_count is same as min_count, i.e. 1
if not self._counts_defined:
self._max_count = self._min_count
return self
def return_value(self):
"""
Returns the value for this expectation or raises the proper exception.
"""
if self._raises:
# Handle exceptions
if inspect.isclass(self._raises):
raise self._raises()
else:
raise self._raises
else:
if isinstance(self._returns, tuple):
return tuple([x.value if isinstance(x, Variable)
else x for x in self._returns])
return self._returns.value if isinstance(self._returns, Variable) \
else self._returns
def close(self, *args, **kwargs):
'''
Mark this expectation as closed. It will no longer be used for matches.
'''
# If any_order, then this effectively is never closed. The
# Stub.__call__ will just bypass it when it doesn't match. If there
# is a strict count it will also be bypassed, but if there's just a
# min set up, then it'll effectively stay open and catch any matching
# call no matter the order.
if not self._any_order:
self._met = True
def closed(self, with_counts=False):
rval = self._met
if with_counts:
rval = rval or self.counts_met()
return rval
def counts_met(self):
return self._run_count >= self._min_count and not (
self._max_count and not self._max_count == self._run_count)
def match(self, *args, **kwargs):
"""
Check the if these args match this expectation.
"""
return self._any_args or \
self._arguments_rule.validate(*args, **kwargs)
def test(self, *args, **kwargs):
"""
Validate all the rules with in this expectation to see if this
expectation has been met.
"""
side_effect_return = None
if not self._met:
if self.match(*args, **kwargs):
self._run_count += 1
if self._max_count is not None and \
self._run_count == self._max_count:
self._met = True
if self._side_effect:
if self._side_effect_args or self._side_effect_kwargs:
side_effect_return = self._side_effect(
*self._side_effect_args,
**self._side_effect_kwargs)
else:
side_effect_return = self._side_effect(*args, **kwargs)
else:
self._met = False
# If this is met and we're supposed to tear down, must do it now
# so that this stub is not called again
if self._met and self._teardown:
self._stub.teardown()
# return_value has priority to not break existing uses of side effects
rval = self.return_value()
if rval is None:
rval = side_effect_return
return rval
def __str__(self):
runs_string = " Ran: %s, Min Runs: %s, Max Runs: %s" % (
self._run_count, self._min_count,
"∞" if self._max_count is None else self._max_count)
return_string = " Raises: %s" % (
self._raises if self._raises else " Returns: %s" % repr(
self._returns))
return "\n\t%s\n\t%s\n\t\t%s\n\t\t%s" % (
colored("%s - %s" % (
self._stub.name,
"Passed" if self._arguments_rule._passed else "Failed"),
"green" if self._arguments_rule._passed else "red"),
self._arguments_rule, return_string, runs_string)
|
agoragames/chai | chai/expectation.py | Expectation.return_value | python | def return_value(self):
if self._raises:
# Handle exceptions
if inspect.isclass(self._raises):
raise self._raises()
else:
raise self._raises
else:
if isinstance(self._returns, tuple):
return tuple([x.value if isinstance(x, Variable)
else x for x in self._returns])
return self._returns.value if isinstance(self._returns, Variable) \
else self._returns | Returns the value for this expectation or raises the proper exception. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/expectation.py#L226-L241 | null | class Expectation(object):
'''
Encapsulate an expectation.
'''
def __init__(self, stub):
self._met = False
self._stub = stub
self._arguments_rule = ArgumentsExpectationRule()
self._raises = None
self._returns = None
self._max_count = None
self._min_count = 1
self._counts_defined = False
self._run_count = 0
self._any_order = False
self._side_effect = False
self._side_effect_args = None
self._side_effect_kwargs = None
self._teardown = False
self._any_args = True
# If the last expectation has no counts defined yet, set it to the
# run count if it's already been used, else set it to 1 just like
# the original implementation. This makes iterative testing much
# simpler without needing to know ahead of time exactly how many
# times an expectation will be called.
prev_expect = None if not stub.expectations else stub.expectations[-1]
if prev_expect and not prev_expect._counts_defined:
if prev_expect._run_count:
# Close immediately
prev_expect._met = True
prev_expect._max_count = prev_expect._run_count
else:
prev_expect._max_count = prev_expect._min_count
# Support expectations as context managers. See
# https://github.com/agoragames/chai/issues/1
def __enter__(self):
return self._returns
def __exit__(*args):
pass
def args(self, *args, **kwargs):
"""
Creates a ArgumentsExpectationRule and adds it to the expectation
"""
self._any_args = False
self._arguments_rule.set_args(*args, **kwargs)
return self
def any_args(self):
'''
Accept any arguments passed to this call.
'''
self._any_args = True
return self
def returns(self, value):
"""
What this expectation should return
"""
self._returns = value
return self
def raises(self, exception):
"""
Adds a raises to the expectation, this will be raised when the
expectation is met.
This can be either the exception class or instance of a exception.
"""
self._raises = exception
return self
def times(self, count):
self._min_count = self._max_count = count
self._counts_defined = True
return self
def at_least(self, min_count):
self._min_count = min_count
self._max_count = None
self._counts_defined = True
return self
def at_least_once(self):
self.at_least(1)
self._counts_defined = True
return self
def at_most(self, max_count):
self._max_count = max_count
self._counts_defined = True
return self
def at_most_once(self):
self.at_most(1)
self._counts_defined = True
return self
def once(self):
self._min_count = 1
self._max_count = 1
self._counts_defined = True
return self
def any_order(self):
self._any_order = True
return self
def is_any_order(self):
return self._any_order
def side_effect(self, func, *args, **kwargs):
self._side_effect = func
self._side_effect_args = args
self._side_effect_kwargs = kwargs
return self
def teardown(self):
self._teardown = True
# If counts have not been defined yet, then there's an implied use case
# here where once the expectation has been run, it should be torn down,
# i.e. max_count is same as min_count, i.e. 1
if not self._counts_defined:
self._max_count = self._min_count
return self
def close(self, *args, **kwargs):
'''
Mark this expectation as closed. It will no longer be used for matches.
'''
# If any_order, then this effectively is never closed. The
# Stub.__call__ will just bypass it when it doesn't match. If there
# is a strict count it will also be bypassed, but if there's just a
# min set up, then it'll effectively stay open and catch any matching
# call no matter the order.
if not self._any_order:
self._met = True
def closed(self, with_counts=False):
rval = self._met
if with_counts:
rval = rval or self.counts_met()
return rval
def counts_met(self):
return self._run_count >= self._min_count and not (
self._max_count and not self._max_count == self._run_count)
def match(self, *args, **kwargs):
"""
Check the if these args match this expectation.
"""
return self._any_args or \
self._arguments_rule.validate(*args, **kwargs)
def test(self, *args, **kwargs):
"""
Validate all the rules with in this expectation to see if this
expectation has been met.
"""
side_effect_return = None
if not self._met:
if self.match(*args, **kwargs):
self._run_count += 1
if self._max_count is not None and \
self._run_count == self._max_count:
self._met = True
if self._side_effect:
if self._side_effect_args or self._side_effect_kwargs:
side_effect_return = self._side_effect(
*self._side_effect_args,
**self._side_effect_kwargs)
else:
side_effect_return = self._side_effect(*args, **kwargs)
else:
self._met = False
# If this is met and we're supposed to tear down, must do it now
# so that this stub is not called again
if self._met and self._teardown:
self._stub.teardown()
# return_value has priority to not break existing uses of side effects
rval = self.return_value()
if rval is None:
rval = side_effect_return
return rval
def __str__(self):
runs_string = " Ran: %s, Min Runs: %s, Max Runs: %s" % (
self._run_count, self._min_count,
"∞" if self._max_count is None else self._max_count)
return_string = " Raises: %s" % (
self._raises if self._raises else " Returns: %s" % repr(
self._returns))
return "\n\t%s\n\t%s\n\t\t%s\n\t\t%s" % (
colored("%s - %s" % (
self._stub.name,
"Passed" if self._arguments_rule._passed else "Failed"),
"green" if self._arguments_rule._passed else "red"),
self._arguments_rule, return_string, runs_string)
|
agoragames/chai | chai/expectation.py | Expectation.match | python | def match(self, *args, **kwargs):
return self._any_args or \
self._arguments_rule.validate(*args, **kwargs) | Check the if these args match this expectation. | train | https://github.com/agoragames/chai/blob/8148d7b7754226b0d1cabfc2af10cd912612abdc/chai/expectation.py#L265-L270 | [
"def validate(self, *args, **kwargs):\n self.in_args = args[:]\n self.in_kwargs = kwargs.copy()\n\n # First just check that the number of arguments is the same or\n # different\n if len(args) != len(self.args) or len(kwargs) != len(self.kwargs):\n self._passed = False\n return False\n\n for x in range(len(self.args)):\n if not self.args[x].test(args[x]):\n self._passed = False\n return False\n\n for arg_name, arg_test in self.kwargs.items():\n try:\n value = kwargs.pop(arg_name)\n except KeyError:\n self._passed = False\n return False\n if not arg_test.test(value):\n self._passed = False\n return False\n\n # If there are arguments left over, is error\n if len(kwargs):\n self._passed = False\n return False\n\n self._passed = True\n return self._passed\n"
] | class Expectation(object):
'''
Encapsulate an expectation.
'''
def __init__(self, stub):
self._met = False
self._stub = stub
self._arguments_rule = ArgumentsExpectationRule()
self._raises = None
self._returns = None
self._max_count = None
self._min_count = 1
self._counts_defined = False
self._run_count = 0
self._any_order = False
self._side_effect = False
self._side_effect_args = None
self._side_effect_kwargs = None
self._teardown = False
self._any_args = True
# If the last expectation has no counts defined yet, set it to the
# run count if it's already been used, else set it to 1 just like
# the original implementation. This makes iterative testing much
# simpler without needing to know ahead of time exactly how many
# times an expectation will be called.
prev_expect = None if not stub.expectations else stub.expectations[-1]
if prev_expect and not prev_expect._counts_defined:
if prev_expect._run_count:
# Close immediately
prev_expect._met = True
prev_expect._max_count = prev_expect._run_count
else:
prev_expect._max_count = prev_expect._min_count
# Support expectations as context managers. See
# https://github.com/agoragames/chai/issues/1
def __enter__(self):
return self._returns
def __exit__(*args):
pass
def args(self, *args, **kwargs):
"""
Creates a ArgumentsExpectationRule and adds it to the expectation
"""
self._any_args = False
self._arguments_rule.set_args(*args, **kwargs)
return self
def any_args(self):
'''
Accept any arguments passed to this call.
'''
self._any_args = True
return self
def returns(self, value):
"""
What this expectation should return
"""
self._returns = value
return self
def raises(self, exception):
"""
Adds a raises to the expectation, this will be raised when the
expectation is met.
This can be either the exception class or instance of a exception.
"""
self._raises = exception
return self
def times(self, count):
self._min_count = self._max_count = count
self._counts_defined = True
return self
def at_least(self, min_count):
self._min_count = min_count
self._max_count = None
self._counts_defined = True
return self
def at_least_once(self):
self.at_least(1)
self._counts_defined = True
return self
def at_most(self, max_count):
self._max_count = max_count
self._counts_defined = True
return self
def at_most_once(self):
self.at_most(1)
self._counts_defined = True
return self
def once(self):
self._min_count = 1
self._max_count = 1
self._counts_defined = True
return self
def any_order(self):
self._any_order = True
return self
def is_any_order(self):
return self._any_order
def side_effect(self, func, *args, **kwargs):
self._side_effect = func
self._side_effect_args = args
self._side_effect_kwargs = kwargs
return self
def teardown(self):
self._teardown = True
# If counts have not been defined yet, then there's an implied use case
# here where once the expectation has been run, it should be torn down,
# i.e. max_count is same as min_count, i.e. 1
if not self._counts_defined:
self._max_count = self._min_count
return self
def return_value(self):
"""
Returns the value for this expectation or raises the proper exception.
"""
if self._raises:
# Handle exceptions
if inspect.isclass(self._raises):
raise self._raises()
else:
raise self._raises
else:
if isinstance(self._returns, tuple):
return tuple([x.value if isinstance(x, Variable)
else x for x in self._returns])
return self._returns.value if isinstance(self._returns, Variable) \
else self._returns
def close(self, *args, **kwargs):
'''
Mark this expectation as closed. It will no longer be used for matches.
'''
# If any_order, then this effectively is never closed. The
# Stub.__call__ will just bypass it when it doesn't match. If there
# is a strict count it will also be bypassed, but if there's just a
# min set up, then it'll effectively stay open and catch any matching
# call no matter the order.
if not self._any_order:
self._met = True
def closed(self, with_counts=False):
rval = self._met
if with_counts:
rval = rval or self.counts_met()
return rval
def counts_met(self):
return self._run_count >= self._min_count and not (
self._max_count and not self._max_count == self._run_count)
def test(self, *args, **kwargs):
"""
Validate all the rules with in this expectation to see if this
expectation has been met.
"""
side_effect_return = None
if not self._met:
if self.match(*args, **kwargs):
self._run_count += 1
if self._max_count is not None and \
self._run_count == self._max_count:
self._met = True
if self._side_effect:
if self._side_effect_args or self._side_effect_kwargs:
side_effect_return = self._side_effect(
*self._side_effect_args,
**self._side_effect_kwargs)
else:
side_effect_return = self._side_effect(*args, **kwargs)
else:
self._met = False
# If this is met and we're supposed to tear down, must do it now
# so that this stub is not called again
if self._met and self._teardown:
self._stub.teardown()
# return_value has priority to not break existing uses of side effects
rval = self.return_value()
if rval is None:
rval = side_effect_return
return rval
def __str__(self):
runs_string = " Ran: %s, Min Runs: %s, Max Runs: %s" % (
self._run_count, self._min_count,
"∞" if self._max_count is None else self._max_count)
return_string = " Raises: %s" % (
self._raises if self._raises else " Returns: %s" % repr(
self._returns))
return "\n\t%s\n\t%s\n\t\t%s\n\t\t%s" % (
colored("%s - %s" % (
self._stub.name,
"Passed" if self._arguments_rule._passed else "Failed"),
"green" if self._arguments_rule._passed else "red"),
self._arguments_rule, return_string, runs_string)
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens._generate_tokens | python | def _generate_tokens(self, text):
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1])) | Generates tokens for the given code. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L79-L89 | [
"def line_to_offset(self, line, column):\n \"\"\"\n Converts 1-based line number and 0-based column to 0-based character offset into text.\n \"\"\"\n line -= 1\n if line >= len(self._line_offsets):\n return self._text_len\n elif line < 0:\n return 0\n else:\n return min(self._line_offsets[line] + max(0, column), self._text_len)\n"
] | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`.
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens.get_token_from_offset | python | def get_token_from_offset(self, offset):
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1] | Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L111-L116 | null | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`.
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens.get_token | python | def get_token(self, lineno, col_offset):
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset)) | Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L118-L126 | [
"def get_token_from_offset(self, offset):\n \"\"\"\n Returns the token containing the given character offset (0-based position in source text),\n or the preceeding token if the position is between tokens.\n \"\"\"\n return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]\n"
] | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`.
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens.get_token_from_utf8 | python | def get_token_from_utf8(self, lineno, col_offset):
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset)) | Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L128-L132 | [
"def get_token(self, lineno, col_offset):\n \"\"\"\n Returns the token containing the given (lineno, col_offset) position, or the preceeding token\n if the position is between tokens.\n \"\"\"\n # TODO: add test for multibyte unicode. We need to translate offsets from ast module (which\n # are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets\n # but isn't explicit.\n return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))\n"
] | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`.
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens.next_token | python | def next_token(self, tok, include_extra=False):
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i] | Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L134-L143 | [
"def is_non_coding_token(token_type):\n \"\"\"\n These are considered non-coding tokens, as they don't affect the syntax tree.\n \"\"\"\n return token_type in (token.NL, token.COMMENT, token.ENCODING)\n",
"def is_non_coding_token(token_type):\n \"\"\"\n These are considered non-coding tokens, as they don't affect the syntax tree.\n \"\"\"\n return token_type >= token.N_TOKENS\n"
] | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`.
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens.find_token | python | def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t | Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L156-L166 | [
"def match_token(token, tok_type, tok_str=None):\n \"\"\"Returns true if token is of the given type and, if a string is given, has that string.\"\"\"\n return token.type == tok_type and (tok_str is None or token.string == tok_str)\n"
] | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens.token_range | python | def token_range(self, first_token, last_token, include_extra=False):
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i] | Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L168-L175 | [
"def is_non_coding_token(token_type):\n \"\"\"\n These are considered non-coding tokens, as they don't affect the syntax tree.\n \"\"\"\n return token_type in (token.NL, token.COMMENT, token.ENCODING)\n",
"def is_non_coding_token(token_type):\n \"\"\"\n These are considered non-coding tokens, as they don't affect the syntax tree.\n \"\"\"\n return token_type >= token.N_TOKENS\n"
] | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`.
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens.get_tokens | python | def get_tokens(self, node, include_extra=False):
return self.token_range(node.first_token, node.last_token, include_extra=include_extra) | Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L177-L182 | [
"def token_range(self, first_token, last_token, include_extra=False):\n \"\"\"\n Yields all tokens in order from first_token through and including last_token. If\n include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.\n \"\"\"\n for i in xrange(first_token.index, last_token.index + 1):\n if include_extra or not is_non_coding_token(self._tokens[i].type):\n yield self._tokens[i]\n"
] | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`.
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens.get_text_range | python | def get_text_range(self, node):
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos) | After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L184-L198 | [
"def get_tokens(self, node, include_extra=False):\n \"\"\"\n Yields all tokens making up the given node. If include_extra is True, includes non-coding\n tokens such as tokenize.NL and .COMMENT.\n \"\"\"\n return self.token_range(node.first_token, node.last_token, include_extra=include_extra)\n"
] | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`.
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text(self, node):
"""
After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text.
"""
start, end = self.get_text_range(node)
return self._text[start : end]
|
gristlabs/asttokens | asttokens/asttokens.py | ASTTokens.get_text | python | def get_text(self, node):
start, end = self.get_text_range(node)
return self._text[start : end] | After mark_tokens() has been called, returns the text corresponding to the given node. Returns
'' for nodes (like `Load`) that don't correspond to any particular text. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/asttokens.py#L200-L206 | [
"def get_text_range(self, node):\n \"\"\"\n After mark_tokens() has been called, returns the (startpos, endpos) positions in source text\n corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond\n to any particular text.\n \"\"\"\n if not hasattr(node, 'first_token'):\n return (0, 0)\n\n start = node.first_token.startpos\n if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Multi-line nodes would be invalid unless we keep the indentation of the first node.\n start = self._text.rfind('\\n', 0, start) + 1\n\n return (start, node.last_token.endpos)\n"
] | class ASTTokens(object):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
that all offsets you'll get are to the unicode text, which is available as the ``.text``
property.
If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
tree marked with token info and made available as the ``.tree`` property.
If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
using ``astroid`` library <https://www.astroid.org>.
If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
tree created separately.
"""
def __init__(self, source_text, parse=False, tree=None, filename='<unknown>'):
self._filename = filename
self._tree = ast.parse(source_text, filename) if parse else tree
# Decode source after parsing to let Python 2 handle coding declarations.
# (If the encoding was not utf-8 compatible, then even if it parses correctly,
# we'll fail with a unicode error here.)
if isinstance(source_text, six.binary_type):
source_text = source_text.decode('utf8')
self._text = source_text
self._line_numbers = LineNumbers(source_text)
# Tokenize the code.
self._tokens = list(self._generate_tokens(source_text))
# Extract the start positions of all tokens, so that we can quickly map positions to tokens.
self._token_offsets = [tok.startpos for tok in self._tokens]
if self._tree:
self.mark_tokens(self._tree)
def mark_tokens(self, root_node):
"""
Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
them with token and position information by adding ``.first_token`` and
``.last_token``attributes. This is done automatically in the constructor when ``parse`` or
``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
"""
# The hard work of this class is done by MarkTokens
MarkTokens(self).visit_tree(root_node)
def _generate_tokens(self, text):
"""
Generates tokens for the given code.
"""
# This is technically an undocumented API for Python3, but allows us to use the same API as for
# Python2. See http://stackoverflow.com/a/4952291/328565.
for index, tok in enumerate(tokenize.generate_tokens(io.StringIO(text).readline)):
tok_type, tok_str, start, end, line = tok
yield Token(tok_type, tok_str, start, end, line, index,
self._line_numbers.line_to_offset(start[0], start[1]),
self._line_numbers.line_to_offset(end[0], end[1]))
@property
def text(self):
"""The source code passed into the constructor."""
return self._text
@property
def tokens(self):
"""The list of tokens corresponding to the source code from the constructor."""
return self._tokens
@property
def tree(self):
"""The root of the AST tree passed into the constructor or parsed from the source code."""
return self._tree
@property
def filename(self):
"""The filename that was parsed"""
return self._filename
def get_token_from_offset(self, offset):
"""
Returns the token containing the given character offset (0-based position in source text),
or the preceeding token if the position is between tokens.
"""
return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]
def get_token(self, lineno, col_offset):
"""
Returns the token containing the given (lineno, col_offset) position, or the preceeding token
if the position is between tokens.
"""
# TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
# are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
# but isn't explicit.
return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))
def get_token_from_utf8(self, lineno, col_offset):
"""
Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
"""
return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
def next_token(self, tok, include_extra=False):
"""
Returns the next token after the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index + 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i += 1
return self._tokens[i]
def prev_token(self, tok, include_extra=False):
"""
Returns the previous token before the given one. If include_extra is True, includes non-coding
tokens from the tokenize module, such as NL and COMMENT.
"""
i = tok.index - 1
if not include_extra:
while is_non_coding_token(self._tokens[i].type):
i -= 1
return self._tokens[i]
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
can check it with `token.ISEOF(t.type)`.
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
def token_range(self, first_token, last_token, include_extra=False):
"""
Yields all tokens in order from first_token through and including last_token. If
include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
"""
for i in xrange(first_token.index, last_token.index + 1):
if include_extra or not is_non_coding_token(self._tokens[i].type):
yield self._tokens[i]
def get_tokens(self, node, include_extra=False):
"""
Yields all tokens making up the given node. If include_extra is True, includes non-coding
tokens such as tokenize.NL and .COMMENT.
"""
return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
def get_text_range(self, node):
"""
After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
to any particular text.
"""
if not hasattr(node, 'first_token'):
return (0, 0)
start = node.first_token.startpos
if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
# Multi-line nodes would be invalid unless we keep the indentation of the first node.
start = self._text.rfind('\n', 0, start) + 1
return (start, node.last_token.endpos)
|
gristlabs/asttokens | asttokens/line_numbers.py | LineNumbers.from_utf8_col | python | def from_utf8_col(self, line, utf8_column):
offsets = self._utf8_offset_cache.get(line)
if offsets is None:
end_offset = self._line_offsets[line] if line < len(self._line_offsets) else self._text_len
line_text = self._text[self._line_offsets[line - 1] : end_offset]
offsets = [i for i,c in enumerate(line_text) for byte in c.encode('utf8')]
offsets.append(len(line_text))
self._utf8_offset_cache[line] = offsets
return offsets[max(0, min(len(offsets)-1, utf8_column))] | Given a 1-based line number and 0-based utf8 column, returns a 0-based unicode column. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/line_numbers.py#L35-L48 | null | class LineNumbers(object):
"""
Class to convert between character offsets in a text string, and pairs (line, column) of 1-based
line and 0-based column numbers, as used by tokens and AST nodes.
This class expects unicode for input and stores positions in unicode. But it supports
translating to and from utf8 offsets, which are used by ast parsing.
"""
def __init__(self, text):
# A list of character offsets of each line's first character.
self._line_offsets = [m.start(0) for m in _line_start_re.finditer(text)]
self._text = text
self._text_len = len(text)
self._utf8_offset_cache = {} # maps line num to list of char offset for each byte in line
def line_to_offset(self, line, column):
"""
Converts 1-based line number and 0-based column to 0-based character offset into text.
"""
line -= 1
if line >= len(self._line_offsets):
return self._text_len
elif line < 0:
return 0
else:
return min(self._line_offsets[line] + max(0, column), self._text_len)
def offset_to_line(self, offset):
"""
Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column
numbers.
"""
offset = max(0, min(self._text_len, offset))
line_index = bisect.bisect_right(self._line_offsets, offset) - 1
return (line_index + 1, offset - self._line_offsets[line_index])
|
gristlabs/asttokens | asttokens/line_numbers.py | LineNumbers.line_to_offset | python | def line_to_offset(self, line, column):
line -= 1
if line >= len(self._line_offsets):
return self._text_len
elif line < 0:
return 0
else:
return min(self._line_offsets[line] + max(0, column), self._text_len) | Converts 1-based line number and 0-based column to 0-based character offset into text. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/line_numbers.py#L50-L60 | null | class LineNumbers(object):
"""
Class to convert between character offsets in a text string, and pairs (line, column) of 1-based
line and 0-based column numbers, as used by tokens and AST nodes.
This class expects unicode for input and stores positions in unicode. But it supports
translating to and from utf8 offsets, which are used by ast parsing.
"""
def __init__(self, text):
# A list of character offsets of each line's first character.
self._line_offsets = [m.start(0) for m in _line_start_re.finditer(text)]
self._text = text
self._text_len = len(text)
self._utf8_offset_cache = {} # maps line num to list of char offset for each byte in line
def from_utf8_col(self, line, utf8_column):
"""
Given a 1-based line number and 0-based utf8 column, returns a 0-based unicode column.
"""
offsets = self._utf8_offset_cache.get(line)
if offsets is None:
end_offset = self._line_offsets[line] if line < len(self._line_offsets) else self._text_len
line_text = self._text[self._line_offsets[line - 1] : end_offset]
offsets = [i for i,c in enumerate(line_text) for byte in c.encode('utf8')]
offsets.append(len(line_text))
self._utf8_offset_cache[line] = offsets
return offsets[max(0, min(len(offsets)-1, utf8_column))]
def offset_to_line(self, offset):
"""
Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column
numbers.
"""
offset = max(0, min(self._text_len, offset))
line_index = bisect.bisect_right(self._line_offsets, offset) - 1
return (line_index + 1, offset - self._line_offsets[line_index])
|
gristlabs/asttokens | asttokens/line_numbers.py | LineNumbers.offset_to_line | python | def offset_to_line(self, offset):
offset = max(0, min(self._text_len, offset))
line_index = bisect.bisect_right(self._line_offsets, offset) - 1
return (line_index + 1, offset - self._line_offsets[line_index]) | Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column
numbers. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/line_numbers.py#L62-L69 | null | class LineNumbers(object):
"""
Class to convert between character offsets in a text string, and pairs (line, column) of 1-based
line and 0-based column numbers, as used by tokens and AST nodes.
This class expects unicode for input and stores positions in unicode. But it supports
translating to and from utf8 offsets, which are used by ast parsing.
"""
def __init__(self, text):
# A list of character offsets of each line's first character.
self._line_offsets = [m.start(0) for m in _line_start_re.finditer(text)]
self._text = text
self._text_len = len(text)
self._utf8_offset_cache = {} # maps line num to list of char offset for each byte in line
def from_utf8_col(self, line, utf8_column):
"""
Given a 1-based line number and 0-based utf8 column, returns a 0-based unicode column.
"""
offsets = self._utf8_offset_cache.get(line)
if offsets is None:
end_offset = self._line_offsets[line] if line < len(self._line_offsets) else self._text_len
line_text = self._text[self._line_offsets[line - 1] : end_offset]
offsets = [i for i,c in enumerate(line_text) for byte in c.encode('utf8')]
offsets.append(len(line_text))
self._utf8_offset_cache[line] = offsets
return offsets[max(0, min(len(offsets)-1, utf8_column))]
def line_to_offset(self, line, column):
"""
Converts 1-based line number and 0-based column to 0-based character offset into text.
"""
line -= 1
if line >= len(self._line_offsets):
return self._text_len
elif line < 0:
return 0
else:
return min(self._line_offsets[line] + max(0, column), self._text_len)
|
gristlabs/asttokens | asttokens/mark_tokens.py | MarkTokens._iter_non_child_tokens | python | def _iter_non_child_tokens(self, first_token, last_token, node):
tok = first_token
for n in self._iter_children(node):
for t in self._code.token_range(tok, self._code.prev_token(n.first_token)):
yield t
if n.last_token.index >= last_token.index:
return
tok = self._code.next_token(n.last_token)
for t in self._code.token_range(tok, last_token):
yield t | Generates all tokens in [first_token, last_token] range that do not belong to any children of
node. E.g. `foo(bar)` has children `foo` and `bar`, but we would yield the `(`. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/mark_tokens.py#L106-L120 | null | class MarkTokens(object):
"""
Helper that visits all nodes in the AST tree and assigns .first_token and .last_token attributes
to each of them. This is the heart of the token-marking logic.
"""
def __init__(self, code):
self._code = code
self._methods = util.NodeMethods()
self._iter_children = None
def visit_tree(self, node):
self._iter_children = util.iter_children_func(node)
util.visit_tree(node, self._visit_before_children, self._visit_after_children)
def _visit_before_children(self, node, parent_token):
col = getattr(node, 'col_offset', None)
token = self._code.get_token_from_utf8(node.lineno, col) if col is not None else None
if not token and util.is_module(node):
# We'll assume that a Module node starts at the start of the source code.
token = self._code.get_token(1, 0)
# Use our own token, or our parent's if we don't have one, to pass to child calls as
# parent_token argument. The second value becomes the token argument of _visit_after_children.
return (token or parent_token, token)
def _visit_after_children(self, node, parent_token, token):
# This processes the node generically first, after all children have been processed.
# Get the first and last tokens that belong to children. Note how this doesn't assume that we
# iterate through children in order that corresponds to occurrence in source code. This
# assumption can fail (e.g. with return annotations).
first = token
last = None
for child in self._iter_children(node):
if not first or child.first_token.index < first.index:
first = child.first_token
if not last or child.last_token.index > last.index:
last = child.last_token
# If we don't have a first token from _visit_before_children, and there were no children, then
# use the parent's token as the first token.
first = first or parent_token
# If no children, set last token to the first one.
last = last or first
# Statements continue to before NEWLINE. This helps cover a few different cases at once.
if util.is_stmt(node):
last = self._find_last_in_line(last)
# Capture any unmatched brackets.
first, last = self._expand_to_matching_pairs(first, last, node)
# Give a chance to node-specific methods to adjust.
nfirst, nlast = self._methods.get(self, node.__class__)(node, first, last)
if (nfirst, nlast) != (first, last):
# If anything changed, expand again to capture any unmatched brackets.
nfirst, nlast = self._expand_to_matching_pairs(nfirst, nlast, node)
node.first_token = nfirst
node.last_token = nlast
def _find_last_in_line(self, start_token):
try:
newline = self._code.find_token(start_token, token.NEWLINE)
except IndexError:
newline = self._code.find_token(start_token, token.ENDMARKER)
return self._code.prev_token(newline)
def _expand_to_matching_pairs(self, first_token, last_token, node):
"""
Scan tokens in [first_token, last_token] range that are between node's children, and for any
unmatched brackets, adjust first/last tokens to include the closing pair.
"""
# We look for opening parens/braces among non-child tokens (i.e. tokens between our actual
# child nodes). If we find any closing ones, we match them to the opens.
to_match_right = []
to_match_left = []
for tok in self._iter_non_child_tokens(first_token, last_token, node):
tok_info = tok[:2]
if to_match_right and tok_info == to_match_right[-1]:
to_match_right.pop()
elif tok_info in _matching_pairs_left:
to_match_right.append(_matching_pairs_left[tok_info])
elif tok_info in _matching_pairs_right:
to_match_left.append(_matching_pairs_right[tok_info])
# Once done, extend `last_token` to match any unclosed parens/braces.
for match in reversed(to_match_right):
last = self._code.next_token(last_token)
# Allow for a trailing comma before the closing delimiter.
if util.match_token(last, token.OP, ','):
last = self._code.next_token(last)
# Now check for the actual closing delimiter.
if util.match_token(last, *match):
last_token = last
# And extend `first_token` to match any unclosed opening parens/braces.
for match in to_match_left:
first = self._code.prev_token(first_token)
if util.match_token(first, *match):
first_token = first
return (first_token, last_token)
#----------------------------------------------------------------------
# Node visitors. Each takes a preliminary first and last tokens, and returns the adjusted pair
# that will actually be assigned.
def visit_default(self, node, first_token, last_token):
# pylint: disable=no-self-use
# By default, we don't need to adjust the token we computed earlier.
return (first_token, last_token)
def handle_comp(self, open_brace, node, first_token, last_token):
# For list/set/dict comprehensions, we only get the token of the first child, so adjust it to
# include the opening brace (the closing brace will be matched automatically).
before = self._code.prev_token(first_token)
util.expect_token(before, token.OP, open_brace)
return (before, last_token)
def visit_listcomp(self, node, first_token, last_token):
return self.handle_comp('[', node, first_token, last_token)
if six.PY2:
# We shouldn't do this on PY3 because its SetComp/DictComp already have a correct start.
def visit_setcomp(self, node, first_token, last_token):
return self.handle_comp('{', node, first_token, last_token)
def visit_dictcomp(self, node, first_token, last_token):
return self.handle_comp('{', node, first_token, last_token)
def visit_comprehension(self, node, first_token, last_token):
# The 'comprehension' node starts with 'for' but we only get first child; we search backwards
# to find the 'for' keyword.
first = self._code.find_token(first_token, token.NAME, 'for', reverse=True)
return (first, last_token)
def handle_attr(self, node, first_token, last_token):
# Attribute node has ".attr" (2 tokens) after the last child.
dot = self._code.find_token(last_token, token.OP, '.')
name = self._code.next_token(dot)
util.expect_token(name, token.NAME)
return (first_token, name)
visit_attribute = handle_attr
visit_assignattr = handle_attr
visit_delattr = handle_attr
def handle_doc(self, node, first_token, last_token):
# With astroid, nodes that start with a doc-string can have an empty body, in which case we
# need to adjust the last token to include the doc string.
if not node.body and getattr(node, 'doc', None):
last_token = self._code.find_token(last_token, token.STRING)
return (first_token, last_token)
visit_classdef = handle_doc
visit_funcdef = handle_doc
def visit_call(self, node, first_token, last_token):
# A function call isn't over until we see a closing paren. Remember that last_token is at the
# end of all children, so we are not worried about encountering a paren that belongs to a
# child.
first_child = next(self._iter_children(node))
call_start = self._code.find_token(first_child.last_token, token.OP, '(')
if call_start.index > last_token.index:
last_token = call_start
return (first_token, last_token)
def visit_subscript(self, node, first_token, last_token):
# A subscript operations isn't over until we see a closing bracket. Similar to function calls.
return (first_token, self._code.find_token(last_token, token.OP, ']'))
def visit_tuple(self, node, first_token, last_token):
# A tuple doesn't include parens; if there is a trailing comma, make it part of the tuple.
try:
maybe_comma = self._code.next_token(last_token)
if util.match_token(maybe_comma, token.OP, ','):
last_token = maybe_comma
except IndexError:
pass
return (first_token, last_token)
def visit_str(self, node, first_token, last_token):
return self.handle_str(first_token, last_token)
def visit_joinedstr(self, node, first_token, last_token):
return self.handle_str(first_token, last_token)
def handle_str(self, first_token, last_token):
# Multiple adjacent STRING tokens form a single string.
last = self._code.next_token(last_token)
while util.match_token(last, token.STRING):
last_token = last
last = self._code.next_token(last_token)
return (first_token, last_token)
def visit_num(self, node, first_token, last_token):
# A constant like '-1' gets turned into two tokens; this will skip the '-'.
while util.match_token(last_token, token.OP):
last_token = self._code.next_token(last_token)
return (first_token, last_token)
# In Astroid, the Num and Str nodes are replaced by Const.
def visit_const(self, node, first_token, last_token):
if isinstance(node.value, numbers.Number):
return self.visit_num(node, first_token, last_token)
elif isinstance(node.value, six.string_types):
return self.visit_str(node, first_token, last_token)
return (first_token, last_token)
def visit_keyword(self, node, first_token, last_token):
if node.arg is not None:
equals = self._code.find_token(first_token, token.OP, '=', reverse=True)
name = self._code.prev_token(equals)
util.expect_token(name, token.NAME, node.arg)
first_token = name
return (first_token, last_token)
def visit_starred(self, node, first_token, last_token):
# Astroid has 'Starred' nodes (for "foo(*bar)" type args), but they need to be adjusted.
if not util.match_token(first_token, token.OP, '*'):
star = self._code.prev_token(first_token)
if util.match_token(star, token.OP, '*'):
first_token = star
return (first_token, last_token)
def visit_assignname(self, node, first_token, last_token):
# Astroid may turn 'except' clause into AssignName, but we need to adjust it.
if util.match_token(first_token, token.NAME, 'except'):
colon = self._code.find_token(last_token, token.OP, ':')
first_token = last_token = self._code.prev_token(colon)
return (first_token, last_token)
if six.PY2:
# No need for this on Python3, which already handles 'with' nodes correctly.
def visit_with(self, node, first_token, last_token):
first = self._code.find_token(first_token, token.NAME, 'with', reverse=True)
return (first, last_token)
|
gristlabs/asttokens | asttokens/mark_tokens.py | MarkTokens._expand_to_matching_pairs | python | def _expand_to_matching_pairs(self, first_token, last_token, node):
# We look for opening parens/braces among non-child tokens (i.e. tokens between our actual
# child nodes). If we find any closing ones, we match them to the opens.
to_match_right = []
to_match_left = []
for tok in self._iter_non_child_tokens(first_token, last_token, node):
tok_info = tok[:2]
if to_match_right and tok_info == to_match_right[-1]:
to_match_right.pop()
elif tok_info in _matching_pairs_left:
to_match_right.append(_matching_pairs_left[tok_info])
elif tok_info in _matching_pairs_right:
to_match_left.append(_matching_pairs_right[tok_info])
# Once done, extend `last_token` to match any unclosed parens/braces.
for match in reversed(to_match_right):
last = self._code.next_token(last_token)
# Allow for a trailing comma before the closing delimiter.
if util.match_token(last, token.OP, ','):
last = self._code.next_token(last)
# Now check for the actual closing delimiter.
if util.match_token(last, *match):
last_token = last
# And extend `first_token` to match any unclosed opening parens/braces.
for match in to_match_left:
first = self._code.prev_token(first_token)
if util.match_token(first, *match):
first_token = first
return (first_token, last_token) | Scan tokens in [first_token, last_token] range that are between node's children, and for any
unmatched brackets, adjust first/last tokens to include the closing pair. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/mark_tokens.py#L122-L156 | null | class MarkTokens(object):
"""
Helper that visits all nodes in the AST tree and assigns .first_token and .last_token attributes
to each of them. This is the heart of the token-marking logic.
"""
def __init__(self, code):
self._code = code
self._methods = util.NodeMethods()
self._iter_children = None
def visit_tree(self, node):
self._iter_children = util.iter_children_func(node)
util.visit_tree(node, self._visit_before_children, self._visit_after_children)
def _visit_before_children(self, node, parent_token):
col = getattr(node, 'col_offset', None)
token = self._code.get_token_from_utf8(node.lineno, col) if col is not None else None
if not token and util.is_module(node):
# We'll assume that a Module node starts at the start of the source code.
token = self._code.get_token(1, 0)
# Use our own token, or our parent's if we don't have one, to pass to child calls as
# parent_token argument. The second value becomes the token argument of _visit_after_children.
return (token or parent_token, token)
def _visit_after_children(self, node, parent_token, token):
# This processes the node generically first, after all children have been processed.
# Get the first and last tokens that belong to children. Note how this doesn't assume that we
# iterate through children in order that corresponds to occurrence in source code. This
# assumption can fail (e.g. with return annotations).
first = token
last = None
for child in self._iter_children(node):
if not first or child.first_token.index < first.index:
first = child.first_token
if not last or child.last_token.index > last.index:
last = child.last_token
# If we don't have a first token from _visit_before_children, and there were no children, then
# use the parent's token as the first token.
first = first or parent_token
# If no children, set last token to the first one.
last = last or first
# Statements continue to before NEWLINE. This helps cover a few different cases at once.
if util.is_stmt(node):
last = self._find_last_in_line(last)
# Capture any unmatched brackets.
first, last = self._expand_to_matching_pairs(first, last, node)
# Give a chance to node-specific methods to adjust.
nfirst, nlast = self._methods.get(self, node.__class__)(node, first, last)
if (nfirst, nlast) != (first, last):
# If anything changed, expand again to capture any unmatched brackets.
nfirst, nlast = self._expand_to_matching_pairs(nfirst, nlast, node)
node.first_token = nfirst
node.last_token = nlast
def _find_last_in_line(self, start_token):
try:
newline = self._code.find_token(start_token, token.NEWLINE)
except IndexError:
newline = self._code.find_token(start_token, token.ENDMARKER)
return self._code.prev_token(newline)
def _iter_non_child_tokens(self, first_token, last_token, node):
"""
Generates all tokens in [first_token, last_token] range that do not belong to any children of
node. E.g. `foo(bar)` has children `foo` and `bar`, but we would yield the `(`.
"""
tok = first_token
for n in self._iter_children(node):
for t in self._code.token_range(tok, self._code.prev_token(n.first_token)):
yield t
if n.last_token.index >= last_token.index:
return
tok = self._code.next_token(n.last_token)
for t in self._code.token_range(tok, last_token):
yield t
#----------------------------------------------------------------------
# Node visitors. Each takes a preliminary first and last tokens, and returns the adjusted pair
# that will actually be assigned.
def visit_default(self, node, first_token, last_token):
# pylint: disable=no-self-use
# By default, we don't need to adjust the token we computed earlier.
return (first_token, last_token)
def handle_comp(self, open_brace, node, first_token, last_token):
# For list/set/dict comprehensions, we only get the token of the first child, so adjust it to
# include the opening brace (the closing brace will be matched automatically).
before = self._code.prev_token(first_token)
util.expect_token(before, token.OP, open_brace)
return (before, last_token)
def visit_listcomp(self, node, first_token, last_token):
return self.handle_comp('[', node, first_token, last_token)
if six.PY2:
# We shouldn't do this on PY3 because its SetComp/DictComp already have a correct start.
def visit_setcomp(self, node, first_token, last_token):
return self.handle_comp('{', node, first_token, last_token)
def visit_dictcomp(self, node, first_token, last_token):
return self.handle_comp('{', node, first_token, last_token)
def visit_comprehension(self, node, first_token, last_token):
# The 'comprehension' node starts with 'for' but we only get first child; we search backwards
# to find the 'for' keyword.
first = self._code.find_token(first_token, token.NAME, 'for', reverse=True)
return (first, last_token)
def handle_attr(self, node, first_token, last_token):
# Attribute node has ".attr" (2 tokens) after the last child.
dot = self._code.find_token(last_token, token.OP, '.')
name = self._code.next_token(dot)
util.expect_token(name, token.NAME)
return (first_token, name)
visit_attribute = handle_attr
visit_assignattr = handle_attr
visit_delattr = handle_attr
def handle_doc(self, node, first_token, last_token):
# With astroid, nodes that start with a doc-string can have an empty body, in which case we
# need to adjust the last token to include the doc string.
if not node.body and getattr(node, 'doc', None):
last_token = self._code.find_token(last_token, token.STRING)
return (first_token, last_token)
visit_classdef = handle_doc
visit_funcdef = handle_doc
def visit_call(self, node, first_token, last_token):
# A function call isn't over until we see a closing paren. Remember that last_token is at the
# end of all children, so we are not worried about encountering a paren that belongs to a
# child.
first_child = next(self._iter_children(node))
call_start = self._code.find_token(first_child.last_token, token.OP, '(')
if call_start.index > last_token.index:
last_token = call_start
return (first_token, last_token)
def visit_subscript(self, node, first_token, last_token):
# A subscript operations isn't over until we see a closing bracket. Similar to function calls.
return (first_token, self._code.find_token(last_token, token.OP, ']'))
def visit_tuple(self, node, first_token, last_token):
# A tuple doesn't include parens; if there is a trailing comma, make it part of the tuple.
try:
maybe_comma = self._code.next_token(last_token)
if util.match_token(maybe_comma, token.OP, ','):
last_token = maybe_comma
except IndexError:
pass
return (first_token, last_token)
def visit_str(self, node, first_token, last_token):
return self.handle_str(first_token, last_token)
def visit_joinedstr(self, node, first_token, last_token):
return self.handle_str(first_token, last_token)
def handle_str(self, first_token, last_token):
# Multiple adjacent STRING tokens form a single string.
last = self._code.next_token(last_token)
while util.match_token(last, token.STRING):
last_token = last
last = self._code.next_token(last_token)
return (first_token, last_token)
def visit_num(self, node, first_token, last_token):
# A constant like '-1' gets turned into two tokens; this will skip the '-'.
while util.match_token(last_token, token.OP):
last_token = self._code.next_token(last_token)
return (first_token, last_token)
# In Astroid, the Num and Str nodes are replaced by Const.
def visit_const(self, node, first_token, last_token):
if isinstance(node.value, numbers.Number):
return self.visit_num(node, first_token, last_token)
elif isinstance(node.value, six.string_types):
return self.visit_str(node, first_token, last_token)
return (first_token, last_token)
def visit_keyword(self, node, first_token, last_token):
if node.arg is not None:
equals = self._code.find_token(first_token, token.OP, '=', reverse=True)
name = self._code.prev_token(equals)
util.expect_token(name, token.NAME, node.arg)
first_token = name
return (first_token, last_token)
def visit_starred(self, node, first_token, last_token):
# Astroid has 'Starred' nodes (for "foo(*bar)" type args), but they need to be adjusted.
if not util.match_token(first_token, token.OP, '*'):
star = self._code.prev_token(first_token)
if util.match_token(star, token.OP, '*'):
first_token = star
return (first_token, last_token)
def visit_assignname(self, node, first_token, last_token):
# Astroid may turn 'except' clause into AssignName, but we need to adjust it.
if util.match_token(first_token, token.NAME, 'except'):
colon = self._code.find_token(last_token, token.OP, ':')
first_token = last_token = self._code.prev_token(colon)
return (first_token, last_token)
if six.PY2:
# No need for this on Python3, which already handles 'with' nodes correctly.
def visit_with(self, node, first_token, last_token):
first = self._code.find_token(first_token, token.NAME, 'with', reverse=True)
return (first, last_token)
|
gristlabs/asttokens | asttokens/util.py | match_token | python | def match_token(token, tok_type, tok_str=None):
return token.type == tok_type and (tok_str is None or token.string == tok_str) | Returns true if token is of the given type and, if a string is given, has that string. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/util.py#L45-L47 | null | # Copyright 2016 Grist Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
import token
from six import iteritems
def token_repr(tok_type, string):
"""Returns a human-friendly representation of a token with the given type and string."""
# repr() prefixes unicode with 'u' on Python2 but not Python3; strip it out for consistency.
return '%s:%s' % (token.tok_name[tok_type], repr(string).lstrip('u'))
class Token(collections.namedtuple('Token', 'type string start end line index startpos endpos')):
"""
TokenInfo is an 8-tuple containing the same 5 fields as the tokens produced by the tokenize
module, and 3 additional ones useful for this module:
- [0] .type Token type (see token.py)
- [1] .string Token (a string)
- [2] .start Starting (row, column) indices of the token (a 2-tuple of ints)
- [3] .end Ending (row, column) indices of the token (a 2-tuple of ints)
- [4] .line Original line (string)
- [5] .index Index of the token in the list of tokens that it belongs to.
- [6] .startpos Starting character offset into the input text.
- [7] .endpos Ending character offset into the input text.
"""
def __str__(self):
return token_repr(self.type, self.string)
def expect_token(token, tok_type, tok_str=None):
"""
Verifies that the given token is of the expected type. If tok_str is given, the token string
is verified too. If the token doesn't match, raises an informative ValueError.
"""
if not match_token(token, tok_type, tok_str):
raise ValueError("Expected token %s, got %s on line %s col %s" % (
token_repr(tok_type, tok_str), str(token),
token.start[0], token.start[1] + 1))
# These were previously defined in tokenize.py and distinguishable by being greater than
# token.N_TOKEN. As of python3.7, they are in token.py, and we check for them explicitly.
if hasattr(token, 'COMMENT'):
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type in (token.NL, token.COMMENT, token.ENCODING)
else:
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type >= token.N_TOKENS
def iter_children(node):
"""
Yields all direct children of a AST node, skipping children that are singleton nodes.
"""
return iter_children_astroid(node) if hasattr(node, 'get_children') else iter_children_ast(node)
def iter_children_func(node):
"""
Returns a slightly more optimized function to use in place of ``iter_children``, depending on
whether ``node`` is from ``ast`` or from the ``astroid`` module.
"""
return iter_children_astroid if hasattr(node, 'get_children') else iter_children_ast
def iter_children_astroid(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return []
return node.get_children()
SINGLETONS = {c for n, c in iteritems(ast.__dict__) if isinstance(c, type) and
issubclass(c, (ast.expr_context, ast.boolop, ast.operator, ast.unaryop, ast.cmpop))}
def iter_children_ast(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return
for child in ast.iter_child_nodes(node):
# Skip singleton children; they don't reflect particular positions in the code and break the
# assumptions about the tree consisting of distinct nodes. Note that collecting classes
# beforehand and checking them in a set is faster than using isinstance each time.
if child.__class__ not in SINGLETONS:
yield child
stmt_class_names = {n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.stmt)}
expr_class_names = ({n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.expr)} |
{'AssignName', 'DelName', 'Const', 'AssignAttr', 'DelAttr'})
# These feel hacky compared to isinstance() but allow us to work with both ast and astroid nodes
# in the same way, and without even importing astroid.
def is_expr(node):
"""Returns whether node is an expression node."""
return node.__class__.__name__ in expr_class_names
def is_stmt(node):
"""Returns whether node is a statement node."""
return node.__class__.__name__ in stmt_class_names
def is_module(node):
"""Returns whether node is a module node."""
return node.__class__.__name__ == 'Module'
def is_joined_str(node):
"""Returns whether node is a JoinedStr node, used to represent f-strings."""
# At the moment, nodes below JoinedStr have wrong line/col info, and trying to process them only
# leads to errors.
return node.__class__.__name__ == 'JoinedStr'
# Sentinel value used by visit_tree().
_PREVISIT = object()
def visit_tree(node, previsit, postvisit):
"""
Scans the tree under the node depth-first using an explicit stack. It avoids implicit recursion
via the function call stack to avoid hitting 'maximum recursion depth exceeded' error.
It calls ``previsit()`` and ``postvisit()`` as follows:
* ``previsit(node, par_value)`` - should return ``(par_value, value)``
``par_value`` is as returned from ``previsit()`` of the parent.
* ``postvisit(node, par_value, value)`` - should return ``value``
``par_value`` is as returned from ``previsit()`` of the parent, and ``value`` is as
returned from ``previsit()`` of this node itself. The return ``value`` is ignored except
the one for the root node, which is returned from the overall ``visit_tree()`` call.
For the initial node, ``par_value`` is None. Either ``previsit`` and ``postvisit`` may be None.
"""
if not previsit:
previsit = lambda node, pvalue: (None, None)
if not postvisit:
postvisit = lambda node, pvalue, value: None
iter_children = iter_children_func(node)
done = set()
ret = None
stack = [(node, None, _PREVISIT)]
while stack:
current, par_value, value = stack.pop()
if value is _PREVISIT:
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
pvalue, post_value = previsit(current, par_value)
stack.append((current, par_value, post_value))
# Insert all children in reverse order (so that first child ends up on top of the stack).
ins = len(stack)
for n in iter_children(current):
stack.insert(ins, (n, pvalue, _PREVISIT))
else:
ret = postvisit(current, par_value, value)
return ret
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at ``node`` (including ``node``
itself), using depth-first pre-order traversal (yieling parents before their children).
This is similar to ``ast.walk()``, but with a different order, and it works for both ``ast`` and
``astroid`` trees. Also, as ``iter_children()``, it skips singleton nodes generated by ``ast``.
"""
iter_children = iter_children_func(node)
done = set()
stack = [node]
while stack:
current = stack.pop()
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
yield current
# Insert all children in reverse order (so that first child ends up on top of the stack).
# This is faster than building a list and reversing it.
ins = len(stack)
for c in iter_children(current):
stack.insert(ins, c)
def replace(text, replacements):
"""
Replaces multiple slices of text with new values. This is a convenience method for making code
modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. Replacements is
an iterable of ``(start, end, new_text)`` tuples.
For example, ``replace("this is a test", [(0, 4, "X"), (8, 1, "THE")])`` produces
``"X is THE test"``.
"""
p = 0
parts = []
for (start, end, new_text) in sorted(replacements):
parts.append(text[p:start])
parts.append(new_text)
p = end
parts.append(text[p:])
return ''.join(parts)
class NodeMethods(object):
"""
Helper to get `visit_{node_type}` methods given a node's class and cache the results.
"""
def __init__(self):
self._cache = {}
def get(self, obj, cls):
"""
Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,
or `obj.visit_default` if the type-specific method is not found.
"""
method = self._cache.get(cls)
if not method:
name = "visit_" + cls.__name__.lower()
method = getattr(obj, name, obj.visit_default)
self._cache[cls] = method
return method
|
gristlabs/asttokens | asttokens/util.py | expect_token | python | def expect_token(token, tok_type, tok_str=None):
if not match_token(token, tok_type, tok_str):
raise ValueError("Expected token %s, got %s on line %s col %s" % (
token_repr(tok_type, tok_str), str(token),
token.start[0], token.start[1] + 1)) | Verifies that the given token is of the expected type. If tok_str is given, the token string
is verified too. If the token doesn't match, raises an informative ValueError. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/util.py#L50-L58 | [
"def match_token(token, tok_type, tok_str=None):\n \"\"\"Returns true if token is of the given type and, if a string is given, has that string.\"\"\"\n return token.type == tok_type and (tok_str is None or token.string == tok_str)\n",
"def token_repr(tok_type, string):\n \"\"\"Returns a human-friendly representation of a token with the given type and string.\"\"\"\n # repr() prefixes unicode with 'u' on Python2 but not Python3; strip it out for consistency.\n return '%s:%s' % (token.tok_name[tok_type], repr(string).lstrip('u'))\n"
] | # Copyright 2016 Grist Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
import token
from six import iteritems
def token_repr(tok_type, string):
"""Returns a human-friendly representation of a token with the given type and string."""
# repr() prefixes unicode with 'u' on Python2 but not Python3; strip it out for consistency.
return '%s:%s' % (token.tok_name[tok_type], repr(string).lstrip('u'))
class Token(collections.namedtuple('Token', 'type string start end line index startpos endpos')):
"""
TokenInfo is an 8-tuple containing the same 5 fields as the tokens produced by the tokenize
module, and 3 additional ones useful for this module:
- [0] .type Token type (see token.py)
- [1] .string Token (a string)
- [2] .start Starting (row, column) indices of the token (a 2-tuple of ints)
- [3] .end Ending (row, column) indices of the token (a 2-tuple of ints)
- [4] .line Original line (string)
- [5] .index Index of the token in the list of tokens that it belongs to.
- [6] .startpos Starting character offset into the input text.
- [7] .endpos Ending character offset into the input text.
"""
def __str__(self):
return token_repr(self.type, self.string)
def match_token(token, tok_type, tok_str=None):
"""Returns true if token is of the given type and, if a string is given, has that string."""
return token.type == tok_type and (tok_str is None or token.string == tok_str)
# These were previously defined in tokenize.py and distinguishable by being greater than
# token.N_TOKEN. As of python3.7, they are in token.py, and we check for them explicitly.
if hasattr(token, 'COMMENT'):
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type in (token.NL, token.COMMENT, token.ENCODING)
else:
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type >= token.N_TOKENS
def iter_children(node):
"""
Yields all direct children of a AST node, skipping children that are singleton nodes.
"""
return iter_children_astroid(node) if hasattr(node, 'get_children') else iter_children_ast(node)
def iter_children_func(node):
"""
Returns a slightly more optimized function to use in place of ``iter_children``, depending on
whether ``node`` is from ``ast`` or from the ``astroid`` module.
"""
return iter_children_astroid if hasattr(node, 'get_children') else iter_children_ast
def iter_children_astroid(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return []
return node.get_children()
SINGLETONS = {c for n, c in iteritems(ast.__dict__) if isinstance(c, type) and
issubclass(c, (ast.expr_context, ast.boolop, ast.operator, ast.unaryop, ast.cmpop))}
def iter_children_ast(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return
for child in ast.iter_child_nodes(node):
# Skip singleton children; they don't reflect particular positions in the code and break the
# assumptions about the tree consisting of distinct nodes. Note that collecting classes
# beforehand and checking them in a set is faster than using isinstance each time.
if child.__class__ not in SINGLETONS:
yield child
stmt_class_names = {n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.stmt)}
expr_class_names = ({n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.expr)} |
{'AssignName', 'DelName', 'Const', 'AssignAttr', 'DelAttr'})
# These feel hacky compared to isinstance() but allow us to work with both ast and astroid nodes
# in the same way, and without even importing astroid.
def is_expr(node):
"""Returns whether node is an expression node."""
return node.__class__.__name__ in expr_class_names
def is_stmt(node):
"""Returns whether node is a statement node."""
return node.__class__.__name__ in stmt_class_names
def is_module(node):
"""Returns whether node is a module node."""
return node.__class__.__name__ == 'Module'
def is_joined_str(node):
"""Returns whether node is a JoinedStr node, used to represent f-strings."""
# At the moment, nodes below JoinedStr have wrong line/col info, and trying to process them only
# leads to errors.
return node.__class__.__name__ == 'JoinedStr'
# Sentinel value used by visit_tree().
_PREVISIT = object()
def visit_tree(node, previsit, postvisit):
"""
Scans the tree under the node depth-first using an explicit stack. It avoids implicit recursion
via the function call stack to avoid hitting 'maximum recursion depth exceeded' error.
It calls ``previsit()`` and ``postvisit()`` as follows:
* ``previsit(node, par_value)`` - should return ``(par_value, value)``
``par_value`` is as returned from ``previsit()`` of the parent.
* ``postvisit(node, par_value, value)`` - should return ``value``
``par_value`` is as returned from ``previsit()`` of the parent, and ``value`` is as
returned from ``previsit()`` of this node itself. The return ``value`` is ignored except
the one for the root node, which is returned from the overall ``visit_tree()`` call.
For the initial node, ``par_value`` is None. Either ``previsit`` and ``postvisit`` may be None.
"""
if not previsit:
previsit = lambda node, pvalue: (None, None)
if not postvisit:
postvisit = lambda node, pvalue, value: None
iter_children = iter_children_func(node)
done = set()
ret = None
stack = [(node, None, _PREVISIT)]
while stack:
current, par_value, value = stack.pop()
if value is _PREVISIT:
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
pvalue, post_value = previsit(current, par_value)
stack.append((current, par_value, post_value))
# Insert all children in reverse order (so that first child ends up on top of the stack).
ins = len(stack)
for n in iter_children(current):
stack.insert(ins, (n, pvalue, _PREVISIT))
else:
ret = postvisit(current, par_value, value)
return ret
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at ``node`` (including ``node``
itself), using depth-first pre-order traversal (yieling parents before their children).
This is similar to ``ast.walk()``, but with a different order, and it works for both ``ast`` and
``astroid`` trees. Also, as ``iter_children()``, it skips singleton nodes generated by ``ast``.
"""
iter_children = iter_children_func(node)
done = set()
stack = [node]
while stack:
current = stack.pop()
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
yield current
# Insert all children in reverse order (so that first child ends up on top of the stack).
# This is faster than building a list and reversing it.
ins = len(stack)
for c in iter_children(current):
stack.insert(ins, c)
def replace(text, replacements):
"""
Replaces multiple slices of text with new values. This is a convenience method for making code
modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. Replacements is
an iterable of ``(start, end, new_text)`` tuples.
For example, ``replace("this is a test", [(0, 4, "X"), (8, 1, "THE")])`` produces
``"X is THE test"``.
"""
p = 0
parts = []
for (start, end, new_text) in sorted(replacements):
parts.append(text[p:start])
parts.append(new_text)
p = end
parts.append(text[p:])
return ''.join(parts)
class NodeMethods(object):
"""
Helper to get `visit_{node_type}` methods given a node's class and cache the results.
"""
def __init__(self):
self._cache = {}
def get(self, obj, cls):
"""
Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,
or `obj.visit_default` if the type-specific method is not found.
"""
method = self._cache.get(cls)
if not method:
name = "visit_" + cls.__name__.lower()
method = getattr(obj, name, obj.visit_default)
self._cache[cls] = method
return method
|
gristlabs/asttokens | asttokens/util.py | visit_tree | python | def visit_tree(node, previsit, postvisit):
if not previsit:
previsit = lambda node, pvalue: (None, None)
if not postvisit:
postvisit = lambda node, pvalue, value: None
iter_children = iter_children_func(node)
done = set()
ret = None
stack = [(node, None, _PREVISIT)]
while stack:
current, par_value, value = stack.pop()
if value is _PREVISIT:
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
pvalue, post_value = previsit(current, par_value)
stack.append((current, par_value, post_value))
# Insert all children in reverse order (so that first child ends up on top of the stack).
ins = len(stack)
for n in iter_children(current):
stack.insert(ins, (n, pvalue, _PREVISIT))
else:
ret = postvisit(current, par_value, value)
return ret | Scans the tree under the node depth-first using an explicit stack. It avoids implicit recursion
via the function call stack to avoid hitting 'maximum recursion depth exceeded' error.
It calls ``previsit()`` and ``postvisit()`` as follows:
* ``previsit(node, par_value)`` - should return ``(par_value, value)``
``par_value`` is as returned from ``previsit()`` of the parent.
* ``postvisit(node, par_value, value)`` - should return ``value``
``par_value`` is as returned from ``previsit()`` of the parent, and ``value`` is as
returned from ``previsit()`` of this node itself. The return ``value`` is ignored except
the one for the root node, which is returned from the overall ``visit_tree()`` call.
For the initial node, ``par_value`` is None. Either ``previsit`` and ``postvisit`` may be None. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/util.py#L144-L185 | [
"def iter_children_func(node):\n \"\"\"\n Returns a slightly more optimized function to use in place of ``iter_children``, depending on\n whether ``node`` is from ``ast`` or from the ``astroid`` module.\n \"\"\"\n return iter_children_astroid if hasattr(node, 'get_children') else iter_children_ast\n",
"def iter_children_astroid(node):\n # Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.\n if is_joined_str(node):\n return []\n\n return node.get_children()\n",
"def iter_children_ast(node):\n # Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.\n if is_joined_str(node):\n return\n\n for child in ast.iter_child_nodes(node):\n # Skip singleton children; they don't reflect particular positions in the code and break the\n # assumptions about the tree consisting of distinct nodes. Note that collecting classes\n # beforehand and checking them in a set is faster than using isinstance each time.\n if child.__class__ not in SINGLETONS:\n yield child\n",
"def _visit_before_children(self, node, parent_token):\n col = getattr(node, 'col_offset', None)\n token = self._code.get_token_from_utf8(node.lineno, col) if col is not None else None\n\n if not token and util.is_module(node):\n # We'll assume that a Module node starts at the start of the source code.\n token = self._code.get_token(1, 0)\n\n # Use our own token, or our parent's if we don't have one, to pass to child calls as\n # parent_token argument. The second value becomes the token argument of _visit_after_children.\n return (token or parent_token, token)\n",
"def _visit_after_children(self, node, parent_token, token):\n # This processes the node generically first, after all children have been processed.\n\n # Get the first and last tokens that belong to children. Note how this doesn't assume that we\n # iterate through children in order that corresponds to occurrence in source code. This\n # assumption can fail (e.g. with return annotations).\n first = token\n last = None\n for child in self._iter_children(node):\n if not first or child.first_token.index < first.index:\n first = child.first_token\n if not last or child.last_token.index > last.index:\n last = child.last_token\n\n # If we don't have a first token from _visit_before_children, and there were no children, then\n # use the parent's token as the first token.\n first = first or parent_token\n\n # If no children, set last token to the first one.\n last = last or first\n\n # Statements continue to before NEWLINE. This helps cover a few different cases at once.\n if util.is_stmt(node):\n last = self._find_last_in_line(last)\n\n # Capture any unmatched brackets.\n first, last = self._expand_to_matching_pairs(first, last, node)\n\n # Give a chance to node-specific methods to adjust.\n nfirst, nlast = self._methods.get(self, node.__class__)(node, first, last)\n\n if (nfirst, nlast) != (first, last):\n # If anything changed, expand again to capture any unmatched brackets.\n nfirst, nlast = self._expand_to_matching_pairs(nfirst, nlast, node)\n\n node.first_token = nfirst\n node.last_token = nlast\n",
"def append(node, par_value):\n nodes.append(node)\n return (None, None)\n",
"previsit = lambda node, pvalue: (None, None)\n",
"postvisit = lambda node, pvalue, value: None\n"
] | # Copyright 2016 Grist Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
import token
from six import iteritems
def token_repr(tok_type, string):
"""Returns a human-friendly representation of a token with the given type and string."""
# repr() prefixes unicode with 'u' on Python2 but not Python3; strip it out for consistency.
return '%s:%s' % (token.tok_name[tok_type], repr(string).lstrip('u'))
class Token(collections.namedtuple('Token', 'type string start end line index startpos endpos')):
"""
TokenInfo is an 8-tuple containing the same 5 fields as the tokens produced by the tokenize
module, and 3 additional ones useful for this module:
- [0] .type Token type (see token.py)
- [1] .string Token (a string)
- [2] .start Starting (row, column) indices of the token (a 2-tuple of ints)
- [3] .end Ending (row, column) indices of the token (a 2-tuple of ints)
- [4] .line Original line (string)
- [5] .index Index of the token in the list of tokens that it belongs to.
- [6] .startpos Starting character offset into the input text.
- [7] .endpos Ending character offset into the input text.
"""
def __str__(self):
return token_repr(self.type, self.string)
def match_token(token, tok_type, tok_str=None):
"""Returns true if token is of the given type and, if a string is given, has that string."""
return token.type == tok_type and (tok_str is None or token.string == tok_str)
def expect_token(token, tok_type, tok_str=None):
"""
Verifies that the given token is of the expected type. If tok_str is given, the token string
is verified too. If the token doesn't match, raises an informative ValueError.
"""
if not match_token(token, tok_type, tok_str):
raise ValueError("Expected token %s, got %s on line %s col %s" % (
token_repr(tok_type, tok_str), str(token),
token.start[0], token.start[1] + 1))
# These were previously defined in tokenize.py and distinguishable by being greater than
# token.N_TOKEN. As of python3.7, they are in token.py, and we check for them explicitly.
if hasattr(token, 'COMMENT'):
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type in (token.NL, token.COMMENT, token.ENCODING)
else:
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type >= token.N_TOKENS
def iter_children(node):
"""
Yields all direct children of a AST node, skipping children that are singleton nodes.
"""
return iter_children_astroid(node) if hasattr(node, 'get_children') else iter_children_ast(node)
def iter_children_func(node):
"""
Returns a slightly more optimized function to use in place of ``iter_children``, depending on
whether ``node`` is from ``ast`` or from the ``astroid`` module.
"""
return iter_children_astroid if hasattr(node, 'get_children') else iter_children_ast
def iter_children_astroid(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return []
return node.get_children()
SINGLETONS = {c for n, c in iteritems(ast.__dict__) if isinstance(c, type) and
issubclass(c, (ast.expr_context, ast.boolop, ast.operator, ast.unaryop, ast.cmpop))}
def iter_children_ast(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return
for child in ast.iter_child_nodes(node):
# Skip singleton children; they don't reflect particular positions in the code and break the
# assumptions about the tree consisting of distinct nodes. Note that collecting classes
# beforehand and checking them in a set is faster than using isinstance each time.
if child.__class__ not in SINGLETONS:
yield child
stmt_class_names = {n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.stmt)}
expr_class_names = ({n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.expr)} |
{'AssignName', 'DelName', 'Const', 'AssignAttr', 'DelAttr'})
# These feel hacky compared to isinstance() but allow us to work with both ast and astroid nodes
# in the same way, and without even importing astroid.
def is_expr(node):
"""Returns whether node is an expression node."""
return node.__class__.__name__ in expr_class_names
def is_stmt(node):
"""Returns whether node is a statement node."""
return node.__class__.__name__ in stmt_class_names
def is_module(node):
"""Returns whether node is a module node."""
return node.__class__.__name__ == 'Module'
def is_joined_str(node):
"""Returns whether node is a JoinedStr node, used to represent f-strings."""
# At the moment, nodes below JoinedStr have wrong line/col info, and trying to process them only
# leads to errors.
return node.__class__.__name__ == 'JoinedStr'
# Sentinel value used by visit_tree().
_PREVISIT = object()
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at ``node`` (including ``node``
itself), using depth-first pre-order traversal (yieling parents before their children).
This is similar to ``ast.walk()``, but with a different order, and it works for both ``ast`` and
``astroid`` trees. Also, as ``iter_children()``, it skips singleton nodes generated by ``ast``.
"""
iter_children = iter_children_func(node)
done = set()
stack = [node]
while stack:
current = stack.pop()
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
yield current
# Insert all children in reverse order (so that first child ends up on top of the stack).
# This is faster than building a list and reversing it.
ins = len(stack)
for c in iter_children(current):
stack.insert(ins, c)
def replace(text, replacements):
"""
Replaces multiple slices of text with new values. This is a convenience method for making code
modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. Replacements is
an iterable of ``(start, end, new_text)`` tuples.
For example, ``replace("this is a test", [(0, 4, "X"), (8, 1, "THE")])`` produces
``"X is THE test"``.
"""
p = 0
parts = []
for (start, end, new_text) in sorted(replacements):
parts.append(text[p:start])
parts.append(new_text)
p = end
parts.append(text[p:])
return ''.join(parts)
class NodeMethods(object):
"""
Helper to get `visit_{node_type}` methods given a node's class and cache the results.
"""
def __init__(self):
self._cache = {}
def get(self, obj, cls):
"""
Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,
or `obj.visit_default` if the type-specific method is not found.
"""
method = self._cache.get(cls)
if not method:
name = "visit_" + cls.__name__.lower()
method = getattr(obj, name, obj.visit_default)
self._cache[cls] = method
return method
|
gristlabs/asttokens | asttokens/util.py | walk | python | def walk(node):
iter_children = iter_children_func(node)
done = set()
stack = [node]
while stack:
current = stack.pop()
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
yield current
# Insert all children in reverse order (so that first child ends up on top of the stack).
# This is faster than building a list and reversing it.
ins = len(stack)
for c in iter_children(current):
stack.insert(ins, c) | Recursively yield all descendant nodes in the tree starting at ``node`` (including ``node``
itself), using depth-first pre-order traversal (yieling parents before their children).
This is similar to ``ast.walk()``, but with a different order, and it works for both ``ast`` and
``astroid`` trees. Also, as ``iter_children()``, it skips singleton nodes generated by ``ast``. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/util.py#L189-L211 | [
"def iter_children_func(node):\n \"\"\"\n Returns a slightly more optimized function to use in place of ``iter_children``, depending on\n whether ``node`` is from ``ast`` or from the ``astroid`` module.\n \"\"\"\n return iter_children_astroid if hasattr(node, 'get_children') else iter_children_ast\n",
"def iter_children_astroid(node):\n # Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.\n if is_joined_str(node):\n return []\n\n return node.get_children()\n",
"def iter_children_ast(node):\n # Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.\n if is_joined_str(node):\n return\n\n for child in ast.iter_child_nodes(node):\n # Skip singleton children; they don't reflect particular positions in the code and break the\n # assumptions about the tree consisting of distinct nodes. Note that collecting classes\n # beforehand and checking them in a set is faster than using isinstance each time.\n if child.__class__ not in SINGLETONS:\n yield child\n"
] | # Copyright 2016 Grist Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
import token
from six import iteritems
def token_repr(tok_type, string):
"""Returns a human-friendly representation of a token with the given type and string."""
# repr() prefixes unicode with 'u' on Python2 but not Python3; strip it out for consistency.
return '%s:%s' % (token.tok_name[tok_type], repr(string).lstrip('u'))
class Token(collections.namedtuple('Token', 'type string start end line index startpos endpos')):
"""
TokenInfo is an 8-tuple containing the same 5 fields as the tokens produced by the tokenize
module, and 3 additional ones useful for this module:
- [0] .type Token type (see token.py)
- [1] .string Token (a string)
- [2] .start Starting (row, column) indices of the token (a 2-tuple of ints)
- [3] .end Ending (row, column) indices of the token (a 2-tuple of ints)
- [4] .line Original line (string)
- [5] .index Index of the token in the list of tokens that it belongs to.
- [6] .startpos Starting character offset into the input text.
- [7] .endpos Ending character offset into the input text.
"""
def __str__(self):
return token_repr(self.type, self.string)
def match_token(token, tok_type, tok_str=None):
"""Returns true if token is of the given type and, if a string is given, has that string."""
return token.type == tok_type and (tok_str is None or token.string == tok_str)
def expect_token(token, tok_type, tok_str=None):
"""
Verifies that the given token is of the expected type. If tok_str is given, the token string
is verified too. If the token doesn't match, raises an informative ValueError.
"""
if not match_token(token, tok_type, tok_str):
raise ValueError("Expected token %s, got %s on line %s col %s" % (
token_repr(tok_type, tok_str), str(token),
token.start[0], token.start[1] + 1))
# These were previously defined in tokenize.py and distinguishable by being greater than
# token.N_TOKEN. As of python3.7, they are in token.py, and we check for them explicitly.
if hasattr(token, 'COMMENT'):
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type in (token.NL, token.COMMENT, token.ENCODING)
else:
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type >= token.N_TOKENS
def iter_children(node):
"""
Yields all direct children of a AST node, skipping children that are singleton nodes.
"""
return iter_children_astroid(node) if hasattr(node, 'get_children') else iter_children_ast(node)
def iter_children_func(node):
"""
Returns a slightly more optimized function to use in place of ``iter_children``, depending on
whether ``node`` is from ``ast`` or from the ``astroid`` module.
"""
return iter_children_astroid if hasattr(node, 'get_children') else iter_children_ast
def iter_children_astroid(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return []
return node.get_children()
SINGLETONS = {c for n, c in iteritems(ast.__dict__) if isinstance(c, type) and
issubclass(c, (ast.expr_context, ast.boolop, ast.operator, ast.unaryop, ast.cmpop))}
def iter_children_ast(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return
for child in ast.iter_child_nodes(node):
# Skip singleton children; they don't reflect particular positions in the code and break the
# assumptions about the tree consisting of distinct nodes. Note that collecting classes
# beforehand and checking them in a set is faster than using isinstance each time.
if child.__class__ not in SINGLETONS:
yield child
stmt_class_names = {n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.stmt)}
expr_class_names = ({n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.expr)} |
{'AssignName', 'DelName', 'Const', 'AssignAttr', 'DelAttr'})
# These feel hacky compared to isinstance() but allow us to work with both ast and astroid nodes
# in the same way, and without even importing astroid.
def is_expr(node):
"""Returns whether node is an expression node."""
return node.__class__.__name__ in expr_class_names
def is_stmt(node):
"""Returns whether node is a statement node."""
return node.__class__.__name__ in stmt_class_names
def is_module(node):
"""Returns whether node is a module node."""
return node.__class__.__name__ == 'Module'
def is_joined_str(node):
"""Returns whether node is a JoinedStr node, used to represent f-strings."""
# At the moment, nodes below JoinedStr have wrong line/col info, and trying to process them only
# leads to errors.
return node.__class__.__name__ == 'JoinedStr'
# Sentinel value used by visit_tree().
_PREVISIT = object()
def visit_tree(node, previsit, postvisit):
"""
Scans the tree under the node depth-first using an explicit stack. It avoids implicit recursion
via the function call stack to avoid hitting 'maximum recursion depth exceeded' error.
It calls ``previsit()`` and ``postvisit()`` as follows:
* ``previsit(node, par_value)`` - should return ``(par_value, value)``
``par_value`` is as returned from ``previsit()`` of the parent.
* ``postvisit(node, par_value, value)`` - should return ``value``
``par_value`` is as returned from ``previsit()`` of the parent, and ``value`` is as
returned from ``previsit()`` of this node itself. The return ``value`` is ignored except
the one for the root node, which is returned from the overall ``visit_tree()`` call.
For the initial node, ``par_value`` is None. Either ``previsit`` and ``postvisit`` may be None.
"""
if not previsit:
previsit = lambda node, pvalue: (None, None)
if not postvisit:
postvisit = lambda node, pvalue, value: None
iter_children = iter_children_func(node)
done = set()
ret = None
stack = [(node, None, _PREVISIT)]
while stack:
current, par_value, value = stack.pop()
if value is _PREVISIT:
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
pvalue, post_value = previsit(current, par_value)
stack.append((current, par_value, post_value))
# Insert all children in reverse order (so that first child ends up on top of the stack).
ins = len(stack)
for n in iter_children(current):
stack.insert(ins, (n, pvalue, _PREVISIT))
else:
ret = postvisit(current, par_value, value)
return ret
def replace(text, replacements):
"""
Replaces multiple slices of text with new values. This is a convenience method for making code
modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. Replacements is
an iterable of ``(start, end, new_text)`` tuples.
For example, ``replace("this is a test", [(0, 4, "X"), (8, 1, "THE")])`` produces
``"X is THE test"``.
"""
p = 0
parts = []
for (start, end, new_text) in sorted(replacements):
parts.append(text[p:start])
parts.append(new_text)
p = end
parts.append(text[p:])
return ''.join(parts)
class NodeMethods(object):
"""
Helper to get `visit_{node_type}` methods given a node's class and cache the results.
"""
def __init__(self):
self._cache = {}
def get(self, obj, cls):
"""
Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,
or `obj.visit_default` if the type-specific method is not found.
"""
method = self._cache.get(cls)
if not method:
name = "visit_" + cls.__name__.lower()
method = getattr(obj, name, obj.visit_default)
self._cache[cls] = method
return method
|
gristlabs/asttokens | asttokens/util.py | replace | python | def replace(text, replacements):
p = 0
parts = []
for (start, end, new_text) in sorted(replacements):
parts.append(text[p:start])
parts.append(new_text)
p = end
parts.append(text[p:])
return ''.join(parts) | Replaces multiple slices of text with new values. This is a convenience method for making code
modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. Replacements is
an iterable of ``(start, end, new_text)`` tuples.
For example, ``replace("this is a test", [(0, 4, "X"), (8, 1, "THE")])`` produces
``"X is THE test"``. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/util.py#L214-L230 | null | # Copyright 2016 Grist Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
import token
from six import iteritems
def token_repr(tok_type, string):
"""Returns a human-friendly representation of a token with the given type and string."""
# repr() prefixes unicode with 'u' on Python2 but not Python3; strip it out for consistency.
return '%s:%s' % (token.tok_name[tok_type], repr(string).lstrip('u'))
class Token(collections.namedtuple('Token', 'type string start end line index startpos endpos')):
"""
TokenInfo is an 8-tuple containing the same 5 fields as the tokens produced by the tokenize
module, and 3 additional ones useful for this module:
- [0] .type Token type (see token.py)
- [1] .string Token (a string)
- [2] .start Starting (row, column) indices of the token (a 2-tuple of ints)
- [3] .end Ending (row, column) indices of the token (a 2-tuple of ints)
- [4] .line Original line (string)
- [5] .index Index of the token in the list of tokens that it belongs to.
- [6] .startpos Starting character offset into the input text.
- [7] .endpos Ending character offset into the input text.
"""
def __str__(self):
return token_repr(self.type, self.string)
def match_token(token, tok_type, tok_str=None):
"""Returns true if token is of the given type and, if a string is given, has that string."""
return token.type == tok_type and (tok_str is None or token.string == tok_str)
def expect_token(token, tok_type, tok_str=None):
"""
Verifies that the given token is of the expected type. If tok_str is given, the token string
is verified too. If the token doesn't match, raises an informative ValueError.
"""
if not match_token(token, tok_type, tok_str):
raise ValueError("Expected token %s, got %s on line %s col %s" % (
token_repr(tok_type, tok_str), str(token),
token.start[0], token.start[1] + 1))
# These were previously defined in tokenize.py and distinguishable by being greater than
# token.N_TOKEN. As of python3.7, they are in token.py, and we check for them explicitly.
if hasattr(token, 'COMMENT'):
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type in (token.NL, token.COMMENT, token.ENCODING)
else:
def is_non_coding_token(token_type):
"""
These are considered non-coding tokens, as they don't affect the syntax tree.
"""
return token_type >= token.N_TOKENS
def iter_children(node):
"""
Yields all direct children of a AST node, skipping children that are singleton nodes.
"""
return iter_children_astroid(node) if hasattr(node, 'get_children') else iter_children_ast(node)
def iter_children_func(node):
"""
Returns a slightly more optimized function to use in place of ``iter_children``, depending on
whether ``node`` is from ``ast`` or from the ``astroid`` module.
"""
return iter_children_astroid if hasattr(node, 'get_children') else iter_children_ast
def iter_children_astroid(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return []
return node.get_children()
SINGLETONS = {c for n, c in iteritems(ast.__dict__) if isinstance(c, type) and
issubclass(c, (ast.expr_context, ast.boolop, ast.operator, ast.unaryop, ast.cmpop))}
def iter_children_ast(node):
# Don't attempt to process children of JoinedStr nodes, which we can't fully handle yet.
if is_joined_str(node):
return
for child in ast.iter_child_nodes(node):
# Skip singleton children; they don't reflect particular positions in the code and break the
# assumptions about the tree consisting of distinct nodes. Note that collecting classes
# beforehand and checking them in a set is faster than using isinstance each time.
if child.__class__ not in SINGLETONS:
yield child
stmt_class_names = {n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.stmt)}
expr_class_names = ({n for n, c in iteritems(ast.__dict__)
if isinstance(c, type) and issubclass(c, ast.expr)} |
{'AssignName', 'DelName', 'Const', 'AssignAttr', 'DelAttr'})
# These feel hacky compared to isinstance() but allow us to work with both ast and astroid nodes
# in the same way, and without even importing astroid.
def is_expr(node):
"""Returns whether node is an expression node."""
return node.__class__.__name__ in expr_class_names
def is_stmt(node):
"""Returns whether node is a statement node."""
return node.__class__.__name__ in stmt_class_names
def is_module(node):
"""Returns whether node is a module node."""
return node.__class__.__name__ == 'Module'
def is_joined_str(node):
"""Returns whether node is a JoinedStr node, used to represent f-strings."""
# At the moment, nodes below JoinedStr have wrong line/col info, and trying to process them only
# leads to errors.
return node.__class__.__name__ == 'JoinedStr'
# Sentinel value used by visit_tree().
_PREVISIT = object()
def visit_tree(node, previsit, postvisit):
"""
Scans the tree under the node depth-first using an explicit stack. It avoids implicit recursion
via the function call stack to avoid hitting 'maximum recursion depth exceeded' error.
It calls ``previsit()`` and ``postvisit()`` as follows:
* ``previsit(node, par_value)`` - should return ``(par_value, value)``
``par_value`` is as returned from ``previsit()`` of the parent.
* ``postvisit(node, par_value, value)`` - should return ``value``
``par_value`` is as returned from ``previsit()`` of the parent, and ``value`` is as
returned from ``previsit()`` of this node itself. The return ``value`` is ignored except
the one for the root node, which is returned from the overall ``visit_tree()`` call.
For the initial node, ``par_value`` is None. Either ``previsit`` and ``postvisit`` may be None.
"""
if not previsit:
previsit = lambda node, pvalue: (None, None)
if not postvisit:
postvisit = lambda node, pvalue, value: None
iter_children = iter_children_func(node)
done = set()
ret = None
stack = [(node, None, _PREVISIT)]
while stack:
current, par_value, value = stack.pop()
if value is _PREVISIT:
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
pvalue, post_value = previsit(current, par_value)
stack.append((current, par_value, post_value))
# Insert all children in reverse order (so that first child ends up on top of the stack).
ins = len(stack)
for n in iter_children(current):
stack.insert(ins, (n, pvalue, _PREVISIT))
else:
ret = postvisit(current, par_value, value)
return ret
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at ``node`` (including ``node``
itself), using depth-first pre-order traversal (yieling parents before their children).
This is similar to ``ast.walk()``, but with a different order, and it works for both ``ast`` and
``astroid`` trees. Also, as ``iter_children()``, it skips singleton nodes generated by ``ast``.
"""
iter_children = iter_children_func(node)
done = set()
stack = [node]
while stack:
current = stack.pop()
assert current not in done # protect againt infinite loop in case of a bad tree.
done.add(current)
yield current
# Insert all children in reverse order (so that first child ends up on top of the stack).
# This is faster than building a list and reversing it.
ins = len(stack)
for c in iter_children(current):
stack.insert(ins, c)
class NodeMethods(object):
"""
Helper to get `visit_{node_type}` methods given a node's class and cache the results.
"""
def __init__(self):
self._cache = {}
def get(self, obj, cls):
"""
Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,
or `obj.visit_default` if the type-specific method is not found.
"""
method = self._cache.get(cls)
if not method:
name = "visit_" + cls.__name__.lower()
method = getattr(obj, name, obj.visit_default)
self._cache[cls] = method
return method
|
gristlabs/asttokens | asttokens/util.py | NodeMethods.get | python | def get(self, obj, cls):
method = self._cache.get(cls)
if not method:
name = "visit_" + cls.__name__.lower()
method = getattr(obj, name, obj.visit_default)
self._cache[cls] = method
return method | Using the lowercase name of the class as node_type, returns `obj.visit_{node_type}`,
or `obj.visit_default` if the type-specific method is not found. | train | https://github.com/gristlabs/asttokens/blob/c8697dcf799a63d432727abb1d972adb3e85970a/asttokens/util.py#L240-L250 | null | class NodeMethods(object):
"""
Helper to get `visit_{node_type}` methods given a node's class and cache the results.
"""
def __init__(self):
self._cache = {}
|
xenon-middleware/pyxenon | xenon/exceptions.py | make_exception | python | def make_exception(method, e):
x = e.details()
name = x[:x.find(':')].split('.')[-1]
if name in globals():
cls = globals()[name]
else:
cls = UnknownRpcException # noqa
return cls(method, e.code(), e.details()) | Creates an exception for a given method, and RpcError. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/exceptions.py#L18-L27 | null | class XenonException(Exception):
"""Xenon base exception."""
def __init__(self, method, code, msg):
def get_name():
try:
return method.name
except AttributeError:
pass
try:
return method.__name__
except AttributeError:
return "<uknown function>"
super(XenonException, self).__init__(
"Xenon ({}): \"{}\" in {}".format(code, msg, get_name()))
def exception_factory(name, docstring, BaseClass=XenonException):
def __init__(self, method, exc_code, exc_msg):
BaseClass.__init__(self, method, exc_code, exc_msg)
newclass = type(name, (BaseClass,), {"__init__": __init__})
newclass.__doc__ = docstring
return newclass
xenon_exceptions = {
"UnknownRpcException":
"""Default exception if nothing is known.""",
"AttributeNotSupportedException":
"""TODO: add doc-string.""",
"CopyCancelledException":
"""TODO: add doc-string.""",
"DirectoryNotEmptyException":
"""TODO: add doc-string.""",
"FileSystemClosedException":
"""TODO: add doc-string.""",
"IncompleteJobDescriptionException":
"""TODO: add doc-string.""",
"InvalidCredentialException":
"""TODO: add doc-string.""",
"InvalidJobDescriptionException":
"""TODO: add doc-string.""",
"InvalidLocationException":
"""TODO: add doc-string.""",
"InvalidOptionsException":
"""TODO: add doc-string.""",
"InvalidPathException":
"""TODO: add doc-string.""",
"InvalidPropertyException":
"""TODO: add doc-string.""",
"InvalidResumeTargetException":
"""TODO: add doc-string.""",
"NoSuchCopyException":
"""TODO: add doc-string.""",
"NoSuchJobException":
"""TODO: add doc-string.""",
"NoSuchPathException":
"""TODO: add doc-string.""",
"NoSuchQueueException":
"""TODO: add doc-string.""",
"PathAlreadyExistsException":
"""Exception that is raised if :py:meth:`FileSystem.create_directory`
fails due to an existing path.""",
"PropertyTypeException":
"""TODO: add doc-string.""",
"UnknownAdaptorException":
"""TODO: add doc-string.""",
"UnknownPropertyException":
"""TODO: add doc-string.""",
"UnsupportedJobDescriptionException":
"""TODO: add doc-string.""",
"UnsupportedOperationException":
"""TODO: add doc-string.""",
"XenonRuntimeException":
"""TODO: add doc-string."""}
for name, docstring in xenon_exceptions.items():
globals()[name] = exception_factory(name, docstring)
|
xenon-middleware/pyxenon | xenon/oop.py | get_field_type | python | def get_field_type(f):
types = (t[5:] for t in dir(f) if t[:4] == 'TYPE' and
getattr(f, t) == f.type)
return next(types) | Obtain the type name of a GRPC Message field. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L48-L52 | null | from .proto import xenon_pb2
from .server import __server__
from .exceptions import make_exception
import grpc
try:
from enum import Enum
except ImportError:
use_enum = False
class Enum(object):
"""Minimal Enum replacement."""
def __init__(self, name, items):
for k, v in items:
setattr(self, k, v)
else:
use_enum = True
try:
from inspect import (Signature, Parameter, signature)
except ImportError:
use_signature = False
else:
use_signature = True
def mirror_enum(parent, name):
grpc_enum = getattr(parent, name)
return Enum(name, grpc_enum.items())
def to_camel_case(name):
return ''.join(w.title() for w in name.split('_'))
def to_lower_camel_case(name):
words = name.split('_')
return words[0] + ''.join(w.title() for w in words[1:])
def get_fields(msg_type):
"""Get a list of field names for Grpc message."""
return list(f.name for f in msg_type.DESCRIPTOR.fields)
def get_field_description(f):
"""Get the type description of a GRPC Message field."""
type_name = get_field_type(f)
if type_name == 'MESSAGE' and \
{sf.name for sf in f.message_type.fields} == {'key', 'value'}:
return 'map<string, string>'
elif type_name == 'MESSAGE':
return f.message_type.full_name
elif type_name == 'ENUM':
return f.enum_type.full_name
else:
return type_name.lower()
def list_attributes(msg_type):
"""List all attributes with type description of a GRPC Message class."""
return [(f.name, get_field_description(f))
for f in msg_type.fields]
class GrpcMethod:
"""Data container for a GRPC method.
:ivar name: underscore style of method name
:ivar uses_request: wether this method has a request, if this value is
`True`, the name is generated from `name`, if it is a string the
contents of this string are used.
:ivar field_name: name of `self` within the request.
:ivar input_transform: custom method to generate a request from the
method's arguments.
:ivar output_transform: custom method to extract the return value from
the return value.
"""
def __init__(self, name, uses_request=False, field_name=None,
input_transform=None, output_transform=None,
static=False):
self.name = name
self.uses_request = uses_request
self.field_name = field_name
self.input_transform = input_transform
self.output_transform = output_transform
self.static = static
@property
def is_simple(self):
return not self.uses_request and not self.input_transform \
and not self.static
@property
def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request"
@property
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name)
# python 3 only
@property
def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters)
# TODO extend documentation rendered from proto
def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s
def unwrap(arg):
if hasattr(arg, '__is_proxy__'):
return arg.__wrapped__
else:
return arg
def make_static_request(method, *args, **kwargs):
"""Creates a request from a static method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
if use_signature:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
None, *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
for k in bound_args:
if isinstance(bound_args[k], Enum):
bound_args[k] = bound_args[k].value
new_kwargs = {kw: v for kw, v in bound_args.items() if kw != 'cls'}
else:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
return method.request_type(**new_kwargs)
def make_request(self, method, *args, **kwargs):
"""Creates a request from a method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
if use_signature:
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
unwrap(self), *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
def translate_enum(arg):
return arg.value if isinstance(arg, Enum) else arg
for k in bound_args:
if isinstance(bound_args[k], str):
continue
if isinstance(bound_args[k], dict):
continue
try:
x = [translate_enum(arg) for arg in bound_args[k]]
bound_args[k] = x
except TypeError:
bound_args[k] = translate_enum(bound_args[k])
# replace `self` with the correct keyword
new_kwargs = {(kw if kw != 'self' else method.field_name): v
for kw, v in bound_args.items()}
# args = tuple(x.value if isinstance(x, Enum) else x for x in args)
else:
new_kwargs[self.field_name] = unwrap(self)
return method.request_type(**new_kwargs)
def apply_transform(service, t, x):
"""Apply a transformation using `self` as object reference."""
if t is None:
return x
else:
return t(service, x)
def transform_map(f):
def t(self, xs):
return (f(self, x) for x in xs)
return t
def grpc_call(service, method, request):
f = getattr(service, to_lower_camel_case(method.name))
try:
result = f(request)
except grpc.RpcError as e:
raise make_exception(method, e) from None
return result
def method_wrapper(m):
"""Generates a method from a `GrpcMethod` definition."""
if m.is_simple:
def simple_method(self):
"""TODO: no docstring!"""
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, unwrap(self)))
return simple_method
elif m.input_transform is not None:
def transform_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = m.input_transform(self, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return transform_method
elif m.static:
def static_method(cls, *args, **kwargs):
"""TODO: no docstring!"""
request = make_static_request(m, *args, **kwargs)
return apply_transform(
cls.__stub__(__server__), m.output_transform,
grpc_call(cls.__stub__(__server__), m, request))
return static_method
else:
def request_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = make_request(self, m, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return request_method
class OopMeta(type):
"""Meta class for Grpc Object wrappers."""
def __new__(cls, name, parents, dct):
return super(OopMeta, cls).__new__(cls, name, parents, dct)
def __init__(cls, name, parents, dct):
super(OopMeta, cls).__init__(name, parents, dct)
for m in cls.__methods__():
if m.uses_request and not m.field_name:
m.field_name = cls.__field_name__
f = method_wrapper(m)
if use_signature:
f.__signature__ = m.signature
if cls.__servicer__:
f.__doc__ = m.docstring(cls.__servicer__)
f.__name__ = m.name
if m.static:
setattr(cls, m.name, classmethod(f))
else:
setattr(cls, m.name, f)
try:
grpc_cls = getattr(xenon_pb2, name)
if cls.__doc__ is None:
cls.__doc__ = "Wrapped proto message."
cls.__doc__ += "\n\n"
for attr in list_attributes(grpc_cls.DESCRIPTOR):
cls.__doc__ += \
" :ivar {0}: {0}\n :vartype {0}: {1}\n".format(*attr)
except AttributeError:
pass
class OopProxy(metaclass=OopMeta):
"""Base class for Grpc Object wrappers. Ensures basic object sanity,
namely the existence of `__service__` and `__wrapped__` members and
the using of `OopMeta` meta-class. Also manages retrieving attributes
from the wrapped instance.
:ivar __is_proxy__: If True, this value represents a wrapped value,
from which the GRPC message can be extraced by getting the
`__wrapped__` attribute.
:ivar __servicer__: If applicable, this gives the GRPC servicer class
associated with the proxy object; this is used to retrieve doc-strings.
:ivar __field_name__: The default name to which an object of this class
should be bound in a request. This can be overridden by specifying
the `field_name` property in the `GRPCMethod` definition. For a
well-designed API this should not be necessary though.
"""
__is_proxy__ = True
__servicer__ = None
__field_name__ = None
@classmethod
def __methods__(cls):
"""This method should return a list of GRPCMethod objects."""
return []
def __init__(self, service, wrapped):
self.__service__ = service
self.__wrapped__ = wrapped
@staticmethod
def __stub__(server):
"""Return the GRPC stub class to which this object interfaces."""
raise NotImplementedError()
def __getattr__(self, attr):
"""Accesses fields of the corresponding GRPC message."""
return getattr(self.__wrapped__, attr)
def _repr_html_(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ": <ul>"
for m in members:
s += "<li><b>{}:</b> {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " <i>(default)</i>"
s += "</li>"
s += "</ul>"
return s
def __str__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ":\n"
for m in members:
s += " {}: {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " (default)"
s += "\n"
return s
def __dir__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
return dir(super(OopProxy, self)) + members
|
xenon-middleware/pyxenon | xenon/oop.py | get_field_description | python | def get_field_description(f):
type_name = get_field_type(f)
if type_name == 'MESSAGE' and \
{sf.name for sf in f.message_type.fields} == {'key', 'value'}:
return 'map<string, string>'
elif type_name == 'MESSAGE':
return f.message_type.full_name
elif type_name == 'ENUM':
return f.enum_type.full_name
else:
return type_name.lower() | Get the type description of a GRPC Message field. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L55-L66 | [
"def get_field_type(f):\n \"\"\"Obtain the type name of a GRPC Message field.\"\"\"\n types = (t[5:] for t in dir(f) if t[:4] == 'TYPE' and\n getattr(f, t) == f.type)\n return next(types)\n"
] | from .proto import xenon_pb2
from .server import __server__
from .exceptions import make_exception
import grpc
try:
from enum import Enum
except ImportError:
use_enum = False
class Enum(object):
"""Minimal Enum replacement."""
def __init__(self, name, items):
for k, v in items:
setattr(self, k, v)
else:
use_enum = True
try:
from inspect import (Signature, Parameter, signature)
except ImportError:
use_signature = False
else:
use_signature = True
def mirror_enum(parent, name):
grpc_enum = getattr(parent, name)
return Enum(name, grpc_enum.items())
def to_camel_case(name):
return ''.join(w.title() for w in name.split('_'))
def to_lower_camel_case(name):
words = name.split('_')
return words[0] + ''.join(w.title() for w in words[1:])
def get_fields(msg_type):
"""Get a list of field names for Grpc message."""
return list(f.name for f in msg_type.DESCRIPTOR.fields)
def get_field_type(f):
"""Obtain the type name of a GRPC Message field."""
types = (t[5:] for t in dir(f) if t[:4] == 'TYPE' and
getattr(f, t) == f.type)
return next(types)
def list_attributes(msg_type):
"""List all attributes with type description of a GRPC Message class."""
return [(f.name, get_field_description(f))
for f in msg_type.fields]
class GrpcMethod:
"""Data container for a GRPC method.
:ivar name: underscore style of method name
:ivar uses_request: wether this method has a request, if this value is
`True`, the name is generated from `name`, if it is a string the
contents of this string are used.
:ivar field_name: name of `self` within the request.
:ivar input_transform: custom method to generate a request from the
method's arguments.
:ivar output_transform: custom method to extract the return value from
the return value.
"""
def __init__(self, name, uses_request=False, field_name=None,
input_transform=None, output_transform=None,
static=False):
self.name = name
self.uses_request = uses_request
self.field_name = field_name
self.input_transform = input_transform
self.output_transform = output_transform
self.static = static
@property
def is_simple(self):
return not self.uses_request and not self.input_transform \
and not self.static
@property
def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request"
@property
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name)
# python 3 only
@property
def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters)
# TODO extend documentation rendered from proto
def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s
def unwrap(arg):
if hasattr(arg, '__is_proxy__'):
return arg.__wrapped__
else:
return arg
def make_static_request(method, *args, **kwargs):
"""Creates a request from a static method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
if use_signature:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
None, *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
for k in bound_args:
if isinstance(bound_args[k], Enum):
bound_args[k] = bound_args[k].value
new_kwargs = {kw: v for kw, v in bound_args.items() if kw != 'cls'}
else:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
return method.request_type(**new_kwargs)
def make_request(self, method, *args, **kwargs):
"""Creates a request from a method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
if use_signature:
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
unwrap(self), *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
def translate_enum(arg):
return arg.value if isinstance(arg, Enum) else arg
for k in bound_args:
if isinstance(bound_args[k], str):
continue
if isinstance(bound_args[k], dict):
continue
try:
x = [translate_enum(arg) for arg in bound_args[k]]
bound_args[k] = x
except TypeError:
bound_args[k] = translate_enum(bound_args[k])
# replace `self` with the correct keyword
new_kwargs = {(kw if kw != 'self' else method.field_name): v
for kw, v in bound_args.items()}
# args = tuple(x.value if isinstance(x, Enum) else x for x in args)
else:
new_kwargs[self.field_name] = unwrap(self)
return method.request_type(**new_kwargs)
def apply_transform(service, t, x):
"""Apply a transformation using `self` as object reference."""
if t is None:
return x
else:
return t(service, x)
def transform_map(f):
def t(self, xs):
return (f(self, x) for x in xs)
return t
def grpc_call(service, method, request):
f = getattr(service, to_lower_camel_case(method.name))
try:
result = f(request)
except grpc.RpcError as e:
raise make_exception(method, e) from None
return result
def method_wrapper(m):
"""Generates a method from a `GrpcMethod` definition."""
if m.is_simple:
def simple_method(self):
"""TODO: no docstring!"""
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, unwrap(self)))
return simple_method
elif m.input_transform is not None:
def transform_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = m.input_transform(self, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return transform_method
elif m.static:
def static_method(cls, *args, **kwargs):
"""TODO: no docstring!"""
request = make_static_request(m, *args, **kwargs)
return apply_transform(
cls.__stub__(__server__), m.output_transform,
grpc_call(cls.__stub__(__server__), m, request))
return static_method
else:
def request_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = make_request(self, m, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return request_method
class OopMeta(type):
"""Meta class for Grpc Object wrappers."""
def __new__(cls, name, parents, dct):
return super(OopMeta, cls).__new__(cls, name, parents, dct)
def __init__(cls, name, parents, dct):
super(OopMeta, cls).__init__(name, parents, dct)
for m in cls.__methods__():
if m.uses_request and not m.field_name:
m.field_name = cls.__field_name__
f = method_wrapper(m)
if use_signature:
f.__signature__ = m.signature
if cls.__servicer__:
f.__doc__ = m.docstring(cls.__servicer__)
f.__name__ = m.name
if m.static:
setattr(cls, m.name, classmethod(f))
else:
setattr(cls, m.name, f)
try:
grpc_cls = getattr(xenon_pb2, name)
if cls.__doc__ is None:
cls.__doc__ = "Wrapped proto message."
cls.__doc__ += "\n\n"
for attr in list_attributes(grpc_cls.DESCRIPTOR):
cls.__doc__ += \
" :ivar {0}: {0}\n :vartype {0}: {1}\n".format(*attr)
except AttributeError:
pass
class OopProxy(metaclass=OopMeta):
"""Base class for Grpc Object wrappers. Ensures basic object sanity,
namely the existence of `__service__` and `__wrapped__` members and
the using of `OopMeta` meta-class. Also manages retrieving attributes
from the wrapped instance.
:ivar __is_proxy__: If True, this value represents a wrapped value,
from which the GRPC message can be extraced by getting the
`__wrapped__` attribute.
:ivar __servicer__: If applicable, this gives the GRPC servicer class
associated with the proxy object; this is used to retrieve doc-strings.
:ivar __field_name__: The default name to which an object of this class
should be bound in a request. This can be overridden by specifying
the `field_name` property in the `GRPCMethod` definition. For a
well-designed API this should not be necessary though.
"""
__is_proxy__ = True
__servicer__ = None
__field_name__ = None
@classmethod
def __methods__(cls):
"""This method should return a list of GRPCMethod objects."""
return []
def __init__(self, service, wrapped):
self.__service__ = service
self.__wrapped__ = wrapped
@staticmethod
def __stub__(server):
"""Return the GRPC stub class to which this object interfaces."""
raise NotImplementedError()
def __getattr__(self, attr):
"""Accesses fields of the corresponding GRPC message."""
return getattr(self.__wrapped__, attr)
def _repr_html_(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ": <ul>"
for m in members:
s += "<li><b>{}:</b> {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " <i>(default)</i>"
s += "</li>"
s += "</ul>"
return s
def __str__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ":\n"
for m in members:
s += " {}: {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " (default)"
s += "\n"
return s
def __dir__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
return dir(super(OopProxy, self)) + members
|
xenon-middleware/pyxenon | xenon/oop.py | make_static_request | python | def make_static_request(method, *args, **kwargs):
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
if use_signature:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
None, *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
for k in bound_args:
if isinstance(bound_args[k], Enum):
bound_args[k] = bound_args[k].value
new_kwargs = {kw: v for kw, v in bound_args.items() if kw != 'cls'}
else:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
return method.request_type(**new_kwargs) | Creates a request from a static method function call. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L188-L209 | null | from .proto import xenon_pb2
from .server import __server__
from .exceptions import make_exception
import grpc
try:
from enum import Enum
except ImportError:
use_enum = False
class Enum(object):
"""Minimal Enum replacement."""
def __init__(self, name, items):
for k, v in items:
setattr(self, k, v)
else:
use_enum = True
try:
from inspect import (Signature, Parameter, signature)
except ImportError:
use_signature = False
else:
use_signature = True
def mirror_enum(parent, name):
grpc_enum = getattr(parent, name)
return Enum(name, grpc_enum.items())
def to_camel_case(name):
return ''.join(w.title() for w in name.split('_'))
def to_lower_camel_case(name):
words = name.split('_')
return words[0] + ''.join(w.title() for w in words[1:])
def get_fields(msg_type):
"""Get a list of field names for Grpc message."""
return list(f.name for f in msg_type.DESCRIPTOR.fields)
def get_field_type(f):
"""Obtain the type name of a GRPC Message field."""
types = (t[5:] for t in dir(f) if t[:4] == 'TYPE' and
getattr(f, t) == f.type)
return next(types)
def get_field_description(f):
"""Get the type description of a GRPC Message field."""
type_name = get_field_type(f)
if type_name == 'MESSAGE' and \
{sf.name for sf in f.message_type.fields} == {'key', 'value'}:
return 'map<string, string>'
elif type_name == 'MESSAGE':
return f.message_type.full_name
elif type_name == 'ENUM':
return f.enum_type.full_name
else:
return type_name.lower()
def list_attributes(msg_type):
"""List all attributes with type description of a GRPC Message class."""
return [(f.name, get_field_description(f))
for f in msg_type.fields]
class GrpcMethod:
"""Data container for a GRPC method.
:ivar name: underscore style of method name
:ivar uses_request: wether this method has a request, if this value is
`True`, the name is generated from `name`, if it is a string the
contents of this string are used.
:ivar field_name: name of `self` within the request.
:ivar input_transform: custom method to generate a request from the
method's arguments.
:ivar output_transform: custom method to extract the return value from
the return value.
"""
def __init__(self, name, uses_request=False, field_name=None,
input_transform=None, output_transform=None,
static=False):
self.name = name
self.uses_request = uses_request
self.field_name = field_name
self.input_transform = input_transform
self.output_transform = output_transform
self.static = static
@property
def is_simple(self):
return not self.uses_request and not self.input_transform \
and not self.static
@property
def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request"
@property
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name)
# python 3 only
@property
def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters)
# TODO extend documentation rendered from proto
def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s
def unwrap(arg):
if hasattr(arg, '__is_proxy__'):
return arg.__wrapped__
else:
return arg
def make_request(self, method, *args, **kwargs):
"""Creates a request from a method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
if use_signature:
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
unwrap(self), *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
def translate_enum(arg):
return arg.value if isinstance(arg, Enum) else arg
for k in bound_args:
if isinstance(bound_args[k], str):
continue
if isinstance(bound_args[k], dict):
continue
try:
x = [translate_enum(arg) for arg in bound_args[k]]
bound_args[k] = x
except TypeError:
bound_args[k] = translate_enum(bound_args[k])
# replace `self` with the correct keyword
new_kwargs = {(kw if kw != 'self' else method.field_name): v
for kw, v in bound_args.items()}
# args = tuple(x.value if isinstance(x, Enum) else x for x in args)
else:
new_kwargs[self.field_name] = unwrap(self)
return method.request_type(**new_kwargs)
def apply_transform(service, t, x):
"""Apply a transformation using `self` as object reference."""
if t is None:
return x
else:
return t(service, x)
def transform_map(f):
def t(self, xs):
return (f(self, x) for x in xs)
return t
def grpc_call(service, method, request):
f = getattr(service, to_lower_camel_case(method.name))
try:
result = f(request)
except grpc.RpcError as e:
raise make_exception(method, e) from None
return result
def method_wrapper(m):
"""Generates a method from a `GrpcMethod` definition."""
if m.is_simple:
def simple_method(self):
"""TODO: no docstring!"""
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, unwrap(self)))
return simple_method
elif m.input_transform is not None:
def transform_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = m.input_transform(self, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return transform_method
elif m.static:
def static_method(cls, *args, **kwargs):
"""TODO: no docstring!"""
request = make_static_request(m, *args, **kwargs)
return apply_transform(
cls.__stub__(__server__), m.output_transform,
grpc_call(cls.__stub__(__server__), m, request))
return static_method
else:
def request_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = make_request(self, m, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return request_method
class OopMeta(type):
"""Meta class for Grpc Object wrappers."""
def __new__(cls, name, parents, dct):
return super(OopMeta, cls).__new__(cls, name, parents, dct)
def __init__(cls, name, parents, dct):
super(OopMeta, cls).__init__(name, parents, dct)
for m in cls.__methods__():
if m.uses_request and not m.field_name:
m.field_name = cls.__field_name__
f = method_wrapper(m)
if use_signature:
f.__signature__ = m.signature
if cls.__servicer__:
f.__doc__ = m.docstring(cls.__servicer__)
f.__name__ = m.name
if m.static:
setattr(cls, m.name, classmethod(f))
else:
setattr(cls, m.name, f)
try:
grpc_cls = getattr(xenon_pb2, name)
if cls.__doc__ is None:
cls.__doc__ = "Wrapped proto message."
cls.__doc__ += "\n\n"
for attr in list_attributes(grpc_cls.DESCRIPTOR):
cls.__doc__ += \
" :ivar {0}: {0}\n :vartype {0}: {1}\n".format(*attr)
except AttributeError:
pass
class OopProxy(metaclass=OopMeta):
"""Base class for Grpc Object wrappers. Ensures basic object sanity,
namely the existence of `__service__` and `__wrapped__` members and
the using of `OopMeta` meta-class. Also manages retrieving attributes
from the wrapped instance.
:ivar __is_proxy__: If True, this value represents a wrapped value,
from which the GRPC message can be extraced by getting the
`__wrapped__` attribute.
:ivar __servicer__: If applicable, this gives the GRPC servicer class
associated with the proxy object; this is used to retrieve doc-strings.
:ivar __field_name__: The default name to which an object of this class
should be bound in a request. This can be overridden by specifying
the `field_name` property in the `GRPCMethod` definition. For a
well-designed API this should not be necessary though.
"""
__is_proxy__ = True
__servicer__ = None
__field_name__ = None
@classmethod
def __methods__(cls):
"""This method should return a list of GRPCMethod objects."""
return []
def __init__(self, service, wrapped):
self.__service__ = service
self.__wrapped__ = wrapped
@staticmethod
def __stub__(server):
"""Return the GRPC stub class to which this object interfaces."""
raise NotImplementedError()
def __getattr__(self, attr):
"""Accesses fields of the corresponding GRPC message."""
return getattr(self.__wrapped__, attr)
def _repr_html_(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ": <ul>"
for m in members:
s += "<li><b>{}:</b> {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " <i>(default)</i>"
s += "</li>"
s += "</ul>"
return s
def __str__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ":\n"
for m in members:
s += " {}: {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " (default)"
s += "\n"
return s
def __dir__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
return dir(super(OopProxy, self)) + members
|
xenon-middleware/pyxenon | xenon/oop.py | make_request | python | def make_request(self, method, *args, **kwargs):
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
if use_signature:
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
unwrap(self), *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
def translate_enum(arg):
return arg.value if isinstance(arg, Enum) else arg
for k in bound_args:
if isinstance(bound_args[k], str):
continue
if isinstance(bound_args[k], dict):
continue
try:
x = [translate_enum(arg) for arg in bound_args[k]]
bound_args[k] = x
except TypeError:
bound_args[k] = translate_enum(bound_args[k])
# replace `self` with the correct keyword
new_kwargs = {(kw if kw != 'self' else method.field_name): v
for kw, v in bound_args.items()}
# args = tuple(x.value if isinstance(x, Enum) else x for x in args)
else:
new_kwargs[self.field_name] = unwrap(self)
return method.request_type(**new_kwargs) | Creates a request from a method function call. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L212-L248 | [
"def unwrap(arg):\n if hasattr(arg, '__is_proxy__'):\n return arg.__wrapped__\n else:\n return arg\n",
"def translate_enum(arg):\n return arg.value if isinstance(arg, Enum) else arg\n"
] | from .proto import xenon_pb2
from .server import __server__
from .exceptions import make_exception
import grpc
try:
from enum import Enum
except ImportError:
use_enum = False
class Enum(object):
"""Minimal Enum replacement."""
def __init__(self, name, items):
for k, v in items:
setattr(self, k, v)
else:
use_enum = True
try:
from inspect import (Signature, Parameter, signature)
except ImportError:
use_signature = False
else:
use_signature = True
def mirror_enum(parent, name):
grpc_enum = getattr(parent, name)
return Enum(name, grpc_enum.items())
def to_camel_case(name):
return ''.join(w.title() for w in name.split('_'))
def to_lower_camel_case(name):
words = name.split('_')
return words[0] + ''.join(w.title() for w in words[1:])
def get_fields(msg_type):
"""Get a list of field names for Grpc message."""
return list(f.name for f in msg_type.DESCRIPTOR.fields)
def get_field_type(f):
"""Obtain the type name of a GRPC Message field."""
types = (t[5:] for t in dir(f) if t[:4] == 'TYPE' and
getattr(f, t) == f.type)
return next(types)
def get_field_description(f):
"""Get the type description of a GRPC Message field."""
type_name = get_field_type(f)
if type_name == 'MESSAGE' and \
{sf.name for sf in f.message_type.fields} == {'key', 'value'}:
return 'map<string, string>'
elif type_name == 'MESSAGE':
return f.message_type.full_name
elif type_name == 'ENUM':
return f.enum_type.full_name
else:
return type_name.lower()
def list_attributes(msg_type):
"""List all attributes with type description of a GRPC Message class."""
return [(f.name, get_field_description(f))
for f in msg_type.fields]
class GrpcMethod:
"""Data container for a GRPC method.
:ivar name: underscore style of method name
:ivar uses_request: wether this method has a request, if this value is
`True`, the name is generated from `name`, if it is a string the
contents of this string are used.
:ivar field_name: name of `self` within the request.
:ivar input_transform: custom method to generate a request from the
method's arguments.
:ivar output_transform: custom method to extract the return value from
the return value.
"""
def __init__(self, name, uses_request=False, field_name=None,
input_transform=None, output_transform=None,
static=False):
self.name = name
self.uses_request = uses_request
self.field_name = field_name
self.input_transform = input_transform
self.output_transform = output_transform
self.static = static
@property
def is_simple(self):
return not self.uses_request and not self.input_transform \
and not self.static
@property
def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request"
@property
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name)
# python 3 only
@property
def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters)
# TODO extend documentation rendered from proto
def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s
def unwrap(arg):
if hasattr(arg, '__is_proxy__'):
return arg.__wrapped__
else:
return arg
def make_static_request(method, *args, **kwargs):
"""Creates a request from a static method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
if use_signature:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
None, *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
for k in bound_args:
if isinstance(bound_args[k], Enum):
bound_args[k] = bound_args[k].value
new_kwargs = {kw: v for kw, v in bound_args.items() if kw != 'cls'}
else:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
return method.request_type(**new_kwargs)
def apply_transform(service, t, x):
"""Apply a transformation using `self` as object reference."""
if t is None:
return x
else:
return t(service, x)
def transform_map(f):
def t(self, xs):
return (f(self, x) for x in xs)
return t
def grpc_call(service, method, request):
f = getattr(service, to_lower_camel_case(method.name))
try:
result = f(request)
except grpc.RpcError as e:
raise make_exception(method, e) from None
return result
def method_wrapper(m):
"""Generates a method from a `GrpcMethod` definition."""
if m.is_simple:
def simple_method(self):
"""TODO: no docstring!"""
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, unwrap(self)))
return simple_method
elif m.input_transform is not None:
def transform_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = m.input_transform(self, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return transform_method
elif m.static:
def static_method(cls, *args, **kwargs):
"""TODO: no docstring!"""
request = make_static_request(m, *args, **kwargs)
return apply_transform(
cls.__stub__(__server__), m.output_transform,
grpc_call(cls.__stub__(__server__), m, request))
return static_method
else:
def request_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = make_request(self, m, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return request_method
class OopMeta(type):
"""Meta class for Grpc Object wrappers."""
def __new__(cls, name, parents, dct):
return super(OopMeta, cls).__new__(cls, name, parents, dct)
def __init__(cls, name, parents, dct):
super(OopMeta, cls).__init__(name, parents, dct)
for m in cls.__methods__():
if m.uses_request and not m.field_name:
m.field_name = cls.__field_name__
f = method_wrapper(m)
if use_signature:
f.__signature__ = m.signature
if cls.__servicer__:
f.__doc__ = m.docstring(cls.__servicer__)
f.__name__ = m.name
if m.static:
setattr(cls, m.name, classmethod(f))
else:
setattr(cls, m.name, f)
try:
grpc_cls = getattr(xenon_pb2, name)
if cls.__doc__ is None:
cls.__doc__ = "Wrapped proto message."
cls.__doc__ += "\n\n"
for attr in list_attributes(grpc_cls.DESCRIPTOR):
cls.__doc__ += \
" :ivar {0}: {0}\n :vartype {0}: {1}\n".format(*attr)
except AttributeError:
pass
class OopProxy(metaclass=OopMeta):
"""Base class for Grpc Object wrappers. Ensures basic object sanity,
namely the existence of `__service__` and `__wrapped__` members and
the using of `OopMeta` meta-class. Also manages retrieving attributes
from the wrapped instance.
:ivar __is_proxy__: If True, this value represents a wrapped value,
from which the GRPC message can be extraced by getting the
`__wrapped__` attribute.
:ivar __servicer__: If applicable, this gives the GRPC servicer class
associated with the proxy object; this is used to retrieve doc-strings.
:ivar __field_name__: The default name to which an object of this class
should be bound in a request. This can be overridden by specifying
the `field_name` property in the `GRPCMethod` definition. For a
well-designed API this should not be necessary though.
"""
__is_proxy__ = True
__servicer__ = None
__field_name__ = None
@classmethod
def __methods__(cls):
"""This method should return a list of GRPCMethod objects."""
return []
def __init__(self, service, wrapped):
self.__service__ = service
self.__wrapped__ = wrapped
@staticmethod
def __stub__(server):
"""Return the GRPC stub class to which this object interfaces."""
raise NotImplementedError()
def __getattr__(self, attr):
"""Accesses fields of the corresponding GRPC message."""
return getattr(self.__wrapped__, attr)
def _repr_html_(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ": <ul>"
for m in members:
s += "<li><b>{}:</b> {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " <i>(default)</i>"
s += "</li>"
s += "</ul>"
return s
def __str__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ":\n"
for m in members:
s += " {}: {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " (default)"
s += "\n"
return s
def __dir__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
return dir(super(OopProxy, self)) + members
|
xenon-middleware/pyxenon | xenon/oop.py | method_wrapper | python | def method_wrapper(m):
if m.is_simple:
def simple_method(self):
"""TODO: no docstring!"""
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, unwrap(self)))
return simple_method
elif m.input_transform is not None:
def transform_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = m.input_transform(self, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return transform_method
elif m.static:
def static_method(cls, *args, **kwargs):
"""TODO: no docstring!"""
request = make_static_request(m, *args, **kwargs)
return apply_transform(
cls.__stub__(__server__), m.output_transform,
grpc_call(cls.__stub__(__server__), m, request))
return static_method
else:
def request_method(self, *args, **kwargs):
"""TODO: no docstring!"""
request = make_request(self, m, *args, **kwargs)
return apply_transform(
self.__service__, m.output_transform,
grpc_call(self.__service__, m, request))
return request_method | Generates a method from a `GrpcMethod` definition. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L276-L316 | null | from .proto import xenon_pb2
from .server import __server__
from .exceptions import make_exception
import grpc
try:
from enum import Enum
except ImportError:
use_enum = False
class Enum(object):
"""Minimal Enum replacement."""
def __init__(self, name, items):
for k, v in items:
setattr(self, k, v)
else:
use_enum = True
try:
from inspect import (Signature, Parameter, signature)
except ImportError:
use_signature = False
else:
use_signature = True
def mirror_enum(parent, name):
grpc_enum = getattr(parent, name)
return Enum(name, grpc_enum.items())
def to_camel_case(name):
return ''.join(w.title() for w in name.split('_'))
def to_lower_camel_case(name):
words = name.split('_')
return words[0] + ''.join(w.title() for w in words[1:])
def get_fields(msg_type):
"""Get a list of field names for Grpc message."""
return list(f.name for f in msg_type.DESCRIPTOR.fields)
def get_field_type(f):
"""Obtain the type name of a GRPC Message field."""
types = (t[5:] for t in dir(f) if t[:4] == 'TYPE' and
getattr(f, t) == f.type)
return next(types)
def get_field_description(f):
"""Get the type description of a GRPC Message field."""
type_name = get_field_type(f)
if type_name == 'MESSAGE' and \
{sf.name for sf in f.message_type.fields} == {'key', 'value'}:
return 'map<string, string>'
elif type_name == 'MESSAGE':
return f.message_type.full_name
elif type_name == 'ENUM':
return f.enum_type.full_name
else:
return type_name.lower()
def list_attributes(msg_type):
"""List all attributes with type description of a GRPC Message class."""
return [(f.name, get_field_description(f))
for f in msg_type.fields]
class GrpcMethod:
"""Data container for a GRPC method.
:ivar name: underscore style of method name
:ivar uses_request: wether this method has a request, if this value is
`True`, the name is generated from `name`, if it is a string the
contents of this string are used.
:ivar field_name: name of `self` within the request.
:ivar input_transform: custom method to generate a request from the
method's arguments.
:ivar output_transform: custom method to extract the return value from
the return value.
"""
def __init__(self, name, uses_request=False, field_name=None,
input_transform=None, output_transform=None,
static=False):
self.name = name
self.uses_request = uses_request
self.field_name = field_name
self.input_transform = input_transform
self.output_transform = output_transform
self.static = static
@property
def is_simple(self):
return not self.uses_request and not self.input_transform \
and not self.static
@property
def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request"
@property
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name)
# python 3 only
@property
def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters)
# TODO extend documentation rendered from proto
def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s
def unwrap(arg):
if hasattr(arg, '__is_proxy__'):
return arg.__wrapped__
else:
return arg
def make_static_request(method, *args, **kwargs):
"""Creates a request from a static method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
if use_signature:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
None, *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
for k in bound_args:
if isinstance(bound_args[k], Enum):
bound_args[k] = bound_args[k].value
new_kwargs = {kw: v for kw, v in bound_args.items() if kw != 'cls'}
else:
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
return method.request_type(**new_kwargs)
def make_request(self, method, *args, **kwargs):
"""Creates a request from a method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
if use_signature:
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
unwrap(self), *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
def translate_enum(arg):
return arg.value if isinstance(arg, Enum) else arg
for k in bound_args:
if isinstance(bound_args[k], str):
continue
if isinstance(bound_args[k], dict):
continue
try:
x = [translate_enum(arg) for arg in bound_args[k]]
bound_args[k] = x
except TypeError:
bound_args[k] = translate_enum(bound_args[k])
# replace `self` with the correct keyword
new_kwargs = {(kw if kw != 'self' else method.field_name): v
for kw, v in bound_args.items()}
# args = tuple(x.value if isinstance(x, Enum) else x for x in args)
else:
new_kwargs[self.field_name] = unwrap(self)
return method.request_type(**new_kwargs)
def apply_transform(service, t, x):
"""Apply a transformation using `self` as object reference."""
if t is None:
return x
else:
return t(service, x)
def transform_map(f):
def t(self, xs):
return (f(self, x) for x in xs)
return t
def grpc_call(service, method, request):
f = getattr(service, to_lower_camel_case(method.name))
try:
result = f(request)
except grpc.RpcError as e:
raise make_exception(method, e) from None
return result
class OopMeta(type):
"""Meta class for Grpc Object wrappers."""
def __new__(cls, name, parents, dct):
return super(OopMeta, cls).__new__(cls, name, parents, dct)
def __init__(cls, name, parents, dct):
super(OopMeta, cls).__init__(name, parents, dct)
for m in cls.__methods__():
if m.uses_request and not m.field_name:
m.field_name = cls.__field_name__
f = method_wrapper(m)
if use_signature:
f.__signature__ = m.signature
if cls.__servicer__:
f.__doc__ = m.docstring(cls.__servicer__)
f.__name__ = m.name
if m.static:
setattr(cls, m.name, classmethod(f))
else:
setattr(cls, m.name, f)
try:
grpc_cls = getattr(xenon_pb2, name)
if cls.__doc__ is None:
cls.__doc__ = "Wrapped proto message."
cls.__doc__ += "\n\n"
for attr in list_attributes(grpc_cls.DESCRIPTOR):
cls.__doc__ += \
" :ivar {0}: {0}\n :vartype {0}: {1}\n".format(*attr)
except AttributeError:
pass
class OopProxy(metaclass=OopMeta):
"""Base class for Grpc Object wrappers. Ensures basic object sanity,
namely the existence of `__service__` and `__wrapped__` members and
the using of `OopMeta` meta-class. Also manages retrieving attributes
from the wrapped instance.
:ivar __is_proxy__: If True, this value represents a wrapped value,
from which the GRPC message can be extraced by getting the
`__wrapped__` attribute.
:ivar __servicer__: If applicable, this gives the GRPC servicer class
associated with the proxy object; this is used to retrieve doc-strings.
:ivar __field_name__: The default name to which an object of this class
should be bound in a request. This can be overridden by specifying
the `field_name` property in the `GRPCMethod` definition. For a
well-designed API this should not be necessary though.
"""
__is_proxy__ = True
__servicer__ = None
__field_name__ = None
@classmethod
def __methods__(cls):
"""This method should return a list of GRPCMethod objects."""
return []
def __init__(self, service, wrapped):
self.__service__ = service
self.__wrapped__ = wrapped
@staticmethod
def __stub__(server):
"""Return the GRPC stub class to which this object interfaces."""
raise NotImplementedError()
def __getattr__(self, attr):
"""Accesses fields of the corresponding GRPC message."""
return getattr(self.__wrapped__, attr)
def _repr_html_(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ": <ul>"
for m in members:
s += "<li><b>{}:</b> {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " <i>(default)</i>"
s += "</li>"
s += "</ul>"
return s
def __str__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
s = type(self).__name__ + ":\n"
for m in members:
s += " {}: {}".format(m, getattr(self, m))
if m not in dir(self.__wrapped__):
s += " (default)"
s += "\n"
return s
def __dir__(self):
members = [f.name for f in self.__wrapped__.DESCRIPTOR.fields]
return dir(super(OopProxy, self)) + members
|
xenon-middleware/pyxenon | xenon/oop.py | GrpcMethod.request_name | python | def request_name(self):
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request" | Generate the name of the request. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L104-L115 | [
"def to_camel_case(name):\n return ''.join(w.title() for w in name.split('_'))\n"
] | class GrpcMethod:
"""Data container for a GRPC method.
:ivar name: underscore style of method name
:ivar uses_request: wether this method has a request, if this value is
`True`, the name is generated from `name`, if it is a string the
contents of this string are used.
:ivar field_name: name of `self` within the request.
:ivar input_transform: custom method to generate a request from the
method's arguments.
:ivar output_transform: custom method to extract the return value from
the return value.
"""
def __init__(self, name, uses_request=False, field_name=None,
input_transform=None, output_transform=None,
static=False):
self.name = name
self.uses_request = uses_request
self.field_name = field_name
self.input_transform = input_transform
self.output_transform = output_transform
self.static = static
@property
def is_simple(self):
return not self.uses_request and not self.input_transform \
and not self.static
@property
@property
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name)
# python 3 only
@property
def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters)
# TODO extend documentation rendered from proto
def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s
|
xenon-middleware/pyxenon | xenon/oop.py | GrpcMethod.request_type | python | def request_type(self):
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name) | Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L118-L127 | null | class GrpcMethod:
"""Data container for a GRPC method.
:ivar name: underscore style of method name
:ivar uses_request: wether this method has a request, if this value is
`True`, the name is generated from `name`, if it is a string the
contents of this string are used.
:ivar field_name: name of `self` within the request.
:ivar input_transform: custom method to generate a request from the
method's arguments.
:ivar output_transform: custom method to extract the return value from
the return value.
"""
def __init__(self, name, uses_request=False, field_name=None,
input_transform=None, output_transform=None,
static=False):
self.name = name
self.uses_request = uses_request
self.field_name = field_name
self.input_transform = input_transform
self.output_transform = output_transform
self.static = static
@property
def is_simple(self):
return not self.uses_request and not self.input_transform \
and not self.static
@property
def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request"
@property
# python 3 only
@property
def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters)
# TODO extend documentation rendered from proto
def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s
|
xenon-middleware/pyxenon | xenon/oop.py | GrpcMethod.signature | python | def signature(self):
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters) | Create a signature for this method, only in Python > 3.4 | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L131-L161 | [
"def get_fields(msg_type):\n \"\"\"Get a list of field names for Grpc message.\"\"\"\n return list(f.name for f in msg_type.DESCRIPTOR.fields)\n"
] | class GrpcMethod:
"""Data container for a GRPC method.
:ivar name: underscore style of method name
:ivar uses_request: wether this method has a request, if this value is
`True`, the name is generated from `name`, if it is a string the
contents of this string are used.
:ivar field_name: name of `self` within the request.
:ivar input_transform: custom method to generate a request from the
method's arguments.
:ivar output_transform: custom method to extract the return value from
the return value.
"""
def __init__(self, name, uses_request=False, field_name=None,
input_transform=None, output_transform=None,
static=False):
self.name = name
self.uses_request = uses_request
self.field_name = field_name
self.input_transform = input_transform
self.output_transform = output_transform
self.static = static
@property
def is_simple(self):
return not self.uses_request and not self.input_transform \
and not self.static
@property
def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request"
@property
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name)
# python 3 only
@property
# TODO extend documentation rendered from proto
def docstring(self, servicer):
"""Generate a doc-string."""
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s
|
xenon-middleware/pyxenon | xenon/oop.py | GrpcMethod.docstring | python | def docstring(self, servicer):
s = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
or "TODO: no docstring in .proto file"
if self.uses_request:
s += "\n"
for field in get_fields(self.request_type):
if field != self.field_name:
type_info = get_field_description(
self.request_type.DESCRIPTOR.fields_by_name[field])
s += " :param {}: {}\n".format(field, field)
s += " :type {0}: {1}\n".format(field, type_info)
return s | Generate a doc-string. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/oop.py#L164-L178 | [
"def get_fields(msg_type):\n \"\"\"Get a list of field names for Grpc message.\"\"\"\n return list(f.name for f in msg_type.DESCRIPTOR.fields)\n",
"def to_lower_camel_case(name):\n words = name.split('_')\n return words[0] + ''.join(w.title() for w in words[1:])\n",
"def get_field_description(f):\n \"\"\"Get the type description of a GRPC Message field.\"\"\"\n type_name = get_field_type(f)\n if type_name == 'MESSAGE' and \\\n {sf.name for sf in f.message_type.fields} == {'key', 'value'}:\n return 'map<string, string>'\n elif type_name == 'MESSAGE':\n return f.message_type.full_name\n elif type_name == 'ENUM':\n return f.enum_type.full_name\n else:\n return type_name.lower()\n"
] | class GrpcMethod:
"""Data container for a GRPC method.
:ivar name: underscore style of method name
:ivar uses_request: wether this method has a request, if this value is
`True`, the name is generated from `name`, if it is a string the
contents of this string are used.
:ivar field_name: name of `self` within the request.
:ivar input_transform: custom method to generate a request from the
method's arguments.
:ivar output_transform: custom method to extract the return value from
the return value.
"""
def __init__(self, name, uses_request=False, field_name=None,
input_transform=None, output_transform=None,
static=False):
self.name = name
self.uses_request = uses_request
self.field_name = field_name
self.input_transform = input_transform
self.output_transform = output_transform
self.static = static
@property
def is_simple(self):
return not self.uses_request and not self.input_transform \
and not self.static
@property
def request_name(self):
"""Generate the name of the request."""
if self.static and not self.uses_request:
return 'Empty'
if not self.uses_request:
return None
if isinstance(self.uses_request, str):
return self.uses_request
return to_camel_case(self.name) + "Request"
@property
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name)
# python 3 only
@property
def signature(self):
"""Create a signature for this method, only in Python > 3.4"""
if not use_signature:
raise NotImplementedError("Python 3 only.")
if self.static:
parameters = \
(Parameter(name='cls',
kind=Parameter.POSITIONAL_ONLY),)
else:
parameters = \
(Parameter(name='self',
kind=Parameter.POSITIONAL_ONLY),)
if self.input_transform:
return signature(self.input_transform)
if self.uses_request:
fields = get_fields(self.request_type)
if not self.static:
if self.field_name not in fields:
raise NameError("field '{}' not found in {}".format(
self.field_name, self.request_name))
fields.remove(self.field_name)
parameters += tuple(
Parameter(name=name, kind=Parameter.POSITIONAL_OR_KEYWORD,
default=None)
for name in fields)
return Signature(parameters)
# TODO extend documentation rendered from proto
|
xenon-middleware/pyxenon | xenon/compat.py | find_xenon_grpc_jar | python | def find_xenon_grpc_jar():
prefix = Path(sys.prefix)
locations = [
prefix / 'lib',
prefix / 'local' / 'lib'
]
for location in locations:
jar_file = location / 'xenon-grpc-{}-all.jar'.format(
xenon_grpc_version)
if not jar_file.exists():
continue
else:
return str(jar_file)
return None | Find the Xenon-GRPC jar-file, windows version. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/compat.py#L16-L34 | null | """
Define cross-platform methods.
"""
from pathlib import Path
import logging
import subprocess
import os
import sys
import signal
from .create_keys import create_self_signed_cert
from .version import xenon_grpc_version
def kill_process(process):
"""Kill the process group associated with the given process. (posix)"""
logger = logging.getLogger('xenon')
logger.info('Terminating Xenon-GRPC server.')
os.kill(process.pid, signal.SIGINT)
process.wait()
def start_xenon_server(port=50051, disable_tls=False):
"""Start the server."""
jar_file = find_xenon_grpc_jar()
if not jar_file:
raise RuntimeError("Could not find 'xenon-grpc' jar file.")
cmd = ['java', '-jar', jar_file, '-p', str(port)]
if not disable_tls:
crt_file, key_file = create_self_signed_cert()
cmd.extend([
'--server-cert-chain', str(crt_file),
'--server-private-key', str(key_file),
'--client-cert-chain', str(crt_file)])
else:
crt_file = key_file = None
process = subprocess.Popen(
cmd,
bufsize=1,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return process, crt_file, key_file
|
xenon-middleware/pyxenon | xenon/compat.py | kill_process | python | def kill_process(process):
logger = logging.getLogger('xenon')
logger.info('Terminating Xenon-GRPC server.')
os.kill(process.pid, signal.SIGINT)
process.wait() | Kill the process group associated with the given process. (posix) | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/compat.py#L37-L42 | null | """
Define cross-platform methods.
"""
from pathlib import Path
import logging
import subprocess
import os
import sys
import signal
from .create_keys import create_self_signed_cert
from .version import xenon_grpc_version
def find_xenon_grpc_jar():
"""Find the Xenon-GRPC jar-file, windows version."""
prefix = Path(sys.prefix)
locations = [
prefix / 'lib',
prefix / 'local' / 'lib'
]
for location in locations:
jar_file = location / 'xenon-grpc-{}-all.jar'.format(
xenon_grpc_version)
if not jar_file.exists():
continue
else:
return str(jar_file)
return None
def start_xenon_server(port=50051, disable_tls=False):
"""Start the server."""
jar_file = find_xenon_grpc_jar()
if not jar_file:
raise RuntimeError("Could not find 'xenon-grpc' jar file.")
cmd = ['java', '-jar', jar_file, '-p', str(port)]
if not disable_tls:
crt_file, key_file = create_self_signed_cert()
cmd.extend([
'--server-cert-chain', str(crt_file),
'--server-private-key', str(key_file),
'--client-cert-chain', str(crt_file)])
else:
crt_file = key_file = None
process = subprocess.Popen(
cmd,
bufsize=1,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return process, crt_file, key_file
|
xenon-middleware/pyxenon | xenon/compat.py | start_xenon_server | python | def start_xenon_server(port=50051, disable_tls=False):
jar_file = find_xenon_grpc_jar()
if not jar_file:
raise RuntimeError("Could not find 'xenon-grpc' jar file.")
cmd = ['java', '-jar', jar_file, '-p', str(port)]
if not disable_tls:
crt_file, key_file = create_self_signed_cert()
cmd.extend([
'--server-cert-chain', str(crt_file),
'--server-private-key', str(key_file),
'--client-cert-chain', str(crt_file)])
else:
crt_file = key_file = None
process = subprocess.Popen(
cmd,
bufsize=1,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return process, crt_file, key_file | Start the server. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/compat.py#L45-L69 | [
"def create_self_signed_cert():\n \"\"\"Creates a self-signed certificate key pair.\"\"\"\n config_dir = Path(BaseDirectory.xdg_config_home) / 'xenon-grpc'\n config_dir.mkdir(parents=True, exist_ok=True)\n\n key_prefix = gethostname()\n crt_file = config_dir / ('%s.crt' % key_prefix)\n key_file = config_dir / ('%s.key' % key_prefix)\n\n if crt_file.exists() and key_file.exists():\n return crt_file, key_file\n\n logger = logging.getLogger('xenon')\n logger.info(\"Creating authentication keys for xenon-grpc.\")\n\n # create a key pair\n k = crypto.PKey()\n k.generate_key(crypto.TYPE_RSA, 1024)\n\n # create a self-signed cert\n cert = crypto.X509()\n cert.get_subject().CN = gethostname()\n cert.set_serial_number(1000)\n cert.gmtime_adj_notBefore(0)\n # valid for almost ten years!\n cert.gmtime_adj_notAfter(10 * 365 * 24 * 3600)\n cert.set_issuer(cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(k, 'sha256')\n\n open(str(crt_file), \"wb\").write(\n crypto.dump_certificate(crypto.FILETYPE_PEM, cert))\n open(str(key_file), \"wb\").write(\n crypto.dump_privatekey(crypto.FILETYPE_PEM, k))\n\n return crt_file, key_file\n",
"def find_xenon_grpc_jar():\n \"\"\"Find the Xenon-GRPC jar-file, windows version.\"\"\"\n prefix = Path(sys.prefix)\n\n locations = [\n prefix / 'lib',\n prefix / 'local' / 'lib'\n ]\n\n for location in locations:\n jar_file = location / 'xenon-grpc-{}-all.jar'.format(\n xenon_grpc_version)\n\n if not jar_file.exists():\n continue\n else:\n return str(jar_file)\n\n return None\n"
] | """
Define cross-platform methods.
"""
from pathlib import Path
import logging
import subprocess
import os
import sys
import signal
from .create_keys import create_self_signed_cert
from .version import xenon_grpc_version
def find_xenon_grpc_jar():
"""Find the Xenon-GRPC jar-file, windows version."""
prefix = Path(sys.prefix)
locations = [
prefix / 'lib',
prefix / 'local' / 'lib'
]
for location in locations:
jar_file = location / 'xenon-grpc-{}-all.jar'.format(
xenon_grpc_version)
if not jar_file.exists():
continue
else:
return str(jar_file)
return None
def kill_process(process):
"""Kill the process group associated with the given process. (posix)"""
logger = logging.getLogger('xenon')
logger.info('Terminating Xenon-GRPC server.')
os.kill(process.pid, signal.SIGINT)
process.wait()
|
xenon-middleware/pyxenon | examples/interactive.py | make_input_stream | python | def make_input_stream():
input_queue = Queue()
def input_stream():
while True:
cmd, msg = input_queue.get()
if cmd == 'end':
input_queue.task_done()
return
elif cmd == 'msg':
yield msg.encode()
input_queue.task_done()
return input_queue, input_stream | Creates a :py:class:`Queue` object and a co-routine yielding from that
queue. The queue should be populated with 2-tuples of the form `(command,
message)`, where `command` is one of [`msg`, `end`].
When the `end` command is recieved, the co-routine returns, ending the
stream.
When a `msg` command is received, the accompanying message is encoded and
yielded as a ``bytes`` object.
:return: tuple of (queue, stream) | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/examples/interactive.py#L6-L30 | null | import xenon
from queue import Queue
from timeout import timeout
def get_line(s):
"""The :py:meth:`submit_interactive_job()` method returns a stream of
objects that contain a ``stdout`` and ``stderr`` field, containing a
``bytes`` object. Here we're only reading from ``stdout``."""
return s.next().stdout.decode().strip()
# our input lines
input_lines = [
"Zlfgvp aboyr tnf,",
"Urnil lrg syrrgvat sebz tenfc,",
"Oyhr yvxr oheavat vpr."
]
# the job description, make sure you run the script from the examples
# directory!
job_description = xenon.JobDescription(
executable='python',
arguments=['rot13.py'],
queue_name='multi')
# start the xenon-grpc server
xenon.init()
# on the local adaptor
with xenon.Scheduler.create(adaptor='local') as scheduler:
input_queue, input_stream = make_input_stream()
# submit an interactive job, this gets us the job-id and a stream yielding
# job output from stdout and stderr.
job, output_stream = scheduler.submit_interactive_job(
description=job_description, stdin_stream=input_stream())
# next we feed the input_queue with messages
try:
for line in input_lines:
print(" [sending] " + line)
input_queue.put(('msg', line + '\n'))
msg = timeout(1.0, get_line, output_stream)
print("[received] " + msg)
# make sure to close our end whatever may happen
finally:
input_queue.put(('end', None))
input_queue.join()
scheduler.wait_until_done(job)
|
xenon-middleware/pyxenon | examples/timeout.py | timeout | python | def timeout(delay, call, *args, **kwargs):
return_value = None
def target():
nonlocal return_value
return_value = call(*args, **kwargs)
t = Thread(target=target)
t.start()
t.join(delay)
if t.is_alive():
raise RuntimeError("Operation did not complete within time.")
return return_value | Run a function call for `delay` seconds, and raise a RuntimeError
if the operation didn't complete. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/examples/timeout.py#L4-L19 | null | from threading import Thread
|
xenon-middleware/pyxenon | xenon/server.py | check_socket | python | def check_socket(host, port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return sock.connect_ex((host, port)) == 0 | Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L19-L23 | null | """
GRPC server connection.
"""
import atexit
import logging
import socket
import threading
import time
from contextlib import closing
import grpc
from .proto import (xenon_pb2_grpc)
from .compat import (start_xenon_server, kill_process)
def get_secure_channel(crt_file, key_file, port=50051):
"""Try to connect over a secure channel."""
creds = grpc.ssl_channel_credentials(
root_certificates=open(str(crt_file), 'rb').read(),
private_key=open(str(key_file), 'rb').read(),
certificate_chain=open(str(crt_file), 'rb').read())
address = "{}:{}".format(socket.gethostname(), port)
channel = grpc.secure_channel(address, creds)
return channel
def find_free_port():
"""Finds a free port."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
return sock.getsockname()[1]
def print_stream(file, name):
"""Print stream from file to logger."""
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip()))
class Server(object):
"""Xenon Server. This tries to find a running Xenon-GRPC server,
or start one if not found. This implementation may only work on Unix.
"""
def __init__(self, port=50051, disable_tls=False):
self.port = port
self.process = None
self.channel = None
self.threads = []
self.disable_tls = disable_tls
# Xenon proxies
self.scheduler_stub = None
self.file_system_stub = None
def __enter__(self):
logger = logging.getLogger('xenon')
if check_socket(socket.gethostname(), self.port):
logger.info('Xenon-GRPC servers seems to be running.')
else:
logger.info('Starting Xenon-GRPC server.')
self.process, crt_file, key_file = \
start_xenon_server(self.port, self.disable_tls)
for name, output in [('out', self.process.stdout),
('err', self.process.stderr)]:
thread = threading.Thread(
target=print_stream,
args=(output, name),
daemon=True)
thread.start()
for _ in range(50):
if check_socket(socket.gethostname(), self.port):
break
time.sleep(0.1)
else:
raise RuntimeError("GRPC started, but still can't connect.")
logger.info('Connecting to server')
if self.disable_tls:
self.channel = grpc.insecure_channel(
'{}:{}'.format(socket.gethostname(), self.port))
else:
self.channel = get_secure_channel(crt_file, key_file, self.port)
self.file_system_stub = \
xenon_pb2_grpc.FileSystemServiceStub(self.channel)
self.scheduler_stub = \
xenon_pb2_grpc.SchedulerServiceStub(self.channel)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if self.process:
kill_process(self.process)
self.process = None
__server__ = Server()
def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'):
"""Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening."""
logger = logging.getLogger('xenon')
logger.setLevel(logging.INFO)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(logging.Formatter(style='{'))
logger_handler.setLevel(getattr(logging, log_level))
logger.addHandler(logger_handler)
if port is None:
port = find_free_port()
if __server__.process is not None:
logger.warning(
"You tried to run init(), but the server is already running.")
return __server__
__server__.port = port
__server__.disable_tls = disable_tls
__server__.__enter__()
if not do_not_exit:
atexit.register(__server__.__exit__, None, None, None)
return __server__
|
xenon-middleware/pyxenon | xenon/server.py | get_secure_channel | python | def get_secure_channel(crt_file, key_file, port=50051):
creds = grpc.ssl_channel_credentials(
root_certificates=open(str(crt_file), 'rb').read(),
private_key=open(str(key_file), 'rb').read(),
certificate_chain=open(str(crt_file), 'rb').read())
address = "{}:{}".format(socket.gethostname(), port)
channel = grpc.secure_channel(address, creds)
return channel | Try to connect over a secure channel. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L26-L36 | null | """
GRPC server connection.
"""
import atexit
import logging
import socket
import threading
import time
from contextlib import closing
import grpc
from .proto import (xenon_pb2_grpc)
from .compat import (start_xenon_server, kill_process)
def check_socket(host, port):
"""Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return sock.connect_ex((host, port)) == 0
def find_free_port():
"""Finds a free port."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
return sock.getsockname()[1]
def print_stream(file, name):
"""Print stream from file to logger."""
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip()))
class Server(object):
"""Xenon Server. This tries to find a running Xenon-GRPC server,
or start one if not found. This implementation may only work on Unix.
"""
def __init__(self, port=50051, disable_tls=False):
self.port = port
self.process = None
self.channel = None
self.threads = []
self.disable_tls = disable_tls
# Xenon proxies
self.scheduler_stub = None
self.file_system_stub = None
def __enter__(self):
logger = logging.getLogger('xenon')
if check_socket(socket.gethostname(), self.port):
logger.info('Xenon-GRPC servers seems to be running.')
else:
logger.info('Starting Xenon-GRPC server.')
self.process, crt_file, key_file = \
start_xenon_server(self.port, self.disable_tls)
for name, output in [('out', self.process.stdout),
('err', self.process.stderr)]:
thread = threading.Thread(
target=print_stream,
args=(output, name),
daemon=True)
thread.start()
for _ in range(50):
if check_socket(socket.gethostname(), self.port):
break
time.sleep(0.1)
else:
raise RuntimeError("GRPC started, but still can't connect.")
logger.info('Connecting to server')
if self.disable_tls:
self.channel = grpc.insecure_channel(
'{}:{}'.format(socket.gethostname(), self.port))
else:
self.channel = get_secure_channel(crt_file, key_file, self.port)
self.file_system_stub = \
xenon_pb2_grpc.FileSystemServiceStub(self.channel)
self.scheduler_stub = \
xenon_pb2_grpc.SchedulerServiceStub(self.channel)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if self.process:
kill_process(self.process)
self.process = None
__server__ = Server()
def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'):
"""Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening."""
logger = logging.getLogger('xenon')
logger.setLevel(logging.INFO)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(logging.Formatter(style='{'))
logger_handler.setLevel(getattr(logging, log_level))
logger.addHandler(logger_handler)
if port is None:
port = find_free_port()
if __server__.process is not None:
logger.warning(
"You tried to run init(), but the server is already running.")
return __server__
__server__.port = port
__server__.disable_tls = disable_tls
__server__.__enter__()
if not do_not_exit:
atexit.register(__server__.__exit__, None, None, None)
return __server__
|
xenon-middleware/pyxenon | xenon/server.py | find_free_port | python | def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
return sock.getsockname()[1] | Finds a free port. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L39-L43 | null | """
GRPC server connection.
"""
import atexit
import logging
import socket
import threading
import time
from contextlib import closing
import grpc
from .proto import (xenon_pb2_grpc)
from .compat import (start_xenon_server, kill_process)
def check_socket(host, port):
"""Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return sock.connect_ex((host, port)) == 0
def get_secure_channel(crt_file, key_file, port=50051):
"""Try to connect over a secure channel."""
creds = grpc.ssl_channel_credentials(
root_certificates=open(str(crt_file), 'rb').read(),
private_key=open(str(key_file), 'rb').read(),
certificate_chain=open(str(crt_file), 'rb').read())
address = "{}:{}".format(socket.gethostname(), port)
channel = grpc.secure_channel(address, creds)
return channel
def print_stream(file, name):
"""Print stream from file to logger."""
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip()))
class Server(object):
"""Xenon Server. This tries to find a running Xenon-GRPC server,
or start one if not found. This implementation may only work on Unix.
"""
def __init__(self, port=50051, disable_tls=False):
self.port = port
self.process = None
self.channel = None
self.threads = []
self.disable_tls = disable_tls
# Xenon proxies
self.scheduler_stub = None
self.file_system_stub = None
def __enter__(self):
logger = logging.getLogger('xenon')
if check_socket(socket.gethostname(), self.port):
logger.info('Xenon-GRPC servers seems to be running.')
else:
logger.info('Starting Xenon-GRPC server.')
self.process, crt_file, key_file = \
start_xenon_server(self.port, self.disable_tls)
for name, output in [('out', self.process.stdout),
('err', self.process.stderr)]:
thread = threading.Thread(
target=print_stream,
args=(output, name),
daemon=True)
thread.start()
for _ in range(50):
if check_socket(socket.gethostname(), self.port):
break
time.sleep(0.1)
else:
raise RuntimeError("GRPC started, but still can't connect.")
logger.info('Connecting to server')
if self.disable_tls:
self.channel = grpc.insecure_channel(
'{}:{}'.format(socket.gethostname(), self.port))
else:
self.channel = get_secure_channel(crt_file, key_file, self.port)
self.file_system_stub = \
xenon_pb2_grpc.FileSystemServiceStub(self.channel)
self.scheduler_stub = \
xenon_pb2_grpc.SchedulerServiceStub(self.channel)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if self.process:
kill_process(self.process)
self.process = None
__server__ = Server()
def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'):
"""Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening."""
logger = logging.getLogger('xenon')
logger.setLevel(logging.INFO)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(logging.Formatter(style='{'))
logger_handler.setLevel(getattr(logging, log_level))
logger.addHandler(logger_handler)
if port is None:
port = find_free_port()
if __server__.process is not None:
logger.warning(
"You tried to run init(), but the server is already running.")
return __server__
__server__.port = port
__server__.disable_tls = disable_tls
__server__.__enter__()
if not do_not_exit:
atexit.register(__server__.__exit__, None, None, None)
return __server__
|
xenon-middleware/pyxenon | xenon/server.py | print_stream | python | def print_stream(file, name):
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip())) | Print stream from file to logger. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L46-L50 | null | """
GRPC server connection.
"""
import atexit
import logging
import socket
import threading
import time
from contextlib import closing
import grpc
from .proto import (xenon_pb2_grpc)
from .compat import (start_xenon_server, kill_process)
def check_socket(host, port):
"""Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return sock.connect_ex((host, port)) == 0
def get_secure_channel(crt_file, key_file, port=50051):
"""Try to connect over a secure channel."""
creds = grpc.ssl_channel_credentials(
root_certificates=open(str(crt_file), 'rb').read(),
private_key=open(str(key_file), 'rb').read(),
certificate_chain=open(str(crt_file), 'rb').read())
address = "{}:{}".format(socket.gethostname(), port)
channel = grpc.secure_channel(address, creds)
return channel
def find_free_port():
"""Finds a free port."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
return sock.getsockname()[1]
class Server(object):
"""Xenon Server. This tries to find a running Xenon-GRPC server,
or start one if not found. This implementation may only work on Unix.
"""
def __init__(self, port=50051, disable_tls=False):
self.port = port
self.process = None
self.channel = None
self.threads = []
self.disable_tls = disable_tls
# Xenon proxies
self.scheduler_stub = None
self.file_system_stub = None
def __enter__(self):
logger = logging.getLogger('xenon')
if check_socket(socket.gethostname(), self.port):
logger.info('Xenon-GRPC servers seems to be running.')
else:
logger.info('Starting Xenon-GRPC server.')
self.process, crt_file, key_file = \
start_xenon_server(self.port, self.disable_tls)
for name, output in [('out', self.process.stdout),
('err', self.process.stderr)]:
thread = threading.Thread(
target=print_stream,
args=(output, name),
daemon=True)
thread.start()
for _ in range(50):
if check_socket(socket.gethostname(), self.port):
break
time.sleep(0.1)
else:
raise RuntimeError("GRPC started, but still can't connect.")
logger.info('Connecting to server')
if self.disable_tls:
self.channel = grpc.insecure_channel(
'{}:{}'.format(socket.gethostname(), self.port))
else:
self.channel = get_secure_channel(crt_file, key_file, self.port)
self.file_system_stub = \
xenon_pb2_grpc.FileSystemServiceStub(self.channel)
self.scheduler_stub = \
xenon_pb2_grpc.SchedulerServiceStub(self.channel)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if self.process:
kill_process(self.process)
self.process = None
__server__ = Server()
def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'):
"""Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening."""
logger = logging.getLogger('xenon')
logger.setLevel(logging.INFO)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(logging.Formatter(style='{'))
logger_handler.setLevel(getattr(logging, log_level))
logger.addHandler(logger_handler)
if port is None:
port = find_free_port()
if __server__.process is not None:
logger.warning(
"You tried to run init(), but the server is already running.")
return __server__
__server__.port = port
__server__.disable_tls = disable_tls
__server__.__enter__()
if not do_not_exit:
atexit.register(__server__.__exit__, None, None, None)
return __server__
|
xenon-middleware/pyxenon | xenon/server.py | init | python | def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'):
logger = logging.getLogger('xenon')
logger.setLevel(logging.INFO)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(logging.Formatter(style='{'))
logger_handler.setLevel(getattr(logging, log_level))
logger.addHandler(logger_handler)
if port is None:
port = find_free_port()
if __server__.process is not None:
logger.warning(
"You tried to run init(), but the server is already running.")
return __server__
__server__.port = port
__server__.disable_tls = disable_tls
__server__.__enter__()
if not do_not_exit:
atexit.register(__server__.__exit__, None, None, None)
return __server__ | Start the Xenon GRPC server on the specified port, or, if a service
is already running on that port, connect to that.
If no port is given, a random port is selected. This means that, by
default, every python instance will start its own instance of a xenon-grpc
process.
:param port: the port number
:param do_not_exit: by default the GRPC server is shut down after Python
exits (through the `atexit` module), setting this value to `True` will
prevent that from happening. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L116-L151 | [
"def find_free_port():\n \"\"\"Finds a free port.\"\"\"\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n sock.bind(('', 0))\n return sock.getsockname()[1]\n",
"def __enter__(self):\n logger = logging.getLogger('xenon')\n\n if check_socket(socket.gethostname(), self.port):\n logger.info('Xenon-GRPC servers seems to be running.')\n else:\n logger.info('Starting Xenon-GRPC server.')\n self.process, crt_file, key_file = \\\n start_xenon_server(self.port, self.disable_tls)\n\n for name, output in [('out', self.process.stdout),\n ('err', self.process.stderr)]:\n thread = threading.Thread(\n target=print_stream,\n args=(output, name),\n daemon=True)\n thread.start()\n\n for _ in range(50):\n if check_socket(socket.gethostname(), self.port):\n break\n time.sleep(0.1)\n else:\n raise RuntimeError(\"GRPC started, but still can't connect.\")\n\n logger.info('Connecting to server')\n if self.disable_tls:\n self.channel = grpc.insecure_channel(\n '{}:{}'.format(socket.gethostname(), self.port))\n else:\n self.channel = get_secure_channel(crt_file, key_file, self.port)\n\n self.file_system_stub = \\\n xenon_pb2_grpc.FileSystemServiceStub(self.channel)\n self.scheduler_stub = \\\n xenon_pb2_grpc.SchedulerServiceStub(self.channel)\n return self\n"
] | """
GRPC server connection.
"""
import atexit
import logging
import socket
import threading
import time
from contextlib import closing
import grpc
from .proto import (xenon_pb2_grpc)
from .compat import (start_xenon_server, kill_process)
def check_socket(host, port):
"""Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return sock.connect_ex((host, port)) == 0
def get_secure_channel(crt_file, key_file, port=50051):
"""Try to connect over a secure channel."""
creds = grpc.ssl_channel_credentials(
root_certificates=open(str(crt_file), 'rb').read(),
private_key=open(str(key_file), 'rb').read(),
certificate_chain=open(str(crt_file), 'rb').read())
address = "{}:{}".format(socket.gethostname(), port)
channel = grpc.secure_channel(address, creds)
return channel
def find_free_port():
"""Finds a free port."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
return sock.getsockname()[1]
def print_stream(file, name):
"""Print stream from file to logger."""
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip()))
class Server(object):
"""Xenon Server. This tries to find a running Xenon-GRPC server,
or start one if not found. This implementation may only work on Unix.
"""
def __init__(self, port=50051, disable_tls=False):
self.port = port
self.process = None
self.channel = None
self.threads = []
self.disable_tls = disable_tls
# Xenon proxies
self.scheduler_stub = None
self.file_system_stub = None
def __enter__(self):
logger = logging.getLogger('xenon')
if check_socket(socket.gethostname(), self.port):
logger.info('Xenon-GRPC servers seems to be running.')
else:
logger.info('Starting Xenon-GRPC server.')
self.process, crt_file, key_file = \
start_xenon_server(self.port, self.disable_tls)
for name, output in [('out', self.process.stdout),
('err', self.process.stderr)]:
thread = threading.Thread(
target=print_stream,
args=(output, name),
daemon=True)
thread.start()
for _ in range(50):
if check_socket(socket.gethostname(), self.port):
break
time.sleep(0.1)
else:
raise RuntimeError("GRPC started, but still can't connect.")
logger.info('Connecting to server')
if self.disable_tls:
self.channel = grpc.insecure_channel(
'{}:{}'.format(socket.gethostname(), self.port))
else:
self.channel = get_secure_channel(crt_file, key_file, self.port)
self.file_system_stub = \
xenon_pb2_grpc.FileSystemServiceStub(self.channel)
self.scheduler_stub = \
xenon_pb2_grpc.SchedulerServiceStub(self.channel)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if self.process:
kill_process(self.process)
self.process = None
__server__ = Server()
|
xenon-middleware/pyxenon | xenon/create_keys.py | create_self_signed_cert | python | def create_self_signed_cert():
config_dir = Path(BaseDirectory.xdg_config_home) / 'xenon-grpc'
config_dir.mkdir(parents=True, exist_ok=True)
key_prefix = gethostname()
crt_file = config_dir / ('%s.crt' % key_prefix)
key_file = config_dir / ('%s.key' % key_prefix)
if crt_file.exists() and key_file.exists():
return crt_file, key_file
logger = logging.getLogger('xenon')
logger.info("Creating authentication keys for xenon-grpc.")
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().CN = gethostname()
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
# valid for almost ten years!
cert.gmtime_adj_notAfter(10 * 365 * 24 * 3600)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha256')
open(str(crt_file), "wb").write(
crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
open(str(key_file), "wb").write(
crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
return crt_file, key_file | Creates a self-signed certificate key pair. | train | https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/create_keys.py#L14-L49 | null | """
Certificate creation.
"""
import logging
from socket import gethostname
from OpenSSL import crypto
from pathlib import Path
from xdg import BaseDirectory
|
rainwoodman/kdcount | kdcount/correlate.py | compute_sum_values | python | def compute_sum_values(i, j, data1, data2):
sum1_ij = 1.
for idx, d in zip([i,j], [data1, data2]):
if isinstance(d, field): sum1_ij *= d.wvalue[idx]
elif isinstance(d, points): sum1_ij *= d.weights[idx]
else:
raise NotImplementedError("data type not recognized")
sum2_ij = data1.weights[i] * data2.weights[j]
return sum1_ij, sum2_ij | Return the sum1_ij and sum2_ij values given
the input indices and data instances.
Notes
-----
This is called in `Binning.update_sums` to compute
the `sum1` and `sum2` contributions for indices `(i,j)`
Parameters
----------
i,j : array_like
the bin indices for these pairs
data1, data2 : `points`, `field` instances
the two `points` or `field` objects
Returns
-------
sum1_ij, sum2_ij : float, array_like (N,...)
contributions to sum1, sum2 -- either a float or array
of shape (N, ...) where N is the length of `i`, `j` | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L46-L77 | null | """
Correlation function (pair counting) with KDTree.
Pair counting is the basic algorithm to calculate correlation functions.
Correlation function is a commonly used metric in cosmology to measure
the clustering of matter, or the growth of large scale structure in the universe.
We implement :py:class:`paircount` for pair counting. Since this is a discrete
estimator, the binning is modeled by subclasses of :py:class:`Binning`. For example
- :py:class:`RBinning`
- :py:class:`RmuBinning`
- :py:class:`XYBinning`
- :py:class:`FlatSkyBinning`
- :py:class:`FlatSkyMultipoleBinning`
kdcount takes two types of input data: 'point' and 'field'.
:py:class:`kdcount.models.points` describes data with position and weight. For example, galaxies and
quasars are point data.
point.pos is a row array of the positions of the points; other fields are
used internally.
point.extra is the extra properties that can be used in the Binning. One use
is to exclude the Lyman-alpha pixels and Quasars from the same sightline.
:py:class:`kdcount.models.field` describes a continious field sampled at given positions, each sample
with a weight; a notorious example is the over-flux field in Lyman-alpha forest
it is a proxy of the over-density field sampled along quasar sightlines.
In the Python Interface, to count, one has to define the 'binning' scheme, by
subclassing :py:class:`Binning`. Binning describes a multi-dimension binning
scheme. The dimensions can be derived, for example, the norm of the spatial
separation can be a dimension the same way as the 'x' separation. For example,
see :py:class:`RmuBinning`.
"""
import numpy
import warnings
# local imports
from .models import points, field
from . import utils
class Binning(object):
"""
Binning of the correlation function. Pairs whose distance is
within a bin is counted towards the bin.
Attributes
----------
dims : array_like
internal; descriptors of binning dimensions.
edges : array_like
edges of bins per dimension
Class Attributes
----------------
enable_fast_node_count : bool
if True, use the C implementation of node-node pair counting; this only works for point datasets;
and does not properly compute the mean coordinates.
if False, use the Python implementation of point-point pair counting; the supports all features via
the `digitize` method of the binning object.
"""
enable_fast_node_count = False # if True, allow using the C implementation of node-node pair counting on point datasets.
def __init__(self, dims, edges, channels=None):
"""
Parameters
----------
dims : list
a list specifying the binning dimension names
edges : list
a list giving the bin edges for each dimension
channels: list, or None
a list giving the channels to count of per bin; these channels can be, e.g. multipoles.
"""
if len(dims) != len(edges):
raise ValueError("size mismatch between number of dimensions and edges supplied")
self.dims = dims
self.Ndim = len(self.dims)
self.edges = edges
self.channels = channels
self.centers = []
for i in range(self.Ndim):
center = 0.5 * (self.edges[i][1:] + self.edges[i][:-1])
self.centers.append(center)
# setup the info we need from the edges
self._setup()
if self.Ndim == 1:
self.edges = self.edges[0]
self.centers = self.centers[0]
def _setup(self):
"""
Set the binning info we need from the `edges`
"""
dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')]
dtype = numpy.dtype(dtype)
self._info = numpy.empty(self.Ndim, dtype=dtype)
self.min = self._info['min']
self.max = self._info['max']
self.N = self._info['N']
self.inv = self._info['inv']
self.spacing = self._info['spacing']
for i, dim in enumerate(self.dims):
self.N[i] = len(self.edges[i])-1
self.min[i] = self.edges[i][0]
self.max[i] = self.edges[i][-1]
# determine the type of spacing
self.spacing[i] = None
lin_diff = numpy.diff(self.edges[i])
with numpy.errstate(divide='ignore', invalid='ignore'):
log_diff = numpy.diff(numpy.log10(self.edges[i]))
if numpy.allclose(lin_diff, lin_diff[0]):
self.spacing[i] = 'linspace'
self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i])
elif numpy.allclose(log_diff, log_diff[0]):
self.spacing[i] = 'logspace'
self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i])
self.shape = self.N + 2
# store Rmax
self.Rmax = self.max[0]
def linear(self, **paircoords):
"""
Linearize bin indices.
This function is called by subclasses. Refer to the source
code of :py:class:`RBinning` for an example.
Parameters
----------
args : list
a list of bin index, (xi, yi, zi, ..)
Returns
-------
linearlized bin index
"""
N = len(paircoords[list(paircoords.keys())[0]])
integer = numpy.empty(N, ('i8', (self.Ndim,))).T
# do each dimension
for i, dim in enumerate(self.dims):
if self.spacing[i] == 'linspace':
x = paircoords[dim] - self.min[i]
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] == 'logspace':
x = paircoords[dim].copy()
x[x == 0] = self.min[i] * 0.9
x = numpy.log10(x / self.min[i])
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] is None:
edge = self.edges if self.Ndim == 1 else self.edges[i]
integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left')
return numpy.ravel_multi_index(integer, self.shape, mode='clip')
def digitize(self, r, i, j, data1, data2):
"""
Calculate the bin number of pairs separated by distances r,
Use :py:meth:`linear` to convert from multi-dimension bin index to
linear index.
Parameters
----------
r : array_like
separation
i, j : array_like
index (i, j) of pairs.
data1, data2 :
The position of first point is data1.pos[i], the position of second point is
data2.pos[j].
Returns
-------
dig : the integer bin number for each pair
paircoords: the coordinate of pairs, dictionary one array for each dimension, optional
if not provided, the bin center is not computed (may raise an error if requested)
weights : the weighting for each bin on each channel, of shape (nchannel, len(dig)), optional, only
used for multi-channel counts.
"""
raise NotImplementedError()
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None):
"""
The main function that digitizes the pair counts,
calls bincount for the appropriate `sum1` and `sum2`
values, and adds them to the input arrays,
will modify sum1, sum2, N, and centers_sum inplace.
"""
# the summation values for this (r,i,j)
sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2)
# digitize
digr = self.digitize(r, i, j, data1, data2)
if len(digr) == 3 and isinstance(digr[1], dict):
dig, paircoords, weights = digr
elif len(digr) == 2 and isinstance(digr[1], dict):
dig, paircoords = digr
weights = None
else:
dig = digr
paircoords = None
weights = None
# sum 1
def add_one_channel(sum1c, sum1_ijc):
if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1:
sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size)
else:
for d in range(sum1c.shape[0]):
sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size)
if self.channels:
if weights is None:
raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels")
sum1_ij = weights * sum1_ij
# sum1_ij[ichannel, dig, dim]
for ichannel in range(len(self.channels)):
add_one_channel(sum1[ichannel], sum1_ij[ichannel])
else:
# sum1_ij[dig, dim]
add_one_channel(sum1, sum1_ij)
# sum 2, if both data are not points
if not numpy.isscalar(sum2):
sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size)
if N is not None:
if not paircoords:
raise RuntimeError("Bin center is requested but not returned by digitize")
# update the mean coords
self._update_mean_coords(dig, N, centers_sum, **paircoords)
def sum_shapes(self, data1, data2):
"""
Return the shapes of the summation arrays,
given the input data and shape of the bins
"""
# the linear shape (put extra dimensions first)
linearshape = [-1] + list(self.shape)
# determine the full shape
subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)]
subshape = []
if len(subshapes) == 2:
assert subshapes[0] == subshapes[1]
subshape = subshapes[0]
elif len(subshapes) == 1:
subshape = subshapes[0]
fullshape = subshape + list(self.shape)
# prepend the shape for different channels
if self.channels:
fullshape = [len(self.channels)] + fullshape
return linearshape, fullshape
def _update_mean_coords(self, dig, N, centers_sum, **paircoords):
"""
Update the mean coordinate sums
"""
if N is None or centers_sum is None: return
N.flat[:] += utils.bincount(dig, 1., minlength=N.size)
for i, dim in enumerate(self.dims):
size = centers_sum[i].size
centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
def update_mean_coords(self, dig, N, centers_sum, **paircoords):
warnings.warn("update_mean_coords is deprecated. Return a dictionary of paircoords in digitize function instead.", DeprecationWarning, stacklevel=2)
self._update_mean_coords(dig, N, centers_sum, **paircoords)
class RmuBinning(Binning):
"""
Binning in R and mu (angular along line of sight)
mu = cos(theta), relative to line of sight from a given observer.
Parameters
----------
rbins : array_like
the array of R bin edges
Nmu : int
number of bins in mu direction.
observer : array_like (Ndim)
location of the observer (for line of sight)
mu_min : float, optional
the lower edge of the first``mu`` bin; default is ``-1``
mu_max : float, optional
the upper edge of the last ``mu`` bin; default is ``1``
absmu : bool, optional
if ``mu_min`` > 0, this flag specifies whether to take the
absolute magnitude of ``mu`` such that pairs with negative ``mu``
are included
"""
def __init__(self, rbins, Nmu, observer, mu_min=-1., mu_max=1., absmu=False):
assert -1.0 <= mu_min <= 1.0, 'mu_min must be >= -1 and <= 1'
assert -1.0 <= mu_max <= 1.0, 'mu_max must be >= -1 and <= 1'
assert mu_min < mu_max, 'mu_min must be less than mu_max'
mubins = numpy.linspace(mu_min, mu_max, Nmu+1)
Binning.__init__(self, ['r', 'mu'], [rbins, mubins])
self.observer = numpy.array(observer)
self.absmu = absmu and mu_min >= 0
def digitize(self, r, i, j, data1, data2):
r1 = data1.pos[i]
r2 = data2.pos[j]
center = 0.5 * (r1 + r2) - self.observer
dr = r1 - r2
dot = numpy.einsum('ij, ij->i', dr, center)
center = numpy.einsum('ij, ij->i', center, center) ** 0.5
with numpy.errstate(invalid='ignore'):
mu = dot / (center * r)
mu[r == 0] = 10.0
if self.absmu: mu = numpy.abs(mu)
dig = self.linear(r=r, mu=mu)
return dig, dict(r=r, mu=mu)
class XYBinning(Binning):
"""
Binning along Sky-Line-of-sight directions.
The bins are be (sky, los)
Parameters
----------
Rmax : float
max radius to go to
Nbins : int
number of bins in each direction.
observer : array_like (Ndim)
location of the observer (for line of sight)
Notes
-----
with numpy imshow , the second axis los, will be vertical
with imshow( ..T,) the sky will be vertical.
"""
def __init__(self, Rmax, Nbins, observer):
self.Rmax = Rmax
sky_bins = numpy.linspace(0, Rmax, Nbins)
los_bins = numpy.linspace(-Rmax, Rmax, 2*Nbins)
Binning.__init__(self, ['sky', 'los'], [sky_bins, los_bins])
self.observer = observer
def digitize(self, r, i, j, data1, data2):
r1 = data1.pos[i]
r2 = data2.pos[j]
center = 0.5 * (r1 + r2) - self.observer
dr = r1 - r2
dot = numpy.einsum('ij, ij->i', dr, center)
center2 = numpy.einsum('ij, ij->i', center, center)
los = dot / center2 ** 0.5
dr2 = numpy.einsum('ij, ij->i', dr, dr)
x2 = numpy.abs(dr2 - los ** 2)
sky = x2 ** 0.5
dig = self.linear(sky=sky, los=los)
return dig, dict(sky=sky, los=los)
class RBinning(Binning):
"""
Binning along radial direction.
Parameters
----------
rbins : array_like
the R bin edges
"""
def __init__(self, rbins):
Binning.__init__(self, ['r'], [rbins])
def digitize(self, r, i, j, data1, data2):
# linear bins
dig = self.linear(r=r)
return dig, dict(r=r)
class FastRBinning(RBinning):
"""
Binning along radial direction, use the fast node count algorithm when possible.
Parameters
----------
rbins : array_like
the R bin edges
"""
enable_fast_node_count = True
class MultipoleBinning(Binning):
"""
Binning in R and `ell`, the multipole number, in the
flat sky approximation, such that all pairs have the
same line-of-sight, which is taken to be the axis specified
by the `los` parameter (default is the last dimension)
Parameters
----------
rbins : array_like
the array of R bin edges
ells : list of int
the multipole numbers to compute
los : int, {0, 1, 2}
the axis to treat as the line-of-sight
Notes
-----
This can be slow, especially for a large set of `ells`. It is
likely better to use finely-binned :class:`RmuBinning` and
simply integrate the results against the appropriate Legendre
polynomial to compute multipoles
"""
def __init__(self, rbins, ells):
from scipy.special import legendre
Binning.__init__(self, ['r'], [rbins], ells)
self.ells = numpy.array(ells)
self.legendre = [legendre(l) for l in self.channels]
def digitize(self, r, i, j, data1, data2):
r1 = data1.pos[i]
r2 = data2.pos[j]
center = 0.5 * (r1 + r2)
dr = r1 - r2
dot = numpy.einsum('ij, ij->i', dr, center)
center = numpy.einsum('ij, ij->i', center, center) ** 0.5
with numpy.errstate(invalid='ignore', divide='ignore'):
mu = dot / (center * r)
# linear bin index and weights
dig = self.linear(r=r)
w = numpy.array([leg(mu) for leg in self.legendre]) # shape should be (N_ell, len(r1))
w *= (2*self.ells+1)[:,None]
return dig, dict(r=r), w
class FlatSkyMultipoleBinning(Binning):
"""
Binning in R and `ell`, the multipole number, in the
flat sky approximation, such that all pairs have the
same line-of-sight, which is taken to be the axis specified
by the `los` parameter (default is the last dimension)
Parameters
----------
rbins : array_like
the array of R bin edges
ells : list of int
the multipole numbers to compute
los : int, {0, 1, 2}
the axis to treat as the line-of-sight
"""
def __init__(self, rbins, ells, los, **kwargs):
from scipy.special import legendre
Binning.__init__(self, ['r'], [rbins], ells)
self.los = los
self.ells = numpy.array(ells)
self.legendre = [legendre(l) for l in self.channels]
def digitize(self, r, i, j, data1, data2):
r1 = data1.pos[i]
r2 = data2.pos[j]
# parallel separation
d_par = (r1-r2)[:,self.los]
# enforce periodic boundary conditions
if data1.boxsize is not None:
L = data1.boxsize[self.los]
d_par[d_par > L*0.5] -= L
d_par[d_par <= -L*0.5] += L
# mu
with numpy.errstate(invalid='ignore'):
mu = d_par / r
# linear bin index and weights
dig = self.linear(r=r)
w = numpy.array([leg(mu) for leg in self.legendre]) # shape should be (N_ell, len(r1))
w *= (2*self.ells+1)[:,None]
return dig, dict(r=r), w
class FlatSkyBinning(Binning):
"""
Binning in R and mu, in the flat sky approximation, such
that all pairs have the same line-of-sight, which is
taken to be the axis specified by the `los` parameter
(default is the last dimension)
Parameters
----------
rbins : array_like
the array of R bin edges
Nmu : int
the number of bins in `mu` direction.
los : int, {0, 1, 2}
the axis to treat as the line-of-sight
mu_min : float, optional
the lower edge of the first``mu`` bin; default is ``-1``
mu_max : float, optional
the upper edge of the last ``mu`` bin; default is ``1``
absmu : bool, optional
if ``mu_min`` > 0, this flag specifies whether to take the
absolute magnitude of ``mu`` such that pairs with negative ``mu``
are included
"""
def __init__(self, rbins, Nmu, los, mu_min=-1., mu_max=1., absmu=False):
assert -1.0 <= mu_min <= 1.0, 'mu_min must be >= -1 and <= 1'
assert -1.0 <= mu_max <= 1.0, 'mu_max must be >= -1 and <= 1'
assert mu_min < mu_max, 'mu_min must be less than mu_max'
mubins = numpy.linspace(mu_min, mu_max, Nmu+1)
Binning.__init__(self, ['r','mu'], [rbins, mubins])
self.los = los
self.absmu = absmu and mu_min >= 0.
def digitize(self, r, i, j, data1, data2):
r1 = data1.pos[i]
r2 = data2.pos[j]
# parallel separation
d_par = (r1-r2)[:,self.los]
# enforce periodic boundary conditions
if data1.boxsize is not None:
L = data1.boxsize[self.los]
d_par[d_par > L*0.5] -= L
d_par[d_par <= -L*0.5] += L
# mu
with numpy.errstate(invalid='ignore'):
mu = d_par / r
mu[r == 0] = 10.0 # ignore self pairs by setting mu out of bounds
if self.absmu: mu = numpy.abs(mu)
# linear bin index
dig = self.linear(r=r, mu=mu)
return dig, dict(r=r, mu=mu)
class paircount(object):
"""
Paircounting via a KD-tree, on two data sets.
Attributes
----------
sum1 : array_like
the numerator in the correlator
sum2 : array_like
the denominator in the correlator
centers : list
the centers of the corresponding corr bin, one item per
binning direction.
edges : list
the edges of the corresponding corr bin, one item per
binning direction.
binning : :py:class:`Binning`
binning object of this paircount
data1 : :py:class:`dataset`
input data set1. It can be either
:py:class:`field` for discrete sampling of a continuous
field, or :py:class:`kdcount.models.points` for a point set.
data2 : :py:class:`dataset`
input data set2, see above.
np : int
number of parallel processes. set to 0 to disable parallelism
Notes
-----
The value of sum1 and sum2 depends on the types of input
For :py:class:`kdcount.models.points` and :py:class:`kdcount.models.points`:
- sum1 is the per bin sum of products of weights
- sum2 is always 1.0
For :py:class:`kdcount.models.field` and :py:class:`kdcount.models.points`:
- sum1 is the per bin sum of products of weights and the field value
- sum2 is the per bin sum of products of weights
For :py:class:`kdcount.models.field` and :py:class:`kdcount.models.field`:
- sum1 is the per bin sum of products of weights and the field value
(one value per field)
- sum2 is the per bin sum of products of weights
With this convention the usual form of Landy-Salay estimator is (
for points x points:
(DD.sum1 -2r DR.sum1 + r2 RR.sum1) / (r2 RR.sum1)
with r = sum(wD) / sum(wR)
"""
def __init__(self, data1, data2, binning, compute_mean_coords=False, usefast=None, np=None):
"""
binning is an instance of Binning, (eg, RBinning, RmuBinning)
Notes
-----
* if the value has multiple components, return counts with be 'tuple',
one item for each component
* if `compute_mean_coords` is `True`, then `meancenters` will hold
the mean coordinate value in each bin.
"""
if usefast is not None:
warnings.warn("usefast is no longer supported. Declare a binning class is compatible"
"to the node counting implementation with `enable_fast_node_count=True` as a class attribute",
DeprecationWarning, stacklevel=2)
pts_only = isinstance(data1, points) and isinstance(data2, points)
if binning.enable_fast_node_count:
if not pts_only:
raise ValueError("fast node based paircount only works with points")
if compute_mean_coords:
raise ValueError("fast node based paircount cannot count for mean coordinates of a bin")
with utils.MapReduce(np=np) as pool:
size_hint = pool.np * 2
# run the work, using a context manager
with paircount_queue(self, binning, [data1, data2],
size_hint=size_hint,
compute_mean_coords=compute_mean_coords,
pts_only=pts_only,
) as queue:
pool.map(queue.work, range(queue.size), reduce=queue.reduce)
self.weight = data1.norm * data2.norm
class paircount_queue(object):
"""
A queue of paircount jobs. roughly size_hint jobs are created, and they are
reduced to the paircount objects when the queue is joined.
"""
def __init__(self, pc, binning, data, size_hint, compute_mean_coords, pts_only):
"""
Parameters
----------
pc : `paircount`
the parent pair count object, which we will attach the final results to
binning : `Binning`
the binning instance
data : tuple
tuple of the two data trees that we are correlating
size_hint : int, optional
the number of jobs to create, as an hint. None or zero to create 1 job.
compute_mean_coords : bool, optional
whether to compute the average coordinate value of each pair per bin
"""
self.pc = pc
self.bins = binning
self.data = data
self.size_hint = size_hint
self.compute_mean_coords = compute_mean_coords
self.pts_only = pts_only
def work(self, i):
"""
Internal function that performs the pair-counting
"""
n1, n2 = self.p[i]
# initialize the total arrays for this process
sum1 = numpy.zeros_like(self.sum1g)
sum2 = 1.
if not self.pts_only: sum2 = numpy.zeros_like(self.sum2g)
if self.compute_mean_coords:
N = numpy.zeros_like(self.N)
centers_sum = [numpy.zeros_like(c) for c in self.centers]
else:
N = None; centers_sum = None
if self.bins.enable_fast_node_count:
# field x points is not supported.
# because it is more likely need to deal
# with broadcasting
sum1attrs = [ d.attr for d in self.data ]
counts, sum1c = n1.count(n2, self.bins.edges,
attrs=sum1attrs)
sum1[..., :-1] = sum1c
sum1[..., -1] = 0
else:
def callback(r, i, j):
# just call the binning function, passing the
# sum arrays to fill in
self.bins.update_sums(r, i, j, self.data[0], self.data[1], sum1, sum2, N=N, centers_sum=centers_sum)
n1.enum(n2, self.bins.Rmax, process=callback)
if not self.compute_mean_coords:
return sum1, sum2
else:
return sum1, sum2, N, centers_sum
def reduce(self, sum1, sum2, *args):
"""
The internal reduce function that sums the results from various
processors
"""
self.sum1g[...] += sum1
if not self.pts_only: self.sum2g[...] += sum2
if self.compute_mean_coords:
N, centers_sum = args
self.N[...] += N
for i in range(self.bins.Ndim):
self.centers[i][...] += centers_sum[i]
def _partition(self, tree1, tree2, size_hint=128):
if size_hint is None or size_hint == 0: # serial mode
return [(tree1, tree2)]
import heapq
def makeitem(n1, n2):
if n1.size > n2.size:
return (-n1.size, 0, (n1, n2))
else:
return (-n2.size, 1, (n1, n2))
heap = []
heapq.heappush(heap, makeitem(tree1, tree2))
while len(heap) < size_hint:
junk, split, n = heapq.heappop(heap)
if n[split].less is None:
# put it back!
heapq.heappush(heap, makeitem(*n))
break
item = list(n)
item[split] = n[split].less
heapq.heappush(heap, makeitem(*item))
item[split] = n[split].greater
heapq.heappush(heap, makeitem(*item))
p = []
while heap:
junk, split, n = heapq.heappop(heap)
p.append(n)
return p
def __enter__(self):
"""
Initialize and setup the various arrays needed to do the work
"""
tree1 = self.data[0].tree.root
tree2 = self.data[1].tree.root
self.p = self._partition(tree1, tree2, self.size_hint)
self.size = len(self.p)
# initialize arrays to hold total sum1 and sum2
# grabbing the desired shapes from the binning instance
linearshape, self.fullshape = self.bins.sum_shapes(*self.data)
self.sum1g = numpy.zeros(self.fullshape, dtype='f8').reshape(linearshape)
if not self.pts_only:
self.sum2g = numpy.zeros(self.bins.shape, dtype='f8').reshape(linearshape)
# initialize arrays for computing mean coords
# for storing the mean values in each bin
# computed when pair counting
self.N = None; self.centers = None
if self.compute_mean_coords:
self.N = numpy.zeros(self.bins.shape)
self.centers = [numpy.zeros(self.bins.shape) for i in range(self.bins.Ndim)]
return self
def __exit__(self, type, value, traceback):
"""
Finalize the work, attaching the results of the work to the parent
`paircount` instance
The following attributes are attached:
`fullsum1`, `sum1`, `fullsum2`, `sum2`, `binning`, `edges`, `centers`,
`pair_counts`, `mean_centers_sum`, `mean_centers`
"""
self.pc.fullsum1 = self.sum1g.reshape(self.fullshape).copy()
self.pc.sum1 = self.pc.fullsum1[tuple([Ellipsis] + [slice(1, -1)] * self.bins.Ndim)]
self.pc.fullsum2 = None; self.pc.sum2 = None
if not self.pts_only:
self.pc.fullsum2 = self.sum2g.reshape(self.bins.shape).copy()
self.pc.sum2 = self.pc.fullsum2[tuple([slice(1, -1)] * self.bins.Ndim)]
self.pc.binning = self.bins
self.pc.edges = self.bins.edges
self.pc.centers = self.bins.centers
# add the mean centers info
if self.compute_mean_coords:
# store the full sum too
sl = tuple([slice(1, -1)] * self.bins.Ndim)
self.pc.pair_counts = self.N[sl]
self.pc.mean_centers_sum = []
# do the division too
self.pc.mean_centers = []
with numpy.errstate(invalid='ignore'):
for i in range(self.bins.Ndim):
self.pc.mean_centers_sum.append(self.centers[i][sl])
y = self.pc.mean_centers_sum[-1] / self.pc.pair_counts
self.pc.mean_centers.append(y)
if self.bins.Ndim == 1:
self.pc.mean_centers = self.pc.mean_centers[0]
#------------------------------------------------------------------------------
# main functions for testing
#------------------------------------------------------------------------------
def _main():
pm = numpy.fromfile('A00_hodfit.raw').reshape(-1, 8)[::1, :3]
wm = numpy.ones(len(pm))
martin = points(pm, wm)
pr = numpy.random.uniform(size=(1000000, 3))
wr = numpy.ones(len(pr))
random = points(pr, wr)
binning = RBinning(numpy.linspace(0, 0.1, 40))
DR = paircount(martin, random, binning)
DD = paircount(martin, martin, binning)
RR = paircount(random, random, binning)
r = martin.norm / random.norm
return binning.centers, (DD.sum1 -
2 * r * DR.sum1 + r ** 2 * RR.sum1) / (r ** 2 * RR.sum1)
def _main2():
sim = numpy.fromfile('grid-128.raw', dtype='f4')
pos = numpy.array(numpy.unravel_index(numpy.arange(sim.size),
(128, 128, 128))).T / 128.0
numpy.random.seed(1000)
sample = numpy.random.uniform(size=len(pos)) < 0.1
value = numpy.tile(sim[sample], (2, 1)).T
# value = sim[sample]
data = field(pos[sample], value=value)
print('data ready')
binning = RBinning(numpy.linspace(0, 0.1, 40))
DD = paircount(data, data, binning)
return DD.centers, DD.sum1 / DD.sum2
def _main3():
sim = numpy.fromfile('grid-128.raw', dtype='f4')
pos = numpy.array(numpy.unravel_index(numpy.arange(sim.size),
(128, 128, 128))).T / 128.0
numpy.random.seed(1000)
sample = numpy.random.uniform(size=len(pos)) < 0.2
value = numpy.tile(sim[sample], (2, 1)).T
data = field(pos[sample], value=value)
print('data ready')
rbins = numpy.linspace(0, 0.10, 8)
Nmu = 20
DD = paircount(data, data, RmuBinning(rbins, Nmu, 0.5))
return DD
|
rainwoodman/kdcount | kdcount/correlate.py | Binning._setup | python | def _setup(self):
dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')]
dtype = numpy.dtype(dtype)
self._info = numpy.empty(self.Ndim, dtype=dtype)
self.min = self._info['min']
self.max = self._info['max']
self.N = self._info['N']
self.inv = self._info['inv']
self.spacing = self._info['spacing']
for i, dim in enumerate(self.dims):
self.N[i] = len(self.edges[i])-1
self.min[i] = self.edges[i][0]
self.max[i] = self.edges[i][-1]
# determine the type of spacing
self.spacing[i] = None
lin_diff = numpy.diff(self.edges[i])
with numpy.errstate(divide='ignore', invalid='ignore'):
log_diff = numpy.diff(numpy.log10(self.edges[i]))
if numpy.allclose(lin_diff, lin_diff[0]):
self.spacing[i] = 'linspace'
self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i])
elif numpy.allclose(log_diff, log_diff[0]):
self.spacing[i] = 'logspace'
self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i])
self.shape = self.N + 2
# store Rmax
self.Rmax = self.max[0] | Set the binning info we need from the `edges` | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L135-L169 | null | class Binning(object):
"""
Binning of the correlation function. Pairs whose distance is
within a bin is counted towards the bin.
Attributes
----------
dims : array_like
internal; descriptors of binning dimensions.
edges : array_like
edges of bins per dimension
Class Attributes
----------------
enable_fast_node_count : bool
if True, use the C implementation of node-node pair counting; this only works for point datasets;
and does not properly compute the mean coordinates.
if False, use the Python implementation of point-point pair counting; the supports all features via
the `digitize` method of the binning object.
"""
enable_fast_node_count = False # if True, allow using the C implementation of node-node pair counting on point datasets.
def __init__(self, dims, edges, channels=None):
"""
Parameters
----------
dims : list
a list specifying the binning dimension names
edges : list
a list giving the bin edges for each dimension
channels: list, or None
a list giving the channels to count of per bin; these channels can be, e.g. multipoles.
"""
if len(dims) != len(edges):
raise ValueError("size mismatch between number of dimensions and edges supplied")
self.dims = dims
self.Ndim = len(self.dims)
self.edges = edges
self.channels = channels
self.centers = []
for i in range(self.Ndim):
center = 0.5 * (self.edges[i][1:] + self.edges[i][:-1])
self.centers.append(center)
# setup the info we need from the edges
self._setup()
if self.Ndim == 1:
self.edges = self.edges[0]
self.centers = self.centers[0]
def linear(self, **paircoords):
"""
Linearize bin indices.
This function is called by subclasses. Refer to the source
code of :py:class:`RBinning` for an example.
Parameters
----------
args : list
a list of bin index, (xi, yi, zi, ..)
Returns
-------
linearlized bin index
"""
N = len(paircoords[list(paircoords.keys())[0]])
integer = numpy.empty(N, ('i8', (self.Ndim,))).T
# do each dimension
for i, dim in enumerate(self.dims):
if self.spacing[i] == 'linspace':
x = paircoords[dim] - self.min[i]
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] == 'logspace':
x = paircoords[dim].copy()
x[x == 0] = self.min[i] * 0.9
x = numpy.log10(x / self.min[i])
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] is None:
edge = self.edges if self.Ndim == 1 else self.edges[i]
integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left')
return numpy.ravel_multi_index(integer, self.shape, mode='clip')
def digitize(self, r, i, j, data1, data2):
"""
Calculate the bin number of pairs separated by distances r,
Use :py:meth:`linear` to convert from multi-dimension bin index to
linear index.
Parameters
----------
r : array_like
separation
i, j : array_like
index (i, j) of pairs.
data1, data2 :
The position of first point is data1.pos[i], the position of second point is
data2.pos[j].
Returns
-------
dig : the integer bin number for each pair
paircoords: the coordinate of pairs, dictionary one array for each dimension, optional
if not provided, the bin center is not computed (may raise an error if requested)
weights : the weighting for each bin on each channel, of shape (nchannel, len(dig)), optional, only
used for multi-channel counts.
"""
raise NotImplementedError()
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None):
"""
The main function that digitizes the pair counts,
calls bincount for the appropriate `sum1` and `sum2`
values, and adds them to the input arrays,
will modify sum1, sum2, N, and centers_sum inplace.
"""
# the summation values for this (r,i,j)
sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2)
# digitize
digr = self.digitize(r, i, j, data1, data2)
if len(digr) == 3 and isinstance(digr[1], dict):
dig, paircoords, weights = digr
elif len(digr) == 2 and isinstance(digr[1], dict):
dig, paircoords = digr
weights = None
else:
dig = digr
paircoords = None
weights = None
# sum 1
def add_one_channel(sum1c, sum1_ijc):
if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1:
sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size)
else:
for d in range(sum1c.shape[0]):
sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size)
if self.channels:
if weights is None:
raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels")
sum1_ij = weights * sum1_ij
# sum1_ij[ichannel, dig, dim]
for ichannel in range(len(self.channels)):
add_one_channel(sum1[ichannel], sum1_ij[ichannel])
else:
# sum1_ij[dig, dim]
add_one_channel(sum1, sum1_ij)
# sum 2, if both data are not points
if not numpy.isscalar(sum2):
sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size)
if N is not None:
if not paircoords:
raise RuntimeError("Bin center is requested but not returned by digitize")
# update the mean coords
self._update_mean_coords(dig, N, centers_sum, **paircoords)
def sum_shapes(self, data1, data2):
"""
Return the shapes of the summation arrays,
given the input data and shape of the bins
"""
# the linear shape (put extra dimensions first)
linearshape = [-1] + list(self.shape)
# determine the full shape
subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)]
subshape = []
if len(subshapes) == 2:
assert subshapes[0] == subshapes[1]
subshape = subshapes[0]
elif len(subshapes) == 1:
subshape = subshapes[0]
fullshape = subshape + list(self.shape)
# prepend the shape for different channels
if self.channels:
fullshape = [len(self.channels)] + fullshape
return linearshape, fullshape
def _update_mean_coords(self, dig, N, centers_sum, **paircoords):
"""
Update the mean coordinate sums
"""
if N is None or centers_sum is None: return
N.flat[:] += utils.bincount(dig, 1., minlength=N.size)
for i, dim in enumerate(self.dims):
size = centers_sum[i].size
centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
def update_mean_coords(self, dig, N, centers_sum, **paircoords):
warnings.warn("update_mean_coords is deprecated. Return a dictionary of paircoords in digitize function instead.", DeprecationWarning, stacklevel=2)
self._update_mean_coords(dig, N, centers_sum, **paircoords)
|
rainwoodman/kdcount | kdcount/correlate.py | Binning.linear | python | def linear(self, **paircoords):
N = len(paircoords[list(paircoords.keys())[0]])
integer = numpy.empty(N, ('i8', (self.Ndim,))).T
# do each dimension
for i, dim in enumerate(self.dims):
if self.spacing[i] == 'linspace':
x = paircoords[dim] - self.min[i]
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] == 'logspace':
x = paircoords[dim].copy()
x[x == 0] = self.min[i] * 0.9
x = numpy.log10(x / self.min[i])
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] is None:
edge = self.edges if self.Ndim == 1 else self.edges[i]
integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left')
return numpy.ravel_multi_index(integer, self.shape, mode='clip') | Linearize bin indices.
This function is called by subclasses. Refer to the source
code of :py:class:`RBinning` for an example.
Parameters
----------
args : list
a list of bin index, (xi, yi, zi, ..)
Returns
-------
linearlized bin index | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L172-L208 | null | class Binning(object):
"""
Binning of the correlation function. Pairs whose distance is
within a bin is counted towards the bin.
Attributes
----------
dims : array_like
internal; descriptors of binning dimensions.
edges : array_like
edges of bins per dimension
Class Attributes
----------------
enable_fast_node_count : bool
if True, use the C implementation of node-node pair counting; this only works for point datasets;
and does not properly compute the mean coordinates.
if False, use the Python implementation of point-point pair counting; the supports all features via
the `digitize` method of the binning object.
"""
enable_fast_node_count = False # if True, allow using the C implementation of node-node pair counting on point datasets.
def __init__(self, dims, edges, channels=None):
"""
Parameters
----------
dims : list
a list specifying the binning dimension names
edges : list
a list giving the bin edges for each dimension
channels: list, or None
a list giving the channels to count of per bin; these channels can be, e.g. multipoles.
"""
if len(dims) != len(edges):
raise ValueError("size mismatch between number of dimensions and edges supplied")
self.dims = dims
self.Ndim = len(self.dims)
self.edges = edges
self.channels = channels
self.centers = []
for i in range(self.Ndim):
center = 0.5 * (self.edges[i][1:] + self.edges[i][:-1])
self.centers.append(center)
# setup the info we need from the edges
self._setup()
if self.Ndim == 1:
self.edges = self.edges[0]
self.centers = self.centers[0]
def _setup(self):
"""
Set the binning info we need from the `edges`
"""
dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')]
dtype = numpy.dtype(dtype)
self._info = numpy.empty(self.Ndim, dtype=dtype)
self.min = self._info['min']
self.max = self._info['max']
self.N = self._info['N']
self.inv = self._info['inv']
self.spacing = self._info['spacing']
for i, dim in enumerate(self.dims):
self.N[i] = len(self.edges[i])-1
self.min[i] = self.edges[i][0]
self.max[i] = self.edges[i][-1]
# determine the type of spacing
self.spacing[i] = None
lin_diff = numpy.diff(self.edges[i])
with numpy.errstate(divide='ignore', invalid='ignore'):
log_diff = numpy.diff(numpy.log10(self.edges[i]))
if numpy.allclose(lin_diff, lin_diff[0]):
self.spacing[i] = 'linspace'
self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i])
elif numpy.allclose(log_diff, log_diff[0]):
self.spacing[i] = 'logspace'
self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i])
self.shape = self.N + 2
# store Rmax
self.Rmax = self.max[0]
def digitize(self, r, i, j, data1, data2):
"""
Calculate the bin number of pairs separated by distances r,
Use :py:meth:`linear` to convert from multi-dimension bin index to
linear index.
Parameters
----------
r : array_like
separation
i, j : array_like
index (i, j) of pairs.
data1, data2 :
The position of first point is data1.pos[i], the position of second point is
data2.pos[j].
Returns
-------
dig : the integer bin number for each pair
paircoords: the coordinate of pairs, dictionary one array for each dimension, optional
if not provided, the bin center is not computed (may raise an error if requested)
weights : the weighting for each bin on each channel, of shape (nchannel, len(dig)), optional, only
used for multi-channel counts.
"""
raise NotImplementedError()
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None):
"""
The main function that digitizes the pair counts,
calls bincount for the appropriate `sum1` and `sum2`
values, and adds them to the input arrays,
will modify sum1, sum2, N, and centers_sum inplace.
"""
# the summation values for this (r,i,j)
sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2)
# digitize
digr = self.digitize(r, i, j, data1, data2)
if len(digr) == 3 and isinstance(digr[1], dict):
dig, paircoords, weights = digr
elif len(digr) == 2 and isinstance(digr[1], dict):
dig, paircoords = digr
weights = None
else:
dig = digr
paircoords = None
weights = None
# sum 1
def add_one_channel(sum1c, sum1_ijc):
if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1:
sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size)
else:
for d in range(sum1c.shape[0]):
sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size)
if self.channels:
if weights is None:
raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels")
sum1_ij = weights * sum1_ij
# sum1_ij[ichannel, dig, dim]
for ichannel in range(len(self.channels)):
add_one_channel(sum1[ichannel], sum1_ij[ichannel])
else:
# sum1_ij[dig, dim]
add_one_channel(sum1, sum1_ij)
# sum 2, if both data are not points
if not numpy.isscalar(sum2):
sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size)
if N is not None:
if not paircoords:
raise RuntimeError("Bin center is requested but not returned by digitize")
# update the mean coords
self._update_mean_coords(dig, N, centers_sum, **paircoords)
def sum_shapes(self, data1, data2):
"""
Return the shapes of the summation arrays,
given the input data and shape of the bins
"""
# the linear shape (put extra dimensions first)
linearshape = [-1] + list(self.shape)
# determine the full shape
subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)]
subshape = []
if len(subshapes) == 2:
assert subshapes[0] == subshapes[1]
subshape = subshapes[0]
elif len(subshapes) == 1:
subshape = subshapes[0]
fullshape = subshape + list(self.shape)
# prepend the shape for different channels
if self.channels:
fullshape = [len(self.channels)] + fullshape
return linearshape, fullshape
def _update_mean_coords(self, dig, N, centers_sum, **paircoords):
"""
Update the mean coordinate sums
"""
if N is None or centers_sum is None: return
N.flat[:] += utils.bincount(dig, 1., minlength=N.size)
for i, dim in enumerate(self.dims):
size = centers_sum[i].size
centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
def update_mean_coords(self, dig, N, centers_sum, **paircoords):
warnings.warn("update_mean_coords is deprecated. Return a dictionary of paircoords in digitize function instead.", DeprecationWarning, stacklevel=2)
self._update_mean_coords(dig, N, centers_sum, **paircoords)
|
rainwoodman/kdcount | kdcount/correlate.py | Binning.update_sums | python | def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None):
# the summation values for this (r,i,j)
sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2)
# digitize
digr = self.digitize(r, i, j, data1, data2)
if len(digr) == 3 and isinstance(digr[1], dict):
dig, paircoords, weights = digr
elif len(digr) == 2 and isinstance(digr[1], dict):
dig, paircoords = digr
weights = None
else:
dig = digr
paircoords = None
weights = None
# sum 1
def add_one_channel(sum1c, sum1_ijc):
if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1:
sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size)
else:
for d in range(sum1c.shape[0]):
sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size)
if self.channels:
if weights is None:
raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels")
sum1_ij = weights * sum1_ij
# sum1_ij[ichannel, dig, dim]
for ichannel in range(len(self.channels)):
add_one_channel(sum1[ichannel], sum1_ij[ichannel])
else:
# sum1_ij[dig, dim]
add_one_channel(sum1, sum1_ij)
# sum 2, if both data are not points
if not numpy.isscalar(sum2):
sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size)
if N is not None:
if not paircoords:
raise RuntimeError("Bin center is requested but not returned by digitize")
# update the mean coords
self._update_mean_coords(dig, N, centers_sum, **paircoords) | The main function that digitizes the pair counts,
calls bincount for the appropriate `sum1` and `sum2`
values, and adds them to the input arrays,
will modify sum1, sum2, N, and centers_sum inplace. | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L238-L291 | [
"def bincount(dig, weight, minlength):\n \"\"\" bincount supporting scalar and vector weight \"\"\"\n if numpy.isscalar(weight):\n return numpy.bincount(dig, minlength=minlength) * weight\n else:\n return numpy.bincount(dig, weight, minlength)\n",
"def compute_sum_values(i, j, data1, data2):\n \"\"\"\n Return the sum1_ij and sum2_ij values given\n the input indices and data instances.\n\n Notes\n -----\n This is called in `Binning.update_sums` to compute\n the `sum1` and `sum2` contributions for indices `(i,j)`\n\n Parameters\n ----------\n i,j : array_like\n the bin indices for these pairs\n data1, data2 : `points`, `field` instances\n the two `points` or `field` objects\n\n Returns\n -------\n sum1_ij, sum2_ij : float, array_like (N,...)\n contributions to sum1, sum2 -- either a float or array\n of shape (N, ...) where N is the length of `i`, `j`\n \"\"\"\n sum1_ij = 1.\n for idx, d in zip([i,j], [data1, data2]):\n if isinstance(d, field): sum1_ij *= d.wvalue[idx]\n elif isinstance(d, points): sum1_ij *= d.weights[idx]\n else:\n raise NotImplementedError(\"data type not recognized\")\n sum2_ij = data1.weights[i] * data2.weights[j]\n\n return sum1_ij, sum2_ij\n",
"def digitize(self, r, i, j, data1, data2):\n \"\"\"\n Calculate the bin number of pairs separated by distances r,\n Use :py:meth:`linear` to convert from multi-dimension bin index to\n linear index.\n\n Parameters\n ----------\n r : array_like\n separation\n\n i, j : array_like\n index (i, j) of pairs.\n\n data1, data2 :\n The position of first point is data1.pos[i], the position of second point is\n data2.pos[j].\n\n Returns\n -------\n dig : the integer bin number for each pair\n paircoords: the coordinate of pairs, dictionary one array for each dimension, optional\n if not provided, the bin center is not computed (may raise an error if requested)\n weights : the weighting for each bin on each channel, of shape (nchannel, len(dig)), optional, only\n used for multi-channel counts.\n \"\"\"\n raise NotImplementedError()\n",
"def _update_mean_coords(self, dig, N, centers_sum, **paircoords):\n \"\"\"\n Update the mean coordinate sums\n \"\"\"\n if N is None or centers_sum is None: return\n\n N.flat[:] += utils.bincount(dig, 1., minlength=N.size)\n for i, dim in enumerate(self.dims):\n size = centers_sum[i].size\n centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)\n",
"def add_one_channel(sum1c, sum1_ijc):\n if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1:\n sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size)\n else:\n for d in range(sum1c.shape[0]):\n sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size)\n"
] | class Binning(object):
"""
Binning of the correlation function. Pairs whose distance is
within a bin is counted towards the bin.
Attributes
----------
dims : array_like
internal; descriptors of binning dimensions.
edges : array_like
edges of bins per dimension
Class Attributes
----------------
enable_fast_node_count : bool
if True, use the C implementation of node-node pair counting; this only works for point datasets;
and does not properly compute the mean coordinates.
if False, use the Python implementation of point-point pair counting; the supports all features via
the `digitize` method of the binning object.
"""
enable_fast_node_count = False # if True, allow using the C implementation of node-node pair counting on point datasets.
def __init__(self, dims, edges, channels=None):
"""
Parameters
----------
dims : list
a list specifying the binning dimension names
edges : list
a list giving the bin edges for each dimension
channels: list, or None
a list giving the channels to count of per bin; these channels can be, e.g. multipoles.
"""
if len(dims) != len(edges):
raise ValueError("size mismatch between number of dimensions and edges supplied")
self.dims = dims
self.Ndim = len(self.dims)
self.edges = edges
self.channels = channels
self.centers = []
for i in range(self.Ndim):
center = 0.5 * (self.edges[i][1:] + self.edges[i][:-1])
self.centers.append(center)
# setup the info we need from the edges
self._setup()
if self.Ndim == 1:
self.edges = self.edges[0]
self.centers = self.centers[0]
def _setup(self):
"""
Set the binning info we need from the `edges`
"""
dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')]
dtype = numpy.dtype(dtype)
self._info = numpy.empty(self.Ndim, dtype=dtype)
self.min = self._info['min']
self.max = self._info['max']
self.N = self._info['N']
self.inv = self._info['inv']
self.spacing = self._info['spacing']
for i, dim in enumerate(self.dims):
self.N[i] = len(self.edges[i])-1
self.min[i] = self.edges[i][0]
self.max[i] = self.edges[i][-1]
# determine the type of spacing
self.spacing[i] = None
lin_diff = numpy.diff(self.edges[i])
with numpy.errstate(divide='ignore', invalid='ignore'):
log_diff = numpy.diff(numpy.log10(self.edges[i]))
if numpy.allclose(lin_diff, lin_diff[0]):
self.spacing[i] = 'linspace'
self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i])
elif numpy.allclose(log_diff, log_diff[0]):
self.spacing[i] = 'logspace'
self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i])
self.shape = self.N + 2
# store Rmax
self.Rmax = self.max[0]
def linear(self, **paircoords):
"""
Linearize bin indices.
This function is called by subclasses. Refer to the source
code of :py:class:`RBinning` for an example.
Parameters
----------
args : list
a list of bin index, (xi, yi, zi, ..)
Returns
-------
linearlized bin index
"""
N = len(paircoords[list(paircoords.keys())[0]])
integer = numpy.empty(N, ('i8', (self.Ndim,))).T
# do each dimension
for i, dim in enumerate(self.dims):
if self.spacing[i] == 'linspace':
x = paircoords[dim] - self.min[i]
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] == 'logspace':
x = paircoords[dim].copy()
x[x == 0] = self.min[i] * 0.9
x = numpy.log10(x / self.min[i])
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] is None:
edge = self.edges if self.Ndim == 1 else self.edges[i]
integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left')
return numpy.ravel_multi_index(integer, self.shape, mode='clip')
def digitize(self, r, i, j, data1, data2):
"""
Calculate the bin number of pairs separated by distances r,
Use :py:meth:`linear` to convert from multi-dimension bin index to
linear index.
Parameters
----------
r : array_like
separation
i, j : array_like
index (i, j) of pairs.
data1, data2 :
The position of first point is data1.pos[i], the position of second point is
data2.pos[j].
Returns
-------
dig : the integer bin number for each pair
paircoords: the coordinate of pairs, dictionary one array for each dimension, optional
if not provided, the bin center is not computed (may raise an error if requested)
weights : the weighting for each bin on each channel, of shape (nchannel, len(dig)), optional, only
used for multi-channel counts.
"""
raise NotImplementedError()
def sum_shapes(self, data1, data2):
"""
Return the shapes of the summation arrays,
given the input data and shape of the bins
"""
# the linear shape (put extra dimensions first)
linearshape = [-1] + list(self.shape)
# determine the full shape
subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)]
subshape = []
if len(subshapes) == 2:
assert subshapes[0] == subshapes[1]
subshape = subshapes[0]
elif len(subshapes) == 1:
subshape = subshapes[0]
fullshape = subshape + list(self.shape)
# prepend the shape for different channels
if self.channels:
fullshape = [len(self.channels)] + fullshape
return linearshape, fullshape
def _update_mean_coords(self, dig, N, centers_sum, **paircoords):
"""
Update the mean coordinate sums
"""
if N is None or centers_sum is None: return
N.flat[:] += utils.bincount(dig, 1., minlength=N.size)
for i, dim in enumerate(self.dims):
size = centers_sum[i].size
centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
def update_mean_coords(self, dig, N, centers_sum, **paircoords):
warnings.warn("update_mean_coords is deprecated. Return a dictionary of paircoords in digitize function instead.", DeprecationWarning, stacklevel=2)
self._update_mean_coords(dig, N, centers_sum, **paircoords)
|
rainwoodman/kdcount | kdcount/correlate.py | Binning.sum_shapes | python | def sum_shapes(self, data1, data2):
# the linear shape (put extra dimensions first)
linearshape = [-1] + list(self.shape)
# determine the full shape
subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)]
subshape = []
if len(subshapes) == 2:
assert subshapes[0] == subshapes[1]
subshape = subshapes[0]
elif len(subshapes) == 1:
subshape = subshapes[0]
fullshape = subshape + list(self.shape)
# prepend the shape for different channels
if self.channels:
fullshape = [len(self.channels)] + fullshape
return linearshape, fullshape | Return the shapes of the summation arrays,
given the input data and shape of the bins | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L293-L315 | null | class Binning(object):
"""
Binning of the correlation function. Pairs whose distance is
within a bin is counted towards the bin.
Attributes
----------
dims : array_like
internal; descriptors of binning dimensions.
edges : array_like
edges of bins per dimension
Class Attributes
----------------
enable_fast_node_count : bool
if True, use the C implementation of node-node pair counting; this only works for point datasets;
and does not properly compute the mean coordinates.
if False, use the Python implementation of point-point pair counting; the supports all features via
the `digitize` method of the binning object.
"""
enable_fast_node_count = False # if True, allow using the C implementation of node-node pair counting on point datasets.
def __init__(self, dims, edges, channels=None):
"""
Parameters
----------
dims : list
a list specifying the binning dimension names
edges : list
a list giving the bin edges for each dimension
channels: list, or None
a list giving the channels to count of per bin; these channels can be, e.g. multipoles.
"""
if len(dims) != len(edges):
raise ValueError("size mismatch between number of dimensions and edges supplied")
self.dims = dims
self.Ndim = len(self.dims)
self.edges = edges
self.channels = channels
self.centers = []
for i in range(self.Ndim):
center = 0.5 * (self.edges[i][1:] + self.edges[i][:-1])
self.centers.append(center)
# setup the info we need from the edges
self._setup()
if self.Ndim == 1:
self.edges = self.edges[0]
self.centers = self.centers[0]
def _setup(self):
"""
Set the binning info we need from the `edges`
"""
dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')]
dtype = numpy.dtype(dtype)
self._info = numpy.empty(self.Ndim, dtype=dtype)
self.min = self._info['min']
self.max = self._info['max']
self.N = self._info['N']
self.inv = self._info['inv']
self.spacing = self._info['spacing']
for i, dim in enumerate(self.dims):
self.N[i] = len(self.edges[i])-1
self.min[i] = self.edges[i][0]
self.max[i] = self.edges[i][-1]
# determine the type of spacing
self.spacing[i] = None
lin_diff = numpy.diff(self.edges[i])
with numpy.errstate(divide='ignore', invalid='ignore'):
log_diff = numpy.diff(numpy.log10(self.edges[i]))
if numpy.allclose(lin_diff, lin_diff[0]):
self.spacing[i] = 'linspace'
self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i])
elif numpy.allclose(log_diff, log_diff[0]):
self.spacing[i] = 'logspace'
self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i])
self.shape = self.N + 2
# store Rmax
self.Rmax = self.max[0]
def linear(self, **paircoords):
"""
Linearize bin indices.
This function is called by subclasses. Refer to the source
code of :py:class:`RBinning` for an example.
Parameters
----------
args : list
a list of bin index, (xi, yi, zi, ..)
Returns
-------
linearlized bin index
"""
N = len(paircoords[list(paircoords.keys())[0]])
integer = numpy.empty(N, ('i8', (self.Ndim,))).T
# do each dimension
for i, dim in enumerate(self.dims):
if self.spacing[i] == 'linspace':
x = paircoords[dim] - self.min[i]
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] == 'logspace':
x = paircoords[dim].copy()
x[x == 0] = self.min[i] * 0.9
x = numpy.log10(x / self.min[i])
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] is None:
edge = self.edges if self.Ndim == 1 else self.edges[i]
integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left')
return numpy.ravel_multi_index(integer, self.shape, mode='clip')
def digitize(self, r, i, j, data1, data2):
"""
Calculate the bin number of pairs separated by distances r,
Use :py:meth:`linear` to convert from multi-dimension bin index to
linear index.
Parameters
----------
r : array_like
separation
i, j : array_like
index (i, j) of pairs.
data1, data2 :
The position of first point is data1.pos[i], the position of second point is
data2.pos[j].
Returns
-------
dig : the integer bin number for each pair
paircoords: the coordinate of pairs, dictionary one array for each dimension, optional
if not provided, the bin center is not computed (may raise an error if requested)
weights : the weighting for each bin on each channel, of shape (nchannel, len(dig)), optional, only
used for multi-channel counts.
"""
raise NotImplementedError()
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None):
"""
The main function that digitizes the pair counts,
calls bincount for the appropriate `sum1` and `sum2`
values, and adds them to the input arrays,
will modify sum1, sum2, N, and centers_sum inplace.
"""
# the summation values for this (r,i,j)
sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2)
# digitize
digr = self.digitize(r, i, j, data1, data2)
if len(digr) == 3 and isinstance(digr[1], dict):
dig, paircoords, weights = digr
elif len(digr) == 2 and isinstance(digr[1], dict):
dig, paircoords = digr
weights = None
else:
dig = digr
paircoords = None
weights = None
# sum 1
def add_one_channel(sum1c, sum1_ijc):
if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1:
sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size)
else:
for d in range(sum1c.shape[0]):
sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size)
if self.channels:
if weights is None:
raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels")
sum1_ij = weights * sum1_ij
# sum1_ij[ichannel, dig, dim]
for ichannel in range(len(self.channels)):
add_one_channel(sum1[ichannel], sum1_ij[ichannel])
else:
# sum1_ij[dig, dim]
add_one_channel(sum1, sum1_ij)
# sum 2, if both data are not points
if not numpy.isscalar(sum2):
sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size)
if N is not None:
if not paircoords:
raise RuntimeError("Bin center is requested but not returned by digitize")
# update the mean coords
self._update_mean_coords(dig, N, centers_sum, **paircoords)
def _update_mean_coords(self, dig, N, centers_sum, **paircoords):
"""
Update the mean coordinate sums
"""
if N is None or centers_sum is None: return
N.flat[:] += utils.bincount(dig, 1., minlength=N.size)
for i, dim in enumerate(self.dims):
size = centers_sum[i].size
centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size)
def update_mean_coords(self, dig, N, centers_sum, **paircoords):
warnings.warn("update_mean_coords is deprecated. Return a dictionary of paircoords in digitize function instead.", DeprecationWarning, stacklevel=2)
self._update_mean_coords(dig, N, centers_sum, **paircoords)
|
rainwoodman/kdcount | kdcount/correlate.py | Binning._update_mean_coords | python | def _update_mean_coords(self, dig, N, centers_sum, **paircoords):
if N is None or centers_sum is None: return
N.flat[:] += utils.bincount(dig, 1., minlength=N.size)
for i, dim in enumerate(self.dims):
size = centers_sum[i].size
centers_sum[i].flat[:] += utils.bincount(dig, paircoords[dim], minlength=size) | Update the mean coordinate sums | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L317-L326 | [
"def bincount(dig, weight, minlength):\n \"\"\" bincount supporting scalar and vector weight \"\"\"\n if numpy.isscalar(weight):\n return numpy.bincount(dig, minlength=minlength) * weight\n else:\n return numpy.bincount(dig, weight, minlength)\n"
] | class Binning(object):
"""
Binning of the correlation function. Pairs whose distance is
within a bin is counted towards the bin.
Attributes
----------
dims : array_like
internal; descriptors of binning dimensions.
edges : array_like
edges of bins per dimension
Class Attributes
----------------
enable_fast_node_count : bool
if True, use the C implementation of node-node pair counting; this only works for point datasets;
and does not properly compute the mean coordinates.
if False, use the Python implementation of point-point pair counting; the supports all features via
the `digitize` method of the binning object.
"""
enable_fast_node_count = False # if True, allow using the C implementation of node-node pair counting on point datasets.
def __init__(self, dims, edges, channels=None):
"""
Parameters
----------
dims : list
a list specifying the binning dimension names
edges : list
a list giving the bin edges for each dimension
channels: list, or None
a list giving the channels to count of per bin; these channels can be, e.g. multipoles.
"""
if len(dims) != len(edges):
raise ValueError("size mismatch between number of dimensions and edges supplied")
self.dims = dims
self.Ndim = len(self.dims)
self.edges = edges
self.channels = channels
self.centers = []
for i in range(self.Ndim):
center = 0.5 * (self.edges[i][1:] + self.edges[i][:-1])
self.centers.append(center)
# setup the info we need from the edges
self._setup()
if self.Ndim == 1:
self.edges = self.edges[0]
self.centers = self.centers[0]
def _setup(self):
"""
Set the binning info we need from the `edges`
"""
dtype = [('inv', 'f8'), ('min', 'f8'), ('max', 'f8'),('N', 'i4'), ('spacing','object')]
dtype = numpy.dtype(dtype)
self._info = numpy.empty(self.Ndim, dtype=dtype)
self.min = self._info['min']
self.max = self._info['max']
self.N = self._info['N']
self.inv = self._info['inv']
self.spacing = self._info['spacing']
for i, dim in enumerate(self.dims):
self.N[i] = len(self.edges[i])-1
self.min[i] = self.edges[i][0]
self.max[i] = self.edges[i][-1]
# determine the type of spacing
self.spacing[i] = None
lin_diff = numpy.diff(self.edges[i])
with numpy.errstate(divide='ignore', invalid='ignore'):
log_diff = numpy.diff(numpy.log10(self.edges[i]))
if numpy.allclose(lin_diff, lin_diff[0]):
self.spacing[i] = 'linspace'
self.inv[i] = self.N[i] * 1.0 / (self.max[i] - self.min[i])
elif numpy.allclose(log_diff, log_diff[0]):
self.spacing[i] = 'logspace'
self.inv[i] = self.N[i] * 1.0 / numpy.log10(self.max[i] / self.min[i])
self.shape = self.N + 2
# store Rmax
self.Rmax = self.max[0]
def linear(self, **paircoords):
"""
Linearize bin indices.
This function is called by subclasses. Refer to the source
code of :py:class:`RBinning` for an example.
Parameters
----------
args : list
a list of bin index, (xi, yi, zi, ..)
Returns
-------
linearlized bin index
"""
N = len(paircoords[list(paircoords.keys())[0]])
integer = numpy.empty(N, ('i8', (self.Ndim,))).T
# do each dimension
for i, dim in enumerate(self.dims):
if self.spacing[i] == 'linspace':
x = paircoords[dim] - self.min[i]
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] == 'logspace':
x = paircoords[dim].copy()
x[x == 0] = self.min[i] * 0.9
x = numpy.log10(x / self.min[i])
integer[i] = numpy.ceil(x * self.inv[i])
elif self.spacing[i] is None:
edge = self.edges if self.Ndim == 1 else self.edges[i]
integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left')
return numpy.ravel_multi_index(integer, self.shape, mode='clip')
def digitize(self, r, i, j, data1, data2):
"""
Calculate the bin number of pairs separated by distances r,
Use :py:meth:`linear` to convert from multi-dimension bin index to
linear index.
Parameters
----------
r : array_like
separation
i, j : array_like
index (i, j) of pairs.
data1, data2 :
The position of first point is data1.pos[i], the position of second point is
data2.pos[j].
Returns
-------
dig : the integer bin number for each pair
paircoords: the coordinate of pairs, dictionary one array for each dimension, optional
if not provided, the bin center is not computed (may raise an error if requested)
weights : the weighting for each bin on each channel, of shape (nchannel, len(dig)), optional, only
used for multi-channel counts.
"""
raise NotImplementedError()
def update_sums(self, r, i, j, data1, data2, sum1, sum2, N=None, centers_sum=None):
"""
The main function that digitizes the pair counts,
calls bincount for the appropriate `sum1` and `sum2`
values, and adds them to the input arrays,
will modify sum1, sum2, N, and centers_sum inplace.
"""
# the summation values for this (r,i,j)
sum1_ij, sum2_ij = compute_sum_values(i, j, data1, data2)
# digitize
digr = self.digitize(r, i, j, data1, data2)
if len(digr) == 3 and isinstance(digr[1], dict):
dig, paircoords, weights = digr
elif len(digr) == 2 and isinstance(digr[1], dict):
dig, paircoords = digr
weights = None
else:
dig = digr
paircoords = None
weights = None
# sum 1
def add_one_channel(sum1c, sum1_ijc):
if numpy.isscalar(sum1_ijc) or sum1_ijc.ndim == 1:
sum1c.flat[:] += utils.bincount(dig, sum1_ijc, minlength=sum1c.size)
else:
for d in range(sum1c.shape[0]):
sum1c[d].flat[:] += utils.bincount(dig, sum1_ijc[...,d], minlength=sum1c[d].size)
if self.channels:
if weights is None:
raise RuntimeError("`digitize` of multi channel paircount did not return a weight array for the channels")
sum1_ij = weights * sum1_ij
# sum1_ij[ichannel, dig, dim]
for ichannel in range(len(self.channels)):
add_one_channel(sum1[ichannel], sum1_ij[ichannel])
else:
# sum1_ij[dig, dim]
add_one_channel(sum1, sum1_ij)
# sum 2, if both data are not points
if not numpy.isscalar(sum2):
sum2.flat[:] += utils.bincount(dig, sum2_ij, minlength=sum2.size)
if N is not None:
if not paircoords:
raise RuntimeError("Bin center is requested but not returned by digitize")
# update the mean coords
self._update_mean_coords(dig, N, centers_sum, **paircoords)
def sum_shapes(self, data1, data2):
"""
Return the shapes of the summation arrays,
given the input data and shape of the bins
"""
# the linear shape (put extra dimensions first)
linearshape = [-1] + list(self.shape)
# determine the full shape
subshapes = [list(d.subshape) for d in [data1, data2] if isinstance(d, field)]
subshape = []
if len(subshapes) == 2:
assert subshapes[0] == subshapes[1]
subshape = subshapes[0]
elif len(subshapes) == 1:
subshape = subshapes[0]
fullshape = subshape + list(self.shape)
# prepend the shape for different channels
if self.channels:
fullshape = [len(self.channels)] + fullshape
return linearshape, fullshape
def update_mean_coords(self, dig, N, centers_sum, **paircoords):
warnings.warn("update_mean_coords is deprecated. Return a dictionary of paircoords in digitize function instead.", DeprecationWarning, stacklevel=2)
self._update_mean_coords(dig, N, centers_sum, **paircoords)
|
rainwoodman/kdcount | kdcount/correlate.py | paircount_queue.work | python | def work(self, i):
n1, n2 = self.p[i]
# initialize the total arrays for this process
sum1 = numpy.zeros_like(self.sum1g)
sum2 = 1.
if not self.pts_only: sum2 = numpy.zeros_like(self.sum2g)
if self.compute_mean_coords:
N = numpy.zeros_like(self.N)
centers_sum = [numpy.zeros_like(c) for c in self.centers]
else:
N = None; centers_sum = None
if self.bins.enable_fast_node_count:
# field x points is not supported.
# because it is more likely need to deal
# with broadcasting
sum1attrs = [ d.attr for d in self.data ]
counts, sum1c = n1.count(n2, self.bins.edges,
attrs=sum1attrs)
sum1[..., :-1] = sum1c
sum1[..., -1] = 0
else:
def callback(r, i, j):
# just call the binning function, passing the
# sum arrays to fill in
self.bins.update_sums(r, i, j, self.data[0], self.data[1], sum1, sum2, N=N, centers_sum=centers_sum)
n1.enum(n2, self.bins.Rmax, process=callback)
if not self.compute_mean_coords:
return sum1, sum2
else:
return sum1, sum2, N, centers_sum | Internal function that performs the pair-counting | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L739-L778 | null | class paircount_queue(object):
"""
A queue of paircount jobs. roughly size_hint jobs are created, and they are
reduced to the paircount objects when the queue is joined.
"""
def __init__(self, pc, binning, data, size_hint, compute_mean_coords, pts_only):
"""
Parameters
----------
pc : `paircount`
the parent pair count object, which we will attach the final results to
binning : `Binning`
the binning instance
data : tuple
tuple of the two data trees that we are correlating
size_hint : int, optional
the number of jobs to create, as an hint. None or zero to create 1 job.
compute_mean_coords : bool, optional
whether to compute the average coordinate value of each pair per bin
"""
self.pc = pc
self.bins = binning
self.data = data
self.size_hint = size_hint
self.compute_mean_coords = compute_mean_coords
self.pts_only = pts_only
def reduce(self, sum1, sum2, *args):
"""
The internal reduce function that sums the results from various
processors
"""
self.sum1g[...] += sum1
if not self.pts_only: self.sum2g[...] += sum2
if self.compute_mean_coords:
N, centers_sum = args
self.N[...] += N
for i in range(self.bins.Ndim):
self.centers[i][...] += centers_sum[i]
def _partition(self, tree1, tree2, size_hint=128):
if size_hint is None or size_hint == 0: # serial mode
return [(tree1, tree2)]
import heapq
def makeitem(n1, n2):
if n1.size > n2.size:
return (-n1.size, 0, (n1, n2))
else:
return (-n2.size, 1, (n1, n2))
heap = []
heapq.heappush(heap, makeitem(tree1, tree2))
while len(heap) < size_hint:
junk, split, n = heapq.heappop(heap)
if n[split].less is None:
# put it back!
heapq.heappush(heap, makeitem(*n))
break
item = list(n)
item[split] = n[split].less
heapq.heappush(heap, makeitem(*item))
item[split] = n[split].greater
heapq.heappush(heap, makeitem(*item))
p = []
while heap:
junk, split, n = heapq.heappop(heap)
p.append(n)
return p
def __enter__(self):
"""
Initialize and setup the various arrays needed to do the work
"""
tree1 = self.data[0].tree.root
tree2 = self.data[1].tree.root
self.p = self._partition(tree1, tree2, self.size_hint)
self.size = len(self.p)
# initialize arrays to hold total sum1 and sum2
# grabbing the desired shapes from the binning instance
linearshape, self.fullshape = self.bins.sum_shapes(*self.data)
self.sum1g = numpy.zeros(self.fullshape, dtype='f8').reshape(linearshape)
if not self.pts_only:
self.sum2g = numpy.zeros(self.bins.shape, dtype='f8').reshape(linearshape)
# initialize arrays for computing mean coords
# for storing the mean values in each bin
# computed when pair counting
self.N = None; self.centers = None
if self.compute_mean_coords:
self.N = numpy.zeros(self.bins.shape)
self.centers = [numpy.zeros(self.bins.shape) for i in range(self.bins.Ndim)]
return self
def __exit__(self, type, value, traceback):
"""
Finalize the work, attaching the results of the work to the parent
`paircount` instance
The following attributes are attached:
`fullsum1`, `sum1`, `fullsum2`, `sum2`, `binning`, `edges`, `centers`,
`pair_counts`, `mean_centers_sum`, `mean_centers`
"""
self.pc.fullsum1 = self.sum1g.reshape(self.fullshape).copy()
self.pc.sum1 = self.pc.fullsum1[tuple([Ellipsis] + [slice(1, -1)] * self.bins.Ndim)]
self.pc.fullsum2 = None; self.pc.sum2 = None
if not self.pts_only:
self.pc.fullsum2 = self.sum2g.reshape(self.bins.shape).copy()
self.pc.sum2 = self.pc.fullsum2[tuple([slice(1, -1)] * self.bins.Ndim)]
self.pc.binning = self.bins
self.pc.edges = self.bins.edges
self.pc.centers = self.bins.centers
# add the mean centers info
if self.compute_mean_coords:
# store the full sum too
sl = tuple([slice(1, -1)] * self.bins.Ndim)
self.pc.pair_counts = self.N[sl]
self.pc.mean_centers_sum = []
# do the division too
self.pc.mean_centers = []
with numpy.errstate(invalid='ignore'):
for i in range(self.bins.Ndim):
self.pc.mean_centers_sum.append(self.centers[i][sl])
y = self.pc.mean_centers_sum[-1] / self.pc.pair_counts
self.pc.mean_centers.append(y)
if self.bins.Ndim == 1:
self.pc.mean_centers = self.pc.mean_centers[0]
|
rainwoodman/kdcount | kdcount/correlate.py | paircount_queue.reduce | python | def reduce(self, sum1, sum2, *args):
self.sum1g[...] += sum1
if not self.pts_only: self.sum2g[...] += sum2
if self.compute_mean_coords:
N, centers_sum = args
self.N[...] += N
for i in range(self.bins.Ndim):
self.centers[i][...] += centers_sum[i] | The internal reduce function that sums the results from various
processors | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/correlate.py#L780-L792 | null | class paircount_queue(object):
"""
A queue of paircount jobs. roughly size_hint jobs are created, and they are
reduced to the paircount objects when the queue is joined.
"""
def __init__(self, pc, binning, data, size_hint, compute_mean_coords, pts_only):
"""
Parameters
----------
pc : `paircount`
the parent pair count object, which we will attach the final results to
binning : `Binning`
the binning instance
data : tuple
tuple of the two data trees that we are correlating
size_hint : int, optional
the number of jobs to create, as an hint. None or zero to create 1 job.
compute_mean_coords : bool, optional
whether to compute the average coordinate value of each pair per bin
"""
self.pc = pc
self.bins = binning
self.data = data
self.size_hint = size_hint
self.compute_mean_coords = compute_mean_coords
self.pts_only = pts_only
def work(self, i):
"""
Internal function that performs the pair-counting
"""
n1, n2 = self.p[i]
# initialize the total arrays for this process
sum1 = numpy.zeros_like(self.sum1g)
sum2 = 1.
if not self.pts_only: sum2 = numpy.zeros_like(self.sum2g)
if self.compute_mean_coords:
N = numpy.zeros_like(self.N)
centers_sum = [numpy.zeros_like(c) for c in self.centers]
else:
N = None; centers_sum = None
if self.bins.enable_fast_node_count:
# field x points is not supported.
# because it is more likely need to deal
# with broadcasting
sum1attrs = [ d.attr for d in self.data ]
counts, sum1c = n1.count(n2, self.bins.edges,
attrs=sum1attrs)
sum1[..., :-1] = sum1c
sum1[..., -1] = 0
else:
def callback(r, i, j):
# just call the binning function, passing the
# sum arrays to fill in
self.bins.update_sums(r, i, j, self.data[0], self.data[1], sum1, sum2, N=N, centers_sum=centers_sum)
n1.enum(n2, self.bins.Rmax, process=callback)
if not self.compute_mean_coords:
return sum1, sum2
else:
return sum1, sum2, N, centers_sum
def _partition(self, tree1, tree2, size_hint=128):
if size_hint is None or size_hint == 0: # serial mode
return [(tree1, tree2)]
import heapq
def makeitem(n1, n2):
if n1.size > n2.size:
return (-n1.size, 0, (n1, n2))
else:
return (-n2.size, 1, (n1, n2))
heap = []
heapq.heappush(heap, makeitem(tree1, tree2))
while len(heap) < size_hint:
junk, split, n = heapq.heappop(heap)
if n[split].less is None:
# put it back!
heapq.heappush(heap, makeitem(*n))
break
item = list(n)
item[split] = n[split].less
heapq.heappush(heap, makeitem(*item))
item[split] = n[split].greater
heapq.heappush(heap, makeitem(*item))
p = []
while heap:
junk, split, n = heapq.heappop(heap)
p.append(n)
return p
def __enter__(self):
"""
Initialize and setup the various arrays needed to do the work
"""
tree1 = self.data[0].tree.root
tree2 = self.data[1].tree.root
self.p = self._partition(tree1, tree2, self.size_hint)
self.size = len(self.p)
# initialize arrays to hold total sum1 and sum2
# grabbing the desired shapes from the binning instance
linearshape, self.fullshape = self.bins.sum_shapes(*self.data)
self.sum1g = numpy.zeros(self.fullshape, dtype='f8').reshape(linearshape)
if not self.pts_only:
self.sum2g = numpy.zeros(self.bins.shape, dtype='f8').reshape(linearshape)
# initialize arrays for computing mean coords
# for storing the mean values in each bin
# computed when pair counting
self.N = None; self.centers = None
if self.compute_mean_coords:
self.N = numpy.zeros(self.bins.shape)
self.centers = [numpy.zeros(self.bins.shape) for i in range(self.bins.Ndim)]
return self
def __exit__(self, type, value, traceback):
"""
Finalize the work, attaching the results of the work to the parent
`paircount` instance
The following attributes are attached:
`fullsum1`, `sum1`, `fullsum2`, `sum2`, `binning`, `edges`, `centers`,
`pair_counts`, `mean_centers_sum`, `mean_centers`
"""
self.pc.fullsum1 = self.sum1g.reshape(self.fullshape).copy()
self.pc.sum1 = self.pc.fullsum1[tuple([Ellipsis] + [slice(1, -1)] * self.bins.Ndim)]
self.pc.fullsum2 = None; self.pc.sum2 = None
if not self.pts_only:
self.pc.fullsum2 = self.sum2g.reshape(self.bins.shape).copy()
self.pc.sum2 = self.pc.fullsum2[tuple([slice(1, -1)] * self.bins.Ndim)]
self.pc.binning = self.bins
self.pc.edges = self.bins.edges
self.pc.centers = self.bins.centers
# add the mean centers info
if self.compute_mean_coords:
# store the full sum too
sl = tuple([slice(1, -1)] * self.bins.Ndim)
self.pc.pair_counts = self.N[sl]
self.pc.mean_centers_sum = []
# do the division too
self.pc.mean_centers = []
with numpy.errstate(invalid='ignore'):
for i in range(self.bins.Ndim):
self.pc.mean_centers_sum.append(self.centers[i][sl])
y = self.pc.mean_centers_sum[-1] / self.pc.pair_counts
self.pc.mean_centers.append(y)
if self.bins.Ndim == 1:
self.pc.mean_centers = self.pc.mean_centers[0]
|
rainwoodman/kdcount | kdcount/cluster.py | fof.find | python | def find(self, groupid):
return self.indices[self.offset[groupid]
:self.offset[groupid]+ self.length[groupid]] | return all of the indices of particles of groupid | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/cluster.py#L79-L82 | null | class fof(object):
"""
Friend of Friend clustering
Attributes
----------
data : :py:class:`kdcount.models.dataset`
data set (positions of particles complied into a KD-Tree
linking_length : float
linking length, in data units
np : int
parallel processes to use (0 to disable)
verbose : boolean
print some verbose information
N : int
number of clusters identified
labels : array_like
the label (cluster id) of each object
length : array_like
number of particles per cluster
offset : array_like
offset of the first particle in indices
indices : array_like
index of particles indices[offset[i]:length[i]] is the indices
of particles in cluster i.
"""
def __init__(self, data, linking_length, np=None):
self.data = data
self.linking_length = linking_length
head = utils.empty(len(data), dtype='intp')
# this will set the head to a particle index
# need to convert this to a feature id with unique.
data.tree.root.fof(linking_length, head)
u, labels = numpy.unique(head, return_inverse=True)
self.N = len(u)
if len(labels) > 0:
length = utils.bincount(labels, 1, self.N)
else:
length = numpy.empty_like(labels)
# for example old labels == 5 is the longest halo
# then a[0] == 5
# we want to replace in labels 5 to 0
# thus we need an array inv[5] == 0
a = length.argsort()[::-1]
length = length[a]
inv = numpy.empty(self.N, dtype='intp')
inv[a] = numpy.arange(self.N)
#print inv.max(), inv.min()
self.labels = inv[labels]
self.length = length
self.offset = numpy.empty_like(length)
if len(labels) > 0: # if there is no particles length will be []!
self.offset[0] = 0
self.offset[1:] = length.cumsum()[:-1]
self.indices = self.labels.argsort()
def sum(self, weights=None):
""" return the sum of weights of each object """
if weights is None:
weights = self.data.weights
return utils.bincount(self.labels, weights, self.N)
def center(self, weights=None):
""" return the center of each object """
if weights is None:
weights = self.data.weights
mass = utils.bincount(self.labels, weights, self.N)
cp = numpy.empty((len(mass), self.data.pos.shape[-1]), 'f8')
for d in range(self.data.pos.shape[-1]):
cp[..., d] = utils.bincount(self.labels, weights *
self.data.pos[..., d], self.N)
cp[..., d] /= mass
return cp
|
rainwoodman/kdcount | kdcount/cluster.py | fof.sum | python | def sum(self, weights=None):
if weights is None:
weights = self.data.weights
return utils.bincount(self.labels, weights, self.N) | return the sum of weights of each object | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/cluster.py#L84-L88 | [
"def bincount(dig, weight, minlength):\n \"\"\" bincount supporting scalar and vector weight \"\"\"\n if numpy.isscalar(weight):\n return numpy.bincount(dig, minlength=minlength) * weight\n else:\n return numpy.bincount(dig, weight, minlength)\n"
] | class fof(object):
"""
Friend of Friend clustering
Attributes
----------
data : :py:class:`kdcount.models.dataset`
data set (positions of particles complied into a KD-Tree
linking_length : float
linking length, in data units
np : int
parallel processes to use (0 to disable)
verbose : boolean
print some verbose information
N : int
number of clusters identified
labels : array_like
the label (cluster id) of each object
length : array_like
number of particles per cluster
offset : array_like
offset of the first particle in indices
indices : array_like
index of particles indices[offset[i]:length[i]] is the indices
of particles in cluster i.
"""
def __init__(self, data, linking_length, np=None):
self.data = data
self.linking_length = linking_length
head = utils.empty(len(data), dtype='intp')
# this will set the head to a particle index
# need to convert this to a feature id with unique.
data.tree.root.fof(linking_length, head)
u, labels = numpy.unique(head, return_inverse=True)
self.N = len(u)
if len(labels) > 0:
length = utils.bincount(labels, 1, self.N)
else:
length = numpy.empty_like(labels)
# for example old labels == 5 is the longest halo
# then a[0] == 5
# we want to replace in labels 5 to 0
# thus we need an array inv[5] == 0
a = length.argsort()[::-1]
length = length[a]
inv = numpy.empty(self.N, dtype='intp')
inv[a] = numpy.arange(self.N)
#print inv.max(), inv.min()
self.labels = inv[labels]
self.length = length
self.offset = numpy.empty_like(length)
if len(labels) > 0: # if there is no particles length will be []!
self.offset[0] = 0
self.offset[1:] = length.cumsum()[:-1]
self.indices = self.labels.argsort()
def find(self, groupid):
""" return all of the indices of particles of groupid """
return self.indices[self.offset[groupid]
:self.offset[groupid]+ self.length[groupid]]
def center(self, weights=None):
""" return the center of each object """
if weights is None:
weights = self.data.weights
mass = utils.bincount(self.labels, weights, self.N)
cp = numpy.empty((len(mass), self.data.pos.shape[-1]), 'f8')
for d in range(self.data.pos.shape[-1]):
cp[..., d] = utils.bincount(self.labels, weights *
self.data.pos[..., d], self.N)
cp[..., d] /= mass
return cp
|
rainwoodman/kdcount | kdcount/cluster.py | fof.center | python | def center(self, weights=None):
if weights is None:
weights = self.data.weights
mass = utils.bincount(self.labels, weights, self.N)
cp = numpy.empty((len(mass), self.data.pos.shape[-1]), 'f8')
for d in range(self.data.pos.shape[-1]):
cp[..., d] = utils.bincount(self.labels, weights *
self.data.pos[..., d], self.N)
cp[..., d] /= mass
return cp | return the center of each object | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/cluster.py#L90-L100 | [
"def bincount(dig, weight, minlength):\n \"\"\" bincount supporting scalar and vector weight \"\"\"\n if numpy.isscalar(weight):\n return numpy.bincount(dig, minlength=minlength) * weight\n else:\n return numpy.bincount(dig, weight, minlength)\n"
] | class fof(object):
"""
Friend of Friend clustering
Attributes
----------
data : :py:class:`kdcount.models.dataset`
data set (positions of particles complied into a KD-Tree
linking_length : float
linking length, in data units
np : int
parallel processes to use (0 to disable)
verbose : boolean
print some verbose information
N : int
number of clusters identified
labels : array_like
the label (cluster id) of each object
length : array_like
number of particles per cluster
offset : array_like
offset of the first particle in indices
indices : array_like
index of particles indices[offset[i]:length[i]] is the indices
of particles in cluster i.
"""
def __init__(self, data, linking_length, np=None):
self.data = data
self.linking_length = linking_length
head = utils.empty(len(data), dtype='intp')
# this will set the head to a particle index
# need to convert this to a feature id with unique.
data.tree.root.fof(linking_length, head)
u, labels = numpy.unique(head, return_inverse=True)
self.N = len(u)
if len(labels) > 0:
length = utils.bincount(labels, 1, self.N)
else:
length = numpy.empty_like(labels)
# for example old labels == 5 is the longest halo
# then a[0] == 5
# we want to replace in labels 5 to 0
# thus we need an array inv[5] == 0
a = length.argsort()[::-1]
length = length[a]
inv = numpy.empty(self.N, dtype='intp')
inv[a] = numpy.arange(self.N)
#print inv.max(), inv.min()
self.labels = inv[labels]
self.length = length
self.offset = numpy.empty_like(length)
if len(labels) > 0: # if there is no particles length will be []!
self.offset[0] = 0
self.offset[1:] = length.cumsum()[:-1]
self.indices = self.labels.argsort()
def find(self, groupid):
""" return all of the indices of particles of groupid """
return self.indices[self.offset[groupid]
:self.offset[groupid]+ self.length[groupid]]
def sum(self, weights=None):
""" return the sum of weights of each object """
if weights is None:
weights = self.data.weights
return utils.bincount(self.labels, weights, self.N)
|
rainwoodman/kdcount | kdcount/sphere.py | bootstrap | python | def bootstrap(nside, rand, nbar, *data):
def split(data, indices, axis):
""" This function splits array. It fixes the bug
in numpy that zero length array are improperly handled.
In the future this will be fixed.
"""
s = []
s.append(slice(0, indices[0]))
for i in range(len(indices) - 1):
s.append(slice(indices[i], indices[i+1]))
s.append(slice(indices[-1], None))
rt = []
for ss in s:
ind = [slice(None, None, None) for i in range(len(data.shape))]
ind[axis] = ss
ind = tuple(ind)
rt.append(data[ind])
return rt
def hpsplit(nside, data):
# data is (RA, DEC)
RA, DEC = data
pix = radec2pix(nside, RA, DEC)
n = numpy.bincount(pix)
a = numpy.argsort(pix)
data = numpy.array(data)[:, a]
rt = split(data, n.cumsum(), axis=-1)
return rt
# mean area of sky.
Abar = 41252.96 / nside2npix(nside)
rand = hpsplit(nside, rand)
if len(data) > 0:
data = [list(i) for i in zip(*[hpsplit(nside, d1) for d1 in data])]
else:
data = [[] for i in range(len(rand))]
heap = []
j = 0
for r, d in zip(rand, data):
if len(r[0]) == 0: continue
a = 1.0 * len(r[0]) / nbar
j = j + 1
if len(heap) == 0:
heapq.heappush(heap, (a, j, r, d))
else:
a0, j0, r0, d0 = heapq.heappop(heap)
if a0 + a < Abar:
a0 += a
d0 = [
numpy.concatenate((d0[i], d[i]), axis=-1)
for i in range(len(d))
]
r0 = numpy.concatenate((r0, r), axis=-1)
else:
heapq.heappush(heap, (a, j, r, d))
heapq.heappush(heap, (a0, j0, r0, d0))
for i in range(len(heap)):
area, j, r, d = heapq.heappop(heap)
rt = [area, r] + d
yield rt | This function will bootstrap data based on the sky coverage of rand.
It is different from bootstrap in the traditional sense, but for correlation
functions it gives the correct answer with less computation.
nbar : number density of rand, used to estimate the effective area of a pixel
nside : number of healpix pixels per side to use
*data : a list of data -- will be binned on the same regions.
small regions (incomplete pixels) are combined such that the total
area is about the same (a healpix pixel) in each returned boot strap sample
Yields: area, random, *data
rand and *data are in (RA, DEC)
Example:
>>> for area, ran, data1, data2 in bootstrap(4, ran, 100., data1, data2):
>>> # Do stuff
>>> pass | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/sphere.py#L65-L153 | [
"def nside2npix(nside):\n return nside * nside * 12\n",
"def hpsplit(nside, data):\n # data is (RA, DEC)\n RA, DEC = data\n pix = radec2pix(nside, RA, DEC)\n n = numpy.bincount(pix)\n a = numpy.argsort(pix)\n data = numpy.array(data)[:, a]\n rt = split(data, n.cumsum(), axis=-1)\n return rt\n"
] | from . import models
from .correlate import RBinning
import numpy
class points(models.points):
def __init__(self, ra, dec, weights=None, boxsize=None):
self.ra = ra
self.dec = dec
ra = ra * (numpy.pi / 180.)
dec = dec * (numpy.pi / 180.)
dtype = numpy.dtype((ra.dtype, 3))
pos = numpy.empty(len(ra), dtype=dtype)
pos[:, 2] = numpy.sin(dec)
r = numpy.cos(dec)
pos[:, 0] = numpy.sin(ra) * r
pos[:, 1] = numpy.cos(ra) * r
models.points.__init__(self, pos, weights, boxsize)
def __getitem__(self, index):
return points(self.ra[index], self.dec[index], self.weights[index], self.boxsize);
class field(models.field):
def __init__(self, ra, dec, value, weights=None, boxsize=None):
self.ra = ra
self.dec = dec
ra = ra * (numpy.pi / 180.)
dec = dec * (numpy.pi / 180.)
dtype = numpy.dtype((ra.dtype, 3))
pos = numpy.empty(len(ra), dtype=dtype)
pos[:, 2] = numpy.sin(dec)
r = numpy.cos(dec)
pos[:, 0] = numpy.sin(ra) * r
pos[:, 1] = numpy.cos(ra) * r
models.field.__init__(self, pos, value, weights, boxsize)
def __getitem__(self, index):
return field(self.ra[index], self.dec[index], self.value[index], self.weights[index], self.boxsize)
class AngularBinning(RBinning):
def __init__(self, angbins, **kwargs):
rbins = 2 * numpy.sin(0.5 * numpy.radians(angbins))
RBinning.__init__(self, rbins, **kwargs)
@property
def angular_centers(self):
return 2 * numpy.arcsin(self.centers * 0.5) * (180. / numpy.pi)
@property
def angular_edges(self):
return 2 * numpy.arcsin(self.edges * 0.5) * (180. / numpy.pi)
def digitize(self, r, i, j, data1, data2):
# linear bins
dig = self.linear(r=r)
theta = 2 * numpy.arcsin(r * 0.5) * (180. / numpy.pi)
return dig, dict(r=theta)
class FastAngularBinning(AngularBinning):
enable_fast_node_counting = True
import heapq
def pix2radec(nside, pix):
theta, phi = pix2ang(nside, pix)
return numpy.degrees(phi), 90 - numpy.degrees(theta)
def radec2pix(nside, ra, dec):
phi = numpy.radians(ra)
theta = numpy.radians(90 - dec)
return ang2pix(nside, theta, phi)
def nside2npix(nside):
return nside * nside * 12
def ang2pix(nside, theta, phi):
r"""Convert angle :math:`\theta` :math:`\phi` to pixel.
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, theta, phi = numpy.lib.stride_tricks.broadcast_arrays(nside, theta, phi)
def equatorial(nside, tt, z):
t1 = nside * (0.5 + tt)
t2 = nside * z * 0.75
jp = (t1 - t2).astype('i8')
jm = (t1 + t2).astype('i8')
ir = nside + 1 + jp - jm # in {1, 2n + 1}
kshift = 1 - (ir & 1) # kshift=1 if ir even, 0 odd
ip = (jp + jm - nside + kshift + 1) // 2 # in {0, 4n - 1}
ip = ip % (4 * nside)
return nside * (nside - 1) * 2 + (ir - 1) * 4 * nside + ip
def polecaps(nside, tt, z, s):
tp = tt - numpy.floor(tt)
za = numpy.abs(z)
tmp = nside * s / ((1 + za) / 3) ** 0.5
mp = za > 0.99
tmp[mp] = nside[mp] * (3 *(1-za[mp])) ** 0.5
jp = (tp * tmp).astype('i8')
jm = ((1 - tp) * tmp).astype('i8')
ir = jp + jm + 1
ip = (tt * ir).astype('i8')
ip = ip % (4 * ir)
r1 = 2 * ir * (ir - 1)
r2 = 2 * ir * (ir + 1)
r = numpy.empty_like(r1)
r[z > 0] = r1[z > 0] + ip[z > 0]
r[z < 0] = 12 * nside[z < 0] * nside[z < 0] - r2[z < 0] + ip[z < 0]
return r
z = numpy.cos(theta)
s = numpy.sin(theta)
tt = (phi / (0.5 * numpy.pi) ) % 4 # in [0, 4]
result = numpy.zeros(z.shape, dtype='i8')
mask = (z < 2. / 3) & (z > -2. / 3)
result[mask] = equatorial(nside[mask], tt[mask], z[mask])
result[~mask] = polecaps(nside[~mask], tt[~mask], z[~mask], s[~mask])
return result
def pix2ang(nside, pix):
r"""Convert pixel to angle :math:`\theta` :math:`\phi`.
nside and pix are broadcast with numpy rules.
Returns: theta, phi
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, pix = numpy.lib.stride_tricks.broadcast_arrays(nside, pix)
ncap = nside * (nside - 1) * 2
npix = 12 * nside * nside
def northpole(pix, npix):
iring = (1 + ((1 + 2 * pix) ** 0.5)).astype('i8') // 2
iphi = (pix + 1) - 2 * iring * (iring - 1)
z = 1.0 - (iring*iring) * 4. / npix
phi = (iphi - 0.5) * 0.5 * numpy.pi / iring
return z, phi
def equatorial(pix, nside, npix, ncap):
ip = pix - ncap
iring = ip // (4 * nside) + nside
iphi = ip % (4 * nside) + 1
fodd = (((iring + nside) &1) + 1.) * 0.5
z = (2 * nside - iring) * nside * 8.0 / npix
phi = (iphi - fodd) * (0.5 * numpy.pi) / nside
return z, phi
def southpole(pix, npix):
ip = npix - pix
iring = (1 + ((2 * ip - 1)**0.5).astype('i8')) // 2
iphi = 4 * iring + 1 - (ip - 2 * iring * (iring - 1))
z = -1 + (iring * iring) * 4. / npix
phi = (iphi - 0.5 ) * 0.5 * numpy.pi / iring
return z, phi
mask1 = pix < ncap
mask2 = (~mask1) & (pix < npix - ncap)
mask3 = pix >= npix - ncap
z = numpy.zeros(pix.shape, dtype='f8')
phi = numpy.zeros(pix.shape, dtype='f8')
z[mask1], phi[mask1] = northpole(pix[mask1], npix[mask1])
z[mask2], phi[mask2] = equatorial(pix[mask2], nside[mask2], npix[mask2], ncap[mask2])
z[mask3], phi[mask3] = southpole(pix[mask3], npix[mask3])
return numpy.arccos(z), phi
|
rainwoodman/kdcount | kdcount/sphere.py | ang2pix | python | def ang2pix(nside, theta, phi):
r"""Convert angle :math:`\theta` :math:`\phi` to pixel.
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, theta, phi = numpy.lib.stride_tricks.broadcast_arrays(nside, theta, phi)
def equatorial(nside, tt, z):
t1 = nside * (0.5 + tt)
t2 = nside * z * 0.75
jp = (t1 - t2).astype('i8')
jm = (t1 + t2).astype('i8')
ir = nside + 1 + jp - jm # in {1, 2n + 1}
kshift = 1 - (ir & 1) # kshift=1 if ir even, 0 odd
ip = (jp + jm - nside + kshift + 1) // 2 # in {0, 4n - 1}
ip = ip % (4 * nside)
return nside * (nside - 1) * 2 + (ir - 1) * 4 * nside + ip
def polecaps(nside, tt, z, s):
tp = tt - numpy.floor(tt)
za = numpy.abs(z)
tmp = nside * s / ((1 + za) / 3) ** 0.5
mp = za > 0.99
tmp[mp] = nside[mp] * (3 *(1-za[mp])) ** 0.5
jp = (tp * tmp).astype('i8')
jm = ((1 - tp) * tmp).astype('i8')
ir = jp + jm + 1
ip = (tt * ir).astype('i8')
ip = ip % (4 * ir)
r1 = 2 * ir * (ir - 1)
r2 = 2 * ir * (ir + 1)
r = numpy.empty_like(r1)
r[z > 0] = r1[z > 0] + ip[z > 0]
r[z < 0] = 12 * nside[z < 0] * nside[z < 0] - r2[z < 0] + ip[z < 0]
return r
z = numpy.cos(theta)
s = numpy.sin(theta)
tt = (phi / (0.5 * numpy.pi) ) % 4 # in [0, 4]
result = numpy.zeros(z.shape, dtype='i8')
mask = (z < 2. / 3) & (z > -2. / 3)
result[mask] = equatorial(nside[mask], tt[mask], z[mask])
result[~mask] = polecaps(nside[~mask], tt[~mask], z[~mask], s[~mask])
return result | r"""Convert angle :math:`\theta` :math:`\phi` to pixel.
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/sphere.py#L167-L219 | [
"def equatorial(nside, tt, z):\n t1 = nside * (0.5 + tt)\n t2 = nside * z * 0.75\n jp = (t1 - t2).astype('i8')\n jm = (t1 + t2).astype('i8')\n ir = nside + 1 + jp - jm # in {1, 2n + 1}\n kshift = 1 - (ir & 1) # kshift=1 if ir even, 0 odd\n\n ip = (jp + jm - nside + kshift + 1) // 2 # in {0, 4n - 1}\n\n ip = ip % (4 * nside)\n return nside * (nside - 1) * 2 + (ir - 1) * 4 * nside + ip\n",
"def polecaps(nside, tt, z, s):\n tp = tt - numpy.floor(tt)\n za = numpy.abs(z)\n tmp = nside * s / ((1 + za) / 3) ** 0.5\n mp = za > 0.99\n tmp[mp] = nside[mp] * (3 *(1-za[mp])) ** 0.5\n jp = (tp * tmp).astype('i8')\n jm = ((1 - tp) * tmp).astype('i8')\n ir = jp + jm + 1\n ip = (tt * ir).astype('i8')\n ip = ip % (4 * ir)\n\n r1 = 2 * ir * (ir - 1)\n r2 = 2 * ir * (ir + 1)\n\n r = numpy.empty_like(r1)\n\n r[z > 0] = r1[z > 0] + ip[z > 0]\n r[z < 0] = 12 * nside[z < 0] * nside[z < 0] - r2[z < 0] + ip[z < 0]\n return r\n"
] | from . import models
from .correlate import RBinning
import numpy
class points(models.points):
def __init__(self, ra, dec, weights=None, boxsize=None):
self.ra = ra
self.dec = dec
ra = ra * (numpy.pi / 180.)
dec = dec * (numpy.pi / 180.)
dtype = numpy.dtype((ra.dtype, 3))
pos = numpy.empty(len(ra), dtype=dtype)
pos[:, 2] = numpy.sin(dec)
r = numpy.cos(dec)
pos[:, 0] = numpy.sin(ra) * r
pos[:, 1] = numpy.cos(ra) * r
models.points.__init__(self, pos, weights, boxsize)
def __getitem__(self, index):
return points(self.ra[index], self.dec[index], self.weights[index], self.boxsize);
class field(models.field):
def __init__(self, ra, dec, value, weights=None, boxsize=None):
self.ra = ra
self.dec = dec
ra = ra * (numpy.pi / 180.)
dec = dec * (numpy.pi / 180.)
dtype = numpy.dtype((ra.dtype, 3))
pos = numpy.empty(len(ra), dtype=dtype)
pos[:, 2] = numpy.sin(dec)
r = numpy.cos(dec)
pos[:, 0] = numpy.sin(ra) * r
pos[:, 1] = numpy.cos(ra) * r
models.field.__init__(self, pos, value, weights, boxsize)
def __getitem__(self, index):
return field(self.ra[index], self.dec[index], self.value[index], self.weights[index], self.boxsize)
class AngularBinning(RBinning):
def __init__(self, angbins, **kwargs):
rbins = 2 * numpy.sin(0.5 * numpy.radians(angbins))
RBinning.__init__(self, rbins, **kwargs)
@property
def angular_centers(self):
return 2 * numpy.arcsin(self.centers * 0.5) * (180. / numpy.pi)
@property
def angular_edges(self):
return 2 * numpy.arcsin(self.edges * 0.5) * (180. / numpy.pi)
def digitize(self, r, i, j, data1, data2):
# linear bins
dig = self.linear(r=r)
theta = 2 * numpy.arcsin(r * 0.5) * (180. / numpy.pi)
return dig, dict(r=theta)
class FastAngularBinning(AngularBinning):
enable_fast_node_counting = True
import heapq
def bootstrap(nside, rand, nbar, *data):
""" This function will bootstrap data based on the sky coverage of rand.
It is different from bootstrap in the traditional sense, but for correlation
functions it gives the correct answer with less computation.
nbar : number density of rand, used to estimate the effective area of a pixel
nside : number of healpix pixels per side to use
*data : a list of data -- will be binned on the same regions.
small regions (incomplete pixels) are combined such that the total
area is about the same (a healpix pixel) in each returned boot strap sample
Yields: area, random, *data
rand and *data are in (RA, DEC)
Example:
>>> for area, ran, data1, data2 in bootstrap(4, ran, 100., data1, data2):
>>> # Do stuff
>>> pass
"""
def split(data, indices, axis):
""" This function splits array. It fixes the bug
in numpy that zero length array are improperly handled.
In the future this will be fixed.
"""
s = []
s.append(slice(0, indices[0]))
for i in range(len(indices) - 1):
s.append(slice(indices[i], indices[i+1]))
s.append(slice(indices[-1], None))
rt = []
for ss in s:
ind = [slice(None, None, None) for i in range(len(data.shape))]
ind[axis] = ss
ind = tuple(ind)
rt.append(data[ind])
return rt
def hpsplit(nside, data):
# data is (RA, DEC)
RA, DEC = data
pix = radec2pix(nside, RA, DEC)
n = numpy.bincount(pix)
a = numpy.argsort(pix)
data = numpy.array(data)[:, a]
rt = split(data, n.cumsum(), axis=-1)
return rt
# mean area of sky.
Abar = 41252.96 / nside2npix(nside)
rand = hpsplit(nside, rand)
if len(data) > 0:
data = [list(i) for i in zip(*[hpsplit(nside, d1) for d1 in data])]
else:
data = [[] for i in range(len(rand))]
heap = []
j = 0
for r, d in zip(rand, data):
if len(r[0]) == 0: continue
a = 1.0 * len(r[0]) / nbar
j = j + 1
if len(heap) == 0:
heapq.heappush(heap, (a, j, r, d))
else:
a0, j0, r0, d0 = heapq.heappop(heap)
if a0 + a < Abar:
a0 += a
d0 = [
numpy.concatenate((d0[i], d[i]), axis=-1)
for i in range(len(d))
]
r0 = numpy.concatenate((r0, r), axis=-1)
else:
heapq.heappush(heap, (a, j, r, d))
heapq.heappush(heap, (a0, j0, r0, d0))
for i in range(len(heap)):
area, j, r, d = heapq.heappop(heap)
rt = [area, r] + d
yield rt
def pix2radec(nside, pix):
theta, phi = pix2ang(nside, pix)
return numpy.degrees(phi), 90 - numpy.degrees(theta)
def radec2pix(nside, ra, dec):
phi = numpy.radians(ra)
theta = numpy.radians(90 - dec)
return ang2pix(nside, theta, phi)
def nside2npix(nside):
return nside * nside * 12
def pix2ang(nside, pix):
r"""Convert pixel to angle :math:`\theta` :math:`\phi`.
nside and pix are broadcast with numpy rules.
Returns: theta, phi
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, pix = numpy.lib.stride_tricks.broadcast_arrays(nside, pix)
ncap = nside * (nside - 1) * 2
npix = 12 * nside * nside
def northpole(pix, npix):
iring = (1 + ((1 + 2 * pix) ** 0.5)).astype('i8') // 2
iphi = (pix + 1) - 2 * iring * (iring - 1)
z = 1.0 - (iring*iring) * 4. / npix
phi = (iphi - 0.5) * 0.5 * numpy.pi / iring
return z, phi
def equatorial(pix, nside, npix, ncap):
ip = pix - ncap
iring = ip // (4 * nside) + nside
iphi = ip % (4 * nside) + 1
fodd = (((iring + nside) &1) + 1.) * 0.5
z = (2 * nside - iring) * nside * 8.0 / npix
phi = (iphi - fodd) * (0.5 * numpy.pi) / nside
return z, phi
def southpole(pix, npix):
ip = npix - pix
iring = (1 + ((2 * ip - 1)**0.5).astype('i8')) // 2
iphi = 4 * iring + 1 - (ip - 2 * iring * (iring - 1))
z = -1 + (iring * iring) * 4. / npix
phi = (iphi - 0.5 ) * 0.5 * numpy.pi / iring
return z, phi
mask1 = pix < ncap
mask2 = (~mask1) & (pix < npix - ncap)
mask3 = pix >= npix - ncap
z = numpy.zeros(pix.shape, dtype='f8')
phi = numpy.zeros(pix.shape, dtype='f8')
z[mask1], phi[mask1] = northpole(pix[mask1], npix[mask1])
z[mask2], phi[mask2] = equatorial(pix[mask2], nside[mask2], npix[mask2], ncap[mask2])
z[mask3], phi[mask3] = southpole(pix[mask3], npix[mask3])
return numpy.arccos(z), phi
|
rainwoodman/kdcount | kdcount/sphere.py | pix2ang | python | def pix2ang(nside, pix):
r"""Convert pixel to angle :math:`\theta` :math:`\phi`.
nside and pix are broadcast with numpy rules.
Returns: theta, phi
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, pix = numpy.lib.stride_tricks.broadcast_arrays(nside, pix)
ncap = nside * (nside - 1) * 2
npix = 12 * nside * nside
def northpole(pix, npix):
iring = (1 + ((1 + 2 * pix) ** 0.5)).astype('i8') // 2
iphi = (pix + 1) - 2 * iring * (iring - 1)
z = 1.0 - (iring*iring) * 4. / npix
phi = (iphi - 0.5) * 0.5 * numpy.pi / iring
return z, phi
def equatorial(pix, nside, npix, ncap):
ip = pix - ncap
iring = ip // (4 * nside) + nside
iphi = ip % (4 * nside) + 1
fodd = (((iring + nside) &1) + 1.) * 0.5
z = (2 * nside - iring) * nside * 8.0 / npix
phi = (iphi - fodd) * (0.5 * numpy.pi) / nside
return z, phi
def southpole(pix, npix):
ip = npix - pix
iring = (1 + ((2 * ip - 1)**0.5).astype('i8')) // 2
iphi = 4 * iring + 1 - (ip - 2 * iring * (iring - 1))
z = -1 + (iring * iring) * 4. / npix
phi = (iphi - 0.5 ) * 0.5 * numpy.pi / iring
return z, phi
mask1 = pix < ncap
mask2 = (~mask1) & (pix < npix - ncap)
mask3 = pix >= npix - ncap
z = numpy.zeros(pix.shape, dtype='f8')
phi = numpy.zeros(pix.shape, dtype='f8')
z[mask1], phi[mask1] = northpole(pix[mask1], npix[mask1])
z[mask2], phi[mask2] = equatorial(pix[mask2], nside[mask2], npix[mask2], ncap[mask2])
z[mask3], phi[mask3] = southpole(pix[mask3], npix[mask3])
return numpy.arccos(z), phi | r"""Convert pixel to angle :math:`\theta` :math:`\phi`.
nside and pix are broadcast with numpy rules.
Returns: theta, phi
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/sphere.py#L221-L270 | [
"def northpole(pix, npix):\n iring = (1 + ((1 + 2 * pix) ** 0.5)).astype('i8') // 2\n iphi = (pix + 1) - 2 * iring * (iring - 1)\n z = 1.0 - (iring*iring) * 4. / npix\n phi = (iphi - 0.5) * 0.5 * numpy.pi / iring\n return z, phi\n",
"def equatorial(pix, nside, npix, ncap):\n ip = pix - ncap\n iring = ip // (4 * nside) + nside\n iphi = ip % (4 * nside) + 1\n fodd = (((iring + nside) &1) + 1.) * 0.5\n z = (2 * nside - iring) * nside * 8.0 / npix\n phi = (iphi - fodd) * (0.5 * numpy.pi) / nside\n return z, phi\n",
"def southpole(pix, npix):\n ip = npix - pix\n iring = (1 + ((2 * ip - 1)**0.5).astype('i8')) // 2\n iphi = 4 * iring + 1 - (ip - 2 * iring * (iring - 1))\n z = -1 + (iring * iring) * 4. / npix\n phi = (iphi - 0.5 ) * 0.5 * numpy.pi / iring\n return z, phi\n"
] | from . import models
from .correlate import RBinning
import numpy
class points(models.points):
def __init__(self, ra, dec, weights=None, boxsize=None):
self.ra = ra
self.dec = dec
ra = ra * (numpy.pi / 180.)
dec = dec * (numpy.pi / 180.)
dtype = numpy.dtype((ra.dtype, 3))
pos = numpy.empty(len(ra), dtype=dtype)
pos[:, 2] = numpy.sin(dec)
r = numpy.cos(dec)
pos[:, 0] = numpy.sin(ra) * r
pos[:, 1] = numpy.cos(ra) * r
models.points.__init__(self, pos, weights, boxsize)
def __getitem__(self, index):
return points(self.ra[index], self.dec[index], self.weights[index], self.boxsize);
class field(models.field):
def __init__(self, ra, dec, value, weights=None, boxsize=None):
self.ra = ra
self.dec = dec
ra = ra * (numpy.pi / 180.)
dec = dec * (numpy.pi / 180.)
dtype = numpy.dtype((ra.dtype, 3))
pos = numpy.empty(len(ra), dtype=dtype)
pos[:, 2] = numpy.sin(dec)
r = numpy.cos(dec)
pos[:, 0] = numpy.sin(ra) * r
pos[:, 1] = numpy.cos(ra) * r
models.field.__init__(self, pos, value, weights, boxsize)
def __getitem__(self, index):
return field(self.ra[index], self.dec[index], self.value[index], self.weights[index], self.boxsize)
class AngularBinning(RBinning):
def __init__(self, angbins, **kwargs):
rbins = 2 * numpy.sin(0.5 * numpy.radians(angbins))
RBinning.__init__(self, rbins, **kwargs)
@property
def angular_centers(self):
return 2 * numpy.arcsin(self.centers * 0.5) * (180. / numpy.pi)
@property
def angular_edges(self):
return 2 * numpy.arcsin(self.edges * 0.5) * (180. / numpy.pi)
def digitize(self, r, i, j, data1, data2):
# linear bins
dig = self.linear(r=r)
theta = 2 * numpy.arcsin(r * 0.5) * (180. / numpy.pi)
return dig, dict(r=theta)
class FastAngularBinning(AngularBinning):
enable_fast_node_counting = True
import heapq
def bootstrap(nside, rand, nbar, *data):
""" This function will bootstrap data based on the sky coverage of rand.
It is different from bootstrap in the traditional sense, but for correlation
functions it gives the correct answer with less computation.
nbar : number density of rand, used to estimate the effective area of a pixel
nside : number of healpix pixels per side to use
*data : a list of data -- will be binned on the same regions.
small regions (incomplete pixels) are combined such that the total
area is about the same (a healpix pixel) in each returned boot strap sample
Yields: area, random, *data
rand and *data are in (RA, DEC)
Example:
>>> for area, ran, data1, data2 in bootstrap(4, ran, 100., data1, data2):
>>> # Do stuff
>>> pass
"""
def split(data, indices, axis):
""" This function splits array. It fixes the bug
in numpy that zero length array are improperly handled.
In the future this will be fixed.
"""
s = []
s.append(slice(0, indices[0]))
for i in range(len(indices) - 1):
s.append(slice(indices[i], indices[i+1]))
s.append(slice(indices[-1], None))
rt = []
for ss in s:
ind = [slice(None, None, None) for i in range(len(data.shape))]
ind[axis] = ss
ind = tuple(ind)
rt.append(data[ind])
return rt
def hpsplit(nside, data):
# data is (RA, DEC)
RA, DEC = data
pix = radec2pix(nside, RA, DEC)
n = numpy.bincount(pix)
a = numpy.argsort(pix)
data = numpy.array(data)[:, a]
rt = split(data, n.cumsum(), axis=-1)
return rt
# mean area of sky.
Abar = 41252.96 / nside2npix(nside)
rand = hpsplit(nside, rand)
if len(data) > 0:
data = [list(i) for i in zip(*[hpsplit(nside, d1) for d1 in data])]
else:
data = [[] for i in range(len(rand))]
heap = []
j = 0
for r, d in zip(rand, data):
if len(r[0]) == 0: continue
a = 1.0 * len(r[0]) / nbar
j = j + 1
if len(heap) == 0:
heapq.heappush(heap, (a, j, r, d))
else:
a0, j0, r0, d0 = heapq.heappop(heap)
if a0 + a < Abar:
a0 += a
d0 = [
numpy.concatenate((d0[i], d[i]), axis=-1)
for i in range(len(d))
]
r0 = numpy.concatenate((r0, r), axis=-1)
else:
heapq.heappush(heap, (a, j, r, d))
heapq.heappush(heap, (a0, j0, r0, d0))
for i in range(len(heap)):
area, j, r, d = heapq.heappop(heap)
rt = [area, r] + d
yield rt
def pix2radec(nside, pix):
theta, phi = pix2ang(nside, pix)
return numpy.degrees(phi), 90 - numpy.degrees(theta)
def radec2pix(nside, ra, dec):
phi = numpy.radians(ra)
theta = numpy.radians(90 - dec)
return ang2pix(nside, theta, phi)
def nside2npix(nside):
return nside * nside * 12
def ang2pix(nside, theta, phi):
r"""Convert angle :math:`\theta` :math:`\phi` to pixel.
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, theta, phi = numpy.lib.stride_tricks.broadcast_arrays(nside, theta, phi)
def equatorial(nside, tt, z):
t1 = nside * (0.5 + tt)
t2 = nside * z * 0.75
jp = (t1 - t2).astype('i8')
jm = (t1 + t2).astype('i8')
ir = nside + 1 + jp - jm # in {1, 2n + 1}
kshift = 1 - (ir & 1) # kshift=1 if ir even, 0 odd
ip = (jp + jm - nside + kshift + 1) // 2 # in {0, 4n - 1}
ip = ip % (4 * nside)
return nside * (nside - 1) * 2 + (ir - 1) * 4 * nside + ip
def polecaps(nside, tt, z, s):
tp = tt - numpy.floor(tt)
za = numpy.abs(z)
tmp = nside * s / ((1 + za) / 3) ** 0.5
mp = za > 0.99
tmp[mp] = nside[mp] * (3 *(1-za[mp])) ** 0.5
jp = (tp * tmp).astype('i8')
jm = ((1 - tp) * tmp).astype('i8')
ir = jp + jm + 1
ip = (tt * ir).astype('i8')
ip = ip % (4 * ir)
r1 = 2 * ir * (ir - 1)
r2 = 2 * ir * (ir + 1)
r = numpy.empty_like(r1)
r[z > 0] = r1[z > 0] + ip[z > 0]
r[z < 0] = 12 * nside[z < 0] * nside[z < 0] - r2[z < 0] + ip[z < 0]
return r
z = numpy.cos(theta)
s = numpy.sin(theta)
tt = (phi / (0.5 * numpy.pi) ) % 4 # in [0, 4]
result = numpy.zeros(z.shape, dtype='i8')
mask = (z < 2. / 3) & (z > -2. / 3)
result[mask] = equatorial(nside[mask], tt[mask], z[mask])
result[~mask] = polecaps(nside[~mask], tt[~mask], z[~mask], s[~mask])
return result
|
rainwoodman/kdcount | kdcount/__init__.py | KDNode.enumiter | python | def enumiter(self, other, rmax, bunch=100000):
def feeder(process):
self.enum(other, rmax, process, bunch)
for r, i, j in makeiter(feeder):
yield r, i, j | cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum. | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L13-L28 | [
"def makeiter(feeder):\n q = queue.Queue(2)\n def process(*args):\n q.put(args)\n def wrap(process):\n try:\n feeder(process)\n except Exception as e:\n q.put(e)\n finally:\n q.put(StopIteration)\n old = signal.signal(signal.SIGINT, signal.SIG_IGN)\n t = threading.Thread(target=wrap, args=(process,))\n t.start()\n signal.signal(signal.SIGINT, old)\n while True:\n item = q.get()\n if item is StopIteration:\n q.task_done()\n q.join()\n t.join()\n break\n elif isinstance(item, Exception):\n q.task_done()\n q.join()\n t.join()\n raise item\n else:\n if len(item) == 1: item = item[0]\n yield item\n q.task_done()\n"
] | class KDNode(_core.KDNode):
def __repr__(self):
return ('KDNode(dim=%d, split=%d, size=%d)' %
(self.dim, self.split, self.size))
def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
"""
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1)
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return rall[0], iall[0], jall[0]
else:
return None
def count(self, other, r, attrs=None, info={}):
""" Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
count, weight of attrs is not None
"""
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info)
def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method)
def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info)
def make_forest(self, chunksize):
""" Divide a tree branch to a forest,
each subtree of size at most chunksize """
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x
|
rainwoodman/kdcount | kdcount/__init__.py | KDNode.enum | python | def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1)
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return rall[0], iall[0], jall[0]
else:
return None | cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs. | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L30-L59 | null | class KDNode(_core.KDNode):
def __repr__(self):
return ('KDNode(dim=%d, split=%d, size=%d)' %
(self.dim, self.split, self.size))
def enumiter(self, other, rmax, bunch=100000):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum.
"""
def feeder(process):
self.enum(other, rmax, process, bunch)
for r, i, j in makeiter(feeder):
yield r, i, j
def count(self, other, r, attrs=None, info={}):
""" Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
count, weight of attrs is not None
"""
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info)
def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method)
def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info)
def make_forest(self, chunksize):
""" Divide a tree branch to a forest,
each subtree of size at most chunksize """
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x
|
rainwoodman/kdcount | kdcount/__init__.py | KDNode.count | python | def count(self, other, r, attrs=None, info={}):
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info) | Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
count, weight of attrs is not None | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L61-L77 | null | class KDNode(_core.KDNode):
def __repr__(self):
return ('KDNode(dim=%d, split=%d, size=%d)' %
(self.dim, self.split, self.size))
def enumiter(self, other, rmax, bunch=100000):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum.
"""
def feeder(process):
self.enum(other, rmax, process, bunch)
for r, i, j in makeiter(feeder):
yield r, i, j
def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
"""
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1)
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return rall[0], iall[0], jall[0]
else:
return None
def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method)
def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info)
def make_forest(self, chunksize):
""" Divide a tree branch to a forest,
each subtree of size at most chunksize """
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x
|
rainwoodman/kdcount | kdcount/__init__.py | KDNode.fof | python | def fof(self, linkinglength, out=None, method='splay'):
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method) | Friend-of-Friend clustering with linking length.
Returns: the label | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L79-L86 | null | class KDNode(_core.KDNode):
def __repr__(self):
return ('KDNode(dim=%d, split=%d, size=%d)' %
(self.dim, self.split, self.size))
def enumiter(self, other, rmax, bunch=100000):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum.
"""
def feeder(process):
self.enum(other, rmax, process, bunch)
for r, i, j in makeiter(feeder):
yield r, i, j
def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
"""
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1)
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return rall[0], iall[0], jall[0]
else:
return None
def count(self, other, r, attrs=None, info={}):
""" Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
count, weight of attrs is not None
"""
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info)
def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info)
def make_forest(self, chunksize):
""" Divide a tree branch to a forest,
each subtree of size at most chunksize """
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x
|
rainwoodman/kdcount | kdcount/__init__.py | KDNode.integrate | python | def integrate(self, min, max, attr=None, info={}):
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info) | Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points. | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L88-L110 | null | class KDNode(_core.KDNode):
def __repr__(self):
return ('KDNode(dim=%d, split=%d, size=%d)' %
(self.dim, self.split, self.size))
def enumiter(self, other, rmax, bunch=100000):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum.
"""
def feeder(process):
self.enum(other, rmax, process, bunch)
for r, i, j in makeiter(feeder):
yield r, i, j
def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
"""
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1)
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return rall[0], iall[0], jall[0]
else:
return None
def count(self, other, r, attrs=None, info={}):
""" Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
count, weight of attrs is not None
"""
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info)
def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method)
def make_forest(self, chunksize):
""" Divide a tree branch to a forest,
each subtree of size at most chunksize """
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x
|
rainwoodman/kdcount | kdcount/__init__.py | KDNode.make_forest | python | def make_forest(self, chunksize):
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x | Divide a tree branch to a forest,
each subtree of size at most chunksize | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L112-L129 | null | class KDNode(_core.KDNode):
def __repr__(self):
return ('KDNode(dim=%d, split=%d, size=%d)' %
(self.dim, self.split, self.size))
def enumiter(self, other, rmax, bunch=100000):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum.
"""
def feeder(process):
self.enum(other, rmax, process, bunch)
for r, i, j in makeiter(feeder):
yield r, i, j
def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
"""
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1)
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return rall[0], iall[0], jall[0]
else:
return None
def count(self, other, r, attrs=None, info={}):
""" Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
count, weight of attrs is not None
"""
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info)
def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method)
def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info)
|
rainwoodman/kdcount | kdcount/utils.py | bincount | python | def bincount(dig, weight, minlength):
if numpy.isscalar(weight):
return numpy.bincount(dig, minlength=minlength) * weight
else:
return numpy.bincount(dig, weight, minlength) | bincount supporting scalar and vector weight | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/utils.py#L27-L32 | null | import numpy
try:
from sharedmem import MapReduce
from sharedmem import empty
except ImportError:
import numpy
empty = numpy.empty
class MapReduce(object):
def __init__(self, np=None):
self.critical = self
self.np = 0
pass
def __enter__(self):
return self
def __exit__(self, *args):
pass
def map(self, work, items, reduce=None):
if reduce is not None:
callreduce = lambda r: \
reduce(*r) if isinstance(r, tuple) \
else reduce(r)
else:
callreduce = lambda r: r
return [callreduce(work(i)) for i in items]
# for creating dummy '1.0' arrays
from numpy.lib.stride_tricks import as_strided
class constant_array(numpy.ndarray):
def __new__(kls, shape, dtype='f8'):
if numpy.isscalar(shape):
shape = (shape,)
foo = numpy.empty((), dtype=dtype)
self = as_strided(foo, list(shape) + list(foo.shape),
[0] * len(shape) + list(foo.strides)).view(type=constant_array)
self.value = foo
return self
def __getitem__(self, key):
if isinstance(key, slice):
start, end, step = key.indices(len(self))
N = (end - start) // step
elif isinstance(key, (list,)):
N = len(key)
elif isinstance(key, (numpy.ndarray,)):
if key.dtype == numpy.dtype('?'):
N = key.sum()
else:
N = len(key)
else:
N = None
if N is None:
return numpy.ndarray.__getitem__(self, key)
else:
shape = [N] + list(self.shape[1:])
r = constant_array(shape, self.dtype)
r.value[...] = self.value[...]
return r
def __array_wrap__(self, outarr, context):
return outarr.view(type=numpy.ndarray)
|
rstoneback/pysatMagVect | pysatMagVect/satellite.py | add_mag_drift_unit_vectors_ecef | python | def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10.,
ref_height=120.):
# add unit vectors for magnetic drifts in ecef coordinates
zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(inst['latitude'],
inst['longitude'], inst['altitude'], inst.data.index,
steps=steps, max_steps=max_steps, step_size=step_size, ref_height=ref_height)
inst['unit_zon_ecef_x'] = zvx
inst['unit_zon_ecef_y'] = zvy
inst['unit_zon_ecef_z'] = zvz
inst['unit_fa_ecef_x'] = bx
inst['unit_fa_ecef_y'] = by
inst['unit_fa_ecef_z'] = bz
inst['unit_mer_ecef_x'] = mx
inst['unit_mer_ecef_y'] = my
inst['unit_mer_ecef_z'] = mz
inst.meta['unit_zon_ecef_x'] = {'long_name': 'Zonal unit vector along ECEF-x',
'desc': 'Zonal unit vector along ECEF-x',
'label': 'Zonal unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_y'] = {'long_name': 'Zonal unit vector along ECEF-y',
'desc': 'Zonal unit vector along ECEF-y',
'label': 'Zonal unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_z'] = {'long_name': 'Zonal unit vector along ECEF-z',
'desc': 'Zonal unit vector along ECEF-z',
'label': 'Zonal unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_x'] = {'long_name': 'Field-aligned unit vector along ECEF-x',
'desc': 'Field-aligned unit vector along ECEF-x',
'label': 'Field-aligned unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_y'] = {'long_name': 'Field-aligned unit vector along ECEF-y',
'desc': 'Field-aligned unit vector along ECEF-y',
'label': 'Field-aligned unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_z'] = {'long_name': 'Field-aligned unit vector along ECEF-z',
'desc': 'Field-aligned unit vector along ECEF-z',
'label': 'Field-aligned unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_x'] = {'long_name': 'Meridional unit vector along ECEF-x',
'desc': 'Meridional unit vector along ECEF-x',
'label': 'Meridional unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_y'] = {'long_name': 'Meridional unit vector along ECEF-y',
'desc': 'Meridional unit vector along ECEF-y',
'label': 'Meridional unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_z'] = {'long_name': 'Meridional unit vector along ECEF-z',
'desc': 'Meridional unit vector along ECEF-z',
'label': 'Meridional unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
return | Adds unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Parameters
----------
inst : pysat.Instrument
Instrument object that will get unit vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
Returns
-------
None
unit vectors are added to the passed Instrument object with a naming
scheme:
'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)
'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)
'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z) | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/satellite.py#L3-L168 | null | from . import *
def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10.,
ref_height=120.):
"""Adds unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Parameters
----------
inst : pysat.Instrument
Instrument object that will get unit vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
Returns
-------
None
unit vectors are added to the passed Instrument object with a naming
scheme:
'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)
'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)
'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z)
"""
# add unit vectors for magnetic drifts in ecef coordinates
zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(inst['latitude'],
inst['longitude'], inst['altitude'], inst.data.index,
steps=steps, max_steps=max_steps, step_size=step_size, ref_height=ref_height)
inst['unit_zon_ecef_x'] = zvx
inst['unit_zon_ecef_y'] = zvy
inst['unit_zon_ecef_z'] = zvz
inst['unit_fa_ecef_x'] = bx
inst['unit_fa_ecef_y'] = by
inst['unit_fa_ecef_z'] = bz
inst['unit_mer_ecef_x'] = mx
inst['unit_mer_ecef_y'] = my
inst['unit_mer_ecef_z'] = mz
inst.meta['unit_zon_ecef_x'] = {'long_name': 'Zonal unit vector along ECEF-x',
'desc': 'Zonal unit vector along ECEF-x',
'label': 'Zonal unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_y'] = {'long_name': 'Zonal unit vector along ECEF-y',
'desc': 'Zonal unit vector along ECEF-y',
'label': 'Zonal unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_z'] = {'long_name': 'Zonal unit vector along ECEF-z',
'desc': 'Zonal unit vector along ECEF-z',
'label': 'Zonal unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_x'] = {'long_name': 'Field-aligned unit vector along ECEF-x',
'desc': 'Field-aligned unit vector along ECEF-x',
'label': 'Field-aligned unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_y'] = {'long_name': 'Field-aligned unit vector along ECEF-y',
'desc': 'Field-aligned unit vector along ECEF-y',
'label': 'Field-aligned unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_z'] = {'long_name': 'Field-aligned unit vector along ECEF-z',
'desc': 'Field-aligned unit vector along ECEF-z',
'label': 'Field-aligned unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_x'] = {'long_name': 'Meridional unit vector along ECEF-x',
'desc': 'Meridional unit vector along ECEF-x',
'label': 'Meridional unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_y'] = {'long_name': 'Meridional unit vector along ECEF-y',
'desc': 'Meridional unit vector along ECEF-y',
'label': 'Meridional unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_z'] = {'long_name': 'Meridional unit vector along ECEF-z',
'desc': 'Meridional unit vector along ECEF-z',
'label': 'Meridional unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
return
def add_mag_drift_unit_vectors(inst, max_steps=40000, step_size=10.):
"""Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Interally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis.
"""
# vectors are returned in geo/ecef coordinate system
add_mag_drift_unit_vectors_ecef(inst, max_steps=max_steps, step_size=step_size)
# convert them to S/C using transformation supplied by OA
inst['unit_zon_x'], inst['unit_zon_y'], inst['unit_zon_z'] = project_ecef_vector_onto_basis(inst['unit_zon_ecef_x'], inst['unit_zon_ecef_y'], inst['unit_zon_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_fa_x'], inst['unit_fa_y'], inst['unit_fa_z'] = project_ecef_vector_onto_basis(inst['unit_fa_ecef_x'], inst['unit_fa_ecef_y'], inst['unit_fa_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_mer_x'], inst['unit_mer_y'], inst['unit_mer_z'] = project_ecef_vector_onto_basis(inst['unit_mer_ecef_x'], inst['unit_mer_ecef_y'], inst['unit_mer_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst.meta['unit_zon_x'] = { 'long_name':'Zonal direction along IVM-x',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-X component',
'axis': 'Zonal Unit Vector: IVM-X component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_y'] = {'long_name':'Zonal direction along IVM-y',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Y component',
'axis': 'Zonal Unit Vector: IVM-Y component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_z'] = {'long_name':'Zonal direction along IVM-z',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Z component',
'axis': 'Zonal Unit Vector: IVM-Z component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_x'] = {'long_name':'Field-aligned direction along IVM-x',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-X component',
'axis': 'Field Aligned Unit Vector: IVM-X component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_y'] = {'long_name':'Field-aligned direction along IVM-y',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Y component',
'axis': 'Field Aligned Unit Vector: IVM-Y component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_z'] = {'long_name':'Field-aligned direction along IVM-z',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Z component',
'axis': 'Field Aligned Unit Vector: IVM-Z component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_x'] = {'long_name':'Meridional direction along IVM-x',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-X component',
'axis': 'Meridional Unit Vector: IVM-X component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_y'] = {'long_name':'Meridional direction along IVM-y',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Y component',
'axis': 'Meridional Unit Vector: IVM-Y component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_z'] = {'long_name':'Meridional direction along IVM-z',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Z component',
'axis': 'Meridional Unit Vector: IVM-Z component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
return
def add_mag_drifts(inst):
"""Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
along with pre-calculated unit vectors for magnetic coordinates.
Note
----
Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
unit vectors labels 'unit_zonal_*', 'unit_fa_*', and 'unit_mer_*',
where the unit vectors are expressed in S/C coordinates. These
vectors are calculated by add_mag_drift_unit_vectors.
Parameters
----------
inst : pysat.Instrument
Instrument object will be modified to include new ion drift magnitudes
Returns
-------
None
Instrument object modified in place
"""
inst['iv_zon'] = {'data':inst['unit_zon_x'] * inst['iv_x'] + inst['unit_zon_y']*inst['iv_y'] + inst['unit_zon_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Zonal ion velocity',
'notes':('Ion velocity relative to co-rotation along zonal '
'direction, normal to meridional plane. Positive east. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label': 'Zonal Ion Velocity',
'axis': 'Zonal Ion Velocity',
'desc': 'Zonal ion velocity',
'scale': 'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_fa'] = {'data':inst['unit_fa_x'] * inst['iv_x'] + inst['unit_fa_y'] * inst['iv_y'] + inst['unit_fa_z'] * inst['iv_z'],
'units':'m/s',
'long_name':'Field-Aligned ion velocity',
'notes':('Ion velocity relative to co-rotation along magnetic field line. Positive along the field. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Field-Aligned Ion Velocity',
'axis':'Field-Aligned Ion Velocity',
'desc':'Field-Aligned Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_mer'] = {'data':inst['unit_mer_x'] * inst['iv_x'] + inst['unit_mer_y']*inst['iv_y'] + inst['unit_mer_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane. Positive is up at magnetic equator. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Meridional Ion Velocity',
'axis':'Meridional Ion Velocity',
'desc':'Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
return
def add_footpoint_and_equatorial_drifts(inst, equ_mer_scalar='equ_mer_drifts_scalar',
equ_zonal_scalar='equ_zon_drifts_scalar',
north_mer_scalar='north_footpoint_mer_drifts_scalar',
north_zon_scalar='north_footpoint_zon_drifts_scalar',
south_mer_scalar='south_footpoint_mer_drifts_scalar',
south_zon_scalar='south_footpoint_zon_drifts_scalar',
mer_drift='iv_mer',
zon_drift='iv_zon'):
"""Translates geomagnetic ion velocities to those at footpoints and magnetic equator.
Note
----
Presumes scalar values for mapping ion velocities are already in the inst, labeled
by north_footpoint_zon_drifts_scalar, north_footpoint_mer_drifts_scalar,
equ_mer_drifts_scalar, equ_zon_drifts_scalar.
Also presumes that ion motions in the geomagnetic system are present and labeled
as 'iv_mer' and 'iv_zon' for meridional and zonal ion motions.
This naming scheme is used by the other pysat oriented routines
in this package.
Parameters
----------
inst : pysat.Instrument
equ_mer_scalar : string
Label used to identify equatorial scalar for meridional ion drift
equ_zon_scalar : string
Label used to identify equatorial scalar for zonal ion drift
north_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
north_zon_scalar : string
Label used to identify northern footpoint scalar for zonal ion drift
south_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
south_zon_scalar : string
Label used to identify southern footpoint scalar for zonal ion drift
mer_drift : string
Label used to identify meridional ion drifts within inst
zon_drift : string
Label used to identify zonal ion drifts within inst
Returns
-------
None
Modifies pysat.Instrument object in place. Drifts mapped to the magnetic equator
are labeled 'equ_mer_drift' and 'equ_zon_drift'. Mappings to the northern
and southern footpoints are labeled 'south_footpoint_mer_drift' and
'south_footpoint_zon_drift'. Similarly for the northern hemisphere.
"""
inst['equ_mer_drift'] = {'data' : inst[equ_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Equatorial meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'magnetic equator. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Meridional Ion Velocity',
'axis':'Equatorial Meridional Ion Velocity',
'desc':'Equatorial Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['equ_zon_drift'] = {'data' : inst[equ_zonal_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Equatorial zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'magnetic equator. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Zonal Ion Velocity',
'axis':'Equatorial Zonal Ion Velocity',
'desc':'Equatorial Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_mer_drift'] = {'data' : inst[south_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Southern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'southern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Meridional Ion Velocity',
'axis':'Southern Meridional Ion Velocity',
'desc':'Southern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_zon_drift'] = {'data':inst[south_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Southern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'southern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the southern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Zonal Ion Velocity',
'axis':'Southern Zonal Ion Velocity',
'desc':'Southern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_mer_drift'] = {'data':inst[north_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Northern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'northern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Meridional Ion Velocity',
'axis':'Northern Meridional Ion Velocity',
'desc':'Northern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_zon_drift'] = {'data':inst[north_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Northern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'northern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the northern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Zonal Ion Velocity',
'axis':'Northern Zonal Ion Velocity',
'desc':'Northern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
|
rstoneback/pysatMagVect | pysatMagVect/satellite.py | add_mag_drift_unit_vectors | python | def add_mag_drift_unit_vectors(inst, max_steps=40000, step_size=10.):
# vectors are returned in geo/ecef coordinate system
add_mag_drift_unit_vectors_ecef(inst, max_steps=max_steps, step_size=step_size)
# convert them to S/C using transformation supplied by OA
inst['unit_zon_x'], inst['unit_zon_y'], inst['unit_zon_z'] = project_ecef_vector_onto_basis(inst['unit_zon_ecef_x'], inst['unit_zon_ecef_y'], inst['unit_zon_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_fa_x'], inst['unit_fa_y'], inst['unit_fa_z'] = project_ecef_vector_onto_basis(inst['unit_fa_ecef_x'], inst['unit_fa_ecef_y'], inst['unit_fa_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_mer_x'], inst['unit_mer_y'], inst['unit_mer_z'] = project_ecef_vector_onto_basis(inst['unit_mer_ecef_x'], inst['unit_mer_ecef_y'], inst['unit_mer_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst.meta['unit_zon_x'] = { 'long_name':'Zonal direction along IVM-x',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-X component',
'axis': 'Zonal Unit Vector: IVM-X component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_y'] = {'long_name':'Zonal direction along IVM-y',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Y component',
'axis': 'Zonal Unit Vector: IVM-Y component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_z'] = {'long_name':'Zonal direction along IVM-z',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Z component',
'axis': 'Zonal Unit Vector: IVM-Z component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_x'] = {'long_name':'Field-aligned direction along IVM-x',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-X component',
'axis': 'Field Aligned Unit Vector: IVM-X component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_y'] = {'long_name':'Field-aligned direction along IVM-y',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Y component',
'axis': 'Field Aligned Unit Vector: IVM-Y component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_z'] = {'long_name':'Field-aligned direction along IVM-z',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Z component',
'axis': 'Field Aligned Unit Vector: IVM-Z component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_x'] = {'long_name':'Meridional direction along IVM-x',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-X component',
'axis': 'Meridional Unit Vector: IVM-X component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_y'] = {'long_name':'Meridional direction along IVM-y',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Y component',
'axis': 'Meridional Unit Vector: IVM-Y component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_z'] = {'long_name':'Meridional direction along IVM-z',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Z component',
'axis': 'Meridional Unit Vector: IVM-Z component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
return | Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Interally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis. | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/satellite.py#L171-L342 | [
"def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10.,\n ref_height=120.):\n \"\"\"Adds unit vectors expressing the ion drift coordinate system\n organized by the geomagnetic field. Unit vectors are expressed\n in ECEF coordinates.\n\n Parameters\n ----------\n inst : pysat.Instrument\n Instrument object that will get unit vectors\n max_steps : int\n Maximum number of steps allowed for field line tracing\n step_size : float\n Maximum step size (km) allowed when field line tracing\n ref_height : float\n Altitude used as cutoff for labeling a field line location a footpoint\n\n Returns\n -------\n None\n unit vectors are added to the passed Instrument object with a naming \n scheme:\n 'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)\n 'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)\n 'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z)\n\n \"\"\"\n\n # add unit vectors for magnetic drifts in ecef coordinates\n zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(inst['latitude'], \n inst['longitude'], inst['altitude'], inst.data.index,\n steps=steps, max_steps=max_steps, step_size=step_size, ref_height=ref_height)\n\n inst['unit_zon_ecef_x'] = zvx\n inst['unit_zon_ecef_y'] = zvy\n inst['unit_zon_ecef_z'] = zvz\n\n inst['unit_fa_ecef_x'] = bx\n inst['unit_fa_ecef_y'] = by\n inst['unit_fa_ecef_z'] = bz\n\n inst['unit_mer_ecef_x'] = mx\n inst['unit_mer_ecef_y'] = my\n inst['unit_mer_ecef_z'] = mz\n\n inst.meta['unit_zon_ecef_x'] = {'long_name': 'Zonal unit vector along ECEF-x',\n 'desc': 'Zonal unit vector along ECEF-x',\n 'label': 'Zonal unit vector along ECEF-x',\n 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '\n 'Vector system is calcluated by field-line tracing along IGRF values '\n 'down to reference altitudes of 120 km in both the Northern and Southern '\n 'hemispheres. These two points, along with the satellite position, are '\n 'used to define the magnetic meridian. Vector math from here generates '\n 'the orthogonal system.'),\n 'axis': 'Zonal unit vector along ECEF-x',\n 'value_min': -1.,\n 'value_max': 1.,\n }\n inst.meta['unit_zon_ecef_y'] = {'long_name': 'Zonal unit vector along ECEF-y',\n 'desc': 'Zonal unit vector along ECEF-y',\n 'label': 'Zonal unit vector along ECEF-y',\n 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '\n 'Vector system is calcluated by field-line tracing along IGRF values '\n 'down to reference altitudes of 120 km in both the Northern and Southern '\n 'hemispheres. These two points, along with the satellite position, are '\n 'used to define the magnetic meridian. Vector math from here generates '\n 'the orthogonal system.'),\n 'axis': 'Zonal unit vector along ECEF-y',\n 'value_min': -1.,\n 'value_max': 1.,\n }\n inst.meta['unit_zon_ecef_z'] = {'long_name': 'Zonal unit vector along ECEF-z',\n 'desc': 'Zonal unit vector along ECEF-z',\n 'label': 'Zonal unit vector along ECEF-z',\n 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '\n 'Vector system is calcluated by field-line tracing along IGRF values '\n 'down to reference altitudes of 120 km in both the Northern and Southern '\n 'hemispheres. These two points, along with the satellite position, are '\n 'used to define the magnetic meridian. Vector math from here generates '\n 'the orthogonal system.'),\n 'axis': 'Zonal unit vector along ECEF-z',\n 'value_min': -1.,\n 'value_max': 1.,\n }\n\n inst.meta['unit_fa_ecef_x'] = {'long_name': 'Field-aligned unit vector along ECEF-x',\n 'desc': 'Field-aligned unit vector along ECEF-x',\n 'label': 'Field-aligned unit vector along ECEF-x',\n 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '\n 'Vector system is calcluated by field-line tracing along IGRF values '\n 'down to reference altitudes of 120 km in both the Northern and Southern '\n 'hemispheres. These two points, along with the satellite position, are '\n 'used to define the magnetic meridian. Vector math from here generates '\n 'the orthogonal system.'),\n 'axis': 'Field-aligned unit vector along ECEF-x',\n 'value_min': -1.,\n 'value_max': 1.,\n }\n inst.meta['unit_fa_ecef_y'] = {'long_name': 'Field-aligned unit vector along ECEF-y',\n 'desc': 'Field-aligned unit vector along ECEF-y',\n 'label': 'Field-aligned unit vector along ECEF-y',\n 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '\n 'Vector system is calcluated by field-line tracing along IGRF values '\n 'down to reference altitudes of 120 km in both the Northern and Southern '\n 'hemispheres. These two points, along with the satellite position, are '\n 'used to define the magnetic meridian. Vector math from here generates '\n 'the orthogonal system.'),\n 'axis': 'Field-aligned unit vector along ECEF-y',\n 'value_min': -1.,\n 'value_max': 1.,\n }\n inst.meta['unit_fa_ecef_z'] = {'long_name': 'Field-aligned unit vector along ECEF-z',\n 'desc': 'Field-aligned unit vector along ECEF-z',\n 'label': 'Field-aligned unit vector along ECEF-z',\n 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '\n 'Vector system is calcluated by field-line tracing along IGRF values '\n 'down to reference altitudes of 120 km in both the Northern and Southern '\n 'hemispheres. These two points, along with the satellite position, are '\n 'used to define the magnetic meridian. Vector math from here generates '\n 'the orthogonal system.'),\n 'axis': 'Field-aligned unit vector along ECEF-z',\n 'value_min': -1.,\n 'value_max': 1.,\n }\n\n inst.meta['unit_mer_ecef_x'] = {'long_name': 'Meridional unit vector along ECEF-x',\n 'desc': 'Meridional unit vector along ECEF-x',\n 'label': 'Meridional unit vector along ECEF-x',\n 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '\n 'Vector system is calcluated by field-line tracing along IGRF values '\n 'down to reference altitudes of 120 km in both the Northern and Southern '\n 'hemispheres. These two points, along with the satellite position, are '\n 'used to define the magnetic meridian. Vector math from here generates '\n 'the orthogonal system.'),\n 'axis': 'Meridional unit vector along ECEF-x',\n 'value_min': -1.,\n 'value_max': 1.,\n }\n inst.meta['unit_mer_ecef_y'] = {'long_name': 'Meridional unit vector along ECEF-y',\n 'desc': 'Meridional unit vector along ECEF-y',\n 'label': 'Meridional unit vector along ECEF-y',\n 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '\n 'Vector system is calcluated by field-line tracing along IGRF values '\n 'down to reference altitudes of 120 km in both the Northern and Southern '\n 'hemispheres. These two points, along with the satellite position, are '\n 'used to define the magnetic meridian. Vector math from here generates '\n 'the orthogonal system.'),\n 'axis': 'Meridional unit vector along ECEF-y',\n 'value_min': -1.,\n 'value_max': 1.,\n }\n inst.meta['unit_mer_ecef_z'] = {'long_name': 'Meridional unit vector along ECEF-z',\n 'desc': 'Meridional unit vector along ECEF-z',\n 'label': 'Meridional unit vector along ECEF-z',\n 'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '\n 'Vector system is calcluated by field-line tracing along IGRF values '\n 'down to reference altitudes of 120 km in both the Northern and Southern '\n 'hemispheres. These two points, along with the satellite position, are '\n 'used to define the magnetic meridian. Vector math from here generates '\n 'the orthogonal system.'),\n 'axis': 'Meridional unit vector along ECEF-z',\n 'value_min': -1.,\n 'value_max': 1.,\n }\n\n return\n"
] | from . import *
def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10.,
ref_height=120.):
"""Adds unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Parameters
----------
inst : pysat.Instrument
Instrument object that will get unit vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
Returns
-------
None
unit vectors are added to the passed Instrument object with a naming
scheme:
'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)
'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)
'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z)
"""
# add unit vectors for magnetic drifts in ecef coordinates
zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(inst['latitude'],
inst['longitude'], inst['altitude'], inst.data.index,
steps=steps, max_steps=max_steps, step_size=step_size, ref_height=ref_height)
inst['unit_zon_ecef_x'] = zvx
inst['unit_zon_ecef_y'] = zvy
inst['unit_zon_ecef_z'] = zvz
inst['unit_fa_ecef_x'] = bx
inst['unit_fa_ecef_y'] = by
inst['unit_fa_ecef_z'] = bz
inst['unit_mer_ecef_x'] = mx
inst['unit_mer_ecef_y'] = my
inst['unit_mer_ecef_z'] = mz
inst.meta['unit_zon_ecef_x'] = {'long_name': 'Zonal unit vector along ECEF-x',
'desc': 'Zonal unit vector along ECEF-x',
'label': 'Zonal unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_y'] = {'long_name': 'Zonal unit vector along ECEF-y',
'desc': 'Zonal unit vector along ECEF-y',
'label': 'Zonal unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_z'] = {'long_name': 'Zonal unit vector along ECEF-z',
'desc': 'Zonal unit vector along ECEF-z',
'label': 'Zonal unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_x'] = {'long_name': 'Field-aligned unit vector along ECEF-x',
'desc': 'Field-aligned unit vector along ECEF-x',
'label': 'Field-aligned unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_y'] = {'long_name': 'Field-aligned unit vector along ECEF-y',
'desc': 'Field-aligned unit vector along ECEF-y',
'label': 'Field-aligned unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_z'] = {'long_name': 'Field-aligned unit vector along ECEF-z',
'desc': 'Field-aligned unit vector along ECEF-z',
'label': 'Field-aligned unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_x'] = {'long_name': 'Meridional unit vector along ECEF-x',
'desc': 'Meridional unit vector along ECEF-x',
'label': 'Meridional unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_y'] = {'long_name': 'Meridional unit vector along ECEF-y',
'desc': 'Meridional unit vector along ECEF-y',
'label': 'Meridional unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_z'] = {'long_name': 'Meridional unit vector along ECEF-z',
'desc': 'Meridional unit vector along ECEF-z',
'label': 'Meridional unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
return
def add_mag_drift_unit_vectors(inst, max_steps=40000, step_size=10.):
"""Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Interally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis.
"""
# vectors are returned in geo/ecef coordinate system
add_mag_drift_unit_vectors_ecef(inst, max_steps=max_steps, step_size=step_size)
# convert them to S/C using transformation supplied by OA
inst['unit_zon_x'], inst['unit_zon_y'], inst['unit_zon_z'] = project_ecef_vector_onto_basis(inst['unit_zon_ecef_x'], inst['unit_zon_ecef_y'], inst['unit_zon_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_fa_x'], inst['unit_fa_y'], inst['unit_fa_z'] = project_ecef_vector_onto_basis(inst['unit_fa_ecef_x'], inst['unit_fa_ecef_y'], inst['unit_fa_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_mer_x'], inst['unit_mer_y'], inst['unit_mer_z'] = project_ecef_vector_onto_basis(inst['unit_mer_ecef_x'], inst['unit_mer_ecef_y'], inst['unit_mer_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst.meta['unit_zon_x'] = { 'long_name':'Zonal direction along IVM-x',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-X component',
'axis': 'Zonal Unit Vector: IVM-X component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_y'] = {'long_name':'Zonal direction along IVM-y',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Y component',
'axis': 'Zonal Unit Vector: IVM-Y component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_z'] = {'long_name':'Zonal direction along IVM-z',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Z component',
'axis': 'Zonal Unit Vector: IVM-Z component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_x'] = {'long_name':'Field-aligned direction along IVM-x',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-X component',
'axis': 'Field Aligned Unit Vector: IVM-X component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_y'] = {'long_name':'Field-aligned direction along IVM-y',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Y component',
'axis': 'Field Aligned Unit Vector: IVM-Y component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_z'] = {'long_name':'Field-aligned direction along IVM-z',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Z component',
'axis': 'Field Aligned Unit Vector: IVM-Z component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_x'] = {'long_name':'Meridional direction along IVM-x',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-X component',
'axis': 'Meridional Unit Vector: IVM-X component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_y'] = {'long_name':'Meridional direction along IVM-y',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Y component',
'axis': 'Meridional Unit Vector: IVM-Y component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_z'] = {'long_name':'Meridional direction along IVM-z',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Z component',
'axis': 'Meridional Unit Vector: IVM-Z component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
return
def add_mag_drifts(inst):
"""Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
along with pre-calculated unit vectors for magnetic coordinates.
Note
----
Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
unit vectors labels 'unit_zonal_*', 'unit_fa_*', and 'unit_mer_*',
where the unit vectors are expressed in S/C coordinates. These
vectors are calculated by add_mag_drift_unit_vectors.
Parameters
----------
inst : pysat.Instrument
Instrument object will be modified to include new ion drift magnitudes
Returns
-------
None
Instrument object modified in place
"""
inst['iv_zon'] = {'data':inst['unit_zon_x'] * inst['iv_x'] + inst['unit_zon_y']*inst['iv_y'] + inst['unit_zon_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Zonal ion velocity',
'notes':('Ion velocity relative to co-rotation along zonal '
'direction, normal to meridional plane. Positive east. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label': 'Zonal Ion Velocity',
'axis': 'Zonal Ion Velocity',
'desc': 'Zonal ion velocity',
'scale': 'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_fa'] = {'data':inst['unit_fa_x'] * inst['iv_x'] + inst['unit_fa_y'] * inst['iv_y'] + inst['unit_fa_z'] * inst['iv_z'],
'units':'m/s',
'long_name':'Field-Aligned ion velocity',
'notes':('Ion velocity relative to co-rotation along magnetic field line. Positive along the field. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Field-Aligned Ion Velocity',
'axis':'Field-Aligned Ion Velocity',
'desc':'Field-Aligned Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_mer'] = {'data':inst['unit_mer_x'] * inst['iv_x'] + inst['unit_mer_y']*inst['iv_y'] + inst['unit_mer_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane. Positive is up at magnetic equator. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Meridional Ion Velocity',
'axis':'Meridional Ion Velocity',
'desc':'Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
return
def add_footpoint_and_equatorial_drifts(inst, equ_mer_scalar='equ_mer_drifts_scalar',
equ_zonal_scalar='equ_zon_drifts_scalar',
north_mer_scalar='north_footpoint_mer_drifts_scalar',
north_zon_scalar='north_footpoint_zon_drifts_scalar',
south_mer_scalar='south_footpoint_mer_drifts_scalar',
south_zon_scalar='south_footpoint_zon_drifts_scalar',
mer_drift='iv_mer',
zon_drift='iv_zon'):
"""Translates geomagnetic ion velocities to those at footpoints and magnetic equator.
Note
----
Presumes scalar values for mapping ion velocities are already in the inst, labeled
by north_footpoint_zon_drifts_scalar, north_footpoint_mer_drifts_scalar,
equ_mer_drifts_scalar, equ_zon_drifts_scalar.
Also presumes that ion motions in the geomagnetic system are present and labeled
as 'iv_mer' and 'iv_zon' for meridional and zonal ion motions.
This naming scheme is used by the other pysat oriented routines
in this package.
Parameters
----------
inst : pysat.Instrument
equ_mer_scalar : string
Label used to identify equatorial scalar for meridional ion drift
equ_zon_scalar : string
Label used to identify equatorial scalar for zonal ion drift
north_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
north_zon_scalar : string
Label used to identify northern footpoint scalar for zonal ion drift
south_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
south_zon_scalar : string
Label used to identify southern footpoint scalar for zonal ion drift
mer_drift : string
Label used to identify meridional ion drifts within inst
zon_drift : string
Label used to identify zonal ion drifts within inst
Returns
-------
None
Modifies pysat.Instrument object in place. Drifts mapped to the magnetic equator
are labeled 'equ_mer_drift' and 'equ_zon_drift'. Mappings to the northern
and southern footpoints are labeled 'south_footpoint_mer_drift' and
'south_footpoint_zon_drift'. Similarly for the northern hemisphere.
"""
inst['equ_mer_drift'] = {'data' : inst[equ_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Equatorial meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'magnetic equator. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Meridional Ion Velocity',
'axis':'Equatorial Meridional Ion Velocity',
'desc':'Equatorial Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['equ_zon_drift'] = {'data' : inst[equ_zonal_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Equatorial zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'magnetic equator. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Zonal Ion Velocity',
'axis':'Equatorial Zonal Ion Velocity',
'desc':'Equatorial Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_mer_drift'] = {'data' : inst[south_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Southern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'southern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Meridional Ion Velocity',
'axis':'Southern Meridional Ion Velocity',
'desc':'Southern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_zon_drift'] = {'data':inst[south_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Southern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'southern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the southern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Zonal Ion Velocity',
'axis':'Southern Zonal Ion Velocity',
'desc':'Southern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_mer_drift'] = {'data':inst[north_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Northern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'northern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Meridional Ion Velocity',
'axis':'Northern Meridional Ion Velocity',
'desc':'Northern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_zon_drift'] = {'data':inst[north_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Northern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'northern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the northern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Zonal Ion Velocity',
'axis':'Northern Zonal Ion Velocity',
'desc':'Northern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
|
rstoneback/pysatMagVect | pysatMagVect/satellite.py | add_mag_drifts | python | def add_mag_drifts(inst):
inst['iv_zon'] = {'data':inst['unit_zon_x'] * inst['iv_x'] + inst['unit_zon_y']*inst['iv_y'] + inst['unit_zon_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Zonal ion velocity',
'notes':('Ion velocity relative to co-rotation along zonal '
'direction, normal to meridional plane. Positive east. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label': 'Zonal Ion Velocity',
'axis': 'Zonal Ion Velocity',
'desc': 'Zonal ion velocity',
'scale': 'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_fa'] = {'data':inst['unit_fa_x'] * inst['iv_x'] + inst['unit_fa_y'] * inst['iv_y'] + inst['unit_fa_z'] * inst['iv_z'],
'units':'m/s',
'long_name':'Field-Aligned ion velocity',
'notes':('Ion velocity relative to co-rotation along magnetic field line. Positive along the field. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Field-Aligned Ion Velocity',
'axis':'Field-Aligned Ion Velocity',
'desc':'Field-Aligned Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_mer'] = {'data':inst['unit_mer_x'] * inst['iv_x'] + inst['unit_mer_y']*inst['iv_y'] + inst['unit_mer_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane. Positive is up at magnetic equator. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Meridional Ion Velocity',
'axis':'Meridional Ion Velocity',
'desc':'Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
return | Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
along with pre-calculated unit vectors for magnetic coordinates.
Note
----
Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
unit vectors labels 'unit_zonal_*', 'unit_fa_*', and 'unit_mer_*',
where the unit vectors are expressed in S/C coordinates. These
vectors are calculated by add_mag_drift_unit_vectors.
Parameters
----------
inst : pysat.Instrument
Instrument object will be modified to include new ion drift magnitudes
Returns
-------
None
Instrument object modified in place | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/satellite.py#L345-L415 | null | from . import *
def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10.,
ref_height=120.):
"""Adds unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Parameters
----------
inst : pysat.Instrument
Instrument object that will get unit vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
Returns
-------
None
unit vectors are added to the passed Instrument object with a naming
scheme:
'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)
'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)
'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z)
"""
# add unit vectors for magnetic drifts in ecef coordinates
zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(inst['latitude'],
inst['longitude'], inst['altitude'], inst.data.index,
steps=steps, max_steps=max_steps, step_size=step_size, ref_height=ref_height)
inst['unit_zon_ecef_x'] = zvx
inst['unit_zon_ecef_y'] = zvy
inst['unit_zon_ecef_z'] = zvz
inst['unit_fa_ecef_x'] = bx
inst['unit_fa_ecef_y'] = by
inst['unit_fa_ecef_z'] = bz
inst['unit_mer_ecef_x'] = mx
inst['unit_mer_ecef_y'] = my
inst['unit_mer_ecef_z'] = mz
inst.meta['unit_zon_ecef_x'] = {'long_name': 'Zonal unit vector along ECEF-x',
'desc': 'Zonal unit vector along ECEF-x',
'label': 'Zonal unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_y'] = {'long_name': 'Zonal unit vector along ECEF-y',
'desc': 'Zonal unit vector along ECEF-y',
'label': 'Zonal unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_z'] = {'long_name': 'Zonal unit vector along ECEF-z',
'desc': 'Zonal unit vector along ECEF-z',
'label': 'Zonal unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_x'] = {'long_name': 'Field-aligned unit vector along ECEF-x',
'desc': 'Field-aligned unit vector along ECEF-x',
'label': 'Field-aligned unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_y'] = {'long_name': 'Field-aligned unit vector along ECEF-y',
'desc': 'Field-aligned unit vector along ECEF-y',
'label': 'Field-aligned unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_z'] = {'long_name': 'Field-aligned unit vector along ECEF-z',
'desc': 'Field-aligned unit vector along ECEF-z',
'label': 'Field-aligned unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_x'] = {'long_name': 'Meridional unit vector along ECEF-x',
'desc': 'Meridional unit vector along ECEF-x',
'label': 'Meridional unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_y'] = {'long_name': 'Meridional unit vector along ECEF-y',
'desc': 'Meridional unit vector along ECEF-y',
'label': 'Meridional unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_z'] = {'long_name': 'Meridional unit vector along ECEF-z',
'desc': 'Meridional unit vector along ECEF-z',
'label': 'Meridional unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
return
def add_mag_drift_unit_vectors(inst, max_steps=40000, step_size=10.):
"""Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Interally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis.
"""
# vectors are returned in geo/ecef coordinate system
add_mag_drift_unit_vectors_ecef(inst, max_steps=max_steps, step_size=step_size)
# convert them to S/C using transformation supplied by OA
inst['unit_zon_x'], inst['unit_zon_y'], inst['unit_zon_z'] = project_ecef_vector_onto_basis(inst['unit_zon_ecef_x'], inst['unit_zon_ecef_y'], inst['unit_zon_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_fa_x'], inst['unit_fa_y'], inst['unit_fa_z'] = project_ecef_vector_onto_basis(inst['unit_fa_ecef_x'], inst['unit_fa_ecef_y'], inst['unit_fa_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_mer_x'], inst['unit_mer_y'], inst['unit_mer_z'] = project_ecef_vector_onto_basis(inst['unit_mer_ecef_x'], inst['unit_mer_ecef_y'], inst['unit_mer_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst.meta['unit_zon_x'] = { 'long_name':'Zonal direction along IVM-x',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-X component',
'axis': 'Zonal Unit Vector: IVM-X component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_y'] = {'long_name':'Zonal direction along IVM-y',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Y component',
'axis': 'Zonal Unit Vector: IVM-Y component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_z'] = {'long_name':'Zonal direction along IVM-z',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Z component',
'axis': 'Zonal Unit Vector: IVM-Z component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_x'] = {'long_name':'Field-aligned direction along IVM-x',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-X component',
'axis': 'Field Aligned Unit Vector: IVM-X component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_y'] = {'long_name':'Field-aligned direction along IVM-y',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Y component',
'axis': 'Field Aligned Unit Vector: IVM-Y component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_z'] = {'long_name':'Field-aligned direction along IVM-z',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Z component',
'axis': 'Field Aligned Unit Vector: IVM-Z component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_x'] = {'long_name':'Meridional direction along IVM-x',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-X component',
'axis': 'Meridional Unit Vector: IVM-X component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_y'] = {'long_name':'Meridional direction along IVM-y',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Y component',
'axis': 'Meridional Unit Vector: IVM-Y component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_z'] = {'long_name':'Meridional direction along IVM-z',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Z component',
'axis': 'Meridional Unit Vector: IVM-Z component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
return
def add_mag_drifts(inst):
"""Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
along with pre-calculated unit vectors for magnetic coordinates.
Note
----
Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
unit vectors labels 'unit_zonal_*', 'unit_fa_*', and 'unit_mer_*',
where the unit vectors are expressed in S/C coordinates. These
vectors are calculated by add_mag_drift_unit_vectors.
Parameters
----------
inst : pysat.Instrument
Instrument object will be modified to include new ion drift magnitudes
Returns
-------
None
Instrument object modified in place
"""
inst['iv_zon'] = {'data':inst['unit_zon_x'] * inst['iv_x'] + inst['unit_zon_y']*inst['iv_y'] + inst['unit_zon_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Zonal ion velocity',
'notes':('Ion velocity relative to co-rotation along zonal '
'direction, normal to meridional plane. Positive east. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label': 'Zonal Ion Velocity',
'axis': 'Zonal Ion Velocity',
'desc': 'Zonal ion velocity',
'scale': 'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_fa'] = {'data':inst['unit_fa_x'] * inst['iv_x'] + inst['unit_fa_y'] * inst['iv_y'] + inst['unit_fa_z'] * inst['iv_z'],
'units':'m/s',
'long_name':'Field-Aligned ion velocity',
'notes':('Ion velocity relative to co-rotation along magnetic field line. Positive along the field. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Field-Aligned Ion Velocity',
'axis':'Field-Aligned Ion Velocity',
'desc':'Field-Aligned Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_mer'] = {'data':inst['unit_mer_x'] * inst['iv_x'] + inst['unit_mer_y']*inst['iv_y'] + inst['unit_mer_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane. Positive is up at magnetic equator. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Meridional Ion Velocity',
'axis':'Meridional Ion Velocity',
'desc':'Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
return
def add_footpoint_and_equatorial_drifts(inst, equ_mer_scalar='equ_mer_drifts_scalar',
equ_zonal_scalar='equ_zon_drifts_scalar',
north_mer_scalar='north_footpoint_mer_drifts_scalar',
north_zon_scalar='north_footpoint_zon_drifts_scalar',
south_mer_scalar='south_footpoint_mer_drifts_scalar',
south_zon_scalar='south_footpoint_zon_drifts_scalar',
mer_drift='iv_mer',
zon_drift='iv_zon'):
"""Translates geomagnetic ion velocities to those at footpoints and magnetic equator.
Note
----
Presumes scalar values for mapping ion velocities are already in the inst, labeled
by north_footpoint_zon_drifts_scalar, north_footpoint_mer_drifts_scalar,
equ_mer_drifts_scalar, equ_zon_drifts_scalar.
Also presumes that ion motions in the geomagnetic system are present and labeled
as 'iv_mer' and 'iv_zon' for meridional and zonal ion motions.
This naming scheme is used by the other pysat oriented routines
in this package.
Parameters
----------
inst : pysat.Instrument
equ_mer_scalar : string
Label used to identify equatorial scalar for meridional ion drift
equ_zon_scalar : string
Label used to identify equatorial scalar for zonal ion drift
north_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
north_zon_scalar : string
Label used to identify northern footpoint scalar for zonal ion drift
south_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
south_zon_scalar : string
Label used to identify southern footpoint scalar for zonal ion drift
mer_drift : string
Label used to identify meridional ion drifts within inst
zon_drift : string
Label used to identify zonal ion drifts within inst
Returns
-------
None
Modifies pysat.Instrument object in place. Drifts mapped to the magnetic equator
are labeled 'equ_mer_drift' and 'equ_zon_drift'. Mappings to the northern
and southern footpoints are labeled 'south_footpoint_mer_drift' and
'south_footpoint_zon_drift'. Similarly for the northern hemisphere.
"""
inst['equ_mer_drift'] = {'data' : inst[equ_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Equatorial meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'magnetic equator. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Meridional Ion Velocity',
'axis':'Equatorial Meridional Ion Velocity',
'desc':'Equatorial Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['equ_zon_drift'] = {'data' : inst[equ_zonal_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Equatorial zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'magnetic equator. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Zonal Ion Velocity',
'axis':'Equatorial Zonal Ion Velocity',
'desc':'Equatorial Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_mer_drift'] = {'data' : inst[south_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Southern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'southern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Meridional Ion Velocity',
'axis':'Southern Meridional Ion Velocity',
'desc':'Southern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_zon_drift'] = {'data':inst[south_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Southern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'southern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the southern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Zonal Ion Velocity',
'axis':'Southern Zonal Ion Velocity',
'desc':'Southern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_mer_drift'] = {'data':inst[north_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Northern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'northern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Meridional Ion Velocity',
'axis':'Northern Meridional Ion Velocity',
'desc':'Northern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_zon_drift'] = {'data':inst[north_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Northern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'northern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the northern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Zonal Ion Velocity',
'axis':'Northern Zonal Ion Velocity',
'desc':'Northern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
|
rstoneback/pysatMagVect | pysatMagVect/satellite.py | add_footpoint_and_equatorial_drifts | python | def add_footpoint_and_equatorial_drifts(inst, equ_mer_scalar='equ_mer_drifts_scalar',
equ_zonal_scalar='equ_zon_drifts_scalar',
north_mer_scalar='north_footpoint_mer_drifts_scalar',
north_zon_scalar='north_footpoint_zon_drifts_scalar',
south_mer_scalar='south_footpoint_mer_drifts_scalar',
south_zon_scalar='south_footpoint_zon_drifts_scalar',
mer_drift='iv_mer',
zon_drift='iv_zon'):
inst['equ_mer_drift'] = {'data' : inst[equ_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Equatorial meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'magnetic equator. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Meridional Ion Velocity',
'axis':'Equatorial Meridional Ion Velocity',
'desc':'Equatorial Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['equ_zon_drift'] = {'data' : inst[equ_zonal_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Equatorial zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'magnetic equator. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Zonal Ion Velocity',
'axis':'Equatorial Zonal Ion Velocity',
'desc':'Equatorial Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_mer_drift'] = {'data' : inst[south_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Southern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'southern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Meridional Ion Velocity',
'axis':'Southern Meridional Ion Velocity',
'desc':'Southern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_zon_drift'] = {'data':inst[south_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Southern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'southern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the southern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Zonal Ion Velocity',
'axis':'Southern Zonal Ion Velocity',
'desc':'Southern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_mer_drift'] = {'data':inst[north_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Northern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'northern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Meridional Ion Velocity',
'axis':'Northern Meridional Ion Velocity',
'desc':'Northern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_zon_drift'] = {'data':inst[north_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Northern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'northern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the northern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Zonal Ion Velocity',
'axis':'Northern Zonal Ion Velocity',
'desc':'Northern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.} | Translates geomagnetic ion velocities to those at footpoints and magnetic equator.
Note
----
Presumes scalar values for mapping ion velocities are already in the inst, labeled
by north_footpoint_zon_drifts_scalar, north_footpoint_mer_drifts_scalar,
equ_mer_drifts_scalar, equ_zon_drifts_scalar.
Also presumes that ion motions in the geomagnetic system are present and labeled
as 'iv_mer' and 'iv_zon' for meridional and zonal ion motions.
This naming scheme is used by the other pysat oriented routines
in this package.
Parameters
----------
inst : pysat.Instrument
equ_mer_scalar : string
Label used to identify equatorial scalar for meridional ion drift
equ_zon_scalar : string
Label used to identify equatorial scalar for zonal ion drift
north_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
north_zon_scalar : string
Label used to identify northern footpoint scalar for zonal ion drift
south_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
south_zon_scalar : string
Label used to identify southern footpoint scalar for zonal ion drift
mer_drift : string
Label used to identify meridional ion drifts within inst
zon_drift : string
Label used to identify zonal ion drifts within inst
Returns
-------
None
Modifies pysat.Instrument object in place. Drifts mapped to the magnetic equator
are labeled 'equ_mer_drift' and 'equ_zon_drift'. Mappings to the northern
and southern footpoints are labeled 'south_footpoint_mer_drift' and
'south_footpoint_zon_drift'. Similarly for the northern hemisphere. | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/satellite.py#L418-L610 | null | from . import *
def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10.,
ref_height=120.):
"""Adds unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Parameters
----------
inst : pysat.Instrument
Instrument object that will get unit vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
Returns
-------
None
unit vectors are added to the passed Instrument object with a naming
scheme:
'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)
'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)
'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z)
"""
# add unit vectors for magnetic drifts in ecef coordinates
zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(inst['latitude'],
inst['longitude'], inst['altitude'], inst.data.index,
steps=steps, max_steps=max_steps, step_size=step_size, ref_height=ref_height)
inst['unit_zon_ecef_x'] = zvx
inst['unit_zon_ecef_y'] = zvy
inst['unit_zon_ecef_z'] = zvz
inst['unit_fa_ecef_x'] = bx
inst['unit_fa_ecef_y'] = by
inst['unit_fa_ecef_z'] = bz
inst['unit_mer_ecef_x'] = mx
inst['unit_mer_ecef_y'] = my
inst['unit_mer_ecef_z'] = mz
inst.meta['unit_zon_ecef_x'] = {'long_name': 'Zonal unit vector along ECEF-x',
'desc': 'Zonal unit vector along ECEF-x',
'label': 'Zonal unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_y'] = {'long_name': 'Zonal unit vector along ECEF-y',
'desc': 'Zonal unit vector along ECEF-y',
'label': 'Zonal unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_z'] = {'long_name': 'Zonal unit vector along ECEF-z',
'desc': 'Zonal unit vector along ECEF-z',
'label': 'Zonal unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_x'] = {'long_name': 'Field-aligned unit vector along ECEF-x',
'desc': 'Field-aligned unit vector along ECEF-x',
'label': 'Field-aligned unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_y'] = {'long_name': 'Field-aligned unit vector along ECEF-y',
'desc': 'Field-aligned unit vector along ECEF-y',
'label': 'Field-aligned unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_z'] = {'long_name': 'Field-aligned unit vector along ECEF-z',
'desc': 'Field-aligned unit vector along ECEF-z',
'label': 'Field-aligned unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_x'] = {'long_name': 'Meridional unit vector along ECEF-x',
'desc': 'Meridional unit vector along ECEF-x',
'label': 'Meridional unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_y'] = {'long_name': 'Meridional unit vector along ECEF-y',
'desc': 'Meridional unit vector along ECEF-y',
'label': 'Meridional unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_z'] = {'long_name': 'Meridional unit vector along ECEF-z',
'desc': 'Meridional unit vector along ECEF-z',
'label': 'Meridional unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
return
def add_mag_drift_unit_vectors(inst, max_steps=40000, step_size=10.):
"""Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Interally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis.
"""
# vectors are returned in geo/ecef coordinate system
add_mag_drift_unit_vectors_ecef(inst, max_steps=max_steps, step_size=step_size)
# convert them to S/C using transformation supplied by OA
inst['unit_zon_x'], inst['unit_zon_y'], inst['unit_zon_z'] = project_ecef_vector_onto_basis(inst['unit_zon_ecef_x'], inst['unit_zon_ecef_y'], inst['unit_zon_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_fa_x'], inst['unit_fa_y'], inst['unit_fa_z'] = project_ecef_vector_onto_basis(inst['unit_fa_ecef_x'], inst['unit_fa_ecef_y'], inst['unit_fa_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_mer_x'], inst['unit_mer_y'], inst['unit_mer_z'] = project_ecef_vector_onto_basis(inst['unit_mer_ecef_x'], inst['unit_mer_ecef_y'], inst['unit_mer_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst.meta['unit_zon_x'] = { 'long_name':'Zonal direction along IVM-x',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-X component',
'axis': 'Zonal Unit Vector: IVM-X component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_y'] = {'long_name':'Zonal direction along IVM-y',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Y component',
'axis': 'Zonal Unit Vector: IVM-Y component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_z'] = {'long_name':'Zonal direction along IVM-z',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Z component',
'axis': 'Zonal Unit Vector: IVM-Z component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_x'] = {'long_name':'Field-aligned direction along IVM-x',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-X component',
'axis': 'Field Aligned Unit Vector: IVM-X component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_y'] = {'long_name':'Field-aligned direction along IVM-y',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Y component',
'axis': 'Field Aligned Unit Vector: IVM-Y component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_z'] = {'long_name':'Field-aligned direction along IVM-z',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Z component',
'axis': 'Field Aligned Unit Vector: IVM-Z component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_x'] = {'long_name':'Meridional direction along IVM-x',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-X component',
'axis': 'Meridional Unit Vector: IVM-X component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_y'] = {'long_name':'Meridional direction along IVM-y',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Y component',
'axis': 'Meridional Unit Vector: IVM-Y component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_z'] = {'long_name':'Meridional direction along IVM-z',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Z component',
'axis': 'Meridional Unit Vector: IVM-Z component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
return
def add_mag_drifts(inst):
"""Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
along with pre-calculated unit vectors for magnetic coordinates.
Note
----
Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
unit vectors labels 'unit_zonal_*', 'unit_fa_*', and 'unit_mer_*',
where the unit vectors are expressed in S/C coordinates. These
vectors are calculated by add_mag_drift_unit_vectors.
Parameters
----------
inst : pysat.Instrument
Instrument object will be modified to include new ion drift magnitudes
Returns
-------
None
Instrument object modified in place
"""
inst['iv_zon'] = {'data':inst['unit_zon_x'] * inst['iv_x'] + inst['unit_zon_y']*inst['iv_y'] + inst['unit_zon_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Zonal ion velocity',
'notes':('Ion velocity relative to co-rotation along zonal '
'direction, normal to meridional plane. Positive east. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label': 'Zonal Ion Velocity',
'axis': 'Zonal Ion Velocity',
'desc': 'Zonal ion velocity',
'scale': 'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_fa'] = {'data':inst['unit_fa_x'] * inst['iv_x'] + inst['unit_fa_y'] * inst['iv_y'] + inst['unit_fa_z'] * inst['iv_z'],
'units':'m/s',
'long_name':'Field-Aligned ion velocity',
'notes':('Ion velocity relative to co-rotation along magnetic field line. Positive along the field. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Field-Aligned Ion Velocity',
'axis':'Field-Aligned Ion Velocity',
'desc':'Field-Aligned Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['iv_mer'] = {'data':inst['unit_mer_x'] * inst['iv_x'] + inst['unit_mer_y']*inst['iv_y'] + inst['unit_mer_z']*inst['iv_z'],
'units':'m/s',
'long_name':'Meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane. Positive is up at magnetic equator. ',
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. '),
'label':'Meridional Ion Velocity',
'axis':'Meridional Ion Velocity',
'desc':'Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
return
def add_footpoint_and_equatorial_drifts(inst, equ_mer_scalar='equ_mer_drifts_scalar',
equ_zonal_scalar='equ_zon_drifts_scalar',
north_mer_scalar='north_footpoint_mer_drifts_scalar',
north_zon_scalar='north_footpoint_zon_drifts_scalar',
south_mer_scalar='south_footpoint_mer_drifts_scalar',
south_zon_scalar='south_footpoint_zon_drifts_scalar',
mer_drift='iv_mer',
zon_drift='iv_zon'):
"""Translates geomagnetic ion velocities to those at footpoints and magnetic equator.
Note
----
Presumes scalar values for mapping ion velocities are already in the inst, labeled
by north_footpoint_zon_drifts_scalar, north_footpoint_mer_drifts_scalar,
equ_mer_drifts_scalar, equ_zon_drifts_scalar.
Also presumes that ion motions in the geomagnetic system are present and labeled
as 'iv_mer' and 'iv_zon' for meridional and zonal ion motions.
This naming scheme is used by the other pysat oriented routines
in this package.
Parameters
----------
inst : pysat.Instrument
equ_mer_scalar : string
Label used to identify equatorial scalar for meridional ion drift
equ_zon_scalar : string
Label used to identify equatorial scalar for zonal ion drift
north_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
north_zon_scalar : string
Label used to identify northern footpoint scalar for zonal ion drift
south_mer_scalar : string
Label used to identify northern footpoint scalar for meridional ion drift
south_zon_scalar : string
Label used to identify southern footpoint scalar for zonal ion drift
mer_drift : string
Label used to identify meridional ion drifts within inst
zon_drift : string
Label used to identify zonal ion drifts within inst
Returns
-------
None
Modifies pysat.Instrument object in place. Drifts mapped to the magnetic equator
are labeled 'equ_mer_drift' and 'equ_zon_drift'. Mappings to the northern
and southern footpoints are labeled 'south_footpoint_mer_drift' and
'south_footpoint_zon_drift'. Similarly for the northern hemisphere.
"""
inst['equ_mer_drift'] = {'data' : inst[equ_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Equatorial meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'magnetic equator. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Meridional Ion Velocity',
'axis':'Equatorial Meridional Ion Velocity',
'desc':'Equatorial Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['equ_zon_drift'] = {'data' : inst[equ_zonal_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Equatorial zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'magnetic equator. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic equator. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Equatorial Zonal Ion Velocity',
'axis':'Equatorial Zonal Ion Velocity',
'desc':'Equatorial Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_mer_drift'] = {'data' : inst[south_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Southern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'southern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Meridional Ion Velocity',
'axis':'Southern Meridional Ion Velocity',
'desc':'Southern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['south_footpoint_zon_drift'] = {'data':inst[south_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Southern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'southern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the southern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Southern Zonal Ion Velocity',
'axis':'Southern Zonal Ion Velocity',
'desc':'Southern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_mer_drift'] = {'data':inst[north_mer_scalar]*inst[mer_drift],
'units':'m/s',
'long_name':'Northern meridional ion velocity',
'notes':('Velocity along meridional direction, perpendicular '
'to field and within meridional plane, scaled to '
'northern footpoint. Positive is up at magnetic equator. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the magnetic footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Meridional Ion Velocity',
'axis':'Northern Meridional Ion Velocity',
'desc':'Northern Meridional Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
inst['north_footpoint_zon_drift'] = {'data':inst[north_zon_scalar]*inst[zon_drift],
'units':'m/s',
'long_name':'Northern zonal ion velocity',
'notes':('Velocity along zonal direction, perpendicular '
'to field and the meridional plane, scaled to '
'northern footpoint. Positive is generally eastward. '
'Velocity obtained using ion velocities relative '
'to co-rotation in the instrument frame along '
'with the corresponding unit vectors expressed in '
'the instrument frame. Field-line mapping and '
'the assumption of equi-potential field lines '
'is used to translate the locally measured ion '
'motion to the northern footpoint. The mapping '
'is used to determine the change in magnetic '
'field line distance, which, under assumption of '
'equipotential field lines, in turn alters '
'the electric field at that location (E=V/d). '),
'label':'Northern Zonal Ion Velocity',
'axis':'Northern Zonal Ion Velocity',
'desc':'Northern Zonal Ion Velocity',
'scale':'Linear',
'value_min':-500.,
'value_max':500.}
|
rstoneback/pysatMagVect | pysatMagVect/_core.py | geocentric_to_ecef | python | def geocentric_to_ecef(latitude, longitude, altitude):
r = earth_geo_radius + altitude
x = r * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))
y = r * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))
z = r * np.sin(np.deg2rad(latitude))
return x, y, z | Convert geocentric coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geocentric latitude (degrees)
longitude : float or array_like
Geocentric longitude (degrees)
altitude : float or array_like
Height (km) above presumed spherical Earth with radius 6371 km.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km | train | https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/_core.py#L24-L48 | null |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from . import igrf
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geoncentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
"""Convert geocentric coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geocentric latitude (degrees)
longitude : float or array_like
Geocentric longitude (degrees)
altitude : float or array_like
Height (km) above presumed spherical Earth with radius 6371 km.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km
"""
r = earth_geo_radius + altitude
x = r * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))
y = r * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))
z = r * np.sin(np.deg2rad(latitude))
return x, y, z
def ecef_to_geocentric(x, y, z, ref_height=None):
"""Convert ECEF into geocentric coordinates
Parameters
----------
x : float or array_like
ECEF-X in km
y : float or array_like
ECEF-Y in km
z : float or array_like
ECEF-Z in km
ref_height : float or array_like
Reference radius used for calculating height.
Defaults to average radius of 6371 km
Returns
-------
latitude, longitude, altitude
numpy arrays of locations in degrees, degrees, and km
"""
if ref_height is None:
ref_height = earth_geo_radius
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
colatitude = np.rad2deg(np.arccos(z / r))
longitude = np.rad2deg(np.arctan2(y, x))
latitude = 90. - colatitude
return latitude, longitude, r - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
"""Convert WGS84 geodetic coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geodetic latitude (degrees)
longitude : float or array_like
Geodetic longitude (degrees)
altitude : float or array_like
Geodetic Height (km) above WGS84 reference ellipsoid.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km
"""
ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
r_n = earth_a / np.sqrt(1. - ellip ** 2 * np.sin(np.deg2rad(latitude)) ** 2)
# colatitude = 90. - latitude
x = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.cos(np.deg2rad(longitude))
y = (r_n + altitude) * np.cos(np.deg2rad(latitude)) * np.sin(np.deg2rad(longitude))
z = (r_n * (1. - ellip ** 2) + altitude) * np.sin(np.deg2rad(latitude))
return x, y, z
def ecef_to_geodetic(x, y, z, method=None):
"""Convert ECEF into Geodetic WGS84 coordinates
Parameters
----------
x : float or array_like
ECEF-X in km
y : float or array_like
ECEF-Y in km
z : float or array_like
ECEF-Z in km
method : 'iterative' or 'closed' ('closed' is deafult)
String selects method of conversion. Closed for mathematical
solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).
Returns
-------
latitude, longitude, altitude
numpy arrays of locations in degrees, degrees, and km
"""
# quick notes on ECEF to Geodetic transformations
# http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
method = method or 'closed'
# ellipticity of Earth
ellip = np.sqrt(1. - earth_b ** 2 / earth_a ** 2)
# first eccentricity squared
e2 = ellip ** 2 # 6.6943799901377997E-3
longitude = np.arctan2(y, x)
# cylindrical radius
p = np.sqrt(x ** 2 + y ** 2)
# closed form solution
# a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
if method == 'closed':
e_prime = np.sqrt((earth_a**2 - earth_b**2) / earth_b**2)
theta = np.arctan2(z*earth_a, p*earth_b)
latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)
r_n = earth_a / np.sqrt(1. - e2 * np.sin(latitude) ** 2)
h = p / np.cos(latitude) - r_n
# another possibility
# http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
## iterative method
# http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
if method == 'iterative':
latitude = np.arctan2(p, z)
r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
for i in np.arange(6):
# print latitude
r_n = earth_a / np.sqrt(1. - e2*np.sin(latitude)**2)
h = p / np.cos(latitude) - r_n
latitude = np.arctan(z / p / (1. - e2 * (r_n / (r_n + h))))
# print h
# final ellipsoidal height update
h = p / np.cos(latitude) - r_n
return np.rad2deg(latitude), np.rad2deg(longitude), h
def enu_to_ecef_vector(east, north, up, glat, glong):
"""Converts vector from East, North, Up components to ECEF
Position of vector in geospace may be specified in either
geocentric or geodetic coordinates, with corresponding expression
of the vector using radial or ellipsoidal unit vectors.
Parameters
----------
east : float or array-like
Eastward component of vector
north : float or array-like
Northward component of vector
up : float or array-like
Upward component of vector
latitude : float or array_like
Geodetic or geocentric latitude (degrees)
longitude : float or array_like
Geodetic or geocentric longitude (degrees)
Returns
-------
x, y, z
Vector components along ECEF x, y, and z directions
"""
# convert lat and lon in degrees to radians
rlat = np.radians(glat)
rlon = np.radians(glong)
x = -east*np.sin(rlon) - north*np.cos(rlon)*np.sin(rlat) + up*np.cos(rlon)*np.cos(rlat)
y = east*np.cos(rlon) - north*np.sin(rlon)*np.sin(rlat) + up*np.sin(rlon)*np.cos(rlat)
z = north*np.cos(rlat) + up*np.sin(rlat)
return x, y, z
def ecef_to_enu_vector(x, y, z, glat, glong):
"""Converts vector from ECEF X,Y,Z components to East, North, Up
Position of vector in geospace may be specified in either
geocentric or geodetic coordinates, with corresponding expression
of the vector using radial or ellipsoidal unit vectors.
Parameters
----------
x : float or array-like
ECEF-X component of vector
y : float or array-like
ECEF-Y component of vector
z : float or array-like
ECEF-Z component of vector
latitude : float or array_like
Geodetic or geocentric latitude (degrees)
longitude : float or array_like
Geodetic or geocentric longitude (degrees)
Returns
-------
east, north, up
Vector components along east, north, and up directions
"""
# convert lat and lon in degrees to radians
rlat = np.radians(glat)
rlon = np.radians(glong)
east = -x*np.sin(rlon) + y*np.cos(rlon)
north = -x*np.cos(rlon)*np.sin(rlat) - y*np.sin(rlon)*np.sin(rlat) + z*np.cos(rlat)
up = x*np.cos(rlon)*np.cos(rlat) + y*np.sin(rlon)*np.cos(rlat)+ z*np.sin(rlat)
return east, north, up
def project_ecef_vector_onto_basis(x, y, z, xx, xy, xz, yx, yy, yz, zx, zy, zz):
"""Projects vector in ecef onto different basis, with components also expressed in ECEF
Parameters
----------
x : float or array-like
ECEF-X component of vector
y : float or array-like
ECEF-Y component of vector
z : float or array-like
ECEF-Z component of vector
xx : float or array-like
ECEF-X component of the x unit vector of new basis
xy : float or array-like
ECEF-Y component of the x unit vector of new basis
xz : float or array-like
ECEF-Z component of the x unit vector of new basis
"""
out_x = x*xx + y*xy + z*xz
out_y = x*yx + y*yy + z*yz
out_z = x*zx + y*zy + z*zz
return out_x, out_y, out_z
def normalize_vector(x, y, z):
"""
Normalizes vector to produce a unit vector.
Parameters
----------
x : float or array-like
X component of vector
y : float or array-like
Y component of vector
z : float or array-like
Z component of vector
Returns
-------
x, y, z
Unit vector x,y,z components
"""
mag = np.sqrt(x**2 + y**2 + z**2)
x = x/mag
y = y/mag
z = z/mag
return x, y, z
def cross_product(x1, y1, z1, x2, y2, z2):
"""
Cross product of two vectors, v1 x v2
Parameters
----------
x1 : float or array-like
X component of vector 1
y1 : float or array-like
Y component of vector 1
z1 : float or array-like
Z component of vector 1
x2 : float or array-like
X component of vector 2
y2 : float or array-like
Y component of vector 2
z2 : float or array-like
Z component of vector 2
Returns
-------
x, y, z
Unit vector x,y,z components
"""
x = y1*z2 - y2*z1
y = z1*x2 - x1*z2
z = x1*y2 - y1*x2
return x, y, z
def field_line_trace(init, date, direction, height, steps=None,
max_steps=1E4, step_size=10., recursive_loop_count=None,
recurse=True):
"""Perform field line tracing using IGRF and scipy.integrate.odeint.
Parameters
----------
init : array-like of floats
Position to begin field line tracing from in ECEF (x,y,z) km
date : datetime or float
Date to perform tracing on (year + day/365 + hours/24. + etc.)
Accounts for leap year if datetime provided.
direction : int
1 : field aligned, generally south to north.
-1 : anti-field aligned, generally north to south.
height : float
Altitude to terminate trace, geodetic WGS84 (km)
steps : array-like of ints or floats
Number of steps along field line when field line trace positions should
be reported. By default, each step is reported; steps=np.arange(max_steps).
max_steps : float
Maximum number of steps along field line that should be taken
step_size : float
Distance in km for each large integration step. Multiple substeps
are taken as determined by scipy.integrate.odeint
Returns
-------
numpy array
2D array. [0,:] has the x,y,z location for initial point
[:,0] is the x positions over the integration.
Positions are reported in ECEF (km).
"""
if recursive_loop_count is None:
recursive_loop_count = 0
#
if steps is None:
steps = np.arange(max_steps)
if not isinstance(date, float):
# recast from datetime to float, as required by IGRF12 code
doy = (date - datetime.datetime(date.year,1,1)).days
# number of days in year, works for leap years
num_doy_year = (datetime.datetime(date.year+1,1,1) - datetime.datetime(date.year,1,1)).days
date = float(date.year) + float(doy)/float(num_doy_year) + float(date.hour + date.minute/60. + date.second/3600.)/24.
trace_north = scipy.integrate.odeint(igrf.igrf_step, init.copy(),
steps,
args=(date, step_size, direction, height),
full_output=False,
printmessg=False,
ixpr=False) #,
# mxstep=500)
# check that we reached final altitude
check = trace_north[-1, :]
x, y, z = ecef_to_geodetic(*check)
if height == 0:
check_height = 1.
else:
check_height = height
# fortran integration gets close to target height
if recurse & (z > check_height*1.000001):
if (recursive_loop_count < 1000):
# When we have not reached the reference height, call field_line_trace
# again by taking check value as init - recursive call
recursive_loop_count = recursive_loop_count + 1
trace_north1 = field_line_trace(check, date, direction, height,
step_size=step_size,
max_steps=max_steps,
recursive_loop_count=recursive_loop_count,
steps=steps)
else:
raise RuntimeError("After 1000 iterations couldn't reach target altitude")
return np.vstack((trace_north, trace_north1))
else:
# return results if we make it to the target altitude
# filter points to terminate at point closest to target height
# code below not correct, we want the first poiint that goes below target
# height
# code also introduces a variable length return, though I suppose
# that already exists with the recursive functionality
# x, y, z = ecef_to_geodetic(trace_north[:,0], trace_north[:,1], trace_north[:,2])
# idx = np.argmin(np.abs(check_height - z))
return trace_north #[:idx+1,:]
def full_field_line(init, date, height, step_size=100., max_steps=1000,
steps=None, **kwargs):
"""Perform field line tracing using IGRF and scipy.integrate.odeint.
Parameters
----------
init : array-like of floats
Position to begin field line tracing from in ECEF (x,y,z) km
date : datetime or float
Date to perform tracing on (year + day/365 + hours/24. + etc.)
Accounts for leap year if datetime provided.
height : float
Altitude to terminate trace, geodetic WGS84 (km)
max_steps : float
Maximum number of steps along field line that should be taken
step_size : float
Distance in km for each large integration step. Multiple substeps
are taken as determined by scipy.integrate.odeint
steps : array-like of ints or floats
Number of steps along field line when field line trace positions should
be reported. By default, each step is reported; steps=np.arange(max_steps).
Two traces are made, one north, the other south, thus the output array
could have double max_steps, or more via recursion.
Returns
-------
numpy array
2D array. [0,:] has the x,y,z location for southern footpoint
[:,0] is the x positions over the integration.
Positions are reported in ECEF (km).
"""
if steps is None:
steps = np.arange(max_steps)
# trace north, then south, and combine
trace_south = field_line_trace(init, date, -1., height,
steps=steps,
step_size=step_size,
max_steps=max_steps,
**kwargs)
trace_north = field_line_trace(init, date, 1., height,
steps=steps,
step_size=step_size,
max_steps=max_steps,
**kwargs)
# order of field points is generally along the field line, south to north
# don't want to include the initial point twice
trace = np.vstack((trace_south[::-1][:-1,:], trace_north))
return trace
def calculate_mag_drift_unit_vectors_ecef(latitude, longitude, altitude, datetimes,
steps=None, max_steps=1000, step_size=100.,
ref_height=120., filter_zonal=True):
"""Calculates unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Note
----
The zonal vector is calculated by field-line tracing from
the input locations toward the footpoint locations at ref_height.
The cross product of these two vectors is taken to define the plane of
the magnetic field. This vector is not always orthogonal
with the local field-aligned vector (IGRF), thus any component of the
zonal vector with the field-aligned direction is removed (optional).
The meridional unit vector is defined via the cross product of the
zonal and field-aligned directions.
Parameters
----------
latitude : array-like of floats (degrees)
Latitude of location, degrees, WGS84
longitude : array-like of floats (degrees)
Longitude of location, degrees, WGS84
altitude : array-like of floats (km)
Altitude of location, height above surface, WGS84
datetimes : array-like of datetimes
Time to calculate vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
filter_zonal : bool
If True, removes any field aligned component from the calculated
zonal unit vector. Resulting coordinate system is not-orthogonal.
Returns
-------
zon_x, zon_y, zon_z, fa_x, fa_y, fa_z, mer_x, mer_y, mer_z
"""
if steps is None:
steps = np.arange(max_steps)
# calculate satellite position in ECEF coordinates
ecef_x, ecef_y, ecef_z = geodetic_to_ecef(latitude, longitude, altitude)
# also get position in geocentric coordinates
geo_lat, geo_long, geo_alt = ecef_to_geocentric(ecef_x, ecef_y, ecef_z,
ref_height=0.)
# filter longitudes (could use pysat's function here)
idx, = np.where(geo_long < 0)
geo_long[idx] = geo_long[idx] + 360.
# prepare output lists
north_x = [];
north_y = [];
north_z = []
south_x = [];
south_y = [];
south_z = []
bn = [];
be = [];
bd = []
for x, y, z, alt, colat, elong, time in zip(ecef_x, ecef_y, ecef_z,
geo_alt, np.deg2rad(90. - geo_lat),
np.deg2rad(geo_long), datetimes):
init = np.array([x, y, z])
# date = inst.yr + inst.doy / 366.
# trace = full_field_line(init, time, ref_height, step_size=step_size,
# max_steps=max_steps,
# steps=steps)
trace_north = field_line_trace(init, time, 1., ref_height, steps=steps,
step_size=step_size, max_steps=max_steps)
trace_south = field_line_trace(init, time, -1., ref_height, steps=steps,
step_size=step_size, max_steps=max_steps)
# store final location, full trace goes south to north
trace_north = trace_north[-1, :]
trace_south = trace_south[-1, :]
# magnetic field at spacecraft location, using geocentric inputs
# to get magnetic field in geocentric output
# recast from datetime to float, as required by IGRF12 code
doy = (time - datetime.datetime(time.year,1,1)).days
# number of days in year, works for leap years
num_doy_year = (datetime.datetime(time.year+1,1,1) - datetime.datetime(time.year,1,1)).days
date = time.year + float(doy)/float(num_doy_year) + (time.hour + time.minute/60. + time.second/3600.)/24.
# get IGRF field components
# tbn, tbe, tbd, tbmag are in nT
tbn, tbe, tbd, tbmag = igrf.igrf12syn(0, date, 1, alt, colat, elong)
# collect outputs
south_x.append(trace_south[0])
south_y.append(trace_south[1])
south_z.append(trace_south[2])
north_x.append(trace_north[0])
north_y.append(trace_north[1])
north_z.append(trace_north[2])
bn.append(tbn);
be.append(tbe);
bd.append(tbd)
north_x = np.array(north_x)
north_y = np.array(north_y)
north_z = np.array(north_z)
south_x = np.array(south_x)
south_y = np.array(south_y)
south_z = np.array(south_z)
bn = np.array(bn)
be = np.array(be)
bd = np.array(bd)
# calculate vector from satellite to northern/southern footpoints
north_x = north_x - ecef_x
north_y = north_y - ecef_y
north_z = north_z - ecef_z
north_x, north_y, north_z = normalize_vector(north_x, north_y, north_z)
south_x = south_x - ecef_x
south_y = south_y - ecef_y
south_z = south_z - ecef_z
south_x, south_y, south_z = normalize_vector(south_x, south_y, south_z)
# calculate magnetic unit vector
bx, by, bz = enu_to_ecef_vector(be, bn, -bd, geo_lat, geo_long)
bx, by, bz = normalize_vector(bx, by, bz)
# take cross product of southward and northward vectors to get the zonal vector
zvx_foot, zvy_foot, zvz_foot = cross_product(south_x, south_y, south_z,
north_x, north_y, north_z)
# getting zonal vector utilizing magnetic field vector instead
zvx_north, zvy_north, zvz_north = cross_product(north_x, north_y, north_z,
bx, by, bz)
# getting zonal vector utilizing magnetic field vector instead and southern point
zvx_south, zvy_south, zvz_south = cross_product(south_x, south_y, south_z,
bx, by, bz)
# normalize the vectors
norm_foot = np.sqrt(zvx_foot ** 2 + zvy_foot ** 2 + zvz_foot ** 2)
# calculate zonal vector
zvx = zvx_foot / norm_foot
zvy = zvy_foot / norm_foot
zvz = zvz_foot / norm_foot
# remove any field aligned component to the zonal vector
dot_fa = zvx * bx + zvy * by + zvz * bz
zvx -= dot_fa * bx
zvy -= dot_fa * by
zvz -= dot_fa * bz
zvx, zvy, zvz = normalize_vector(zvx, zvy, zvz)
# compute meridional vector
# cross product of zonal and magnetic unit vector
mx, my, mz = cross_product(zvx, zvy, zvz,
bx, by, bz)
# add unit vectors for magnetic drifts in ecef coordinates
return zvx, zvy, zvz, bx, by, bz, mx, my, mz
def step_until_intersect(pos, field_line, sign, time, direction=None,
step_size_goal=5.,
field_step_size=None):
"""Starting at pos, method steps along magnetic unit vector direction
towards the supplied field line trace. Determines the distance of
closest approach to field line.
Routine is used when calculting the mapping of electric fields along
magnetic field lines. Voltage remains constant along the field but the
distance between field lines does not.This routine may be used to form the
last leg when trying to trace out a closed field line loop.
Routine will create a high resolution field line trace (.01 km step size)
near the location of closest approach to better determine where the
intersection occurs.
Parameters
----------
pos : array-like
X, Y, and Z ECEF locations to start from
field_line : array-like (:,3)
X, Y, and Z ECEF locations of field line trace, produced by the
field_line_trace method.
sign : int
if 1, move along positive unit vector. Negwtive direction for -1.
time : datetime or float
Date to perform tracing on (year + day/365 + hours/24. + etc.)
Accounts for leap year if datetime provided.
direction : string ('meridional', 'zonal', or 'aligned')
Which unit vector direction to move slong when trying to intersect
with supplied field line trace. See step_along_mag_unit_vector method
for more.
step_size_goal : float
step size goal that method will try to match when stepping towards field line.
Returns
-------
(float, array, float)
Total distance taken along vector direction; the position after taking
the step [x, y, z] in ECEF; distance of closest approach from input pos
towards the input field line trace.
"""
# work on a copy, probably not needed
field_copy = field_line
# set a high last minimum distance to ensure first loop does better than this
last_min_dist = 2500000.
# scalar is the distance along unit vector line that we are taking
scalar = 0.
# repeat boolean
repeat=True
# first run boolean
first=True
# factor is a divisor applied to the remaining distance between point and field line
# I slowly take steps towards the field line and I don't want to overshoot
# each time my minimum distance increases, I step back, increase factor, reducing
# my next step size, then I try again
factor = 1
while repeat:
# take a total step along magnetic unit vector
# try to take steps near user provided step_size_goal
unit_steps = np.abs(scalar//step_size_goal)
if unit_steps == 0:
unit_steps = 1
# print (unit_steps, scalar/unit_steps)
pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2], time,
direction=direction,
num_steps=unit_steps,
step_size=np.abs(scalar)/unit_steps,
scalar=sign)
# find closest point along field line trace
diff = field_copy - pos_step
diff_mag = np.sqrt((diff ** 2).sum(axis=1))
min_idx = np.argmin(diff_mag)
if first:
# first time in while loop, create some information
# make a high resolution field line trace around closest distance
# want to take a field step size in each direction
# maintain accuracy of high res trace below to be .01 km
init = field_copy[min_idx,:]
field_copy = full_field_line(init, time, 0.,
step_size=0.01,
max_steps=int(field_step_size/.01),
recurse=False)
# difference with position
diff = field_copy - pos_step
diff_mag = np.sqrt((diff ** 2).sum(axis=1))
# find closest one
min_idx = np.argmin(diff_mag)
# # reduce number of elements we really need to check
# field_copy = field_copy[min_idx-100:min_idx+100]
# # difference with position
# diff = field_copy - pos_step
# diff_mag = np.sqrt((diff ** 2).sum(axis=1))
# # find closest one
# min_idx = np.argmin(diff_mag)
first = False
# pull out distance of closest point
min_dist = diff_mag[min_idx]
# check how the solution is doing
# if well, add more distance to the total step and recheck if closer
# if worse, step back and try a smaller step
if min_dist > last_min_dist:
# last step we took made the solution worse
if factor > 4:
# we've tried enough, stop looping
repeat = False
# undo increment to last total distance
scalar = scalar - last_min_dist/(2*factor)
# calculate latest position
pos_step = step_along_mag_unit_vector(pos[0], pos[1], pos[2],
time,
direction=direction,
num_steps=unit_steps,
step_size=np.abs(scalar)/unit_steps,
scalar=sign)
else:
# undo increment to last total distance
scalar = scalar - last_min_dist/(2*factor)
# increase the divisor used to reduce the distance
# actually stepped per increment
factor = factor + 1.
# try a new increment to total distance
scalar = scalar + last_min_dist/(2*factor)
else:
# we did better, move even closer, a fraction of remaining distance
# increment scalar, but only by a fraction
scalar = scalar + min_dist/(2*factor)
# we have a new standard to judge against, set it
last_min_dist = min_dist.copy()
# return magnitude of step
return scalar, pos_step, min_dist
def step_along_mag_unit_vector(x, y, z, date, direction=None, num_steps=5.,
step_size=5., scalar=1):
"""
Move along 'lines' formed by following the magnetic unit vector directions.
Moving along the field is effectively the same as a field line trace though
extended movement along a field should use the specific field_line_trace
method.
Parameters
----------
x : ECEF-x (km)
Location to step from in ECEF (km). Scalar input.
y : ECEF-y (km)
Location to step from in ECEF (km). Scalar input.
z : ECEF-z (km)
Location to step from in ECEF (km). Scalar input.
date : list-like of datetimes
Date and time for magnetic field
direction : string
String identifier for which unit vector directino to move along.
Supported inputs, 'meridional', 'zonal', 'aligned'
num_steps : int
Number of steps to take along unit vector direction
step_size = float
Distance taken for each step (km)
scalar : int
Scalar modifier for step size distance. Input a -1 to move along
negative unit vector direction.
Returns
-------
np.array
[x, y, z] of ECEF location after taking num_steps along direction,
each step_size long.
"""
# set parameters for the field line tracing routines
field_step_size = 100.
field_max_steps = 1000
field_steps = np.arange(field_max_steps)
for i in np.arange(num_steps):
# x, y, z in ECEF
# convert to geodetic
lat, lon, alt = ecef_to_geodetic(x, y, z)
# get unit vector directions
zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
[lat], [lon], [alt], [date],
steps=field_steps,
max_steps=field_max_steps,
step_size=field_step_size,
ref_height=0.)
# pull out the direction we need
if direction == 'meridional':
ux, uy, uz = mx, my, mz
elif direction == 'zonal':
ux, uy, uz = zvx, zvy, zvz
elif direction == 'aligned':
ux, uy, uz = bx, by, bz
# take steps along direction
x = x + step_size*ux[0]*scalar
y = y + step_size*uy[0]*scalar
z = z + step_size*uz[0]*scalar
return np.array([x, y, z])
def apex_location_info(glats, glons, alts, dates):
"""Determine apex location for the field line passing through input point.
Employs a two stage method. A broad step (100 km) field line trace spanning
Northern/Southern footpoints is used to find the location with the largest
geodetic (WGS84) height. A higher resolution trace (.1 km) is then used to
get a better fix on this location. Greatest geodetic height is once again
selected.
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
Returns
-------
(float, float, float, float, float, float)
ECEF X (km), ECEF Y (km), ECEF Z (km),
Geodetic Latitude (degrees),
Geodetic Longitude (degrees),
Geodetic Altitude (km)
"""
# use input location and convert to ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare parameters for field line trace
step_size = 100.
max_steps = 1000
steps = np.arange(max_steps)
# high resolution trace parameters
fine_step_size = .01
fine_max_steps = int(step_size/fine_step_size)+10
fine_steps = np.arange(fine_max_steps)
# prepare output
out_x = []
out_y = []
out_z = []
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
# to get the apex location we need to do a field line trace
# then find the highest point
trace = full_field_line(np.array([ecef_x, ecef_y, ecef_z]), date, 0.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# convert all locations to geodetic coordinates
tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
# determine location that is highest with respect to the geodetic Earth
max_idx = np.argmax(talt)
# repeat using a high resolution trace one big step size each
# direction around identified max
# recurse False ensures only max_steps are taken
trace = full_field_line(trace[max_idx,:], date, 0.,
steps=fine_steps,
step_size=fine_step_size,
max_steps=fine_max_steps,
recurse=False)
# convert all locations to geodetic coordinates
tlat, tlon, talt = ecef_to_geodetic(trace[:,0], trace[:,1], trace[:,2])
# determine location that is highest with respect to the geodetic Earth
max_idx = np.argmax(talt)
# collect outputs
out_x.append(trace[max_idx,0])
out_y.append(trace[max_idx,1])
out_z.append(trace[max_idx,2])
out_x = np.array(out_x)
out_y = np.array(out_y)
out_z = np.array(out_z)
glat, glon, alt = ecef_to_geodetic(out_x, out_y, out_z)
return out_x, out_y, out_z, glat, glon, alt
def closed_loop_edge_lengths_via_footpoint(glats, glons, alts, dates, direction,
vector_direction, step_size=None,
max_steps=None, edge_length=25.,
edge_steps=5):
"""
Forms closed loop integration along mag field, satrting at input
points and goes through footpoint. At footpoint, steps along vector direction
in both positive and negative directions, then traces back to opposite
footpoint. Back at input location, steps toward those new field lines
(edge_length) along vector direction until hitting distance of minimum
approach. Loops don't always close. Returns total edge distance
that goes through input location, along with the distances of closest approach.
Note
----
vector direction refers to the magnetic unit vector direction
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
direction : string
'north' or 'south' for tracing through northern or
southern footpoint locations
vector_direction : string
'meridional' or 'zonal' unit vector directions
step_size : float (km)
Step size (km) used for field line integration
max_steps : int
Number of steps taken for field line integration
edge_length : float (km)
Half of total edge length (step) taken at footpoint location.
edge_length step in both positive and negative directions.
edge_steps : int
Number of steps taken from footpoint towards new field line
in a given direction (positive/negative) along unit vector
Returns
-------
np.array, np.array, np.array
A closed loop field line path through input location and footpoint in
northern/southern hemisphere and back is taken. The return edge length
through input location is provided.
The distances of closest approach for the positive step along vector
direction, and the negative step are returned.
"""
if step_size is None:
step_size = 100.
if max_steps is None:
max_steps = 1000
steps = np.arange(max_steps)
if direction == 'south':
direct = -1
elif direction == 'north':
direct = 1
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
full_local_step = []
min_distance_plus = []
min_distance_minus = []
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
# going to try and form close loops via field line integration
# start at location of interest, map down to northern or southern
# footpoints then take symmetric steps along meridional and zonal
# directions and trace back from location of interest, step along
# field line directions until we intersect or hit the distance of
# closest approach to the return field line with the known
# distances of footpoint steps, and the closet approach distance
# we can determine the scalar mapping of one location to another
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# print (glat, glon, alt)
# trace to footpoint, starting with input location
sc_root = np.array([ecef_x, ecef_y, ecef_z])
trace = field_line_trace(sc_root, double_date, direct, 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# pull out footpoint location
ftpnt = trace[-1, :]
ft_glat, ft_glon, ft_alt = ecef_to_geodetic(*ftpnt)
# take step from footpoint along + vector direction
plus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
date,
direction=vector_direction,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
# trace this back to other footpoint
other_plus = field_line_trace(plus_step, double_date, -direct, 0.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# take half step from first footpoint along - vector direction
minus_step = step_along_mag_unit_vector(ftpnt[0], ftpnt[1], ftpnt[2],
date,
direction=vector_direction,
scalar=-1,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
# trace this back to other footpoint
other_minus = field_line_trace(minus_step, double_date, -direct, 0.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# need to determine where the intersection of field line coming back from
# footpoint through postive vector direction step and back
# in relation to the vector direction from the s/c location.
pos_edge_length, _, mind_pos = step_until_intersect(sc_root,
other_plus,
1, date,
direction=vector_direction,
field_step_size=step_size,
step_size_goal=edge_length/edge_steps)
# take half step from S/C along - vector direction
minus_edge_length, _, mind_minus = step_until_intersect(sc_root,
other_minus,
-1, date,
direction=vector_direction,
field_step_size=step_size,
step_size_goal=edge_length/edge_steps)
# collect outputs
full_local_step.append(pos_edge_length + minus_edge_length)
min_distance_plus.append(mind_pos)
min_distance_minus.append(mind_minus)
return np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
def closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
vector_direction,
edge_length=25.,
edge_steps=5):
"""
Calculates the distance between apex locations mapping to the input location.
Using the input location, the apex location is calculated. Also from the input
location, a step along both the positive and negative
vector_directions is taken, and the apex locations for those points are calculated.
The difference in position between these apex locations is the total centered
distance between magnetic field lines at the magnetic apex when starting
locally with a field line half distance of edge_length.
An alternative method has been implemented, then commented out.
This technique takes multiple steps from the origin apex towards the apex
locations identified along vector_direction. In principle this is more accurate
but more computationally intensive, similar to the footpoint model.
A comparison is planned.
Note
----
vector direction refers to the magnetic unit vector direction
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
vector_direction : string
'meridional' or 'zonal' unit vector directions
step_size : float (km)
Step size (km) used for field line integration
max_steps : int
Number of steps taken for field line integration
edge_length : float (km)
Half of total edge length (step) taken at footpoint location.
edge_length step in both positive and negative directions.
edge_steps : int
Number of steps taken from footpoint towards new field line
in a given direction (positive/negative) along unit vector
Returns
-------
np.array, ### np.array, np.array
The change in field line apex locations.
## Pending ## The return edge length through input location is provided.
## Pending ## The distances of closest approach for the positive step
along vector direction, and the negative step are returned.
"""
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
apex_edge_length = []
# outputs for alternative calculation
full_local_step = []
min_distance_plus = []
min_distance_minus = []
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# get location of apex for s/c field line
apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
[glat], [glon],
[alt], [date])
# apex in ecef (maps to input location)
apex_root = np.array([apex_x[0], apex_y[0], apex_z[0]])
# take step from s/c along + vector direction
# then get the apex location
plus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
direction=vector_direction,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
plus_lat, plus_lon, plus_alt = ecef_to_geodetic(*plus)
plus_apex_x, plus_apex_y, plus_apex_z, plus_apex_lat, plus_apex_lon, plus_apex_alt = \
apex_location_info([plus_lat], [plus_lon], [plus_alt], [date])
# plus apex location in ECEF
plus_apex_root = np.array([plus_apex_x[0], plus_apex_y[0], plus_apex_z[0]])
# take half step from s/c along - vector direction
# then get the apex location
minus = step_along_mag_unit_vector(ecef_x, ecef_y, ecef_z, date,
direction=vector_direction,
scalar=-1,
num_steps=edge_steps,
step_size=edge_length/edge_steps)
minus_lat, minus_lon, minus_alt = ecef_to_geodetic(*minus)
minus_apex_x, minus_apex_y, minus_apex_z, minus_apex_lat, minus_apex_lon, minus_apex_alt = \
apex_location_info([minus_lat], [minus_lon], [minus_alt], [date])
minus_apex_root = np.array([minus_apex_x[0], minus_apex_y[0], minus_apex_z[0]])
# take difference in apex locations
apex_edge_length.append(np.sqrt((plus_apex_x[0]-minus_apex_x[0])**2 +
(plus_apex_y[0]-minus_apex_y[0])**2 +
(plus_apex_z[0]-minus_apex_z[0])**2))
# # take an alternative path to calculation
# # do field line trace around pos and neg apexes
# # then do intersection with field line projection thing
#
# # do a short centered field line trace around plus apex location
# other_trace = full_field_line(plus_apex_root, double_date, 0.,
# step_size=1.,
# max_steps=10,
# recurse=False)
# # need to determine where the intersection of apex field line
# # in relation to the vector direction from the s/c field apex location.
# pos_edge_length, _, mind_pos = step_until_intersect(apex_root,
# other_trace,
# 1, date,
# direction=vector_direction,
# field_step_size=1.,
# step_size_goal=edge_length/edge_steps)
# # do a short centered field line trace around 'minus' apex location
# other_trace = full_field_line(minus_apex_root, double_date, 0.,
# step_size=1.,
# max_steps=10,
# recurse=False)
# # need to determine where the intersection of apex field line
# # in relation to the vector direction from the s/c field apex location.
# minus_edge_length, _, mind_minus = step_until_intersect(apex_root,
# other_trace,
# -1, date,
# direction=vector_direction,
# field_step_size=1.,
# step_size_goal=edge_length/edge_steps)
# full_local_step.append(pos_edge_length + minus_edge_length)
# min_distance_plus.append(mind_pos)
# min_distance_minus.append(mind_minus)
# still sorting out alternative option for this calculation
# commented code is 'good' as far as the plan goes
# takes more time, so I haven't tested one vs the other yet
# having two live methods can lead to problems
# THIS IS A TODO (sort it out)
return np.array(apex_edge_length)#, np.array(full_local_step), np.array(min_distance_plus), np.array(min_distance_minus)
# # take step from one apex towards the other
# apex_path = step_along_mag_unit_vector(minus_apex_x, minus_apex_y, minus_apex_z, date,
# direction=vector_direction,
# num_steps=edge_steps,
# step_size=apex_edge_length[-1]/(edge_steps*2.))
# pos_apex_diff.append((apex_path[0] - plus_apex_x)**2 +
# (apex_path[1] - plus_apex_y)**2 +
# (apex_path[2] - plus_apex_z)**2)
# return apex_edge_length, path_apex_diff
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
max_steps=None, e_field_scaling_only=False):
"""
Calculates scalars for translating ion motions at position
glat, glon, and alt, for date, to the footpoints of the field line
as well as at the magnetic equator.
All inputs are assumed to be 1D arrays.
Note
----
Directions refer to the ion motion direction e.g. the zonal
scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
e_field_scaling_only : boolean (False)
If True, method only calculates the electric field scalar, ignoring
changes in magnitude of B. Note ion velocity related to E/B.
Returns
-------
dict
array-like of scalars for translating ion drifts. Keys are,
'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
for southern locations. 'equator_mer_drifts_scalar' and
'equator_zonal_drifts_scalar' cover the mappings to the equator.
"""
if step_size is None:
step_size = 100.
if max_steps is None:
max_steps = 1000
steps = np.arange(max_steps)
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
eq_zon_drifts_scalar = []
eq_mer_drifts_scalar = []
# magnetic field info
north_mag_scalar = []
south_mag_scalar = []
eq_mag_scalar = []
out = {}
# meridional e-field scalar map, can also be
# zonal ion drift scalar map
# print ('Starting Northern')
north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Southern')
south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Equatorial')
# , step_zon_apex2, mind_plus, mind_minus
eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'meridional',
edge_length=25.,
edge_steps=5)
# , step_mer_apex2, mind_plus, mind_minus
eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'zonal',
edge_length=25.,
edge_steps=5)
# print ('Done with core')
north_zon_drifts_scalar = north_zon_drifts_scalar/50.
south_zon_drifts_scalar = south_zon_drifts_scalar/50.
north_mer_drifts_scalar = north_mer_drifts_scalar/50.
south_mer_drifts_scalar = south_mer_drifts_scalar/50.
# equatorial
eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
if e_field_scaling_only:
# prepare output
out['north_mer_fields_scalar'] = north_zon_drifts_scalar
out['south_mer_fields_scalar'] = south_zon_drifts_scalar
out['north_zon_fields_scalar'] = north_mer_drifts_scalar
out['south_zon_fields_scalar'] = south_mer_drifts_scalar
out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
else:
# figure out scaling for drifts based upon change in magnetic field
# strength
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# get location of apex for s/c field line
apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
[glat], [glon],
[alt], [date])
# trace to northern footpoint
sc_root = np.array([ecef_x, ecef_y, ecef_z])
trace_north = field_line_trace(sc_root, double_date, 1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# southern tracing
trace_south = field_line_trace(sc_root, double_date, -1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# footpoint location
north_ftpnt = trace_north[-1, :]
nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
south_ftpnt = trace_south[-1, :]
sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
# scalar for the northern footpoint electric field based on distances
# for drift also need to include the magnetic field, drift = E/B
tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
np.deg2rad(90.-glat),
np.deg2rad(glon))
# get mag field and scalar for northern footpoint
tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
np.deg2rad(90.-nft_glat),
np.deg2rad(nft_glon))
north_mag_scalar.append(b_sc/b_nft)
# equatorial values
tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
np.deg2rad(90.-apex_lat),
np.deg2rad(apex_lon))
eq_mag_scalar.append(b_sc/b_eq)
# scalar for the southern footpoint
tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
np.deg2rad(90.-sft_glat),
np.deg2rad(sft_glon))
south_mag_scalar.append(b_sc/b_sft)
# make E-Field scalars to drifts
# lists to arrays
north_mag_scalar = np.array(north_mag_scalar)
south_mag_scalar = np.array(south_mag_scalar)
eq_mag_scalar = np.array(eq_mag_scalar)
# apply to electric field scaling to get ion drift values
north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
# equatorial
eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
# output
out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
return out
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.