repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
ajk8/hatchery | hatchery/project.py | package_has_version_file | python | def package_has_version_file(package_name):
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path) | Check to make sure _version.py is contained in the package | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L45-L48 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
    """ Detect the single top-level package to build.

    Scans the packages discovered by setuptools, ignoring any ``tests``
    package, and raises ProjectError when zero or more than one top-level
    package is found.
    """
    discovered = setuptools.find_packages()
    top_level = None
    for candidate in discovered:
        root = candidate.split('.', 1)[0]
        if not top_level and root != 'tests':
            top_level = root
        elif root not in ('tests', top_level):
            raise ProjectError('detected too many top-level packages...something is amiss: ' +
                               str(discovered))
    if not top_level:
        raise ProjectError('could not detect any packages to build!')
    return top_level
def project_has_setup_py():
    """ Check to make sure setup.py exists in the project """
    setup_path = 'setup.py'
    return os.path.isfile(setup_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'


def setup_py_uses__version_py():
    """ Check to make sure setup.py is exec'ing _version.py """
    # either the exec-based idiom or the imp find_module/load_module idiom counts
    patterns = (SETUP_PY_REGEX1, SETUP_PY_REGEX2)
    return any(helpers.regex_in_file(pattern, 'setup.py') for pattern in patterns)
def setup_py_uses___version__():
    """ Check to make sure setup.py is using the __version__ variable in the setup block """
    content = helpers.get_file_content('setup.py')
    version_arg = helpers.value_of_named_argument_in_function('version', 'setup', content)
    if version_arg is None:
        return False
    return '__version__' in version_arg
# Matches a module-level assignment such as __version__ = '1.2.3' (single or
# double quotes) and captures the quoted value in the named group "version".
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
    """ Check to make sure _version.py defines __version__ as a string """
    return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
    """ Grab the project name out of setup.py """
    content = helpers.get_file_content('setup.py')
    name_arg = helpers.value_of_named_argument_in_function(
        'name', 'setup', content, resolve_varname=True
    )
    if name_arg:
        first, last = name_arg[0], name_arg[-1]
        # strip surrounding quotes only when both ends carry the same quote char
        if first == last and last in ('"', "'"):
            name_arg = name_arg[1:-1]
    return name_arg
def get_version(package_name, ignore_cache=False):
    """ Get the version which is currently configured by the package

    :param package_name: name of the package whose _version.py is read
    :param ignore_cache: when True, bypass microcache so the file is re-read
    :raises ProjectError: if _version.py does not define __version__
    """
    if ignore_cache:
        with microcache.temporarily_disabled():
            found = helpers.regex_in_package_file(
                VERSION_SET_REGEX, '_version.py', package_name, return_match=True
            )
    else:
        found = helpers.regex_in_package_file(
            VERSION_SET_REGEX, '_version.py', package_name, return_match=True
        )
    if found is None:
        # bug fix: the original message contained a bare '{}' placeholder that
        # was never formatted -- include the package name so the error is usable
        raise ProjectError(
            'found _version.py for package {}, but __version__ is not defined'.format(
                package_name
            )
        )
    return found['version']
def set_version(package_name, version_str):
    """ Set the version in _version.py to version_str """
    old_version = get_version(package_name)
    path = helpers.package_file_path('_version.py', package_name)
    updated_content = helpers.get_file_content(path).replace(old_version, version_str)
    with open(path, 'w') as fileobj:
        fileobj.write(updated_content)
def version_is_valid(version_str):
    """ Check to see if the version specified is a valid as far as pkg_resources is concerned

    >>> version_is_valid('blah')
    False
    >>> version_is_valid('1.2.3')
    True
    """
    try:
        packaging.version.Version(version_str)
        return True
    except packaging.version.InvalidVersion:
        return False
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
    """ Query the pypi index at index_url using warehouse api to find all of the "releases" """
    url = '/'.join((index_url, project_name, 'json'))
    response = requests.get(url, verify=requests_verify)
    if response.status_code != 200:
        # None signals "this server does not speak the warehouse api"
        return None
    return response.json()['releases'].keys()
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
    """ Query the pypi index at index_url using pypicloud api to find all versions """
    api_url = index_url
    # swap a trailing ui/simple path for the pypicloud api root
    for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
        if api_url.endswith(suffix):
            api_url = api_url[:-len(suffix)] + '/api/package'
            break
    response = requests.get('/'.join((api_url, project_name)), verify=requests_verify)
    if response.status_code != 200:
        # None signals "this server does not speak the pypicloud api"
        return None
    return [pkg['version'] for pkg in response.json()['packages']]
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
    """ Find all uploaded versions of a project, trying each supported server api in turn """
    server_types = ('warehouse', 'pypicloud')
    for server_type in server_types:
        # dispatch by naming convention: _get_uploaded_versions_<server_type>
        fetch = globals()['_get_uploaded_versions_' + server_type]
        versions = fetch(project_name, index_url, requests_verify)
        if versions is None:
            continue
        logger.debug('detected pypi server: ' + server_type)
        return versions
    logger.debug('could not find evidence of project at {}, tried server types {}'.format(
        index_url, server_types))
    return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
    """ Check to see if the version specified has already been uploaded to the configured index
    """
    uploaded = _get_uploaded_versions(project_name, index_url, requests_verify)
    return version_str in uploaded
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
    """ Grab the latest version of project_name according to index_url

    Returns None when the index reports no versions at all; otherwise folds
    over the reported versions with an implicit '0.0' floor.
    """
    ret = None
    for candidate in _get_uploaded_versions(project_name, index_url, requests_verify):
        baseline = ret if ret is not None else '0.0'
        if packaging.version.Version(candidate) > packaging.version.Version(baseline):
            ret = candidate
        else:
            ret = baseline
    return ret
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
    """ Compare version_str with the latest (according to index_url) """
    # an already-uploaded version can never be "the new latest"
    if version_already_uploaded(project_name, version_str, index_url, requests_verify):
        return False
    latest = get_latest_uploaded_version(project_name, index_url, requests_verify)
    if latest is None:
        # nothing uploaded yet, so anything counts as latest
        return True
    return packaging.version.Version(version_str) > packaging.version.Version(latest)
def project_has_readme_md():
    """ See if project has a readme.md file """
    return any(entry.lower() == 'readme.md' for entry in os.listdir('.'))
def convert_readme_to_rst():
    """ Attempt to convert a README.md file into README.rst

    Raises ProjectError when a bare README or an existing README.rst would
    conflict, when pandoc fails, or when no README.md can be found.
    """
    project_files = os.listdir('.')
    # first pass: refuse to run if conversion would conflict or clobber
    for filename in project_files:
        if filename.lower() == 'readme':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'not sure what to do with it, refusing to convert'
            )
        elif filename.lower() == 'readme.rst':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'refusing to overwrite'
            )
    # second pass: convert the first README.md (any case) that is found
    for filename in project_files:
        if filename.lower() == 'readme.md':
            rst_filename = 'README.rst'
            logger.info('converting {} to {}'.format(filename, rst_filename))
            try:
                # pypandoc shells out to pandoc; OSError typically means the
                # pandoc binary is missing or not executable
                rst_content = pypandoc.convert(filename, 'rst')
                with open('README.rst', 'w') as rst_file:
                    rst_file.write(rst_content)
                return
            except OSError as e:
                raise ProjectError(
                    'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
                )
    raise ProjectError('could not find any README.md file to convert')
def get_packaged_files(package_name):
    """ Collect relative paths to all files which have already been packaged """
    dist_dir = 'dist'
    if not os.path.isdir(dist_dir):
        return []
    return [os.path.join(dist_dir, entry) for entry in os.listdir(dist_dir)]
def multiple_packaged_versions(package_name):
    """ Look through built package directory and see if there are multiple versions there

    :param package_name: sdist basename to look for (``<package_name>-<version>.tar.gz``)
    """
    import re  # local import: the rest of the module does not need re
    # bug fix: escape the package name and the literal dots in '.tar.gz' so
    # regex metacharacters cannot cause false matches; drops the funcy
    # dependency in favor of the stdlib
    sdist_pattern = re.compile(r'{}-(.+)\.tar\.gz'.format(re.escape(package_name)))
    versions = set()
    for filename in os.listdir('dist'):
        match = sdist_pattern.search(filename)
        if match:
            versions.add(match.group(1))
    return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | get_project_name | python | def get_project_name():
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret | Grab the project name out of setup.py | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L78-L86 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_version(package_name, ignore_cache=False):
    """ Get the version which is currently configured by the package

    :param package_name: name of the package whose _version.py is read
    :param ignore_cache: when True, bypass microcache so the file is re-read
    :raises ProjectError: if _version.py does not define __version__
    """
    if ignore_cache:
        with microcache.temporarily_disabled():
            found = helpers.regex_in_package_file(
                VERSION_SET_REGEX, '_version.py', package_name, return_match=True
            )
    else:
        found = helpers.regex_in_package_file(
            VERSION_SET_REGEX, '_version.py', package_name, return_match=True
        )
    if found is None:
        # bug fix: the original message contained a bare '{}' placeholder that
        # was never formatted -- include the package name so the error is usable
        raise ProjectError(
            'found _version.py for package {}, but __version__ is not defined'.format(
                package_name
            )
        )
    return found['version']
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
def version_is_valid(version_str):
""" Check to see if the version specified is a valid as far as pkg_resources is concerned
>>> version_is_valid('blah')
False
>>> version_is_valid('1.2.3')
True
"""
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using pypicloud api to find all versions """
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:len(suffix) * -1] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
server_types = ('warehouse', 'pypicloud')
for server_type in server_types:
get_method = globals()['_get_uploaded_versions_' + server_type]
versions = get_method(project_name, index_url, requests_verify)
if versions is not None:
logger.debug('detected pypi server: ' + server_type)
return versions
logger.debug('could not find evidence of project at {}, tried server types {}'.format(
index_url, server_types))
return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index
"""
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
""" Grab the latest version of project_name according to index_url """
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
ret = None
for uploaded_version in all_versions:
ret = ret or '0.0'
left, right = packaging.version.Version(uploaded_version), packaging.version.Version(ret)
if left > right:
ret = uploaded_version
return ret
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
""" Compare version_str with the latest (according to index_url) """
if version_already_uploaded(project_name, version_str, index_url, requests_verify):
return False
latest_uploaded_version = get_latest_uploaded_version(project_name, index_url, requests_verify)
if latest_uploaded_version is None:
return True
elif packaging.version.Version(version_str) > \
packaging.version.Version(latest_uploaded_version):
return True
return False
def project_has_readme_md():
""" See if project has a readme.md file """
for filename in os.listdir('.'):
if filename.lower() == 'readme.md':
return True
return False
def convert_readme_to_rst():
""" Attempt to convert a README.md file into README.rst """
project_files = os.listdir('.')
for filename in project_files:
if filename.lower() == 'readme':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'not sure what to do with it, refusing to convert'
)
elif filename.lower() == 'readme.rst':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'refusing to overwrite'
)
for filename in project_files:
if filename.lower() == 'readme.md':
rst_filename = 'README.rst'
logger.info('converting {} to {}'.format(filename, rst_filename))
try:
rst_content = pypandoc.convert(filename, 'rst')
with open('README.rst', 'w') as rst_file:
rst_file.write(rst_content)
return
except OSError as e:
raise ProjectError(
'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
)
raise ProjectError('could not find any README.md file to convert')
def get_packaged_files(package_name):
""" Collect relative paths to all files which have already been packaged """
if not os.path.isdir('dist'):
return []
return [os.path.join('dist', filename) for filename in os.listdir('dist')]
def multiple_packaged_versions(package_name):
    """ Look through built package directory and see if there are multiple versions there

    :param package_name: sdist basename to look for (``<package_name>-<version>.tar.gz``)
    """
    import re  # local import: the rest of the module does not need re
    # bug fix: escape the package name and the literal dots in '.tar.gz' so
    # regex metacharacters cannot cause false matches; drops the funcy
    # dependency in favor of the stdlib
    sdist_pattern = re.compile(r'{}-(.+)\.tar\.gz'.format(re.escape(package_name)))
    versions = set()
    for filename in os.listdir('dist'):
        match = sdist_pattern.search(filename)
        if match:
            versions.add(match.group(1))
    return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | get_version | python | def get_version(package_name, ignore_cache=False):
if ignore_cache:
with microcache.temporarily_disabled():
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
else:
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
if found is None:
raise ProjectError('found {}, but __version__ is not defined')
current_version = found['version']
return current_version | Get the version which is currently configured by the package | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L89-L103 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
""" Grab the project name out of setup.py """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
def version_is_valid(version_str):
""" Check to see if the version specified is a valid as far as pkg_resources is concerned
>>> version_is_valid('blah')
False
>>> version_is_valid('1.2.3')
True
"""
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using pypicloud api to find all versions """
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:len(suffix) * -1] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
server_types = ('warehouse', 'pypicloud')
for server_type in server_types:
get_method = globals()['_get_uploaded_versions_' + server_type]
versions = get_method(project_name, index_url, requests_verify)
if versions is not None:
logger.debug('detected pypi server: ' + server_type)
return versions
logger.debug('could not find evidence of project at {}, tried server types {}'.format(
index_url, server_types))
return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index
"""
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
""" Grab the latest version of project_name according to index_url """
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
ret = None
for uploaded_version in all_versions:
ret = ret or '0.0'
left, right = packaging.version.Version(uploaded_version), packaging.version.Version(ret)
if left > right:
ret = uploaded_version
return ret
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
""" Compare version_str with the latest (according to index_url) """
if version_already_uploaded(project_name, version_str, index_url, requests_verify):
return False
latest_uploaded_version = get_latest_uploaded_version(project_name, index_url, requests_verify)
if latest_uploaded_version is None:
return True
elif packaging.version.Version(version_str) > \
packaging.version.Version(latest_uploaded_version):
return True
return False
def project_has_readme_md():
""" See if project has a readme.md file """
for filename in os.listdir('.'):
if filename.lower() == 'readme.md':
return True
return False
def convert_readme_to_rst():
""" Attempt to convert a README.md file into README.rst """
project_files = os.listdir('.')
for filename in project_files:
if filename.lower() == 'readme':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'not sure what to do with it, refusing to convert'
)
elif filename.lower() == 'readme.rst':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'refusing to overwrite'
)
for filename in project_files:
if filename.lower() == 'readme.md':
rst_filename = 'README.rst'
logger.info('converting {} to {}'.format(filename, rst_filename))
try:
rst_content = pypandoc.convert(filename, 'rst')
with open('README.rst', 'w') as rst_file:
rst_file.write(rst_content)
return
except OSError as e:
raise ProjectError(
'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
)
raise ProjectError('could not find any README.md file to convert')
def get_packaged_files(package_name):
""" Collect relative paths to all files which have already been packaged """
if not os.path.isdir('dist'):
return []
return [os.path.join('dist', filename) for filename in os.listdir('dist')]
def multiple_packaged_versions(package_name):
    """ Look through built package directory and see if there are multiple versions there

    :param package_name: sdist basename to look for (``<package_name>-<version>.tar.gz``)
    """
    import re  # local import: the rest of the module does not need re
    # bug fix: escape the package name and the literal dots in '.tar.gz' so
    # regex metacharacters cannot cause false matches; drops the funcy
    # dependency in favor of the stdlib
    sdist_pattern = re.compile(r'{}-(.+)\.tar\.gz'.format(re.escape(package_name)))
    versions = set()
    for filename in os.listdir('dist'):
        match = sdist_pattern.search(filename)
        if match:
            versions.add(match.group(1))
    return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | set_version | python | def set_version(package_name, version_str):
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content) | Set the version in _version.py to version_str | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L106-L113 | [
"def get_version(package_name, ignore_cache=False):\n \"\"\" Get the version which is currently configured by the package \"\"\"\n if ignore_cache:\n with microcache.temporarily_disabled():\n found = helpers.regex_in_package_file(\n VERSION_SET_REGEX, '_version.py', package_name, return_match=True\n )\n else:\n found = helpers.regex_in_package_file(\n VERSION_SET_REGEX, '_version.py', package_name, return_match=True\n )\n if found is None:\n raise ProjectError('found {}, but __version__ is not defined')\n current_version = found['version']\n return current_version\n"
] | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
""" Grab the project name out of setup.py """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret
def get_version(package_name, ignore_cache=False):
    """ Get the version which is currently configured by the package

    :param package_name: name of the package whose _version.py is read
    :param ignore_cache: when True, bypass microcache so the file is re-read
    :raises ProjectError: if _version.py does not define __version__
    """
    if ignore_cache:
        with microcache.temporarily_disabled():
            found = helpers.regex_in_package_file(
                VERSION_SET_REGEX, '_version.py', package_name, return_match=True
            )
    else:
        found = helpers.regex_in_package_file(
            VERSION_SET_REGEX, '_version.py', package_name, return_match=True
        )
    if found is None:
        # bug fix: the original message contained a bare '{}' placeholder that
        # was never formatted -- include the package name so the error is usable
        raise ProjectError(
            'found _version.py for package {}, but __version__ is not defined'.format(
                package_name
            )
        )
    return found['version']
def version_is_valid(version_str):
""" Check to see if the version specified is a valid as far as pkg_resources is concerned
>>> version_is_valid('blah')
False
>>> version_is_valid('1.2.3')
True
"""
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using pypicloud api to find all versions """
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:len(suffix) * -1] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
server_types = ('warehouse', 'pypicloud')
for server_type in server_types:
get_method = globals()['_get_uploaded_versions_' + server_type]
versions = get_method(project_name, index_url, requests_verify)
if versions is not None:
logger.debug('detected pypi server: ' + server_type)
return versions
logger.debug('could not find evidence of project at {}, tried server types {}'.format(
index_url, server_types))
return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index
"""
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
    """Return the latest uploaded version of ``project_name`` according to
    ``index_url``, or None when nothing has been uploaded yet.

    The previous implementation seeded the comparison with a phantom '0.0'
    baseline, which made the function return '0.0' -- a version that was never
    uploaded -- whenever every real upload was a pre-release sorting below 0.0
    (e.g. '0.0.dev1').  Taking the max over the actual uploads fixes that.
    """
    all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
    if not all_versions:
        return None
    # max() keeps the first of equal maxima, matching the old strict-> compare
    return max(all_versions, key=packaging.version.Version)
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
    """Report whether ``version_str`` is strictly newer than everything already
    uploaded to ``index_url`` (an exact re-upload is never "latest").
    """
    if version_already_uploaded(project_name, version_str, index_url, requests_verify):
        return False
    newest = get_latest_uploaded_version(project_name, index_url, requests_verify)
    if newest is None:
        # nothing uploaded yet, so any valid version counts as latest
        return True
    return packaging.version.Version(version_str) > packaging.version.Version(newest)
def project_has_readme_md():
    """Report whether the current directory contains a readme.md (any case)."""
    return any(name.lower() == 'readme.md' for name in os.listdir('.'))
def convert_readme_to_rst():
    """ Attempt to convert a README.md file into README.rst

    Raises ProjectError when a bare 'README' variant is found (nothing sane to
    do with it), when a README.rst already exists (refuse to overwrite), when
    pandoc conversion fails, or when there is no README.md at all.
    """
    project_files = os.listdir('.')
    # first pass: refuse early on conflicting readme variants
    for filename in project_files:
        if filename.lower() == 'readme':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'not sure what to do with it, refusing to convert'
            )
        elif filename.lower() == 'readme.rst':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'refusing to overwrite'
            )
    # second pass: convert the first README.md (any case) that we find
    for filename in project_files:
        if filename.lower() == 'readme.md':
            rst_filename = 'README.rst'
            logger.info('converting {} to {}'.format(filename, rst_filename))
            try:
                rst_content = pypandoc.convert(filename, 'rst')
                # write to the same target we just logged (was hard-coded
                # 'README.rst' before, bypassing rst_filename)
                with open(rst_filename, 'w') as rst_file:
                    rst_file.write(rst_content)
                return
            except OSError as e:
                raise ProjectError(
                    'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
                )
    raise ProjectError('could not find any README.md file to convert')
def get_packaged_files(package_name):
    """Return relative paths to every file already packaged under dist/
    (empty list when there is no dist directory yet).
    """
    dist_dir = 'dist'
    if not os.path.isdir(dist_dir):
        return []
    return [os.path.join(dist_dir, name) for name in os.listdir(dist_dir)]
def multiple_packaged_versions(package_name):
    """ Look through built package directory and see if there are multiple versions there

    The old funcy-based pattern '{}-(.+).tar.gz' left the dots as regex
    wildcards and did not escape package_name, so stray files (e.g.
    'pkg-1.0.tarXgz' or detached '.tar.gz.asc' signatures) could be counted
    as extra versions.  Use an anchored, fully-escaped stdlib regex instead.
    """
    import re
    sdist_regex = re.compile(r'{}-(.+)\.tar\.gz$'.format(re.escape(package_name)))
    versions = set()
    for filename in os.listdir('dist'):
        match = sdist_regex.search(filename)
        if match:
            versions.add(match.group(1))
    return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | version_is_valid | python | def version_is_valid(version_str):
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True | Check to see if the version specified is a valid as far as pkg_resources is concerned
>>> version_is_valid('blah')
False
>>> version_is_valid('1.2.3')
True | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L116-L128 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
""" Grab the project name out of setup.py """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret
def get_version(package_name, ignore_cache=False):
""" Get the version which is currently configured by the package """
if ignore_cache:
with microcache.temporarily_disabled():
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
else:
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
if found is None:
raise ProjectError('found {}, but __version__ is not defined')
current_version = found['version']
return current_version
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using pypicloud api to find all versions """
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:len(suffix) * -1] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
server_types = ('warehouse', 'pypicloud')
for server_type in server_types:
get_method = globals()['_get_uploaded_versions_' + server_type]
versions = get_method(project_name, index_url, requests_verify)
if versions is not None:
logger.debug('detected pypi server: ' + server_type)
return versions
logger.debug('could not find evidence of project at {}, tried server types {}'.format(
index_url, server_types))
return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index
"""
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
""" Grab the latest version of project_name according to index_url """
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
ret = None
for uploaded_version in all_versions:
ret = ret or '0.0'
left, right = packaging.version.Version(uploaded_version), packaging.version.Version(ret)
if left > right:
ret = uploaded_version
return ret
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
""" Compare version_str with the latest (according to index_url) """
if version_already_uploaded(project_name, version_str, index_url, requests_verify):
return False
latest_uploaded_version = get_latest_uploaded_version(project_name, index_url, requests_verify)
if latest_uploaded_version is None:
return True
elif packaging.version.Version(version_str) > \
packaging.version.Version(latest_uploaded_version):
return True
return False
def project_has_readme_md():
""" See if project has a readme.md file """
for filename in os.listdir('.'):
if filename.lower() == 'readme.md':
return True
return False
def convert_readme_to_rst():
""" Attempt to convert a README.md file into README.rst """
project_files = os.listdir('.')
for filename in project_files:
if filename.lower() == 'readme':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'not sure what to do with it, refusing to convert'
)
elif filename.lower() == 'readme.rst':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'refusing to overwrite'
)
for filename in project_files:
if filename.lower() == 'readme.md':
rst_filename = 'README.rst'
logger.info('converting {} to {}'.format(filename, rst_filename))
try:
rst_content = pypandoc.convert(filename, 'rst')
with open('README.rst', 'w') as rst_file:
rst_file.write(rst_content)
return
except OSError as e:
raise ProjectError(
'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
)
raise ProjectError('could not find any README.md file to convert')
def get_packaged_files(package_name):
""" Collect relative paths to all files which have already been packaged """
if not os.path.isdir('dist'):
return []
return [os.path.join('dist', filename) for filename in os.listdir('dist')]
def multiple_packaged_versions(package_name):
""" Look through built package directory and see if there are multiple versions there """
dist_files = os.listdir('dist')
versions = set()
for filename in dist_files:
version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename)
if version:
versions.add(version)
return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | _get_uploaded_versions_warehouse | python | def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None | Query the pypi index at index_url using warehouse api to find all of the "releases" | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L131-L137 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
""" Grab the project name out of setup.py """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret
def get_version(package_name, ignore_cache=False):
""" Get the version which is currently configured by the package """
if ignore_cache:
with microcache.temporarily_disabled():
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
else:
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
if found is None:
raise ProjectError('found {}, but __version__ is not defined')
current_version = found['version']
return current_version
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
def version_is_valid(version_str):
""" Check to see if the version specified is a valid as far as pkg_resources is concerned
>>> version_is_valid('blah')
False
>>> version_is_valid('1.2.3')
True
"""
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using pypicloud api to find all versions """
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:len(suffix) * -1] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
server_types = ('warehouse', 'pypicloud')
for server_type in server_types:
get_method = globals()['_get_uploaded_versions_' + server_type]
versions = get_method(project_name, index_url, requests_verify)
if versions is not None:
logger.debug('detected pypi server: ' + server_type)
return versions
logger.debug('could not find evidence of project at {}, tried server types {}'.format(
index_url, server_types))
return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index
"""
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
""" Grab the latest version of project_name according to index_url """
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
ret = None
for uploaded_version in all_versions:
ret = ret or '0.0'
left, right = packaging.version.Version(uploaded_version), packaging.version.Version(ret)
if left > right:
ret = uploaded_version
return ret
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
""" Compare version_str with the latest (according to index_url) """
if version_already_uploaded(project_name, version_str, index_url, requests_verify):
return False
latest_uploaded_version = get_latest_uploaded_version(project_name, index_url, requests_verify)
if latest_uploaded_version is None:
return True
elif packaging.version.Version(version_str) > \
packaging.version.Version(latest_uploaded_version):
return True
return False
def project_has_readme_md():
""" See if project has a readme.md file """
for filename in os.listdir('.'):
if filename.lower() == 'readme.md':
return True
return False
def convert_readme_to_rst():
""" Attempt to convert a README.md file into README.rst """
project_files = os.listdir('.')
for filename in project_files:
if filename.lower() == 'readme':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'not sure what to do with it, refusing to convert'
)
elif filename.lower() == 'readme.rst':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'refusing to overwrite'
)
for filename in project_files:
if filename.lower() == 'readme.md':
rst_filename = 'README.rst'
logger.info('converting {} to {}'.format(filename, rst_filename))
try:
rst_content = pypandoc.convert(filename, 'rst')
with open('README.rst', 'w') as rst_file:
rst_file.write(rst_content)
return
except OSError as e:
raise ProjectError(
'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
)
raise ProjectError('could not find any README.md file to convert')
def get_packaged_files(package_name):
""" Collect relative paths to all files which have already been packaged """
if not os.path.isdir('dist'):
return []
return [os.path.join('dist', filename) for filename in os.listdir('dist')]
def multiple_packaged_versions(package_name):
""" Look through built package directory and see if there are multiple versions there """
dist_files = os.listdir('dist')
versions = set()
for filename in dist_files:
version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename)
if version:
versions.add(version)
return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | _get_uploaded_versions_pypicloud | python | def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:len(suffix) * -1] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None | Query the pypi index at index_url using pypicloud api to find all versions | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L140-L151 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
""" Grab the project name out of setup.py """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret
def get_version(package_name, ignore_cache=False):
""" Get the version which is currently configured by the package """
if ignore_cache:
with microcache.temporarily_disabled():
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
else:
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
if found is None:
raise ProjectError('found {}, but __version__ is not defined')
current_version = found['version']
return current_version
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
def version_is_valid(version_str):
""" Check to see if the version specified is a valid as far as pkg_resources is concerned
>>> version_is_valid('blah')
False
>>> version_is_valid('1.2.3')
True
"""
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
server_types = ('warehouse', 'pypicloud')
for server_type in server_types:
get_method = globals()['_get_uploaded_versions_' + server_type]
versions = get_method(project_name, index_url, requests_verify)
if versions is not None:
logger.debug('detected pypi server: ' + server_type)
return versions
logger.debug('could not find evidence of project at {}, tried server types {}'.format(
index_url, server_types))
return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index
"""
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
""" Grab the latest version of project_name according to index_url """
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
ret = None
for uploaded_version in all_versions:
ret = ret or '0.0'
left, right = packaging.version.Version(uploaded_version), packaging.version.Version(ret)
if left > right:
ret = uploaded_version
return ret
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
""" Compare version_str with the latest (according to index_url) """
if version_already_uploaded(project_name, version_str, index_url, requests_verify):
return False
latest_uploaded_version = get_latest_uploaded_version(project_name, index_url, requests_verify)
if latest_uploaded_version is None:
return True
elif packaging.version.Version(version_str) > \
packaging.version.Version(latest_uploaded_version):
return True
return False
def project_has_readme_md():
""" See if project has a readme.md file """
for filename in os.listdir('.'):
if filename.lower() == 'readme.md':
return True
return False
def convert_readme_to_rst():
""" Attempt to convert a README.md file into README.rst """
project_files = os.listdir('.')
for filename in project_files:
if filename.lower() == 'readme':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'not sure what to do with it, refusing to convert'
)
elif filename.lower() == 'readme.rst':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'refusing to overwrite'
)
for filename in project_files:
if filename.lower() == 'readme.md':
rst_filename = 'README.rst'
logger.info('converting {} to {}'.format(filename, rst_filename))
try:
rst_content = pypandoc.convert(filename, 'rst')
with open('README.rst', 'w') as rst_file:
rst_file.write(rst_content)
return
except OSError as e:
raise ProjectError(
'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
)
raise ProjectError('could not find any README.md file to convert')
def get_packaged_files(package_name):
""" Collect relative paths to all files which have already been packaged """
if not os.path.isdir('dist'):
return []
return [os.path.join('dist', filename) for filename in os.listdir('dist')]
def multiple_packaged_versions(package_name):
""" Look through built package directory and see if there are multiple versions there """
dist_files = os.listdir('dist')
versions = set()
for filename in dist_files:
version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename)
if version:
versions.add(version)
return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | version_already_uploaded | python | def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions | Check to see if the version specified has already been uploaded to the configured index | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L168-L172 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
""" Grab the project name out of setup.py """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret
def get_version(package_name, ignore_cache=False):
""" Get the version which is currently configured by the package """
if ignore_cache:
with microcache.temporarily_disabled():
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
else:
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
if found is None:
raise ProjectError('found {}, but __version__ is not defined')
current_version = found['version']
return current_version
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
def version_is_valid(version_str):
    """ Check to see if the version specified is a valid as far as pkg_resources is concerned

    >>> version_is_valid('blah')
    False
    >>> version_is_valid('1.2.3')
    True
    """
    try:
        packaging.version.Version(version_str)
        return True
    except packaging.version.InvalidVersion:
        return False
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
    """ Query the pypi index at index_url using warehouse api to find all of the "releases" """
    response = requests.get(
        '/'.join((index_url, project_name, 'json')), verify=requests_verify
    )
    # anything other than a 200 means "not a warehouse server / not found"
    if response.status_code != 200:
        return None
    return response.json()['releases'].keys()
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
    """ Query the pypi index at index_url using pypicloud api to find all versions """
    # rewrite ".../pypi" or ".../simple" style index URLs to the pypicloud
    # api root ("/api/package"); leave anything else untouched
    api_url = index_url
    for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
        if index_url.endswith(suffix):
            api_url = index_url[:-len(suffix)] + '/api/package'
            break
    response = requests.get('/'.join((api_url, project_name)), verify=requests_verify)
    if response.status_code != 200:
        return None
    return [package['version'] for package in response.json()['packages']]
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
    # try each known pypi server flavor in turn; the first helper that
    # returns a non-None result wins
    server_types = ('warehouse', 'pypicloud')
    for server_type in server_types:
        fetch = globals()['_get_uploaded_versions_' + server_type]
        versions = fetch(project_name, index_url, requests_verify)
        if versions is None:
            continue
        logger.debug('detected pypi server: ' + server_type)
        return versions
    logger.debug('could not find evidence of project at {}, tried server types {}'.format(
        index_url, server_types))
    return []
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
    """ Grab the latest version of project_name according to index_url """
    latest = None
    for candidate in _get_uploaded_versions(project_name, index_url, requests_verify):
        # '0.0' acts as the floor once at least one version exists;
        # a project with no uploads at all yields None
        latest = latest or '0.0'
        if packaging.version.Version(candidate) > packaging.version.Version(latest):
            latest = candidate
    return latest
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
    """ Compare version_str with the latest (according to index_url) """
    # an already-uploaded version can never be "latest" for our purposes
    if version_already_uploaded(project_name, version_str, index_url, requests_verify):
        return False
    latest = get_latest_uploaded_version(project_name, index_url, requests_verify)
    if latest is None:
        # nothing uploaded yet, so any version is the latest
        return True
    return packaging.version.Version(version_str) > packaging.version.Version(latest)
def project_has_readme_md():
    """ See if project has a readme.md file """
    # case-insensitive check of the current working directory
    return any(entry.lower() == 'readme.md' for entry in os.listdir('.'))
def convert_readme_to_rst():
    """ Attempt to convert a README.md file into README.rst

    Raises:
        ProjectError: if a bare README or a README.rst already exists,
            if pandoc fails, or if no README.md file can be found
    """
    project_files = os.listdir('.')
    # first pass: refuse to run if converting could clobber or conflict
    for filename in project_files:
        if filename.lower() == 'readme':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'not sure what to do with it, refusing to convert'
            )
        elif filename.lower() == 'readme.rst':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'refusing to overwrite'
            )
    # second pass: find the markdown readme and convert it
    for filename in project_files:
        if filename.lower() == 'readme.md':
            rst_filename = 'README.rst'
            logger.info('converting {} to {}'.format(filename, rst_filename))
            try:
                rst_content = pypandoc.convert(filename, 'rst')
                # consistency fix: write to rst_filename instead of repeating
                # the hard-coded 'README.rst' literal a second time
                with open(rst_filename, 'w') as rst_file:
                    rst_file.write(rst_content)
                return
            except OSError as e:
                # pypandoc raises OSError when the pandoc binary is missing
                raise ProjectError(
                    'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
                )
    raise ProjectError('could not find any README.md file to convert')
def get_packaged_files(package_name):
    """ Collect relative paths to all files which have already been packaged """
    dist_dir = 'dist'
    # no dist directory means nothing has been packaged yet
    if not os.path.isdir(dist_dir):
        return []
    return [os.path.join(dist_dir, entry) for entry in os.listdir(dist_dir)]
def multiple_packaged_versions(package_name):
    """ Look through built package directory and see if there are multiple versions there """
    # extract the version component from sdist filenames like
    # "<package>-<version>.tar.gz" and count the distinct values
    pattern = r'{}-(.+).tar.gz'.format(package_name)
    versions = set()
    for filename in os.listdir('dist'):
        matched = funcy.re_find(pattern, filename)
        if matched:
            versions.add(matched)
    return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | convert_readme_to_rst | python | def convert_readme_to_rst():
project_files = os.listdir('.')
for filename in project_files:
if filename.lower() == 'readme':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'not sure what to do with it, refusing to convert'
)
elif filename.lower() == 'readme.rst':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'refusing to overwrite'
)
for filename in project_files:
if filename.lower() == 'readme.md':
rst_filename = 'README.rst'
logger.info('converting {} to {}'.format(filename, rst_filename))
try:
rst_content = pypandoc.convert(filename, 'rst')
with open('README.rst', 'w') as rst_file:
rst_file.write(rst_content)
return
except OSError as e:
raise ProjectError(
'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
)
raise ProjectError('could not find any README.md file to convert') | Attempt to convert a README.md file into README.rst | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L208-L235 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
""" Grab the project name out of setup.py """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret
def get_version(package_name, ignore_cache=False):
""" Get the version which is currently configured by the package """
if ignore_cache:
with microcache.temporarily_disabled():
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
else:
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
if found is None:
raise ProjectError('found {}, but __version__ is not defined')
current_version = found['version']
return current_version
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
def version_is_valid(version_str):
""" Check to see if the version specified is a valid as far as pkg_resources is concerned
>>> version_is_valid('blah')
False
>>> version_is_valid('1.2.3')
True
"""
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using pypicloud api to find all versions """
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:len(suffix) * -1] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
server_types = ('warehouse', 'pypicloud')
for server_type in server_types:
get_method = globals()['_get_uploaded_versions_' + server_type]
versions = get_method(project_name, index_url, requests_verify)
if versions is not None:
logger.debug('detected pypi server: ' + server_type)
return versions
logger.debug('could not find evidence of project at {}, tried server types {}'.format(
index_url, server_types))
return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index
"""
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
""" Grab the latest version of project_name according to index_url """
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
ret = None
for uploaded_version in all_versions:
ret = ret or '0.0'
left, right = packaging.version.Version(uploaded_version), packaging.version.Version(ret)
if left > right:
ret = uploaded_version
return ret
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
""" Compare version_str with the latest (according to index_url) """
if version_already_uploaded(project_name, version_str, index_url, requests_verify):
return False
latest_uploaded_version = get_latest_uploaded_version(project_name, index_url, requests_verify)
if latest_uploaded_version is None:
return True
elif packaging.version.Version(version_str) > \
packaging.version.Version(latest_uploaded_version):
return True
return False
def project_has_readme_md():
""" See if project has a readme.md file """
for filename in os.listdir('.'):
if filename.lower() == 'readme.md':
return True
return False
def get_packaged_files(package_name):
""" Collect relative paths to all files which have already been packaged """
if not os.path.isdir('dist'):
return []
return [os.path.join('dist', filename) for filename in os.listdir('dist')]
def multiple_packaged_versions(package_name):
""" Look through built package directory and see if there are multiple versions there """
dist_files = os.listdir('dist')
versions = set()
for filename in dist_files:
version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename)
if version:
versions.add(version)
return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | get_packaged_files | python | def get_packaged_files(package_name):
if not os.path.isdir('dist'):
return []
return [os.path.join('dist', filename) for filename in os.listdir('dist')] | Collect relative paths to all files which have already been packaged | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L238-L242 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
""" Grab the project name out of setup.py """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret
def get_version(package_name, ignore_cache=False):
""" Get the version which is currently configured by the package """
if ignore_cache:
with microcache.temporarily_disabled():
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
else:
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
if found is None:
raise ProjectError('found {}, but __version__ is not defined')
current_version = found['version']
return current_version
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
def version_is_valid(version_str):
""" Check to see if the version specified is a valid as far as pkg_resources is concerned
>>> version_is_valid('blah')
False
>>> version_is_valid('1.2.3')
True
"""
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using pypicloud api to find all versions """
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:len(suffix) * -1] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
server_types = ('warehouse', 'pypicloud')
for server_type in server_types:
get_method = globals()['_get_uploaded_versions_' + server_type]
versions = get_method(project_name, index_url, requests_verify)
if versions is not None:
logger.debug('detected pypi server: ' + server_type)
return versions
logger.debug('could not find evidence of project at {}, tried server types {}'.format(
index_url, server_types))
return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index
"""
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
""" Grab the latest version of project_name according to index_url """
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
ret = None
for uploaded_version in all_versions:
ret = ret or '0.0'
left, right = packaging.version.Version(uploaded_version), packaging.version.Version(ret)
if left > right:
ret = uploaded_version
return ret
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
""" Compare version_str with the latest (according to index_url) """
if version_already_uploaded(project_name, version_str, index_url, requests_verify):
return False
latest_uploaded_version = get_latest_uploaded_version(project_name, index_url, requests_verify)
if latest_uploaded_version is None:
return True
elif packaging.version.Version(version_str) > \
packaging.version.Version(latest_uploaded_version):
return True
return False
def project_has_readme_md():
""" See if project has a readme.md file """
for filename in os.listdir('.'):
if filename.lower() == 'readme.md':
return True
return False
def convert_readme_to_rst():
""" Attempt to convert a README.md file into README.rst """
project_files = os.listdir('.')
for filename in project_files:
if filename.lower() == 'readme':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'not sure what to do with it, refusing to convert'
)
elif filename.lower() == 'readme.rst':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'refusing to overwrite'
)
for filename in project_files:
if filename.lower() == 'readme.md':
rst_filename = 'README.rst'
logger.info('converting {} to {}'.format(filename, rst_filename))
try:
rst_content = pypandoc.convert(filename, 'rst')
with open('README.rst', 'w') as rst_file:
rst_file.write(rst_content)
return
except OSError as e:
raise ProjectError(
'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
)
raise ProjectError('could not find any README.md file to convert')
def multiple_packaged_versions(package_name):
""" Look through built package directory and see if there are multiple versions there """
dist_files = os.listdir('dist')
versions = set()
for filename in dist_files:
version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename)
if version:
versions.add(version)
return len(versions) > 1
|
ajk8/hatchery | hatchery/project.py | multiple_packaged_versions | python | def multiple_packaged_versions(package_name):
dist_files = os.listdir('dist')
versions = set()
for filename in dist_files:
version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename)
if version:
versions.add(version)
return len(versions) > 1 | Look through built package directory and see if there are multiple versions there | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L245-L253 | null | import setuptools
import os
import requests
import logging
import microcache
import pypandoc
import funcy
from . import helpers
# packaging got moved into its own top-level package in recent python versions
try:
from pkg_resources.extern import packaging
except ImportError:
import packaging
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.ERROR)
class ProjectError(RuntimeError):
pass
def get_package_name():
packages = setuptools.find_packages()
build_package = None
for package_name in packages:
root_package = package_name.split('.')[0]
if not build_package and root_package != 'tests':
build_package = root_package
continue
if root_package not in ['tests', build_package]:
raise ProjectError('detected too many top-level packages...something is amiss: ' +
str(packages))
if not build_package:
raise ProjectError('could not detect any packages to build!')
return build_package
def project_has_setup_py():
""" Check to make sure setup.py exists in the project """
return os.path.isfile('setup.py')
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path)
SETUP_PY_REGEX1 = r'with open\(.+_version\.py.+\)[^\:]+\:\s+exec\(.+read\(\)\)'
SETUP_PY_REGEX2 = r'=\s*find_module\(.+_version.+\)\s+_version\s*=\s*load_module\(.+_version.+\)'
def setup_py_uses__version_py():
""" Check to make sure setup.py is exec'ing _version.py """
for regex in (SETUP_PY_REGEX1, SETUP_PY_REGEX2):
if helpers.regex_in_file(regex, 'setup.py'):
return True
return False
def setup_py_uses___version__():
""" Check to make sure setup.py is using the __version__ variable in the setup block """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function('version', 'setup', setup_py_content)
return ret is not None and '__version__' in ret
VERSION_SET_REGEX = r'__version__\s*=\s*[\'"](?P<version>[^\'"]+)[\'"]'
def version_file_has___version__(package_name):
""" Check to make sure _version.py defines __version__ as a string """
return helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name)
def get_project_name():
""" Grab the project name out of setup.py """
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] in ('"', "'"):
ret = ret[1:-1]
return ret
def get_version(package_name, ignore_cache=False):
""" Get the version which is currently configured by the package """
if ignore_cache:
with microcache.temporarily_disabled():
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
else:
found = helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
if found is None:
raise ProjectError('found {}, but __version__ is not defined')
current_version = found['version']
return current_version
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content)
def version_is_valid(version_str):
""" Check to see if the version specified is a valid as far as pkg_resources is concerned
>>> version_is_valid('blah')
False
>>> version_is_valid('1.2.3')
True
"""
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using pypicloud api to find all versions """
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:len(suffix) * -1] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None
@microcache.this
def _get_uploaded_versions(project_name, index_url, requests_verify=True):
server_types = ('warehouse', 'pypicloud')
for server_type in server_types:
get_method = globals()['_get_uploaded_versions_' + server_type]
versions = get_method(project_name, index_url, requests_verify)
if versions is not None:
logger.debug('detected pypi server: ' + server_type)
return versions
logger.debug('could not find evidence of project at {}, tried server types {}'.format(
index_url, server_types))
return []
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index
"""
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions
def get_latest_uploaded_version(project_name, index_url, requests_verify=True):
""" Grab the latest version of project_name according to index_url """
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
ret = None
for uploaded_version in all_versions:
ret = ret or '0.0'
left, right = packaging.version.Version(uploaded_version), packaging.version.Version(ret)
if left > right:
ret = uploaded_version
return ret
def version_is_latest(project_name, version_str, index_url, requests_verify=True):
""" Compare version_str with the latest (according to index_url) """
if version_already_uploaded(project_name, version_str, index_url, requests_verify):
return False
latest_uploaded_version = get_latest_uploaded_version(project_name, index_url, requests_verify)
if latest_uploaded_version is None:
return True
elif packaging.version.Version(version_str) > \
packaging.version.Version(latest_uploaded_version):
return True
return False
def project_has_readme_md():
""" See if project has a readme.md file """
for filename in os.listdir('.'):
if filename.lower() == 'readme.md':
return True
return False
def convert_readme_to_rst():
""" Attempt to convert a README.md file into README.rst """
project_files = os.listdir('.')
for filename in project_files:
if filename.lower() == 'readme':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'not sure what to do with it, refusing to convert'
)
elif filename.lower() == 'readme.rst':
raise ProjectError(
'found {} in project directory...'.format(filename) +
'refusing to overwrite'
)
for filename in project_files:
if filename.lower() == 'readme.md':
rst_filename = 'README.rst'
logger.info('converting {} to {}'.format(filename, rst_filename))
try:
rst_content = pypandoc.convert(filename, 'rst')
with open('README.rst', 'w') as rst_file:
rst_file.write(rst_content)
return
except OSError as e:
raise ProjectError(
'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
)
raise ProjectError('could not find any README.md file to convert')
def get_packaged_files(package_name):
""" Collect relative paths to all files which have already been packaged """
if not os.path.isdir('dist'):
return []
return [os.path.join('dist', filename) for filename in os.listdir('dist')]
|
ajk8/hatchery | hatchery/main.py | hatchery | python | def hatchery():
args = docopt.docopt(__doc__)
task_list = args['<task>']
if not task_list or 'help' in task_list or args['--help']:
print(__doc__.format(version=_version.__version__, config_files=config.CONFIG_LOCATIONS))
return 0
level_str = args['--log-level']
try:
level_const = getattr(logging, level_str.upper())
logging.basicConfig(level=level_const)
if level_const == logging.DEBUG:
workdir.options.debug = True
except LookupError:
logging.basicConfig()
logger.error('received invalid log level: ' + level_str)
return 1
for task in task_list:
if task not in ORDERED_TASKS:
logger.info('starting task: check')
logger.error('received invalid task: ' + task)
return 1
for task in CHECK_TASKS:
if task in task_list:
task_check(args)
break
if 'package' in task_list and not args['--release-version']:
logger.error('--release-version is required for the package task')
return 1
config_dict = _get_config_or_die(
calling_task='hatchery',
required_params=['auto_push_tag']
)
if config_dict['auto_push_tag'] and 'upload' in task_list:
logger.info('adding task: tag (auto_push_tag==True)')
task_list.append('tag')
# all commands will raise a SystemExit if they fail
# check will have already been run
for task in ORDERED_TASKS:
if task in task_list and task != 'check':
logger.info('starting task: ' + task)
globals()['task_' + task](args)
logger.info("all's well that ends well...hatchery out")
return 0 | Main entry point for the hatchery program | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/main.py#L379-L430 | [
"def _get_config_or_die(required_params=[], calling_task=None):\n try:\n config_dict = config.from_yaml()\n for key in required_params:\n if key not in config_dict.keys() or config_dict[key] is None:\n logger.error(\n '\"{}\" was not set in hatchery config files, '\n 'cannot continue with task: {}'.format(key, calling_task)\n )\n raise SystemExit(1)\n except config.ConfigError as e:\n logger.error(str(e))\n raise SystemExit(1)\n return config_dict\n",
"def task_check(args):\n logger.debug('verifying that project has a single package')\n try:\n package_name = project.get_package_name()\n except project.ProjectError as e:\n logger.error(str(e))\n raise SystemExit(1)\n ret = 0\n logger.debug('checking state of _version.py file')\n if (not project.package_has_version_file(package_name) or\n not project.version_file_has___version__(package_name)):\n _version_py_block = snippets.get_snippet_content('_version.py', package_name=package_name)\n logger.error(os.linesep.join((\n 'package does not have a valid _version.py file',\n '',\n _version_py_block\n )))\n ret = 1\n logger.debug('checking state of setup.py')\n if not project.setup_py_uses__version_py() or not project.setup_py_uses___version__():\n setup_py_block = snippets.get_snippet_content('setup.py', package_name=package_name)\n logger.error(os.linesep.join((\n 'could not detect valid method in setup.py:',\n '',\n setup_py_block\n )))\n ret = 1\n if ret:\n raise SystemExit(ret)\n logger.info('all checks passed!')\n"
] | """
hatchery (version {version})
Automate the process of testing, packaging, and uploading your project with
dynamic versioning and no source tree pollution!
Usage: hatchery [<task> ...] [options]
Just run from the root of your project and off you go. Tasks can be
chained, and will always be run in the order below regardless of the order
in which they are specified. Available tasks are:
help print this help output (ignores all other tasks)
check check to see if this project conforms to hatchery requirements
config print the computed config contents to the console
clean clean up the working directory
test run tests according to commands specified in .hatchery.yml
package create binary packages to be distributed
register register your package with the index if you haven't already
upload upload all created packages to a configured pypi index
tag tag your git repository with the release version and push
it back up to the origin
General options:
-h, --help print this help output and quit
--log-level=LEVEL
one of (debug, info, error, critical) [default: info]
-s, --stream-command-output
stream output of all subcommands as they are running.
if not set, output will be captured and printed to the
screen only on failed commands
-r=VER, --release-version=VER
version to use when packaging and registering
Note: version will be inferred when uploading
Notes on tagging:
Creating a tag requires a clean working copy, which means that there are
two very important prerequisites:
1. you should not use hatchery's tagging (and uploading) functionality
until you have committed all of your changes (good practice anyway)
2. you must have an entry for .hatchery.work in your .gitignore file
so that hatchery itself does not dirty up your working tree
Config files:
hatchery endeavors to have as few implicit requirements on your project
as possible (and those are managed using the "check" task). In order
for it to do its work, therefore, some configuration has to be provided.
This is done using config files. There are two files (user-level, and
project-level) that can be used to specify these configuration files:
{config_files}
In the case where both files define the same parameters, the project-level
file wins. See README.md for more information about the available
configuration parameters.
"""
import docopt
import logging
import funcy
import os
import workdir
import git
import ruamel.yaml as yaml
from . import _version
from . import executor
from . import project
from . import config
from . import snippets
from . import helpers
logger = logging.getLogger(__name__)
workdir.options.path = '.hatchery.work'
workdir.options.sync_exclude_regex_list = [r'\.hatchery\.work']
def _get_package_name_or_die():
try:
package_name = project.get_package_name()
except project.ProjectError as e:
logger.error(str(e))
raise SystemExit(1)
return package_name
def _get_config_or_die(required_params=[], calling_task=None):
try:
config_dict = config.from_yaml()
for key in required_params:
if key not in config_dict.keys() or config_dict[key] is None:
logger.error(
'"{}" was not set in hatchery config files, '
'cannot continue with task: {}'.format(key, calling_task)
)
raise SystemExit(1)
except config.ConfigError as e:
logger.error(str(e))
raise SystemExit(1)
return config_dict
def _valid_version_or_die(release_version):
if not project.version_is_valid(release_version):
logger.error('version "{}" is not pip-compatible, try another!'.format(release_version))
raise SystemExit(1)
def _latest_version_or_die(release_version, project_name, pypi_repository, pypi_verify_ssl):
if helpers.string_is_url(pypi_repository):
index_url = pypi_repository
else:
pypirc_dict = config.from_pypirc(pypi_repository)
index_url = pypirc_dict['repository']
if project.version_already_uploaded(project_name, release_version, index_url, pypi_verify_ssl):
logger.error('{}=={} already exists on index {}'.format(
project_name, release_version, index_url
))
raise SystemExit(1)
elif not project.version_is_latest(project_name, release_version, index_url, pypi_verify_ssl):
latest_version = project.get_latest_uploaded_version(
project_name, index_url, pypi_verify_ssl
)
logger.error('{}=={} is older than the latest ({}) on index {}'.format(
project_name, release_version, latest_version, index_url
))
raise SystemExit(1)
def _check_and_set_version(release_version, package_name, project_name,
pypi_repository, pypi_verify_ssl):
set_flag = True
if not release_version:
set_flag = False
release_version = project.get_version(package_name)
_valid_version_or_die(release_version)
_latest_version_or_die(release_version, project_name, pypi_repository, pypi_verify_ssl)
if set_flag:
project.set_version(package_name, release_version)
return release_version
def _log_failure_and_die(error_msg, call_result, log_full_result):
msg = error_msg
if log_full_result:
msg += os.linesep.join((':', call_result.format_error_msg()))
logger.error(msg)
raise SystemExit(1)
def task_tag(args):
if not os.path.isdir(workdir.options.path):
logger.error('{} does not exist, cannot fetch tag version!'.format(workdir.options.path))
raise SystemExit(1)
with workdir.as_cwd():
config_dict = _get_config_or_die(
calling_task='upload',
required_params=['git_remote_name']
)
git_remote_name = config_dict['git_remote_name']
package_name = _get_package_name_or_die()
release_version = project.get_version(package_name, ignore_cache=True)
# this part actually happens outside of the working directory!
repo = git.Repo()
if repo.is_dirty():
logger.error('cannot create tag, repo is dirty')
raise SystemExit(1)
if git_remote_name not in [x.name for x in repo.remotes]:
logger.error(
'cannot push tag to remote "{}" as it is not defined in repo'.format(git_remote_name)
)
raise SystemExit(1)
repo.create_tag(
path=release_version,
message='tag {} created by hatchery'.format(release_version)
)
repo.remotes[git_remote_name].push(tags=True)
logger.info('version {} tagged and pushed!'.format(release_version))
def _call_twine(args, pypi_repository, suppress_output):
twine_args = ['twine'] + list(args)
if helpers.string_is_url(pypi_repository):
pypirc_path = config.pypirc_temp(pypi_repository)
pypirc_index_name = config.PYPIRC_TEMP_INDEX_NAME
twine_args += ['-r', pypirc_index_name, '--config-file', pypirc_path]
else:
twine_args += ['-r', pypi_repository]
return executor.call(twine_args, suppress_output=suppress_output)
def task_upload(args):
suppress_output = not args['--stream-command-output']
if not os.path.isdir(workdir.options.path):
logger.error('{} does not exist, nothing to upload!'.format(workdir.options.path))
raise SystemExit(1)
with workdir.as_cwd():
config_dict = _get_config_or_die(
calling_task='upload',
required_params=['pypi_repository', 'pypi_verify_ssl']
)
pypi_repository = config_dict['pypi_repository']
pypi_verify_ssl = config_dict['pypi_verify_ssl']
project_name = project.get_project_name()
package_name = _get_package_name_or_die()
if project.multiple_packaged_versions(package_name):
logger.error(
'multiple package versions found, refusing to upload -- run `hatchery clean`'
)
raise SystemExit(1)
release_version = project.get_version(package_name, ignore_cache=True)
_valid_version_or_die(release_version)
_latest_version_or_die(release_version, project_name, pypi_repository, pypi_verify_ssl)
result = _call_twine(['upload', 'dist/*'], pypi_repository, suppress_output)
if result.exitval:
if 'not allowed to edit' in result.stderr:
logger.error('could not upload packages, try `hatchery register`')
else:
_log_failure_and_die(
'failed to upload packages', result, log_full_result=suppress_output
)
raise SystemExit(1)
logger.info('successfully uploaded {}=={} to [{}]'.format(
project_name, release_version, pypi_repository
))
def _create_packages(create_wheel, suppress_output):
with workdir.as_cwd():
setup_args = ['sdist']
if create_wheel:
setup_args.append('bdist_wheel')
result = executor.setup(setup_args, suppress_output=suppress_output)
if result.exitval:
_log_failure_and_die(
'failed to package project', result, log_full_result=suppress_output
)
def task_register(args):
release_version = args['--release-version']
suppress_output = not args['--stream-command-output']
workdir.sync()
with workdir.as_cwd():
config_dict = _get_config_or_die(
calling_task='register',
required_params=['pypi_repository', 'pypi_verify_ssl']
)
pypi_repository = config_dict['pypi_repository']
pypi_verify_ssl = config_dict['pypi_verify_ssl']
project_name = project.get_project_name()
package_name = _get_package_name_or_die()
packaged_files = project.get_packaged_files(package_name)
if len(packaged_files) == 0:
if not release_version:
release_version = project.get_version(package_name)
if not project.version_is_valid(release_version):
logger.info('using version 0.0 for registration purposes')
release_version = '0.0'
_check_and_set_version(
release_version, package_name, project_name, pypi_repository, pypi_verify_ssl
)
_create_packages(create_wheel=False, suppress_output=suppress_output)
packaged_files = project.get_packaged_files(package_name)
package_path = packaged_files[0]
result = _call_twine(['register', package_path], pypi_repository, suppress_output)
if result.exitval or '(400)' in result.stdout:
_log_failure_and_die(
'failed to register project', result, log_full_result=suppress_output
)
logger.info('successfully registered {} with [{}]'.format(project_name, pypi_repository))
def task_package(args):
release_version = args['--release-version']
suppress_output = not args['--stream-command-output']
workdir.sync()
with workdir.as_cwd():
config_dict = _get_config_or_die(
calling_task='package',
required_params=['create_wheel', 'readme_to_rst', 'pypi_repository', 'pypi_verify_ssl']
)
pypi_repository = config_dict['pypi_repository']
pypi_verify_ssl = config_dict['pypi_verify_ssl']
project_name = project.get_project_name()
package_name = _get_package_name_or_die()
_check_and_set_version(
release_version, package_name, project_name, pypi_repository, pypi_verify_ssl
)
if config_dict['readme_to_rst']:
if project.project_has_readme_md():
try:
project.convert_readme_to_rst()
except project.ProjectError as e:
if 'could not convert' in str(e):
logger.error(e)
raise SystemExit(1)
else:
logger.info(e)
_create_packages(config_dict['create_wheel'], suppress_output)
logger.info('successfully packaged {}=={}'.format(project_name, release_version))
def task_test(args):
suppress_output = not args['--stream-command-output']
workdir.sync()
with workdir.as_cwd():
config_dict = _get_config_or_die(
calling_task='test',
required_params=['test_command']
)
test_commands = config_dict['test_command']
if not funcy.is_list(test_commands):
test_commands = [test_commands]
for cmd_str in test_commands:
result = executor.call(cmd_str, suppress_output=suppress_output)
if result.exitval:
_log_failure_and_die('tests failed', result, log_full_result=suppress_output)
logger.info('testing completed successfully')
def task_clean(args):
workdir.remove()
def task_config(args):
config_dict = _get_config_or_die(
calling_task='config',
required_params=[]
)
print(os.linesep.join((
'### yaml ###',
'',
yaml.dump(config_dict, Dumper=yaml.RoundTripDumper, indent=4),
'### /yaml ###'
)))
def task_check(args):
logger.debug('verifying that project has a single package')
try:
package_name = project.get_package_name()
except project.ProjectError as e:
logger.error(str(e))
raise SystemExit(1)
ret = 0
logger.debug('checking state of _version.py file')
if (not project.package_has_version_file(package_name) or
not project.version_file_has___version__(package_name)):
_version_py_block = snippets.get_snippet_content('_version.py', package_name=package_name)
logger.error(os.linesep.join((
'package does not have a valid _version.py file',
'',
_version_py_block
)))
ret = 1
logger.debug('checking state of setup.py')
if not project.setup_py_uses__version_py() or not project.setup_py_uses___version__():
setup_py_block = snippets.get_snippet_content('setup.py', package_name=package_name)
logger.error(os.linesep.join((
'could not detect valid method in setup.py:',
'',
setup_py_block
)))
ret = 1
if ret:
raise SystemExit(ret)
logger.info('all checks passed!')
ORDERED_TASKS = ['check', 'config', 'clean', 'test', 'package', 'register', 'upload', 'tag']
CHECK_TASKS = [t for t in ORDERED_TASKS if t not in ('config', 'clean')]
|
ajk8/hatchery | hatchery/executor.py | call | python | def call(cmd_args, suppress_output=False):
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
logger.info('executing `{}`'.format(' '.join(cmd_args)))
call_request = CallRequest(cmd_args, suppress_output=suppress_output)
call_result = call_request.run()
if call_result.exitval:
logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval))
return call_result | Call an arbitary command and return the exit value, stdout, and stderr as a tuple
Command can be passed in as either a string or iterable
>>> result = call('hatchery', suppress_output=True)
>>> result.exitval
0
>>> result = call(['hatchery', 'notreal'])
>>> result.exitval
1 | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/executor.py#L66-L85 | [
"def run(self):\n self.process = subprocess.Popen(\n self.cmd_args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n close_fds='posix' in sys.builtin_module_names,\n bufsize=1\n )\n self._set_stream_process('stdout')\n self._set_stream_process('stderr')\n self.process.wait()\n\n return CallResult(self.process.returncode, self.stdout_str, self.stderr_str)\n"
] | import subprocess
import sys
import shlex
import logging
import funcy
import os
import six
logger = logging.getLogger(__name__)
class CallResult(object):
""" Basic representation of a command execution result """
def __init__(self, exitval, stdout, stderr):
self.exitval = exitval
self.stdout = stdout
self.stderr = stderr
def format_error_msg(self):
ret_lines = ['### exitval: {} ###'.format(self.exitval)]
if self.stdout:
ret_lines += ['### stdout ###', self.stdout, '### /stdout ###']
if self.stderr:
ret_lines += ['### stderr ###', self.stderr, '### /stderr ###']
return os.linesep.join(ret_lines)
class CallRequest(object):
""" Class to wrap up command execution and non-blocking output capture """
def __init__(self, cmd_args, suppress_output=False):
self.cmd_args = cmd_args
self.suppress_output = suppress_output
self.stdout_str = ''
self.stderr_str = ''
self.process = None
def _set_stream_process(self, stream_name):
# this is magic...the behavior suggests that the code inside this with block
# is assigned to events on self.process.stdout like a lambda function
with getattr(self.process, stream_name):
for line in iter(getattr(self.process, stream_name).readline, b''):
if six.PY3:
line = line.decode()
if not self.suppress_output:
getattr(sys, stream_name).write(line)
getattr(sys, stream_name).flush()
setattr(self, stream_name + '_str', getattr(self, stream_name + '_str') + line)
def run(self):
self.process = subprocess.Popen(
self.cmd_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds='posix' in sys.builtin_module_names,
bufsize=1
)
self._set_stream_process('stdout')
self._set_stream_process('stderr')
self.process.wait()
return CallResult(self.process.returncode, self.stdout_str, self.stderr_str)
def setup(cmd_args, suppress_output=False):
""" Call a setup.py command or list of commands
>>> result = setup('--name', suppress_output=True)
>>> result.exitval
0
>>> result = setup('notreal')
>>> result.exitval
1
"""
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
cmd_args = [sys.executable, 'setup.py'] + [x for x in cmd_args]
return call(cmd_args, suppress_output=suppress_output)
|
ajk8/hatchery | hatchery/executor.py | setup | python | def setup(cmd_args, suppress_output=False):
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
cmd_args = [sys.executable, 'setup.py'] + [x for x in cmd_args]
return call(cmd_args, suppress_output=suppress_output) | Call a setup.py command or list of commands
>>> result = setup('--name', suppress_output=True)
>>> result.exitval
0
>>> result = setup('notreal')
>>> result.exitval
1 | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/executor.py#L88-L101 | [
"def call(cmd_args, suppress_output=False):\n \"\"\" Call an arbitary command and return the exit value, stdout, and stderr as a tuple\n\n Command can be passed in as either a string or iterable\n\n >>> result = call('hatchery', suppress_output=True)\n >>> result.exitval\n 0\n >>> result = call(['hatchery', 'notreal'])\n >>> result.exitval\n 1\n \"\"\"\n if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):\n cmd_args = shlex.split(cmd_args)\n logger.info('executing `{}`'.format(' '.join(cmd_args)))\n call_request = CallRequest(cmd_args, suppress_output=suppress_output)\n call_result = call_request.run()\n if call_result.exitval:\n logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval))\n return call_result\n"
] | import subprocess
import sys
import shlex
import logging
import funcy
import os
import six
logger = logging.getLogger(__name__)
class CallResult(object):
""" Basic representation of a command execution result """
def __init__(self, exitval, stdout, stderr):
self.exitval = exitval
self.stdout = stdout
self.stderr = stderr
def format_error_msg(self):
ret_lines = ['### exitval: {} ###'.format(self.exitval)]
if self.stdout:
ret_lines += ['### stdout ###', self.stdout, '### /stdout ###']
if self.stderr:
ret_lines += ['### stderr ###', self.stderr, '### /stderr ###']
return os.linesep.join(ret_lines)
class CallRequest(object):
""" Class to wrap up command execution and non-blocking output capture """
def __init__(self, cmd_args, suppress_output=False):
self.cmd_args = cmd_args
self.suppress_output = suppress_output
self.stdout_str = ''
self.stderr_str = ''
self.process = None
def _set_stream_process(self, stream_name):
# this is magic...the behavior suggests that the code inside this with block
# is assigned to events on self.process.stdout like a lambda function
with getattr(self.process, stream_name):
for line in iter(getattr(self.process, stream_name).readline, b''):
if six.PY3:
line = line.decode()
if not self.suppress_output:
getattr(sys, stream_name).write(line)
getattr(sys, stream_name).flush()
setattr(self, stream_name + '_str', getattr(self, stream_name + '_str') + line)
def run(self):
self.process = subprocess.Popen(
self.cmd_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds='posix' in sys.builtin_module_names,
bufsize=1
)
self._set_stream_process('stdout')
self._set_stream_process('stderr')
self.process.wait()
return CallResult(self.process.returncode, self.stdout_str, self.stderr_str)
def call(cmd_args, suppress_output=False):
""" Call an arbitary command and return the exit value, stdout, and stderr as a tuple
Command can be passed in as either a string or iterable
>>> result = call('hatchery', suppress_output=True)
>>> result.exitval
0
>>> result = call(['hatchery', 'notreal'])
>>> result.exitval
1
"""
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
logger.info('executing `{}`'.format(' '.join(cmd_args)))
call_request = CallRequest(cmd_args, suppress_output=suppress_output)
call_result = call_request.run()
if call_result.exitval:
logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval))
return call_result
|
ajk8/hatchery | hatchery/config.py | from_yaml | python | def from_yaml():
default_yaml_str = snippets.get_snippet_content('hatchery.yml')
ret = yaml.load(default_yaml_str, Loader=yaml.RoundTripLoader)
for config_path in CONFIG_LOCATIONS:
config_path = os.path.expanduser(config_path)
if os.path.isfile(config_path):
with open(config_path) as config_file:
config_dict = yaml.load(config_file, Loader=yaml.RoundTripLoader)
if config_dict is None:
continue
for k, v in config_dict.items():
if k not in ret.keys():
raise ConfigError(
'found garbage key "{}" in {}'.format(k, config_path)
)
ret[k] = v
return ret | Load configuration from yaml source(s), cached to only run once | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/config.py#L23-L40 | [
"def get_snippet_content(snippet_name, **format_kwargs):\n \"\"\" Load the content from a snippet file which exists in SNIPPETS_ROOT \"\"\"\n filename = snippet_name + '.snippet'\n snippet_file = os.path.join(SNIPPETS_ROOT, filename)\n if not os.path.isfile(snippet_file):\n raise ValueError('could not find snippet with name ' + filename)\n ret = helpers.get_file_content(snippet_file)\n if format_kwargs:\n ret = ret.format(**format_kwargs)\n return ret\n"
] | import os
import microcache
import tempfile
import ruamel.yaml as yaml
from . import snippets
try:
import ConfigParser as configparser
except ImportError:
import configparser
class ConfigError(RuntimeError):
pass
CONFIG_LOCATIONS = [
'~/.hatchery/hatchery.yaml', '~/.hatchery/hatchery.yml', '.hatchery.yaml', '.hatchery.yml'
]
@microcache.this
PYPIRC_LOCATIONS = ['~/.pypirc']
@microcache.this
def from_pypirc(pypi_repository):
""" Load configuration from .pypirc file, cached to only run once """
ret = {}
pypirc_locations = PYPIRC_LOCATIONS
for pypirc_path in pypirc_locations:
pypirc_path = os.path.expanduser(pypirc_path)
if os.path.isfile(pypirc_path):
parser = configparser.SafeConfigParser()
parser.read(pypirc_path)
if 'distutils' not in parser.sections():
continue
if 'index-servers' not in parser.options('distutils'):
continue
if pypi_repository not in parser.get('distutils', 'index-servers'):
continue
if pypi_repository in parser.sections():
for option in parser.options(pypi_repository):
ret[option] = parser.get(pypi_repository, option)
if not ret:
raise ConfigError(
'repository does not appear to be configured in pypirc ({})'.format(pypi_repository) +
', remember that it needs an entry in [distutils] and its own section'
)
return ret
PYPIRC_TEMP_INDEX_NAME = 'hatchery_tmp'
PYPIRC_TEMPLATE = '''
[distutils]
index-servers =
{index_name}
[{index_name}]
repository = {index_url}
username = anonymous
password = nopassword
'''
@microcache.this
def pypirc_temp(index_url):
""" Create a temporary pypirc file for interaction with twine """
pypirc_file = tempfile.NamedTemporaryFile(suffix='.pypirc', delete=False)
print(pypirc_file.name)
with open(pypirc_file.name, 'w') as fh:
fh.write(PYPIRC_TEMPLATE.format(index_name=PYPIRC_TEMP_INDEX_NAME, index_url=index_url))
return pypirc_file.name
|
ajk8/hatchery | hatchery/config.py | from_pypirc | python | def from_pypirc(pypi_repository):
ret = {}
pypirc_locations = PYPIRC_LOCATIONS
for pypirc_path in pypirc_locations:
pypirc_path = os.path.expanduser(pypirc_path)
if os.path.isfile(pypirc_path):
parser = configparser.SafeConfigParser()
parser.read(pypirc_path)
if 'distutils' not in parser.sections():
continue
if 'index-servers' not in parser.options('distutils'):
continue
if pypi_repository not in parser.get('distutils', 'index-servers'):
continue
if pypi_repository in parser.sections():
for option in parser.options(pypi_repository):
ret[option] = parser.get(pypi_repository, option)
if not ret:
raise ConfigError(
'repository does not appear to be configured in pypirc ({})'.format(pypi_repository) +
', remember that it needs an entry in [distutils] and its own section'
)
return ret | Load configuration from .pypirc file, cached to only run once | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/config.py#L47-L70 | null | import os
import microcache
import tempfile
import ruamel.yaml as yaml
from . import snippets
try:
import ConfigParser as configparser
except ImportError:
import configparser
class ConfigError(RuntimeError):
pass
CONFIG_LOCATIONS = [
'~/.hatchery/hatchery.yaml', '~/.hatchery/hatchery.yml', '.hatchery.yaml', '.hatchery.yml'
]
@microcache.this
def from_yaml():
""" Load configuration from yaml source(s), cached to only run once """
default_yaml_str = snippets.get_snippet_content('hatchery.yml')
ret = yaml.load(default_yaml_str, Loader=yaml.RoundTripLoader)
for config_path in CONFIG_LOCATIONS:
config_path = os.path.expanduser(config_path)
if os.path.isfile(config_path):
with open(config_path) as config_file:
config_dict = yaml.load(config_file, Loader=yaml.RoundTripLoader)
if config_dict is None:
continue
for k, v in config_dict.items():
if k not in ret.keys():
raise ConfigError(
'found garbage key "{}" in {}'.format(k, config_path)
)
ret[k] = v
return ret
PYPIRC_LOCATIONS = ['~/.pypirc']
@microcache.this
PYPIRC_TEMP_INDEX_NAME = 'hatchery_tmp'
PYPIRC_TEMPLATE = '''
[distutils]
index-servers =
{index_name}
[{index_name}]
repository = {index_url}
username = anonymous
password = nopassword
'''
@microcache.this
def pypirc_temp(index_url):
""" Create a temporary pypirc file for interaction with twine """
pypirc_file = tempfile.NamedTemporaryFile(suffix='.pypirc', delete=False)
print(pypirc_file.name)
with open(pypirc_file.name, 'w') as fh:
fh.write(PYPIRC_TEMPLATE.format(index_name=PYPIRC_TEMP_INDEX_NAME, index_url=index_url))
return pypirc_file.name
|
ajk8/hatchery | hatchery/config.py | pypirc_temp | python | def pypirc_temp(index_url):
pypirc_file = tempfile.NamedTemporaryFile(suffix='.pypirc', delete=False)
print(pypirc_file.name)
with open(pypirc_file.name, 'w') as fh:
fh.write(PYPIRC_TEMPLATE.format(index_name=PYPIRC_TEMP_INDEX_NAME, index_url=index_url))
return pypirc_file.name | Create a temporary pypirc file for interaction with twine | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/config.py#L87-L93 | null | import os
import microcache
import tempfile
import ruamel.yaml as yaml
from . import snippets
try:
import ConfigParser as configparser
except ImportError:
import configparser
class ConfigError(RuntimeError):
pass
CONFIG_LOCATIONS = [
'~/.hatchery/hatchery.yaml', '~/.hatchery/hatchery.yml', '.hatchery.yaml', '.hatchery.yml'
]
@microcache.this
def from_yaml():
""" Load configuration from yaml source(s), cached to only run once """
default_yaml_str = snippets.get_snippet_content('hatchery.yml')
ret = yaml.load(default_yaml_str, Loader=yaml.RoundTripLoader)
for config_path in CONFIG_LOCATIONS:
config_path = os.path.expanduser(config_path)
if os.path.isfile(config_path):
with open(config_path) as config_file:
config_dict = yaml.load(config_file, Loader=yaml.RoundTripLoader)
if config_dict is None:
continue
for k, v in config_dict.items():
if k not in ret.keys():
raise ConfigError(
'found garbage key "{}" in {}'.format(k, config_path)
)
ret[k] = v
return ret
PYPIRC_LOCATIONS = ['~/.pypirc']
@microcache.this
def from_pypirc(pypi_repository):
""" Load configuration from .pypirc file, cached to only run once """
ret = {}
pypirc_locations = PYPIRC_LOCATIONS
for pypirc_path in pypirc_locations:
pypirc_path = os.path.expanduser(pypirc_path)
if os.path.isfile(pypirc_path):
parser = configparser.SafeConfigParser()
parser.read(pypirc_path)
if 'distutils' not in parser.sections():
continue
if 'index-servers' not in parser.options('distutils'):
continue
if pypi_repository not in parser.get('distutils', 'index-servers'):
continue
if pypi_repository in parser.sections():
for option in parser.options(pypi_repository):
ret[option] = parser.get(pypi_repository, option)
if not ret:
raise ConfigError(
'repository does not appear to be configured in pypirc ({})'.format(pypi_repository) +
', remember that it needs an entry in [distutils] and its own section'
)
return ret
PYPIRC_TEMP_INDEX_NAME = 'hatchery_tmp'
PYPIRC_TEMPLATE = '''
[distutils]
index-servers =
{index_name}
[{index_name}]
repository = {index_url}
username = anonymous
password = nopassword
'''
@microcache.this
|
ajk8/hatchery | hatchery/snippets.py | get_snippet_content | python | def get_snippet_content(snippet_name, **format_kwargs):
filename = snippet_name + '.snippet'
snippet_file = os.path.join(SNIPPETS_ROOT, filename)
if not os.path.isfile(snippet_file):
raise ValueError('could not find snippet with name ' + filename)
ret = helpers.get_file_content(snippet_file)
if format_kwargs:
ret = ret.format(**format_kwargs)
return ret | Load the content from a snippet file which exists in SNIPPETS_ROOT | train | https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/snippets.py#L7-L16 | null | import os
from . import helpers
SNIPPETS_ROOT = os.path.join(os.path.dirname(__file__), 'snippets')
|
alexras/pylsdj | pylsdj/instruments.py | Instruments.import_from_file | python | def import_from_file(self, index, filename):
with open(filename, 'r') as fp:
self._import_from_struct(index, json.load(fp)) | Import this instrument's settings from the given file. Will
automatically add the instrument's synth and table to the song's
synths and tables if needed.
Note that this may invalidate existing instrument accessor objects.
:param index: the index into which to import
:param filename: the file from which to load
:raises ImportException: if importing failed, usually because the song
doesn't have enough synth or table slots left for the instrument's
synth or table | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/instruments.py#L63-L80 | [
"def _import_from_struct(self, index, lsdinst_struct):\n instr_type = lsdinst_struct['data']['instrument_type']\n\n self.allocate(index, instr_type)\n\n instrument = self.song.instruments[index]\n instrument.name = lsdinst_struct['name']\n\n # Make sure we've got enough room for the table if we need it\n if 'table' in lsdinst_struct:\n table_index = self.song.tables.next_free()\n\n if table_index is None:\n raise ImportException(\n \"No available table slot in which to store the \"\n \"instrument's table data\")\n\n self.song.tables.allocate(table_index)\n instrument.table = self.song.tables[table_index]\n\n instrument.import_lsdinst(lsdinst_struct)\n"
] | class Instruments(object):
instrumentClasses = {
"pulse": PulseInstrument,
"wave": WaveInstrument,
"kit": KitInstrument,
"noise": NoiseInstrument
}
def __init__(self, song):
self.song = song
self.alloc_table = song.song_data.instr_alloc_table
self.access_objects = []
for index in range(len(self.alloc_table)):
instr_type = self.song.song_data.instruments[index].instrument_type
self.access_objects.append(
self.instrumentClasses[instr_type](song, index))
def _set_instrument_type(self, index, instrument_type):
assert instrument_type in Instruments.instrumentClasses, (
"Invalid instrument type '%s'" % (instrument_type))
assert_index_sane(index, len(self.song.song_data.instruments))
current_access_object = self.access_objects[index]
# If this instrument is of a different type than we're currently
# accessing, we've got to make a new access object of the
# appropriate type
if (current_access_object is None or
current_access_object.type != instrument_type):
self.access_objects[index] = (
self.instrumentClasses[instrument_type](self.song, index))
self.access_objects[index].type = instrument_type
def __getitem__(self, index):
assert_index_sane(index, len(self.alloc_table))
if not self.alloc_table[index]:
return None
return self.access_objects[index]
def as_list(self):
return self.access_objects
def allocate(self, index, instrument_type):
self.alloc_table[index] = True
self._set_instrument_type(index, instrument_type)
def _import_from_struct(self, index, lsdinst_struct):
instr_type = lsdinst_struct['data']['instrument_type']
self.allocate(index, instr_type)
instrument = self.song.instruments[index]
instrument.name = lsdinst_struct['name']
# Make sure we've got enough room for the table if we need it
if 'table' in lsdinst_struct:
table_index = self.song.tables.next_free()
if table_index is None:
raise ImportException(
"No available table slot in which to store the "
"instrument's table data")
self.song.tables.allocate(table_index)
instrument.table = self.song.tables[table_index]
instrument.import_lsdinst(lsdinst_struct)
|
alexras/pylsdj | pylsdj/project.py | load_lsdsng | python | def load_lsdsng(filename):
# Load preamble data so that we know the name and version of the song
with open(filename, 'rb') as fp:
preamble_data = bread.parse(fp, spec.lsdsng_preamble)
with open(filename, 'rb') as fp:
# Skip the preamble this time around
fp.seek(int(len(preamble_data) / 8))
# Load compressed data into a block map and use BlockReader to
# decompress it
factory = BlockFactory()
while True:
block_data = bytearray(fp.read(blockutils.BLOCK_SIZE))
if len(block_data) == 0:
break
block = factory.new_block()
block.data = block_data
remapped_blocks = filepack.renumber_block_keys(factory.blocks)
reader = BlockReader()
compressed_data = reader.read(remapped_blocks)
# Now, decompress the raw data and use it and the preamble to construct
# a Project
raw_data = filepack.decompress(compressed_data)
name = preamble_data.name
version = preamble_data.version
size_blks = int(math.ceil(
float(len(compressed_data)) / blockutils.BLOCK_SIZE))
return Project(name, version, size_blks, raw_data) | Load a Project from a ``.lsdsng`` file.
:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project` | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/project.py#L10-L52 | [
"def decompress(compressed_data):\n \"\"\"Decompress data that has been compressed by the filepack algorithm.\n\n :param compressed_data: an array of compressed data bytes to decompress\n\n :rtype: an array of decompressed bytes\"\"\"\n raw_data = []\n\n index = 0\n\n while index < len(compressed_data):\n current = compressed_data[index]\n index += 1\n\n if current == RLE_BYTE:\n directive = compressed_data[index]\n index += 1\n\n if directive == RLE_BYTE:\n raw_data.append(RLE_BYTE)\n else:\n count = compressed_data[index]\n index += 1\n\n raw_data.extend([directive] * count)\n elif current == SPECIAL_BYTE:\n directive = compressed_data[index]\n index += 1\n\n if directive == SPECIAL_BYTE:\n raw_data.append(SPECIAL_BYTE)\n elif directive == DEFAULT_WAVE_BYTE:\n count = compressed_data[index]\n index += 1\n\n raw_data.extend(DEFAULT_WAVE * count)\n elif directive == DEFAULT_INSTR_BYTE:\n count = compressed_data[index]\n index += 1\n\n raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count)\n elif directive == EOF_BYTE:\n assert False, (\"Unexpected EOF command encountered while \"\n \"decompressing\")\n else:\n assert False, \"Countered unexpected sequence 0x%02x 0x%02x\" % (\n current, directive)\n else:\n raw_data.append(current)\n\n return raw_data\n",
"def renumber_block_keys(blocks):\n \"\"\"Renumber a block map's indices so that tehy match the blocks' block\n switch statements.\n\n :param blocks a block map to renumber\n :rtype: a renumbered copy of the block map\n \"\"\"\n\n # There is an implicit block switch to the 0th block at the start of the\n # file\n byte_switch_keys = [0]\n block_keys = list(blocks.keys())\n\n # Scan the blocks, recording every block switch statement\n for block in list(blocks.values()):\n i = 0\n while i < len(block.data) - 1:\n current_byte = block.data[i]\n next_byte = block.data[i + 1]\n\n if current_byte == RLE_BYTE:\n if next_byte == RLE_BYTE:\n i += 2\n else:\n i += 3\n elif current_byte == SPECIAL_BYTE:\n if next_byte in SPECIAL_DEFAULTS:\n i += 3\n elif next_byte == SPECIAL_BYTE:\n i += 2\n else:\n if next_byte != EOF_BYTE:\n byte_switch_keys.append(next_byte)\n\n break\n\n else:\n i += 1\n\n byte_switch_keys.sort()\n block_keys.sort()\n\n assert len(byte_switch_keys) == len(block_keys), (\n \"Number of blocks that are target of block switches (%d) \"\n % (len(byte_switch_keys)) +\n \"does not equal number of blocks in the song (%d)\"\n % (len(block_keys)) +\n \"; possible corruption\")\n\n if byte_switch_keys == block_keys:\n # No remapping necessary\n return blocks\n\n new_block_map = {}\n\n for block_key, byte_switch_key in zip(\n block_keys, byte_switch_keys):\n\n new_block_map[byte_switch_key] = blocks[block_key]\n\n return new_block_map\n",
"def new_block(self):\n block = Block(self.max_id, [])\n self.blocks[self.max_id] = block\n self.max_id += 1\n\n return block\n",
"def read(self, block_dict):\n \"\"\"Parses a dictionary of blocks into a compressed byte stream.\n \"\"\"\n\n return filepack.merge(block_dict)\n"
] | import math
import bread
from . import bread_spec as spec
from .song import Song
from . import filepack
from . import blockutils
from .blockutils import BlockReader, BlockWriter, BlockFactory
def load_srm(filename):
"""Load a Project from an ``.srm`` file.
:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project`
"""
# .srm files are just decompressed projects without headers
# In order to determine the file's size in compressed blocks, we have to
# compress it first
with open(filename, 'rb') as fp:
raw_data = fp.read()
compressed_data = filepack.compress(raw_data)
factory = BlockFactory()
writer = BlockWriter()
writer.write(compressed_data, factory)
size_in_blocks = len(factory.blocks)
# We'll give the file a dummy name ("SRMLOAD") and version, since we know
# neither
name = "SRMLOAD"
version = 0
return Project(name, version, size_in_blocks, raw_data)
class Project(object):
def __init__(self, name, version, size_blks, data):
self.name = name
"""the project's name"""
self.version = version
"""the project's version (incremented on every save in LSDJ)"""
self.size_blks = size_blks
"""the size of the song in filesystem blocks"""
# Useful for applications tracking whether a project was modified since
# it was loaded.
self.modified = False
# Since parsing the song is expensive, we'll load it lazily from the
# raw data on-demand
self.__song_data = None
self._song = None
self._raw_bytes = data
@property
def _song_data(self):
if self.__song_data is None:
self.__song_data = bread.parse(self._raw_bytes, spec.song)
return self.__song_data
@_song_data.setter
def _song_data(self, value):
self.__song_data = value
@property
def song(self):
"""the song associated with the project"""
if self._song is None:
self._song = Song(self._song_data)
return self._song
@song.setter
def song(self, value):
self._song = value
def get_raw_data(self):
return bread.write(self._song_data, spec.song)
def save(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
:deprecated: use ``save_lsdsng(filename)`` instead
"""
with open(filename, 'wb') as fp:
writer = BlockWriter()
factory = BlockFactory()
preamble_dummy_bytes = bytearray([0] * 9)
preamble = bread.parse(
preamble_dummy_bytes, spec.lsdsng_preamble)
preamble.name = self.name
preamble.version = self.version
preamble_data = bread.write(preamble)
raw_data = self.get_raw_data()
compressed_data = filepack.compress(raw_data)
writer.write(compressed_data, factory)
fp.write(preamble_data)
for key in sorted(factory.blocks.keys()):
fp.write(bytearray(factory.blocks[key].data))
def save_lsdsng(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
"""
return self.save(filename)
def save_srm(self, filename):
"""Save a project in .srm format to the target file.
:param filename: the name of the file to which to save
"""
with open(filename, 'wb') as fp:
raw_data = bread.write(self._song_data, spec.song)
fp.write(raw_data)
def __eq__(self, other):
if other is None:
return False
else:
return self._song_data == other._song_data
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s, %d>\n" % (self.name, self.version)
|
alexras/pylsdj | pylsdj/project.py | load_srm | python | def load_srm(filename):
# .srm files are just decompressed projects without headers
# In order to determine the file's size in compressed blocks, we have to
# compress it first
with open(filename, 'rb') as fp:
raw_data = fp.read()
compressed_data = filepack.compress(raw_data)
factory = BlockFactory()
writer = BlockWriter()
writer.write(compressed_data, factory)
size_in_blocks = len(factory.blocks)
# We'll give the file a dummy name ("SRMLOAD") and version, since we know
# neither
name = "SRMLOAD"
version = 0
return Project(name, version, size_in_blocks, raw_data) | Load a Project from an ``.srm`` file.
:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project` | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/project.py#L55-L82 | [
"def compress(raw_data):\n \"\"\"Compress raw bytes with the filepack algorithm.\n\n :param raw_data: an array of raw data bytes to compress\n\n :rtype: a list of compressed bytes\n \"\"\"\n raw_data = bytearray(raw_data)\n compressed_data = []\n\n data_size = len(raw_data)\n\n index = 0\n next_bytes = [-1, -1, -1]\n\n def is_default_instrument(index):\n if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):\n return False\n\n instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]\n\n if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:\n return False\n\n return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK\n\n def is_default_wave(index):\n return (index + len(DEFAULT_WAVE) <= len(raw_data) and\n raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)\n\n while index < data_size:\n current_byte = raw_data[index]\n\n for i in range(3):\n if index < data_size - (i + 1):\n next_bytes[i] = raw_data[index + (i + 1)]\n else:\n next_bytes[i] = -1\n\n if current_byte == RLE_BYTE:\n compressed_data.append(RLE_BYTE)\n compressed_data.append(RLE_BYTE)\n index += 1\n elif current_byte == SPECIAL_BYTE:\n compressed_data.append(SPECIAL_BYTE)\n compressed_data.append(SPECIAL_BYTE)\n index += 1\n elif is_default_instrument(index):\n counter = 1\n index += len(DEFAULT_INSTRUMENT_FILEPACK)\n\n while (is_default_instrument(index) and\n counter < 0x100):\n counter += 1\n index += len(DEFAULT_INSTRUMENT_FILEPACK)\n\n compressed_data.append(SPECIAL_BYTE)\n compressed_data.append(DEFAULT_INSTR_BYTE)\n compressed_data.append(counter)\n\n elif is_default_wave(index):\n counter = 1\n index += len(DEFAULT_WAVE)\n\n while is_default_wave(index) and counter < 0xff:\n counter += 1\n index += len(DEFAULT_WAVE)\n\n compressed_data.append(SPECIAL_BYTE)\n compressed_data.append(DEFAULT_WAVE_BYTE)\n compressed_data.append(counter)\n\n elif (current_byte == next_bytes[0] and\n next_bytes[0] == next_bytes[1] and\n next_bytes[1] == next_bytes[2]):\n # Do RLE compression\n\n 
compressed_data.append(RLE_BYTE)\n compressed_data.append(current_byte)\n counter = 0\n\n while (index < data_size and\n raw_data[index] == current_byte and\n counter < 0xff):\n index += 1\n counter += 1\n\n compressed_data.append(counter)\n else:\n compressed_data.append(current_byte)\n index += 1\n\n return compressed_data\n",
"def write(self, compressed_data, factory):\n \"\"\"Splits a compressed byte stream into blocks.\n \"\"\"\n return filepack.split(compressed_data, BLOCK_SIZE,\n factory)\n"
] | import math
import bread
from . import bread_spec as spec
from .song import Song
from . import filepack
from . import blockutils
from .blockutils import BlockReader, BlockWriter, BlockFactory
def load_lsdsng(filename):
"""Load a Project from a ``.lsdsng`` file.
:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project`
"""
# Load preamble data so that we know the name and version of the song
with open(filename, 'rb') as fp:
preamble_data = bread.parse(fp, spec.lsdsng_preamble)
with open(filename, 'rb') as fp:
# Skip the preamble this time around
fp.seek(int(len(preamble_data) / 8))
# Load compressed data into a block map and use BlockReader to
# decompress it
factory = BlockFactory()
while True:
block_data = bytearray(fp.read(blockutils.BLOCK_SIZE))
if len(block_data) == 0:
break
block = factory.new_block()
block.data = block_data
remapped_blocks = filepack.renumber_block_keys(factory.blocks)
reader = BlockReader()
compressed_data = reader.read(remapped_blocks)
# Now, decompress the raw data and use it and the preamble to construct
# a Project
raw_data = filepack.decompress(compressed_data)
name = preamble_data.name
version = preamble_data.version
size_blks = int(math.ceil(
float(len(compressed_data)) / blockutils.BLOCK_SIZE))
return Project(name, version, size_blks, raw_data)
class Project(object):
def __init__(self, name, version, size_blks, data):
self.name = name
"""the project's name"""
self.version = version
"""the project's version (incremented on every save in LSDJ)"""
self.size_blks = size_blks
"""the size of the song in filesystem blocks"""
# Useful for applications tracking whether a project was modified since
# it was loaded.
self.modified = False
# Since parsing the song is expensive, we'll load it lazily from the
# raw data on-demand
self.__song_data = None
self._song = None
self._raw_bytes = data
@property
def _song_data(self):
if self.__song_data is None:
self.__song_data = bread.parse(self._raw_bytes, spec.song)
return self.__song_data
@_song_data.setter
def _song_data(self, value):
self.__song_data = value
@property
def song(self):
"""the song associated with the project"""
if self._song is None:
self._song = Song(self._song_data)
return self._song
@song.setter
def song(self, value):
self._song = value
def get_raw_data(self):
return bread.write(self._song_data, spec.song)
def save(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
:deprecated: use ``save_lsdsng(filename)`` instead
"""
with open(filename, 'wb') as fp:
writer = BlockWriter()
factory = BlockFactory()
preamble_dummy_bytes = bytearray([0] * 9)
preamble = bread.parse(
preamble_dummy_bytes, spec.lsdsng_preamble)
preamble.name = self.name
preamble.version = self.version
preamble_data = bread.write(preamble)
raw_data = self.get_raw_data()
compressed_data = filepack.compress(raw_data)
writer.write(compressed_data, factory)
fp.write(preamble_data)
for key in sorted(factory.blocks.keys()):
fp.write(bytearray(factory.blocks[key].data))
def save_lsdsng(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
"""
return self.save(filename)
def save_srm(self, filename):
"""Save a project in .srm format to the target file.
:param filename: the name of the file to which to save
"""
with open(filename, 'wb') as fp:
raw_data = bread.write(self._song_data, spec.song)
fp.write(raw_data)
def __eq__(self, other):
if other is None:
return False
else:
return self._song_data == other._song_data
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s, %d>\n" % (self.name, self.version)
|
alexras/pylsdj | pylsdj/project.py | Project.song | python | def song(self):
if self._song is None:
self._song = Song(self._song_data)
return self._song | the song associated with the project | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/project.py#L119-L124 | null | class Project(object):
def __init__(self, name, version, size_blks, data):
self.name = name
"""the project's name"""
self.version = version
"""the project's version (incremented on every save in LSDJ)"""
self.size_blks = size_blks
"""the size of the song in filesystem blocks"""
# Useful for applications tracking whether a project was modified since
# it was loaded.
self.modified = False
# Since parsing the song is expensive, we'll load it lazily from the
# raw data on-demand
self.__song_data = None
self._song = None
self._raw_bytes = data
@property
def _song_data(self):
if self.__song_data is None:
self.__song_data = bread.parse(self._raw_bytes, spec.song)
return self.__song_data
@_song_data.setter
def _song_data(self, value):
self.__song_data = value
@property
@song.setter
def song(self, value):
self._song = value
def get_raw_data(self):
return bread.write(self._song_data, spec.song)
def save(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
:deprecated: use ``save_lsdsng(filename)`` instead
"""
with open(filename, 'wb') as fp:
writer = BlockWriter()
factory = BlockFactory()
preamble_dummy_bytes = bytearray([0] * 9)
preamble = bread.parse(
preamble_dummy_bytes, spec.lsdsng_preamble)
preamble.name = self.name
preamble.version = self.version
preamble_data = bread.write(preamble)
raw_data = self.get_raw_data()
compressed_data = filepack.compress(raw_data)
writer.write(compressed_data, factory)
fp.write(preamble_data)
for key in sorted(factory.blocks.keys()):
fp.write(bytearray(factory.blocks[key].data))
def save_lsdsng(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
"""
return self.save(filename)
def save_srm(self, filename):
"""Save a project in .srm format to the target file.
:param filename: the name of the file to which to save
"""
with open(filename, 'wb') as fp:
raw_data = bread.write(self._song_data, spec.song)
fp.write(raw_data)
def __eq__(self, other):
if other is None:
return False
else:
return self._song_data == other._song_data
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s, %d>\n" % (self.name, self.version)
|
alexras/pylsdj | pylsdj/project.py | Project.save | python | def save(self, filename):
with open(filename, 'wb') as fp:
writer = BlockWriter()
factory = BlockFactory()
preamble_dummy_bytes = bytearray([0] * 9)
preamble = bread.parse(
preamble_dummy_bytes, spec.lsdsng_preamble)
preamble.name = self.name
preamble.version = self.version
preamble_data = bread.write(preamble)
raw_data = self.get_raw_data()
compressed_data = filepack.compress(raw_data)
writer.write(compressed_data, factory)
fp.write(preamble_data)
for key in sorted(factory.blocks.keys()):
fp.write(bytearray(factory.blocks[key].data)) | Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
:deprecated: use ``save_lsdsng(filename)`` instead | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/project.py#L133-L159 | [
"def compress(raw_data):\n \"\"\"Compress raw bytes with the filepack algorithm.\n\n :param raw_data: an array of raw data bytes to compress\n\n :rtype: a list of compressed bytes\n \"\"\"\n raw_data = bytearray(raw_data)\n compressed_data = []\n\n data_size = len(raw_data)\n\n index = 0\n next_bytes = [-1, -1, -1]\n\n def is_default_instrument(index):\n if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):\n return False\n\n instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]\n\n if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:\n return False\n\n return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK\n\n def is_default_wave(index):\n return (index + len(DEFAULT_WAVE) <= len(raw_data) and\n raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)\n\n while index < data_size:\n current_byte = raw_data[index]\n\n for i in range(3):\n if index < data_size - (i + 1):\n next_bytes[i] = raw_data[index + (i + 1)]\n else:\n next_bytes[i] = -1\n\n if current_byte == RLE_BYTE:\n compressed_data.append(RLE_BYTE)\n compressed_data.append(RLE_BYTE)\n index += 1\n elif current_byte == SPECIAL_BYTE:\n compressed_data.append(SPECIAL_BYTE)\n compressed_data.append(SPECIAL_BYTE)\n index += 1\n elif is_default_instrument(index):\n counter = 1\n index += len(DEFAULT_INSTRUMENT_FILEPACK)\n\n while (is_default_instrument(index) and\n counter < 0x100):\n counter += 1\n index += len(DEFAULT_INSTRUMENT_FILEPACK)\n\n compressed_data.append(SPECIAL_BYTE)\n compressed_data.append(DEFAULT_INSTR_BYTE)\n compressed_data.append(counter)\n\n elif is_default_wave(index):\n counter = 1\n index += len(DEFAULT_WAVE)\n\n while is_default_wave(index) and counter < 0xff:\n counter += 1\n index += len(DEFAULT_WAVE)\n\n compressed_data.append(SPECIAL_BYTE)\n compressed_data.append(DEFAULT_WAVE_BYTE)\n compressed_data.append(counter)\n\n elif (current_byte == next_bytes[0] and\n next_bytes[0] == next_bytes[1] and\n next_bytes[1] == next_bytes[2]):\n # Do RLE compression\n\n 
compressed_data.append(RLE_BYTE)\n compressed_data.append(current_byte)\n counter = 0\n\n while (index < data_size and\n raw_data[index] == current_byte and\n counter < 0xff):\n index += 1\n counter += 1\n\n compressed_data.append(counter)\n else:\n compressed_data.append(current_byte)\n index += 1\n\n return compressed_data\n",
"def write(self, compressed_data, factory):\n \"\"\"Splits a compressed byte stream into blocks.\n \"\"\"\n return filepack.split(compressed_data, BLOCK_SIZE,\n factory)\n",
"def get_raw_data(self):\n return bread.write(self._song_data, spec.song)\n"
] | class Project(object):
def __init__(self, name, version, size_blks, data):
self.name = name
"""the project's name"""
self.version = version
"""the project's version (incremented on every save in LSDJ)"""
self.size_blks = size_blks
"""the size of the song in filesystem blocks"""
# Useful for applications tracking whether a project was modified since
# it was loaded.
self.modified = False
# Since parsing the song is expensive, we'll load it lazily from the
# raw data on-demand
self.__song_data = None
self._song = None
self._raw_bytes = data
@property
def _song_data(self):
if self.__song_data is None:
self.__song_data = bread.parse(self._raw_bytes, spec.song)
return self.__song_data
@_song_data.setter
def _song_data(self, value):
self.__song_data = value
@property
def song(self):
"""the song associated with the project"""
if self._song is None:
self._song = Song(self._song_data)
return self._song
@song.setter
def song(self, value):
self._song = value
def get_raw_data(self):
return bread.write(self._song_data, spec.song)
def save_lsdsng(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
"""
return self.save(filename)
def save_srm(self, filename):
"""Save a project in .srm format to the target file.
:param filename: the name of the file to which to save
"""
with open(filename, 'wb') as fp:
raw_data = bread.write(self._song_data, spec.song)
fp.write(raw_data)
def __eq__(self, other):
if other is None:
return False
else:
return self._song_data == other._song_data
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s, %d>\n" % (self.name, self.version)
|
alexras/pylsdj | pylsdj/project.py | Project.save_srm | python | def save_srm(self, filename):
with open(filename, 'wb') as fp:
raw_data = bread.write(self._song_data, spec.song)
fp.write(raw_data) | Save a project in .srm format to the target file.
:param filename: the name of the file to which to save | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/project.py#L168-L175 | null | class Project(object):
def __init__(self, name, version, size_blks, data):
self.name = name
"""the project's name"""
self.version = version
"""the project's version (incremented on every save in LSDJ)"""
self.size_blks = size_blks
"""the size of the song in filesystem blocks"""
# Useful for applications tracking whether a project was modified since
# it was loaded.
self.modified = False
# Since parsing the song is expensive, we'll load it lazily from the
# raw data on-demand
self.__song_data = None
self._song = None
self._raw_bytes = data
@property
def _song_data(self):
if self.__song_data is None:
self.__song_data = bread.parse(self._raw_bytes, spec.song)
return self.__song_data
@_song_data.setter
def _song_data(self, value):
self.__song_data = value
@property
def song(self):
"""the song associated with the project"""
if self._song is None:
self._song = Song(self._song_data)
return self._song
@song.setter
def song(self, value):
self._song = value
def get_raw_data(self):
return bread.write(self._song_data, spec.song)
def save(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
:deprecated: use ``save_lsdsng(filename)`` instead
"""
with open(filename, 'wb') as fp:
writer = BlockWriter()
factory = BlockFactory()
preamble_dummy_bytes = bytearray([0] * 9)
preamble = bread.parse(
preamble_dummy_bytes, spec.lsdsng_preamble)
preamble.name = self.name
preamble.version = self.version
preamble_data = bread.write(preamble)
raw_data = self.get_raw_data()
compressed_data = filepack.compress(raw_data)
writer.write(compressed_data, factory)
fp.write(preamble_data)
for key in sorted(factory.blocks.keys()):
fp.write(bytearray(factory.blocks[key].data))
def save_lsdsng(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
"""
return self.save(filename)
def __eq__(self, other):
if other is None:
return False
else:
return self._song_data == other._song_data
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s, %d>\n" % (self.name, self.version)
|
alexras/pylsdj | pylsdj/synth.py | Synth.phase_type | python | def phase_type(self, value):
'''compresses the waveform horizontally; one of
``"normal"``, ``"resync"``, ``"resync2"``'''
self._params.phase_type = value
self._overwrite_lock.disable() | compresses the waveform horizontally; one of
``"normal"``, ``"resync"``, ``"resync2"`` | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/synth.py#L186-L190 | null | class Synth(object):
def __init__(self, song, index):
self._song = song
self._index = index
self._overwrite_lock = WaveSynthOverwriteLock(song, index)
self._params = self._song.song_data.softsynth_params[index]
self._start = SynthSoundParams(
self._song.song_data.softsynth_params[index].start,
self._overwrite_lock)
self._end = SynthSoundParams(
self._song.song_data.softsynth_params[index].end,
self._overwrite_lock)
self._waves = Waves(song, index, self._overwrite_lock)
@property
def song(self):
"""the synth's parent Song"""
return self._song
@property
def index(self):
"""the synth's index within its parent song's synth table"""
return self._index
@property
def start(self):
"""parameters for the start of the sound, represented as a
SynthSoundParams object"""
return self._start
@property
def end(self):
"""parameters for the end of the sound, represented as a
SynthSoundParams object"""
return self._end
@property
def waveform(self):
'''the synth\'s waveform type; one of ``"sawtooth"``,
``"square"``, ``"sine"``'''
return self._params.waveform
@waveform.setter
def waveform(self, value):
self._params.waveform = value
self._overwrite_lock.disable()
@property
def filter_type(self):
'''the type of filter applied to the waveform; one of
``"lowpass"``, ``"highpass"``, ``"bandpass"``, ``"allpass"``'''
return self._params.filter_type
@filter_type.setter
def filter_type(self, value):
self._params.filter_type = value
self._overwrite_lock.disable()
@property
def filter_resonance(self):
"""boosts the signal around the cutoff
frequency, to change how bright or dull the wave sounds"""
return self._params.filter_resonance
@filter_resonance.setter
def filter_resonance(self, value):
self._params.filter_resonance = value
self._overwrite_lock.disable()
@property
def distortion(self):
'''use ``"clip"`` or ``"wrap"`` distortion'''
return self._params.distortion
@distortion.setter
def distortion(self, value):
self._params.distortion = value
self._overwrite_lock.disable()
@property
def phase_type(self):
'''compresses the waveform horizontally; one of
``"normal"``, ``"resync"``, ``"resync2"``'''
return self._params.phase_type
@phase_type.setter
@property
def waves(self):
"""a list of the synth's waveforms, each of which is a list of bytes"""
return self._waves
@property
def wave_synth_overwrite_lock(self):
"""if True, the synth's waveforms override its synth parameters;
if False, its synth parameters override its waveforms"""
return self._overwrite_lock.status()
def export(self):
export_struct = {}
export_struct["params"] = json.loads(self._params.as_json())
export_struct["waves"] = []
for wave in self.waves:
export_struct["waves"].append(list(wave))
return export_struct
def import_lsdinst(self, synth_data):
import_keys = ['start', 'end', 'waveform', 'filter_type',
'filter_resonance', 'distortion', 'phase_type']
for key in import_keys:
value = synth_data['params'][key]
if key in ('start', 'end'):
getattr(self, key).import_lsdinst(value)
else:
setattr(self, key, value)
for i, wave in enumerate(synth_data['waves']):
for j, frame in enumerate(wave):
self.waves[i][j] = frame
|
alexras/pylsdj | pylsdj/savfile.py | SAVFile.project_list | python | def project_list(self):
return [(i, self.projects[i]) for i in sorted(self.projects.keys())] | The list of :py:class:`pylsdj.Project` s that the
.sav file contains | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/savfile.py#L207-L210 | null | class SAVFile(object):
# Start offset of SAV file contents
START_OFFSET = 0x8000
HEADER_EMPTY_SECTION_1 = (0x8120, 0x813d)
# Offset of SRAM initialization check
SRAM_INIT_CHECK_OFFSET = 0x813e
SRAM_INIT_CHECK_LENGTH = 2
# Offset where active file number appears
ACTIVE_FILE_NUMBER_OFFSET = 0x8140
# Start of block allocation table
BAT_START_OFFSET = 0x8141
# End of block allocation table
BAT_END_OFFSET = 0x81ff
# Max length in bytes of filename
FILENAME_LENGTH = 8
# Length in bytes of file version number
FILE_VERSION_LENGTH = 1
# Length in bytes of file number
FILE_NUMBER_LENGTH = 1
def __init__(self, filename, callback=_noop_callback):
"""Constructor.
:param filename: the file to open
:type name: str
:param callback: a progress callback function
:type name: function
"""
self.filename = filename
with open(filename, 'rb') as fp:
self._load(fp, callback)
def _load(self, fp, callback):
# read preamble + decompress blocks + "all done"
total_steps = 3
current_step = 0
callback("Reading preamble", current_step, total_steps, True)
self.preamble = fp.read(self.START_OFFSET)
header_block_data = fp.read(blockutils.BLOCK_SIZE)
try:
self.header_block = bread.parse(
header_block_data, bread_spec.compressed_sav_file)
except bitstring.ReadError as e:
raise exceptions.ImportException(e)
if self.header_block.sram_init_check != b'jk':
error_msg = (
"SRAM init check bits incorrect (should be 'jk', was '%s')" %
(self.header_block.sram_init_check))
callback(error_msg, current_step, total_steps, False)
raise ValueError(error_msg)
self.active_project_number = self.header_block.active_file
current_step += 1
callback("Decompressing", current_step, total_steps, True)
self.projects = ProjectList(self.filename, self.header_block)
current_step += 1
callback("Import complete!", total_steps, total_steps, True)
def __str__(self):
output_str = ''
def add_line(line):
output_str += line + '\n'
str_stream = StringIO()
for i in range(NUM_FILES):
project = self.projects[i]
if project is not None:
add_line(str(project), file=str_stream)
add_line("Active Project: %s" % \
(self.projects[self.active_project_number]), file=str_stream)
return output_str
def __eq__(self, other):
return self.projects == other.projects
@property
def _save(self, fp, callback):
# Marshal 32 possible projects + write preamble + write data + "all
# done"
total_steps = 35
current_step = 0
writer = BlockWriter()
factory = BlockFactory()
# Block allocation table doesn't include header block because it's
# always in use, so have to add additional block to account for header
num_blocks = self.BAT_END_OFFSET - self.BAT_START_OFFSET + 2
header_block = factory.new_block()
block_table = []
for i in range(num_blocks):
block_table.append(None)
# First block is the header block, so we should ignore it when creating
# the block allocation table
block_table[0] = -1
for i in range(NUM_FILES):
project = self.projects[i]
current_step += 1
if project is None:
continue
callback("Marshaling song '%s'" %
(utils.name_without_zeroes(project.name)),
current_step - 1, total_steps, True)
raw_data = project.get_raw_data()
compressed_data = filepack.compress(raw_data)
project_block_ids = writer.write(compressed_data, factory)
for b in project_block_ids:
block_table[b] = i
callback("Writing preamble and constructing header block",
current_step, total_steps, True)
current_step += 1
# Bytes up to START_OFFSET will remain the same
fp.write(self.preamble)
# Set header block filenames and versions
empty_project_name = '\0' * self.FILENAME_LENGTH
for i in range(NUM_FILES):
project = self.projects[i]
if project is None:
self.header_block.filenames[i] = empty_project_name
self.header_block.file_versions[i] = 0
else:
self.header_block.filenames[i] = project.name
self.header_block.file_versions[i] = project.version
self.header_block.active_file = self.active_project_number
# Ignore the header block when serializing the block allocation table
for i, b in enumerate(block_table[1:]):
if b is None:
file_no = EMPTY_BLOCK
else:
file_no = b
self.header_block.block_alloc_table[i] = file_no
header_block.data = bread.write(
self.header_block, bread_spec.compressed_sav_file)
assert len(header_block.data) == blockutils.BLOCK_SIZE, \
"Header block isn't the expected length; expected 0x%x, got 0x%x" \
% (blockutils.BLOCK_SIZE, len(header_block.data))
block_map = factory.blocks
empty_block_data = []
for i in range(blockutils.BLOCK_SIZE):
empty_block_data.append(0)
callback("Writing data to file", current_step, total_steps, True)
current_step += 1
for i in range(num_blocks):
if i in block_map:
data_list = block_map[i].data
else:
data_list = empty_block_data
fp.write(bytearray(data_list))
callback("Save complete!", total_steps, total_steps, True)
def save(self, filename, callback=_noop_callback):
    """Write this .sav file out to disk.

    :param filename: the file to which to save the .sav file
    :type filename: str
    :param callback: a progress callback function
    :type callback: function
    """
    with open(filename, 'wb') as out_fp:
        self._save(out_fp, callback)
|
alexras/pylsdj | pylsdj/savfile.py | SAVFile.save | python | def save(self, filename, callback=_noop_callback):
with open(filename, 'wb') as fp:
self._save(fp, callback) | Save this file.
:param filename: the file to which to save the .sav file
:type filename: str
:param callback: a progress callback function
:type callback: function | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/savfile.py#L312-L321 | [
"def _save(self, fp, callback):\n # Marshal 32 possible projects + write preamble + write data + \"all\n # done\"\n total_steps = 35\n current_step = 0\n\n writer = BlockWriter()\n factory = BlockFactory()\n\n # Block allocation table doesn't include header block because it's\n # always in use, so have to add additional block to account for header\n num_blocks = self.BAT_END_OFFSET - self.BAT_START_OFFSET + 2\n\n header_block = factory.new_block()\n\n block_table = []\n\n for i in range(num_blocks):\n block_table.append(None)\n\n # First block is the header block, so we should ignore it when creating\n # the block allocation table\n block_table[0] = -1\n\n for i in range(NUM_FILES):\n project = self.projects[i]\n\n current_step += 1\n\n if project is None:\n continue\n\n callback(\"Marshaling song '%s'\" %\n (utils.name_without_zeroes(project.name)),\n current_step - 1, total_steps, True)\n\n raw_data = project.get_raw_data()\n compressed_data = filepack.compress(raw_data)\n\n project_block_ids = writer.write(compressed_data, factory)\n\n for b in project_block_ids:\n block_table[b] = i\n\n callback(\"Writing preamble and constructing header block\",\n current_step, total_steps, True)\n current_step += 1\n # Bytes up to START_OFFSET will remain the same\n fp.write(self.preamble)\n\n # Set header block filenames and versions\n\n empty_project_name = '\\0' * self.FILENAME_LENGTH\n\n for i in range(NUM_FILES):\n project = self.projects[i]\n\n if project is None:\n self.header_block.filenames[i] = empty_project_name\n self.header_block.file_versions[i] = 0\n else:\n self.header_block.filenames[i] = project.name\n self.header_block.file_versions[i] = project.version\n\n self.header_block.active_file = self.active_project_number\n\n # Ignore the header block when serializing the block allocation table\n for i, b in enumerate(block_table[1:]):\n if b is None:\n file_no = EMPTY_BLOCK\n else:\n file_no = b\n\n self.header_block.block_alloc_table[i] = file_no\n\n 
header_block.data = bread.write(\n self.header_block, bread_spec.compressed_sav_file)\n\n assert len(header_block.data) == blockutils.BLOCK_SIZE, \\\n \"Header block isn't the expected length; expected 0x%x, got 0x%x\" \\\n % (blockutils.BLOCK_SIZE, len(header_block.data))\n\n block_map = factory.blocks\n\n empty_block_data = []\n for i in range(blockutils.BLOCK_SIZE):\n empty_block_data.append(0)\n\n callback(\"Writing data to file\", current_step, total_steps, True)\n current_step += 1\n for i in range(num_blocks):\n if i in block_map:\n data_list = block_map[i].data\n else:\n data_list = empty_block_data\n\n fp.write(bytearray(data_list))\n\n callback(\"Save complete!\", total_steps, total_steps, True)\n"
] | class SAVFile(object):
# Start offset of SAV file contents
START_OFFSET = 0x8000
# (start, end) offsets of a header section left empty — presumably unused
# padding; confirm against the LSDj save format docs
HEADER_EMPTY_SECTION_1 = (0x8120, 0x813d)
# Offset of SRAM initialization check (the 2-byte b'jk' marker)
SRAM_INIT_CHECK_OFFSET = 0x813e
SRAM_INIT_CHECK_LENGTH = 2
# Offset where active file number appears
ACTIVE_FILE_NUMBER_OFFSET = 0x8140
# Start of block allocation table
BAT_START_OFFSET = 0x8141
# End of block allocation table
BAT_END_OFFSET = 0x81ff
# Max length in bytes of filename
FILENAME_LENGTH = 8
# Length in bytes of file version number
FILE_VERSION_LENGTH = 1
# Length in bytes of file number
FILE_NUMBER_LENGTH = 1
def __init__(self, filename, callback=_noop_callback):
"""Constructor.
Opens and parses the .sav file at *filename*.
:param filename: the file to open
:type filename: str
:param callback: a progress callback function
:type callback: function
"""
self.filename = filename
# Parse the whole file up front; the handle is closed on return.
with open(filename, 'rb') as fp:
self._load(fp, callback)
def _load(self, fp, callback):
"""Parse an open .sav file, populating preamble, header and projects.
:param fp: binary file object positioned at the start of the file
:param callback: progress callback; called as
    ``callback(message, step, total, flag)`` — the final flag is False
    only on the SRAM-check failure path, presumably signalling an error
"""
# read preamble + decompress blocks + "all done"
total_steps = 3
current_step = 0
callback("Reading preamble", current_step, total_steps, True)
# Everything before START_OFFSET is kept verbatim and rewritten on save.
self.preamble = fp.read(self.START_OFFSET)
header_block_data = fp.read(blockutils.BLOCK_SIZE)
try:
self.header_block = bread.parse(
header_block_data, bread_spec.compressed_sav_file)
except bitstring.ReadError as e:
# Surface low-level parse failures as the package's import exception.
raise exceptions.ImportException(e)
# The SRAM init marker must read b'jk' for a valid LSDj save.
if self.header_block.sram_init_check != b'jk':
error_msg = (
"SRAM init check bits incorrect (should be 'jk', was '%s')" %
(self.header_block.sram_init_check))
callback(error_msg, current_step, total_steps, False)
raise ValueError(error_msg)
self.active_project_number = self.header_block.active_file
current_step += 1
callback("Decompressing", current_step, total_steps, True)
# ProjectList handles decompression of the per-project blocks.
self.projects = ProjectList(self.filename, self.header_block)
current_step += 1
callback("Import complete!", total_steps, total_steps, True)
def __str__(self):
    """Return a human-readable listing of this file's projects.

    Each non-empty project is listed on its own line, followed by a line
    naming the active project.

    Fixes the original implementation, which raised UnboundLocalError
    (``add_line`` rebound ``output_str`` without ``nonlocal``) and passed
    an unsupported ``file=`` keyword to that helper; its ``str_stream``
    was never read.
    """
    lines = []
    for i in range(NUM_FILES):
        project = self.projects[i]
        if project is not None:
            lines.append(str(project))
    lines.append("Active Project: %s" %
                 (self.projects[self.active_project_number]))
    # Every emitted line ends with a newline, matching the intent of the
    # original add_line() helper.
    return ''.join(line + '\n' for line in lines)
def __eq__(self, other):
# Two .sav files are considered equal when their project lists match;
# the preamble and header bytes are deliberately ignored.
return self.projects == other.projects
@property
def project_list(self):
"""The list of ``(project_number, project)`` pairs that the
.sav file contains, ordered by project number"""
return [(i, self.projects[i]) for i in sorted(self.projects.keys())]
def _save(self, fp, callback):
"""Serialize this .sav file to the open binary file object ``fp``.
Allocates the header block first, compresses every project with
filepack into subsequent blocks, rebuilds the header's filename /
version / block-allocation tables, then writes preamble, header and
all data blocks in order.
:param fp: binary file object open for writing
:param callback: progress callback (message, step, total, flag)
"""
# Marshal 32 possible projects + write preamble + write data + "all
# done"
total_steps = 35
current_step = 0
writer = BlockWriter()
factory = BlockFactory()
# Block allocation table doesn't include header block because it's
# always in use, so have to add additional block to account for header
num_blocks = self.BAT_END_OFFSET - self.BAT_START_OFFSET + 2
header_block = factory.new_block()
block_table = []
for i in range(num_blocks):
block_table.append(None)
# First block is the header block, so we should ignore it when creating
# the block allocation table
block_table[0] = -1
for i in range(NUM_FILES):
project = self.projects[i]
current_step += 1
if project is None:
continue
callback("Marshaling song '%s'" %
(utils.name_without_zeroes(project.name)),
current_step - 1, total_steps, True)
# Compress the project and record which blocks its data occupies.
raw_data = project.get_raw_data()
compressed_data = filepack.compress(raw_data)
project_block_ids = writer.write(compressed_data, factory)
for b in project_block_ids:
block_table[b] = i
callback("Writing preamble and constructing header block",
current_step, total_steps, True)
current_step += 1
# Bytes up to START_OFFSET will remain the same
fp.write(self.preamble)
# Set header block filenames and versions
empty_project_name = '\0' * self.FILENAME_LENGTH
for i in range(NUM_FILES):
project = self.projects[i]
if project is None:
self.header_block.filenames[i] = empty_project_name
self.header_block.file_versions[i] = 0
else:
self.header_block.filenames[i] = project.name
self.header_block.file_versions[i] = project.version
self.header_block.active_file = self.active_project_number
# Ignore the header block when serializing the block allocation table
for i, b in enumerate(block_table[1:]):
if b is None:
file_no = EMPTY_BLOCK
else:
file_no = b
self.header_block.block_alloc_table[i] = file_no
header_block.data = bread.write(
self.header_block, bread_spec.compressed_sav_file)
assert len(header_block.data) == blockutils.BLOCK_SIZE, \
"Header block isn't the expected length; expected 0x%x, got 0x%x" \
% (blockutils.BLOCK_SIZE, len(header_block.data))
block_map = factory.blocks
# Unallocated blocks are written out as all zeroes.
empty_block_data = []
for i in range(blockutils.BLOCK_SIZE):
empty_block_data.append(0)
callback("Writing data to file", current_step, total_steps, True)
current_step += 1
for i in range(num_blocks):
if i in block_map:
data_list = block_map[i].data
else:
data_list = empty_block_data
fp.write(bytearray(data_list))
callback("Save complete!", total_steps, total_steps, True)
|
alexras/pylsdj | pylsdj/filepack.py | split | python | def split(compressed_data, segment_size, block_factory):
# Split compressed data into blocks
segments = []
current_segment_start = 0
index = 0
data_size = len(compressed_data)
while index < data_size:
current_byte = compressed_data[index]
if index < data_size - 1:
next_byte = compressed_data[index + 1]
else:
next_byte = None
jump_size = 1
if current_byte == RLE_BYTE:
assert next_byte is not None, "Expected a command to follow " \
"RLE byte"
if next_byte == RLE_BYTE:
jump_size = 2
else:
jump_size = 3
elif current_byte == SPECIAL_BYTE:
assert next_byte is not None, "Expected a command to follow " \
"special byte"
if next_byte == SPECIAL_BYTE:
jump_size = 2
elif next_byte == DEFAULT_INSTR_BYTE or \
next_byte == DEFAULT_WAVE_BYTE:
jump_size = 3
else:
assert False, "Encountered unexpected EOF or block " \
"switch while segmenting"
# Need two bytes for the jump or EOF
if index - current_segment_start + jump_size > segment_size - 2:
segments.append(compressed_data[
current_segment_start:index])
current_segment_start = index
else:
index += jump_size
# Append the last segment, if any
if current_segment_start != index:
segments.append(compressed_data[
current_segment_start:current_segment_start + index])
# Make sure that no data was lost while segmenting
total_segment_length = sum(map(len, segments))
assert total_segment_length == len(compressed_data), "Lost %d bytes of " \
"data while segmenting" % (len(compressed_data) - total_segment_length)
block_ids = []
for segment in segments:
block = block_factory.new_block()
block_ids.append(block.id)
for (i, segment) in enumerate(segments):
block = block_factory.blocks[block_ids[i]]
assert len(block.data) == 0, "Encountered a block with "
"pre-existing data while writing"
if i == len(segments) - 1:
# Write EOF to the end of the segment
add_eof(segment)
else:
# Write a pointer to the next segment
add_block_switch(segment, block_ids[i + 1])
# Pad segment with zeroes until it's large enough
pad(segment, segment_size)
block.data = segment
return block_ids | Splits compressed data into blocks.
:param compressed_data: the compressed data to split
:param segment_size: the size of a block in bytes
:param block_factory: a BlockFactory used to construct the blocks
:rtype: a list of block IDs of blocks that the block factory created while
splitting | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/filepack.py#L46-L138 | [
"def pad(segment, size):\n \"\"\"Add zeroes to a segment until it reaches a certain size.\n\n :param segment: the segment to pad\n :param size: the size to which to pad the segment\n \"\"\"\n for i in range(size - len(segment)):\n segment.append(0)\n\n assert len(segment) == size\n",
"def add_eof(segment):\n \"\"\"Add an EOF statement to a block.\"\"\"\n segment.extend([SPECIAL_BYTE, EOF_BYTE])\n",
"def add_block_switch(segment, block_id):\n \"\"\"Add a block switch statement to a block.\n\n :param segment: the segment to which to add the statement\n :param block_id: the block ID to which the switch statement should switch\n \"\"\"\n segment.extend([SPECIAL_BYTE, block_id])\n",
"def new_block(self):\n block = Block(self.max_id, [])\n self.blocks[self.max_id] = block\n self.max_id += 1\n\n return block\n"
] | import itertools
from .vendor.six.moves import range
# Byte used to denote run-length encoding
RLE_BYTE = 0xc0
# Byte used to denote special action
SPECIAL_BYTE = 0xe0
# Byte used to denote end of file (appears after special byte)
EOF_BYTE = 0xff
# Byte used to denote default instrument
DEFAULT_INSTR_BYTE = 0xf1
# Byte used to denote default wave
DEFAULT_WAVE_BYTE = 0xf0
DEFAULT_WAVE = bytearray(
[0x8e, 0xcd, 0xcc, 0xbb, 0xaa, 0xa9, 0x99, 0x88, 0x87, 0x76,
0x66, 0x55, 0x54, 0x43, 0x32, 0x31])
DEFAULT_INSTRUMENT_FILEPACK = bytearray([
0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0])
DEFAULT_INSTRUMENT = bytearray([
0, 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0])
# DEFAULT_INSTRUMENT = [
# 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0]
RESERVED_BYTES = [SPECIAL_BYTE, RLE_BYTE]
SPECIAL_DEFAULTS = [DEFAULT_INSTR_BYTE, DEFAULT_WAVE_BYTE]
STATE_BYTES = 0
STATE_RLE_BYTE = 1
STATE_RLE_COUNT = 2
STATE_SPECIAL_BYTE = 3
STATE_DEFAULT_INSTR = 4
STATE_DEFAULT_WAVE = 5
STATE_DONE = 6
def renumber_block_keys(blocks):
"""Renumber a block map's indices so that they match the blocks' block
switch statements.
:param blocks: a block map to renumber
:rtype: a renumbered copy of the block map
"""
# There is an implicit block switch to the 0th block at the start of the
# file
byte_switch_keys = [0]
block_keys = list(blocks.keys())
# Scan the blocks, recording every block switch statement
for block in list(blocks.values()):
i = 0
# Step over multi-byte filepack commands so data bytes are never
# misread as commands (same walk as in merge()).
while i < len(block.data) - 1:
current_byte = block.data[i]
next_byte = block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
i += 2
else:
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
i += 3
elif next_byte == SPECIAL_BYTE:
i += 2
else:
# Block switch (not EOF): record the target block ID
if next_byte != EOF_BYTE:
byte_switch_keys.append(next_byte)
break
else:
i += 1
# Pair the n-th smallest stored key with the n-th smallest switch
# target.
byte_switch_keys.sort()
block_keys.sort()
assert len(byte_switch_keys) == len(block_keys), (
"Number of blocks that are target of block switches (%d) "
% (len(byte_switch_keys)) +
"does not equal number of blocks in the song (%d)"
% (len(block_keys)) +
"; possible corruption")
if byte_switch_keys == block_keys:
# No remapping necessary
return blocks
new_block_map = {}
for block_key, byte_switch_key in zip(
block_keys, byte_switch_keys):
new_block_map[byte_switch_key] = blocks[block_key]
return new_block_map
def merge(blocks):
"""Merge the given blocks into a contiguous block of compressed data.
:param blocks: a map of block ID to block (indexed by ID, not a plain
    list)
:rtype: a list of compressed bytes
"""
# Merging starts at the block with the lowest ID and then follows
# explicit block-switch commands.
current_block = blocks[sorted(blocks.keys())[0]]
compressed_data = []
eof = False
while not eof:
data_size_to_append = None
next_block = None
i = 0
# Step over multi-byte filepack commands so a data byte is never
# mistaken for a command.
while i < len(current_block.data) - 1:
current_byte = current_block.data[i]
next_byte = current_block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
# Escaped literal RLE byte: two bytes wide
i += 2
else:
# RLE run: command, value, count
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
# Default instrument/wave run: command, kind, count
i += 3
elif next_byte == SPECIAL_BYTE:
# Escaped literal special byte
i += 2
else:
# Block switch or EOF ends this block's payload
data_size_to_append = i
# hit end of file
if next_byte == EOF_BYTE:
eof = True
else:
next_block = blocks[next_byte]
break
else:
i += 1
assert data_size_to_append is not None, "Ran off the end of a "\
"block without encountering a block switch or EOF"
# Copy only the payload; the switch/EOF command itself is dropped.
compressed_data.extend(current_block.data[0:data_size_to_append])
if not eof:
assert next_block is not None, "Switched blocks, but did " \
"not provide the next block to switch to"
current_block = next_block
return compressed_data
def add_eof(segment):
    """Terminate *segment* with the two-byte end-of-file command."""
    segment.append(SPECIAL_BYTE)
    segment.append(EOF_BYTE)
def add_block_switch(segment, block_id):
    """Terminate *segment* with a two-byte switch to another block.

    :param segment: the segment to which to add the statement
    :param block_id: the block ID to which the switch statement should switch
    """
    segment.append(SPECIAL_BYTE)
    segment.append(block_id)
def pad(segment, size):
    """Pad *segment* in place with zero bytes until it is *size* long.

    :param segment: the mutable sequence to pad
    :param size: the target length; must be >= ``len(segment)``
    """
    # One extend() call instead of appending zeroes in a Python-level loop.
    segment.extend([0] * (size - len(segment)))

    assert len(segment) == size
def decompress(compressed_data):
"""Decompress data that has been compressed by the filepack algorithm.
:param compressed_data: an array of compressed data bytes to decompress
:rtype: an array of decompressed bytes"""
raw_data = []
index = 0
while index < len(compressed_data):
current = compressed_data[index]
index += 1
if current == RLE_BYTE:
directive = compressed_data[index]
index += 1
if directive == RLE_BYTE:
# Doubled RLE byte is an escaped literal 0xc0
raw_data.append(RLE_BYTE)
else:
# RLE run: the following byte is the repeat count
count = compressed_data[index]
index += 1
raw_data.extend([directive] * count)
elif current == SPECIAL_BYTE:
directive = compressed_data[index]
index += 1
if directive == SPECIAL_BYTE:
# Doubled special byte is an escaped literal 0xe0
raw_data.append(SPECIAL_BYTE)
elif directive == DEFAULT_WAVE_BYTE:
# Expand `count` copies of the 16-byte default wave
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_WAVE * count)
elif directive == DEFAULT_INSTR_BYTE:
# Expand `count` copies of the default instrument bytes
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count)
elif directive == EOF_BYTE:
# Block switch/EOF commands must be stripped (see merge())
# before decompressing.
assert False, ("Unexpected EOF command encountered while "
"decompressing")
else:
# NOTE(review): "Countered" is a typo for "Encountered" in
# this runtime message; left unchanged here.
assert False, "Countered unexpected sequence 0x%02x 0x%02x" % (
current, directive)
else:
# Plain literal byte
raw_data.append(current)
return raw_data
def compress(raw_data):
    """Compress raw bytes with the filepack algorithm.

    :param raw_data: an array of raw data bytes to compress
    :rtype: a list of compressed bytes
    """
    raw_data = bytearray(raw_data)

    compressed_data = []

    data_size = len(raw_data)
    index = 0

    next_bytes = [-1, -1, -1]

    def is_default_instrument(index):
        # True when the bytes at `index` equal the filepack default
        # instrument.
        if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):
            return False

        instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]

        # Cheap first-byte checks before the full comparison.
        if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:
            return False

        return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK

    def is_default_wave(index):
        # True when the bytes at `index` equal the default wave table.
        return (index + len(DEFAULT_WAVE) <= len(raw_data) and
                raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)

    while index < data_size:
        current_byte = raw_data[index]

        # Peek at up to three following bytes (-1 marks "past the end").
        for i in range(3):
            if index < data_size - (i + 1):
                next_bytes[i] = raw_data[index + (i + 1)]
            else:
                next_bytes[i] = -1

        if current_byte == RLE_BYTE:
            # Escape a literal RLE byte by doubling it.
            compressed_data.append(RLE_BYTE)
            compressed_data.append(RLE_BYTE)
            index += 1
        elif current_byte == SPECIAL_BYTE:
            # Escape a literal special byte by doubling it.
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(SPECIAL_BYTE)
            index += 1
        elif is_default_instrument(index):
            # Run of default instruments -> 3-byte command with count.
            counter = 1
            index += len(DEFAULT_INSTRUMENT_FILEPACK)

            # BUG FIX: the count occupies a single byte, so it must be
            # capped at 0xff (as the default-wave path already does); the
            # original allowed the counter to reach 0x100, which cannot be
            # stored in one byte (bytearray() rejects 256) and cannot be
            # read back by decompress().
            while (is_default_instrument(index) and
                   counter < 0xff):
                counter += 1
                index += len(DEFAULT_INSTRUMENT_FILEPACK)

            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_INSTR_BYTE)
            compressed_data.append(counter)
        elif is_default_wave(index):
            # Run of default waves -> 3-byte command with count.
            counter = 1
            index += len(DEFAULT_WAVE)

            while is_default_wave(index) and counter < 0xff:
                counter += 1
                index += len(DEFAULT_WAVE)

            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_WAVE_BYTE)
            compressed_data.append(counter)
        elif (current_byte == next_bytes[0] and
              next_bytes[0] == next_bytes[1] and
              next_bytes[1] == next_bytes[2]):
            # At least four identical bytes in a row: RLE-encode the run.
            compressed_data.append(RLE_BYTE)
            compressed_data.append(current_byte)

            counter = 0
            while (index < data_size and
                   raw_data[index] == current_byte and
                   counter < 0xff):
                index += 1
                counter += 1

            compressed_data.append(counter)
        else:
            # Plain literal byte.
            compressed_data.append(current_byte)
            index += 1

    return compressed_data
|
alexras/pylsdj | pylsdj/filepack.py | renumber_block_keys | python | def renumber_block_keys(blocks):
# There is an implicit block switch to the 0th block at the start of the
# file
byte_switch_keys = [0]
block_keys = list(blocks.keys())
# Scan the blocks, recording every block switch statement
for block in list(blocks.values()):
i = 0
while i < len(block.data) - 1:
current_byte = block.data[i]
next_byte = block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
i += 2
else:
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
i += 3
elif next_byte == SPECIAL_BYTE:
i += 2
else:
if next_byte != EOF_BYTE:
byte_switch_keys.append(next_byte)
break
else:
i += 1
byte_switch_keys.sort()
block_keys.sort()
assert len(byte_switch_keys) == len(block_keys), (
"Number of blocks that are target of block switches (%d) "
% (len(byte_switch_keys)) +
"does not equal number of blocks in the song (%d)"
% (len(block_keys)) +
"; possible corruption")
if byte_switch_keys == block_keys:
# No remapping necessary
return blocks
new_block_map = {}
for block_key, byte_switch_key in zip(
block_keys, byte_switch_keys):
new_block_map[byte_switch_key] = blocks[block_key]
return new_block_map | Renumber a block map's indices so that they match the blocks' block
switch statements.
:param blocks a block map to renumber
:rtype: a renumbered copy of the block map | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/filepack.py#L141-L201 | null | import itertools
from .vendor.six.moves import range
# Byte used to denote run-length encoding
RLE_BYTE = 0xc0
# Byte used to denote special action
SPECIAL_BYTE = 0xe0
# Byte used to denote end of file (appears after special byte)
EOF_BYTE = 0xff
# Byte used to denote default instrument
DEFAULT_INSTR_BYTE = 0xf1
# Byte used to denote default wave
DEFAULT_WAVE_BYTE = 0xf0
DEFAULT_WAVE = bytearray(
[0x8e, 0xcd, 0xcc, 0xbb, 0xaa, 0xa9, 0x99, 0x88, 0x87, 0x76,
0x66, 0x55, 0x54, 0x43, 0x32, 0x31])
DEFAULT_INSTRUMENT_FILEPACK = bytearray([
0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0])
DEFAULT_INSTRUMENT = bytearray([
0, 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0])
# DEFAULT_INSTRUMENT = [
# 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0]
RESERVED_BYTES = [SPECIAL_BYTE, RLE_BYTE]
SPECIAL_DEFAULTS = [DEFAULT_INSTR_BYTE, DEFAULT_WAVE_BYTE]
STATE_BYTES = 0
STATE_RLE_BYTE = 1
STATE_RLE_COUNT = 2
STATE_SPECIAL_BYTE = 3
STATE_DEFAULT_INSTR = 4
STATE_DEFAULT_WAVE = 5
STATE_DONE = 6
def split(compressed_data, segment_size, block_factory):
    """Splits compressed data into blocks.

    :param compressed_data: the compressed data to split
    :param segment_size: the size of a block in bytes
    :param block_factory: a BlockFactory used to construct the blocks
    :rtype: a list of block IDs of blocks that the block factory created
        while splitting
    """
    segments = []
    current_segment_start = 0
    index = 0
    data_size = len(compressed_data)

    while index < data_size:
        current_byte = compressed_data[index]

        if index < data_size - 1:
            next_byte = compressed_data[index + 1]
        else:
            next_byte = None

        # Width of the filepack command that starts at `index`.
        jump_size = 1

        if current_byte == RLE_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "RLE byte"

            if next_byte == RLE_BYTE:
                jump_size = 2
            else:
                jump_size = 3
        elif current_byte == SPECIAL_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "special byte"

            if next_byte == SPECIAL_BYTE:
                jump_size = 2
            elif next_byte == DEFAULT_INSTR_BYTE or \
                    next_byte == DEFAULT_WAVE_BYTE:
                jump_size = 3
            else:
                assert False, "Encountered unexpected EOF or block " \
                    "switch while segmenting"

        # Need two bytes at the end of every segment for the block switch
        # or EOF command.
        if index - current_segment_start + jump_size > segment_size - 2:
            segments.append(compressed_data[current_segment_start:index])
            current_segment_start = index
        else:
            index += jump_size

    # Append the last segment, if any.
    # (Fixed: the original sliced to current_segment_start + index and only
    # worked because slices clamp at the sequence end; `index` is the
    # intended upper bound.)
    if current_segment_start != index:
        segments.append(compressed_data[current_segment_start:index])

    # Make sure that no data was lost while segmenting
    total_segment_length = sum(map(len, segments))
    assert total_segment_length == len(compressed_data), "Lost %d bytes of " \
        "data while segmenting" % (len(compressed_data) - total_segment_length)

    # Allocate one block per segment up front so each segment can point at
    # its successor.
    block_ids = []

    for segment in segments:
        block = block_factory.new_block()
        block_ids.append(block.id)

    for (i, segment) in enumerate(segments):
        block = block_factory.blocks[block_ids[i]]

        # Fixed: the original split this message across two statements, so
        # the second string literal was a dead no-op and the assertion
        # message was silently truncated.
        assert len(block.data) == 0, ("Encountered a block with "
                                      "pre-existing data while writing")

        if i == len(segments) - 1:
            # Write EOF to the end of the segment
            add_eof(segment)
        else:
            # Write a pointer to the next segment
            add_block_switch(segment, block_ids[i + 1])

        # Pad segment with zeroes until it's large enough
        pad(segment, segment_size)

        block.data = segment

    return block_ids
def merge(blocks):
"""Merge the given blocks into a contiguous block of compressed data.
:param blocks: a map of block ID to block (indexed by ID, not a plain
    list)
:rtype: a list of compressed bytes
"""
# Merging starts at the block with the lowest ID and then follows
# explicit block-switch commands.
current_block = blocks[sorted(blocks.keys())[0]]
compressed_data = []
eof = False
while not eof:
data_size_to_append = None
next_block = None
i = 0
# Step over multi-byte filepack commands so a data byte is never
# mistaken for a command.
while i < len(current_block.data) - 1:
current_byte = current_block.data[i]
next_byte = current_block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
# Escaped literal RLE byte: two bytes wide
i += 2
else:
# RLE run: command, value, count
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
# Default instrument/wave run: command, kind, count
i += 3
elif next_byte == SPECIAL_BYTE:
# Escaped literal special byte
i += 2
else:
# Block switch or EOF ends this block's payload
data_size_to_append = i
# hit end of file
if next_byte == EOF_BYTE:
eof = True
else:
next_block = blocks[next_byte]
break
else:
i += 1
assert data_size_to_append is not None, "Ran off the end of a "\
"block without encountering a block switch or EOF"
# Copy only the payload; the switch/EOF command itself is dropped.
compressed_data.extend(current_block.data[0:data_size_to_append])
if not eof:
assert next_block is not None, "Switched blocks, but did " \
"not provide the next block to switch to"
current_block = next_block
return compressed_data
def add_eof(segment):
    """Terminate *segment* with the two-byte end-of-file command."""
    segment.append(SPECIAL_BYTE)
    segment.append(EOF_BYTE)
def add_block_switch(segment, block_id):
    """Terminate *segment* with a two-byte switch to another block.

    :param segment: the segment to which to add the statement
    :param block_id: the block ID to which the switch statement should switch
    """
    segment.append(SPECIAL_BYTE)
    segment.append(block_id)
def pad(segment, size):
    """Pad *segment* in place with zero bytes until it is *size* long.

    :param segment: the mutable sequence to pad
    :param size: the target length; must be >= ``len(segment)``
    """
    # One extend() call instead of appending zeroes in a Python-level loop.
    segment.extend([0] * (size - len(segment)))

    assert len(segment) == size
def decompress(compressed_data):
"""Decompress data that has been compressed by the filepack algorithm.
:param compressed_data: an array of compressed data bytes to decompress
:rtype: an array of decompressed bytes"""
raw_data = []
index = 0
while index < len(compressed_data):
current = compressed_data[index]
index += 1
if current == RLE_BYTE:
directive = compressed_data[index]
index += 1
if directive == RLE_BYTE:
# Doubled RLE byte is an escaped literal 0xc0
raw_data.append(RLE_BYTE)
else:
# RLE run: the following byte is the repeat count
count = compressed_data[index]
index += 1
raw_data.extend([directive] * count)
elif current == SPECIAL_BYTE:
directive = compressed_data[index]
index += 1
if directive == SPECIAL_BYTE:
# Doubled special byte is an escaped literal 0xe0
raw_data.append(SPECIAL_BYTE)
elif directive == DEFAULT_WAVE_BYTE:
# Expand `count` copies of the 16-byte default wave
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_WAVE * count)
elif directive == DEFAULT_INSTR_BYTE:
# Expand `count` copies of the default instrument bytes
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count)
elif directive == EOF_BYTE:
# Block switch/EOF commands must be stripped (see merge())
# before decompressing.
assert False, ("Unexpected EOF command encountered while "
"decompressing")
else:
# NOTE(review): "Countered" is a typo for "Encountered" in
# this runtime message; left unchanged here.
assert False, "Countered unexpected sequence 0x%02x 0x%02x" % (
current, directive)
else:
# Plain literal byte
raw_data.append(current)
return raw_data
def compress(raw_data):
    """Compress raw bytes with the filepack algorithm.

    :param raw_data: an array of raw data bytes to compress
    :rtype: a list of compressed bytes
    """
    raw_data = bytearray(raw_data)

    compressed_data = []

    data_size = len(raw_data)
    index = 0

    next_bytes = [-1, -1, -1]

    def is_default_instrument(index):
        # True when the bytes at `index` equal the filepack default
        # instrument.
        if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):
            return False

        instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]

        # Cheap first-byte checks before the full comparison.
        if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:
            return False

        return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK

    def is_default_wave(index):
        # True when the bytes at `index` equal the default wave table.
        return (index + len(DEFAULT_WAVE) <= len(raw_data) and
                raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)

    while index < data_size:
        current_byte = raw_data[index]

        # Peek at up to three following bytes (-1 marks "past the end").
        for i in range(3):
            if index < data_size - (i + 1):
                next_bytes[i] = raw_data[index + (i + 1)]
            else:
                next_bytes[i] = -1

        if current_byte == RLE_BYTE:
            # Escape a literal RLE byte by doubling it.
            compressed_data.append(RLE_BYTE)
            compressed_data.append(RLE_BYTE)
            index += 1
        elif current_byte == SPECIAL_BYTE:
            # Escape a literal special byte by doubling it.
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(SPECIAL_BYTE)
            index += 1
        elif is_default_instrument(index):
            # Run of default instruments -> 3-byte command with count.
            counter = 1
            index += len(DEFAULT_INSTRUMENT_FILEPACK)

            # BUG FIX: the count occupies a single byte, so it must be
            # capped at 0xff (as the default-wave path already does); the
            # original allowed the counter to reach 0x100, which cannot be
            # stored in one byte (bytearray() rejects 256) and cannot be
            # read back by decompress().
            while (is_default_instrument(index) and
                   counter < 0xff):
                counter += 1
                index += len(DEFAULT_INSTRUMENT_FILEPACK)

            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_INSTR_BYTE)
            compressed_data.append(counter)
        elif is_default_wave(index):
            # Run of default waves -> 3-byte command with count.
            counter = 1
            index += len(DEFAULT_WAVE)

            while is_default_wave(index) and counter < 0xff:
                counter += 1
                index += len(DEFAULT_WAVE)

            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_WAVE_BYTE)
            compressed_data.append(counter)
        elif (current_byte == next_bytes[0] and
              next_bytes[0] == next_bytes[1] and
              next_bytes[1] == next_bytes[2]):
            # At least four identical bytes in a row: RLE-encode the run.
            compressed_data.append(RLE_BYTE)
            compressed_data.append(current_byte)

            counter = 0
            while (index < data_size and
                   raw_data[index] == current_byte and
                   counter < 0xff):
                index += 1
                counter += 1

            compressed_data.append(counter)
        else:
            # Plain literal byte.
            compressed_data.append(current_byte)
            index += 1

    return compressed_data
|
alexras/pylsdj | pylsdj/filepack.py | merge | python | def merge(blocks):
current_block = blocks[sorted(blocks.keys())[0]]
compressed_data = []
eof = False
while not eof:
data_size_to_append = None
next_block = None
i = 0
while i < len(current_block.data) - 1:
current_byte = current_block.data[i]
next_byte = current_block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
i += 2
else:
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
i += 3
elif next_byte == SPECIAL_BYTE:
i += 2
else:
data_size_to_append = i
# hit end of file
if next_byte == EOF_BYTE:
eof = True
else:
next_block = blocks[next_byte]
break
else:
i += 1
assert data_size_to_append is not None, "Ran off the end of a "\
"block without encountering a block switch or EOF"
compressed_data.extend(current_block.data[0:data_size_to_append])
if not eof:
assert next_block is not None, "Switched blocks, but did " \
"not provide the next block to switch to"
current_block = next_block
return compressed_data | Merge the given blocks into a contiguous block of compressed data.
:param blocks: the list of blocks
:rtype: a list of compressed bytes | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/filepack.py#L204-L259 | null | import itertools
from .vendor.six.moves import range
# Byte used to denote run-length encoding
RLE_BYTE = 0xc0
# Byte used to denote special action
SPECIAL_BYTE = 0xe0
# Byte used to denote end of file (appears after special byte)
EOF_BYTE = 0xff
# Byte used to denote default instrument
DEFAULT_INSTR_BYTE = 0xf1
# Byte used to denote default wave
DEFAULT_WAVE_BYTE = 0xf0
DEFAULT_WAVE = bytearray(
[0x8e, 0xcd, 0xcc, 0xbb, 0xaa, 0xa9, 0x99, 0x88, 0x87, 0x76,
0x66, 0x55, 0x54, 0x43, 0x32, 0x31])
DEFAULT_INSTRUMENT_FILEPACK = bytearray([
0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0])
DEFAULT_INSTRUMENT = bytearray([
0, 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0])
# DEFAULT_INSTRUMENT = [
# 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0]
RESERVED_BYTES = [SPECIAL_BYTE, RLE_BYTE]
SPECIAL_DEFAULTS = [DEFAULT_INSTR_BYTE, DEFAULT_WAVE_BYTE]
STATE_BYTES = 0
STATE_RLE_BYTE = 1
STATE_RLE_COUNT = 2
STATE_SPECIAL_BYTE = 3
STATE_DEFAULT_INSTR = 4
STATE_DEFAULT_WAVE = 5
STATE_DONE = 6
def split(compressed_data, segment_size, block_factory):
    """Split compressed data into fixed-size blocks.

    :param compressed_data: the compressed data to split
    :param segment_size: the size of a block in bytes
    :param block_factory: a BlockFactory used to construct the blocks
    :rtype: a list of block IDs of blocks that the block factory created
        while splitting
    """
    segments = []
    current_segment_start = 0
    index = 0
    data_size = len(compressed_data)

    while index < data_size:
        current_byte = compressed_data[index]

        if index < data_size - 1:
            next_byte = compressed_data[index + 1]
        else:
            next_byte = None

        # Width of the current filepack command, so that a multi-byte
        # command is never split across two segments.
        jump_size = 1

        if current_byte == RLE_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "RLE byte"
            if next_byte == RLE_BYTE:
                jump_size = 2
            else:
                jump_size = 3
        elif current_byte == SPECIAL_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "special byte"
            if next_byte == SPECIAL_BYTE:
                jump_size = 2
            elif next_byte == DEFAULT_INSTR_BYTE or \
                    next_byte == DEFAULT_WAVE_BYTE:
                jump_size = 3
            else:
                assert False, "Encountered unexpected EOF or block " \
                    "switch while segmenting"

        # Reserve two bytes at the end of every segment for the block
        # switch or EOF command that gets appended below.
        if index - current_segment_start + jump_size > segment_size - 2:
            segments.append(compressed_data[current_segment_start:index])
            current_segment_start = index
        else:
            index += jump_size

    # Append the last segment, if any.
    # Bug fix: the upper bound used to be current_segment_start + index,
    # which only worked because Python clamps over-long slices; the
    # intended bound is index (== data_size at loop exit).
    if current_segment_start != index:
        segments.append(compressed_data[current_segment_start:index])

    # Make sure that no data was lost while segmenting.
    total_segment_length = sum(map(len, segments))
    assert total_segment_length == len(compressed_data), "Lost %d bytes of " \
        "data while segmenting" % (len(compressed_data) - total_segment_length)

    # Allocate one block per segment up front so each segment can point at
    # the ID of its successor.
    block_ids = [block_factory.new_block().id for _ in segments]

    for (i, segment) in enumerate(segments):
        block = block_factory.blocks[block_ids[i]]

        # Bug fix: the assert message used to be split across two
        # statements, so its second half was a no-op expression and never
        # reached the assert.
        assert len(block.data) == 0, ("Encountered a block with "
                                      "pre-existing data while writing")

        if i == len(segments) - 1:
            # Write EOF to the end of the segment.
            add_eof(segment)
        else:
            # Write a pointer to the next segment.
            add_block_switch(segment, block_ids[i + 1])

        # Pad segment with zeroes until it's large enough.
        pad(segment, segment_size)

        block.data = segment

    return block_ids
def renumber_block_keys(blocks):
    """Renumber a block map's indices so that they match the blocks' block
    switch statements.

    :param blocks: a block map to renumber
    :rtype: a renumbered copy of the block map
    """
    # There is an implicit block switch to the 0th block at the start of the
    # file
    byte_switch_keys = [0]
    block_keys = list(blocks.keys())
    # Scan the blocks, recording every block switch statement
    for block in list(blocks.values()):
        i = 0
        # Walk the block one filepack command at a time; the step width
        # depends on the command so argument bytes are never misread as
        # commands.
        while i < len(block.data) - 1:
            current_byte = block.data[i]
            next_byte = block.data[i + 1]
            if current_byte == RLE_BYTE:
                if next_byte == RLE_BYTE:
                    # Escaped literal RLE byte.
                    i += 2
                else:
                    # RLE run: command, value, count.
                    i += 3
            elif current_byte == SPECIAL_BYTE:
                if next_byte in SPECIAL_DEFAULTS:
                    # Default instrument/wave: command, directive, count.
                    i += 3
                elif next_byte == SPECIAL_BYTE:
                    # Escaped literal special byte.
                    i += 2
                else:
                    # Block switch or EOF; a block carries at most one, so
                    # stop scanning this block either way.
                    if next_byte != EOF_BYTE:
                        byte_switch_keys.append(next_byte)
                    break
            else:
                # Plain data byte.
                i += 1
    byte_switch_keys.sort()
    block_keys.sort()
    assert len(byte_switch_keys) == len(block_keys), (
        "Number of blocks that are target of block switches (%d) "
        % (len(byte_switch_keys)) +
        "does not equal number of blocks in the song (%d)"
        % (len(block_keys)) +
        "; possible corruption")
    if byte_switch_keys == block_keys:
        # No remapping necessary
        return blocks
    new_block_map = {}
    # Pair sorted physical keys with sorted switch targets so the map's
    # indices agree with where the data says each block lives.
    for block_key, byte_switch_key in zip(
            block_keys, byte_switch_keys):
        new_block_map[byte_switch_key] = blocks[block_key]
    return new_block_map
def add_eof(segment):
    """Append the filepack end-of-file command to *segment*."""
    segment.append(SPECIAL_BYTE)
    segment.append(EOF_BYTE)
def add_block_switch(segment, block_id):
    """Append a block switch command to *segment*.

    :param segment: the segment to which to add the statement
    :param block_id: the block ID to which the switch statement should switch
    """
    segment += [SPECIAL_BYTE, block_id]
def pad(segment, size):
    """Append zero bytes to *segment* until it is exactly *size* long.

    :param segment: the segment to pad
    :param size: the size to which to pad the segment
    """
    segment.extend([0] * (size - len(segment)))
    assert len(segment) == size
def decompress(compressed_data):
    """Decompress data that has been compressed by the filepack algorithm.

    :param compressed_data: an array of compressed data bytes to decompress
    :rtype: an array of decompressed bytes
    :raises AssertionError: on an EOF command or an unknown command sequence
    """
    raw_data = []
    index = 0
    while index < len(compressed_data):
        current = compressed_data[index]
        index += 1
        if current == RLE_BYTE:
            directive = compressed_data[index]
            index += 1
            if directive == RLE_BYTE:
                # RLE_BYTE RLE_BYTE encodes a literal RLE_BYTE.
                raw_data.append(RLE_BYTE)
            else:
                # RLE run: <value> <count>.
                count = compressed_data[index]
                index += 1
                raw_data.extend([directive] * count)
        elif current == SPECIAL_BYTE:
            directive = compressed_data[index]
            index += 1
            if directive == SPECIAL_BYTE:
                # SPECIAL_BYTE SPECIAL_BYTE encodes a literal SPECIAL_BYTE.
                raw_data.append(SPECIAL_BYTE)
            elif directive == DEFAULT_WAVE_BYTE:
                # Repeated copies of the default wave.
                count = compressed_data[index]
                index += 1
                raw_data.extend(DEFAULT_WAVE * count)
            elif directive == DEFAULT_INSTR_BYTE:
                # Repeated copies of the default instrument.
                count = compressed_data[index]
                index += 1
                raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count)
            elif directive == EOF_BYTE:
                assert False, ("Unexpected EOF command encountered while "
                               "decompressing")
            else:
                # Bug fix: message previously read "Countered"; corrected
                # to "Encountered".
                assert False, \
                    "Encountered unexpected sequence 0x%02x 0x%02x" % (
                        current, directive)
        else:
            # Plain data byte: copy through unchanged.
            raw_data.append(current)
    return raw_data
def compress(raw_data):
    """Compress raw bytes with the filepack algorithm.

    :param raw_data: an array of raw data bytes to compress
    :rtype: a list of compressed bytes
    """
    raw_data = bytearray(raw_data)
    compressed_data = []
    data_size = len(raw_data)
    index = 0
    next_bytes = [-1, -1, -1]

    def is_default_instrument(index):
        # Cheap two-byte prefix check before the full 16-byte comparison.
        if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):
            return False
        instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]
        if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:
            return False
        return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK

    def is_default_wave(index):
        return (index + len(DEFAULT_WAVE) <= len(raw_data) and
                raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)

    while index < data_size:
        current_byte = raw_data[index]
        # Peek at up to three lookahead bytes (-1 past the end) for the
        # run-length check below.
        for i in range(3):
            if index < data_size - (i + 1):
                next_bytes[i] = raw_data[index + (i + 1)]
            else:
                next_bytes[i] = -1
        if current_byte == RLE_BYTE:
            # A literal RLE byte must be escaped.
            compressed_data.append(RLE_BYTE)
            compressed_data.append(RLE_BYTE)
            index += 1
        elif current_byte == SPECIAL_BYTE:
            # A literal special byte must be escaped.
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(SPECIAL_BYTE)
            index += 1
        elif is_default_instrument(index):
            counter = 1
            index += len(DEFAULT_INSTRUMENT_FILEPACK)
            # Bug fix: the bound was 0x100, which let counter reach 256 --
            # a value that does not fit in the single count byte appended
            # below. Cap at 0xff, matching the default-wave branch.
            while (is_default_instrument(index) and
                   counter < 0xff):
                counter += 1
                index += len(DEFAULT_INSTRUMENT_FILEPACK)
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_INSTR_BYTE)
            compressed_data.append(counter)
        elif is_default_wave(index):
            counter = 1
            index += len(DEFAULT_WAVE)
            while is_default_wave(index) and counter < 0xff:
                counter += 1
                index += len(DEFAULT_WAVE)
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_WAVE_BYTE)
            compressed_data.append(counter)
        elif (current_byte == next_bytes[0] and
              next_bytes[0] == next_bytes[1] and
              next_bytes[1] == next_bytes[2]):
            # At least four identical bytes in a row: emit an RLE run.
            compressed_data.append(RLE_BYTE)
            compressed_data.append(current_byte)
            counter = 0
            while (index < data_size and
                   raw_data[index] == current_byte and
                   counter < 0xff):
                index += 1
                counter += 1
            compressed_data.append(counter)
        else:
            # Plain data byte.
            compressed_data.append(current_byte)
            index += 1
    return compressed_data
|
alexras/pylsdj | pylsdj/filepack.py | pad | python | def pad(segment, size):
for i in range(size - len(segment)):
segment.append(0)
assert len(segment) == size | Add zeroes to a segment until it reaches a certain size.
:param segment: the segment to pad
:param size: the size to which to pad the segment | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/filepack.py#L276-L285 | null | import itertools
from .vendor.six.moves import range
# Byte used to denote run-length encoding
RLE_BYTE = 0xc0
# Byte used to denote special action
SPECIAL_BYTE = 0xe0
# Byte used to denote end of file (appears after special byte)
EOF_BYTE = 0xff
# Byte used to denote default instrument
DEFAULT_INSTR_BYTE = 0xf1
# Byte used to denote default wave
DEFAULT_WAVE_BYTE = 0xf0
DEFAULT_WAVE = bytearray(
[0x8e, 0xcd, 0xcc, 0xbb, 0xaa, 0xa9, 0x99, 0x88, 0x87, 0x76,
0x66, 0x55, 0x54, 0x43, 0x32, 0x31])
DEFAULT_INSTRUMENT_FILEPACK = bytearray([
0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0])
DEFAULT_INSTRUMENT = bytearray([
0, 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0])
# DEFAULT_INSTRUMENT = [
# 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0]
RESERVED_BYTES = [SPECIAL_BYTE, RLE_BYTE]
SPECIAL_DEFAULTS = [DEFAULT_INSTR_BYTE, DEFAULT_WAVE_BYTE]
STATE_BYTES = 0
STATE_RLE_BYTE = 1
STATE_RLE_COUNT = 2
STATE_SPECIAL_BYTE = 3
STATE_DEFAULT_INSTR = 4
STATE_DEFAULT_WAVE = 5
STATE_DONE = 6
def split(compressed_data, segment_size, block_factory):
    """Splits compressed data into blocks.

    :param compressed_data: the compressed data to split
    :param segment_size: the size of a block in bytes
    :param block_factory: a BlockFactory used to construct the blocks
    :rtype: a list of block IDs of blocks that the block factory created while
        splitting
    """
    # Split compressed data into blocks
    segments = []
    current_segment_start = 0
    index = 0
    data_size = len(compressed_data)
    while index < data_size:
        current_byte = compressed_data[index]
        if index < data_size - 1:
            next_byte = compressed_data[index + 1]
        else:
            next_byte = None
        # Width of the current filepack command, so a multi-byte command
        # is never split across two segments.
        jump_size = 1
        if current_byte == RLE_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "RLE byte"
            if next_byte == RLE_BYTE:
                jump_size = 2
            else:
                jump_size = 3
        elif current_byte == SPECIAL_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "special byte"
            if next_byte == SPECIAL_BYTE:
                jump_size = 2
            elif next_byte == DEFAULT_INSTR_BYTE or \
                    next_byte == DEFAULT_WAVE_BYTE:
                jump_size = 3
            else:
                assert False, "Encountered unexpected EOF or block " \
                    "switch while segmenting"
        # Need two bytes for the jump or EOF
        if index - current_segment_start + jump_size > segment_size - 2:
            segments.append(compressed_data[
                current_segment_start:index])
            current_segment_start = index
        else:
            index += jump_size
    # Append the last segment, if any
    # NOTE(review): the upper slice bound current_segment_start + index
    # overshoots and only works because Python clamps over-long slices
    # (index == data_size here); the intended bound is probably just
    # `index` -- confirm.
    if current_segment_start != index:
        segments.append(compressed_data[
            current_segment_start:current_segment_start + index])
    # Make sure that no data was lost while segmenting
    total_segment_length = sum(map(len, segments))
    assert total_segment_length == len(compressed_data), "Lost %d bytes of " \
        "data while segmenting" % (len(compressed_data) - total_segment_length)
    # Allocate one block per segment up front so each segment can point
    # at the ID of its successor.
    block_ids = []
    for segment in segments:
        block = block_factory.new_block()
        block_ids.append(block.id)
    for (i, segment) in enumerate(segments):
        block = block_factory.blocks[block_ids[i]]
        # NOTE(review): the string literal on the next line is a separate
        # no-op statement, so the assert message is truncated -- the two
        # literals were probably meant to form one message.
        assert len(block.data) == 0, "Encountered a block with "
        "pre-existing data while writing"
        if i == len(segments) - 1:
            # Write EOF to the end of the segment
            add_eof(segment)
        else:
            # Write a pointer to the next segment
            add_block_switch(segment, block_ids[i + 1])
        # Pad segment with zeroes until it's large enough
        pad(segment, segment_size)
        block.data = segment
    return block_ids
def renumber_block_keys(blocks):
    """Renumber a block map's indices so that they match the blocks' block
    switch statements.

    :param blocks: a block map to renumber
    :rtype: a renumbered copy of the block map
    """

    def switch_target(data):
        # Return the block-switch target stored in `data`, or None when the
        # block ends with EOF (or holds no switch command at all).
        pos = 0
        while pos < len(data) - 1:
            command = data[pos]
            argument = data[pos + 1]
            if command == RLE_BYTE:
                pos += 2 if argument == RLE_BYTE else 3
            elif command == SPECIAL_BYTE:
                if argument in SPECIAL_DEFAULTS:
                    pos += 3
                elif argument == SPECIAL_BYTE:
                    pos += 2
                else:
                    return None if argument == EOF_BYTE else argument
            else:
                pos += 1
        return None

    # The file starts with an implicit switch to block 0.
    byte_switch_keys = [0]
    for block in list(blocks.values()):
        target = switch_target(block.data)
        if target is not None:
            byte_switch_keys.append(target)

    byte_switch_keys.sort()
    block_keys = sorted(blocks.keys())

    assert len(byte_switch_keys) == len(block_keys), (
        "Number of blocks that are target of block switches (%d) "
        % (len(byte_switch_keys)) +
        "does not equal number of blocks in the song (%d)"
        % (len(block_keys)) +
        "; possible corruption")

    if byte_switch_keys == block_keys:
        # Already consistent; nothing to remap.
        return blocks

    return {
        switch_key: blocks[physical_key]
        for physical_key, switch_key in zip(block_keys, byte_switch_keys)
    }
def merge(blocks):
    """Merge the given blocks into a contiguous block of compressed data.

    :param blocks: a map of block ID -> block
    :rtype: a list of compressed bytes
    """
    # Start with the lowest-numbered block; each block's trailing switch
    # command names the block that follows it.
    current_block = blocks[sorted(blocks.keys())[0]]
    compressed_data = []
    eof = False
    while not eof:
        # Number of payload bytes in this block (everything before its
        # block switch / EOF command).
        data_size_to_append = None
        next_block = None
        i = 0
        # Step through the block one filepack command at a time so that
        # argument bytes are never mistaken for commands.
        while i < len(current_block.data) - 1:
            current_byte = current_block.data[i]
            next_byte = current_block.data[i + 1]
            if current_byte == RLE_BYTE:
                if next_byte == RLE_BYTE:
                    i += 2
                else:
                    i += 3
            elif current_byte == SPECIAL_BYTE:
                if next_byte in SPECIAL_DEFAULTS:
                    i += 3
                elif next_byte == SPECIAL_BYTE:
                    i += 2
                else:
                    data_size_to_append = i
                    # hit end of file
                    if next_byte == EOF_BYTE:
                        eof = True
                    else:
                        next_block = blocks[next_byte]
                    break
            else:
                i += 1
        assert data_size_to_append is not None, "Ran off the end of a "\
            "block without encountering a block switch or EOF"
        compressed_data.extend(current_block.data[0:data_size_to_append])
        if not eof:
            assert next_block is not None, "Switched blocks, but did " \
                "not provide the next block to switch to"
            current_block = next_block
    return compressed_data
def add_eof(segment):
    """Add an EOF statement (SPECIAL_BYTE, EOF_BYTE) to a block."""
    segment.extend([SPECIAL_BYTE, EOF_BYTE])
def add_block_switch(segment, block_id):
    """Add a block switch statement to a block.

    :param segment: the segment to which to add the statement
    :param block_id: the block ID to which the switch statement should switch
    """
    segment.extend([SPECIAL_BYTE, block_id])
def decompress(compressed_data):
    """Decompress data that has been compressed by the filepack algorithm.

    :param compressed_data: an array of compressed data bytes to decompress
    :rtype: an array of decompressed bytes"""
    output = []
    pos = 0
    total = len(compressed_data)
    while pos < total:
        command = compressed_data[pos]
        pos += 1
        if command == RLE_BYTE:
            value = compressed_data[pos]
            pos += 1
            if value == RLE_BYTE:
                # Escaped literal RLE byte.
                output.append(RLE_BYTE)
            else:
                # RLE run: <value> <count>.
                run_length = compressed_data[pos]
                pos += 1
                output.extend([value] * run_length)
        elif command == SPECIAL_BYTE:
            directive = compressed_data[pos]
            pos += 1
            if directive == SPECIAL_BYTE:
                # Escaped literal special byte.
                output.append(SPECIAL_BYTE)
            elif directive == DEFAULT_WAVE_BYTE:
                repeats = compressed_data[pos]
                pos += 1
                output.extend(DEFAULT_WAVE * repeats)
            elif directive == DEFAULT_INSTR_BYTE:
                repeats = compressed_data[pos]
                pos += 1
                output.extend(DEFAULT_INSTRUMENT_FILEPACK * repeats)
            elif directive == EOF_BYTE:
                assert False, ("Unexpected EOF command encountered while "
                               "decompressing")
            else:
                assert False, "Countered unexpected sequence 0x%02x 0x%02x" % (
                    command, directive)
        else:
            # Plain data byte.
            output.append(command)
    return output
def compress(raw_data):
    """Compress raw bytes with the filepack algorithm.

    :param raw_data: an array of raw data bytes to compress
    :rtype: a list of compressed bytes
    """
    raw_data = bytearray(raw_data)
    compressed_data = []
    data_size = len(raw_data)
    index = 0
    next_bytes = [-1, -1, -1]

    def is_default_instrument(index):
        # Cheap two-byte prefix check before the full 16-byte comparison.
        if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):
            return False
        instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]
        if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:
            return False
        return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK

    def is_default_wave(index):
        return (index + len(DEFAULT_WAVE) <= len(raw_data) and
                raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)

    while index < data_size:
        current_byte = raw_data[index]
        # Peek at up to three lookahead bytes (-1 past the end) for the
        # run-length check below.
        for i in range(3):
            if index < data_size - (i + 1):
                next_bytes[i] = raw_data[index + (i + 1)]
            else:
                next_bytes[i] = -1
        if current_byte == RLE_BYTE:
            # Literal RLE byte must be escaped.
            compressed_data.append(RLE_BYTE)
            compressed_data.append(RLE_BYTE)
            index += 1
        elif current_byte == SPECIAL_BYTE:
            # Literal special byte must be escaped.
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(SPECIAL_BYTE)
            index += 1
        elif is_default_instrument(index):
            counter = 1
            index += len(DEFAULT_INSTRUMENT_FILEPACK)
            # NOTE(review): the 0x100 bound lets counter reach 256, which
            # does not fit in the single count byte appended below; the
            # wave branch caps at 0xff -- confirm and align.
            while (is_default_instrument(index) and
                   counter < 0x100):
                counter += 1
                index += len(DEFAULT_INSTRUMENT_FILEPACK)
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_INSTR_BYTE)
            compressed_data.append(counter)
        elif is_default_wave(index):
            counter = 1
            index += len(DEFAULT_WAVE)
            while is_default_wave(index) and counter < 0xff:
                counter += 1
                index += len(DEFAULT_WAVE)
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_WAVE_BYTE)
            compressed_data.append(counter)
        elif (current_byte == next_bytes[0] and
              next_bytes[0] == next_bytes[1] and
              next_bytes[1] == next_bytes[2]):
            # Do RLE compression
            compressed_data.append(RLE_BYTE)
            compressed_data.append(current_byte)
            counter = 0
            while (index < data_size and
                   raw_data[index] == current_byte and
                   counter < 0xff):
                index += 1
                counter += 1
            compressed_data.append(counter)
        else:
            # Plain data byte.
            compressed_data.append(current_byte)
            index += 1
    return compressed_data
|
alexras/pylsdj | pylsdj/filepack.py | decompress | python | def decompress(compressed_data):
raw_data = []
index = 0
while index < len(compressed_data):
current = compressed_data[index]
index += 1
if current == RLE_BYTE:
directive = compressed_data[index]
index += 1
if directive == RLE_BYTE:
raw_data.append(RLE_BYTE)
else:
count = compressed_data[index]
index += 1
raw_data.extend([directive] * count)
elif current == SPECIAL_BYTE:
directive = compressed_data[index]
index += 1
if directive == SPECIAL_BYTE:
raw_data.append(SPECIAL_BYTE)
elif directive == DEFAULT_WAVE_BYTE:
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_WAVE * count)
elif directive == DEFAULT_INSTR_BYTE:
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count)
elif directive == EOF_BYTE:
assert False, ("Unexpected EOF command encountered while "
"decompressing")
else:
assert False, "Countered unexpected sequence 0x%02x 0x%02x" % (
current, directive)
else:
raw_data.append(current)
return raw_data | Decompress data that has been compressed by the filepack algorithm.
:param compressed_data: an array of compressed data bytes to decompress
:rtype: an array of decompressed bytes | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/filepack.py#L288-L338 | null | import itertools
from .vendor.six.moves import range
# Byte used to denote run-length encoding
RLE_BYTE = 0xc0
# Byte used to denote special action
SPECIAL_BYTE = 0xe0
# Byte used to denote end of file (appears after special byte)
EOF_BYTE = 0xff
# Byte used to denote default instrument
DEFAULT_INSTR_BYTE = 0xf1
# Byte used to denote default wave
DEFAULT_WAVE_BYTE = 0xf0
DEFAULT_WAVE = bytearray(
[0x8e, 0xcd, 0xcc, 0xbb, 0xaa, 0xa9, 0x99, 0x88, 0x87, 0x76,
0x66, 0x55, 0x54, 0x43, 0x32, 0x31])
DEFAULT_INSTRUMENT_FILEPACK = bytearray([
0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0])
DEFAULT_INSTRUMENT = bytearray([
0, 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0])
# DEFAULT_INSTRUMENT = [
# 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0]
RESERVED_BYTES = [SPECIAL_BYTE, RLE_BYTE]
SPECIAL_DEFAULTS = [DEFAULT_INSTR_BYTE, DEFAULT_WAVE_BYTE]
STATE_BYTES = 0
STATE_RLE_BYTE = 1
STATE_RLE_COUNT = 2
STATE_SPECIAL_BYTE = 3
STATE_DEFAULT_INSTR = 4
STATE_DEFAULT_WAVE = 5
STATE_DONE = 6
def split(compressed_data, segment_size, block_factory):
    """Splits compressed data into blocks.

    :param compressed_data: the compressed data to split
    :param segment_size: the size of a block in bytes
    :param block_factory: a BlockFactory used to construct the blocks
    :rtype: a list of block IDs of blocks that the block factory created while
        splitting
    """
    # Split compressed data into blocks
    segments = []
    current_segment_start = 0
    index = 0
    data_size = len(compressed_data)
    while index < data_size:
        current_byte = compressed_data[index]
        if index < data_size - 1:
            next_byte = compressed_data[index + 1]
        else:
            next_byte = None
        # jump_size is the byte width of the current filepack command.
        jump_size = 1
        if current_byte == RLE_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "RLE byte"
            if next_byte == RLE_BYTE:
                jump_size = 2
            else:
                jump_size = 3
        elif current_byte == SPECIAL_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "special byte"
            if next_byte == SPECIAL_BYTE:
                jump_size = 2
            elif next_byte == DEFAULT_INSTR_BYTE or \
                    next_byte == DEFAULT_WAVE_BYTE:
                jump_size = 3
            else:
                assert False, "Encountered unexpected EOF or block " \
                    "switch while segmenting"
        # Need two bytes for the jump or EOF
        if index - current_segment_start + jump_size > segment_size - 2:
            segments.append(compressed_data[
                current_segment_start:index])
            current_segment_start = index
        else:
            index += jump_size
    # Append the last segment, if any
    # NOTE(review): upper bound current_segment_start + index overshoots;
    # it works only because Python clamps slices (index == data_size here).
    if current_segment_start != index:
        segments.append(compressed_data[
            current_segment_start:current_segment_start + index])
    # Make sure that no data was lost while segmenting
    total_segment_length = sum(map(len, segments))
    assert total_segment_length == len(compressed_data), "Lost %d bytes of " \
        "data while segmenting" % (len(compressed_data) - total_segment_length)
    # One block per segment, allocated up front so segments can reference
    # their successor's ID.
    block_ids = []
    for segment in segments:
        block = block_factory.new_block()
        block_ids.append(block.id)
    for (i, segment) in enumerate(segments):
        block = block_factory.blocks[block_ids[i]]
        # NOTE(review): the next string literal is a separate no-op
        # statement; the assert message is truncated.
        assert len(block.data) == 0, "Encountered a block with "
        "pre-existing data while writing"
        if i == len(segments) - 1:
            # Write EOF to the end of the segment
            add_eof(segment)
        else:
            # Write a pointer to the next segment
            add_block_switch(segment, block_ids[i + 1])
        # Pad segment with zeroes until it's large enough
        pad(segment, segment_size)
        block.data = segment
    return block_ids
def renumber_block_keys(blocks):
    """Renumber a block map's indices so that they match the blocks' block
    switch statements.

    :param blocks: a block map to renumber
    :rtype: a renumbered copy of the block map
    """
    # There is an implicit block switch to the 0th block at the start of the
    # file
    byte_switch_keys = [0]
    block_keys = list(blocks.keys())
    # Scan the blocks, recording every block switch statement
    for block in list(blocks.values()):
        i = 0
        # Step command-by-command so argument bytes are never misread as
        # commands.
        while i < len(block.data) - 1:
            current_byte = block.data[i]
            next_byte = block.data[i + 1]
            if current_byte == RLE_BYTE:
                if next_byte == RLE_BYTE:
                    # Escaped literal RLE byte.
                    i += 2
                else:
                    # RLE run: command, value, count.
                    i += 3
            elif current_byte == SPECIAL_BYTE:
                if next_byte in SPECIAL_DEFAULTS:
                    # Default instrument/wave: command, directive, count.
                    i += 3
                elif next_byte == SPECIAL_BYTE:
                    # Escaped literal special byte.
                    i += 2
                else:
                    # Block switch or EOF ends the scan of this block.
                    if next_byte != EOF_BYTE:
                        byte_switch_keys.append(next_byte)
                    break
            else:
                # Plain data byte.
                i += 1
    byte_switch_keys.sort()
    block_keys.sort()
    assert len(byte_switch_keys) == len(block_keys), (
        "Number of blocks that are target of block switches (%d) "
        % (len(byte_switch_keys)) +
        "does not equal number of blocks in the song (%d)"
        % (len(block_keys)) +
        "; possible corruption")
    if byte_switch_keys == block_keys:
        # No remapping necessary
        return blocks
    new_block_map = {}
    # Pair sorted physical keys with sorted switch targets.
    for block_key, byte_switch_key in zip(
            block_keys, byte_switch_keys):
        new_block_map[byte_switch_key] = blocks[block_key]
    return new_block_map
def merge(blocks):
    """Merge the given blocks into a contiguous block of compressed data.

    :param blocks: the list of blocks
    :rtype: a list of compressed bytes
    """
    # Start from the lowest-numbered block; each block's trailing switch
    # command names its successor.
    block = blocks[sorted(blocks.keys())[0]]
    merged = []
    while True:
        prefix_length = None
        successor = None
        reached_eof = False
        data = block.data
        scan = 0
        # Walk the block one filepack command at a time.
        while scan < len(data) - 1:
            command = data[scan]
            argument = data[scan + 1]
            if command == RLE_BYTE:
                scan += 2 if argument == RLE_BYTE else 3
            elif command == SPECIAL_BYTE:
                if argument in SPECIAL_DEFAULTS:
                    scan += 3
                elif argument == SPECIAL_BYTE:
                    scan += 2
                else:
                    # Block switch or EOF: payload ends here.
                    prefix_length = scan
                    if argument == EOF_BYTE:
                        reached_eof = True
                    else:
                        successor = blocks[argument]
                    break
            else:
                scan += 1
        assert prefix_length is not None, "Ran off the end of a "\
            "block without encountering a block switch or EOF"
        merged.extend(data[0:prefix_length])
        if reached_eof:
            break
        assert successor is not None, "Switched blocks, but did " \
            "not provide the next block to switch to"
        block = successor
    return merged
def add_eof(segment):
    """Terminate *segment* with the filepack end-of-file command."""
    for command_byte in (SPECIAL_BYTE, EOF_BYTE):
        segment.append(command_byte)
def add_block_switch(segment, block_id):
    """Terminate *segment* with a switch to another block.

    :param segment: the segment to which to add the statement
    :param block_id: the block ID to which the switch statement should switch
    """
    segment.append(SPECIAL_BYTE)
    segment.append(block_id)
def pad(segment, size):
    """Extend *segment* with zero bytes until it is *size* long.

    :param segment: the segment to pad
    :param size: the size to which to pad the segment
    """
    while len(segment) < size:
        segment.append(0)
    assert len(segment) == size
def compress(raw_data):
    """Compress raw bytes with the filepack algorithm.

    :param raw_data: an array of raw data bytes to compress
    :rtype: a list of compressed bytes
    """
    raw_data = bytearray(raw_data)
    compressed_data = []
    data_size = len(raw_data)
    index = 0
    next_bytes = [-1, -1, -1]

    def is_default_instrument(index):
        # Cheap two-byte prefix check before the full 16-byte comparison.
        if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):
            return False
        instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]
        if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:
            return False
        return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK

    def is_default_wave(index):
        return (index + len(DEFAULT_WAVE) <= len(raw_data) and
                raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)

    while index < data_size:
        current_byte = raw_data[index]
        # Peek at up to three lookahead bytes (-1 past the end) for the
        # run-length check below.
        for i in range(3):
            if index < data_size - (i + 1):
                next_bytes[i] = raw_data[index + (i + 1)]
            else:
                next_bytes[i] = -1
        if current_byte == RLE_BYTE:
            # Literal RLE byte must be escaped.
            compressed_data.append(RLE_BYTE)
            compressed_data.append(RLE_BYTE)
            index += 1
        elif current_byte == SPECIAL_BYTE:
            # Literal special byte must be escaped.
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(SPECIAL_BYTE)
            index += 1
        elif is_default_instrument(index):
            counter = 1
            index += len(DEFAULT_INSTRUMENT_FILEPACK)
            # Bug fix: the bound was 0x100, which let counter reach 256 --
            # a value that does not fit in the single count byte appended
            # below. Cap at 0xff, matching the default-wave branch.
            while (is_default_instrument(index) and
                   counter < 0xff):
                counter += 1
                index += len(DEFAULT_INSTRUMENT_FILEPACK)
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_INSTR_BYTE)
            compressed_data.append(counter)
        elif is_default_wave(index):
            counter = 1
            index += len(DEFAULT_WAVE)
            while is_default_wave(index) and counter < 0xff:
                counter += 1
                index += len(DEFAULT_WAVE)
            compressed_data.append(SPECIAL_BYTE)
            compressed_data.append(DEFAULT_WAVE_BYTE)
            compressed_data.append(counter)
        elif (current_byte == next_bytes[0] and
              next_bytes[0] == next_bytes[1] and
              next_bytes[1] == next_bytes[2]):
            # At least four identical bytes in a row: emit an RLE run.
            compressed_data.append(RLE_BYTE)
            compressed_data.append(current_byte)
            counter = 0
            while (index < data_size and
                   raw_data[index] == current_byte and
                   counter < 0xff):
                index += 1
                counter += 1
            compressed_data.append(counter)
        else:
            # Plain data byte.
            compressed_data.append(current_byte)
            index += 1
    return compressed_data
|
alexras/pylsdj | pylsdj/filepack.py | compress | python | def compress(raw_data):
raw_data = bytearray(raw_data)
compressed_data = []
data_size = len(raw_data)
index = 0
next_bytes = [-1, -1, -1]
def is_default_instrument(index):
if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):
return False
instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]
if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:
return False
return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK
def is_default_wave(index):
return (index + len(DEFAULT_WAVE) <= len(raw_data) and
raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)
while index < data_size:
current_byte = raw_data[index]
for i in range(3):
if index < data_size - (i + 1):
next_bytes[i] = raw_data[index + (i + 1)]
else:
next_bytes[i] = -1
if current_byte == RLE_BYTE:
compressed_data.append(RLE_BYTE)
compressed_data.append(RLE_BYTE)
index += 1
elif current_byte == SPECIAL_BYTE:
compressed_data.append(SPECIAL_BYTE)
compressed_data.append(SPECIAL_BYTE)
index += 1
elif is_default_instrument(index):
counter = 1
index += len(DEFAULT_INSTRUMENT_FILEPACK)
while (is_default_instrument(index) and
counter < 0x100):
counter += 1
index += len(DEFAULT_INSTRUMENT_FILEPACK)
compressed_data.append(SPECIAL_BYTE)
compressed_data.append(DEFAULT_INSTR_BYTE)
compressed_data.append(counter)
elif is_default_wave(index):
counter = 1
index += len(DEFAULT_WAVE)
while is_default_wave(index) and counter < 0xff:
counter += 1
index += len(DEFAULT_WAVE)
compressed_data.append(SPECIAL_BYTE)
compressed_data.append(DEFAULT_WAVE_BYTE)
compressed_data.append(counter)
elif (current_byte == next_bytes[0] and
next_bytes[0] == next_bytes[1] and
next_bytes[1] == next_bytes[2]):
# Do RLE compression
compressed_data.append(RLE_BYTE)
compressed_data.append(current_byte)
counter = 0
while (index < data_size and
raw_data[index] == current_byte and
counter < 0xff):
index += 1
counter += 1
compressed_data.append(counter)
else:
compressed_data.append(current_byte)
index += 1
return compressed_data | Compress raw bytes with the filepack algorithm.
:param raw_data: an array of raw data bytes to compress
:rtype: a list of compressed bytes | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/filepack.py#L341-L433 | [
"def is_default_instrument(index):\n if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):\n return False\n\n instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]\n\n if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:\n return False\n\n return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK\n",
"def is_default_wave(index):\n return (index + len(DEFAULT_WAVE) <= len(raw_data) and\n raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)\n"
] | import itertools
from .vendor.six.moves import range
# Byte used to denote run-length encoding
RLE_BYTE = 0xc0
# Byte used to denote special action
SPECIAL_BYTE = 0xe0
# Byte used to denote end of file (appears after special byte)
EOF_BYTE = 0xff
# Byte used to denote default instrument
DEFAULT_INSTR_BYTE = 0xf1
# Byte used to denote default wave
DEFAULT_WAVE_BYTE = 0xf0
DEFAULT_WAVE = bytearray(
[0x8e, 0xcd, 0xcc, 0xbb, 0xaa, 0xa9, 0x99, 0x88, 0x87, 0x76,
0x66, 0x55, 0x54, 0x43, 0x32, 0x31])
DEFAULT_INSTRUMENT_FILEPACK = bytearray([
0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0])
DEFAULT_INSTRUMENT = bytearray([
0, 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0])
# DEFAULT_INSTRUMENT = [
# 0xa8, 0, 0, 0xff, 0, 0, 3, 0, 0, 0xd0, 0, 0, 0, 0xf3, 0, 0]
RESERVED_BYTES = [SPECIAL_BYTE, RLE_BYTE]
SPECIAL_DEFAULTS = [DEFAULT_INSTR_BYTE, DEFAULT_WAVE_BYTE]
STATE_BYTES = 0
STATE_RLE_BYTE = 1
STATE_RLE_COUNT = 2
STATE_SPECIAL_BYTE = 3
STATE_DEFAULT_INSTR = 4
STATE_DEFAULT_WAVE = 5
STATE_DONE = 6
def split(compressed_data, segment_size, block_factory):
    """Split compressed data into block-sized segments.

    :param compressed_data: the compressed data to split
    :param segment_size: the size of a block in bytes
    :param block_factory: a BlockFactory used to construct the blocks
    :rtype: a list of block IDs of blocks that the block factory created
        while splitting
    """
    segments = []
    current_segment_start = 0
    index = 0

    data_size = len(compressed_data)

    while index < data_size:
        current_byte = compressed_data[index]

        if index < data_size - 1:
            next_byte = compressed_data[index + 1]
        else:
            next_byte = None

        jump_size = 1

        if current_byte == RLE_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "RLE byte"

            if next_byte == RLE_BYTE:
                jump_size = 2
            else:
                jump_size = 3
        elif current_byte == SPECIAL_BYTE:
            assert next_byte is not None, "Expected a command to follow " \
                "special byte"

            if next_byte == SPECIAL_BYTE:
                jump_size = 2
            elif next_byte == DEFAULT_INSTR_BYTE or \
                    next_byte == DEFAULT_WAVE_BYTE:
                jump_size = 3
            else:
                assert False, "Encountered unexpected EOF or block " \
                    "switch while segmenting"

        # Need two bytes at the end of every segment for the block-switch
        # jump or the EOF marker.
        if index - current_segment_start + jump_size > segment_size - 2:
            segments.append(compressed_data[current_segment_start:index])
            current_segment_start = index
        else:
            index += jump_size

    # Append the last segment, if any.
    # BUGFIX: the upper slice bound was ``current_segment_start + index``,
    # which only produced the right result because slicing clamps to the
    # end of the list; use the intended bound explicitly.
    if current_segment_start != index:
        segments.append(compressed_data[current_segment_start:index])

    # Make sure that no data was lost while segmenting.
    total_segment_length = sum(map(len, segments))
    assert total_segment_length == len(compressed_data), "Lost %d bytes of " \
        "data while segmenting" % (len(compressed_data) - total_segment_length)

    block_ids = []

    for segment in segments:
        block = block_factory.new_block()
        block_ids.append(block.id)

    for (i, segment) in enumerate(segments):
        block = block_factory.blocks[block_ids[i]]

        # BUGFIX: this assertion's message was split across two statements,
        # leaving the second half as a dangling no-op string expression and
        # truncating the actual message.
        assert len(block.data) == 0, "Encountered a block with " \
            "pre-existing data while writing"

        if i == len(segments) - 1:
            # Write EOF to the end of the segment.
            add_eof(segment)
        else:
            # Write a pointer to the next segment.
            add_block_switch(segment, block_ids[i + 1])

        # Pad segment with zeroes until it's large enough.
        pad(segment, segment_size)

        block.data = segment

    return block_ids
def renumber_block_keys(blocks):
    """Renumber a block map's indices so that they match the blocks' block
    switch statements.

    :param blocks: a block map to renumber
    :rtype: a renumbered copy of the block map
    """
    # There is an implicit block switch to the 0th block at the start of
    # the file.
    byte_switch_keys = [0]
    block_keys = list(blocks.keys())

    # Scan every block, recording the target of each block switch
    # statement it contains.
    for block in list(blocks.values()):
        i = 0

        while i < len(block.data) - 1:
            current_byte = block.data[i]
            next_byte = block.data[i + 1]

            if current_byte == RLE_BYTE:
                i += 2 if next_byte == RLE_BYTE else 3
            elif current_byte == SPECIAL_BYTE:
                if next_byte in SPECIAL_DEFAULTS:
                    i += 3
                elif next_byte == SPECIAL_BYTE:
                    i += 2
                else:
                    # Block switch (or EOF, which is not a switch target).
                    if next_byte != EOF_BYTE:
                        byte_switch_keys.append(next_byte)
                    break
            else:
                i += 1

    byte_switch_keys.sort()
    block_keys.sort()

    assert len(byte_switch_keys) == len(block_keys), (
        "Number of blocks that are target of block switches (%d) "
        % (len(byte_switch_keys)) +
        "does not equal number of blocks in the song (%d)"
        % (len(block_keys)) +
        "; possible corruption")

    if byte_switch_keys == block_keys:
        # Keys already line up with switch targets; nothing to remap.
        return blocks

    remapped = {}

    for block_key, byte_switch_key in zip(block_keys, byte_switch_keys):
        remapped[byte_switch_key] = blocks[block_key]

    return remapped
def merge(blocks):
    """Merge the given blocks into a contiguous block of compressed data.

    :param blocks: the list of blocks
    :rtype: a list of compressed bytes
    """
    current_block = blocks[sorted(blocks.keys())[0]]

    compressed_data = []
    eof = False

    while not eof:
        data_size_to_append = None
        next_block = None

        i = 0
        while i < len(current_block.data) - 1:
            current_byte = current_block.data[i]
            next_byte = current_block.data[i + 1]

            if current_byte == RLE_BYTE:
                i += 2 if next_byte == RLE_BYTE else 3
            elif current_byte == SPECIAL_BYTE:
                if next_byte in SPECIAL_DEFAULTS:
                    i += 3
                elif next_byte == SPECIAL_BYTE:
                    i += 2
                else:
                    # Block switch or EOF: copy everything before it.
                    data_size_to_append = i

                    if next_byte == EOF_BYTE:
                        eof = True
                    else:
                        next_block = blocks[next_byte]
                    break
            else:
                i += 1

        assert data_size_to_append is not None, "Ran off the end of a "\
            "block without encountering a block switch or EOF"

        compressed_data.extend(current_block.data[0:data_size_to_append])

        if not eof:
            assert next_block is not None, "Switched blocks, but did " \
                "not provide the next block to switch to"
            current_block = next_block

    return compressed_data
def add_eof(segment):
    """Append the special-byte EOF marker to a segment."""
    segment.append(SPECIAL_BYTE)
    segment.append(EOF_BYTE)
def add_block_switch(segment, block_id):
    """Append a block switch statement to a segment.

    :param segment: the segment to which to add the statement
    :param block_id: the block ID to which the switch statement should switch
    """
    segment.append(SPECIAL_BYTE)
    segment.append(block_id)
def pad(segment, size):
    """Append zeroes to a segment until it reaches the given size.

    :param segment: the segment to pad
    :param size: the size to which to pad the segment
    """
    segment.extend([0] * (size - len(segment)))
    assert len(segment) == size
def decompress(compressed_data):
    """Decompress data that has been compressed by the filepack algorithm.

    :param compressed_data: an array of compressed data bytes to decompress
    :rtype: an array of decompressed bytes
    """
    raw_data = []

    index = 0
    end = len(compressed_data)

    while index < end:
        current = compressed_data[index]
        index += 1

        if current == RLE_BYTE:
            directive = compressed_data[index]
            index += 1

            if directive == RLE_BYTE:
                # Escaped literal RLE byte.
                raw_data.append(RLE_BYTE)
            else:
                # Run-length encoded pair: <byte> <count>.
                count = compressed_data[index]
                index += 1
                raw_data.extend([directive] * count)
        elif current == SPECIAL_BYTE:
            directive = compressed_data[index]
            index += 1

            if directive == SPECIAL_BYTE:
                # Escaped literal special byte.
                raw_data.append(SPECIAL_BYTE)
            elif directive == DEFAULT_WAVE_BYTE:
                count = compressed_data[index]
                index += 1
                raw_data.extend(DEFAULT_WAVE * count)
            elif directive == DEFAULT_INSTR_BYTE:
                count = compressed_data[index]
                index += 1
                raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count)
            elif directive == EOF_BYTE:
                assert False, ("Unexpected EOF command encountered while "
                               "decompressing")
            else:
                assert False, "Countered unexpected sequence 0x%02x 0x%02x" % (
                    current, directive)
        else:
            raw_data.append(current)

    return raw_data
|
def name_without_zeroes(name):
    """Return a human-readable name without LSDJ's trailing zero padding.

    :param name: the raw name bytes, possibly padded with trailing NULs
    :rtype: the name truncated at the first NUL byte (decoded to str);
        the input unchanged if it contains no NUL
    """
    first_zero = name.find(b'\0')

    if first_zero == -1:
        return name

    # BUGFIX: ``str(bytes)`` produces the repr ("b'...'"), not the text;
    # decode the truncated bytes instead.
    return name[:first_zero].decode('utf-8')
:param name: the name from which to strip zeroes
:rtype: the name, without trailing zeroes | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/utils.py#L32-L44 | null | import os
import tempfile
from .vendor.six.moves import range
def printable_decimal_and_hex(num):
    """Format a number as decimal followed by hex, e.g. ``"10 (0xa)"``."""
    return f"{num:d} (0x{num:x})"
def assert_index_sane(index, upper_bound_exclusive):
    """Assert that *index* is an integer in ``[0, upper_bound_exclusive)``.

    :param index: the value to validate
    :param upper_bound_exclusive: one past the largest legal index
    """
    # Deliberate exact-type check (mirrors the original contract).
    is_int = type(index) == int
    assert is_int, "Indices should be integers; '%s' is not" % (
        index)

    in_range = 0 <= index < upper_bound_exclusive
    assert in_range, (
        "Index %d out of range [%d, %d)" % (index, 0, upper_bound_exclusive))
class ObjectLookupDict(object):
    """Positional view that maps lookups through an ID list into an
    object list: ``d[i]`` returns ``object_list[id_list[i]]`` and
    ``d[i] = obj`` stores ``obj.index`` into ``id_list[i]``.
    """

    def __init__(self, id_list, object_list):
        self.id_list = id_list
        self.object_list = object_list

    def __getitem__(self, index):
        assert_index_sane(index, len(self.id_list))
        object_id = self.id_list[index]
        return self.object_list[object_id]

    def __setitem__(self, index, value):
        assert_index_sane(index, len(self.id_list))
        self.id_list[index] = value.index
class temporary_file:
    """Context manager yielding the absolute path of a throwaway temp
    file; the file is created on entry and deleted on exit.
    """

    def __enter__(self):
        handle, path = tempfile.mkstemp()
        # Only the path is needed; release the low-level handle now.
        os.close(handle)
        self.abspath = path
        return self.abspath

    def __exit__(self, t, value, traceback):
        path = getattr(self, 'abspath', None)
        if path is not None:
            os.unlink(path)
def fixed_width_string(string, width, fill=' '):
    """Truncate or right-pad ``string`` to exactly ``width`` characters.

    :param string: the input string
    :param width: the exact width of the result
    :param fill: the character used to pad short strings (default: space)
    :rtype: ``string`` truncated to ``width`` and padded with ``fill``
    """
    # BUGFIX: ljust() was called as ljust(fill), passing the fill
    # character where the target width belongs (a TypeError for str
    # fills, and never padding to the requested width).
    return string[:width].ljust(width, fill)
alexras/pylsdj | pylsdj/instrument.py | Instrument.name | python | def name(self):
instr_name = self.song.song_data.instrument_names[self.index]
if type(instr_name) == bytes:
instr_name = instr_name.decode('utf-8')
return instr_name | the instrument's name (5 characters, zero-padded) | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/instrument.py#L21-L28 | null | class Instrument(object):
def __init__(self, song, index):
    """Wrap instrument ``index`` of ``song``'s parsed song data."""
    self.song = song
    self.index = index
    # Raw instrument record from the song's parsed data.
    self.data = song.song_data.instruments[index]
def __eq__(self, other):
    """Instruments are equal when all their key fields match."""
    if not isinstance(other, Instrument):
        return False
    return (self.name == other.name and
            self.type == other.type and
            self.table == other.table and
            self.automate == other.automate and
            self.pan == other.pan)
@property
@name.setter
def name(self, val):
    # Names are stored as UTF-8 bytes; encode str input transparently.
    if type(val) != bytes:
        val = val.encode('utf-8')
    self.song.song_data.instrument_names[self.index] = val
@property
def type(self):
    """the instrument's type (``pulse``, ``wave``, ``kit`` or ``noise``)"""
    return self.data.instrument_type

@type.setter
def type(self, value):
    self.data.instrument_type = value
@property
def table(self):
    """a ``pylsdj.Table`` referencing the instrument's table, or None
    if the instrument doesn't have a table"""
    if not (hasattr(self.data, 'table_on') and self.data.table_on):
        return None
    assert_index_sane(self.data.table, len(self.song.tables))
    return self.song.tables[self.data.table]

@table.setter
def table(self, value):
    if not hasattr(self.data, "table_on"):
        raise ValueError("This instrument doesn't have a table")
    self.data.table_on = True
    self.data.table = value.index
@property
def automate(self):
    """if True, automation is on"""
    return self.data.automate

@automate.setter
def automate(self, value):
    self.data.automate = value
@property
def pan(self):
    """the instrument's pan value (stored in the raw data record)"""
    return self.data.pan

@pan.setter
def pan(self, value):
    self.data.pan = value
def import_lsdinst(self, struct_data):
    """Import this instrument's settings from an lsdinst struct."""
    self.name = struct_data['name']

    params = struct_data['data']
    self.automate = params['automate']
    self.pan = params['pan']

    if self.table is not None:
        self.table.import_lsdinst(struct_data)
def export_to_file(self, filename):
    """Export this instrument's settings to a file.

    :param filename: the name of the file
    """
    with open(filename, 'w') as fp:
        json.dump(self.export_struct(), fp, indent=2)
def export_struct(self):
    """Build a JSON-serializable dict of this instrument's settings."""
    fields = json.loads(self.data.as_json())

    export_struct = {
        'name': self.name,
        # Drop private fields and the sub-structures handled separately.
        'data': {
            key: value
            for key, value in fields.items()
            if key[0] != '_' and key not in ('synth', 'table')
        },
    }

    if self.table is not None:
        export_struct['table'] = self.table.export()

    return export_struct
|
def table(self):
    """a ``pylsdj.Table`` referencing the instrument's table, or None
    if the instrument doesn't have a table"""
    if hasattr(self.data, 'table_on') and self.data.table_on:
        assert_index_sane(self.data.table, len(self.song.tables))
        return self.song.tables[self.data.table]
    return None
if the instrument doesn't have a table | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/instrument.py#L47-L52 | [
"def assert_index_sane(index, upper_bound_exclusive):\n assert type(index) == int, \"Indices should be integers; '%s' is not\" % (\n index)\n assert 0 <= index < upper_bound_exclusive, (\n \"Index %d out of range [%d, %d)\" % (index, 0, upper_bound_exclusive))\n"
] | class Instrument(object):
def __init__(self, song, index):
self.song = song
self.data = song.song_data.instruments[index]
self.index = index
def __eq__(self, other):
return (isinstance(other, Instrument) and
self.name == other.name and
self.type == other.type and
self.table == other.table and
self.automate == other.automate and
self.pan == other.pan)
@property
def name(self):
"""the instrument's name (5 characters, zero-padded)"""
instr_name = self.song.song_data.instrument_names[self.index]
if type(instr_name) == bytes:
instr_name = instr_name.decode('utf-8')
return instr_name
@name.setter
def name(self, val):
if type(val) != bytes:
val = val.encode('utf-8')
self.song.song_data.instrument_names[self.index] = val
@property
def type(self):
"""the instrument's type (``pulse``, ``wave``, ``kit`` or ``noise``)"""
return self.data.instrument_type
@type.setter
def type(self, value):
self.data.instrument_type = value
@property
@table.setter
def table(self, value):
if not hasattr(self.data, "table_on"):
raise ValueError("This instrument doesn't have a table")
self.data.table_on = True
self.data.table = value.index
@property
def automate(self):
"""if True, automation is on"""
return self.data.automate
@automate.setter
def automate(self, value):
self.data.automate = value
@property
def pan(self):
return self.data.pan
@pan.setter
def pan(self, value):
self.data.pan = value
def import_lsdinst(self, struct_data):
"""import from an lsdinst struct"""
self.name = struct_data['name']
self.automate = struct_data['data']['automate']
self.pan = struct_data['data']['pan']
if self.table is not None:
self.table.import_lsdinst(struct_data)
def export_to_file(self, filename):
"""Export this instrument's settings to a file.
:param filename: the name of the file
"""
instr_json = self.export_struct()
with open(filename, 'w') as fp:
json.dump(instr_json, fp, indent=2)
def export_struct(self):
export_struct = {}
export_struct['name'] = self.name
export_struct['data'] = {}
data_json = json.loads(self.data.as_json())
for key, value in list(data_json.items()):
if key[0] != '_' and key not in ('synth', 'table'):
export_struct['data'][key] = value
if self.table is not None:
export_struct['table'] = self.table.export()
return export_struct
|
def import_lsdinst(self, struct_data):
    """Import this instrument's settings from an lsdinst struct."""
    self.name = struct_data['name']
    self.automate = struct_data['data']['automate']
    self.pan = struct_data['data']['pan']

    table = self.table
    if table is not None:
        table.import_lsdinst(struct_data)
def __init__(self, song, index):
self.song = song
self.data = song.song_data.instruments[index]
self.index = index
def __eq__(self, other):
return (isinstance(other, Instrument) and
self.name == other.name and
self.type == other.type and
self.table == other.table and
self.automate == other.automate and
self.pan == other.pan)
@property
def name(self):
"""the instrument's name (5 characters, zero-padded)"""
instr_name = self.song.song_data.instrument_names[self.index]
if type(instr_name) == bytes:
instr_name = instr_name.decode('utf-8')
return instr_name
@name.setter
def name(self, val):
if type(val) != bytes:
val = val.encode('utf-8')
self.song.song_data.instrument_names[self.index] = val
@property
def type(self):
"""the instrument's type (``pulse``, ``wave``, ``kit`` or ``noise``)"""
return self.data.instrument_type
@type.setter
def type(self, value):
self.data.instrument_type = value
@property
def table(self):
"""a ```pylsdj.Table``` referencing the instrument's table, or None
if the instrument doesn't have a table"""
if hasattr(self.data, 'table_on') and self.data.table_on:
assert_index_sane(self.data.table, len(self.song.tables))
return self.song.tables[self.data.table]
@table.setter
def table(self, value):
if not hasattr(self.data, "table_on"):
raise ValueError("This instrument doesn't have a table")
self.data.table_on = True
self.data.table = value.index
@property
def automate(self):
"""if True, automation is on"""
return self.data.automate
@automate.setter
def automate(self, value):
self.data.automate = value
@property
def pan(self):
return self.data.pan
@pan.setter
def pan(self, value):
self.data.pan = value
def export_to_file(self, filename):
"""Export this instrument's settings to a file.
:param filename: the name of the file
"""
instr_json = self.export_struct()
with open(filename, 'w') as fp:
json.dump(instr_json, fp, indent=2)
def export_struct(self):
export_struct = {}
export_struct['name'] = self.name
export_struct['data'] = {}
data_json = json.loads(self.data.as_json())
for key, value in list(data_json.items()):
if key[0] != '_' and key not in ('synth', 'table'):
export_struct['data'][key] = value
if self.table is not None:
export_struct['table'] = self.table.export()
return export_struct
|
def export_to_file(self, filename):
    """Export this instrument's settings to a JSON file.

    :param filename: the name of the file
    """
    instr_json = self.export_struct()
    with open(filename, 'w') as fp:
        json.dump(instr_json, fp, indent=2)
:param filename: the name of the file | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/instrument.py#L88-L96 | [
"def export_struct(self):\n export_struct = {}\n\n export_struct['name'] = self.name\n export_struct['data'] = {}\n\n data_json = json.loads(self.data.as_json())\n\n for key, value in list(data_json.items()):\n if key[0] != '_' and key not in ('synth', 'table'):\n export_struct['data'][key] = value\n\n if self.table is not None:\n export_struct['table'] = self.table.export()\n\n return export_struct\n"
] | class Instrument(object):
def __init__(self, song, index):
self.song = song
self.data = song.song_data.instruments[index]
self.index = index
def __eq__(self, other):
return (isinstance(other, Instrument) and
self.name == other.name and
self.type == other.type and
self.table == other.table and
self.automate == other.automate and
self.pan == other.pan)
@property
def name(self):
"""the instrument's name (5 characters, zero-padded)"""
instr_name = self.song.song_data.instrument_names[self.index]
if type(instr_name) == bytes:
instr_name = instr_name.decode('utf-8')
return instr_name
@name.setter
def name(self, val):
if type(val) != bytes:
val = val.encode('utf-8')
self.song.song_data.instrument_names[self.index] = val
@property
def type(self):
"""the instrument's type (``pulse``, ``wave``, ``kit`` or ``noise``)"""
return self.data.instrument_type
@type.setter
def type(self, value):
self.data.instrument_type = value
@property
def table(self):
"""a ```pylsdj.Table``` referencing the instrument's table, or None
if the instrument doesn't have a table"""
if hasattr(self.data, 'table_on') and self.data.table_on:
assert_index_sane(self.data.table, len(self.song.tables))
return self.song.tables[self.data.table]
@table.setter
def table(self, value):
if not hasattr(self.data, "table_on"):
raise ValueError("This instrument doesn't have a table")
self.data.table_on = True
self.data.table = value.index
@property
def automate(self):
"""if True, automation is on"""
return self.data.automate
@automate.setter
def automate(self, value):
self.data.automate = value
@property
def pan(self):
return self.data.pan
@pan.setter
def pan(self, value):
self.data.pan = value
def import_lsdinst(self, struct_data):
"""import from an lsdinst struct"""
self.name = struct_data['name']
self.automate = struct_data['data']['automate']
self.pan = struct_data['data']['pan']
if self.table is not None:
self.table.import_lsdinst(struct_data)
def export_struct(self):
export_struct = {}
export_struct['name'] = self.name
export_struct['data'] = {}
data_json = json.loads(self.data.as_json())
for key, value in list(data_json.items()):
if key[0] != '_' and key not in ('synth', 'table'):
export_struct['data'][key] = value
if self.table is not None:
export_struct['table'] = self.table.export()
return export_struct
|
def write_wav(self, filename):
    """Write this sample to a WAV file.

    :param filename: the file to which to write
    """
    wave_output = None
    try:
        wave_output = wave.open(filename, 'w')
        wave_output.setparams(WAVE_PARAMS)
        # Samples are stored as 4-bit nibbles; shift each into the high
        # nibble of an 8-bit frame.
        frames = bytearray(nibble << 4 for nibble in self.sample_data)
        wave_output.writeframes(frames)
    finally:
        if wave_output is not None:
            wave_output.close()
:param filename: the file to which to write | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/kits.py#L229-L246 | null | class KitSample(object):
def __init__(self, data, index):
    """Wrap sample ``index`` of a kit's parsed data."""
    self._data = data
    self.index = index

    # Because of data layout, indices for bits are
    #
    # 7, 6, 5, ..., 0, unused, 14, 13, ..., 8
    #
    # so the indexing logic is a little funky.
    if self.index < 8:
        self.force_loop_index = 7 - self.index
    else:
        self.force_loop_index = 15 - (self.index - 8)
def _sample_used(self, index):
    # An end offset of zero marks an unused sample slot.
    return self._data.sample_ends[index] > 0
def _get_sample_data_bounds(self, index=None, sample_ends=None):
    """Return ``(start, end)`` nibble offsets for a sample's data."""
    if index is None:
        index = self.index
    if sample_ends is None:
        sample_ends = self._data.sample_ends

    sample_start = 0 if index == 0 else sample_ends[index - 1]
    sample_end = sample_ends[index]

    # Sample end addresses are relative to the start of the kit's sample
    # memory.
    sample_start -= SAMPLE_START_ADDRESS
    sample_end -= SAMPLE_START_ADDRESS

    # Double all bounds: offsets are stored as bytes, but sample data is
    # addressed in nibbles.
    sample_start *= 2
    sample_end *= 2

    return (max(sample_start, 0), max(sample_end, 0))
def _get_sample_length(self, index):
    """Return the sample's data length, or 0 for an unused slot."""
    if not self._sample_used(index):
        return 0

    sample_start, sample_end = self._get_sample_data_bounds(index)
    return sample_end - sample_start + 1
def _get_sample_data(self, index):
    """Return the sample's raw nibbles, or None if the slot is unused."""
    if not self._sample_used(index):
        return None

    start, end = self._get_sample_data_bounds(index)
    return bytearray(self._data.sample_data[start:end])
@property
def force_loop(self):
    """true if the sample will loop, false otherwise"""
    return self._data.force_loop[self.force_loop_index]

@force_loop.setter
def force_loop(self, value):
    self._data.force_loop[self.force_loop_index] = value
@property
def name(self):
    """the sample's name"""
    return self._data.sample_names[self.index]

@name.setter
def name(self, value):
    # Names are stored fixed-width, padded with '-'.
    self._data.sample_names[self.index] = fixed_width_string(
        value, KIT_SAMPLE_NAME_LENGTH, '-')
@property
def used(self):
    """True if the sample's memory is in use, false otherwise"""
    return self._sample_used(self.index)
@property
def sample_data(self):
    """The raw hex nibbles that comprise the sample"""
    return self._get_sample_data(self.index)
@sample_data.setter
def sample_data(self, sample_data):
    # For simplicity, repack every sample contiguously and rewrite the
    # kit's sample memory from scratch.
    new_sample_ends = []
    new_sample_data = []

    for i in range(SAMPLES_PER_KIT):
        if not self._sample_used(i) and i != self.index:
            # First unused slot reached; samples are presumed contiguous,
            # so all remaining slots are empty too.
            break

        if i == self.index:
            new_sample_data.extend(sample_data)
        else:
            existing = self._get_sample_data(i)
            if existing is not None:
                new_sample_data.extend(existing)

        # End offsets are stored in bytes; data is tracked in nibbles.
        new_sample_ends.append(len(new_sample_data) // 2)

    if len(new_sample_ends) < SAMPLES_PER_KIT:
        new_sample_ends.extend(
            [0] * (SAMPLES_PER_KIT - len(new_sample_ends)))

    if len(new_sample_data) < MAX_SAMPLE_LENGTH * 2:
        new_sample_data.extend(
            [0] * ((MAX_SAMPLE_LENGTH * 2) - len(new_sample_data)))
    elif len(new_sample_data) > MAX_SAMPLE_LENGTH * 2:
        raise Exception(
            'Not enough sample memory to add this sample to its kit')

    self._data.sample_data = new_sample_data
    self._data.sample_ends = new_sample_ends
def __str__(self):
    start, end = self._get_sample_data_bounds()
    return '%s [0x%04x - 0x%04x]' % (self.name, start, end)
@property
def length(self):
    """the length of the sample, in bytes"""
    start, end = self._get_sample_data_bounds()
    return (end - start + 1) * 4
def read_wav(self, filename):
    """Read sample data for this sample from a WAV file.

    :param filename: the file from which to read
    """
    wave_input = None
    try:
        wave_input = wave.open(filename, 'r')
        wave_frames = bytearray(
            wave_input.readframes(wave_input.getnframes()))
        # Keep only the high nibble of each 8-bit frame.
        self.sample_data = [frame >> 4 for frame in wave_frames]
    finally:
        if wave_input is not None:
            wave_input.close()
|
def read_wav(self, filename):
    """Read sample data for this sample from a WAV file.

    :param filename: the file from which to read
    """
    wave_input = None
    try:
        wave_input = wave.open(filename, 'r')
        raw_frames = bytearray(
            wave_input.readframes(wave_input.getnframes()))
        # Store only the high nibble of each frame.
        self.sample_data = [b >> 4 for b in raw_frames]
    finally:
        if wave_input is not None:
            wave_input.close()
:param filename: the file from which to read | train | https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/kits.py#L248-L264 | null | class KitSample(object):
def __init__(self, data, index):
self._data = data
self.index = index
# Because of data layout, indices for bits are
#
# 7, 6, 5, ..., 0, unused, 14, 13, ..., 8
#
# so the indexing logic is a little funky.
if self.index < 8:
self.force_loop_index = 7 - self.index
else:
self.force_loop_index = 15 - (self.index - 8)
def _sample_used(self, index):
return (self._data.sample_ends[index] > 0)
def _get_sample_data_bounds(self, index=None, sample_ends=None):
if index is None:
index = self.index
if sample_ends is None:
sample_ends = self._data.sample_ends
if index == 0:
sample_start = 0
sample_end = sample_ends[0]
else:
sample_start = sample_ends[index - 1]
sample_end = sample_ends[index]
# Sample end addresses are relative to the start of the kit's sample memory
sample_start = sample_start - SAMPLE_START_ADDRESS
sample_end = sample_end - SAMPLE_START_ADDRESS
# Multiply all sample bounds by two since we're dealing with nibbles
# and the offsets are stored as bytes
sample_start = sample_start * 2
sample_end = sample_end * 2
sample_start = max(sample_start, 0)
sample_end = max(sample_end, 0)
return (sample_start, sample_end)
def _get_sample_length(self, index):
if not self._sample_used(index):
return 0
else:
sample_start, sample_end = self._get_sample_data_bounds(index)
return (sample_end - sample_start + 1)
def _get_sample_data(self, index):
if not self._sample_used(index):
return None
sample_start, sample_end = self._get_sample_data_bounds(index)
return bytearray(self._data.sample_data[sample_start:sample_end])
@property
def force_loop(self):
"""true if the sample will loop, false otherwise"""
return self._data.force_loop[self.force_loop_index]
@force_loop.setter
def force_loop(self, value):
self._data.force_loop[self.force_loop_index] = value
@property
def name(self):
"""the sample's name"""
return self._data.sample_names[self.index]
@name.setter
def name(self, value):
self._data.sample_names[self.index] = fixed_width_string(
value, KIT_SAMPLE_NAME_LENGTH, '-')
@property
def used(self):
"""True if the sample's memory is in use, false otherwise"""
return self._sample_used(self.index)
@property
def sample_data(self):
"""The raw hex nibbles that comprise the sample"""
return self._get_sample_data(self.index)
@sample_data.setter
def sample_data(self, sample_data):
# For simplicity, we'll just pack samples into their new locations and
# overwrite the sample memory for the kit.
new_sample_ends = []
new_sample_data = []
for i in range(SAMPLES_PER_KIT):
if not self._sample_used(i) and i != self.index:
# We've found the first unused sample; since samples are
# presumed to be contiguous, this means we're done moving
# samples
break
if i == self.index:
new_sample_data.extend(sample_data)
else:
current_sample_data = self._get_sample_data(i)
if current_sample_data is not None:
new_sample_data.extend(current_sample_data)
new_sample_ends.append(int(len(new_sample_data) / 2))
if len(new_sample_ends) < SAMPLES_PER_KIT:
new_sample_ends.extend([0] * (SAMPLES_PER_KIT - len(new_sample_ends)))
if len(new_sample_data) < MAX_SAMPLE_LENGTH * 2:
new_sample_data.extend([0] * ((MAX_SAMPLE_LENGTH * 2) - len(new_sample_data)))
elif len(new_sample_data) > MAX_SAMPLE_LENGTH * 2:
raise Exception('Not enough sample memory to add this sample to its kit')
self._data.sample_data = new_sample_data
self._data.sample_ends = new_sample_ends
def __str__(self):
sample_start, sample_end = self._get_sample_data_bounds()
return '%s [0x%04x - 0x%04x]' % (self.name, sample_start, sample_end)
@property
def length(self):
"""the length of the sample, in bytes"""
sample_start, sample_end = self._get_sample_data_bounds()
return (sample_end - sample_start + 1) * 4
def write_wav(self, filename):
"""Write this sample to a WAV file.
:param filename: the file to which to write
"""
wave_output = None
try:
wave_output = wave.open(filename, 'w')
wave_output.setparams(WAVE_PARAMS)
frames = bytearray([x << 4 for x in self.sample_data])
wave_output.writeframes(frames)
finally:
if wave_output is not None:
wave_output.close()
|
def build_graph(path, term_depth=1000, skim_depth=10,
                d_weights=False, **kwargs):
    """Tokenize a text, index a term matrix, and build out a graph.

    Args:
        path (str): The file path.
        term_depth (int): Consider the N most frequent terms.
        skim_depth (int): Connect each word to the N closest siblings.
        d_weights (bool): If true, give "close" nodes low weights.

    Returns:
        Skimmer: The indexed graph.
    """
    # Tokenize text.
    click.echo('\nTokenizing text...')
    text = Text.from_file(path)
    click.echo('Extracted %d tokens' % len(text.tokens))

    # Index the term matrix.
    matrix = Matrix()
    click.echo('\nIndexing terms:')
    matrix.index(text, text.most_frequent_terms(term_depth), **kwargs)

    # Construct the network.
    graph = Skimmer()
    click.echo('\nGenerating graph:')
    graph.build(text, matrix, skim_depth, d_weights)

    return graph
Args:
path (str): The file path.
term_depth (int): Consider the N most frequent terms.
skim_depth (int): Connect each word to the N closest siblings.
d_weights (bool): If true, give "close" nodes low weights.
Returns:
Skimmer: The indexed graph. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/helpers.py#L10-L43 | [
"def build(self, text, matrix, skim_depth=10, d_weights=False):\n\n \"\"\"\n 1. For each term in the passed matrix, score its KDE similarity with\n all other indexed terms.\n\n 2. With the ordered stack of similarities in hand, skim off the top X\n pairs and add them as edges.\n\n Args:\n text (Text): The source text instance.\n matrix (Matrix): An indexed term matrix.\n skim_depth (int): The number of siblings for each term.\n d_weights (bool): If true, give \"close\" words low edge weights.\n \"\"\"\n\n for anchor in bar(matrix.keys):\n\n n1 = text.unstem(anchor)\n\n # Heaviest pair scores:\n pairs = matrix.anchored_pairs(anchor).items()\n for term, weight in list(pairs)[:skim_depth]:\n\n # If edges represent distance, use the complement of the raw\n # score, so that similar words are connected by \"short\" edges.\n if d_weights: weight = 1-weight\n\n n2 = text.unstem(term)\n\n # NetworkX does not handle numpy types when writing graphml,\n # so we cast the weight to a regular float.\n self.graph.add_edge(n1, n2, weight=float(weight))\n",
"def index(self, text, terms=None, **kwargs):\n\n \"\"\"\n Index all term pair distances.\n\n Args:\n text (Text): The source text.\n terms (list): Terms to index.\n \"\"\"\n\n self.clear()\n\n # By default, use all terms.\n terms = terms or text.terms.keys()\n\n pairs = combinations(terms, 2)\n count = comb(len(terms), 2)\n\n for t1, t2 in bar(pairs, expected_size=count, every=1000):\n\n # Set the Bray-Curtis distance.\n score = text.score_braycurtis(t1, t2, **kwargs)\n self.set_pair(t1, t2, score)\n",
"def from_file(cls, path):\n\n \"\"\"\n Create a text from a file.\n\n Args:\n path (str): The file path.\n \"\"\"\n\n with open(path, 'r', errors='replace') as f:\n return cls(f.read())\n",
"def most_frequent_terms(self, depth):\n\n \"\"\"\n Get the X most frequent terms in the text, and then probe down to get\n any other terms that have the same count as the last term.\n\n Args:\n depth (int): The number of terms.\n\n Returns:\n set: The set of frequent terms.\n \"\"\"\n\n counts = self.term_counts()\n\n # Get the top X terms and the instance count of the last word.\n top_terms = set(list(counts.keys())[:depth])\n end_count = list(counts.values())[:depth][-1]\n\n # Merge in all other words with that appear that number of times, so\n # that we don't truncate the last bucket - eg, half of the words that\n # appear 5 times, but not the other half.\n\n bucket = self.term_count_buckets()[end_count]\n return top_terms.union(set(bucket))\n"
] |
import click
from textplot.text import Text
from textplot.graphs import Skimmer
from textplot.matrix import Matrix
|
davidmcclure/textplot | textplot/graphs.py | Graph.draw_spring | python | def draw_spring(self, **kwargs):
nx.draw_spring(
self.graph,
with_labels=True,
font_size=10,
edge_color='#dddddd',
node_size=0,
**kwargs
)
plt.show() | Render a spring layout. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/graphs.py#L27-L42 | null | class Graph(metaclass=ABCMeta):
def __init__(self):
"""
Initialize the graph.
"""
self.graph = nx.Graph()
@abstractmethod
def build(self):
pass
def write_gml(self, path):
"""
Write a GML file.
Args:
path (str): The file path.
"""
nx.write_gml(self.graph, path)
def write_graphml(self, path):
"""
Write a GraphML file.
Args:
path (str): The file path.
"""
nx.write_graphml(self.graph, path)
|
davidmcclure/textplot | textplot/graphs.py | Skimmer.build | python | def build(self, text, matrix, skim_depth=10, d_weights=False):
for anchor in bar(matrix.keys):
n1 = text.unstem(anchor)
# Heaviest pair scores:
pairs = matrix.anchored_pairs(anchor).items()
for term, weight in list(pairs)[:skim_depth]:
# If edges represent distance, use the complement of the raw
# score, so that similar words are connected by "short" edges.
if d_weights: weight = 1-weight
n2 = text.unstem(term)
# NetworkX does not handle numpy types when writing graphml,
# so we cast the weight to a regular float.
self.graph.add_edge(n1, n2, weight=float(weight)) | 1. For each term in the passed matrix, score its KDE similarity with
all other indexed terms.
2. With the ordered stack of similarities in hand, skim off the top X
pairs and add them as edges.
Args:
text (Text): The source text instance.
matrix (Matrix): An indexed term matrix.
skim_depth (int): The number of siblings for each term.
d_weights (bool): If true, give "close" words low edge weights. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/graphs.py#L72-L104 | [
"def anchored_pairs(self, anchor):\n\n \"\"\"\n Get distances between an anchor term and all other terms.\n\n Args:\n anchor (str): The anchor term.\n\n Returns:\n OrderedDict: The distances, in descending order.\n \"\"\"\n\n pairs = OrderedDict()\n\n for term in self.keys:\n score = self.get_pair(anchor, term)\n if score: pairs[term] = score\n\n return utils.sort_dict(pairs)\n",
"def unstem(self, term):\n\n \"\"\"\n Given a stemmed term, get the most common unstemmed variant.\n\n Args:\n term (str): A stemmed term.\n\n Returns:\n str: The unstemmed token.\n \"\"\"\n\n originals = []\n for i in self.terms[term]:\n originals.append(self.tokens[i]['unstemmed'])\n\n mode = Counter(originals).most_common(1)\n return mode[0][0]\n"
] | class Skimmer(Graph):
|
davidmcclure/textplot | textplot/utils.py | tokenize | python | def tokenize(text):
stem = PorterStemmer().stem
tokens = re.finditer('[a-z]+', text.lower())
for offset, match in enumerate(tokens):
# Get the raw token.
unstemmed = match.group(0)
yield { # Emit the token.
'stemmed': stem(unstemmed),
'unstemmed': unstemmed,
'offset': offset
} | Yield tokens.
Args:
text (str): The original text.
Yields:
dict: The next token. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/utils.py#L12-L36 | null |
import re
import numpy as np
import functools
from collections import OrderedDict
from nltk.stem import PorterStemmer
from itertools import islice
def sort_dict(d, desc=True):
"""
Sort an ordered dictionary by value, descending.
Args:
d (OrderedDict): An ordered dictionary.
desc (bool): If true, sort desc.
Returns:
OrderedDict: The sorted dictionary.
"""
sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)
return OrderedDict(sort)
def window(seq, n=2):
"""
Yield a sliding window over an iterable.
Args:
seq (iter): The sequence.
n (int): The window width.
Yields:
tuple: The next window.
"""
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for token in it:
result = result[1:] + (token,)
yield result
|
davidmcclure/textplot | textplot/utils.py | sort_dict | python | def sort_dict(d, desc=True):
sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)
return OrderedDict(sort) | Sort an ordered dictionary by value, descending.
Args:
d (OrderedDict): An ordered dictionary.
desc (bool): If true, sort desc.
Returns:
OrderedDict: The sorted dictionary. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/utils.py#L39-L53 | null |
import re
import numpy as np
import functools
from collections import OrderedDict
from nltk.stem import PorterStemmer
from itertools import islice
def tokenize(text):
"""
Yield tokens.
Args:
text (str): The original text.
Yields:
dict: The next token.
"""
stem = PorterStemmer().stem
tokens = re.finditer('[a-z]+', text.lower())
for offset, match in enumerate(tokens):
# Get the raw token.
unstemmed = match.group(0)
yield { # Emit the token.
'stemmed': stem(unstemmed),
'unstemmed': unstemmed,
'offset': offset
}
def window(seq, n=2):
"""
Yield a sliding window over an iterable.
Args:
seq (iter): The sequence.
n (int): The window width.
Yields:
tuple: The next window.
"""
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for token in it:
result = result[1:] + (token,)
yield result
|
davidmcclure/textplot | textplot/utils.py | window | python | def window(seq, n=2):
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for token in it:
result = result[1:] + (token,)
yield result | Yield a sliding window over an iterable.
Args:
seq (iter): The sequence.
n (int): The window width.
Yields:
tuple: The next window. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/utils.py#L56-L77 | null |
import re
import numpy as np
import functools
from collections import OrderedDict
from nltk.stem import PorterStemmer
from itertools import islice
def tokenize(text):
"""
Yield tokens.
Args:
text (str): The original text.
Yields:
dict: The next token.
"""
stem = PorterStemmer().stem
tokens = re.finditer('[a-z]+', text.lower())
for offset, match in enumerate(tokens):
# Get the raw token.
unstemmed = match.group(0)
yield { # Emit the token.
'stemmed': stem(unstemmed),
'unstemmed': unstemmed,
'offset': offset
}
def sort_dict(d, desc=True):
"""
Sort an ordered dictionary by value, descending.
Args:
d (OrderedDict): An ordered dictionary.
desc (bool): If true, sort desc.
Returns:
OrderedDict: The sorted dictionary.
"""
sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)
return OrderedDict(sort)
|
davidmcclure/textplot | textplot/matrix.py | Matrix.set_pair | python | def set_pair(self, term1, term2, value, **kwargs):
key = self.key(term1, term2)
self.keys.update([term1, term2])
self.pairs[key] = value | Set the value for a pair of terms.
Args:
term1 (str)
term2 (str)
value (mixed) | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/matrix.py#L50-L63 | [
"def key(self, term1, term2):\n\n \"\"\"\n Get an order-independent key for a pair of terms.\n\n Args:\n term1 (str)\n term2 (str)\n\n Returns:\n str: The dictionary key.\n \"\"\"\n\n return tuple(sorted((term1, term2)))\n"
] | class Matrix:
def __init__(self):
"""
Initialize the underlying dictionary.
"""
self.clear()
def clear(self):
"""
Reset the pair mappings and key set.
"""
self.keys = set()
self.pairs = {}
def key(self, term1, term2):
"""
Get an order-independent key for a pair of terms.
Args:
term1 (str)
term2 (str)
Returns:
str: The dictionary key.
"""
return tuple(sorted((term1, term2)))
def get_pair(self, term1, term2):
"""
Get the value for a pair of terms.
Args:
term1 (str)
term2 (str)
Returns:
The stored value.
"""
key = self.key(term1, term2)
return self.pairs.get(key, None)
def index(self, text, terms=None, **kwargs):
"""
Index all term pair distances.
Args:
text (Text): The source text.
terms (list): Terms to index.
"""
self.clear()
# By default, use all terms.
terms = terms or text.terms.keys()
pairs = combinations(terms, 2)
count = comb(len(terms), 2)
for t1, t2 in bar(pairs, expected_size=count, every=1000):
# Set the Bray-Curtis distance.
score = text.score_braycurtis(t1, t2, **kwargs)
self.set_pair(t1, t2, score)
def anchored_pairs(self, anchor):
"""
Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order.
"""
pairs = OrderedDict()
for term in self.keys:
score = self.get_pair(anchor, term)
if score: pairs[term] = score
return utils.sort_dict(pairs)
|
davidmcclure/textplot | textplot/matrix.py | Matrix.get_pair | python | def get_pair(self, term1, term2):
key = self.key(term1, term2)
return self.pairs.get(key, None) | Get the value for a pair of terms.
Args:
term1 (str)
term2 (str)
Returns:
The stored value. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/matrix.py#L66-L80 | [
"def key(self, term1, term2):\n\n \"\"\"\n Get an order-independent key for a pair of terms.\n\n Args:\n term1 (str)\n term2 (str)\n\n Returns:\n str: The dictionary key.\n \"\"\"\n\n return tuple(sorted((term1, term2)))\n"
] | class Matrix:
def __init__(self):
"""
Initialize the underlying dictionary.
"""
self.clear()
def clear(self):
"""
Reset the pair mappings and key set.
"""
self.keys = set()
self.pairs = {}
def key(self, term1, term2):
"""
Get an order-independent key for a pair of terms.
Args:
term1 (str)
term2 (str)
Returns:
str: The dictionary key.
"""
return tuple(sorted((term1, term2)))
def set_pair(self, term1, term2, value, **kwargs):
"""
Set the value for a pair of terms.
Args:
term1 (str)
term2 (str)
value (mixed)
"""
key = self.key(term1, term2)
self.keys.update([term1, term2])
self.pairs[key] = value
def index(self, text, terms=None, **kwargs):
"""
Index all term pair distances.
Args:
text (Text): The source text.
terms (list): Terms to index.
"""
self.clear()
# By default, use all terms.
terms = terms or text.terms.keys()
pairs = combinations(terms, 2)
count = comb(len(terms), 2)
for t1, t2 in bar(pairs, expected_size=count, every=1000):
# Set the Bray-Curtis distance.
score = text.score_braycurtis(t1, t2, **kwargs)
self.set_pair(t1, t2, score)
def anchored_pairs(self, anchor):
"""
Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order.
"""
pairs = OrderedDict()
for term in self.keys:
score = self.get_pair(anchor, term)
if score: pairs[term] = score
return utils.sort_dict(pairs)
|
davidmcclure/textplot | textplot/matrix.py | Matrix.index | python | def index(self, text, terms=None, **kwargs):
self.clear()
# By default, use all terms.
terms = terms or text.terms.keys()
pairs = combinations(terms, 2)
count = comb(len(terms), 2)
for t1, t2 in bar(pairs, expected_size=count, every=1000):
# Set the Bray-Curtis distance.
score = text.score_braycurtis(t1, t2, **kwargs)
self.set_pair(t1, t2, score) | Index all term pair distances.
Args:
text (Text): The source text.
terms (list): Terms to index. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/matrix.py#L83-L105 | [
"def clear(self):\n\n \"\"\"\n Reset the pair mappings and key set.\n \"\"\"\n\n self.keys = set()\n self.pairs = {}\n",
"def set_pair(self, term1, term2, value, **kwargs):\n\n \"\"\"\n Set the value for a pair of terms.\n\n Args:\n term1 (str)\n term2 (str)\n value (mixed)\n \"\"\"\n\n key = self.key(term1, term2)\n self.keys.update([term1, term2])\n self.pairs[key] = value\n",
"def score_braycurtis(self, term1, term2, **kwargs):\n\n \"\"\"\n Compute a weighting score based on the \"City Block\" distance between\n the kernel density estimates of two terms.\n\n Args:\n term1 (str)\n term2 (str)\n\n Returns: float\n \"\"\"\n\n t1_kde = self.kde(term1, **kwargs)\n t2_kde = self.kde(term2, **kwargs)\n\n return 1-distance.braycurtis(t1_kde, t2_kde)\n"
] | class Matrix:
def __init__(self):
"""
Initialize the underlying dictionary.
"""
self.clear()
def clear(self):
"""
Reset the pair mappings and key set.
"""
self.keys = set()
self.pairs = {}
def key(self, term1, term2):
"""
Get an order-independent key for a pair of terms.
Args:
term1 (str)
term2 (str)
Returns:
str: The dictionary key.
"""
return tuple(sorted((term1, term2)))
def set_pair(self, term1, term2, value, **kwargs):
"""
Set the value for a pair of terms.
Args:
term1 (str)
term2 (str)
value (mixed)
"""
key = self.key(term1, term2)
self.keys.update([term1, term2])
self.pairs[key] = value
def get_pair(self, term1, term2):
"""
Get the value for a pair of terms.
Args:
term1 (str)
term2 (str)
Returns:
The stored value.
"""
key = self.key(term1, term2)
return self.pairs.get(key, None)
def anchored_pairs(self, anchor):
"""
Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order.
"""
pairs = OrderedDict()
for term in self.keys:
score = self.get_pair(anchor, term)
if score: pairs[term] = score
return utils.sort_dict(pairs)
|
davidmcclure/textplot | textplot/matrix.py | Matrix.anchored_pairs | python | def anchored_pairs(self, anchor):
pairs = OrderedDict()
for term in self.keys:
score = self.get_pair(anchor, term)
if score: pairs[term] = score
return utils.sort_dict(pairs) | Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/matrix.py#L108-L126 | [
"def sort_dict(d, desc=True):\n\n \"\"\"\n Sort an ordered dictionary by value, descending.\n\n Args:\n d (OrderedDict): An ordered dictionary.\n desc (bool): If true, sort desc.\n\n Returns:\n OrderedDict: The sorted dictionary.\n \"\"\"\n\n sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)\n return OrderedDict(sort)\n",
"def get_pair(self, term1, term2):\n\n \"\"\"\n Get the value for a pair of terms.\n\n Args:\n term1 (str)\n term2 (str)\n\n Returns:\n The stored value.\n \"\"\"\n\n key = self.key(term1, term2)\n return self.pairs.get(key, None)\n"
] | class Matrix:
def __init__(self):
"""
Initialize the underlying dictionary.
"""
self.clear()
def clear(self):
"""
Reset the pair mappings and key set.
"""
self.keys = set()
self.pairs = {}
def key(self, term1, term2):
"""
Get an order-independent key for a pair of terms.
Args:
term1 (str)
term2 (str)
Returns:
str: The dictionary key.
"""
return tuple(sorted((term1, term2)))
def set_pair(self, term1, term2, value, **kwargs):
"""
Set the value for a pair of terms.
Args:
term1 (str)
term2 (str)
value (mixed)
"""
key = self.key(term1, term2)
self.keys.update([term1, term2])
self.pairs[key] = value
def get_pair(self, term1, term2):
"""
Get the value for a pair of terms.
Args:
term1 (str)
term2 (str)
Returns:
The stored value.
"""
key = self.key(term1, term2)
return self.pairs.get(key, None)
def index(self, text, terms=None, **kwargs):
"""
Index all term pair distances.
Args:
text (Text): The source text.
terms (list): Terms to index.
"""
self.clear()
# By default, use all terms.
terms = terms or text.terms.keys()
pairs = combinations(terms, 2)
count = comb(len(terms), 2)
for t1, t2 in bar(pairs, expected_size=count, every=1000):
# Set the Bray-Curtis distance.
score = text.score_braycurtis(t1, t2, **kwargs)
self.set_pair(t1, t2, score)
|
davidmcclure/textplot | textplot/text.py | Text.from_file | python | def from_file(cls, path):
with open(path, 'r', errors='replace') as f:
return cls(f.read()) | Create a text from a file.
Args:
path (str): The file path. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L22-L32 | null | class Text:
@classmethod
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.load_stopwords | python | def load_stopwords(self, path):
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
) | Load a set of stopwords.
Args:
path (str): The stopwords file path. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L50-L69 | null | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.tokenize | python | def tokenize(self):
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset']) | Tokenize the text. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L72-L95 | [
"def tokenize(text):\n\n \"\"\"\n Yield tokens.\n\n Args:\n text (str): The original text.\n\n Yields:\n dict: The next token.\n \"\"\"\n\n stem = PorterStemmer().stem\n tokens = re.finditer('[a-z]+', text.lower())\n\n for offset, match in enumerate(tokens):\n\n # Get the raw token.\n unstemmed = match.group(0)\n\n yield { # Emit the token.\n 'stemmed': stem(unstemmed),\n 'unstemmed': unstemmed,\n 'offset': offset\n }\n"
] | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.term_counts | python | def term_counts(self):
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts) | Returns:
OrderedDict: An ordered dictionary of term counts. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L98-L109 | [
"def sort_dict(d, desc=True):\n\n \"\"\"\n Sort an ordered dictionary by value, descending.\n\n Args:\n d (OrderedDict): An ordered dictionary.\n desc (bool): If true, sort desc.\n\n Returns:\n OrderedDict: The sorted dictionary.\n \"\"\"\n\n sort = sorted(d.items(), key=lambda x: x[1], reverse=desc)\n return OrderedDict(sort)\n"
] | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.term_count_buckets | python | def term_count_buckets(self):
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets | Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L112-L125 | [
"def term_counts(self):\n\n \"\"\"\n Returns:\n OrderedDict: An ordered dictionary of term counts.\n \"\"\"\n\n counts = OrderedDict()\n for term in self.terms:\n counts[term] = len(self.terms[term])\n\n return utils.sort_dict(counts)\n"
] | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.most_frequent_terms | python | def most_frequent_terms(self, depth):
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket)) | Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L128-L152 | [
"def term_counts(self):\n\n \"\"\"\n Returns:\n OrderedDict: An ordered dictionary of term counts.\n \"\"\"\n\n counts = OrderedDict()\n for term in self.terms:\n counts[term] = len(self.terms[term])\n\n return utils.sort_dict(counts)\n",
"def term_count_buckets(self):\n\n \"\"\"\n Returns:\n dict: A dictionary that maps occurrence counts to the terms that\n appear that many times in the text.\n \"\"\"\n\n buckets = {}\n for term, count in self.term_counts().items():\n if count in buckets: buckets[count].append(term)\n else: buckets[count] = [term]\n\n return buckets\n"
] | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.unstem | python | def unstem(self, term):
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0] | Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L155-L172 | null | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.kde | python | def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples) | Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L176-L202 | null | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.score_intersect | python | def score_intersect(self, term1, term2, **kwargs):
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap) | Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L205-L223 | null | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.score_cosine | python | def score_cosine(self, term1, term2, **kwargs):
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde) | Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L226-L242 | null | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.score_braycurtis | python | def score_braycurtis(self, term1, term2, **kwargs):
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde) | Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L245-L261 | null | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def plot_term_kdes(self, words, **kwargs):
"""
Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms.
"""
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show()
|
davidmcclure/textplot | textplot/text.py | Text.plot_term_kdes | python | def plot_term_kdes(self, words, **kwargs):
stem = PorterStemmer().stem
for word in words:
kde = self.kde(stem(word), **kwargs)
plt.plot(kde)
plt.show() | Plot kernel density estimates for multiple words.
Args:
words (list): A list of unstemmed terms. | train | https://github.com/davidmcclure/textplot/blob/889b949a637d99097ecec44ed4bfee53b1964dee/textplot/text.py#L264-L279 | null | class Text:
@classmethod
def from_file(cls, path):
"""
Create a text from a file.
Args:
path (str): The file path.
"""
with open(path, 'r', errors='replace') as f:
return cls(f.read())
def __init__(self, text, stopwords=None):
"""
Store the raw text, tokenize.
Args:
text (str): The raw text string.
stopwords (str): A custom stopwords list path.
"""
self.text = text
self.load_stopwords(stopwords)
self.tokenize()
def load_stopwords(self, path):
"""
Load a set of stopwords.
Args:
path (str): The stopwords file path.
"""
if path:
with open(path) as f:
self.stopwords = set(f.read().splitlines())
else:
self.stopwords = set(
pkgutil
.get_data('textplot', 'data/stopwords.txt')
.decode('utf8')
.splitlines()
)
def tokenize(self):
"""
Tokenize the text.
"""
self.tokens = []
self.terms = OrderedDict()
# Generate tokens.
for token in utils.tokenize(self.text):
# Ignore stopwords.
if token['unstemmed'] in self.stopwords:
self.tokens.append(None)
else:
# Token:
self.tokens.append(token)
# Term:
offsets = self.terms.setdefault(token['stemmed'], [])
offsets.append(token['offset'])
def term_counts(self):
"""
Returns:
OrderedDict: An ordered dictionary of term counts.
"""
counts = OrderedDict()
for term in self.terms:
counts[term] = len(self.terms[term])
return utils.sort_dict(counts)
def term_count_buckets(self):
"""
Returns:
dict: A dictionary that maps occurrence counts to the terms that
appear that many times in the text.
"""
buckets = {}
for term, count in self.term_counts().items():
if count in buckets: buckets[count].append(term)
else: buckets[count] = [term]
return buckets
def most_frequent_terms(self, depth):
"""
Get the X most frequent terms in the text, and then probe down to get
any other terms that have the same count as the last term.
Args:
depth (int): The number of terms.
Returns:
set: The set of frequent terms.
"""
counts = self.term_counts()
# Get the top X terms and the instance count of the last word.
top_terms = set(list(counts.keys())[:depth])
end_count = list(counts.values())[:depth][-1]
# Merge in all other words with that appear that number of times, so
# that we don't truncate the last bucket - eg, half of the words that
# appear 5 times, but not the other half.
bucket = self.term_count_buckets()[end_count]
return top_terms.union(set(bucket))
def unstem(self, term):
"""
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
"""
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
@lru_cache(maxsize=None)
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
"""
Estimate the kernel density of the instances of term in the text.
Args:
term (str): A stemmed term.
bandwidth (int): The kernel bandwidth.
samples (int): The number of evenly-spaced sample points.
kernel (str): The kernel function.
Returns:
np.array: The density estimate.
"""
# Get the offsets of the term instances.
terms = np.array(self.terms[term])[:, np.newaxis]
# Fit the density estimator on the terms.
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(terms)
# Score an evely-spaced array of samples.
x_axis = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
scores = kde.score_samples(x_axis)
# Scale the scores to integrate to 1.
return np.exp(scores) * (len(self.tokens) / samples)
def score_intersect(self, term1, term2, **kwargs):
"""
Compute the geometric area of the overlap between the kernel density
estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
# Integrate the overlap.
overlap = np.minimum(t1_kde, t2_kde)
return np.trapz(overlap)
def score_cosine(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.cosine(t1_kde, t2_kde)
def score_braycurtis(self, term1, term2, **kwargs):
"""
Compute a weighting score based on the "City Block" distance between
the kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
"""
t1_kde = self.kde(term1, **kwargs)
t2_kde = self.kde(term2, **kwargs)
return 1-distance.braycurtis(t1_kde, t2_kde)
|
mkouhei/bootstrap-py | bootstrap_py/control.py | _pp | python | def _pp(dict_data):
for key, val in dict_data.items():
# pylint: disable=superfluous-parens
print('{0:<11}: {1}'.format(key, val)) | Pretty print. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L11-L15 | null | # -*- coding: utf-8 -*-
"""bootstrap_py.control."""
import os
import sys
from bootstrap_py import package, pypi
from bootstrap_py.classifiers import Classifiers
from bootstrap_py.vcs import VCS
from bootstrap_py.exceptions import Conflict
def retreive_metadata():
"""Retrieve metadata.
:rtype: bootstrap_py.classifiers.Classifiers
:return: Classifiers()
"""
return Classifiers()
def print_licences(params, metadata):
"""Print licenses.
:param argparse.Namespace params: parameter
:param bootstrap_py.classifier.Classifiers metadata: package metadata
"""
if hasattr(params, 'licenses'):
if params.licenses:
_pp(metadata.licenses_desc())
sys.exit(0)
def check_repository_existence(params):
"""Check repository existence.
:param argparse.Namespace params: parameters
"""
repodir = os.path.join(params.outdir, params.name)
if os.path.isdir(repodir):
raise Conflict(
'Package repository "{0}" has already exists.'.format(repodir))
def check_package_existence(params):
"""Check package existence.
:param argparse.Namespace params: parameters
"""
if not params.no_check:
pypi.package_existent(params.name)
def generate_package(params):
"""Generate package repository.
:param argparse.Namespace params: parameters
"""
pkg_data = package.PackageData(params)
pkg_tree = package.PackageTree(pkg_data)
pkg_tree.generate()
pkg_tree.move()
VCS(os.path.join(pkg_tree.outdir, pkg_tree.name), pkg_tree.pkg_data)
|
mkouhei/bootstrap-py | bootstrap_py/control.py | print_licences | python | def print_licences(params, metadata):
if hasattr(params, 'licenses'):
if params.licenses:
_pp(metadata.licenses_desc())
sys.exit(0) | Print licenses.
:param argparse.Namespace params: parameter
:param bootstrap_py.classifier.Classifiers metadata: package metadata | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L27-L36 | [
"def _pp(dict_data):\n \"\"\"Pretty print.\"\"\"\n for key, val in dict_data.items():\n # pylint: disable=superfluous-parens\n print('{0:<11}: {1}'.format(key, val))\n",
"def licenses_desc(self):\n \"\"\"Remove prefix.\"\"\"\n return {self._acronym_lic(l): l.split(self.prefix_lic)[1]\n for l in self.resp_text.split('\\n')\n if l.startswith(self.prefix_lic)}\n"
] | # -*- coding: utf-8 -*-
"""bootstrap_py.control."""
import os
import sys
from bootstrap_py import package, pypi
from bootstrap_py.classifiers import Classifiers
from bootstrap_py.vcs import VCS
from bootstrap_py.exceptions import Conflict
def _pp(dict_data):
"""Pretty print."""
for key, val in dict_data.items():
# pylint: disable=superfluous-parens
print('{0:<11}: {1}'.format(key, val))
def retreive_metadata():
"""Retrieve metadata.
:rtype: bootstrap_py.classifiers.Classifiers
:return: Classifiers()
"""
return Classifiers()
def check_repository_existence(params):
"""Check repository existence.
:param argparse.Namespace params: parameters
"""
repodir = os.path.join(params.outdir, params.name)
if os.path.isdir(repodir):
raise Conflict(
'Package repository "{0}" has already exists.'.format(repodir))
def check_package_existence(params):
"""Check package existence.
:param argparse.Namespace params: parameters
"""
if not params.no_check:
pypi.package_existent(params.name)
def generate_package(params):
"""Generate package repository.
:param argparse.Namespace params: parameters
"""
pkg_data = package.PackageData(params)
pkg_tree = package.PackageTree(pkg_data)
pkg_tree.generate()
pkg_tree.move()
VCS(os.path.join(pkg_tree.outdir, pkg_tree.name), pkg_tree.pkg_data)
|
mkouhei/bootstrap-py | bootstrap_py/control.py | check_repository_existence | python | def check_repository_existence(params):
repodir = os.path.join(params.outdir, params.name)
if os.path.isdir(repodir):
raise Conflict(
'Package repository "{0}" has already exists.'.format(repodir)) | Check repository existence.
:param argparse.Namespace params: parameters | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L39-L47 | null | # -*- coding: utf-8 -*-
"""bootstrap_py.control."""
import os
import sys
from bootstrap_py import package, pypi
from bootstrap_py.classifiers import Classifiers
from bootstrap_py.vcs import VCS
from bootstrap_py.exceptions import Conflict
def _pp(dict_data):
"""Pretty print."""
for key, val in dict_data.items():
# pylint: disable=superfluous-parens
print('{0:<11}: {1}'.format(key, val))
def retreive_metadata():
"""Retrieve metadata.
:rtype: bootstrap_py.classifiers.Classifiers
:return: Classifiers()
"""
return Classifiers()
def print_licences(params, metadata):
"""Print licenses.
:param argparse.Namespace params: parameter
:param bootstrap_py.classifier.Classifiers metadata: package metadata
"""
if hasattr(params, 'licenses'):
if params.licenses:
_pp(metadata.licenses_desc())
sys.exit(0)
def check_package_existence(params):
"""Check package existence.
:param argparse.Namespace params: parameters
"""
if not params.no_check:
pypi.package_existent(params.name)
def generate_package(params):
"""Generate package repository.
:param argparse.Namespace params: parameters
"""
pkg_data = package.PackageData(params)
pkg_tree = package.PackageTree(pkg_data)
pkg_tree.generate()
pkg_tree.move()
VCS(os.path.join(pkg_tree.outdir, pkg_tree.name), pkg_tree.pkg_data)
|
mkouhei/bootstrap-py | bootstrap_py/control.py | generate_package | python | def generate_package(params):
pkg_data = package.PackageData(params)
pkg_tree = package.PackageTree(pkg_data)
pkg_tree.generate()
pkg_tree.move()
VCS(os.path.join(pkg_tree.outdir, pkg_tree.name), pkg_tree.pkg_data) | Generate package repository.
:param argparse.Namespace params: parameters | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L59-L68 | [
"def move(self):\n \"\"\"Move directory from working directory to output directory.\"\"\"\n if not os.path.isdir(self.outdir):\n os.makedirs(self.outdir)\n shutil.move(self.tmpdir, os.path.join(self.outdir, self.name))\n",
"def generate(self):\n \"\"\"Generate package directory tree.\"\"\"\n self._generate_docs()\n self._generate_dirs()\n self._generate_files()\n"
] | # -*- coding: utf-8 -*-
"""bootstrap_py.control."""
import os
import sys
from bootstrap_py import package, pypi
from bootstrap_py.classifiers import Classifiers
from bootstrap_py.vcs import VCS
from bootstrap_py.exceptions import Conflict
def _pp(dict_data):
"""Pretty print."""
for key, val in dict_data.items():
# pylint: disable=superfluous-parens
print('{0:<11}: {1}'.format(key, val))
def retreive_metadata():
"""Retrieve metadata.
:rtype: bootstrap_py.classifiers.Classifiers
:return: Classifiers()
"""
return Classifiers()
def print_licences(params, metadata):
"""Print licenses.
:param argparse.Namespace params: parameter
:param bootstrap_py.classifier.Classifiers metadata: package metadata
"""
if hasattr(params, 'licenses'):
if params.licenses:
_pp(metadata.licenses_desc())
sys.exit(0)
def check_repository_existence(params):
"""Check repository existence.
:param argparse.Namespace params: parameters
"""
repodir = os.path.join(params.outdir, params.name)
if os.path.isdir(repodir):
raise Conflict(
'Package repository "{0}" has already exists.'.format(repodir))
def check_package_existence(params):
"""Check package existence.
:param argparse.Namespace params: parameters
"""
if not params.no_check:
pypi.package_existent(params.name)
|
mkouhei/bootstrap-py | bootstrap_py/docs.py | build_sphinx | python | def build_sphinx(pkg_data, projectdir):
try:
version, _minor_version = pkg_data.version.rsplit('.', 1)
except ValueError:
version = pkg_data.version
args = ' '.join(('sphinx-quickstart',
'--sep',
'-q',
'-p "{name}"',
'-a "{author}"',
'-v "{version}"',
'-r "{release}"',
'-l en',
'--suffix=.rst',
'--master=index',
'--ext-autodoc',
'--ext-viewcode',
'--makefile',
'{projectdir}')).format(name=pkg_data.name,
author=pkg_data.author,
version=version,
release=pkg_data.version,
projectdir=projectdir)
if subprocess.call(shlex.split(args)) == 0:
_touch_gitkeep(projectdir) | Build sphinx documentation.
:rtype: int
:return: subprocess.call return code
:param `bootstrap_py.control.PackageData` pkg_data: package meta data
:param str projectdir: project root directory | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/docs.py#L8-L40 | [
"def _touch_gitkeep(docs_path):\n with open(os.path.join(docs_path,\n 'source',\n '_static',\n '.gitkeep'), 'w') as fobj:\n fobj.write('')\n"
] | # -*- coding: utf-8 -*-
"""bootstrap_py.docs."""
import os.path
import shlex
import subprocess
def _touch_gitkeep(docs_path):
with open(os.path.join(docs_path,
'source',
'_static',
'.gitkeep'), 'w') as fobj:
fobj.write('')
|
mkouhei/bootstrap-py | bootstrap_py/commands.py | setoption | python | def setoption(parser, metadata=None):
parser.add_argument('-v', action='version',
version=__version__)
subparsers = parser.add_subparsers(help='sub commands help')
create_cmd = subparsers.add_parser('create')
create_cmd.add_argument('name',
help='Specify Python package name.')
create_cmd.add_argument('-d', dest='description', action='store',
help='Short description about your package.')
create_cmd.add_argument('-a', dest='author', action='store',
required=True,
help='Python package author name.')
create_cmd.add_argument('-e', dest='email', action='store',
required=True,
help='Python package author email address.')
create_cmd.add_argument('-l', dest='license',
choices=metadata.licenses().keys(),
default='GPLv3+',
help='Specify license. (default: %(default)s)')
create_cmd.add_argument('-s', dest='status',
choices=metadata.status().keys(),
default='Alpha',
help=('Specify development status. '
'(default: %(default)s)'))
create_cmd.add_argument('--no-check', action='store_true',
help='No checking package name in PyPI.')
create_cmd.add_argument('--with-samples', action='store_true',
help='Generate package with sample code.')
group = create_cmd.add_mutually_exclusive_group(required=True)
group.add_argument('-U', dest='username', action='store',
help='Specify GitHub username.')
group.add_argument('-u', dest='url', action='store', type=valid_url,
help='Python package homepage url.')
create_cmd.add_argument('-o', dest='outdir', action='store',
default=os.path.abspath(os.path.curdir),
help='Specify output directory. (default: $PWD)')
list_cmd = subparsers.add_parser('list')
list_cmd.add_argument('-l', dest='licenses', action='store_true',
help='show license choices.') | Set argument parser option. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L12-L51 | null | # -*- coding: utf-8 -*-
"""bootstrap_py.commands."""
import os
import sys
import re
import argparse
from bootstrap_py import control, __prog__, __version__
from bootstrap_py.update import Update
from bootstrap_py.exceptions import BackendFailure, Conflict
def valid_url(url):
"""Validate url.
:rtype: str
:return: url
:param str url: package homepage url.
"""
regex = re.compile(
r'^(?:http)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?))'
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if not regex.match(url):
raise argparse.ArgumentTypeError('"{0}" is invalid url.'.format(url))
return url
def parse_options(metadata):
"""Parse argument options."""
parser = argparse.ArgumentParser(description='%(prog)s usage:',
prog=__prog__)
setoption(parser, metadata=metadata)
return parser
def main():
"""Execute main processes."""
try:
pkg_version = Update()
if pkg_version.updatable():
pkg_version.show_message()
metadata = control.retreive_metadata()
parser = parse_options(metadata)
argvs = sys.argv
if len(argvs) <= 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
control.print_licences(args, metadata)
control.check_repository_existence(args)
control.check_package_existence(args)
control.generate_package(args)
except (RuntimeError, BackendFailure, Conflict) as exc:
sys.stderr.write('{0}\n'.format(exc))
sys.exit(1)
if __name__ == '__main__':
main()
|
mkouhei/bootstrap-py | bootstrap_py/commands.py | valid_url | python | def valid_url(url):
regex = re.compile(
r'^(?:http)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?))'
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if not regex.match(url):
raise argparse.ArgumentTypeError('"{0}" is invalid url.'.format(url))
return url | Validate url.
:rtype: str
:return: url
:param str url: package homepage url. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L54-L69 | null | # -*- coding: utf-8 -*-
"""bootstrap_py.commands."""
import os
import sys
import re
import argparse
from bootstrap_py import control, __prog__, __version__
from bootstrap_py.update import Update
from bootstrap_py.exceptions import BackendFailure, Conflict
def setoption(parser, metadata=None):
"""Set argument parser option."""
parser.add_argument('-v', action='version',
version=__version__)
subparsers = parser.add_subparsers(help='sub commands help')
create_cmd = subparsers.add_parser('create')
create_cmd.add_argument('name',
help='Specify Python package name.')
create_cmd.add_argument('-d', dest='description', action='store',
help='Short description about your package.')
create_cmd.add_argument('-a', dest='author', action='store',
required=True,
help='Python package author name.')
create_cmd.add_argument('-e', dest='email', action='store',
required=True,
help='Python package author email address.')
create_cmd.add_argument('-l', dest='license',
choices=metadata.licenses().keys(),
default='GPLv3+',
help='Specify license. (default: %(default)s)')
create_cmd.add_argument('-s', dest='status',
choices=metadata.status().keys(),
default='Alpha',
help=('Specify development status. '
'(default: %(default)s)'))
create_cmd.add_argument('--no-check', action='store_true',
help='No checking package name in PyPI.')
create_cmd.add_argument('--with-samples', action='store_true',
help='Generate package with sample code.')
group = create_cmd.add_mutually_exclusive_group(required=True)
group.add_argument('-U', dest='username', action='store',
help='Specify GitHub username.')
group.add_argument('-u', dest='url', action='store', type=valid_url,
help='Python package homepage url.')
create_cmd.add_argument('-o', dest='outdir', action='store',
default=os.path.abspath(os.path.curdir),
help='Specify output directory. (default: $PWD)')
list_cmd = subparsers.add_parser('list')
list_cmd.add_argument('-l', dest='licenses', action='store_true',
help='show license choices.')
def parse_options(metadata):
"""Parse argument options."""
parser = argparse.ArgumentParser(description='%(prog)s usage:',
prog=__prog__)
setoption(parser, metadata=metadata)
return parser
def main():
"""Execute main processes."""
try:
pkg_version = Update()
if pkg_version.updatable():
pkg_version.show_message()
metadata = control.retreive_metadata()
parser = parse_options(metadata)
argvs = sys.argv
if len(argvs) <= 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
control.print_licences(args, metadata)
control.check_repository_existence(args)
control.check_package_existence(args)
control.generate_package(args)
except (RuntimeError, BackendFailure, Conflict) as exc:
sys.stderr.write('{0}\n'.format(exc))
sys.exit(1)
if __name__ == '__main__':
main()
|
mkouhei/bootstrap-py | bootstrap_py/commands.py | parse_options | python | def parse_options(metadata):
parser = argparse.ArgumentParser(description='%(prog)s usage:',
prog=__prog__)
setoption(parser, metadata=metadata)
return parser | Parse argument options. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L72-L77 | [
"def setoption(parser, metadata=None):\n \"\"\"Set argument parser option.\"\"\"\n parser.add_argument('-v', action='version',\n version=__version__)\n subparsers = parser.add_subparsers(help='sub commands help')\n create_cmd = subparsers.add_parser('create')\n create_cmd.add_argument('name',\n help='Specify Python package name.')\n create_cmd.add_argument('-d', dest='description', action='store',\n help='Short description about your package.')\n create_cmd.add_argument('-a', dest='author', action='store',\n required=True,\n help='Python package author name.')\n create_cmd.add_argument('-e', dest='email', action='store',\n required=True,\n help='Python package author email address.')\n create_cmd.add_argument('-l', dest='license',\n choices=metadata.licenses().keys(),\n default='GPLv3+',\n help='Specify license. (default: %(default)s)')\n create_cmd.add_argument('-s', dest='status',\n choices=metadata.status().keys(),\n default='Alpha',\n help=('Specify development status. '\n '(default: %(default)s)'))\n create_cmd.add_argument('--no-check', action='store_true',\n help='No checking package name in PyPI.')\n create_cmd.add_argument('--with-samples', action='store_true',\n help='Generate package with sample code.')\n group = create_cmd.add_mutually_exclusive_group(required=True)\n group.add_argument('-U', dest='username', action='store',\n help='Specify GitHub username.')\n group.add_argument('-u', dest='url', action='store', type=valid_url,\n help='Python package homepage url.')\n create_cmd.add_argument('-o', dest='outdir', action='store',\n default=os.path.abspath(os.path.curdir),\n help='Specify output directory. (default: $PWD)')\n list_cmd = subparsers.add_parser('list')\n list_cmd.add_argument('-l', dest='licenses', action='store_true',\n help='show license choices.')\n"
] | # -*- coding: utf-8 -*-
"""bootstrap_py.commands."""
import os
import sys
import re
import argparse
from bootstrap_py import control, __prog__, __version__
from bootstrap_py.update import Update
from bootstrap_py.exceptions import BackendFailure, Conflict
def setoption(parser, metadata=None):
"""Set argument parser option."""
parser.add_argument('-v', action='version',
version=__version__)
subparsers = parser.add_subparsers(help='sub commands help')
create_cmd = subparsers.add_parser('create')
create_cmd.add_argument('name',
help='Specify Python package name.')
create_cmd.add_argument('-d', dest='description', action='store',
help='Short description about your package.')
create_cmd.add_argument('-a', dest='author', action='store',
required=True,
help='Python package author name.')
create_cmd.add_argument('-e', dest='email', action='store',
required=True,
help='Python package author email address.')
create_cmd.add_argument('-l', dest='license',
choices=metadata.licenses().keys(),
default='GPLv3+',
help='Specify license. (default: %(default)s)')
create_cmd.add_argument('-s', dest='status',
choices=metadata.status().keys(),
default='Alpha',
help=('Specify development status. '
'(default: %(default)s)'))
create_cmd.add_argument('--no-check', action='store_true',
help='No checking package name in PyPI.')
create_cmd.add_argument('--with-samples', action='store_true',
help='Generate package with sample code.')
group = create_cmd.add_mutually_exclusive_group(required=True)
group.add_argument('-U', dest='username', action='store',
help='Specify GitHub username.')
group.add_argument('-u', dest='url', action='store', type=valid_url,
help='Python package homepage url.')
create_cmd.add_argument('-o', dest='outdir', action='store',
default=os.path.abspath(os.path.curdir),
help='Specify output directory. (default: $PWD)')
list_cmd = subparsers.add_parser('list')
list_cmd.add_argument('-l', dest='licenses', action='store_true',
help='show license choices.')
def valid_url(url):
"""Validate url.
:rtype: str
:return: url
:param str url: package homepage url.
"""
regex = re.compile(
r'^(?:http)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?))'
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if not regex.match(url):
raise argparse.ArgumentTypeError('"{0}" is invalid url.'.format(url))
return url
def main():
"""Execute main processes."""
try:
pkg_version = Update()
if pkg_version.updatable():
pkg_version.show_message()
metadata = control.retreive_metadata()
parser = parse_options(metadata)
argvs = sys.argv
if len(argvs) <= 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
control.print_licences(args, metadata)
control.check_repository_existence(args)
control.check_package_existence(args)
control.generate_package(args)
except (RuntimeError, BackendFailure, Conflict) as exc:
sys.stderr.write('{0}\n'.format(exc))
sys.exit(1)
if __name__ == '__main__':
main()
|
mkouhei/bootstrap-py | bootstrap_py/commands.py | main | python | def main():
try:
pkg_version = Update()
if pkg_version.updatable():
pkg_version.show_message()
metadata = control.retreive_metadata()
parser = parse_options(metadata)
argvs = sys.argv
if len(argvs) <= 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
control.print_licences(args, metadata)
control.check_repository_existence(args)
control.check_package_existence(args)
control.generate_package(args)
except (RuntimeError, BackendFailure, Conflict) as exc:
sys.stderr.write('{0}\n'.format(exc))
sys.exit(1) | Execute main processes. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L80-L99 | [
"def parse_options(metadata):\n \"\"\"Parse argument options.\"\"\"\n parser = argparse.ArgumentParser(description='%(prog)s usage:',\n prog=__prog__)\n setoption(parser, metadata=metadata)\n return parser\n",
"def retreive_metadata():\n \"\"\"Retrieve metadata.\n\n :rtype: bootstrap_py.classifiers.Classifiers\n :return: Classifiers()\n \"\"\"\n return Classifiers()\n",
"def print_licences(params, metadata):\n \"\"\"Print licenses.\n\n :param argparse.Namespace params: parameter\n :param bootstrap_py.classifier.Classifiers metadata: package metadata\n \"\"\"\n if hasattr(params, 'licenses'):\n if params.licenses:\n _pp(metadata.licenses_desc())\n sys.exit(0)\n",
"def check_repository_existence(params):\n \"\"\"Check repository existence.\n\n :param argparse.Namespace params: parameters\n \"\"\"\n repodir = os.path.join(params.outdir, params.name)\n if os.path.isdir(repodir):\n raise Conflict(\n 'Package repository \"{0}\" has already exists.'.format(repodir))\n",
"def check_package_existence(params):\n \"\"\"Check package existence.\n\n :param argparse.Namespace params: parameters\n \"\"\"\n if not params.no_check:\n pypi.package_existent(params.name)\n",
"def generate_package(params):\n \"\"\"Generate package repository.\n\n :param argparse.Namespace params: parameters\n \"\"\"\n pkg_data = package.PackageData(params)\n pkg_tree = package.PackageTree(pkg_data)\n pkg_tree.generate()\n pkg_tree.move()\n VCS(os.path.join(pkg_tree.outdir, pkg_tree.name), pkg_tree.pkg_data)\n",
"def updatable(self):\n \"\"\"bootstrap-py package updatable?.\"\"\"\n if self.latest_version > self.current_version:\n updatable_version = self.latest_version\n else:\n updatable_version = False\n return updatable_version\n"
] | # -*- coding: utf-8 -*-
"""bootstrap_py.commands."""
import os
import sys
import re
import argparse
from bootstrap_py import control, __prog__, __version__
from bootstrap_py.update import Update
from bootstrap_py.exceptions import BackendFailure, Conflict
def setoption(parser, metadata=None):
"""Set argument parser option."""
parser.add_argument('-v', action='version',
version=__version__)
subparsers = parser.add_subparsers(help='sub commands help')
create_cmd = subparsers.add_parser('create')
create_cmd.add_argument('name',
help='Specify Python package name.')
create_cmd.add_argument('-d', dest='description', action='store',
help='Short description about your package.')
create_cmd.add_argument('-a', dest='author', action='store',
required=True,
help='Python package author name.')
create_cmd.add_argument('-e', dest='email', action='store',
required=True,
help='Python package author email address.')
create_cmd.add_argument('-l', dest='license',
choices=metadata.licenses().keys(),
default='GPLv3+',
help='Specify license. (default: %(default)s)')
create_cmd.add_argument('-s', dest='status',
choices=metadata.status().keys(),
default='Alpha',
help=('Specify development status. '
'(default: %(default)s)'))
create_cmd.add_argument('--no-check', action='store_true',
help='No checking package name in PyPI.')
create_cmd.add_argument('--with-samples', action='store_true',
help='Generate package with sample code.')
group = create_cmd.add_mutually_exclusive_group(required=True)
group.add_argument('-U', dest='username', action='store',
help='Specify GitHub username.')
group.add_argument('-u', dest='url', action='store', type=valid_url,
help='Python package homepage url.')
create_cmd.add_argument('-o', dest='outdir', action='store',
default=os.path.abspath(os.path.curdir),
help='Specify output directory. (default: $PWD)')
list_cmd = subparsers.add_parser('list')
list_cmd.add_argument('-l', dest='licenses', action='store_true',
help='show license choices.')
def valid_url(url):
"""Validate url.
:rtype: str
:return: url
:param str url: package homepage url.
"""
regex = re.compile(
r'^(?:http)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?))'
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if not regex.match(url):
raise argparse.ArgumentTypeError('"{0}" is invalid url.'.format(url))
return url
def parse_options(metadata):
"""Parse argument options."""
parser = argparse.ArgumentParser(description='%(prog)s usage:',
prog=__prog__)
setoption(parser, metadata=metadata)
return parser
if __name__ == '__main__':
main()
|
mkouhei/bootstrap-py | bootstrap_py/package.py | PackageData._set_param | python | def _set_param(self, name, value):
if name == 'status':
setattr(self, name, self.metadata.status().get(value))
elif name == 'license':
setattr(self, name, self.metadata.licenses().get(value))
elif name == 'name':
setattr(self, name, value)
setattr(self, 'module_name', value.replace('-', '_'))
else:
setattr(self, name, value) | Set name:value property to Package object. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L32-L42 | [
"def status(self):\n \"\"\"Development status.\"\"\"\n return {self._acronym_status(l): l for l in self.resp_text.split('\\n')\n if l.startswith(self.prefix_status)}\n",
"def licenses(self):\n \"\"\"OSI Approved license.\"\"\"\n return {self._acronym_lic(l): l for l in self.resp_text.split('\\n')\n if l.startswith(self.prefix_lic)}\n"
] | class PackageData:
"""Package meta data class."""
#: Configured the default "version" of setup.setup().
default_version = '0.1.0'
#: Users should rewrite parameters after they generate Python package.
warning_message = '##### ToDo: Rewrite me #####' # pylint: disable=fixme
def __init__(self, args):
"""Initialize Package."""
self.metadata = Classifiers()
if hasattr(args, '__dict__'):
for name, value in vars(args).items():
self._set_param(name, value)
self._check_or_set_default_params()
def _check_or_set_default_params(self):
"""Check key and set default vaule when it does not exists."""
if not hasattr(self, 'date'):
self._set_param('date', datetime.utcnow().strftime('%Y-%m-%d'))
if not hasattr(self, 'version'):
self._set_param('version', self.default_version)
# pylint: disable=no-member
if not hasattr(self, 'description') or self.description is None:
getattr(self, '_set_param')('description', self.warning_message)
def to_dict(self):
"""Convert the package data to dict."""
return self.__dict__
|
mkouhei/bootstrap-py | bootstrap_py/package.py | PackageData._check_or_set_default_params | python | def _check_or_set_default_params(self):
if not hasattr(self, 'date'):
self._set_param('date', datetime.utcnow().strftime('%Y-%m-%d'))
if not hasattr(self, 'version'):
self._set_param('version', self.default_version)
# pylint: disable=no-member
if not hasattr(self, 'description') or self.description is None:
getattr(self, '_set_param')('description', self.warning_message) | Check key and set default vaule when it does not exists. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L44-L52 | [
"def _set_param(self, name, value):\n \"\"\"Set name:value property to Package object.\"\"\"\n if name == 'status':\n setattr(self, name, self.metadata.status().get(value))\n elif name == 'license':\n setattr(self, name, self.metadata.licenses().get(value))\n elif name == 'name':\n setattr(self, name, value)\n setattr(self, 'module_name', value.replace('-', '_'))\n else:\n setattr(self, name, value)\n"
] | class PackageData:
"""Package meta data class."""
#: Configured the default "version" of setup.setup().
default_version = '0.1.0'
#: Users should rewrite parameters after they generate Python package.
warning_message = '##### ToDo: Rewrite me #####' # pylint: disable=fixme
def __init__(self, args):
"""Initialize Package."""
self.metadata = Classifiers()
if hasattr(args, '__dict__'):
for name, value in vars(args).items():
self._set_param(name, value)
self._check_or_set_default_params()
def _set_param(self, name, value):
"""Set name:value property to Package object."""
if name == 'status':
setattr(self, name, self.metadata.status().get(value))
elif name == 'license':
setattr(self, name, self.metadata.licenses().get(value))
elif name == 'name':
setattr(self, name, value)
setattr(self, 'module_name', value.replace('-', '_'))
else:
setattr(self, name, value)
def to_dict(self):
"""Convert the package data to dict."""
return self.__dict__
|
mkouhei/bootstrap-py | bootstrap_py/package.py | PackageTree.move | python | def move(self):
if not os.path.isdir(self.outdir):
os.makedirs(self.outdir)
shutil.move(self.tmpdir, os.path.join(self.outdir, self.name)) | Move directory from working directory to output directory. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L169-L173 | null | class PackageTree:
"""Package directory tree class."""
#: Jinja2 template name
template_name = 'bootstrap_py'
#: the suffix name of working directory for generating
suffix = '-bootstrap-py'
#: init filename
init = '__init__.py'
#: default permission
exec_perm = 0o755
#: include directories to packages
pkg_dirs = ['{module_name}', '{module_name}/tests']
def __init__(self, pkg_data):
"""Initialize."""
self.cwd = os.getcwd()
self.name = pkg_data.name
self.outdir = os.path.abspath(pkg_data.outdir)
self.tmpdir = tempfile.mkdtemp(suffix=self.suffix)
self.templates = Environment(loader=PackageLoader(self.template_name))
self.pkg_data = pkg_data
def _init_py(self, dir_path):
return os.path.join(self.tmpdir,
dir_path.format(**self.pkg_data.to_dict()),
self.init)
def _sample_py(self, file_path):
return os.path.join(self.tmpdir,
self.pkg_data.module_name,
os.path.splitext(file_path)[0])
def _tmpl_path(self, file_path):
return os.path.join(self.tmpdir, os.path.splitext(file_path)[0])
def _generate_dirs(self):
dirs = [os.path.dirname(tmpl)
for tmpl in self.templates.list_templates()
if tmpl.find('/') > -1] + self.pkg_dirs
for dir_path in dirs:
if not os.path.isdir(
os.path.join(self.tmpdir,
dir_path.format(**self.pkg_data.to_dict()))):
os.makedirs(os.path.join(self.tmpdir,
dir_path.format(
**self.pkg_data.to_dict())),
self.exec_perm)
def _generate_docs(self):
docs_path = os.path.join(self.tmpdir, 'docs')
os.makedirs(docs_path)
build_sphinx(self.pkg_data, docs_path)
def _list_module_dirs(self):
return [dir_path for dir_path in self.pkg_dirs
if dir_path.find('{module_name}') == 0]
def _generate_init(self):
tmpl = self.templates.get_template('__init__.py.j2')
for dir_path in self._list_module_dirs():
if not os.path.isfile(self._init_py(dir_path)):
with open(self._init_py(dir_path), 'w') as fobj:
# pylint: disable=no-member
fobj.write(
tmpl.render(**self.pkg_data.to_dict()) + '\n')
return True
def _generate_file(self, file_path):
tmpl = self.templates.get_template(file_path)
with open(self._tmpl_path(file_path), 'w') as fobj:
fobj.write(
tmpl.render(**self.pkg_data.to_dict()) + '\n')
return True
def _generate_exec_file(self, file_path):
self._generate_file(file_path)
os.chmod(self._tmpl_path(file_path), self.exec_perm)
return True
def _generate_samples(self, file_path):
if not self.pkg_data.with_samples:
return False
tmpl = self.templates.get_template(file_path)
if file_path == 'sample.py.j2':
with open(self._sample_py(file_path), 'w') as fobj:
fobj.write(
tmpl.render(
**self.pkg_data.to_dict()) + '\n')
elif file_path == 'test_sample.py.j2':
with open(self._sample_py(os.path.join('tests',
file_path)), 'w') as fobj:
fobj.write(
tmpl.render(
**self.pkg_data.to_dict()) + '\n')
return True
def _generate_files(self):
generator = (lambda f: guard(
g(self._generate_init, f == '__init__.py.j2'),
g(self._generate_exec_file, f == 'utils/pre-commit.j2', (f,)),
g(self._generate_samples, f.endswith('sample.py.j2'), (f,)),
g(self._generate_file, params=(f,))))
for file_path in self.templates.list_templates():
generator(file_path)
os.chdir(self.tmpdir)
os.symlink('../../README.rst', 'docs/source/README.rst')
os.chdir(self.cwd)
def clean(self):
"""Clean up working directory."""
shutil.rmtree(self.tmpdir)
def generate(self):
"""Generate package directory tree."""
self._generate_docs()
self._generate_dirs()
self._generate_files()
def vcs_init(self):
"""Initialize VCS repository."""
VCS(os.path.join(self.outdir, self.name), self.pkg_data)
|
mkouhei/bootstrap-py | bootstrap_py/package.py | PackageTree.vcs_init | python | def vcs_init(self):
VCS(os.path.join(self.outdir, self.name), self.pkg_data) | Initialize VCS repository. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L185-L187 | null | class PackageTree:
"""Package directory tree class."""
#: Jinja2 template name
template_name = 'bootstrap_py'
#: the suffix name of working directory for generating
suffix = '-bootstrap-py'
#: init filename
init = '__init__.py'
#: default permission
exec_perm = 0o755
#: include directories to packages
pkg_dirs = ['{module_name}', '{module_name}/tests']
def __init__(self, pkg_data):
"""Initialize."""
self.cwd = os.getcwd()
self.name = pkg_data.name
self.outdir = os.path.abspath(pkg_data.outdir)
self.tmpdir = tempfile.mkdtemp(suffix=self.suffix)
self.templates = Environment(loader=PackageLoader(self.template_name))
self.pkg_data = pkg_data
def _init_py(self, dir_path):
return os.path.join(self.tmpdir,
dir_path.format(**self.pkg_data.to_dict()),
self.init)
def _sample_py(self, file_path):
return os.path.join(self.tmpdir,
self.pkg_data.module_name,
os.path.splitext(file_path)[0])
def _tmpl_path(self, file_path):
return os.path.join(self.tmpdir, os.path.splitext(file_path)[0])
def _generate_dirs(self):
dirs = [os.path.dirname(tmpl)
for tmpl in self.templates.list_templates()
if tmpl.find('/') > -1] + self.pkg_dirs
for dir_path in dirs:
if not os.path.isdir(
os.path.join(self.tmpdir,
dir_path.format(**self.pkg_data.to_dict()))):
os.makedirs(os.path.join(self.tmpdir,
dir_path.format(
**self.pkg_data.to_dict())),
self.exec_perm)
def _generate_docs(self):
docs_path = os.path.join(self.tmpdir, 'docs')
os.makedirs(docs_path)
build_sphinx(self.pkg_data, docs_path)
def _list_module_dirs(self):
return [dir_path for dir_path in self.pkg_dirs
if dir_path.find('{module_name}') == 0]
def _generate_init(self):
tmpl = self.templates.get_template('__init__.py.j2')
for dir_path in self._list_module_dirs():
if not os.path.isfile(self._init_py(dir_path)):
with open(self._init_py(dir_path), 'w') as fobj:
# pylint: disable=no-member
fobj.write(
tmpl.render(**self.pkg_data.to_dict()) + '\n')
return True
def _generate_file(self, file_path):
tmpl = self.templates.get_template(file_path)
with open(self._tmpl_path(file_path), 'w') as fobj:
fobj.write(
tmpl.render(**self.pkg_data.to_dict()) + '\n')
return True
def _generate_exec_file(self, file_path):
self._generate_file(file_path)
os.chmod(self._tmpl_path(file_path), self.exec_perm)
return True
def _generate_samples(self, file_path):
if not self.pkg_data.with_samples:
return False
tmpl = self.templates.get_template(file_path)
if file_path == 'sample.py.j2':
with open(self._sample_py(file_path), 'w') as fobj:
fobj.write(
tmpl.render(
**self.pkg_data.to_dict()) + '\n')
elif file_path == 'test_sample.py.j2':
with open(self._sample_py(os.path.join('tests',
file_path)), 'w') as fobj:
fobj.write(
tmpl.render(
**self.pkg_data.to_dict()) + '\n')
return True
def _generate_files(self):
generator = (lambda f: guard(
g(self._generate_init, f == '__init__.py.j2'),
g(self._generate_exec_file, f == 'utils/pre-commit.j2', (f,)),
g(self._generate_samples, f.endswith('sample.py.j2'), (f,)),
g(self._generate_file, params=(f,))))
for file_path in self.templates.list_templates():
generator(file_path)
os.chdir(self.tmpdir)
os.symlink('../../README.rst', 'docs/source/README.rst')
os.chdir(self.cwd)
def move(self):
"""Move directory from working directory to output directory."""
if not os.path.isdir(self.outdir):
os.makedirs(self.outdir)
shutil.move(self.tmpdir, os.path.join(self.outdir, self.name))
def clean(self):
"""Clean up working directory."""
shutil.rmtree(self.tmpdir)
def generate(self):
"""Generate package directory tree."""
self._generate_docs()
self._generate_dirs()
self._generate_files()
|
mkouhei/bootstrap-py | bootstrap_py/update.py | Update.updatable | python | def updatable(self):
if self.latest_version > self.current_version:
updatable_version = self.latest_version
else:
updatable_version = False
return updatable_version | bootstrap-py package updatable?. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/update.py#L29-L35 | null | class Update:
"""bootstrap-py updata checker."""
badge_url = 'https://img.shields.io/pypi/v/bootstrap-py.svg'
name_space = '{http://www.w3.org/2000/svg}'
def __init__(self):
"""Initialize."""
self.current_version = 'v{0}'.format(__version__)
self.latest_version = self._latest_version()
def _latest_version(self):
try:
resp = requests.get(self.badge_url)
except requests.exceptions.ConnectionError:
return '0.0.0'
element_tree = fromstring(resp.text)
return element_tree.findall(
'{ns}g'.format(ns=self.name_space))[1].findall(
'{ns}text'.format(ns=self.name_space))[2].text
def show_message(self):
"""Show message updatable."""
print(
'current version: {current_version}\n'
'latest version : {latest_version}'.format(
current_version=self.current_version,
latest_version=self.latest_version))
|
mkouhei/bootstrap-py | bootstrap_py/update.py | Update.show_message | python | def show_message(self):
print(
'current version: {current_version}\n'
'latest version : {latest_version}'.format(
current_version=self.current_version,
latest_version=self.latest_version)) | Show message updatable. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/update.py#L37-L43 | null | class Update:
"""bootstrap-py updata checker."""
badge_url = 'https://img.shields.io/pypi/v/bootstrap-py.svg'
name_space = '{http://www.w3.org/2000/svg}'
def __init__(self):
"""Initialize."""
self.current_version = 'v{0}'.format(__version__)
self.latest_version = self._latest_version()
def _latest_version(self):
try:
resp = requests.get(self.badge_url)
except requests.exceptions.ConnectionError:
return '0.0.0'
element_tree = fromstring(resp.text)
return element_tree.findall(
'{ns}g'.format(ns=self.name_space))[1].findall(
'{ns}text'.format(ns=self.name_space))[2].text
def updatable(self):
"""bootstrap-py package updatable?."""
if self.latest_version > self.current_version:
updatable_version = self.latest_version
else:
updatable_version = False
return updatable_version
|
mkouhei/bootstrap-py | bootstrap_py/classifiers.py | Classifiers.status | python | def status(self):
return {self._acronym_status(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_status)} | Development status. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L33-L36 | null | class Classifiers:
"""Classifiers."""
#: list_classifiers url
url = 'https://pypi.python.org/pypi?%3Aaction=list_classifiers'
#: prefix status
prefix_status = 'Development Status :: '
#: prefix licences
prefix_lic = 'License :: OSI Approved :: '
timeout = 5.000
def __init__(self):
"""Initialize."""
try:
self.resp_text = requests.get(self.url, timeout=self.timeout).text
except requests.exceptions.ConnectionError:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/classifiers.txt'))
with open(file_path) as fobj:
self.resp_text = fobj.read()
@staticmethod
def _acronym_status(status_statement):
"""Convert development status acronym."""
return status_statement.split(' - ')[1]
def licenses(self):
"""OSI Approved license."""
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)}
def licenses_desc(self):
"""Remove prefix."""
return {self._acronym_lic(l): l.split(self.prefix_lic)[1]
for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)}
def _acronym_lic(self, license_statement):
"""Convert license acronym."""
pat = re.compile(r'\(([\w+\W?\s?]+)\)')
if pat.search(license_statement):
lic = pat.search(license_statement).group(1)
if lic.startswith('CNRI'):
acronym_licence = lic[:4]
else:
acronym_licence = lic.replace(' ', '')
else:
acronym_licence = ''.join(
[w[0]
for w in license_statement.split(self.prefix_lic)[1].split()])
return acronym_licence
|
mkouhei/bootstrap-py | bootstrap_py/classifiers.py | Classifiers.licenses | python | def licenses(self):
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)} | OSI Approved license. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L43-L46 | null | class Classifiers:
"""Classifiers."""
#: list_classifiers url
url = 'https://pypi.python.org/pypi?%3Aaction=list_classifiers'
#: prefix status
prefix_status = 'Development Status :: '
#: prefix licences
prefix_lic = 'License :: OSI Approved :: '
timeout = 5.000
def __init__(self):
"""Initialize."""
try:
self.resp_text = requests.get(self.url, timeout=self.timeout).text
except requests.exceptions.ConnectionError:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/classifiers.txt'))
with open(file_path) as fobj:
self.resp_text = fobj.read()
def status(self):
"""Development status."""
return {self._acronym_status(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_status)}
@staticmethod
def _acronym_status(status_statement):
"""Convert development status acronym."""
return status_statement.split(' - ')[1]
def licenses_desc(self):
"""Remove prefix."""
return {self._acronym_lic(l): l.split(self.prefix_lic)[1]
for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)}
def _acronym_lic(self, license_statement):
"""Convert license acronym."""
pat = re.compile(r'\(([\w+\W?\s?]+)\)')
if pat.search(license_statement):
lic = pat.search(license_statement).group(1)
if lic.startswith('CNRI'):
acronym_licence = lic[:4]
else:
acronym_licence = lic.replace(' ', '')
else:
acronym_licence = ''.join(
[w[0]
for w in license_statement.split(self.prefix_lic)[1].split()])
return acronym_licence
|
mkouhei/bootstrap-py | bootstrap_py/classifiers.py | Classifiers.licenses_desc | python | def licenses_desc(self):
return {self._acronym_lic(l): l.split(self.prefix_lic)[1]
for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)} | Remove prefix. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L48-L52 | null | class Classifiers:
"""Classifiers."""
#: list_classifiers url
url = 'https://pypi.python.org/pypi?%3Aaction=list_classifiers'
#: prefix status
prefix_status = 'Development Status :: '
#: prefix licences
prefix_lic = 'License :: OSI Approved :: '
timeout = 5.000
def __init__(self):
"""Initialize."""
try:
self.resp_text = requests.get(self.url, timeout=self.timeout).text
except requests.exceptions.ConnectionError:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/classifiers.txt'))
with open(file_path) as fobj:
self.resp_text = fobj.read()
def status(self):
"""Development status."""
return {self._acronym_status(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_status)}
@staticmethod
def _acronym_status(status_statement):
"""Convert development status acronym."""
return status_statement.split(' - ')[1]
def licenses(self):
"""OSI Approved license."""
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)}
def _acronym_lic(self, license_statement):
"""Convert license acronym."""
pat = re.compile(r'\(([\w+\W?\s?]+)\)')
if pat.search(license_statement):
lic = pat.search(license_statement).group(1)
if lic.startswith('CNRI'):
acronym_licence = lic[:4]
else:
acronym_licence = lic.replace(' ', '')
else:
acronym_licence = ''.join(
[w[0]
for w in license_statement.split(self.prefix_lic)[1].split()])
return acronym_licence
|
mkouhei/bootstrap-py | bootstrap_py/classifiers.py | Classifiers._acronym_lic | python | def _acronym_lic(self, license_statement):
pat = re.compile(r'\(([\w+\W?\s?]+)\)')
if pat.search(license_statement):
lic = pat.search(license_statement).group(1)
if lic.startswith('CNRI'):
acronym_licence = lic[:4]
else:
acronym_licence = lic.replace(' ', '')
else:
acronym_licence = ''.join(
[w[0]
for w in license_statement.split(self.prefix_lic)[1].split()])
return acronym_licence | Convert license acronym. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L54-L67 | null | class Classifiers:
"""Classifiers."""
#: list_classifiers url
url = 'https://pypi.python.org/pypi?%3Aaction=list_classifiers'
#: prefix status
prefix_status = 'Development Status :: '
#: prefix licences
prefix_lic = 'License :: OSI Approved :: '
timeout = 5.000
def __init__(self):
"""Initialize."""
try:
self.resp_text = requests.get(self.url, timeout=self.timeout).text
except requests.exceptions.ConnectionError:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/classifiers.txt'))
with open(file_path) as fobj:
self.resp_text = fobj.read()
def status(self):
"""Development status."""
return {self._acronym_status(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_status)}
@staticmethod
def _acronym_status(status_statement):
"""Convert development status acronym."""
return status_statement.split(' - ')[1]
def licenses(self):
"""OSI Approved license."""
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)}
def licenses_desc(self):
"""Remove prefix."""
return {self._acronym_lic(l): l.split(self.prefix_lic)[1]
for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)}
|
mkouhei/bootstrap-py | bootstrap_py/pypi.py | package_existent | python | def package_existent(name):
try:
response = requests.get(PYPI_URL.format(name))
if response.ok:
msg = ('[error] "{0}" is registered already in PyPI.\n'
'\tSpecify another package name.').format(name)
raise Conflict(msg)
except (socket.gaierror,
Timeout,
ConnectionError,
HTTPError) as exc:
raise BackendFailure(exc) | Search package.
* :class:`bootstrap_py.exceptions.Conflict` exception occurs
when user specified name has already existed.
* :class:`bootstrap_py.exceptions.BackendFailure` exception occurs
when PyPI service is down.
:param str name: package name | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/pypi.py#L12-L33 | null | # -*- coding: utf-8 -*-
"""bootstrap_py.pypi."""
import requests
import socket
from requests.exceptions import Timeout, HTTPError
from bootstrap_py.exceptions import BackendFailure, Conflict
#: PyPI JSONC API url
PYPI_URL = 'https://pypi.org/pypi/{0}/json'
|
mkouhei/bootstrap-py | bootstrap_py/vcs.py | VCS._config | python | def _config(self):
cfg_wr = self.repo.config_writer()
cfg_wr.add_section('user')
cfg_wr.set_value('user', 'name', self.metadata.author)
cfg_wr.set_value('user', 'email', self.metadata.email)
cfg_wr.release() | Execute git config. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/vcs.py#L35-L41 | null | class VCS:
"""VCS class."""
def __init__(self, repo_dir, metadata):
"""Initialize."""
self.metadata = metadata
self.repo = git.Repo.init(os.path.join(repo_dir))
self._config()
self._add_index()
self._initial_commit()
if hasattr(self.metadata, 'username') and self.metadata.username:
self._remote_add()
# work around: git.Repo.init write ref to .git/HEAD without line feed.
with open(os.path.join(repo_dir, '.git/HEAD')) as fobj:
data = fobj.read()
if data.rfind('\n') == -1:
with open(os.path.join(repo_dir, '.git/HEAD'), 'a') as fobj:
fobj.write('\n')
# adds pre-commit hook
os.symlink('../../utils/pre-commit',
os.path.join(repo_dir, '.git/hooks/pre-commit'))
def _add_index(self):
"""Execute git add ."""
self.repo.index.add(self.repo.untracked_files)
def _initial_commit(self):
"""Initialize commit."""
self.repo.index.commit('Initial commit.')
def _remote_add(self):
"""Execute git remote add."""
self.repo.create_remote(
'origin',
'git@github.com:{username}/{repo}.git'.format(
username=self.metadata.username,
repo=self.metadata.name))
|
mkouhei/bootstrap-py | bootstrap_py/vcs.py | VCS._remote_add | python | def _remote_add(self):
self.repo.create_remote(
'origin',
'git@github.com:{username}/{repo}.git'.format(
username=self.metadata.username,
repo=self.metadata.name)) | Execute git remote add. | train | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/vcs.py#L47-L53 | null | class VCS:
"""VCS class."""
def __init__(self, repo_dir, metadata):
"""Initialize."""
self.metadata = metadata
self.repo = git.Repo.init(os.path.join(repo_dir))
self._config()
self._add_index()
self._initial_commit()
if hasattr(self.metadata, 'username') and self.metadata.username:
self._remote_add()
# work around: git.Repo.init write ref to .git/HEAD without line feed.
with open(os.path.join(repo_dir, '.git/HEAD')) as fobj:
data = fobj.read()
if data.rfind('\n') == -1:
with open(os.path.join(repo_dir, '.git/HEAD'), 'a') as fobj:
fobj.write('\n')
# adds pre-commit hook
os.symlink('../../utils/pre-commit',
os.path.join(repo_dir, '.git/hooks/pre-commit'))
def _add_index(self):
"""Execute git add ."""
self.repo.index.add(self.repo.untracked_files)
def _config(self):
"""Execute git config."""
cfg_wr = self.repo.config_writer()
cfg_wr.add_section('user')
cfg_wr.set_value('user', 'name', self.metadata.author)
cfg_wr.set_value('user', 'email', self.metadata.email)
cfg_wr.release()
def _initial_commit(self):
"""Initialize commit."""
self.repo.index.commit('Initial commit.')
|
palantir/typedjsonrpc | typedjsonrpc/errors.py | InternalError.from_error | python | def from_error(exc_info, json_encoder, debug_url=None):
exc = exc_info[1]
data = exc.__dict__.copy()
for key, value in data.items():
try:
json_encoder.encode(value)
except TypeError:
data[key] = repr(value)
data["traceback"] = "".join(traceback.format_exception(*exc_info))
if debug_url is not None:
data["debug_url"] = debug_url
return InternalError(data) | Wraps another Exception in an InternalError.
:param exc_info: The exception info for the wrapped exception
:type exc_info: (type, object, traceback)
:type json_encoder: json.JSONEncoder
:type debug_url: str | None
:rtype: InternalError
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0
Stringifies non-JSON-serializable objects | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/errors.py#L99-L122 | null | class InternalError(Error):
"""Internal JSON-RPC error.
.. versionadded:: 0.1.0
"""
code = -32603
message = "Internal error"
status_code = 500
@staticmethod
|
palantir/typedjsonrpc | typedjsonrpc/parameter_checker.py | validate_params_match | python | def validate_params_match(method, parameters):
argspec = inspect.getargspec(method) # pylint: disable=deprecated-method
default_length = len(argspec.defaults) if argspec.defaults is not None else 0
if isinstance(parameters, list):
if len(parameters) > len(argspec.args) and argspec.varargs is None:
raise InvalidParamsError("Too many parameters")
remaining_parameters = len(argspec.args) - len(parameters)
if remaining_parameters > default_length:
raise InvalidParamsError("Not enough parameters")
elif isinstance(parameters, dict):
missing_parameters = [key for key in argspec.args if key not in parameters]
default_parameters = set(argspec.args[len(argspec.args) - default_length:])
for key in missing_parameters:
if key not in default_parameters:
raise InvalidParamsError("Parameter {} has not been satisfied".format(key))
extra_params = [key for key in parameters if key not in argspec.args]
if len(extra_params) > 0 and argspec.keywords is None:
raise InvalidParamsError("Too many parameters") | Validates that the given parameters are exactly the method's declared parameters.
:param method: The method to be called
:type method: function
:param parameters: The parameters to use in the call
:type parameters: dict[str, object] | list[object] | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/parameter_checker.py#L27-L55 | null | # coding: utf-8
#
# Copyright 2015 Palantir Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for checking parameter declarations and parameter types."""
from __future__ import absolute_import, division, print_function
import inspect
import six
from .errors import InvalidParamsError, InvalidReturnTypeError
def check_types(parameters, parameter_types, strict_floats):
"""Checks that the given parameters have the correct types.
:param parameters: List of (name, value) pairs of the given parameters
:type parameters: dict[str, object]
:param parameter_types: Parameter type by name.
:type parameter_types: dict[str, type]
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool
"""
for name, parameter_type in parameter_types.items():
if name not in parameters:
raise InvalidParamsError("Parameter '{}' is missing.".format(name))
if not _is_instance(parameters[name], parameter_type, strict_floats):
raise InvalidParamsError("Value '{}' for parameter '{}' is not of expected type {}."
.format(parameters[name], name, parameter_type))
def check_type_declaration(parameter_names, parameter_types):
"""Checks that exactly the given parameter names have declared types.
:param parameter_names: The names of the parameters in the method declaration
:type parameter_names: list[str]
:param parameter_types: Parameter type by name
:type parameter_types: dict[str, type]
"""
if len(parameter_names) != len(parameter_types):
raise Exception("Number of method parameters ({}) does not match number of "
"declared types ({})"
.format(len(parameter_names), len(parameter_types)))
for parameter_name in parameter_names:
if parameter_name not in parameter_types:
raise Exception("Parameter '{}' does not have a declared type".format(parameter_name))
def check_return_type(value, expected_type, strict_floats):
"""Checks that the given return value has the correct type.
:param value: Value returned by the method
:type value: object
:param expected_type: Expected return type
:type expected_type: type
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool
"""
if expected_type is None:
if value is not None:
raise InvalidReturnTypeError("Returned value is '{}' but None was expected"
.format(value))
elif not _is_instance(value, expected_type, strict_floats):
raise InvalidReturnTypeError("Type of return value '{}' does not match expected type {}"
.format(value, expected_type))
def _is_instance(value, expected_type, strict_floats):
if expected_type is float and not strict_floats:
return isinstance(value, (six.integer_types, float))
if expected_type in six.integer_types:
return isinstance(value, six.integer_types)
return isinstance(value, expected_type)
|
palantir/typedjsonrpc | typedjsonrpc/parameter_checker.py | check_types | python | def check_types(parameters, parameter_types, strict_floats):
for name, parameter_type in parameter_types.items():
if name not in parameters:
raise InvalidParamsError("Parameter '{}' is missing.".format(name))
if not _is_instance(parameters[name], parameter_type, strict_floats):
raise InvalidParamsError("Value '{}' for parameter '{}' is not of expected type {}."
.format(parameters[name], name, parameter_type)) | Checks that the given parameters have the correct types.
:param parameters: List of (name, value) pairs of the given parameters
:type parameters: dict[str, object]
:param parameter_types: Parameter type by name.
:type parameter_types: dict[str, type]
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/parameter_checker.py#L58-L73 | [
"def _is_instance(value, expected_type, strict_floats):\n if expected_type is float and not strict_floats:\n return isinstance(value, (six.integer_types, float))\n if expected_type in six.integer_types:\n return isinstance(value, six.integer_types)\n\n return isinstance(value, expected_type)\n"
] | # coding: utf-8
#
# Copyright 2015 Palantir Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for checking parameter declarations and parameter types."""
from __future__ import absolute_import, division, print_function
import inspect
import six
from .errors import InvalidParamsError, InvalidReturnTypeError
def validate_params_match(method, parameters):
"""Validates that the given parameters are exactly the method's declared parameters.
:param method: The method to be called
:type method: function
:param parameters: The parameters to use in the call
:type parameters: dict[str, object] | list[object]
"""
argspec = inspect.getargspec(method) # pylint: disable=deprecated-method
default_length = len(argspec.defaults) if argspec.defaults is not None else 0
if isinstance(parameters, list):
if len(parameters) > len(argspec.args) and argspec.varargs is None:
raise InvalidParamsError("Too many parameters")
remaining_parameters = len(argspec.args) - len(parameters)
if remaining_parameters > default_length:
raise InvalidParamsError("Not enough parameters")
elif isinstance(parameters, dict):
missing_parameters = [key for key in argspec.args if key not in parameters]
default_parameters = set(argspec.args[len(argspec.args) - default_length:])
for key in missing_parameters:
if key not in default_parameters:
raise InvalidParamsError("Parameter {} has not been satisfied".format(key))
extra_params = [key for key in parameters if key not in argspec.args]
if len(extra_params) > 0 and argspec.keywords is None:
raise InvalidParamsError("Too many parameters")
def check_type_declaration(parameter_names, parameter_types):
"""Checks that exactly the given parameter names have declared types.
:param parameter_names: The names of the parameters in the method declaration
:type parameter_names: list[str]
:param parameter_types: Parameter type by name
:type parameter_types: dict[str, type]
"""
if len(parameter_names) != len(parameter_types):
raise Exception("Number of method parameters ({}) does not match number of "
"declared types ({})"
.format(len(parameter_names), len(parameter_types)))
for parameter_name in parameter_names:
if parameter_name not in parameter_types:
raise Exception("Parameter '{}' does not have a declared type".format(parameter_name))
def check_return_type(value, expected_type, strict_floats):
"""Checks that the given return value has the correct type.
:param value: Value returned by the method
:type value: object
:param expected_type: Expected return type
:type expected_type: type
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool
"""
if expected_type is None:
if value is not None:
raise InvalidReturnTypeError("Returned value is '{}' but None was expected"
.format(value))
elif not _is_instance(value, expected_type, strict_floats):
raise InvalidReturnTypeError("Type of return value '{}' does not match expected type {}"
.format(value, expected_type))
def _is_instance(value, expected_type, strict_floats):
if expected_type is float and not strict_floats:
return isinstance(value, (six.integer_types, float))
if expected_type in six.integer_types:
return isinstance(value, six.integer_types)
return isinstance(value, expected_type)
|
palantir/typedjsonrpc | typedjsonrpc/parameter_checker.py | check_type_declaration | python | def check_type_declaration(parameter_names, parameter_types):
if len(parameter_names) != len(parameter_types):
raise Exception("Number of method parameters ({}) does not match number of "
"declared types ({})"
.format(len(parameter_names), len(parameter_types)))
for parameter_name in parameter_names:
if parameter_name not in parameter_types:
raise Exception("Parameter '{}' does not have a declared type".format(parameter_name)) | Checks that exactly the given parameter names have declared types.
:param parameter_names: The names of the parameters in the method declaration
:type parameter_names: list[str]
:param parameter_types: Parameter type by name
:type parameter_types: dict[str, type] | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/parameter_checker.py#L76-L90 | null | # coding: utf-8
#
# Copyright 2015 Palantir Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for checking parameter declarations and parameter types."""
from __future__ import absolute_import, division, print_function
import inspect
import six
from .errors import InvalidParamsError, InvalidReturnTypeError
def validate_params_match(method, parameters):
"""Validates that the given parameters are exactly the method's declared parameters.
:param method: The method to be called
:type method: function
:param parameters: The parameters to use in the call
:type parameters: dict[str, object] | list[object]
"""
argspec = inspect.getargspec(method) # pylint: disable=deprecated-method
default_length = len(argspec.defaults) if argspec.defaults is not None else 0
if isinstance(parameters, list):
if len(parameters) > len(argspec.args) and argspec.varargs is None:
raise InvalidParamsError("Too many parameters")
remaining_parameters = len(argspec.args) - len(parameters)
if remaining_parameters > default_length:
raise InvalidParamsError("Not enough parameters")
elif isinstance(parameters, dict):
missing_parameters = [key for key in argspec.args if key not in parameters]
default_parameters = set(argspec.args[len(argspec.args) - default_length:])
for key in missing_parameters:
if key not in default_parameters:
raise InvalidParamsError("Parameter {} has not been satisfied".format(key))
extra_params = [key for key in parameters if key not in argspec.args]
if len(extra_params) > 0 and argspec.keywords is None:
raise InvalidParamsError("Too many parameters")
def check_types(parameters, parameter_types, strict_floats):
"""Checks that the given parameters have the correct types.
:param parameters: List of (name, value) pairs of the given parameters
:type parameters: dict[str, object]
:param parameter_types: Parameter type by name.
:type parameter_types: dict[str, type]
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool
"""
for name, parameter_type in parameter_types.items():
if name not in parameters:
raise InvalidParamsError("Parameter '{}' is missing.".format(name))
if not _is_instance(parameters[name], parameter_type, strict_floats):
raise InvalidParamsError("Value '{}' for parameter '{}' is not of expected type {}."
.format(parameters[name], name, parameter_type))
def check_return_type(value, expected_type, strict_floats):
"""Checks that the given return value has the correct type.
:param value: Value returned by the method
:type value: object
:param expected_type: Expected return type
:type expected_type: type
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool
"""
if expected_type is None:
if value is not None:
raise InvalidReturnTypeError("Returned value is '{}' but None was expected"
.format(value))
elif not _is_instance(value, expected_type, strict_floats):
raise InvalidReturnTypeError("Type of return value '{}' does not match expected type {}"
.format(value, expected_type))
def _is_instance(value, expected_type, strict_floats):
if expected_type is float and not strict_floats:
return isinstance(value, (six.integer_types, float))
if expected_type in six.integer_types:
return isinstance(value, six.integer_types)
return isinstance(value, expected_type)
|
palantir/typedjsonrpc | typedjsonrpc/parameter_checker.py | check_return_type | python | def check_return_type(value, expected_type, strict_floats):
if expected_type is None:
if value is not None:
raise InvalidReturnTypeError("Returned value is '{}' but None was expected"
.format(value))
elif not _is_instance(value, expected_type, strict_floats):
raise InvalidReturnTypeError("Type of return value '{}' does not match expected type {}"
.format(value, expected_type)) | Checks that the given return value has the correct type.
:param value: Value returned by the method
:type value: object
:param expected_type: Expected return type
:type expected_type: type
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/parameter_checker.py#L93-L109 | [
"def _is_instance(value, expected_type, strict_floats):\n if expected_type is float and not strict_floats:\n return isinstance(value, (six.integer_types, float))\n if expected_type in six.integer_types:\n return isinstance(value, six.integer_types)\n\n return isinstance(value, expected_type)\n"
] | # coding: utf-8
#
# Copyright 2015 Palantir Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for checking parameter declarations and parameter types."""
from __future__ import absolute_import, division, print_function
import inspect
import six
from .errors import InvalidParamsError, InvalidReturnTypeError
def validate_params_match(method, parameters):
"""Validates that the given parameters are exactly the method's declared parameters.
:param method: The method to be called
:type method: function
:param parameters: The parameters to use in the call
:type parameters: dict[str, object] | list[object]
"""
argspec = inspect.getargspec(method) # pylint: disable=deprecated-method
default_length = len(argspec.defaults) if argspec.defaults is not None else 0
if isinstance(parameters, list):
if len(parameters) > len(argspec.args) and argspec.varargs is None:
raise InvalidParamsError("Too many parameters")
remaining_parameters = len(argspec.args) - len(parameters)
if remaining_parameters > default_length:
raise InvalidParamsError("Not enough parameters")
elif isinstance(parameters, dict):
missing_parameters = [key for key in argspec.args if key not in parameters]
default_parameters = set(argspec.args[len(argspec.args) - default_length:])
for key in missing_parameters:
if key not in default_parameters:
raise InvalidParamsError("Parameter {} has not been satisfied".format(key))
extra_params = [key for key in parameters if key not in argspec.args]
if len(extra_params) > 0 and argspec.keywords is None:
raise InvalidParamsError("Too many parameters")
def check_types(parameters, parameter_types, strict_floats):
"""Checks that the given parameters have the correct types.
:param parameters: List of (name, value) pairs of the given parameters
:type parameters: dict[str, object]
:param parameter_types: Parameter type by name.
:type parameter_types: dict[str, type]
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool
"""
for name, parameter_type in parameter_types.items():
if name not in parameters:
raise InvalidParamsError("Parameter '{}' is missing.".format(name))
if not _is_instance(parameters[name], parameter_type, strict_floats):
raise InvalidParamsError("Value '{}' for parameter '{}' is not of expected type {}."
.format(parameters[name], name, parameter_type))
def check_type_declaration(parameter_names, parameter_types):
"""Checks that exactly the given parameter names have declared types.
:param parameter_names: The names of the parameters in the method declaration
:type parameter_names: list[str]
:param parameter_types: Parameter type by name
:type parameter_types: dict[str, type]
"""
if len(parameter_names) != len(parameter_types):
raise Exception("Number of method parameters ({}) does not match number of "
"declared types ({})"
.format(len(parameter_names), len(parameter_types)))
for parameter_name in parameter_names:
if parameter_name not in parameter_types:
raise Exception("Parameter '{}' does not have a declared type".format(parameter_name))
def _is_instance(value, expected_type, strict_floats):
if expected_type is float and not strict_floats:
return isinstance(value, (six.integer_types, float))
if expected_type in six.integer_types:
return isinstance(value, six.integer_types)
return isinstance(value, expected_type)
|
palantir/typedjsonrpc | typedjsonrpc/server.py | Server.wsgi_app | python | def wsgi_app(self, environ, start_response):
@_LOCAL_MANAGER.middleware
def _wrapped_app(environ, start_response):
request = Request(environ)
setattr(_local, _CURRENT_REQUEST_KEY, request)
response = self._dispatch_request(request)
return response(environ, start_response)
return _wrapped_app(environ, start_response) | A basic WSGI app | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/server.py#L101-L109 | null | class Server(object):
"""A basic WSGI-compatible server for typedjsonrpc endpoints.
:attribute registry: The registry for this server
:type registry: typedjsonrpc.registry.Registry
.. versionadded:: 0.1.0
.. versionchanged:: 0.4.0 Now returns HTTP status codes
"""
def __init__(self, registry, endpoint=DEFAULT_API_ENDPOINT_NAME):
"""
:param registry: The JSON-RPC registry to use
:type registry: typedjsonrpc.registry.Registry
:param endpoint: The endpoint to publish JSON-RPC endpoints. Default "/api".
:type endpoint: str
"""
self.registry = registry
self._endpoint = endpoint
self._url_map = Map([Rule(endpoint, endpoint=self._endpoint)])
self._before_first_request_funcs = []
self._after_first_request_handled = False
self._before_first_request_lock = Lock()
def _dispatch_request(self, request):
self._try_trigger_before_first_request_funcs()
adapter = self._url_map.bind_to_environ(request.environ)
endpoint, _ = adapter.match()
if endpoint == self._endpoint:
return self._dispatch_jsonrpc_request(request)
else:
abort(404)
def _dispatch_jsonrpc_request(self, request):
json_output = self.registry.dispatch(request)
if json_output is None:
return Response(status=204)
return Response(json_output,
mimetype="application/json",
status=self._determine_status_code(json_output))
def _determine_status_code(self, json_output):
output = self.registry.json_decoder.decode(json_output)
if isinstance(output, list) or "result" in output:
return 200
else:
assert "error" in output, "JSON-RPC is malformed and doesn't contain result or error"
return get_status_code_from_error_code(output["error"]["code"])
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
def run(self, host, port, **options):
"""For debugging purposes, you can run this as a standalone server.
.. WARNING:: **Security vulnerability**
This uses :class:`DebuggedJsonRpcApplication` to assist debugging. If you want to use
this in production, you should run :class:`Server` as a standard WSGI app with
`uWSGI <https://uwsgi-docs.readthedocs.org/en/latest/>`_ or another similar WSGI server.
.. versionadded:: 0.1.0
"""
self.registry.debug = True
debugged = DebuggedJsonRpcApplication(self, evalex=True)
run_simple(host, port, debugged, use_reloader=True, **options)
def _try_trigger_before_first_request_funcs(self): # pylint: disable=C0103
"""Runs each function from ``self.before_first_request_funcs`` once and only once."""
if self._after_first_request_handled:
return
else:
with self._before_first_request_lock:
if self._after_first_request_handled:
return
for func in self._before_first_request_funcs:
func()
self._after_first_request_handled = True
def register_before_first_request(self, func):
"""Registers a function to be called once before the first served request.
:param func: Function called
:type func: () -> object
.. versionadded:: 0.1.0
"""
self._before_first_request_funcs.append(func)
|
palantir/typedjsonrpc | typedjsonrpc/server.py | Server.run | python | def run(self, host, port, **options):
self.registry.debug = True
debugged = DebuggedJsonRpcApplication(self, evalex=True)
run_simple(host, port, debugged, use_reloader=True, **options) | For debugging purposes, you can run this as a standalone server.
.. WARNING:: **Security vulnerability**
This uses :class:`DebuggedJsonRpcApplication` to assist debugging. If you want to use
this in production, you should run :class:`Server` as a standard WSGI app with
`uWSGI <https://uwsgi-docs.readthedocs.org/en/latest/>`_ or another similar WSGI server.
.. versionadded:: 0.1.0 | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/server.py#L114-L127 | null | class Server(object):
"""A basic WSGI-compatible server for typedjsonrpc endpoints.
:attribute registry: The registry for this server
:type registry: typedjsonrpc.registry.Registry
.. versionadded:: 0.1.0
.. versionchanged:: 0.4.0 Now returns HTTP status codes
"""
def __init__(self, registry, endpoint=DEFAULT_API_ENDPOINT_NAME):
"""
:param registry: The JSON-RPC registry to use
:type registry: typedjsonrpc.registry.Registry
:param endpoint: The endpoint to publish JSON-RPC endpoints. Default "/api".
:type endpoint: str
"""
self.registry = registry
self._endpoint = endpoint
self._url_map = Map([Rule(endpoint, endpoint=self._endpoint)])
self._before_first_request_funcs = []
self._after_first_request_handled = False
self._before_first_request_lock = Lock()
def _dispatch_request(self, request):
self._try_trigger_before_first_request_funcs()
adapter = self._url_map.bind_to_environ(request.environ)
endpoint, _ = adapter.match()
if endpoint == self._endpoint:
return self._dispatch_jsonrpc_request(request)
else:
abort(404)
def _dispatch_jsonrpc_request(self, request):
json_output = self.registry.dispatch(request)
if json_output is None:
return Response(status=204)
return Response(json_output,
mimetype="application/json",
status=self._determine_status_code(json_output))
def _determine_status_code(self, json_output):
output = self.registry.json_decoder.decode(json_output)
if isinstance(output, list) or "result" in output:
return 200
else:
assert "error" in output, "JSON-RPC is malformed and doesn't contain result or error"
return get_status_code_from_error_code(output["error"]["code"])
def wsgi_app(self, environ, start_response):
"""A basic WSGI app"""
@_LOCAL_MANAGER.middleware
def _wrapped_app(environ, start_response):
request = Request(environ)
setattr(_local, _CURRENT_REQUEST_KEY, request)
response = self._dispatch_request(request)
return response(environ, start_response)
return _wrapped_app(environ, start_response)
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
def _try_trigger_before_first_request_funcs(self): # pylint: disable=C0103
"""Runs each function from ``self.before_first_request_funcs`` once and only once."""
if self._after_first_request_handled:
return
else:
with self._before_first_request_lock:
if self._after_first_request_handled:
return
for func in self._before_first_request_funcs:
func()
self._after_first_request_handled = True
def register_before_first_request(self, func):
"""Registers a function to be called once before the first served request.
:param func: Function called
:type func: () -> object
.. versionadded:: 0.1.0
"""
self._before_first_request_funcs.append(func)
|
palantir/typedjsonrpc | typedjsonrpc/server.py | Server._try_trigger_before_first_request_funcs | python | def _try_trigger_before_first_request_funcs(self): # pylint: disable=C0103
if self._after_first_request_handled:
return
else:
with self._before_first_request_lock:
if self._after_first_request_handled:
return
for func in self._before_first_request_funcs:
func()
self._after_first_request_handled = True | Runs each function from ``self.before_first_request_funcs`` once and only once. | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/server.py#L129-L139 | null | class Server(object):
"""A basic WSGI-compatible server for typedjsonrpc endpoints.
:attribute registry: The registry for this server
:type registry: typedjsonrpc.registry.Registry
.. versionadded:: 0.1.0
.. versionchanged:: 0.4.0 Now returns HTTP status codes
"""
def __init__(self, registry, endpoint=DEFAULT_API_ENDPOINT_NAME):
"""
:param registry: The JSON-RPC registry to use
:type registry: typedjsonrpc.registry.Registry
:param endpoint: The endpoint to publish JSON-RPC endpoints. Default "/api".
:type endpoint: str
"""
self.registry = registry
self._endpoint = endpoint
self._url_map = Map([Rule(endpoint, endpoint=self._endpoint)])
self._before_first_request_funcs = []
self._after_first_request_handled = False
self._before_first_request_lock = Lock()
def _dispatch_request(self, request):
self._try_trigger_before_first_request_funcs()
adapter = self._url_map.bind_to_environ(request.environ)
endpoint, _ = adapter.match()
if endpoint == self._endpoint:
return self._dispatch_jsonrpc_request(request)
else:
abort(404)
def _dispatch_jsonrpc_request(self, request):
json_output = self.registry.dispatch(request)
if json_output is None:
return Response(status=204)
return Response(json_output,
mimetype="application/json",
status=self._determine_status_code(json_output))
def _determine_status_code(self, json_output):
output = self.registry.json_decoder.decode(json_output)
if isinstance(output, list) or "result" in output:
return 200
else:
assert "error" in output, "JSON-RPC is malformed and doesn't contain result or error"
return get_status_code_from_error_code(output["error"]["code"])
def wsgi_app(self, environ, start_response):
"""A basic WSGI app"""
@_LOCAL_MANAGER.middleware
def _wrapped_app(environ, start_response):
request = Request(environ)
setattr(_local, _CURRENT_REQUEST_KEY, request)
response = self._dispatch_request(request)
return response(environ, start_response)
return _wrapped_app(environ, start_response)
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
def run(self, host, port, **options):
"""For debugging purposes, you can run this as a standalone server.
.. WARNING:: **Security vulnerability**
This uses :class:`DebuggedJsonRpcApplication` to assist debugging. If you want to use
this in production, you should run :class:`Server` as a standard WSGI app with
`uWSGI <https://uwsgi-docs.readthedocs.org/en/latest/>`_ or another similar WSGI server.
.. versionadded:: 0.1.0
"""
self.registry.debug = True
debugged = DebuggedJsonRpcApplication(self, evalex=True)
run_simple(host, port, debugged, use_reloader=True, **options)
def register_before_first_request(self, func):
"""Registers a function to be called once before the first served request.
:param func: Function called
:type func: () -> object
.. versionadded:: 0.1.0
"""
self._before_first_request_funcs.append(func)
|
palantir/typedjsonrpc | typedjsonrpc/server.py | DebuggedJsonRpcApplication.debug_application | python | def debug_application(self, environ, start_response):
adapter = self._debug_map.bind_to_environ(environ)
if adapter.test():
_, args = adapter.match()
return self.handle_debug(environ, start_response, args["traceback_id"])
else:
return super(DebuggedJsonRpcApplication, self).debug_application(environ,
start_response) | Run the application and preserve the traceback frames.
:param environ: The environment which is passed into the wsgi application
:type environ: dict[str, object]
:param start_response: The start_response function of the wsgi application
:type start_response: (str, list[(str, str)]) -> None
:rtype: generator[str]
.. versionadded:: 0.1.0 | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/server.py#L177-L194 | null | class DebuggedJsonRpcApplication(DebuggedApplication):
"""A JSON-RPC-specific debugged application.
This differs from DebuggedApplication since the normal debugger assumes you
are hitting the endpoint from a web browser.
A returned response will be JSON of the form: ``{"traceback_id": <id>}`` which
you can use to hit the endpoint ``http://<host>:<port>/debug/<traceback_id>``.
.. versionadded:: 0.1.0
.. WARNING:: **Security vulnerability**
This should never be used in production because users have arbitrary shell
access in debug mode.
"""
def __init__(self, app, **kwargs):
"""
:param app: The wsgi application to be debugged
:type app: typedjsonrpc.server.Server
:param kwargs: The arguments to pass to the DebuggedApplication
"""
super(DebuggedJsonRpcApplication, self).__init__(app, **kwargs)
self._debug_map = Map([Rule("/debug/<int:traceback_id>", endpoint="debug")])
def handle_debug(self, environ, start_response, traceback_id):
"""Handles the debug endpoint for inspecting previous errors.
:param environ: The environment which is passed into the wsgi application
:type environ: dict[str, object]
:param start_response: The start_response function of the wsgi application
:type start_response: (str, list[(str, str)]) -> NoneType
:param traceback_id: The id of the traceback to inspect
:type traceback_id: int
.. versionadded:: 0.1.0
"""
if traceback_id not in self.app.registry.tracebacks:
abort(404)
self._copy_over_traceback(traceback_id)
traceback = self.tracebacks[traceback_id]
rendered = traceback.render_full(evalex=self.evalex, secret=self.secret)
response = Response(rendered.encode('utf-8', 'replace'),
headers=[('Content-Type', 'text/html; charset=utf-8'),
('X-XSS-Protection', '0')])
return response(environ, start_response)
def _copy_over_traceback(self, traceback_id):
if traceback_id not in self.tracebacks:
traceback = self.app.registry.tracebacks[traceback_id]
self.tracebacks[traceback_id] = traceback
for frame in traceback.frames:
self.frames[frame.id] = frame
|
palantir/typedjsonrpc | typedjsonrpc/server.py | DebuggedJsonRpcApplication.handle_debug | python | def handle_debug(self, environ, start_response, traceback_id):
if traceback_id not in self.app.registry.tracebacks:
abort(404)
self._copy_over_traceback(traceback_id)
traceback = self.tracebacks[traceback_id]
rendered = traceback.render_full(evalex=self.evalex, secret=self.secret)
response = Response(rendered.encode('utf-8', 'replace'),
headers=[('Content-Type', 'text/html; charset=utf-8'),
('X-XSS-Protection', '0')])
return response(environ, start_response) | Handles the debug endpoint for inspecting previous errors.
:param environ: The environment which is passed into the wsgi application
:type environ: dict[str, object]
:param start_response: The start_response function of the wsgi application
:type start_response: (str, list[(str, str)]) -> NoneType
:param traceback_id: The id of the traceback to inspect
:type traceback_id: int
.. versionadded:: 0.1.0 | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/server.py#L196-L216 | null | class DebuggedJsonRpcApplication(DebuggedApplication):
"""A JSON-RPC-specific debugged application.
This differs from DebuggedApplication since the normal debugger assumes you
are hitting the endpoint from a web browser.
A returned response will be JSON of the form: ``{"traceback_id": <id>}`` which
you can use to hit the endpoint ``http://<host>:<port>/debug/<traceback_id>``.
.. versionadded:: 0.1.0
.. WARNING:: **Security vulnerability**
This should never be used in production because users have arbitrary shell
access in debug mode.
"""
def __init__(self, app, **kwargs):
"""
:param app: The wsgi application to be debugged
:type app: typedjsonrpc.server.Server
:param kwargs: The arguments to pass to the DebuggedApplication
"""
super(DebuggedJsonRpcApplication, self).__init__(app, **kwargs)
self._debug_map = Map([Rule("/debug/<int:traceback_id>", endpoint="debug")])
def debug_application(self, environ, start_response):
"""Run the application and preserve the traceback frames.
:param environ: The environment which is passed into the wsgi application
:type environ: dict[str, object]
:param start_response: The start_response function of the wsgi application
:type start_response: (str, list[(str, str)]) -> None
:rtype: generator[str]
.. versionadded:: 0.1.0
"""
adapter = self._debug_map.bind_to_environ(environ)
if adapter.test():
_, args = adapter.match()
return self.handle_debug(environ, start_response, args["traceback_id"])
else:
return super(DebuggedJsonRpcApplication, self).debug_application(environ,
start_response)
def _copy_over_traceback(self, traceback_id):
if traceback_id not in self.tracebacks:
traceback = self.app.registry.tracebacks[traceback_id]
self.tracebacks[traceback_id] = traceback
for frame in traceback.frames:
self.frames[frame.id] = frame
|
palantir/typedjsonrpc | typedjsonrpc/registry.py | Registry.dispatch | python | def dispatch(self, request):
def _wrapped():
messages = self._get_request_messages(request)
results = [self._dispatch_and_handle_errors(message) for message in messages]
non_notification_results = [x for x in results if x is not None]
if len(non_notification_results) == 0:
return None
elif len(messages) == 1:
return non_notification_results[0]
else:
return non_notification_results
result, _ = self._handle_exceptions(_wrapped)
if result is not None:
return self._encode_complete_result(result) | Takes a request and dispatches its data to a jsonrpc method.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: json output of the corresponding method
:rtype: str
.. versionadded:: 0.1.0 | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/registry.py#L95-L118 | [
"def _handle_exceptions(self, method, is_notification=False, msg_id=None):\n try:\n return method(), False\n except Error as exc:\n if not is_notification:\n if self.debug:\n debug_url = self._store_traceback()\n exc.data = {\"message\": exc.data, \"debug_url\": debug_url}\n return Registry._create_error_response(msg_id, exc), True\n except Exception as exc: # pylint: disable=broad-except\n if not is_notification:\n exc_info = sys.exc_info()\n if self.debug:\n debug_url = self._store_traceback()\n else:\n debug_url = None\n exception_message = \"id: {}, debug_url: {}\".format(msg_id, debug_url)\n self._logger.exception(exception_message)\n new_error = InternalError.from_error(exc_info, self.json_encoder, debug_url)\n return Registry._create_error_response(msg_id, new_error), True\n",
"def _encode_complete_result(self, result):\n if isinstance(result, list):\n return '[' + ','.join([self._encode_single_result(res) for res in result]) + ']'\n else:\n return self._encode_single_result(result)\n"
] | class Registry(object):
"""The registry for storing and calling jsonrpc methods.
:attribute debug: Debug option which enables recording of tracebacks
:type debug: bool
:attribute tracebacks: Tracebacks for debugging
:type tracebacks: dict[int, werkzeug.debug.tbtools.Traceback]
.. versionadded:: 0.1.0
"""
json_encoder = json.JSONEncoder()
"""The JSON encoder to use. Defaults to :class:`json.JSONEncoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
json_decoder = json.JSONDecoder()
"""The JSON decoder to use. Defaults to :class:`json.JSONDecoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
def __init__(self,
debug=False,
strict_floats=True):
"""
:param debug: If True, the registry records tracebacks for debugging purposes
:type debug: bool
:param strict_floats: If True, the registry does not allow ints as float parameters
:type strict_floats: bool
.. versionchanged:: 0.4.0 Added strict_floats option
"""
self._name_to_method_info = {}
self._register_describe()
self.debug = debug
self._strict_floats = strict_floats
self._logger = _get_default_logger()
self.tracebacks = {}
def _register_describe(self):
def _describe():
return self.describe()
_describe.__doc__ = self.describe.__doc__
describe_signature = MethodSignature.create([], {}, dict)
self.register("rpc.describe", _describe, describe_signature)
def _dispatch_and_handle_errors(self, msg):
is_notification = isinstance(msg, dict) and "id" not in msg
def _wrapped():
result = self._dispatch_message(msg)
if not is_notification:
return Registry._create_result_response(msg["id"], result)
result, _ = self._handle_exceptions(_wrapped, is_notification, self._get_id_if_known(msg))
return result
def _handle_exceptions(self, method, is_notification=False, msg_id=None):
try:
return method(), False
except Error as exc:
if not is_notification:
if self.debug:
debug_url = self._store_traceback()
exc.data = {"message": exc.data, "debug_url": debug_url}
return Registry._create_error_response(msg_id, exc), True
except Exception as exc: # pylint: disable=broad-except
if not is_notification:
exc_info = sys.exc_info()
if self.debug:
debug_url = self._store_traceback()
else:
debug_url = None
exception_message = "id: {}, debug_url: {}".format(msg_id, debug_url)
self._logger.exception(exception_message)
new_error = InternalError.from_error(exc_info, self.json_encoder, debug_url)
return Registry._create_error_response(msg_id, new_error), True
def _encode_complete_result(self, result):
if isinstance(result, list):
return '[' + ','.join([self._encode_single_result(res) for res in result]) + ']'
else:
return self._encode_single_result(result)
def _encode_single_result(self, result):
msg_id = Registry._get_id_if_known(result)
is_notification = msg_id is None
def _encode():
return self.json_encoder.encode(result)
encoded, is_error = self._handle_exceptions(_encode,
is_notification=is_notification,
msg_id=msg_id)
if is_error:
# Fall back to default because previous encoding didn't work.
return self.json_encoder.encode(encoded)
else:
return encoded
def _store_traceback(self):
traceback = get_current_traceback(skip=1,
show_hidden_frames=False,
ignore_system_exceptions=True)
self.tracebacks[traceback.id] = traceback
return "/debug/{}".format(traceback.id)
@staticmethod
def _get_id_if_known(msg):
if isinstance(msg, dict) and "id" in msg:
return msg["id"]
else:
return None
def _dispatch_message(self, msg):
self._check_request(msg)
method = self._name_to_method_info[msg["method"]].method
params = msg.get("params", [])
parameter_checker.validate_params_match(method, params)
if isinstance(params, list):
result = method(*params)
elif isinstance(params, dict):
result = method(**params)
else:
raise InvalidRequestError("Given params '{}' are neither a list nor a dict."
.format(msg["params"]))
return result
@staticmethod
def _create_result_response(msg_id, result):
return {
"jsonrpc": "2.0",
"id": msg_id,
"result": result,
}
@staticmethod
def _create_error_response(msg_id, exc):
return {
"jsonrpc": "2.0",
"id": msg_id,
"error": exc.as_error_object(),
}
def register(self, name, method, method_signature=None):
"""Registers a method with a given name and signature.
:param name: The name used to register the method
:type name: str
:param method: The method to register
:type method: function
:param method_signature: The method signature for the given function
:type method_signature: MethodSignature | None
.. versionadded:: 0.1.0
"""
if inspect.ismethod(method):
raise Exception("typedjsonrpc does not support making class methods into endpoints")
self._name_to_method_info[name] = MethodInfo(name, method, method_signature)
def method(self, returns, **parameter_types):
"""Syntactic sugar for registering a method
Example:
>>> registry = Registry()
>>> @registry.method(returns=int, x=int, y=int)
... def add(x, y):
... return x + y
:param returns: The method's return type
:type returns: type
:param parameter_types: The types of the method's parameters
:type parameter_types: dict[str, type]
.. versionadded:: 0.1.0
"""
@wrapt.decorator
def type_check_wrapper(method, instance, args, kwargs):
"""Wraps a method so that it is type-checked.
:param method: The method to wrap
:type method: (T) -> U
:return: The result of calling the method with the given parameters
:rtype: U
"""
if instance is not None:
raise Exception("Instance shouldn't be set.")
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
defaults = inspect.getargspec(method).defaults # pylint: disable=deprecated-method
parameters = self._collect_parameters(parameter_names, args, kwargs, defaults)
parameter_checker.check_types(parameters, parameter_types, self._strict_floats)
result = method(*args, **kwargs)
parameter_checker.check_return_type(result, returns, self._strict_floats)
return result
def register_method(method):
"""Registers a method with its fully qualified name.
:param method: The method to register
:type method: function
:return: The original method wrapped into a type-checker
:rtype: function
"""
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
parameter_checker.check_type_declaration(parameter_names, parameter_types)
wrapped_method = type_check_wrapper(method, None, None, None)
fully_qualified_name = "{}.{}".format(method.__module__, method.__name__)
self.register(fully_qualified_name, wrapped_method,
MethodSignature.create(parameter_names, parameter_types, returns))
return wrapped_method
return register_method
@staticmethod
def _collect_parameters(parameter_names, args, kwargs, defaults):
"""Creates a dictionary mapping parameters names to their values in the method call.
:param parameter_names: The method's parameter names
:type parameter_names: list[string]
:param args: *args passed into the method
:type args: list[object]
:param kwargs: **kwargs passed into the method
:type kwargs: dict[string, object]
:param defaults: The method's default values
:type defaults: list[object]
:return: Dictionary mapping parameter names to values
:rtype: dict[string, object]
"""
parameters = {}
if defaults is not None:
zipped_defaults = zip(reversed(parameter_names), reversed(defaults))
for name, default in zipped_defaults:
parameters[name] = default
for name, value in zip(parameter_names, args):
parameters[name] = value
for name, value in kwargs.items():
parameters[name] = value
return parameters
def describe(self):
"""Returns a description of all the methods in the registry.
:return: Description
:rtype: dict[str, object]
.. versionadded:: 0.1.0
"""
return {
"methods": [method_info.describe()
for method_info in sorted(self._name_to_method_info.values())]
}
def _get_request_messages(self, request):
"""Parses the request as a json message.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: The parsed json object
:rtype: dict[str, object]
"""
data = request.get_data(as_text=True)
try:
msg = self.json_decoder.decode(data)
except Exception:
raise ParseError("Could not parse request data '{}'".format(data))
if isinstance(msg, list):
return msg
else:
return [msg]
def _check_request(self, msg):
"""Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object]
"""
if "jsonrpc" not in msg:
raise InvalidRequestError("'\"jsonrpc\": \"2.0\"' must be included.")
if msg["jsonrpc"] != "2.0":
raise InvalidRequestError("'jsonrpc' must be exactly the string '2.0', but it was '{}'."
.format(msg["jsonrpc"]))
if "method" not in msg:
raise InvalidRequestError("No method specified.")
if "id" in msg:
if msg["id"] is None:
raise InvalidRequestError("typedjsonrpc does not allow id to be None.")
if isinstance(msg["id"], float):
raise InvalidRequestError("typedjsonrpc does not support float ids.")
if not isinstance(msg["id"], (six.string_types, six.integer_types)):
raise InvalidRequestError("id must be a string or integer; '{}' is of type {}."
.format(msg["id"], type(msg["id"])))
if msg["method"] not in self._name_to_method_info:
raise MethodNotFoundError("Could not find method '{}'.".format(msg["method"]))
|
palantir/typedjsonrpc | typedjsonrpc/registry.py | Registry.register | python | def register(self, name, method, method_signature=None):
if inspect.ismethod(method):
raise Exception("typedjsonrpc does not support making class methods into endpoints")
self._name_to_method_info[name] = MethodInfo(name, method, method_signature) | Registers a method with a given name and signature.
:param name: The name used to register the method
:type name: str
:param method: The method to register
:type method: function
:param method_signature: The method signature for the given function
:type method_signature: MethodSignature | None
.. versionadded:: 0.1.0 | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/registry.py#L218-L232 | null | class Registry(object):
"""The registry for storing and calling jsonrpc methods.
:attribute debug: Debug option which enables recording of tracebacks
:type debug: bool
:attribute tracebacks: Tracebacks for debugging
:type tracebacks: dict[int, werkzeug.debug.tbtools.Traceback]
.. versionadded:: 0.1.0
"""
json_encoder = json.JSONEncoder()
"""The JSON encoder to use. Defaults to :class:`json.JSONEncoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
json_decoder = json.JSONDecoder()
"""The JSON decoder to use. Defaults to :class:`json.JSONDecoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
def __init__(self,
debug=False,
strict_floats=True):
"""
:param debug: If True, the registry records tracebacks for debugging purposes
:type debug: bool
:param strict_floats: If True, the registry does not allow ints as float parameters
:type strict_floats: bool
.. versionchanged:: 0.4.0 Added strict_floats option
"""
self._name_to_method_info = {}
self._register_describe()
self.debug = debug
self._strict_floats = strict_floats
self._logger = _get_default_logger()
self.tracebacks = {}
def _register_describe(self):
def _describe():
return self.describe()
_describe.__doc__ = self.describe.__doc__
describe_signature = MethodSignature.create([], {}, dict)
self.register("rpc.describe", _describe, describe_signature)
def dispatch(self, request):
"""Takes a request and dispatches its data to a jsonrpc method.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: json output of the corresponding method
:rtype: str
.. versionadded:: 0.1.0
"""
def _wrapped():
messages = self._get_request_messages(request)
results = [self._dispatch_and_handle_errors(message) for message in messages]
non_notification_results = [x for x in results if x is not None]
if len(non_notification_results) == 0:
return None
elif len(messages) == 1:
return non_notification_results[0]
else:
return non_notification_results
result, _ = self._handle_exceptions(_wrapped)
if result is not None:
return self._encode_complete_result(result)
def _dispatch_and_handle_errors(self, msg):
is_notification = isinstance(msg, dict) and "id" not in msg
def _wrapped():
result = self._dispatch_message(msg)
if not is_notification:
return Registry._create_result_response(msg["id"], result)
result, _ = self._handle_exceptions(_wrapped, is_notification, self._get_id_if_known(msg))
return result
def _handle_exceptions(self, method, is_notification=False, msg_id=None):
try:
return method(), False
except Error as exc:
if not is_notification:
if self.debug:
debug_url = self._store_traceback()
exc.data = {"message": exc.data, "debug_url": debug_url}
return Registry._create_error_response(msg_id, exc), True
except Exception as exc: # pylint: disable=broad-except
if not is_notification:
exc_info = sys.exc_info()
if self.debug:
debug_url = self._store_traceback()
else:
debug_url = None
exception_message = "id: {}, debug_url: {}".format(msg_id, debug_url)
self._logger.exception(exception_message)
new_error = InternalError.from_error(exc_info, self.json_encoder, debug_url)
return Registry._create_error_response(msg_id, new_error), True
def _encode_complete_result(self, result):
if isinstance(result, list):
return '[' + ','.join([self._encode_single_result(res) for res in result]) + ']'
else:
return self._encode_single_result(result)
def _encode_single_result(self, result):
msg_id = Registry._get_id_if_known(result)
is_notification = msg_id is None
def _encode():
return self.json_encoder.encode(result)
encoded, is_error = self._handle_exceptions(_encode,
is_notification=is_notification,
msg_id=msg_id)
if is_error:
# Fall back to default because previous encoding didn't work.
return self.json_encoder.encode(encoded)
else:
return encoded
def _store_traceback(self):
traceback = get_current_traceback(skip=1,
show_hidden_frames=False,
ignore_system_exceptions=True)
self.tracebacks[traceback.id] = traceback
return "/debug/{}".format(traceback.id)
@staticmethod
def _get_id_if_known(msg):
if isinstance(msg, dict) and "id" in msg:
return msg["id"]
else:
return None
def _dispatch_message(self, msg):
self._check_request(msg)
method = self._name_to_method_info[msg["method"]].method
params = msg.get("params", [])
parameter_checker.validate_params_match(method, params)
if isinstance(params, list):
result = method(*params)
elif isinstance(params, dict):
result = method(**params)
else:
raise InvalidRequestError("Given params '{}' are neither a list nor a dict."
.format(msg["params"]))
return result
@staticmethod
def _create_result_response(msg_id, result):
return {
"jsonrpc": "2.0",
"id": msg_id,
"result": result,
}
@staticmethod
def _create_error_response(msg_id, exc):
return {
"jsonrpc": "2.0",
"id": msg_id,
"error": exc.as_error_object(),
}
def method(self, returns, **parameter_types):
"""Syntactic sugar for registering a method
Example:
>>> registry = Registry()
>>> @registry.method(returns=int, x=int, y=int)
... def add(x, y):
... return x + y
:param returns: The method's return type
:type returns: type
:param parameter_types: The types of the method's parameters
:type parameter_types: dict[str, type]
.. versionadded:: 0.1.0
"""
@wrapt.decorator
def type_check_wrapper(method, instance, args, kwargs):
"""Wraps a method so that it is type-checked.
:param method: The method to wrap
:type method: (T) -> U
:return: The result of calling the method with the given parameters
:rtype: U
"""
if instance is not None:
raise Exception("Instance shouldn't be set.")
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
defaults = inspect.getargspec(method).defaults # pylint: disable=deprecated-method
parameters = self._collect_parameters(parameter_names, args, kwargs, defaults)
parameter_checker.check_types(parameters, parameter_types, self._strict_floats)
result = method(*args, **kwargs)
parameter_checker.check_return_type(result, returns, self._strict_floats)
return result
def register_method(method):
"""Registers a method with its fully qualified name.
:param method: The method to register
:type method: function
:return: The original method wrapped into a type-checker
:rtype: function
"""
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
parameter_checker.check_type_declaration(parameter_names, parameter_types)
wrapped_method = type_check_wrapper(method, None, None, None)
fully_qualified_name = "{}.{}".format(method.__module__, method.__name__)
self.register(fully_qualified_name, wrapped_method,
MethodSignature.create(parameter_names, parameter_types, returns))
return wrapped_method
return register_method
@staticmethod
def _collect_parameters(parameter_names, args, kwargs, defaults):
"""Creates a dictionary mapping parameters names to their values in the method call.
:param parameter_names: The method's parameter names
:type parameter_names: list[string]
:param args: *args passed into the method
:type args: list[object]
:param kwargs: **kwargs passed into the method
:type kwargs: dict[string, object]
:param defaults: The method's default values
:type defaults: list[object]
:return: Dictionary mapping parameter names to values
:rtype: dict[string, object]
"""
parameters = {}
if defaults is not None:
zipped_defaults = zip(reversed(parameter_names), reversed(defaults))
for name, default in zipped_defaults:
parameters[name] = default
for name, value in zip(parameter_names, args):
parameters[name] = value
for name, value in kwargs.items():
parameters[name] = value
return parameters
def describe(self):
"""Returns a description of all the methods in the registry.
:return: Description
:rtype: dict[str, object]
.. versionadded:: 0.1.0
"""
return {
"methods": [method_info.describe()
for method_info in sorted(self._name_to_method_info.values())]
}
def _get_request_messages(self, request):
"""Parses the request as a json message.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: The parsed json object
:rtype: dict[str, object]
"""
data = request.get_data(as_text=True)
try:
msg = self.json_decoder.decode(data)
except Exception:
raise ParseError("Could not parse request data '{}'".format(data))
if isinstance(msg, list):
return msg
else:
return [msg]
def _check_request(self, msg):
"""Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object]
"""
if "jsonrpc" not in msg:
raise InvalidRequestError("'\"jsonrpc\": \"2.0\"' must be included.")
if msg["jsonrpc"] != "2.0":
raise InvalidRequestError("'jsonrpc' must be exactly the string '2.0', but it was '{}'."
.format(msg["jsonrpc"]))
if "method" not in msg:
raise InvalidRequestError("No method specified.")
if "id" in msg:
if msg["id"] is None:
raise InvalidRequestError("typedjsonrpc does not allow id to be None.")
if isinstance(msg["id"], float):
raise InvalidRequestError("typedjsonrpc does not support float ids.")
if not isinstance(msg["id"], (six.string_types, six.integer_types)):
raise InvalidRequestError("id must be a string or integer; '{}' is of type {}."
.format(msg["id"], type(msg["id"])))
if msg["method"] not in self._name_to_method_info:
raise MethodNotFoundError("Could not find method '{}'.".format(msg["method"]))
|
palantir/typedjsonrpc | typedjsonrpc/registry.py | Registry.method | python | def method(self, returns, **parameter_types):
@wrapt.decorator
def type_check_wrapper(method, instance, args, kwargs):
"""Wraps a method so that it is type-checked.
:param method: The method to wrap
:type method: (T) -> U
:return: The result of calling the method with the given parameters
:rtype: U
"""
if instance is not None:
raise Exception("Instance shouldn't be set.")
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
defaults = inspect.getargspec(method).defaults # pylint: disable=deprecated-method
parameters = self._collect_parameters(parameter_names, args, kwargs, defaults)
parameter_checker.check_types(parameters, parameter_types, self._strict_floats)
result = method(*args, **kwargs)
parameter_checker.check_return_type(result, returns, self._strict_floats)
return result
def register_method(method):
"""Registers a method with its fully qualified name.
:param method: The method to register
:type method: function
:return: The original method wrapped into a type-checker
:rtype: function
"""
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
parameter_checker.check_type_declaration(parameter_names, parameter_types)
wrapped_method = type_check_wrapper(method, None, None, None)
fully_qualified_name = "{}.{}".format(method.__module__, method.__name__)
self.register(fully_qualified_name, wrapped_method,
MethodSignature.create(parameter_names, parameter_types, returns))
return wrapped_method
return register_method | Syntactic sugar for registering a method
Example:
>>> registry = Registry()
>>> @registry.method(returns=int, x=int, y=int)
... def add(x, y):
... return x + y
:param returns: The method's return type
:type returns: type
:param parameter_types: The types of the method's parameters
:type parameter_types: dict[str, type]
.. versionadded:: 0.1.0 | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/registry.py#L234-L291 | null | class Registry(object):
"""The registry for storing and calling jsonrpc methods.
:attribute debug: Debug option which enables recording of tracebacks
:type debug: bool
:attribute tracebacks: Tracebacks for debugging
:type tracebacks: dict[int, werkzeug.debug.tbtools.Traceback]
.. versionadded:: 0.1.0
"""
json_encoder = json.JSONEncoder()
"""The JSON encoder to use. Defaults to :class:`json.JSONEncoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
json_decoder = json.JSONDecoder()
"""The JSON decoder to use. Defaults to :class:`json.JSONDecoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
def __init__(self,
debug=False,
strict_floats=True):
"""
:param debug: If True, the registry records tracebacks for debugging purposes
:type debug: bool
:param strict_floats: If True, the registry does not allow ints as float parameters
:type strict_floats: bool
.. versionchanged:: 0.4.0 Added strict_floats option
"""
self._name_to_method_info = {}
self._register_describe()
self.debug = debug
self._strict_floats = strict_floats
self._logger = _get_default_logger()
self.tracebacks = {}
def _register_describe(self):
def _describe():
return self.describe()
_describe.__doc__ = self.describe.__doc__
describe_signature = MethodSignature.create([], {}, dict)
self.register("rpc.describe", _describe, describe_signature)
def dispatch(self, request):
"""Takes a request and dispatches its data to a jsonrpc method.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: json output of the corresponding method
:rtype: str
.. versionadded:: 0.1.0
"""
def _wrapped():
messages = self._get_request_messages(request)
results = [self._dispatch_and_handle_errors(message) for message in messages]
non_notification_results = [x for x in results if x is not None]
if len(non_notification_results) == 0:
return None
elif len(messages) == 1:
return non_notification_results[0]
else:
return non_notification_results
result, _ = self._handle_exceptions(_wrapped)
if result is not None:
return self._encode_complete_result(result)
def _dispatch_and_handle_errors(self, msg):
is_notification = isinstance(msg, dict) and "id" not in msg
def _wrapped():
result = self._dispatch_message(msg)
if not is_notification:
return Registry._create_result_response(msg["id"], result)
result, _ = self._handle_exceptions(_wrapped, is_notification, self._get_id_if_known(msg))
return result
def _handle_exceptions(self, method, is_notification=False, msg_id=None):
try:
return method(), False
except Error as exc:
if not is_notification:
if self.debug:
debug_url = self._store_traceback()
exc.data = {"message": exc.data, "debug_url": debug_url}
return Registry._create_error_response(msg_id, exc), True
except Exception as exc: # pylint: disable=broad-except
if not is_notification:
exc_info = sys.exc_info()
if self.debug:
debug_url = self._store_traceback()
else:
debug_url = None
exception_message = "id: {}, debug_url: {}".format(msg_id, debug_url)
self._logger.exception(exception_message)
new_error = InternalError.from_error(exc_info, self.json_encoder, debug_url)
return Registry._create_error_response(msg_id, new_error), True
def _encode_complete_result(self, result):
if isinstance(result, list):
return '[' + ','.join([self._encode_single_result(res) for res in result]) + ']'
else:
return self._encode_single_result(result)
def _encode_single_result(self, result):
msg_id = Registry._get_id_if_known(result)
is_notification = msg_id is None
def _encode():
return self.json_encoder.encode(result)
encoded, is_error = self._handle_exceptions(_encode,
is_notification=is_notification,
msg_id=msg_id)
if is_error:
# Fall back to default because previous encoding didn't work.
return self.json_encoder.encode(encoded)
else:
return encoded
def _store_traceback(self):
traceback = get_current_traceback(skip=1,
show_hidden_frames=False,
ignore_system_exceptions=True)
self.tracebacks[traceback.id] = traceback
return "/debug/{}".format(traceback.id)
@staticmethod
def _get_id_if_known(msg):
if isinstance(msg, dict) and "id" in msg:
return msg["id"]
else:
return None
def _dispatch_message(self, msg):
self._check_request(msg)
method = self._name_to_method_info[msg["method"]].method
params = msg.get("params", [])
parameter_checker.validate_params_match(method, params)
if isinstance(params, list):
result = method(*params)
elif isinstance(params, dict):
result = method(**params)
else:
raise InvalidRequestError("Given params '{}' are neither a list nor a dict."
.format(msg["params"]))
return result
@staticmethod
def _create_result_response(msg_id, result):
return {
"jsonrpc": "2.0",
"id": msg_id,
"result": result,
}
@staticmethod
def _create_error_response(msg_id, exc):
return {
"jsonrpc": "2.0",
"id": msg_id,
"error": exc.as_error_object(),
}
def register(self, name, method, method_signature=None):
"""Registers a method with a given name and signature.
:param name: The name used to register the method
:type name: str
:param method: The method to register
:type method: function
:param method_signature: The method signature for the given function
:type method_signature: MethodSignature | None
.. versionadded:: 0.1.0
"""
if inspect.ismethod(method):
raise Exception("typedjsonrpc does not support making class methods into endpoints")
self._name_to_method_info[name] = MethodInfo(name, method, method_signature)
@staticmethod
def _collect_parameters(parameter_names, args, kwargs, defaults):
"""Creates a dictionary mapping parameters names to their values in the method call.
:param parameter_names: The method's parameter names
:type parameter_names: list[string]
:param args: *args passed into the method
:type args: list[object]
:param kwargs: **kwargs passed into the method
:type kwargs: dict[string, object]
:param defaults: The method's default values
:type defaults: list[object]
:return: Dictionary mapping parameter names to values
:rtype: dict[string, object]
"""
parameters = {}
if defaults is not None:
zipped_defaults = zip(reversed(parameter_names), reversed(defaults))
for name, default in zipped_defaults:
parameters[name] = default
for name, value in zip(parameter_names, args):
parameters[name] = value
for name, value in kwargs.items():
parameters[name] = value
return parameters
def describe(self):
"""Returns a description of all the methods in the registry.
:return: Description
:rtype: dict[str, object]
.. versionadded:: 0.1.0
"""
return {
"methods": [method_info.describe()
for method_info in sorted(self._name_to_method_info.values())]
}
def _get_request_messages(self, request):
"""Parses the request as a json message.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: The parsed json object
:rtype: dict[str, object]
"""
data = request.get_data(as_text=True)
try:
msg = self.json_decoder.decode(data)
except Exception:
raise ParseError("Could not parse request data '{}'".format(data))
if isinstance(msg, list):
return msg
else:
return [msg]
def _check_request(self, msg):
"""Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object]
"""
if "jsonrpc" not in msg:
raise InvalidRequestError("'\"jsonrpc\": \"2.0\"' must be included.")
if msg["jsonrpc"] != "2.0":
raise InvalidRequestError("'jsonrpc' must be exactly the string '2.0', but it was '{}'."
.format(msg["jsonrpc"]))
if "method" not in msg:
raise InvalidRequestError("No method specified.")
if "id" in msg:
if msg["id"] is None:
raise InvalidRequestError("typedjsonrpc does not allow id to be None.")
if isinstance(msg["id"], float):
raise InvalidRequestError("typedjsonrpc does not support float ids.")
if not isinstance(msg["id"], (six.string_types, six.integer_types)):
raise InvalidRequestError("id must be a string or integer; '{}' is of type {}."
.format(msg["id"], type(msg["id"])))
if msg["method"] not in self._name_to_method_info:
raise MethodNotFoundError("Could not find method '{}'.".format(msg["method"]))
|
palantir/typedjsonrpc | typedjsonrpc/registry.py | Registry._collect_parameters | python | def _collect_parameters(parameter_names, args, kwargs, defaults):
parameters = {}
if defaults is not None:
zipped_defaults = zip(reversed(parameter_names), reversed(defaults))
for name, default in zipped_defaults:
parameters[name] = default
for name, value in zip(parameter_names, args):
parameters[name] = value
for name, value in kwargs.items():
parameters[name] = value
return parameters | Creates a dictionary mapping parameters names to their values in the method call.
:param parameter_names: The method's parameter names
:type parameter_names: list[string]
:param args: *args passed into the method
:type args: list[object]
:param kwargs: **kwargs passed into the method
:type kwargs: dict[string, object]
:param defaults: The method's default values
:type defaults: list[object]
:return: Dictionary mapping parameter names to values
:rtype: dict[string, object] | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/registry.py#L294-L317 | null | class Registry(object):
"""The registry for storing and calling jsonrpc methods.
:attribute debug: Debug option which enables recording of tracebacks
:type debug: bool
:attribute tracebacks: Tracebacks for debugging
:type tracebacks: dict[int, werkzeug.debug.tbtools.Traceback]
.. versionadded:: 0.1.0
"""
json_encoder = json.JSONEncoder()
"""The JSON encoder to use. Defaults to :class:`json.JSONEncoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
json_decoder = json.JSONDecoder()
"""The JSON decoder to use. Defaults to :class:`json.JSONDecoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
def __init__(self,
debug=False,
strict_floats=True):
"""
:param debug: If True, the registry records tracebacks for debugging purposes
:type debug: bool
:param strict_floats: If True, the registry does not allow ints as float parameters
:type strict_floats: bool
.. versionchanged:: 0.4.0 Added strict_floats option
"""
self._name_to_method_info = {}
self._register_describe()
self.debug = debug
self._strict_floats = strict_floats
self._logger = _get_default_logger()
self.tracebacks = {}
def _register_describe(self):
def _describe():
return self.describe()
_describe.__doc__ = self.describe.__doc__
describe_signature = MethodSignature.create([], {}, dict)
self.register("rpc.describe", _describe, describe_signature)
def dispatch(self, request):
"""Takes a request and dispatches its data to a jsonrpc method.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: json output of the corresponding method
:rtype: str
.. versionadded:: 0.1.0
"""
def _wrapped():
messages = self._get_request_messages(request)
results = [self._dispatch_and_handle_errors(message) for message in messages]
non_notification_results = [x for x in results if x is not None]
if len(non_notification_results) == 0:
return None
elif len(messages) == 1:
return non_notification_results[0]
else:
return non_notification_results
result, _ = self._handle_exceptions(_wrapped)
if result is not None:
return self._encode_complete_result(result)
def _dispatch_and_handle_errors(self, msg):
is_notification = isinstance(msg, dict) and "id" not in msg
def _wrapped():
result = self._dispatch_message(msg)
if not is_notification:
return Registry._create_result_response(msg["id"], result)
result, _ = self._handle_exceptions(_wrapped, is_notification, self._get_id_if_known(msg))
return result
def _handle_exceptions(self, method, is_notification=False, msg_id=None):
try:
return method(), False
except Error as exc:
if not is_notification:
if self.debug:
debug_url = self._store_traceback()
exc.data = {"message": exc.data, "debug_url": debug_url}
return Registry._create_error_response(msg_id, exc), True
except Exception as exc: # pylint: disable=broad-except
if not is_notification:
exc_info = sys.exc_info()
if self.debug:
debug_url = self._store_traceback()
else:
debug_url = None
exception_message = "id: {}, debug_url: {}".format(msg_id, debug_url)
self._logger.exception(exception_message)
new_error = InternalError.from_error(exc_info, self.json_encoder, debug_url)
return Registry._create_error_response(msg_id, new_error), True
def _encode_complete_result(self, result):
if isinstance(result, list):
return '[' + ','.join([self._encode_single_result(res) for res in result]) + ']'
else:
return self._encode_single_result(result)
def _encode_single_result(self, result):
msg_id = Registry._get_id_if_known(result)
is_notification = msg_id is None
def _encode():
return self.json_encoder.encode(result)
encoded, is_error = self._handle_exceptions(_encode,
is_notification=is_notification,
msg_id=msg_id)
if is_error:
# Fall back to default because previous encoding didn't work.
return self.json_encoder.encode(encoded)
else:
return encoded
def _store_traceback(self):
traceback = get_current_traceback(skip=1,
show_hidden_frames=False,
ignore_system_exceptions=True)
self.tracebacks[traceback.id] = traceback
return "/debug/{}".format(traceback.id)
@staticmethod
def _get_id_if_known(msg):
if isinstance(msg, dict) and "id" in msg:
return msg["id"]
else:
return None
def _dispatch_message(self, msg):
self._check_request(msg)
method = self._name_to_method_info[msg["method"]].method
params = msg.get("params", [])
parameter_checker.validate_params_match(method, params)
if isinstance(params, list):
result = method(*params)
elif isinstance(params, dict):
result = method(**params)
else:
raise InvalidRequestError("Given params '{}' are neither a list nor a dict."
.format(msg["params"]))
return result
@staticmethod
def _create_result_response(msg_id, result):
return {
"jsonrpc": "2.0",
"id": msg_id,
"result": result,
}
@staticmethod
def _create_error_response(msg_id, exc):
return {
"jsonrpc": "2.0",
"id": msg_id,
"error": exc.as_error_object(),
}
def register(self, name, method, method_signature=None):
"""Registers a method with a given name and signature.
:param name: The name used to register the method
:type name: str
:param method: The method to register
:type method: function
:param method_signature: The method signature for the given function
:type method_signature: MethodSignature | None
.. versionadded:: 0.1.0
"""
if inspect.ismethod(method):
raise Exception("typedjsonrpc does not support making class methods into endpoints")
self._name_to_method_info[name] = MethodInfo(name, method, method_signature)
def method(self, returns, **parameter_types):
"""Syntactic sugar for registering a method
Example:
>>> registry = Registry()
>>> @registry.method(returns=int, x=int, y=int)
... def add(x, y):
... return x + y
:param returns: The method's return type
:type returns: type
:param parameter_types: The types of the method's parameters
:type parameter_types: dict[str, type]
.. versionadded:: 0.1.0
"""
@wrapt.decorator
def type_check_wrapper(method, instance, args, kwargs):
"""Wraps a method so that it is type-checked.
:param method: The method to wrap
:type method: (T) -> U
:return: The result of calling the method with the given parameters
:rtype: U
"""
if instance is not None:
raise Exception("Instance shouldn't be set.")
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
defaults = inspect.getargspec(method).defaults # pylint: disable=deprecated-method
parameters = self._collect_parameters(parameter_names, args, kwargs, defaults)
parameter_checker.check_types(parameters, parameter_types, self._strict_floats)
result = method(*args, **kwargs)
parameter_checker.check_return_type(result, returns, self._strict_floats)
return result
def register_method(method):
"""Registers a method with its fully qualified name.
:param method: The method to register
:type method: function
:return: The original method wrapped into a type-checker
:rtype: function
"""
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
parameter_checker.check_type_declaration(parameter_names, parameter_types)
wrapped_method = type_check_wrapper(method, None, None, None)
fully_qualified_name = "{}.{}".format(method.__module__, method.__name__)
self.register(fully_qualified_name, wrapped_method,
MethodSignature.create(parameter_names, parameter_types, returns))
return wrapped_method
return register_method
@staticmethod
def describe(self):
"""Returns a description of all the methods in the registry.
:return: Description
:rtype: dict[str, object]
.. versionadded:: 0.1.0
"""
return {
"methods": [method_info.describe()
for method_info in sorted(self._name_to_method_info.values())]
}
def _get_request_messages(self, request):
"""Parses the request as a json message.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: The parsed json object
:rtype: dict[str, object]
"""
data = request.get_data(as_text=True)
try:
msg = self.json_decoder.decode(data)
except Exception:
raise ParseError("Could not parse request data '{}'".format(data))
if isinstance(msg, list):
return msg
else:
return [msg]
def _check_request(self, msg):
"""Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object]
"""
if "jsonrpc" not in msg:
raise InvalidRequestError("'\"jsonrpc\": \"2.0\"' must be included.")
if msg["jsonrpc"] != "2.0":
raise InvalidRequestError("'jsonrpc' must be exactly the string '2.0', but it was '{}'."
.format(msg["jsonrpc"]))
if "method" not in msg:
raise InvalidRequestError("No method specified.")
if "id" in msg:
if msg["id"] is None:
raise InvalidRequestError("typedjsonrpc does not allow id to be None.")
if isinstance(msg["id"], float):
raise InvalidRequestError("typedjsonrpc does not support float ids.")
if not isinstance(msg["id"], (six.string_types, six.integer_types)):
raise InvalidRequestError("id must be a string or integer; '{}' is of type {}."
.format(msg["id"], type(msg["id"])))
if msg["method"] not in self._name_to_method_info:
raise MethodNotFoundError("Could not find method '{}'.".format(msg["method"]))
|
palantir/typedjsonrpc | typedjsonrpc/registry.py | Registry._get_request_messages | python | def _get_request_messages(self, request):
data = request.get_data(as_text=True)
try:
msg = self.json_decoder.decode(data)
except Exception:
raise ParseError("Could not parse request data '{}'".format(data))
if isinstance(msg, list):
return msg
else:
return [msg] | Parses the request as a json message.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: The parsed json object
:rtype: dict[str, object] | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/registry.py#L332-L348 | null | class Registry(object):
"""The registry for storing and calling jsonrpc methods.
:attribute debug: Debug option which enables recording of tracebacks
:type debug: bool
:attribute tracebacks: Tracebacks for debugging
:type tracebacks: dict[int, werkzeug.debug.tbtools.Traceback]
.. versionadded:: 0.1.0
"""
json_encoder = json.JSONEncoder()
"""The JSON encoder to use. Defaults to :class:`json.JSONEncoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
json_decoder = json.JSONDecoder()
"""The JSON decoder to use. Defaults to :class:`json.JSONDecoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
def __init__(self,
debug=False,
strict_floats=True):
"""
:param debug: If True, the registry records tracebacks for debugging purposes
:type debug: bool
:param strict_floats: If True, the registry does not allow ints as float parameters
:type strict_floats: bool
.. versionchanged:: 0.4.0 Added strict_floats option
"""
self._name_to_method_info = {}
self._register_describe()
self.debug = debug
self._strict_floats = strict_floats
self._logger = _get_default_logger()
self.tracebacks = {}
def _register_describe(self):
def _describe():
return self.describe()
_describe.__doc__ = self.describe.__doc__
describe_signature = MethodSignature.create([], {}, dict)
self.register("rpc.describe", _describe, describe_signature)
def dispatch(self, request):
"""Takes a request and dispatches its data to a jsonrpc method.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: json output of the corresponding method
:rtype: str
.. versionadded:: 0.1.0
"""
def _wrapped():
messages = self._get_request_messages(request)
results = [self._dispatch_and_handle_errors(message) for message in messages]
non_notification_results = [x for x in results if x is not None]
if len(non_notification_results) == 0:
return None
elif len(messages) == 1:
return non_notification_results[0]
else:
return non_notification_results
result, _ = self._handle_exceptions(_wrapped)
if result is not None:
return self._encode_complete_result(result)
def _dispatch_and_handle_errors(self, msg):
is_notification = isinstance(msg, dict) and "id" not in msg
def _wrapped():
result = self._dispatch_message(msg)
if not is_notification:
return Registry._create_result_response(msg["id"], result)
result, _ = self._handle_exceptions(_wrapped, is_notification, self._get_id_if_known(msg))
return result
def _handle_exceptions(self, method, is_notification=False, msg_id=None):
try:
return method(), False
except Error as exc:
if not is_notification:
if self.debug:
debug_url = self._store_traceback()
exc.data = {"message": exc.data, "debug_url": debug_url}
return Registry._create_error_response(msg_id, exc), True
except Exception as exc: # pylint: disable=broad-except
if not is_notification:
exc_info = sys.exc_info()
if self.debug:
debug_url = self._store_traceback()
else:
debug_url = None
exception_message = "id: {}, debug_url: {}".format(msg_id, debug_url)
self._logger.exception(exception_message)
new_error = InternalError.from_error(exc_info, self.json_encoder, debug_url)
return Registry._create_error_response(msg_id, new_error), True
def _encode_complete_result(self, result):
if isinstance(result, list):
return '[' + ','.join([self._encode_single_result(res) for res in result]) + ']'
else:
return self._encode_single_result(result)
def _encode_single_result(self, result):
msg_id = Registry._get_id_if_known(result)
is_notification = msg_id is None
def _encode():
return self.json_encoder.encode(result)
encoded, is_error = self._handle_exceptions(_encode,
is_notification=is_notification,
msg_id=msg_id)
if is_error:
# Fall back to default because previous encoding didn't work.
return self.json_encoder.encode(encoded)
else:
return encoded
def _store_traceback(self):
traceback = get_current_traceback(skip=1,
show_hidden_frames=False,
ignore_system_exceptions=True)
self.tracebacks[traceback.id] = traceback
return "/debug/{}".format(traceback.id)
@staticmethod
def _get_id_if_known(msg):
if isinstance(msg, dict) and "id" in msg:
return msg["id"]
else:
return None
def _dispatch_message(self, msg):
self._check_request(msg)
method = self._name_to_method_info[msg["method"]].method
params = msg.get("params", [])
parameter_checker.validate_params_match(method, params)
if isinstance(params, list):
result = method(*params)
elif isinstance(params, dict):
result = method(**params)
else:
raise InvalidRequestError("Given params '{}' are neither a list nor a dict."
.format(msg["params"]))
return result
@staticmethod
def _create_result_response(msg_id, result):
return {
"jsonrpc": "2.0",
"id": msg_id,
"result": result,
}
@staticmethod
def _create_error_response(msg_id, exc):
return {
"jsonrpc": "2.0",
"id": msg_id,
"error": exc.as_error_object(),
}
def register(self, name, method, method_signature=None):
"""Registers a method with a given name and signature.
:param name: The name used to register the method
:type name: str
:param method: The method to register
:type method: function
:param method_signature: The method signature for the given function
:type method_signature: MethodSignature | None
.. versionadded:: 0.1.0
"""
if inspect.ismethod(method):
raise Exception("typedjsonrpc does not support making class methods into endpoints")
self._name_to_method_info[name] = MethodInfo(name, method, method_signature)
def method(self, returns, **parameter_types):
"""Syntactic sugar for registering a method
Example:
>>> registry = Registry()
>>> @registry.method(returns=int, x=int, y=int)
... def add(x, y):
... return x + y
:param returns: The method's return type
:type returns: type
:param parameter_types: The types of the method's parameters
:type parameter_types: dict[str, type]
.. versionadded:: 0.1.0
"""
@wrapt.decorator
def type_check_wrapper(method, instance, args, kwargs):
"""Wraps a method so that it is type-checked.
:param method: The method to wrap
:type method: (T) -> U
:return: The result of calling the method with the given parameters
:rtype: U
"""
if instance is not None:
raise Exception("Instance shouldn't be set.")
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
defaults = inspect.getargspec(method).defaults # pylint: disable=deprecated-method
parameters = self._collect_parameters(parameter_names, args, kwargs, defaults)
parameter_checker.check_types(parameters, parameter_types, self._strict_floats)
result = method(*args, **kwargs)
parameter_checker.check_return_type(result, returns, self._strict_floats)
return result
def register_method(method):
"""Registers a method with its fully qualified name.
:param method: The method to register
:type method: function
:return: The original method wrapped into a type-checker
:rtype: function
"""
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
parameter_checker.check_type_declaration(parameter_names, parameter_types)
wrapped_method = type_check_wrapper(method, None, None, None)
fully_qualified_name = "{}.{}".format(method.__module__, method.__name__)
self.register(fully_qualified_name, wrapped_method,
MethodSignature.create(parameter_names, parameter_types, returns))
return wrapped_method
return register_method
@staticmethod
def _collect_parameters(parameter_names, args, kwargs, defaults):
"""Creates a dictionary mapping parameters names to their values in the method call.
:param parameter_names: The method's parameter names
:type parameter_names: list[string]
:param args: *args passed into the method
:type args: list[object]
:param kwargs: **kwargs passed into the method
:type kwargs: dict[string, object]
:param defaults: The method's default values
:type defaults: list[object]
:return: Dictionary mapping parameter names to values
:rtype: dict[string, object]
"""
parameters = {}
if defaults is not None:
zipped_defaults = zip(reversed(parameter_names), reversed(defaults))
for name, default in zipped_defaults:
parameters[name] = default
for name, value in zip(parameter_names, args):
parameters[name] = value
for name, value in kwargs.items():
parameters[name] = value
return parameters
def describe(self):
"""Returns a description of all the methods in the registry.
:return: Description
:rtype: dict[str, object]
.. versionadded:: 0.1.0
"""
return {
"methods": [method_info.describe()
for method_info in sorted(self._name_to_method_info.values())]
}
def _check_request(self, msg):
"""Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object]
"""
if "jsonrpc" not in msg:
raise InvalidRequestError("'\"jsonrpc\": \"2.0\"' must be included.")
if msg["jsonrpc"] != "2.0":
raise InvalidRequestError("'jsonrpc' must be exactly the string '2.0', but it was '{}'."
.format(msg["jsonrpc"]))
if "method" not in msg:
raise InvalidRequestError("No method specified.")
if "id" in msg:
if msg["id"] is None:
raise InvalidRequestError("typedjsonrpc does not allow id to be None.")
if isinstance(msg["id"], float):
raise InvalidRequestError("typedjsonrpc does not support float ids.")
if not isinstance(msg["id"], (six.string_types, six.integer_types)):
raise InvalidRequestError("id must be a string or integer; '{}' is of type {}."
.format(msg["id"], type(msg["id"])))
if msg["method"] not in self._name_to_method_info:
raise MethodNotFoundError("Could not find method '{}'.".format(msg["method"]))
|
palantir/typedjsonrpc | typedjsonrpc/registry.py | Registry._check_request | python | def _check_request(self, msg):
if "jsonrpc" not in msg:
raise InvalidRequestError("'\"jsonrpc\": \"2.0\"' must be included.")
if msg["jsonrpc"] != "2.0":
raise InvalidRequestError("'jsonrpc' must be exactly the string '2.0', but it was '{}'."
.format(msg["jsonrpc"]))
if "method" not in msg:
raise InvalidRequestError("No method specified.")
if "id" in msg:
if msg["id"] is None:
raise InvalidRequestError("typedjsonrpc does not allow id to be None.")
if isinstance(msg["id"], float):
raise InvalidRequestError("typedjsonrpc does not support float ids.")
if not isinstance(msg["id"], (six.string_types, six.integer_types)):
raise InvalidRequestError("id must be a string or integer; '{}' is of type {}."
.format(msg["id"], type(msg["id"])))
if msg["method"] not in self._name_to_method_info:
raise MethodNotFoundError("Could not find method '{}'.".format(msg["method"])) | Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object] | train | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/registry.py#L350-L372 | null | class Registry(object):
"""The registry for storing and calling jsonrpc methods.
:attribute debug: Debug option which enables recording of tracebacks
:type debug: bool
:attribute tracebacks: Tracebacks for debugging
:type tracebacks: dict[int, werkzeug.debug.tbtools.Traceback]
.. versionadded:: 0.1.0
"""
json_encoder = json.JSONEncoder()
"""The JSON encoder to use. Defaults to :class:`json.JSONEncoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
json_decoder = json.JSONDecoder()
"""The JSON decoder to use. Defaults to :class:`json.JSONDecoder`
.. versionadded:: 0.1.0
.. versionchanged:: 0.2.0 Changed from class to instance
"""
def __init__(self,
debug=False,
strict_floats=True):
"""
:param debug: If True, the registry records tracebacks for debugging purposes
:type debug: bool
:param strict_floats: If True, the registry does not allow ints as float parameters
:type strict_floats: bool
.. versionchanged:: 0.4.0 Added strict_floats option
"""
self._name_to_method_info = {}
self._register_describe()
self.debug = debug
self._strict_floats = strict_floats
self._logger = _get_default_logger()
self.tracebacks = {}
def _register_describe(self):
def _describe():
return self.describe()
_describe.__doc__ = self.describe.__doc__
describe_signature = MethodSignature.create([], {}, dict)
self.register("rpc.describe", _describe, describe_signature)
def dispatch(self, request):
"""Takes a request and dispatches its data to a jsonrpc method.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: json output of the corresponding method
:rtype: str
.. versionadded:: 0.1.0
"""
def _wrapped():
messages = self._get_request_messages(request)
results = [self._dispatch_and_handle_errors(message) for message in messages]
non_notification_results = [x for x in results if x is not None]
if len(non_notification_results) == 0:
return None
elif len(messages) == 1:
return non_notification_results[0]
else:
return non_notification_results
result, _ = self._handle_exceptions(_wrapped)
if result is not None:
return self._encode_complete_result(result)
def _dispatch_and_handle_errors(self, msg):
is_notification = isinstance(msg, dict) and "id" not in msg
def _wrapped():
result = self._dispatch_message(msg)
if not is_notification:
return Registry._create_result_response(msg["id"], result)
result, _ = self._handle_exceptions(_wrapped, is_notification, self._get_id_if_known(msg))
return result
def _handle_exceptions(self, method, is_notification=False, msg_id=None):
try:
return method(), False
except Error as exc:
if not is_notification:
if self.debug:
debug_url = self._store_traceback()
exc.data = {"message": exc.data, "debug_url": debug_url}
return Registry._create_error_response(msg_id, exc), True
except Exception as exc: # pylint: disable=broad-except
if not is_notification:
exc_info = sys.exc_info()
if self.debug:
debug_url = self._store_traceback()
else:
debug_url = None
exception_message = "id: {}, debug_url: {}".format(msg_id, debug_url)
self._logger.exception(exception_message)
new_error = InternalError.from_error(exc_info, self.json_encoder, debug_url)
return Registry._create_error_response(msg_id, new_error), True
def _encode_complete_result(self, result):
if isinstance(result, list):
return '[' + ','.join([self._encode_single_result(res) for res in result]) + ']'
else:
return self._encode_single_result(result)
def _encode_single_result(self, result):
msg_id = Registry._get_id_if_known(result)
is_notification = msg_id is None
def _encode():
return self.json_encoder.encode(result)
encoded, is_error = self._handle_exceptions(_encode,
is_notification=is_notification,
msg_id=msg_id)
if is_error:
# Fall back to default because previous encoding didn't work.
return self.json_encoder.encode(encoded)
else:
return encoded
def _store_traceback(self):
traceback = get_current_traceback(skip=1,
show_hidden_frames=False,
ignore_system_exceptions=True)
self.tracebacks[traceback.id] = traceback
return "/debug/{}".format(traceback.id)
@staticmethod
def _get_id_if_known(msg):
if isinstance(msg, dict) and "id" in msg:
return msg["id"]
else:
return None
def _dispatch_message(self, msg):
self._check_request(msg)
method = self._name_to_method_info[msg["method"]].method
params = msg.get("params", [])
parameter_checker.validate_params_match(method, params)
if isinstance(params, list):
result = method(*params)
elif isinstance(params, dict):
result = method(**params)
else:
raise InvalidRequestError("Given params '{}' are neither a list nor a dict."
.format(msg["params"]))
return result
@staticmethod
def _create_result_response(msg_id, result):
return {
"jsonrpc": "2.0",
"id": msg_id,
"result": result,
}
@staticmethod
def _create_error_response(msg_id, exc):
return {
"jsonrpc": "2.0",
"id": msg_id,
"error": exc.as_error_object(),
}
def register(self, name, method, method_signature=None):
"""Registers a method with a given name and signature.
:param name: The name used to register the method
:type name: str
:param method: The method to register
:type method: function
:param method_signature: The method signature for the given function
:type method_signature: MethodSignature | None
.. versionadded:: 0.1.0
"""
if inspect.ismethod(method):
raise Exception("typedjsonrpc does not support making class methods into endpoints")
self._name_to_method_info[name] = MethodInfo(name, method, method_signature)
def method(self, returns, **parameter_types):
"""Syntactic sugar for registering a method
Example:
>>> registry = Registry()
>>> @registry.method(returns=int, x=int, y=int)
... def add(x, y):
... return x + y
:param returns: The method's return type
:type returns: type
:param parameter_types: The types of the method's parameters
:type parameter_types: dict[str, type]
.. versionadded:: 0.1.0
"""
@wrapt.decorator
def type_check_wrapper(method, instance, args, kwargs):
"""Wraps a method so that it is type-checked.
:param method: The method to wrap
:type method: (T) -> U
:return: The result of calling the method with the given parameters
:rtype: U
"""
if instance is not None:
raise Exception("Instance shouldn't be set.")
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
defaults = inspect.getargspec(method).defaults # pylint: disable=deprecated-method
parameters = self._collect_parameters(parameter_names, args, kwargs, defaults)
parameter_checker.check_types(parameters, parameter_types, self._strict_floats)
result = method(*args, **kwargs)
parameter_checker.check_return_type(result, returns, self._strict_floats)
return result
def register_method(method):
"""Registers a method with its fully qualified name.
:param method: The method to register
:type method: function
:return: The original method wrapped into a type-checker
:rtype: function
"""
parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method
parameter_checker.check_type_declaration(parameter_names, parameter_types)
wrapped_method = type_check_wrapper(method, None, None, None)
fully_qualified_name = "{}.{}".format(method.__module__, method.__name__)
self.register(fully_qualified_name, wrapped_method,
MethodSignature.create(parameter_names, parameter_types, returns))
return wrapped_method
return register_method
@staticmethod
def _collect_parameters(parameter_names, args, kwargs, defaults):
"""Creates a dictionary mapping parameters names to their values in the method call.
:param parameter_names: The method's parameter names
:type parameter_names: list[string]
:param args: *args passed into the method
:type args: list[object]
:param kwargs: **kwargs passed into the method
:type kwargs: dict[string, object]
:param defaults: The method's default values
:type defaults: list[object]
:return: Dictionary mapping parameter names to values
:rtype: dict[string, object]
"""
parameters = {}
if defaults is not None:
zipped_defaults = zip(reversed(parameter_names), reversed(defaults))
for name, default in zipped_defaults:
parameters[name] = default
for name, value in zip(parameter_names, args):
parameters[name] = value
for name, value in kwargs.items():
parameters[name] = value
return parameters
def describe(self):
"""Returns a description of all the methods in the registry.
:return: Description
:rtype: dict[str, object]
.. versionadded:: 0.1.0
"""
return {
"methods": [method_info.describe()
for method_info in sorted(self._name_to_method_info.values())]
}
def _get_request_messages(self, request):
"""Parses the request as a json message.
:param request: a werkzeug request with json data
:type request: werkzeug.wrappers.Request
:return: The parsed json object
:rtype: dict[str, object]
"""
data = request.get_data(as_text=True)
try:
msg = self.json_decoder.decode(data)
except Exception:
raise ParseError("Could not parse request data '{}'".format(data))
if isinstance(msg, list):
return msg
else:
return [msg]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.