| body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (string, 1 class: python) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def _remove_temp_handler():
'\n Remove temporary handler if it exists\n '
if (TEMP_HANDLER and (TEMP_HANDLER in logging.root.handlers)):
logging.root.handlers.remove(TEMP_HANDLER)
| -8,479,857,811,240,753,000
|
Remove temporary handler if it exists
|
hubblestack/log.py
|
_remove_temp_handler
|
instructure/hubble
|
python
|
def _remove_temp_handler():
'\n \n '
if (TEMP_HANDLER and (TEMP_HANDLER in logging.root.handlers)):
logging.root.handlers.remove(TEMP_HANDLER)
|
def setup_console_logger(log_level='error', log_format='%(asctime)s [%(levelname)-5s] %(message)s', date_format='%H:%M:%S'):
'\n Sets up logging to STDERR, allowing for configurable level, format, and\n date format.\n '
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
formatter = logging.Formatter(log_format, date_format)
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
| 547,214,475,094,259,900
|
Sets up logging to STDERR, allowing for configurable level, format, and
date format.
|
hubblestack/log.py
|
setup_console_logger
|
instructure/hubble
|
python
|
def setup_console_logger(log_level='error', log_format='%(asctime)s [%(levelname)-5s] %(message)s', date_format='%H:%M:%S'):
'\n Sets up logging to STDERR, allowing for configurable level, format, and\n date format.\n '
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
formatter = logging.Formatter(log_format, date_format)
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
|
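A minimal usage sketch of the same pattern (not part of the dataset record; the LOG_LEVELS mapping is assumed to translate level names to logging constants, as the body implies):

import logging

LOG_LEVELS = {'debug': logging.DEBUG, 'info': logging.INFO, 'error': logging.ERROR}

# Mirror setup_console_logger: attach a stderr handler at the requested level.
handler = logging.StreamHandler()
handler.setLevel(LOG_LEVELS.get('error', logging.ERROR))
handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)-5s] %(message)s', '%H:%M:%S'))
logging.getLogger().addHandler(handler)
logging.getLogger().error('visible on stderr')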
def setup_file_logger(log_file, log_level='error', log_format='%(asctime)s,%(msecs)03d [%(levelname)-5s] [%(name)s:%(lineno)d] %(message)s', date_format='%Y-%m-%d %H:%M:%S', max_bytes=100000000, backup_count=1):
'\n Sets up logging to a file. By default will auto-rotate those logs every\n 100MB and keep one backup.\n '
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=max_bytes, backupCount=backup_count)
handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
formatter = logging.Formatter(log_format, date_format)
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
| -1,951,438,289,589,759,200
|
Sets up logging to a file. By default will auto-rotate those logs every
100MB and keep one backup.
|
hubblestack/log.py
|
setup_file_logger
|
instructure/hubble
|
python
|
def setup_file_logger(log_file, log_level='error', log_format='%(asctime)s,%(msecs)03d [%(levelname)-5s] [%(name)s:%(lineno)d] %(message)s', date_format='%Y-%m-%d %H:%M:%S', max_bytes=100000000, backup_count=1):
'\n Sets up logging to a file. By default will auto-rotate those logs every\n 100MB and keep one backup.\n '
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=max_bytes, backupCount=backup_count)
handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
formatter = logging.Formatter(log_format, date_format)
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
|
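For reference, the rotation behaviour the defaults encode (rotate at roughly 100 MB, keep one backup) maps directly onto the standard library; a sketch independent of hubblestack:

import logging
import logging.handlers

# maxBytes/backupCount here match setup_file_logger's defaults.
handler = logging.handlers.RotatingFileHandler('hubble.log', maxBytes=100000000, backupCount=1)
logging.getLogger().addHandler(handler)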
def setup_splunk_logger():
'\n Sets up logging to splunk.\n '
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = hubblestack.splunklogging.SplunkHandler()
handler.setLevel(logging.SPLUNK)
rootlogger.addHandler(handler)
global SPLUNK_HANDLER
SPLUNK_HANDLER = handler
| -5,930,119,731,152,631,000
|
Sets up logging to splunk.
|
hubblestack/log.py
|
setup_splunk_logger
|
instructure/hubble
|
python
|
def setup_splunk_logger():
'\n \n '
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = hubblestack.splunklogging.SplunkHandler()
handler.setLevel(logging.SPLUNK)
rootlogger.addHandler(handler)
global SPLUNK_HANDLER
SPLUNK_HANDLER = handler
|
def emit_to_splunk(message, level, name):
'\n Emit a single message to splunk\n '
if isinstance(message, (list, dict)):
message = filter_logs(message, remove_dots=False)
if (SPLUNK_HANDLER is None):
return False
handler = SPLUNK_HANDLER
handler.emit(MockRecord(message, level, time.asctime(), name))
return True
| -7,925,935,624,446,416,000
|
Emit a single message to splunk
|
hubblestack/log.py
|
emit_to_splunk
|
instructure/hubble
|
python
|
def emit_to_splunk(message, level, name):
'\n \n '
if isinstance(message, (list, dict)):
message = filter_logs(message, remove_dots=False)
if (SPLUNK_HANDLER is None):
return False
handler = SPLUNK_HANDLER
handler.emit(MockRecord(message, level, time.asctime(), name))
return True
|
def workaround_salt_log_handler_queues():
'\n Build a fake log handler and add it to LOGGING_STORE_HANDLER and LOGGING_NULL_HANDLER\n '
class _FakeLogHandler(object):
level = 10
count = 0
def handle(self, _record):
' Receive a record and increase the count '
self.count += 1
flh = _FakeLogHandler()
import salt.log.setup as sls
sls.LOGGING_STORE_HANDLER.sync_with_handlers([flh])
sls.LOGGING_NULL_HANDLER.sync_with_handlers([flh])
| 905,797,758,034,563,600
|
Build a fake log handler and add it to LOGGING_STORE_HANDLER and LOGGING_NULL_HANDLER
|
hubblestack/log.py
|
workaround_salt_log_handler_queues
|
instructure/hubble
|
python
|
def workaround_salt_log_handler_queues():
'\n \n '
class _FakeLogHandler(object):
level = 10
count = 0
def handle(self, _record):
' Receive a record and increase the count '
self.count += 1
flh = _FakeLogHandler()
import salt.log.setup as sls
sls.LOGGING_STORE_HANDLER.sync_with_handlers([flh])
sls.LOGGING_NULL_HANDLER.sync_with_handlers([flh])
|
def filter_logs(opts_to_log, remove_dots=True):
'\n Filters out keys containing certain patterns to avoid sensitive information being sent to logs\n Works on dictionaries and lists\n This function was located at extmods/modules/conf_publisher.py previously\n '
filtered_conf = _remove_sensitive_info(opts_to_log, PATTERNS_TO_FILTER)
if remove_dots:
        for key in list(filtered_conf.keys()):
if ('.' in key):
filtered_conf[key.replace('.', '_')] = filtered_conf.pop(key)
return filtered_conf
| 5,361,334,341,806,947,000
|
Filters out keys containing certain patterns to avoid sensitive information being sent to logs
Works on dictionaries and lists
This function was located at extmods/modules/conf_publisher.py previously
|
hubblestack/log.py
|
filter_logs
|
instructure/hubble
|
python
|
def filter_logs(opts_to_log, remove_dots=True):
'\n Filters out keys containing certain patterns to avoid sensitive information being sent to logs\n Works on dictionaries and lists\n This function was located at extmods/modules/conf_publisher.py previously\n '
filtered_conf = _remove_sensitive_info(opts_to_log, PATTERNS_TO_FILTER)
if remove_dots:
        for key in list(filtered_conf.keys()):
if ('.' in key):
filtered_conf[key.replace('.', '_')] = filtered_conf.pop(key)
return filtered_conf
|
def _remove_sensitive_info(obj, patterns_to_filter):
'\n Filter known sensitive info\n '
if isinstance(obj, dict):
obj = {key: _remove_sensitive_info(value, patterns_to_filter) for (key, value) in obj.items() if (not any(((patt in key) for patt in patterns_to_filter)))}
elif isinstance(obj, list):
obj = [_remove_sensitive_info(item, patterns_to_filter) for item in obj]
return obj
| 3,576,416,888,570,603,000
|
Filter known sensitive info
|
hubblestack/log.py
|
_remove_sensitive_info
|
instructure/hubble
|
python
|
def _remove_sensitive_info(obj, patterns_to_filter):
'\n \n '
if isinstance(obj, dict):
obj = {key: _remove_sensitive_info(value, patterns_to_filter) for (key, value) in obj.items() if (not any(((patt in key) for patt in patterns_to_filter)))}
elif isinstance(obj, list):
obj = [_remove_sensitive_info(item, patterns_to_filter) for item in obj]
return obj
|
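With the two functions above in scope, a small illustration of the filtering (PATTERNS_TO_FILTER here is hypothetical; the real list lives in hubblestack/log.py):

PATTERNS_TO_FILTER = ['password', 'token']

opts = {'splunk.token': 'secret', 'log.level': 'error', 'nested': [{'api_password': 'x', 'host': 'h'}]}
# Keys containing a pattern are dropped recursively; remaining dotted keys are rewritten.
print(filter_logs(opts))  # {'nested': [{'host': 'h'}], 'log_level': 'error'}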
def handle(self, _record):
' Receive a record and increase the count '
self.count += 1
| 3,950,741,304,086,814,000
|
Receive a record and increase the count
|
hubblestack/log.py
|
handle
|
instructure/hubble
|
python
|
def handle(self, _record):
' '
self.count += 1
|
@power_session(envs=ENVS, logsdir=Folders.runlogs)
def tests(session: PowerSession, coverage, pkg_specs):
'Run the test suite, including test reports generation and coverage reports. '
rm_folder(Folders.site)
rm_folder(Folders.reports_root)
rm_file(Folders.coverage_intermediate_file)
rm_file((Folders.root / 'coverage.xml'))
session.install_reqs(setup=True, install=True, tests=True, extras=('all',), versions_dct=pkg_specs)
conda_prefix = Path(session.bin)
if (conda_prefix.name == 'bin'):
conda_prefix = conda_prefix.parent
session.run2('conda list', env={'CONDA_PREFIX': str(conda_prefix), 'CONDA_DEFAULT_ENV': session.get_session_id()})
session.run2(('python ci_tools/check_python_version.py %s' % session.python))
session.run2('pip install -e . --no-deps')
session.run2(['python', '-c', ('"import os; os.chdir(\'./docs/\'); import %s"' % pkg_name)])
if (not coverage):
session.run2(('python -m pytest --cache-clear -v %s/tests/' % pkg_name))
else:
session.install_reqs(phase='coverage', phase_reqs=['coverage', 'pytest-html', 'requests'], versions_dct=pkg_specs)
session.run2('coverage run --source {pkg_name} -m pytest --cache-clear --junitxml={test_xml} --html={test_html} -v {pkg_name}/tests/'.format(pkg_name=pkg_name, test_xml=Folders.test_xml, test_html=Folders.test_html))
session.run2('coverage report')
session.run2('coverage xml -o {covxml}'.format(covxml=Folders.coverage_xml))
session.run2('coverage html -d {dst}'.format(dst=Folders.coverage_reports))
rm_file(Folders.coverage_intermediate_file)
nox_logger.info('Generating badge for tests coverage')
session.run2(('genbadge tests -i %s -o %s -t 100' % (Folders.test_xml, Folders.test_badge)))
session.run2(('genbadge coverage -i %s -o %s' % (Folders.coverage_xml, Folders.coverage_badge)))
| -4,468,099,125,579,665,400
|
Run the test suite, including test reports generation and coverage reports.
|
noxfile.py
|
tests
|
texnofobix/python-genbadge
|
python
|
@power_session(envs=ENVS, logsdir=Folders.runlogs)
def tests(session: PowerSession, coverage, pkg_specs):
' '
rm_folder(Folders.site)
rm_folder(Folders.reports_root)
rm_file(Folders.coverage_intermediate_file)
rm_file((Folders.root / 'coverage.xml'))
session.install_reqs(setup=True, install=True, tests=True, extras=('all',), versions_dct=pkg_specs)
conda_prefix = Path(session.bin)
if (conda_prefix.name == 'bin'):
conda_prefix = conda_prefix.parent
session.run2('conda list', env={'CONDA_PREFIX': str(conda_prefix), 'CONDA_DEFAULT_ENV': session.get_session_id()})
session.run2(('python ci_tools/check_python_version.py %s' % session.python))
session.run2('pip install -e . --no-deps')
session.run2(['python', '-c', ('"import os; os.chdir(\'./docs/\'); import %s"' % pkg_name)])
if (not coverage):
session.run2(('python -m pytest --cache-clear -v %s/tests/' % pkg_name))
else:
session.install_reqs(phase='coverage', phase_reqs=['coverage', 'pytest-html', 'requests'], versions_dct=pkg_specs)
session.run2('coverage run --source {pkg_name} -m pytest --cache-clear --junitxml={test_xml} --html={test_html} -v {pkg_name}/tests/'.format(pkg_name=pkg_name, test_xml=Folders.test_xml, test_html=Folders.test_html))
session.run2('coverage report')
session.run2('coverage xml -o {covxml}'.format(covxml=Folders.coverage_xml))
session.run2('coverage html -d {dst}'.format(dst=Folders.coverage_reports))
rm_file(Folders.coverage_intermediate_file)
nox_logger.info('Generating badge for tests coverage')
session.run2(('genbadge tests -i %s -o %s -t 100' % (Folders.test_xml, Folders.test_badge)))
session.run2(('genbadge coverage -i %s -o %s' % (Folders.coverage_xml, Folders.coverage_badge)))
|
@power_session(python=PY38, logsdir=Folders.runlogs)
def flake8(session: PowerSession):
'Launch flake8 qualimetry.'
session.install('-r', str((Folders.ci_tools / 'flake8-requirements.txt')))
session.run2('pip install -e .[flake8]')
rm_folder(Folders.flake8_reports)
rm_file(Folders.flake8_intermediate_file)
session.run('flake8', pkg_name, '--exit-zero', '--format=html', '--htmldir', str(Folders.flake8_reports), '--statistics', '--tee', '--output-file', str(Folders.flake8_intermediate_file))
session.run2(('genbadge flake8 -i %s -o %s' % (Folders.flake8_intermediate_file, Folders.flake8_badge)))
rm_file(Folders.flake8_intermediate_file)
| 7,663,644,602,271,633,000
|
Launch flake8 qualimetry.
|
noxfile.py
|
flake8
|
texnofobix/python-genbadge
|
python
|
@power_session(python=PY38, logsdir=Folders.runlogs)
def flake8(session: PowerSession):
session.install('-r', str((Folders.ci_tools / 'flake8-requirements.txt')))
session.run2('pip install -e .[flake8]')
rm_folder(Folders.flake8_reports)
rm_file(Folders.flake8_intermediate_file)
session.run('flake8', pkg_name, '--exit-zero', '--format=html', '--htmldir', str(Folders.flake8_reports), '--statistics', '--tee', '--output-file', str(Folders.flake8_intermediate_file))
session.run2(('genbadge flake8 -i %s -o %s' % (Folders.flake8_intermediate_file, Folders.flake8_badge)))
rm_file(Folders.flake8_intermediate_file)
|
@power_session(python=[PY37])
def docs(session: PowerSession):
"Generates the doc and serves it on a local http server. Pass '-- build' to build statically instead."
session.install_reqs(phase='docs', phase_reqs=['mkdocs-material', 'mkdocs', 'pymdown-extensions', 'pygments'])
if session.posargs:
session.run2(('mkdocs -f ./docs/mkdocs.yml %s' % ' '.join(session.posargs)))
else:
session.run2('mkdocs serve -f ./docs/mkdocs.yml')
| -3,700,643,923,249,329,000
|
Generates the doc and serves it on a local http server. Pass '-- build' to build statically instead.
|
noxfile.py
|
docs
|
texnofobix/python-genbadge
|
python
|
@power_session(python=[PY37])
def docs(session: PowerSession):
session.install_reqs(phase='docs', phase_reqs=['mkdocs-material', 'mkdocs', 'pymdown-extensions', 'pygments'])
if session.posargs:
session.run2(('mkdocs -f ./docs/mkdocs.yml %s' % ' '.join(session.posargs)))
else:
session.run2('mkdocs serve -f ./docs/mkdocs.yml')
|
@power_session(python=[PY37])
def publish(session: PowerSession):
'Deploy the docs+reports on github pages. Note: this rebuilds the docs'
session.install_reqs(phase='mkdocs', phase_reqs=['mkdocs-material', 'mkdocs', 'pymdown-extensions', 'pygments'])
session.run2('mkdocs build -f ./docs/mkdocs.yml')
if (not Folders.site_reports.exists()):
raise ValueError("Test reports have not been built yet. Please run 'nox -s tests-3.7' first")
session.run2('mkdocs gh-deploy -f ./docs/mkdocs.yml')
| -5,760,951,214,420,701,000
|
Deploy the docs+reports on github pages. Note: this rebuilds the docs
|
noxfile.py
|
publish
|
texnofobix/python-genbadge
|
python
|
@power_session(python=[PY37])
def publish(session: PowerSession):
session.install_reqs(phase='mkdocs', phase_reqs=['mkdocs-material', 'mkdocs', 'pymdown-extensions', 'pygments'])
session.run2('mkdocs build -f ./docs/mkdocs.yml')
if (not Folders.site_reports.exists()):
raise ValueError("Test reports have not been built yet. Please run 'nox -s tests-3.7' first")
session.run2('mkdocs gh-deploy -f ./docs/mkdocs.yml')
|
@power_session(python=[PY37])
def release(session: PowerSession):
'Create a release on github corresponding to the latest tag'
from setuptools_scm import get_version
from setuptools_scm.version import guess_next_dev_version
version = []
def my_scheme(version_):
version.append(version_)
return guess_next_dev_version(version_)
current_tag = get_version('.', version_scheme=my_scheme)
session.install_reqs(phase='setup.py#dist', phase_reqs=['setuptools_scm'])
rm_folder(Folders.dist)
session.run2('python setup.py sdist bdist_wheel')
if (version[0].dirty or (not version[0].exact)):
raise ValueError('You need to execute this action on a clean tag version with no local changes.')
if (len(session.posargs) == 1):
gh_token = session.posargs[0]
publish_on_pypi = False
elif (len(session.posargs) == 0):
publish_on_pypi = True
import keyring
gh_token = keyring.get_password('https://docs.github.com/en/rest', 'token')
assert (len(gh_token) > 0)
else:
raise ValueError('Only a single positional arg is allowed for now')
if publish_on_pypi:
session.install_reqs(phase='PyPi', phase_reqs=['twine'])
session.run2('twine upload dist/* -u smarie')
session.install_reqs(phase='release', phase_reqs=['click', 'PyGithub'])
session.run2('python ci_tools/github_release.py -s {gh_token} --repo-slug {gh_org}/{gh_repo} -cf ./docs/changelog.md -d https://{gh_org}.github.io/{gh_repo}/changelog.html {tag}'.format(gh_token=gh_token, gh_org=gh_org, gh_repo=gh_repo, tag=current_tag))
| 3,323,425,240,592,413,000
|
Create a release on github corresponding to the latest tag
|
noxfile.py
|
release
|
texnofobix/python-genbadge
|
python
|
@power_session(python=[PY37])
def release(session: PowerSession):
from setuptools_scm import get_version
from setuptools_scm.version import guess_next_dev_version
version = []
def my_scheme(version_):
version.append(version_)
return guess_next_dev_version(version_)
current_tag = get_version('.', version_scheme=my_scheme)
session.install_reqs(phase='setup.py#dist', phase_reqs=['setuptools_scm'])
rm_folder(Folders.dist)
session.run2('python setup.py sdist bdist_wheel')
if (version[0].dirty or (not version[0].exact)):
raise ValueError('You need to execute this action on a clean tag version with no local changes.')
if (len(session.posargs) == 1):
gh_token = session.posargs[0]
publish_on_pypi = False
elif (len(session.posargs) == 0):
publish_on_pypi = True
import keyring
gh_token = keyring.get_password('https://docs.github.com/en/rest', 'token')
assert (len(gh_token) > 0)
else:
raise ValueError('Only a single positional arg is allowed for now')
if publish_on_pypi:
session.install_reqs(phase='PyPi', phase_reqs=['twine'])
session.run2('twine upload dist/* -u smarie')
session.install_reqs(phase='release', phase_reqs=['click', 'PyGithub'])
session.run2('python ci_tools/github_release.py -s {gh_token} --repo-slug {gh_org}/{gh_repo} -cf ./docs/changelog.md -d https://{gh_org}.github.io/{gh_repo}/changelog.html {tag}'.format(gh_token=gh_token, gh_org=gh_org, gh_repo=gh_repo, tag=current_tag))
|
@nox.session(python=False)
def gha_list(session):
'(mandatory arg: <base_session_name>) Prints all sessions available for <base_session_name>, for GithubActions.'
if (len(session.posargs) != 1):
raise ValueError('This session has a mandatory argument: <base_session_name>')
session_func = globals()[session.posargs[0]]
try:
session_func.parametrize
except AttributeError:
sessions_list = [('%s-%s' % (session_func.__name__, py)) for py in session_func.python]
else:
sessions_list = [('%s-%s(%s)' % (session_func.__name__, py, param)) for (py, param) in product(session_func.python, session_func.parametrize)]
print(dumps(sessions_list))
| 4,695,728,447,206,028,000
|
(mandatory arg: <base_session_name>) Prints all sessions available for <base_session_name>, for GithubActions.
|
noxfile.py
|
gha_list
|
texnofobix/python-genbadge
|
python
|
@nox.session(python=False)
def gha_list(session):
if (len(session.posargs) != 1):
raise ValueError('This session has a mandatory argument: <base_session_name>')
session_func = globals()[session.posargs[0]]
try:
session_func.parametrize
except AttributeError:
sessions_list = [('%s-%s' % (session_func.__name__, py)) for py in session_func.python]
else:
sessions_list = [('%s-%s(%s)' % (session_func.__name__, py, param)) for (py, param) in product(session_func.python, session_func.parametrize)]
print(dumps(sessions_list))
|
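The printed JSON is what a GitHub Actions matrix consumes; a standalone mirror of the naming scheme (session name, pythons, and params here are hypothetical):

from json import dumps
from itertools import product

name, pythons, params = 'tests', ['3.7', '3.8'], ['cov', 'nocov']
# Parametrized sessions get the 'name-py(param)' form used by gha_list.
print(dumps(['%s-%s(%s)' % (name, py, p) for py, p in product(pythons, params)]))
# ["tests-3.7(cov)", "tests-3.7(nocov)", "tests-3.8(cov)", "tests-3.8(nocov)"]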
def _query_for_quote(symbol):
    '\n    Returns the query_pack that requests instrument info for a given symbol\n    Callers of this function should all be SDK code actively requesting instrument info\n    User requests for instrument info always start with PYSDK_api, because during backtesting they carry a timestamp parameter and should not call this function\n    '
symbol_list = (symbol if isinstance(symbol, list) else [symbol])
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(instrument_id=symbol_list)
_add_all_frags(query)
return {'aid': 'ins_query', 'query_id': _generate_uuid(prefix='PYSDK_quote_'), 'query': op.__to_graphql__()}
| -8,257,304,933,987,689,000
|
Returns the query_pack that requests instrument info for a given symbol
Callers of this function should all be SDK code actively requesting instrument info
User requests for instrument info always start with PYSDK_api, because during backtesting they carry a timestamp parameter and should not call this function
|
tqsdk/utils.py
|
_query_for_quote
|
Al-Wang/tqsdk-python
|
python
|
def _query_for_quote(symbol):
    '\n    Returns the query_pack that requests instrument info for a given symbol\n    Callers of this function should all be SDK code actively requesting instrument info\n    User requests for instrument info always start with PYSDK_api, because during backtesting they carry a timestamp parameter and should not call this function\n    '
symbol_list = (symbol if isinstance(symbol, list) else [symbol])
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(instrument_id=symbol_list)
_add_all_frags(query)
return {'aid': 'ins_query', 'query_id': _generate_uuid(prefix='PYSDK_quote_'), 'query': op.__to_graphql__()}
|
def _query_for_init():
    '\n    Returns a query for certain instrument classes\n    todo: for backward compatibility with usages like api._data["quote"].items() exposed to users in older versions, restrict exchanges to ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]\n    '
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(class_=['FUTURE', 'INDEX', 'OPTION', 'COMBINE', 'CONT'], exchange_id=['SHFE', 'DCE', 'CZCE', 'INE', 'CFFEX', 'KQ'])
_add_all_frags(query)
return op.__to_graphql__()
| -7,600,899,964,340,058,000
|
Returns a query for certain instrument classes
todo: for backward compatibility with usages like api._data["quote"].items() exposed to users in older versions, restrict exchanges to ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]
|
tqsdk/utils.py
|
_query_for_init
|
Al-Wang/tqsdk-python
|
python
|
def _query_for_init():
    '\n    Returns a query for certain instrument classes\n    todo: for backward compatibility with usages like api._data["quote"].items() exposed to users in older versions, restrict exchanges to ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]\n    '
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(class_=['FUTURE', 'INDEX', 'OPTION', 'COMBINE', 'CONT'], exchange_id=['SHFE', 'DCE', 'CZCE', 'INE', 'CFFEX', 'KQ'])
_add_all_frags(query)
return op.__to_graphql__()
|
def _quotes_add_night(quotes):
    'Add night-session trading times for products in quotes that should have a night session but lack one in the market instrument file'
for symbol in quotes:
product_id = quotes[symbol].get('product_id')
if (quotes[symbol].get('trading_time') and product_id):
key = f"{quotes[symbol].get('exchange_id')}.{product_id}"
if ((key in night_trading_table) and (not quotes[symbol]['trading_time'].get('night'))):
quotes[symbol]['trading_time']['night'] = [night_trading_table[key]]
| 198,753,870,435,223,900
|
Add night-session trading times for products in quotes that should have a night session but lack one in the market instrument file
|
tqsdk/utils.py
|
_quotes_add_night
|
Al-Wang/tqsdk-python
|
python
|
def _quotes_add_night(quotes):
for symbol in quotes:
product_id = quotes[symbol].get('product_id')
if (quotes[symbol].get('trading_time') and product_id):
key = f"{quotes[symbol].get('exchange_id')}.{product_id}"
if ((key in night_trading_table) and (not quotes[symbol]['trading_time'].get('night'))):
quotes[symbol]['trading_time']['night'] = [night_trading_table[key]]
|
def _bisect_value(a, x, priority='right'):
    '\n    Returns the value at the index obtained from bisect_right(); when the insertion point is equidistant from the neighboring elements, priority selects whether the right or the left value is preferred\n    a: must be a list already sorted in ascending order\n    bisect_right : Return the index where to insert item x in list a, assuming a is sorted.\n    '
assert (priority in ['left', 'right'])
insert_index = bisect_right(a, x)
if (0 < insert_index < len(a)):
left_dis = (x - a[(insert_index - 1)])
right_dis = (a[insert_index] - x)
if (left_dis == right_dis):
mid_index = ((insert_index - 1) if (priority == 'left') else insert_index)
elif (left_dis < right_dis):
mid_index = (insert_index - 1)
else:
mid_index = insert_index
else:
assert ((insert_index == 0) or (insert_index == len(a)))
mid_index = (0 if (insert_index == 0) else (len(a) - 1))
return a[mid_index]
| -4,910,537,497,647,901,000
|
Returns the value at the index obtained from bisect_right(); when the insertion point is equidistant from the neighboring elements, priority selects whether the right or the left value is preferred
a: must be a list already sorted in ascending order
bisect_right : Return the index where to insert item x in list a, assuming a is sorted.
|
tqsdk/utils.py
|
_bisect_value
|
Al-Wang/tqsdk-python
|
python
|
def _bisect_value(a, x, priority='right'):
    '\n    Returns the value at the index obtained from bisect_right(); when the insertion point is equidistant from the neighboring elements, priority selects whether the right or the left value is preferred\n    a: must be a list already sorted in ascending order\n    bisect_right : Return the index where to insert item x in list a, assuming a is sorted.\n    '
assert (priority in ['left', 'right'])
insert_index = bisect_right(a, x)
if (0 < insert_index < len(a)):
left_dis = (x - a[(insert_index - 1)])
right_dis = (a[insert_index] - x)
if (left_dis == right_dis):
mid_index = ((insert_index - 1) if (priority == 'left') else insert_index)
elif (left_dis < right_dis):
mid_index = (insert_index - 1)
else:
mid_index = insert_index
else:
assert ((insert_index == 0) or (insert_index == len(a)))
mid_index = (0 if (insert_index == 0) else (len(a) - 1))
return a[mid_index]
|
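With _bisect_value and its bisect_right import in scope, the tie-breaking behaviour can be seen directly:

a = [0, 10, 20]
# 15 is equidistant from 10 and 20: 'right' (the default) prefers 20, 'left' prefers 10.
print(_bisect_value(a, 15))                   # 20
print(_bisect_value(a, 15, priority='left'))  # 10
print(_bisect_value(a, -5))                   # 0, clamped to the first element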
def testcase_readergroup_add(self):
'tests groups=groups+[newgroups]'
groupssnapshot = list(readergroups())
groups = readergroups()
groups = (groups + [self.pinpadgroup])
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups = (groups + [self.biogroup, self.pinpadgroup])
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
| 2,148,898,669,865,351,000
|
tests groups=groups+[newgroups]
|
cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py
|
testcase_readergroup_add
|
kyletanyag/LL-Smartcard
|
python
|
def testcase_readergroup_add(self):
groupssnapshot = list(readergroups())
groups = readergroups()
groups = (groups + [self.pinpadgroup])
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups = (groups + [self.biogroup, self.pinpadgroup])
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
|
def testcase_readergroup_iadd(self):
'test groups+=[newgroups]'
groupssnapshot = list(readergroups())
groups = readergroups()
groups += [self.pinpadgroup]
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups += [self.biogroup, self.pinpadgroup]
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
| -4,554,897,509,285,952,500
|
test groups+=[newgroups]
|
cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py
|
testcase_readergroup_iadd
|
kyletanyag/LL-Smartcard
|
python
|
def testcase_readergroup_iadd(self):
groupssnapshot = list(readergroups())
groups = readergroups()
groups += [self.pinpadgroup]
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups += [self.biogroup, self.pinpadgroup]
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
|
def testcase_readergroup_radd(self):
'test groups=[newgroups]+groups'
groupssnapshot = list(readergroups())
groups = readergroups()
zgroups = ([self.pinpadgroup] + groups)
self.assertEqual(groups, groupssnapshot)
self.assertEqual(zgroups, (groupssnapshot + [self.pinpadgroup]))
self.assertTrue(isinstance(zgroups, type([])))
self.assertTrue(isinstance(groups, type(readergroups())))
zgroups = ([self.pinpadgroup, self.biogroup, self.pinpadgroup] + groups)
self.assertEqual(groups, groupssnapshot)
self.assertEqual(zgroups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
self.assertTrue(isinstance(zgroups, type([])))
self.assertTrue(isinstance(groups, type(readergroups())))
| 6,720,619,275,553,248,000
|
test groups=[newgroups]+groups
|
cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py
|
testcase_readergroup_radd
|
kyletanyag/LL-Smartcard
|
python
|
def testcase_readergroup_radd(self):
groupssnapshot = list(readergroups())
groups = readergroups()
zgroups = ([self.pinpadgroup] + groups)
self.assertEqual(groups, groupssnapshot)
self.assertEqual(zgroups, (groupssnapshot + [self.pinpadgroup]))
self.assertTrue(isinstance(zgroups, type([])))
self.assertTrue(isinstance(groups, type(readergroups())))
zgroups = ([self.pinpadgroup, self.biogroup, self.pinpadgroup] + groups)
self.assertEqual(groups, groupssnapshot)
self.assertEqual(zgroups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
self.assertTrue(isinstance(zgroups, type([])))
self.assertTrue(isinstance(groups, type(readergroups())))
|
def testcase_readergroup_append(self):
'test groups.append(newgroups)'
groupssnapshot = list(readergroups())
groups = readergroups()
groups.append(self.pinpadgroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups.append(self.pinpadgroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups.append(self.biogroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
| 8,593,865,738,370,409,000
|
test groups.append(newgroups)
|
cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py
|
testcase_readergroup_append
|
kyletanyag/LL-Smartcard
|
python
|
def testcase_readergroup_append(self):
groupssnapshot = list(readergroups())
groups = readergroups()
groups.append(self.pinpadgroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups.append(self.pinpadgroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups.append(self.biogroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
|
def testcase_readergroup_insert(self):
'test groups.insert(i,newgroups)'
groupssnapshot = list(readergroups())
groups = readergroups()
groups.insert(0, self.pinpadgroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups.insert(1, self.pinpadgroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups.insert(1, self.biogroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
| 8,374,669,445,519,692,000
|
test groups.insert(i,newgroups)
|
cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py
|
testcase_readergroup_insert
|
kyletanyag/LL-Smartcard
|
python
|
def testcase_readergroup_insert(self):
groupssnapshot = list(readergroups())
groups = readergroups()
groups.insert(0, self.pinpadgroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups.insert(1, self.pinpadgroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup]))
groups.insert(1, self.biogroup)
self.assertEqual(groups, (groupssnapshot + [self.pinpadgroup, self.biogroup]))
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
|
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
'\n Load ParallelComponent of the Kaldi model.\n ParallelComponent contains parallel nested networks.\n VariadicSplit is inserted before nested networks.\n Outputs of nested networks concatenate with layer Concat.\n\n :param file_descr: descriptor of the model file\n :param graph: graph with the topology.\n :param prev_layer_id: id of the input layers for parallel component layer\n :return: id of the concat layer - last layer of the parallel component layers\n '
nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
log.debug('Model contains parallel component with {} nested networks'.format(nnet_count))
split_points = []
outputs = []
inputs = []
for i in range(nnet_count):
read_token_value(file_descr, b'<NestedNnet>')
collect_until_token(file_descr, b'<Nnet>')
g = Graph()
load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))
input_node = Node(g, 'Parameter')
split_points.append(input_node['shape'][1])
g.remove_node(input_node.id)
mapping = {node: graph.unique_id(node) for node in g.nodes(data=False) if (node in graph)}
g = nx.relabel_nodes(g, mapping)
for val in mapping.values():
g.node[val]['name'] = val
graph.add_nodes_from(g.nodes(data=True))
graph.add_edges_from(g.edges(data=True))
sorted_nodes = tuple(nx.topological_sort(g))
outputs.append(Node(graph, sorted_nodes[(- 1)]))
inputs.append(Node(graph, sorted_nodes[0]))
split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
attrs = {'out_ports_count': nnet_count, 'size_splits': split_points, 'axis': 1, 'name': split_id}
variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
prev_layer_node = Node(graph, prev_layer_id)
prev_layer_node.add_output_port(0)
graph.create_edge(prev_layer_node, variadic_split_node, 0, 0, create_edge_attrs(prev_layer_id, variadic_split_node.id, prev_layer_id))
concat_id = graph.unique_id(prefix='Concat')
graph.add_node(concat_id, parameters=None, op='concat', kind='op')
concat_node = Node(graph, concat_id)
for (i, (input_node, output_node)) in enumerate(zip(inputs, outputs)):
output_node.add_output_port(0)
concat_node.add_input_port(i)
graph.create_edge(output_node, concat_node, 0, i, create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
graph.create_edge(variadic_split_node, input_node, i, 0, create_edge_attrs(variadic_split_node.id, input_node.id, variadic_split_node.id, 0, i))
return concat_id
| -6,662,843,149,624,463,000
|
Load ParallelComponent of the Kaldi model.
ParallelComponent contains parallel nested networks.
VariadicSplit is inserted before nested networks.
Outputs of nested networks concatenate with layer Concat.
:param file_descr: descriptor of the model file
:param graph: graph with the topology.
:param prev_layer_id: id of the input layers for parallel component layer
:return: id of the concat layer - last layer of the parallel component layers
|
tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py
|
load_parallel_component
|
3Demonica/openvino
|
python
|
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
'\n Load ParallelComponent of the Kaldi model.\n ParallelComponent contains parallel nested networks.\n VariadicSplit is inserted before nested networks.\n Outputs of nested networks concatenate with layer Concat.\n\n :param file_descr: descriptor of the model file\n :param graph: graph with the topology.\n :param prev_layer_id: id of the input layers for parallel component layer\n :return: id of the concat layer - last layer of the parallel component layers\n '
nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
log.debug('Model contains parallel component with {} nested networks'.format(nnet_count))
split_points = []
outputs = []
inputs = []
for i in range(nnet_count):
read_token_value(file_descr, b'<NestedNnet>')
collect_until_token(file_descr, b'<Nnet>')
g = Graph()
load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))
input_node = Node(g, 'Parameter')
split_points.append(input_node['shape'][1])
g.remove_node(input_node.id)
mapping = {node: graph.unique_id(node) for node in g.nodes(data=False) if (node in graph)}
g = nx.relabel_nodes(g, mapping)
for val in mapping.values():
g.node[val]['name'] = val
graph.add_nodes_from(g.nodes(data=True))
graph.add_edges_from(g.edges(data=True))
sorted_nodes = tuple(nx.topological_sort(g))
outputs.append(Node(graph, sorted_nodes[(- 1)]))
inputs.append(Node(graph, sorted_nodes[0]))
split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
attrs = {'out_ports_count': nnet_count, 'size_splits': split_points, 'axis': 1, 'name': split_id}
variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
prev_layer_node = Node(graph, prev_layer_id)
prev_layer_node.add_output_port(0)
graph.create_edge(prev_layer_node, variadic_split_node, 0, 0, create_edge_attrs(prev_layer_id, variadic_split_node.id, prev_layer_id))
concat_id = graph.unique_id(prefix='Concat')
graph.add_node(concat_id, parameters=None, op='concat', kind='op')
concat_node = Node(graph, concat_id)
for (i, (input_node, output_node)) in enumerate(zip(inputs, outputs)):
output_node.add_output_port(0)
concat_node.add_input_port(i)
graph.create_edge(output_node, concat_node, 0, i, create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
graph.create_edge(variadic_split_node, input_node, i, 0, create_edge_attrs(variadic_split_node.id, input_node.id, variadic_split_node.id, 0, i))
return concat_id
|
def load_kaldi_model(graph, nnet_path):
'\n Structure of the file is the following:\n magic-number(16896)<Nnet> <Next Layer Name> weights etc.\n :param nnet_path:\n :return:\n '
nnet_name = None
if isinstance(nnet_path, str):
file_desc = open(nnet_path, 'rb')
nnet_name = get_name_from_path(nnet_path)
elif isinstance(nnet_path, IOBase):
file_desc = nnet_path
else:
raise Error('Unsupported type of Kaldi model')
tag = find_next_tag(file_desc)
if (tag == '<Nnet>'):
load_function = load_kalid_nnet1_model
elif (tag == '<TransitionModel>'):
while ((tag != '<Nnet>') and (tag != '<Nnet3>')):
tag = find_next_tag(file_desc)
if (tag == '<Nnet3>'):
load_function = load_kaldi_nnet3_model
else:
load_function = load_kalid_nnet2_model
elif (tag == '<Nnet3>'):
load_function = load_kaldi_nnet3_model
else:
raise Error('Kaldi model should start with <Nnet> or <TransitionModel> tag. ', refer_to_faq_msg(89))
read_placeholder(file_desc, 1)
return load_function(graph, file_desc, nnet_name)
| 4,593,314,106,552,690,000
|
Structure of the file is the following:
magic-number(16896)<Nnet> <Next Layer Name> weights etc.
:param nnet_path:
:return:
|
tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py
|
load_kaldi_model
|
3Demonica/openvino
|
python
|
def load_kaldi_model(graph, nnet_path):
'\n Structure of the file is the following:\n magic-number(16896)<Nnet> <Next Layer Name> weights etc.\n :param nnet_path:\n :return:\n '
nnet_name = None
if isinstance(nnet_path, str):
file_desc = open(nnet_path, 'rb')
nnet_name = get_name_from_path(nnet_path)
elif isinstance(nnet_path, IOBase):
file_desc = nnet_path
else:
raise Error('Unsupported type of Kaldi model')
tag = find_next_tag(file_desc)
if (tag == '<Nnet>'):
load_function = load_kalid_nnet1_model
elif (tag == '<TransitionModel>'):
while ((tag != '<Nnet>') and (tag != '<Nnet3>')):
tag = find_next_tag(file_desc)
if (tag == '<Nnet3>'):
load_function = load_kaldi_nnet3_model
else:
load_function = load_kalid_nnet2_model
elif (tag == '<Nnet3>'):
load_function = load_kaldi_nnet3_model
else:
raise Error('Kaldi model should start with <Nnet> or <TransitionModel> tag. ', refer_to_faq_msg(89))
read_placeholder(file_desc, 1)
return load_function(graph, file_desc, nnet_name)
|
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd='\r'):
'\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)\n \n From: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console\n '
percent = (('{0:.' + str(decimals)) + 'f}').format((100 * (iteration / float(total))))
filledLength = int(((length * iteration) // total))
bar = ((fill * filledLength) + ('-' * (length - filledLength)))
print(('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix)), end=printEnd)
if (iteration == total):
print()
| -4,832,368,723,198,576,000
|
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
From: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
|
src/utils/console_functions.py
|
printProgressBar
|
MariusDgr/AudioMining
|
python
|
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd='\r'):
'\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)\n \n From: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console\n '
percent = (('{0:.' + str(decimals)) + 'f}').format((100 * (iteration / float(total))))
filledLength = int(((length * iteration) // total))
bar = ((fill * filledLength) + ('-' * (length - filledLength)))
print(('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix)), end=printEnd)
if (iteration == total):
print()
|
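The canonical usage from the cited Stack Overflow answer, with the function above in scope:

import time

items = list(range(50))
printProgressBar(0, len(items), prefix='Progress:', suffix='Complete', length=40)
for i, _ in enumerate(items):
    time.sleep(0.05)  # stand-in for real work
    printProgressBar(i + 1, len(items), prefix='Progress:', suffix='Complete', length=40)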
def portfolio_metrics(weights, avg_xs_returns, covariance_matrix):
' Compute basic portfolio metrics: return, stdv, sharpe ratio '
portfolio_return = np.sum((weights * avg_xs_returns))
portfolio_stdv = np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
portfolio_sharpe = (portfolio_return / portfolio_stdv)
tickers = covariance_matrix.columns
metrics = {'return': portfolio_return, 'stdv': portfolio_stdv, 'sharpe': portfolio_sharpe, 'weights': weights}
metrics.update(dict([(ticker, weight) for (ticker, weight) in zip(tickers, weights)]).items())
return metrics
| -7,040,679,439,820,220,000
|
Compute basic portfolio metrics: return, stdv, sharpe ratio
|
portfolio_functions.py
|
portfolio_metrics
|
MaxGosselin/portfolio_optimizer
|
python
|
def portfolio_metrics(weights, avg_xs_returns, covariance_matrix):
' '
portfolio_return = np.sum((weights * avg_xs_returns))
portfolio_stdv = np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
portfolio_sharpe = (portfolio_return / portfolio_stdv)
tickers = covariance_matrix.columns
metrics = {'return': portfolio_return, 'stdv': portfolio_stdv, 'sharpe': portfolio_sharpe, 'weights': weights}
metrics.update(dict([(ticker, weight) for (ticker, weight) in zip(tickers, weights)]).items())
return metrics
|
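A toy invocation with hypothetical numbers (two assets, equal weights), assuming numpy/pandas and the function above:

import numpy as np
import pandas as pd

cov = pd.DataFrame([[0.04, 0.01], [0.01, 0.09]], columns=['AAA', 'BBB'], index=['AAA', 'BBB'])
avg = pd.Series([0.08, 0.12], index=['AAA', 'BBB'])
m = portfolio_metrics(np.array([0.5, 0.5]), avg, cov)
print(m['return'], m['stdv'], m['sharpe'])  # 0.10, ~0.194, ~0.516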
def simulate_portfolios(iters, xs_stats, covariance_matrix):
' What we want here is to randomly generate portfolios that will sit \n inside the efficiency frontier for illustrative purposes '
simulations = []
while (iters > 1):
weights = np.random.random(len(xs_stats.columns))
weights /= np.sum(weights)
simulations.append(portfolio_metrics(weights, xs_stats.loc['Avg'], covariance_matrix))
iters -= 1
return simulations
| -4,991,181,571,714,116,000
|
What we want here is to randomly generate portfolios that will sit
inside the efficiency frontier for illustrative purposes
|
portfolio_functions.py
|
simulate_portfolios
|
MaxGosselin/portfolio_optimizer
|
python
|
def simulate_portfolios(iters, xs_stats, covariance_matrix):
' What we want here is to randomly generate portfolios that will sit \n inside the efficiency frontier for illustrative purposes '
simulations = []
while (iters > 1):
weights = np.random.random(len(xs_stats.columns))
weights /= np.sum(weights)
simulations.append(portfolio_metrics(weights, xs_stats.loc['Avg'], covariance_matrix))
iters -= 1
return simulations
|
def solve_minvar(xs_avg, covariance_matrix):
' Solve for the weights of the minimum variance portfolio \n\n Constraints:\n sum of weights = 1,\n weights bound by [0, 0.2],\n\n Returns the weights and the jacobian used to generate the solution.\n \n '
def __minvar(weights, xs_avg, covariance_matrix):
' Anonymous function to compute stdv '
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': (lambda x: (np.sum(x) - 1))}]
bounds = ([(0, 0.2)] * p_size)
minimized_weights = optimize.minimize(__minvar, np.zeros(p_size), args=args, method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter': 1000})
return minimized_weights
| -3,516,912,878,263,464,000
|
Solve for the weights of the minimum variance portfolio
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
Returns the weights and the jacobian used to generate the solution.
|
portfolio_functions.py
|
solve_minvar
|
MaxGosselin/portfolio_optimizer
|
python
|
def solve_minvar(xs_avg, covariance_matrix):
' Solve for the weights of the minimum variance portfolio \n\n Constraints:\n sum of weights = 1,\n weights bound by [0, 0.2],\n\n Returns the weights and the jacobian used to generate the solution.\n \n '
def __minvar(weights, xs_avg, covariance_matrix):
' Anonymous function to compute stdv '
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': (lambda x: (np.sum(x) - 1))}]
bounds = ([(0, 0.2)] * p_size)
minimized_weights = optimize.minimize(__minvar, np.zeros(p_size), args=args, method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter': 1000})
return minimized_weights
|
def solve_maxsharpe(xs_avg, covariance_matrix):
' Solve for the weights of the maximum Sharpe ratio portfolio \n\n Constraints:\n sum of weights = 1,\n weights bound by [0, 0.2],\n\n Returns the weights and the jacobian used to generate the solution.\n \n '
def __max_by_min_sharpe(weights, xs_avg, covariance_matrix):
' Anonymous function to compute sharpe ratio, note that since scipy only minimizes we go negative. '
pm = portfolio_metrics(weights, xs_avg, covariance_matrix)
return ((- pm['return']) / pm['stdv'])
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': (lambda x: (np.sum(x) - 1))}]
bounds = ([(0, 0.2)] * p_size)
minimized_weights = optimize.minimize(__max_by_min_sharpe, ((1 / p_size) * np.ones(p_size)), args=args, method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter': 1000})
return minimized_weights
| -6,017,148,510,320,264,000
|
Solve for the weights of the maximum Sharpe ratio portfolio
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
Returns the weights and the jacobian used to generate the solution.
|
portfolio_functions.py
|
solve_maxsharpe
|
MaxGosselin/portfolio_optimizer
|
python
|
def solve_maxsharpe(xs_avg, covariance_matrix):
' Solve for the weights of the maximum Sharpe ratio portfolio \n\n Constraints:\n sum of weights = 1,\n weights bound by [0, 0.2],\n\n Returns the weights and the jacobian used to generate the solution.\n \n '
def __max_by_min_sharpe(weights, xs_avg, covariance_matrix):
' Anonymous function to compute sharpe ratio, note that since scipy only minimizes we go negative. '
pm = portfolio_metrics(weights, xs_avg, covariance_matrix)
return ((- pm['return']) / pm['stdv'])
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': (lambda x: (np.sum(x) - 1))}]
bounds = ([(0, 0.2)] * p_size)
minimized_weights = optimize.minimize(__max_by_min_sharpe, ((1 / p_size) * np.ones(p_size)), args=args, method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter': 1000})
return minimized_weights
|
def solve_for_target_return(xs_avg, covariance_matrix, target):
' Solve for the weights of the minimum variance portfolio which has\n a specific targeted return.\n\n Constraints:\n sum of weights = 1,\n weights bound by [0, 0.2],\n portfolio return = target return,\n\n Returns the weights and the jacobian used to generate the solution.\n \n '
def __minvar(weights, xs_avg, covariance_matrix):
' Anonymous function to compute stdv '
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
def __match_target(weights):
' Anonymous function to check equality with the target return '
return np.sum((weights * xs_avg))
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': (lambda x: (np.sum(x) - 1))}, {'type': 'eq', 'fun': (lambda x: (__match_target(x) - target))}]
bounds = ([(0, 0.2)] * p_size)
minimized_weights = optimize.minimize(__minvar, ((1 / p_size) * np.ones(p_size)), args=args, method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter': 1000})
return minimized_weights
| 6,640,005,835,390,372,000
|
Solve for the weights of the minimum variance portfolio which has
a specific targeted return.
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
portfolio return = target return,
Returns the weights and the jacobian used to generate the solution.
|
portfolio_functions.py
|
solve_for_target_return
|
MaxGosselin/portfolio_optimizer
|
python
|
def solve_for_target_return(xs_avg, covariance_matrix, target):
' Solve for the weights of the minimum variance portfolio which has\n a specific targeted return.\n\n Constraints:\n sum of weights = 1,\n weights bound by [0, 0.2],\n portfolio return = target return,\n\n Returns the weights and the jacobian used to generate the solution.\n \n '
def __minvar(weights, xs_avg, covariance_matrix):
' Anonymous function to compute stdv '
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
def __match_target(weights):
' Anonymous function to check equality with the target return '
return np.sum((weights * xs_avg))
p_size = len(xs_avg)
args = (xs_avg, covariance_matrix)
constraints = [{'type': 'eq', 'fun': (lambda x: (np.sum(x) - 1))}, {'type': 'eq', 'fun': (lambda x: (__match_target(x) - target))}]
bounds = ([(0, 0.2)] * p_size)
minimized_weights = optimize.minimize(__minvar, ((1 / p_size) * np.ones(p_size)), args=args, method='SLSQP', bounds=bounds, constraints=constraints, options={'maxiter': 1000})
return minimized_weights
|
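Sweeping targets traces the efficient frontier; a sketch with hypothetical data, assuming the module's imports (numpy as np, scipy's optimize) and the function above. Note the 0.2 per-asset cap means at least five assets are needed for the weights to sum to 1:

import numpy as np
import pandas as pd

n = 8
rng = np.random.default_rng(0)
A = rng.normal(size=(n, n))
cov = pd.DataFrame(A @ A.T / 1000)          # a positive-definite toy covariance
avg = pd.Series(np.linspace(0.02, 0.16, n)) # toy average excess returns
for target in np.linspace(0.07, 0.11, 5):
    res = solve_for_target_return(avg, cov, target)
    # res.x holds the weights, res.fun the minimized stdv at this target return.
    print(round(target, 3), round(res.fun, 4), res.success)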
def __minvar(weights, xs_avg, covariance_matrix):
' Anonymous function to compute stdv '
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
| 7,441,897,879,151,888,000
|
Anonymous function to compute stdv
|
portfolio_functions.py
|
__minvar
|
MaxGosselin/portfolio_optimizer
|
python
|
def __minvar(weights, xs_avg, covariance_matrix):
' '
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
|
def __max_by_min_sharpe(weights, xs_avg, covariance_matrix):
' Anonymous function to compute sharpe ratio, note that since scipy only minimizes we go negative. '
pm = portfolio_metrics(weights, xs_avg, covariance_matrix)
return ((- pm['return']) / pm['stdv'])
| -6,553,485,962,850,862,000
|
Anonymous function to compute sharpe ratio, note that since scipy only minimizes we go negative.
|
portfolio_functions.py
|
__max_by_min_sharpe
|
MaxGosselin/portfolio_optimizer
|
python
|
def __max_by_min_sharpe(weights, xs_avg, covariance_matrix):
' '
pm = portfolio_metrics(weights, xs_avg, covariance_matrix)
return ((- pm['return']) / pm['stdv'])
|
def __minvar(weights, xs_avg, covariance_matrix):
' Anonymous function to compute stdv '
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
| 7,441,897,879,151,888,000
|
Anonymous function to compute stdv
|
portfolio_functions.py
|
__minvar
|
MaxGosselin/portfolio_optimizer
|
python
|
def __minvar(weights, xs_avg, covariance_matrix):
' '
return np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))
|
def __match_target(weights):
' Anonymous function to check equality with the target return '
return np.sum((weights * xs_avg))
| -6,367,836,879,853,125,000
|
Anonymous function to check equality with the target return
|
portfolio_functions.py
|
__match_target
|
MaxGosselin/portfolio_optimizer
|
python
|
def __match_target(weights):
' '
return np.sum((weights * xs_avg))
|
def _base_parse(fh, builder, IndentationSetupF=False):
'Parses pattern definitions of the form:\n \n [ \t] => grid 4;\n [:intersection([:alpha:], [\\X064-\\X066]):] => space 1;\n\n In other words the right hand side *must* be a character set.\n\n ADAPTS: result to contain parsing information.\n '
while ((1 + 1) == 2):
skip_whitespace(fh)
if check(fh, '>'):
break
(pattern, identifier, sr) = _parse_definition_head(fh, builder.identifier_list)
if ((pattern is None) and (not builder.keyword_else_f)):
error.log("Keyword '\\else' cannot be used in indentation setup.", fh)
if builder.requires_count():
count = _read_value_specifier(fh, identifier, 1)
builder.specify(identifier, pattern, count, sr)
else:
builder.specify(identifier, pattern, sr)
if (not check(fh, ';')):
error.log(("Missing ';' after '%s' specification." % identifier), fh)
return builder.finalize()
| -2,264,336,187,077,974,500
|
Parses pattern definitions of the form:
[ \t] => grid 4;
[:intersection([:alpha:], [\X064-\X066]):] => space 1;
In other words the right hand side *must* be a character set.
ADAPTS: result to contain parsing information.
|
quex/input/files/specifier/counter.py
|
_base_parse
|
Liby99/quex
|
python
|
def _base_parse(fh, builder, IndentationSetupF=False):
'Parses pattern definitions of the form:\n \n [ \t] => grid 4;\n [:intersection([:alpha:], [\\X064-\\X066]):] => space 1;\n\n In other words the right hand side *must* be a character set.\n\n ADAPTS: result to contain parsing information.\n '
while ((1 + 1) == 2):
skip_whitespace(fh)
if check(fh, '>'):
break
(pattern, identifier, sr) = _parse_definition_head(fh, builder.identifier_list)
if ((pattern is None) and (not builder.keyword_else_f)):
error.log("Keyword '\\else' cannot be used in indentation setup.", fh)
if builder.requires_count():
count = _read_value_specifier(fh, identifier, 1)
builder.specify(identifier, pattern, count, sr)
else:
builder.specify(identifier, pattern, sr)
if (not check(fh, ';')):
error.log(("Missing ';' after '%s' specification." % identifier), fh)
return builder.finalize()
|
def _check_grid_values_integer_multiples(CaMap):
"If there are no spaces and the grid is on a homogeneous scale,\n => then the grid can be transformed into 'easy-to-compute' spaces.\n "
grid_value_list = []
min_info = None
for (character_set, info) in CaMap:
if (info.cc_type == E_CharacterCountType.COLUMN):
return
elif (info.cc_type != E_CharacterCountType.GRID):
continue
elif (type(info.value) in (str, str)):
return
grid_value_list.append(info.value)
if ((min_info is None) or (info.value < min_info.value)):
min_info = info
if (min_info is None):
return
if all((((x % min_info.value) == 0) for x in grid_value_list)):
error.warning((('Setup does not contain spaces, only grids (tabulators). All grid\nwidths are multiples of %i. The grid setup %s is equivalent to\n' % (min_info.value, repr(sorted(grid_value_list))[1:(- 1)])) + ('a setup with space counts %s. Space counts are faster to compute.\n' % repr([(x / min_info.value) for x in sorted(grid_value_list)])[1:(- 1)])), min_info.sr)
return
| -6,188,627,997,836,072,000
|
If there are no spaces and the grid is on a homogeneous scale,
=> then the grid can be transformed into 'easy-to-compute' spaces.
|
quex/input/files/specifier/counter.py
|
_check_grid_values_integer_multiples
|
Liby99/quex
|
python
|
def _check_grid_values_integer_multiples(CaMap):
"If there are no spaces and the grid is on a homogeneous scale,\n => then the grid can be transformed into 'easy-to-compute' spaces.\n "
grid_value_list = []
min_info = None
for (character_set, info) in CaMap:
if (info.cc_type == E_CharacterCountType.COLUMN):
return
elif (info.cc_type != E_CharacterCountType.GRID):
continue
elif (type(info.value) in (str, str)):
return
grid_value_list.append(info.value)
if ((min_info is None) or (info.value < min_info.value)):
min_info = info
if (min_info is None):
return
if all((((x % min_info.value) == 0) for x in grid_value_list)):
error.warning((('Setup does not contain spaces, only grids (tabulators). All grid\nwidths are multiples of %i. The grid setup %s is equivalent to\n' % (min_info.value, repr(sorted(grid_value_list))[1:(- 1)])) + ('a setup with space counts %s. Space counts are faster to compute.\n' % repr([(x / min_info.value) for x in sorted(grid_value_list)])[1:(- 1)])), min_info.sr)
return
|
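The arithmetic behind the warning in a few lines (hypothetical grid widths):

grid_values = [4, 8, 12]
m = min(grid_values)
# Every width is a multiple of the smallest, so grids reduce to space counts.
assert all(v % m == 0 for v in grid_values)
print([v // m for v in grid_values])  # [1, 2, 3]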
def check_defined(CaMap, SourceReference, CCT):
'Checks whether the character counter type has been defined in the \n map.\n \n THROWS: Error in case that is has not been defined.\n '
for (character_set, info) in CaMap:
if (info.cc_type == CCT):
return
error.warning(("Setup does not define '%s'." % cc_type_name_db[CCT]), SourceReference, SuppressCode=NotificationDB.warning_counter_setup_without_newline)
| -7,588,525,549,289,565,000
|
Checks whether the character counter type has been defined in the
map.
THROWS: Error in case it has not been defined.
|
quex/input/files/specifier/counter.py
|
check_defined
|
Liby99/quex
|
python
|
def check_defined(CaMap, SourceReference, CCT):
'Checks whether the character counter type has been defined in the \n map.\n \n THROWS: Error in case that is has not been defined.\n '
for (character_set, info) in CaMap:
if (info.cc_type == CCT):
return
error.warning(("Setup does not define '%s'." % cc_type_name_db[CCT]), SourceReference, SuppressCode=NotificationDB.warning_counter_setup_without_newline)
|
def __sm_newline_default(self):
"Default newline: '(\n)|(\r\n)'\n "
sm = DFA.from_character_set(NumberSet(ord('\n')))
if Setup.dos_carriage_return_newline_f:
sm.add_transition_sequence(sm.init_state_index, [ord('\r'), ord('\n')])
return sm
| 1,336,341,528,381,798,700
|
Default newline: '(\n)|(\r\n)'
|
quex/input/files/specifier/counter.py
|
__sm_newline_default
|
Liby99/quex
|
python
|
def __sm_newline_default(self):
"Default newline: '(\n)|(\r\n)'\n "
sm = DFA.from_character_set(NumberSet(ord('\n')))
if Setup.dos_carriage_return_newline_f:
sm.add_transition_sequence(sm.init_state_index, [ord('\r'), ord('\n')])
return sm
|
def __sm_whitespace_default(self):
"Try to define default whitespace ' ' or '\t' if their positions\n are not yet occupied in the count_command_map.\n "
sm_whitespace = DFA.from_character_set(NumberSet.from_integer_list([ord(' '), ord('\t')]))
sm_whitespace = beautifier.do(repeat.do(sm_whitespace, 1))
if (self.sm_badspace.get() is not None):
sm_whitespace = difference.do(sm_whitespace, self.sm_badspace.get())
if (sm_whitespace.is_Empty() or outrun.do(self.sm_badspace.get(), sm_whitespace)):
error.log("Cannot define default 'whitespace' in the frame of the given\ndefinition of 'bad'.", self.sm_badspace.sr)
return sm_whitespace
| -5,222,298,472,099,206,000
|
Try to define default whitespace ' ' or '\t' if their positions
are not yet occupied in the count_command_map.
|
quex/input/files/specifier/counter.py
|
__sm_whitespace_default
|
Liby99/quex
|
python
|
def __sm_whitespace_default(self):
"Try to define default whitespace ' ' or '\t' if their positions\n are not yet occupied in the count_command_map.\n "
sm_whitespace = DFA.from_character_set(NumberSet.from_integer_list([ord(' '), ord('\t')]))
sm_whitespace = beautifier.do(repeat.do(sm_whitespace, 1))
if (self.sm_badspace.get() is not None):
sm_whitespace = difference.do(sm_whitespace, self.sm_badspace.get())
if (sm_whitespace.is_Empty() or outrun.do(self.sm_badspace.get(), sm_whitespace)):
error.log("Cannot define default 'whitespace' in the frame of the given\ndefinition of 'bad'.", self.sm_badspace.sr)
return sm_whitespace
|
def _consistency_check(self):
"\n Required defintions:\n -- WHITESPACE (Default done automatically) => Assert.\n -- NEWLINE (Default done automatically) => Assert.\n\n Inadmissible 'eat-into'.\n -- SUPPRESSOR shall not eat into [NEWLINE]\n -- NEWLINE shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR]\n -- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].\n -- BADSPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].\n\n No common lexemes:\n -- WHITESPACE and BADSPACE may not have common lexemes.\n\n Outrun:\n -- NEWLINE may not start with SUSPEND and vice versa\n -- NEWLINE may not start with SUPPRESSOR and vice versa\n -- SUPPRESSOR may not start with SUSPEND and vice versa\n -- WHITESPACE shall not outrun BADSPACE, but the contrary is ok.\n (BADSPACE may outrun WHITESPACE (e.g: lexeme with 'tab' after whitespace')\n "
assert self.sm_whitespace.set_f()
assert self.sm_newline.set_f()
whitespace = self.sm_whitespace
newline = self.sm_newline
badspace = self.sm_badspace
suppressor = self.sm_newline_suppressor
suspend_list = self.sm_suspend_list
cmp_list = ((([(newline, badspace), (newline, whitespace), (newline, suppressor), (suppressor, newline), (whitespace, newline), (whitespace, suppressor), (badspace, newline), (badspace, suppressor)] + [(whitespace, x) for x in suspend_list]) + [(newline, x) for x in suspend_list]) + [(badspace, x) for x in suspend_list])
def _error(FormatStr, Sro0, Sro1):
error.log((FormatStr % (Sro0.name, Sro1.name)), Sro0.sr, DontExitF=True)
error.log(("'%s' defined here." % Sro1.name), Sro1.sr)
def _iterate(SroPairList):
for (first_sro, second_sro) in SroPairList:
(first, second) = (first_sro.get(), second_sro.get())
if ((first is None) or (second is None)):
continue
(yield (first_sro, first, second_sro, second))
for (first_sro, first, second_sro, second) in _iterate(cmp_list):
if swallow.ending_A_beginning_B(first, second):
_error("'%s' may eat into beginning of '%s'.", first_sro, second_sro)
elif swallow.inside_A_match_B(first, second):
_error("'%s' may swallow something matched by '%s'.", first_sro, second_sro)
for sm_suspend in self.sm_suspend_list:
(only_common_f, common_f) = tail.do(self.sm_newline.get(), sm_suspend.get())
error_check.tail(only_common_f, common_f, "indentation handler's newline", self.sm_newline.sr, 'suspend', sm_suspend.sr)
if (badspace.get() and (not intersection.do([badspace.get(), whitespace.get()]).is_Empty())):
_error("'%s' and '%s' match on common lexemes.", whitespace, badspace)
cmp_list = [(newline, suppressor), (suppressor, newline), (whitespace, badspace)]
for x in suspend_list:
cmp_list.extend([(newline, x), (x, newline), (suppressor, x), (x, suppressor)])
for (first_sro, first, second_sro, second) in _iterate(cmp_list):
if outrun.do(second, first):
_error("'%s' may outrun '%s'.", first_sro, second_sro)
| -4,516,450,391,270,619,000
|
Required definitions:
-- WHITESPACE (Default done automatically) => Assert.
-- NEWLINE (Default done automatically) => Assert.
Inadmissible 'eat-into'.
-- SUPPRESSOR shall not eat into [NEWLINE]
-- NEWLINE shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR]
-- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
-- BADSPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
No common lexemes:
-- WHITESPACE and BADSPACE may not have common lexemes.
Outrun:
-- NEWLINE may not start with SUSPEND and vice versa
-- NEWLINE may not start with SUPPRESSOR and vice versa
-- SUPPRESSOR may not start with SUSPEND and vice versa
-- WHITESPACE shall not outrun BADSPACE, but the contrary is ok.
(BADSPACE may outrun WHITESPACE, e.g. a lexeme with 'tab' after whitespace)
|
quex/input/files/specifier/counter.py
|
_consistency_check
|
Liby99/quex
|
python
|
def _consistency_check(self):
"\n Required defintions:\n -- WHITESPACE (Default done automatically) => Assert.\n -- NEWLINE (Default done automatically) => Assert.\n\n Inadmissible 'eat-into'.\n -- SUPPRESSOR shall not eat into [NEWLINE]\n -- NEWLINE shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR]\n -- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].\n -- BADSPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].\n\n No common lexemes:\n -- WHITESPACE and BADSPACE may not have common lexemes.\n\n Outrun:\n -- NEWLINE may not start with SUSPEND and vice versa\n -- NEWLINE may not start with SUPPRESSOR and vice versa\n -- SUPPRESSOR may not start with SUSPEND and vice versa\n -- WHITESPACE shall not outrun BADSPACE, but the contrary is ok.\n (BADSPACE may outrun WHITESPACE (e.g: lexeme with 'tab' after whitespace')\n "
assert self.sm_whitespace.set_f()
assert self.sm_newline.set_f()
whitespace = self.sm_whitespace
newline = self.sm_newline
badspace = self.sm_badspace
suppressor = self.sm_newline_suppressor
suspend_list = self.sm_suspend_list
cmp_list = ((([(newline, badspace), (newline, whitespace), (newline, suppressor), (suppressor, newline), (whitespace, newline), (whitespace, suppressor), (badspace, newline), (badspace, suppressor)] + [(whitespace, x) for x in suspend_list]) + [(newline, x) for x in suspend_list]) + [(badspace, x) for x in suspend_list])
def _error(FormatStr, Sro0, Sro1):
error.log((FormatStr % (Sro0.name, Sro1.name)), Sro0.sr, DontExitF=True)
error.log(("'%s' defined here." % Sro1.name), Sro1.sr)
def _iterate(SroPairList):
for (first_sro, second_sro) in SroPairList:
(first, second) = (first_sro.get(), second_sro.get())
if ((first is None) or (second is None)):
continue
(yield (first_sro, first, second_sro, second))
for (first_sro, first, second_sro, second) in _iterate(cmp_list):
if swallow.ending_A_beginning_B(first, second):
_error("'%s' may eat into beginning of '%s'.", first_sro, second_sro)
elif swallow.inside_A_match_B(first, second):
_error("'%s' may swallow something matched by '%s'.", first_sro, second_sro)
for sm_suspend in self.sm_suspend_list:
(only_common_f, common_f) = tail.do(self.sm_newline.get(), sm_suspend.get())
error_check.tail(only_common_f, common_f, "indentation handler's newline", self.sm_newline.sr, 'suspend', sm_suspend.sr)
if (badspace.get() and (not intersection.do([badspace.get(), whitespace.get()]).is_Empty())):
_error("'%s' and '%s' match on common lexemes.", whitespace, badspace)
cmp_list = [(newline, suppressor), (suppressor, newline), (whitespace, badspace)]
for x in suspend_list:
cmp_list.extend([(newline, x), (x, newline), (suppressor, x), (x, suppressor)])
for (first_sro, first, second_sro, second) in _iterate(cmp_list):
if outrun.do(second, first):
_error("'%s' may outrun '%s'.", first_sro, second_sro)
|
def get_enrollment_dates(course):
'Takes a course object and returns student dates of enrollment.\n Useful for handling late registrations and modified deadlines.\n\n Example:\n course.get_enrollment_dates()'
url_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'enrollments')
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
students = []
while ((resp is None) or (resp.links['current']['url'] != resp.links['last']['url'])):
resp = requests.get(url=(api_url if (resp is None) else resp.links['next']['url']), headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'type': ['StudentEnrollment'], 'per_page': '100'})
students.extend(resp.json())
enrollment_dates = {}
for st in students:
enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T', '-').replace(':', '-')[:16]
return enrollment_dates
| -5,592,095,403,443,192,000
|
Takes a course object and returns student dates of enrollment.
Useful for handling late registrations and modified deadlines.
Example:
course.get_enrollment_dates()
|
scripts/canvas.py
|
get_enrollment_dates
|
hsmohammed/rudaux
|
python
|
def get_enrollment_dates(course):
'Takes a course object and returns student dates of enrollment.\n Useful for handling late registrations and modified deadlines.\n\n Example:\n course.get_enrollment_dates()'
url_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'enrollments')
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
students = []
while ((resp is None) or (resp.links['current']['url'] != resp.links['last']['url'])):
resp = requests.get(url=(api_url if (resp is None) else resp.links['next']['url']), headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'type': ['StudentEnrollment'], 'per_page': '100'})
students.extend(resp.json())
enrollment_dates = {}
for st in students:
enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T', '-').replace(':', '-')[:16]
return enrollment_dates
|
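Editor's note: a usage sketch for the Canvas helpers above, assuming get_enrollment_dates is in scope. The course dict keys (hostname, course_id, token) come from the code; the values are hypothetical placeholders. Pagination follows resp.links, which the requests library parses from the HTTP Link header.

course = {
    'hostname': 'https://canvas.example.edu',  # hypothetical Canvas host
    'course_id': '12345',                      # hypothetical course id
    'token': 'REDACTED_API_TOKEN',             # hypothetical API token
}
dates = get_enrollment_dates(course)
# Maps Canvas user ids to 'YYYY-MM-DD-HH-MM' enrollment timestamps, e.g.
# {'987': '2020-09-01-14-30', ...}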
def get_assignments(course):
'Takes a course object and returns\n a Pandas data frame with all existing assignments and their attributes/data\n\n Example:\n course.get_assignments()'
url_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'assignments')
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = requests.get(url=api_url, headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'per_page': '10000'})
assignments = resp.json()
assign_data = pd.DataFrame.from_dict(assignments)
return assign_data
| 2,791,318,408,290,562,000
|
Takes a course object and returns
a Pandas data frame with all existing assignments and their attributes/data
Example:
course.get_assignments()
|
scripts/canvas.py
|
get_assignments
|
hsmohammed/rudaux
|
python
|
def get_assignments(course):
'Takes a course object and returns\n a Pandas data frame with all existing assignments and their attributes/data\n\n Example:\n course.get_assignments()'
url_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'assignments')
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = requests.get(url=api_url, headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'per_page': '10000'})
assignments = resp.json()
assign_data = pd.DataFrame.from_dict(assignments)
return assign_data
|
def get_assignment_lock_date(course, assignment):
"Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.\n \n Example:\n course.get_assignment_due_date('worksheet_01')"
assignments = get_assignments(course)
assignments = assignments[['name', 'lock_at']].query('name == @assignment')
lock_date = assignments['lock_at'].to_numpy()[0]
if (lock_date is None):
return lock_date
lock_date = lock_date.replace('T', '-')
lock_date = lock_date.replace(':', '-')
return lock_date[:16]
| 3,708,928,769,583,871,500
|
Takes a course object and the name of a Canvas assignment and returns the lock date. Returns None if no lock date assigned.
Example:
course.get_assignment_lock_date('worksheet_01')
|
scripts/canvas.py
|
get_assignment_lock_date
|
hsmohammed/rudaux
|
python
|
def get_assignment_lock_date(course, assignment):
"Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.\n \n Example:\n course.get_assignment_due_date('worksheet_01')"
assignments = get_assignments(course)
assignments = assignments[['name', 'lock_at']].query('name == @assignment')
lock_date = assignments['lock_at'].to_numpy()[0]
if (lock_date is None):
return lock_date
lock_date = lock_date.replace('T', '-')
lock_date = lock_date.replace(':', '-')
return lock_date[:16]
|
def get_assignment_due_date(course, assignment):
"Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.\n \n Example:\n course.get_assignment_due_date('worksheet_01')"
assignments = get_assignments(course)
assignments = assignments[['name', 'due_at']].query('name == @assignment')
due_date = assignments['due_at'].to_numpy()[0]
if (due_date is None):
return due_date
due_date = due_date.replace('T', '-')
due_date = due_date.replace(':', '-')
return due_date[:16]
| 5,000,143,287,905,871,000
|
Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.
Example:
course.get_assignment_due_date('worksheet_01')
|
scripts/canvas.py
|
get_assignment_due_date
|
hsmohammed/rudaux
|
python
|
def get_assignment_due_date(course, assignment):
"Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.\n \n Example:\n course.get_assignment_due_date('worksheet_01')"
assignments = get_assignments(course)
assignments = assignments[['name', 'due_at']].query('name == @assignment')
due_date = assignments['due_at'].to_numpy()[0]
if (due_date is None):
return due_date
due_date = due_date.replace('T', '-')
due_date = due_date.replace(':', '-')
return due_date[:16]
|
def get_assignment_unlock_date(course, assignment):
"Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.\n \n Example:\n course.get_assignment_unlock_date('worksheet_01')"
assignments = get_assignments(course)
assignments = assignments[['name', 'unlock_at']].query('name == @assignment')
unlock_date = assignments['unlock_at'].to_numpy()[0]
if (unlock_date is None):
return unlock_date
unlock_date = unlock_date.replace('T', '-').replace(':', '-')
return unlock_date[:16]
| 8,767,283,540,079,634,000
|
Takes a course object and the name of a Canvas assignment and returns the unlock date. Returns None if no unlock date assigned.
Example:
course.get_assignment_unlock_date('worksheet_01')
|
scripts/canvas.py
|
get_assignment_unlock_date
|
hsmohammed/rudaux
|
python
|
def get_assignment_unlock_date(course, assignment):
"Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.\n \n Example:\n course.get_assignment_unlock_date('worksheet_01')"
assignments = get_assignments(course)
assignments = assignments[['name', 'unlock_at']].query('name == @assignment')
unlock_date = assignments['unlock_at'].to_numpy()[0]
if (unlock_date is None):
return unlock_date
unlock_date = unlock_date.replace('T', '-').replace(':', '-')
return unlock_date[:16]
|
def get_assignment_id(course, assignment):
"Takes a course object and the name of a Canvas assignment and returns the Canvas ID.\n \n Example:\n course.get_assignment_id('worksheet_01')"
assignments = get_assignments(course)
assignments = assignments[['name', 'id']].query('name == @assignment')
return assignments['id'].values[0]
| 3,881,977,869,741,318,700
|
Takes a course object and the name of a Canvas assignment and returns the Canvas ID.
Example:
course.get_assignment_id('worksheet_01')
|
scripts/canvas.py
|
get_assignment_id
|
hsmohammed/rudaux
|
python
|
def get_assignment_id(course, assignment):
"Takes a course object and the name of a Canvas assignment and returns the Canvas ID.\n \n Example:\n course.get_assignment_id('worksheet_01')"
assignments = get_assignments(course)
assignments = assignments[['name', 'id']].query('name == @assignment')
return assignments['id'].values[0]
|
def get_grades(course, assignment):
"Takes a course object, an assignment name, and get the grades for that assignment from Canvas.\n \n Example:\n course.get_grades(course, 'worksheet_01')"
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'assignments', assignment_id, 'submissions')
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
scores = {}
while ((resp is None) or (resp.links['current']['url'] != resp.links['last']['url'])):
resp = requests.get(url=(api_url if (resp is None) else resp.links['next']['url']), headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'per_page': '100'})
scores.update({res['user_id']: res['score'] for res in resp.json()})
return scores
| 2,481,858,038,511,870,500
|
Takes a course object and an assignment name, and gets the grades for that assignment from Canvas.
Example:
course.get_grades(course, 'worksheet_01')
|
scripts/canvas.py
|
get_grades
|
hsmohammed/rudaux
|
python
|
def get_grades(course, assignment):
"Takes a course object, an assignment name, and get the grades for that assignment from Canvas.\n \n Example:\n course.get_grades(course, 'worksheet_01')"
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'assignments', assignment_id, 'submissions')
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
scores = {}
while ((resp is None) or (resp.links['current']['url'] != resp.links['last']['url'])):
resp = requests.get(url=(api_url if (resp is None) else resp.links['next']['url']), headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'per_page': '100'})
scores.update({res['user_id']: res['score'] for res in resp.json()})
return scores
|
def grades_need_posting(course, assignment):
"Takes a course object, an assignment name, and get the grades for that assignment from Canvas.\n \n Example:\n course.get_grades(course, 'worksheet_01')"
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'assignments', assignment_id, 'submissions')
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
real_stu_ids = list(get_enrollment_dates(course).keys())
resp = None
posted_flags = []
while ((resp is None) or (resp.links['current']['url'] != resp.links['last']['url'])):
resp = requests.get(url=(api_url if (resp is None) else resp.links['next']['url']), headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'per_page': '100'})
posted_flags.extend([(subm_grd['posted_at'] is not None) for subm_grd in resp.json() if (subm_grd['user_id'] in real_stu_ids)])
return (not all(posted_flags))
| 997,278,230,784,641,700
|
Takes a course object and an assignment name, and returns True if any grades for that assignment still need to be posted on Canvas.
Example:
course.grades_need_posting(course, 'worksheet_01')
|
scripts/canvas.py
|
grades_need_posting
|
hsmohammed/rudaux
|
python
|
def grades_need_posting(course, assignment):
"Takes a course object, an assignment name, and get the grades for that assignment from Canvas.\n \n Example:\n course.get_grades(course, 'worksheet_01')"
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'assignments', assignment_id, 'submissions')
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
real_stu_ids = list(get_enrollment_dates(course).keys())
resp = None
posted_flags = []
while ((resp is None) or (resp.links['current']['url'] != resp.links['last']['url'])):
resp = requests.get(url=(api_url if (resp is None) else resp.links['next']['url']), headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'per_page': '100'})
posted_flags.extend([(subm_grd['posted_at'] is not None) for subm_grd in resp.json() if (subm_grd['user_id'] in real_stu_ids)])
return (not all(posted_flags))
|
def post_grade(course, assignment, student, score):
"Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas.\n\n Example:\n course.post_grades(dsci100, 'worksheet_01', '23423', 10)"
assignment_id = get_assignment_id(course, assignment)
url_post_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'assignments', assignment_id, 'submissions', student)
api_url = urllib.parse.urljoin(course['hostname'], url_post_path)
token = course['token']
resp = requests.put(url=urllib.parse.urljoin(api_url, student), headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'submission': {'posted_grade': score}})
| -5,043,899,444,181,111,000
|
Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas.
Example:
course.post_grade(dsci100, 'worksheet_01', '23423', 10)
|
scripts/canvas.py
|
post_grade
|
hsmohammed/rudaux
|
python
|
def post_grade(course, assignment, student, score):
"Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas.\n\n Example:\n course.post_grades(dsci100, 'worksheet_01', '23423', 10)"
assignment_id = get_assignment_id(course, assignment)
url_post_path = posixpath.join('api', 'v1', 'courses', course['course_id'], 'assignments', assignment_id, 'submissions', student)
api_url = urllib.parse.urljoin(course['hostname'], url_post_path)
token = course['token']
resp = requests.put(url=urllib.parse.urljoin(api_url, student), headers={'Authorization': f'Bearer {token}', 'Accept': 'application/json+canvas-string-ids'}, json={'submission': {'posted_grade': score}})
|
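Editor's note: a sketch combining the helpers above to upload missing grades, assuming the functions and the course dict are in scope; the student ids and scores are made up for illustration.

assignment = 'worksheet_01'
if grades_need_posting(course, assignment):
    new_scores = {'987': 9.5, '988': 10}   # hypothetical student_id -> score
    for student_id, score in new_scores.items():
        post_grade(course, assignment, student_id, score)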
def make_kinetic_precond(kpointset, c0, eps=0.1, asPwCoeffs=True):
'\n Preconditioner\n P = 1 / (||k||^2 + ε)\n\n Keyword Arguments:\n kpointset --\n '
nk = len(kpointset)
nc = kpointset.ctx().num_spins()
if ((nc == 1) and (nk == 1) and (not asPwCoeffs)):
kp = kpointset[0]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([(1 / (np.sum((np.array(gkvec.gkvec(i)) ** 2)) + eps)) for i in range(N)])
return DiagonalPreconditioner(D=dia_matrix((d, 0), shape=(N, N)), c0=c0)
else:
P = PwCoeffs(dtype=np.float64, ctype=dia_matrix)
for k in range(nk):
kp = kpointset[k]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([(1 / (np.sum((np.array(gkvec.gkvec_cart(i)) ** 2)) + eps)) for i in range(N)])
for ispn in range(nc):
P[(k, ispn)] = dia_matrix((d, 0), shape=(N, N))
return DiagonalPreconditioner(P, c0)
| 1,352,622,070,274,955,300
|
Preconditioner
P = 1 / (||k||^2 + ε)
Keyword Arguments:
kpointset --
|
python_module/sirius/ot/ot_precondition.py
|
make_kinetic_precond
|
electronic-structure/SIRIUS
|
python
|
def make_kinetic_precond(kpointset, c0, eps=0.1, asPwCoeffs=True):
'\n Preconditioner\n P = 1 / (||k||^2 + ε)\n\n Keyword Arguments:\n kpointset --\n '
nk = len(kpointset)
nc = kpointset.ctx().num_spins()
if ((nc == 1) and (nk == 1) and (not asPwCoeffs)):
kp = kpointset[0]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([(1 / (np.sum((np.array(gkvec.gkvec(i)) ** 2)) + eps)) for i in range(N)])
return DiagonalPreconditioner(D=dia_matrix((d, 0), shape=(N, N)), c0=c0)
else:
P = PwCoeffs(dtype=np.float64, ctype=dia_matrix)
for k in range(nk):
kp = kpointset[k]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([(1 / (np.sum((np.array(gkvec.gkvec_cart(i)) ** 2)) + eps)) for i in range(N)])
for ispn in range(nc):
P[(k, ispn)] = dia_matrix((d, 0), shape=(N, N))
return DiagonalPreconditioner(P, c0)
|
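Editor's note: a numpy-only sketch of the diagonal entries built above, with the SIRIUS k-point/gkvec machinery replaced by a hypothetical (N, 3) array of G+k vectors; note that the code divides by the squared norm plus eps.

import numpy as np
from scipy.sparse import dia_matrix

gkvec = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [1.0, 1.0, 0.0]])             # hypothetical G+k vectors
eps = 0.1
d = 1.0 / (np.sum(gkvec ** 2, axis=1) + eps)    # 1 / (|k|^2 + eps) per plane wave
P = dia_matrix((d, 0), shape=(len(d), len(d)))
print(P.diagonal())                             # approx. [10. 0.909 0.476]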
def checkpoints(self):
'runs movement to levels -- checkpoint when leaving area'
return {'0': self.game, '1': self.good_ending_and_continue, 'bad': self.bad_ending, '3': self.woods_area}
| -567,931,036,030,381,100
|
runs movement to levels -- checkpoint when leaving area
|
chapters/chapter2.py
|
checkpoints
|
JordanLeich/Alpha-Zombie-Survival-Game
|
python
|
def checkpoints(self):
return {'0': self.game, '1': self.good_ending_and_continue, 'bad': self.bad_ending, '3': self.woods_area}
|
def good_ending_and_continue(self):
'Simply plays the good ending scene and then drops the player into chapter 3.'
self.good_ending()
Chapter3().game()
| 7,323,980,889,246,625,000
|
Simply plays the good ending scene and then drops the player into chapter 3.
|
chapters/chapter2.py
|
good_ending_and_continue
|
JordanLeich/Alpha-Zombie-Survival-Game
|
python
|
def good_ending_and_continue(self):
self.good_ending()
Chapter3().game()
|
def game(self):
'start of ch2'
self.start()
print_sleep('Upon driving the car through the broken roads area, the sun is certainly dwindling and the time in the car says 2:35 AM.\nYou continue to grow tired and restless from everything that had led to this point\n', 2.5)
choices = [str(x) for x in range(1, 3)]
choice_options = ['Due to the car getting low on gas, you must make a tough decision. (1) Drive back to the local gas station in town (2) Turn off the car and set up a camp fire in the woods: ']
choice = _player_choice(choices, choice_options)
if (choice == '1'):
sounds.zombie_attack_inside()
print_sleep('While attempting to put the car in reverse and head backwards to the local gas station in town, a swarm of zombies arise on the car while the car gets stuck into gear!\n', 2.5)
if (not player1.user_attack()):
return
player1.total_kills += 5
print_green('You have successfully killed off the heaping swarm of zombies surrounding the car!\n', 1)
self.continue_message()
elif (choice == '2'):
print_sleep('You have parked the car near the closest woods area and now need to gather up some supplies for a camp fire.\n', 2)
self.woods_area()
| 8,245,839,575,077,191,000
|
start of ch2
|
chapters/chapter2.py
|
game
|
JordanLeich/Alpha-Zombie-Survival-Game
|
python
|
def game(self):
self.start()
print_sleep('Upon driving the car through the broken roads area, the sun is certainly dwindling and the time in the car says 2:35 AM.\nYou continue to grow tired and restless from everything that had led to this point\n', 2.5)
choices = [str(x) for x in range(1, 3)]
choice_options = ['Due to the car getting low on gas, you must make a tough decision. (1) Drive back to the local gas station in town (2) Turn off the car and set up a camp fire in the woods: ']
choice = _player_choice(choices, choice_options)
if (choice == '1'):
sounds.zombie_attack_inside()
print_sleep('While attempting to put the car in reverse and head backwards to the local gas station in town, a swarm of zombies arise on the car while the car gets stuck into gear!\n', 2.5)
if (not player1.user_attack()):
return
player1.total_kills += 5
print_green('You have successfully killed off the heaping swarm of zombies surrounding the car!\n', 1)
self.continue_message()
elif (choice == '2'):
print_sleep('You have parked the car near the closet woods area and now need to gather up some supplies for a camp fire.\n', 2)
self.woods_area()
|
def woods_area(self):
'Checkpoint save 3'
player1.checkpoint_save('3')
print_sleep('You have successfully gathered up some sticks and still need a source of flame to begin the campfire.\n', 2)
choices = [str(x) for x in range(1, 3)]
choice_options = ['You can either test your luck in creating a fire by (1) Creating friction: Use sticks and rub against nearby wood chips (2) Search for other useful resources: ']
choice = _player_choice(choices, choice_options)
if (choice == '1'):
sounds.flame_ignite()
print_sleep('Whoosh! after a few minutes of trying to create friction, the birth of a small ash turns into a flame!\n', 2.5)
self.continue_message()
elif (choice == '2'):
sounds.zombie_attack_outside()
print_red('Whilst looking around for more resources, you begin hearing a group of 3 zombies running towards you!\n', 2)
if (not player1.user_attack()):
return
player1.total_kills += 3
print_green('You have successfully killed off the group of 3 zombies running towards you!\n', 1)
self.continue_message()
| -3,674,613,718,898,177,000
|
Checkpoint save 3
|
chapters/chapter2.py
|
woods_area
|
JordanLeich/Alpha-Zombie-Survival-Game
|
python
|
def woods_area(self):
player1.checkpoint_save('3')
print_sleep('You have successfully gathered up some sticks and still need a source of flame to begin the campfire.\n', 2)
choices = [str(x) for x in range(1, 3)]
choice_options = ['You can either test your luck in creating a fire by (1) Creating friction: Use sticks and rub against nearby wood chips (2) Search for other useful resources: ']
choice = _player_choice(choices, choice_options)
if (choice == '1'):
sounds.flame_ignite()
print_sleep('Whoosh! after a few minutes of trying to create friction, the birth of a small ash turns into a flame!\n', 2.5)
self.continue_message()
elif (choice == '2'):
sounds.zombie_attack_outside()
print_red('Whilst looking around for more resources, you begin hearing a group of 3 zombies running towards you!\n', 2)
if (not player1.user_attack()):
return
player1.total_kills += 3
print_green('You have successfully killed off the group of 3 zombies running towards you!\n', 1)
self.continue_message()
|
def __init__(self, mesh):
'*mesh* is the mesh Function.'
self.mesh = asfunction(mesh)
| -8,804,555,952,250,433,000
|
*mesh* is the mesh Function.
|
moviemaker3/math/angle.py
|
__init__
|
friedrichromstedt/moviemaker3
|
python
|
def __init__(self, mesh):
self.mesh = asfunction(mesh)
|
def __call__(self, ps):
'Returns the arctan2. The (y, x) coordinate is in the last \n dimension.'
meshT = self.mesh(ps).T
return numpy.arctan2(meshT[0], meshT[1]).T
| 5,408,430,055,512,316,000
|
Returns the arctan2. The (y, x) coordinate is in the last
dimension.
|
moviemaker3/math/angle.py
|
__call__
|
friedrichromstedt/moviemaker3
|
python
|
def __call__(self, ps):
'Returns the arctan2. The (y, x) coordinate is in the last \n dimension.'
meshT = self.mesh(ps).T
return numpy.arctan2(meshT[0], meshT[1]).T
|
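Editor's note: a small self-contained check of the arctan2 convention above: the last axis holds (y, x), and transposing moves the components to the front so numpy.arctan2(y, x) broadcasts over all points.

import numpy as np

mesh = np.array([[0.0, 1.0],    # (y=0, x=1) -> angle 0
                 [1.0, 0.0],    # (y=1, x=0) -> angle pi/2
                 [1.0, 1.0]])   # (y=1, x=1) -> angle pi/4
meshT = mesh.T
print(np.arctan2(meshT[0], meshT[1]).T)   # approx. [0. 1.571 0.785]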
def corners_nd(dims, origin=0.5):
'generate relative box corners based on length per dim and\n origin point.\n\n Args:\n dims (float array, shape=[N, ndim]): array of length per dim\n origin (list or array or float): origin point relate to smallest point.\n\n Returns:\n float array, shape=[N, 2 ** ndim, ndim]: returned corners.\n point layout example: (2d) x0y0, x0y1, x1y0, x1y1;\n (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n where x0 < x1, y0 < y1, z0 < z1\n '
ndim = int(dims.shape[1])
corners_norm = np.stack(np.unravel_index(np.arange((2 ** ndim)), ([2] * ndim)), axis=1).astype(dims.dtype)
if (ndim == 2):
corners_norm = corners_norm[[0, 1, 3, 2]]
elif (ndim == 3):
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = (corners_norm - np.array(origin, dtype=dims.dtype))
corners = (dims.reshape([(- 1), 1, ndim]) * corners_norm.reshape([1, (2 ** ndim), ndim]))
return corners
| 8,539,276,352,659,929,000
|
generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relative to the smallest point.
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned corners.
point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
|
det3d/core/bbox/box_np_ops.py
|
corners_nd
|
motional/polarstream
|
python
|
def corners_nd(dims, origin=0.5):
'generate relative box corners based on length per dim and\n origin point.\n\n Args:\n dims (float array, shape=[N, ndim]): array of length per dim\n origin (list or array or float): origin point relate to smallest point.\n\n Returns:\n float array, shape=[N, 2 ** ndim, ndim]: returned corners.\n point layout example: (2d) x0y0, x0y1, x1y0, x1y1;\n (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n where x0 < x1, y0 < y1, z0 < z1\n '
ndim = int(dims.shape[1])
corners_norm = np.stack(np.unravel_index(np.arange((2 ** ndim)), ([2] * ndim)), axis=1).astype(dims.dtype)
if (ndim == 2):
corners_norm = corners_norm[[0, 1, 3, 2]]
elif (ndim == 3):
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
corners_norm = (corners_norm - np.array(origin, dtype=dims.dtype))
corners = (dims.reshape([(- 1), 1, ndim]) * corners_norm.reshape([1, (2 ** ndim), ndim]))
return corners
|
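Editor's note: a worked call of corners_nd (assuming the function above and numpy are in scope) for one 2d box of width 2 and height 4 with the default centered origin. After the [0, 1, 3, 2] reindex the actual ring order comes out as x0y0, x0y1, x1y1, x1y0, which is convenient for drawing the outline.

dims = np.array([[2.0, 4.0]])            # one box: xdim=2, ydim=4
corners = corners_nd(dims, origin=0.5)   # centered on (0, 0)
print(corners[0])
# [[-1. -2.]    x0y0
#  [-1.  2.]    x0y1
#  [ 1.  2.]    x1y1
#  [ 1. -2.]]   x1y0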
def rbbox2d_to_near_bbox(rbboxes):
"convert rotated bbox to nearest 'standing' or 'lying' bbox.\n Args:\n rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes\n Returns:\n bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes\n "
rots = rbboxes[(..., (- 1))]
rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))
cond = (rots_0_pi_div_2 > (np.pi / 4))[(..., np.newaxis)]
bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])
return bboxes
| -1,301,025,159,006,912,300
|
convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
|
det3d/core/bbox/box_np_ops.py
|
rbbox2d_to_near_bbox
|
motional/polarstream
|
python
|
def rbbox2d_to_near_bbox(rbboxes):
"convert rotated bbox to nearest 'standing' or 'lying' bbox.\n Args:\n rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes\n Returns:\n bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes\n "
rots = rbboxes[(..., (- 1))]
rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))
cond = (rots_0_pi_div_2 > (np.pi / 4))[(..., np.newaxis)]
bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])
return bboxes
|
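Editor's note: limit_period is referenced above but not shown in this excerpt. A common definition in SECOND-style codebases (assumed here) wraps values into [-offset*period, (1-offset)*period); with offset=0.5 and period=pi, angles land in [-pi/2, pi/2), so the abs(...) > pi/4 test flags boxes closer to 'lying' than 'standing'.

import numpy as np

def limit_period(val, offset=0.5, period=np.pi):
    # Wrap val into [-offset * period, (1 - offset) * period).
    return val - np.floor(val / period + offset) * period

print(limit_period(np.array([0.0, np.pi / 3, 2.4])))   # approx. [0. 1.047 -0.742]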
def rotation_2d(points, angles):
'rotation 2d points based on origin point clockwise when angle positive.\n\n Args:\n points (float array, shape=[N, point_size, 2]): points to be rotated.\n angles (float array, shape=[N]): rotation angle.\n\n Returns:\n float array: same shape as points\n '
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
rot_mat_T = np.stack([[rot_cos, (- rot_sin)], [rot_sin, rot_cos]])
return np.einsum('aij,jka->aik', points, rot_mat_T)
| -8,212,063,425,262,677,000
|
Rotate 2d points around the origin point, clockwise when the angle is positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
|
det3d/core/bbox/box_np_ops.py
|
rotation_2d
|
motional/polarstream
|
python
|
def rotation_2d(points, angles):
'rotation 2d points based on origin point clockwise when angle positive.\n\n Args:\n points (float array, shape=[N, point_size, 2]): points to be rotated.\n angles (float array, shape=[N]): rotation angle.\n\n Returns:\n float array: same shape as points\n '
rot_sin = np.sin(angles)
rot_cos = np.cos(angles)
rot_mat_T = np.stack([[rot_cos, (- rot_sin)], [rot_sin, rot_cos]])
return np.einsum('aij,jka->aik', points, rot_mat_T)
|
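Editor's note: a sanity check of the einsum above (self-contained, no other definitions needed). Because the stacked matrix is the transpose of the usual rotation matrix, a positive angle rotates points clockwise, sending (1, 0) to (0, -1) for angle = pi/2.

import numpy as np

points = np.array([[[1.0, 0.0]]])   # shape [N=1, point_size=1, 2]
angles = np.array([np.pi / 2])
rot_sin, rot_cos = np.sin(angles), np.cos(angles)
rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])
print(np.einsum('aij,jka->aik', points, rot_mat_T))   # approx. [[[0. -1.]]]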
def rotation_box(box_corners, angle):
'rotation 2d points based on origin point clockwise when angle positive.\n\n Args:\n points (float array, shape=[N, point_size, 2]): points to be rotated.\n angle (float): rotation angle.\n\n Returns:\n float array: same shape as points\n '
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T = np.array([[rot_cos, (- rot_sin)], [rot_sin, rot_cos]], dtype=box_corners.dtype)
return (box_corners @ rot_mat_T)
| 6,605,383,920,097,669,000
|
Rotate 2d points around the origin point, clockwise when the angle is positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angle (float): rotation angle.
Returns:
float array: same shape as points
|
det3d/core/bbox/box_np_ops.py
|
rotation_box
|
motional/polarstream
|
python
|
def rotation_box(box_corners, angle):
'rotation 2d points based on origin point clockwise when angle positive.\n\n Args:\n points (float array, shape=[N, point_size, 2]): points to be rotated.\n angle (float): rotation angle.\n\n Returns:\n float array: same shape as points\n '
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T = np.array([[rot_cos, (- rot_sin)], [rot_sin, rot_cos]], dtype=box_corners.dtype)
return (box_corners @ rot_mat_T)
|
def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2):
'convert kitti locations, dimensions and angles to corners\n\n Args:\n centers (float array, shape=[N, 3]): locations in kitti label file.\n dims (float array, shape=[N, 3]): dimensions in kitti label file.\n angles (float array, shape=[N]): rotation_y in kitti label file.\n origin (list or array or float): origin point relate to smallest point.\n use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.\n axis (int): rotation axis. 1 for camera and 2 for lidar.\n Returns:\n [type]: [description]\n '
corners = corners_nd(dims, origin=origin)
if (angles is not None):
corners = rotation_3d_in_axis(corners, angles, axis=axis)
corners += centers.reshape([(- 1), 1, 3])
return corners
| 4,548,306,000,528,166,000
|
convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]): locations in kitti label file.
dims (float array, shape=[N, 3]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
origin (list or array or float): origin point relative to the smallest point.
use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.
axis (int): rotation axis. 1 for camera and 2 for lidar.
Returns:
[type]: [description]
|
det3d/core/bbox/box_np_ops.py
|
center_to_corner_box3d
|
motional/polarstream
|
python
|
def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2):
'convert kitti locations, dimensions and angles to corners\n\n Args:\n centers (float array, shape=[N, 3]): locations in kitti label file.\n dims (float array, shape=[N, 3]): dimensions in kitti label file.\n angles (float array, shape=[N]): rotation_y in kitti label file.\n origin (list or array or float): origin point relate to smallest point.\n use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.\n axis (int): rotation axis. 1 for camera and 2 for lidar.\n Returns:\n [type]: [description]\n '
corners = corners_nd(dims, origin=origin)
if (angles is not None):
corners = rotation_3d_in_axis(corners, angles, axis=axis)
corners += centers.reshape([(- 1), 1, 3])
return corners
|
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
'convert kitti locations, dimensions and angles to corners.\n format: center(xy), dims(xy), angles(clockwise when positive)\n\n Args:\n centers (float array, shape=[N, 2]): locations in kitti label file.\n dims (float array, shape=[N, 2]): dimensions in kitti label file.\n angles (float array, shape=[N]): rotation_y in kitti label file.\n\n Returns:\n [type]: [description]\n '
corners = corners_nd(dims, origin=origin)
if (angles is not None):
corners = rotation_2d(corners, angles)
corners += centers.reshape([(- 1), 1, 2])
return corners
| 7,772,419,611,600,366,000
|
convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
Returns:
[type]: [description]
|
det3d/core/bbox/box_np_ops.py
|
center_to_corner_box2d
|
motional/polarstream
|
python
|
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
'convert kitti locations, dimensions and angles to corners.\n format: center(xy), dims(xy), angles(clockwise when positive)\n\n Args:\n centers (float array, shape=[N, 2]): locations in kitti label file.\n dims (float array, shape=[N, 2]): dimensions in kitti label file.\n angles (float array, shape=[N]): rotation_y in kitti label file.\n\n Returns:\n [type]: [description]\n '
corners = corners_nd(dims, origin=origin)
if (angles is not None):
corners = rotation_2d(corners, angles)
corners += centers.reshape([(- 1), 1, 2])
return corners
|
@numba.jit(nopython=True)
def iou_jit(boxes, query_boxes, eps=1.0):
'calculate box iou. note that jit version runs 2x faster than cython in\n my machine!\n Parameters\n ----------\n boxes: (N, 4) ndarray of float\n query_boxes: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n '
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
box_area = (((query_boxes[(k, 2)] - query_boxes[(k, 0)]) + eps) * ((query_boxes[(k, 3)] - query_boxes[(k, 1)]) + eps))
for n in range(N):
iw = ((min(boxes[(n, 2)], query_boxes[(k, 2)]) - max(boxes[(n, 0)], query_boxes[(k, 0)])) + eps)
if (iw > 0):
ih = ((min(boxes[(n, 3)], query_boxes[(k, 3)]) - max(boxes[(n, 1)], query_boxes[(k, 1)])) + eps)
if (ih > 0):
ua = (((((boxes[(n, 2)] - boxes[(n, 0)]) + eps) * ((boxes[(n, 3)] - boxes[(n, 1)]) + eps)) + box_area) - (iw * ih))
overlaps[(n, k)] = ((iw * ih) / ua)
return overlaps
| -7,542,823,905,533,092,000
|
calculate box iou. note that jit version runs 2x faster than cython on
my machine!
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
|
det3d/core/bbox/box_np_ops.py
|
iou_jit
|
motional/polarstream
|
python
|
@numba.jit(nopython=True)
def iou_jit(boxes, query_boxes, eps=1.0):
'calculate box iou. note that jit version runs 2x faster than cython in\n my machine!\n Parameters\n ----------\n boxes: (N, 4) ndarray of float\n query_boxes: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n '
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
box_area = (((query_boxes[(k, 2)] - query_boxes[(k, 0)]) + eps) * ((query_boxes[(k, 3)] - query_boxes[(k, 1)]) + eps))
for n in range(N):
iw = ((min(boxes[(n, 2)], query_boxes[(k, 2)]) - max(boxes[(n, 0)], query_boxes[(k, 0)])) + eps)
if (iw > 0):
ih = ((min(boxes[(n, 3)], query_boxes[(k, 3)]) - max(boxes[(n, 1)], query_boxes[(k, 1)])) + eps)
if (ih > 0):
ua = (((((boxes[(n, 2)] - boxes[(n, 0)]) + eps) * ((boxes[(n, 3)] - boxes[(n, 1)]) + eps)) + box_area) - (iw * ih))
overlaps[(n, k)] = ((iw * ih) / ua)
return overlaps
|
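Editor's note: a hand-checkable call of iou_jit, assuming the function above is in scope. With the default eps=1.0 (inclusive pixel-style coordinates), two 3x3 boxes overlapping on a 2x2 patch give IoU = 4 / (9 + 9 - 4) = 2/7.

import numpy as np

boxes = np.array([[0.0, 0.0, 2.0, 2.0]])
query_boxes = np.array([[1.0, 1.0, 3.0, 3.0]])
print(iou_jit(boxes, query_boxes))   # [[0.28571429]]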
@numba.jit(nopython=True)
def iou_3d_jit(boxes, query_boxes, add1=True):
'calculate box iou3d,\n ----------\n boxes: (N, 6) ndarray of float\n query_boxes: (K, 6) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n '
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
for k in range(K):
box_area = ((((query_boxes[(k, 3)] - query_boxes[(k, 0)]) + add1) * ((query_boxes[(k, 4)] - query_boxes[(k, 1)]) + add1)) * ((query_boxes[(k, 5)] - query_boxes[(k, 2)]) + add1))
for n in range(N):
iw = ((min(boxes[(n, 3)], query_boxes[(k, 3)]) - max(boxes[(n, 0)], query_boxes[(k, 0)])) + add1)
if (iw > 0):
ih = ((min(boxes[(n, 4)], query_boxes[(k, 4)]) - max(boxes[(n, 1)], query_boxes[(k, 1)])) + add1)
if (ih > 0):
il = ((min(boxes[(n, 5)], query_boxes[(k, 5)]) - max(boxes[(n, 2)], query_boxes[(k, 2)])) + add1)
if (il > 0):
ua = float(((((((boxes[(n, 3)] - boxes[(n, 0)]) + add1) * ((boxes[(n, 4)] - boxes[(n, 1)]) + add1)) * ((boxes[(n, 5)] - boxes[(n, 2)]) + add1)) + box_area) - ((iw * ih) * il)))
overlaps[(n, k)] = (((iw * ih) * il) / ua)
return overlaps
| -2,774,315,039,072,902,700
|
calculate box iou3d.
----------
boxes: (N, 6) ndarray of float
query_boxes: (K, 6) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
|
det3d/core/bbox/box_np_ops.py
|
iou_3d_jit
|
motional/polarstream
|
python
|
@numba.jit(nopython=True)
def iou_3d_jit(boxes, query_boxes, add1=True):
'calculate box iou3d,\n ----------\n boxes: (N, 6) ndarray of float\n query_boxes: (K, 6) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n '
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
for k in range(K):
box_area = ((((query_boxes[(k, 3)] - query_boxes[(k, 0)]) + add1) * ((query_boxes[(k, 4)] - query_boxes[(k, 1)]) + add1)) * ((query_boxes[(k, 5)] - query_boxes[(k, 2)]) + add1))
for n in range(N):
iw = ((min(boxes[(n, 3)], query_boxes[(k, 3)]) - max(boxes[(n, 0)], query_boxes[(k, 0)])) + add1)
if (iw > 0):
ih = ((min(boxes[(n, 4)], query_boxes[(k, 4)]) - max(boxes[(n, 1)], query_boxes[(k, 1)])) + add1)
if (ih > 0):
il = ((min(boxes[(n, 5)], query_boxes[(k, 5)]) - max(boxes[(n, 2)], query_boxes[(k, 2)])) + add1)
if (il > 0):
ua = float(((((((boxes[(n, 3)] - boxes[(n, 0)]) + add1) * ((boxes[(n, 4)] - boxes[(n, 1)]) + add1)) * ((boxes[(n, 5)] - boxes[(n, 2)]) + add1)) + box_area) - ((iw * ih) * il)))
overlaps[(n, k)] = (((iw * ih) * il) / ua)
return overlaps
|
@numba.jit(nopython=True)
def iou_nd_jit(boxes, query_boxes, add1=True):
'calculate box iou nd, 2x slower than iou_jit.\n ----------\n boxes: (N, ndim * 2) ndarray of float\n query_boxes: (K, ndim * 2) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n '
N = boxes.shape[0]
K = query_boxes.shape[0]
ndim = (boxes.shape[1] // 2)
overlaps = np.zeros((N, K), dtype=boxes.dtype)
side_lengths = np.zeros((ndim,), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
invalid = False
for k in range(K):
qbox_area = ((query_boxes[(k, ndim)] - query_boxes[(k, 0)]) + add1)
for i in range(1, ndim):
qbox_area *= ((query_boxes[(k, (ndim + i))] - query_boxes[(k, i)]) + add1)
for n in range(N):
invalid = False
for i in range(ndim):
side_length = ((min(boxes[(n, (i + ndim))], query_boxes[(k, (i + ndim))]) - max(boxes[(n, i)], query_boxes[(k, i)])) + add1)
if (side_length <= 0):
invalid = True
break
side_lengths[i] = side_length
if (not invalid):
box_area = ((boxes[(n, ndim)] - boxes[(n, 0)]) + add1)
for i in range(1, ndim):
box_area *= ((boxes[(n, (ndim + i))] - boxes[(n, i)]) + add1)
inter = side_lengths[0]
for i in range(1, ndim):
inter *= side_lengths[i]
ua = float(((box_area + qbox_area) - inter))
overlaps[(n, k)] = (inter / ua)
return overlaps
| -5,011,801,594,874,465,000
|
calculate box iou nd, 2x slower than iou_jit.
----------
boxes: (N, ndim * 2) ndarray of float
query_boxes: (K, ndim * 2) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
|
det3d/core/bbox/box_np_ops.py
|
iou_nd_jit
|
motional/polarstream
|
python
|
@numba.jit(nopython=True)
def iou_nd_jit(boxes, query_boxes, add1=True):
'calculate box iou nd, 2x slower than iou_jit.\n ----------\n boxes: (N, ndim * 2) ndarray of float\n query_boxes: (K, ndim * 2) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n '
N = boxes.shape[0]
K = query_boxes.shape[0]
ndim = (boxes.shape[1] // 2)
overlaps = np.zeros((N, K), dtype=boxes.dtype)
side_lengths = np.zeros((ndim,), dtype=boxes.dtype)
if add1:
add1 = 1.0
else:
add1 = 0.0
invalid = False
for k in range(K):
qbox_area = ((query_boxes[(k, ndim)] - query_boxes[(k, 0)]) + add1)
for i in range(1, ndim):
qbox_area *= ((query_boxes[(k, (ndim + i))] - query_boxes[(k, i)]) + add1)
for n in range(N):
invalid = False
for i in range(ndim):
side_length = ((min(boxes[(n, (i + ndim))], query_boxes[(k, (i + ndim))]) - max(boxes[(n, i)], query_boxes[(k, i)])) + add1)
if (side_length <= 0):
invalid = True
break
side_lengths[i] = side_length
if (not invalid):
box_area = ((boxes[(n, ndim)] - boxes[(n, 0)]) + add1)
for i in range(1, ndim):
box_area *= ((boxes[(n, (ndim + i))] - boxes[(n, i)]) + add1)
inter = side_lengths[0]
for i in range(1, ndim):
inter *= side_lengths[i]
ua = float(((box_area + qbox_area) - inter))
overlaps[(n, k)] = (inter / ua)
return overlaps
|
def corner_to_surfaces_3d(corners):
'convert 3d box corners from corner function above\n to surfaces that normal vectors all direct to internal.\n\n Args:\n corners (float array, [N, 8, 3]): 3d box corners.\n Returns:\n surfaces (float array, [N, 6, 4, 3]):\n '
surfaces = np.array([[corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]]]).transpose([2, 0, 1, 3])
return surfaces
| -3,105,657,895,945,397,000
|
convert 3d box corners from corner function above
to surfaces whose normal vectors all point inward.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
|
det3d/core/bbox/box_np_ops.py
|
corner_to_surfaces_3d
|
motional/polarstream
|
python
|
def corner_to_surfaces_3d(corners):
'convert 3d box corners from corner function above\n to surfaces that normal vectors all direct to internal.\n\n Args:\n corners (float array, [N, 8, 3]): 3d box corners.\n Returns:\n surfaces (float array, [N, 6, 4, 3]):\n '
surfaces = np.array([[corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]]]).transpose([2, 0, 1, 3])
return surfaces
|
@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
'convert 3d box corners from corner function above\n to surfaces that normal vectors all direct to internal.\n\n Args:\n corners (float array, [N, 8, 3]): 3d box corners.\n Returns:\n surfaces (float array, [N, 6, 4, 3]):\n '
num_boxes = corners.shape[0]
surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
corner_idxes = np.array([0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]).reshape(6, 4)
for i in range(num_boxes):
for j in range(6):
for k in range(4):
surfaces[(i, j, k)] = corners[(i, corner_idxes[(j, k)])]
return surfaces
| 8,323,415,292,507,754,000
|
convert 3d box corners from corner function above
to surfaces whose normal vectors all point inward.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
|
det3d/core/bbox/box_np_ops.py
|
corner_to_surfaces_3d_jit
|
motional/polarstream
|
python
|
@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
'convert 3d box corners from corner function above\n to surfaces that normal vectors all direct to internal.\n\n Args:\n corners (float array, [N, 8, 3]): 3d box corners.\n Returns:\n surfaces (float array, [N, 6, 4, 3]):\n '
num_boxes = corners.shape[0]
surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
corner_idxes = np.array([0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]).reshape(6, 4)
for i in range(num_boxes):
for j in range(6):
for k in range(4):
surfaces[(i, j, k)] = corners[(i, corner_idxes[(j, k)])]
return surfaces
|
def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):
'assign a 0/1 label to each voxel based on whether\n the center of voxel is in gt_box. LIDAR.\n '
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = ((coors[:, ::(- 1)] * voxel_size) + shift)
voxel_centers = (voxel_origins + (voxel_size * 0.5))
gt_box_corners = center_to_corner_box3d((gt_boxes[:, :3] - (voxel_size * 0.5)), (gt_boxes[:, 3:6] + voxel_size), gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)
return np.any(ret, axis=1).astype(np.int64)
| 8,134,859,055,966,454,000
|
assign a 0/1 label to each voxel based on whether
the center of the voxel is in a gt_box. LIDAR.
|
det3d/core/bbox/box_np_ops.py
|
assign_label_to_voxel
|
motional/polarstream
|
python
|
def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):
'assign a 0/1 label to each voxel based on whether\n the center of voxel is in gt_box. LIDAR.\n '
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = ((coors[:, ::(- 1)] * voxel_size) + shift)
voxel_centers = (voxel_origins + (voxel_size * 0.5))
gt_box_corners = center_to_corner_box3d((gt_boxes[:, :3] - (voxel_size * 0.5)), (gt_boxes[:, 3:6] + voxel_size), gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)
return np.any(ret, axis=1).astype(np.int64)
|
def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
'assign a 0/1 label to each voxel based on whether\n any corner of the voxel is in a gt_box. LIDAR.\n '
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = ((coors[:, ::(- 1)] * voxel_size) + shift)
voxel_maxes = (voxel_origins + voxel_size)
voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=(- 1))
voxel_corners = minmax_to_corner_3d(voxel_minmax)
gt_box_corners = center_to_corner_box3d(gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
voxel_corners_flat = voxel_corners.reshape([(- 1), 3])
ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
ret = ret.reshape([(- 1), 8, ret.shape[(- 1)]])
return ret.any((- 1)).any((- 1)).astype(np.int64)
| 4,818,000,534,278,983,000
|
assign a 0/1 label to each voxel based on whether
any corner of the voxel is in a gt_box. LIDAR.
|
det3d/core/bbox/box_np_ops.py
|
assign_label_to_voxel_v3
|
motional/polarstream
|
python
|
def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
'assign a 0/1 label to each voxel based on whether\n any corner of the voxel is in a gt_box. LIDAR.\n '
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = ((coors[:, ::(- 1)] * voxel_size) + shift)
voxel_maxes = (voxel_origins + voxel_size)
voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=(- 1))
voxel_corners = minmax_to_corner_3d(voxel_minmax)
gt_box_corners = center_to_corner_box3d(gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=[0.5, 0.5, 0.5], axis=2)
gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
voxel_corners_flat = voxel_corners.reshape([(- 1), 3])
ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
ret = ret.reshape([(- 1), 8, ret.shape[(- 1)]])
return ret.any((- 1)).any((- 1)).astype(np.int64)
|
def image_box_region_area(img_cumsum, bbox):
'check a 2d voxel is contained by a box. used to filter empty\n anchors.\n Summed-area table algorithm:\n ==> W\n ------------------\n | | |\n |------A---------B\n | | |\n | | |\n |----- C---------D\n Iabcd = ID-IB-IC+IA\n Args:\n img_cumsum: [M, H, W](yx) cumsumed image.\n bbox: [N, 4](xyxy) bounding box,\n '
N = bbox.shape[0]
M = img_cumsum.shape[0]
ret = np.zeros([N, M], dtype=img_cumsum.dtype)
ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
ret = (((ID - IB) - IC) + IA)
return ret
| 5,212,201,778,767,590,000
|
Check whether a 2D voxel is contained by a box; used to filter empty
anchors.
Summed-area table algorithm:
==> W
------------------
|        |       |
|------A---------B
|      |        |
|      |        |
|------C---------D
Iabcd = ID - IB - IC + IA
Args:
img_cumsum: [M, H, W] (y, x) cumulative-summed image.
bbox: [N, 4] (x1, y1, x2, y2) bounding boxes.
|
det3d/core/bbox/box_np_ops.py
|
image_box_region_area
|
motional/polarstream
|
python
|
def image_box_region_area(img_cumsum, bbox):
'check a 2d voxel is contained by a box. used to filter empty\n anchors.\n Summed-area table algorithm:\n ==> W\n ------------------\n | | |\n |------A---------B\n | | |\n | | |\n |----- C---------D\n Iabcd = ID-IB-IC+IA\n Args:\n img_cumsum: [M, H, W](yx) cumsumed image.\n bbox: [N, 4](xyxy) bounding box,\n '
N = bbox.shape[0]
M = img_cumsum.shape[0]
ret = np.zeros([N, M], dtype=img_cumsum.dtype)
ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
ret = (((ID - IB) - IC) + IA)
return ret
|
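A minimal worked example of the summed-area-table identity Iabcd = ID - IB - IC + IA used by image_box_region_area above. The image and box values are made up for illustration; the recovered sum covers the half-open region (y1, y2] x (x1, x2].

import numpy as np

# Hypothetical single-channel image; its integral image is the 2D cumulative sum.
img = np.arange(16, dtype=np.int64).reshape(1, 4, 4)      # [M=1, H=4, W=4]
img_cumsum = img.cumsum(axis=1).cumsum(axis=2)

bbox = np.array([[1, 1, 3, 3]])                           # [N=1, 4] in (x1, y1, x2, y2)

ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
print(((ID - IB) - IC) + IA)                              # [[50]] == img[0, 2:4, 2:4].sum()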
def __init__(self):
'\n\t\tCreates the himesis graph representing the AToM3 model HContractUnitR03_ConnectedLHS\n\t\t'
self.is_compiled = True
super(HContractUnitR03_ConnectedLHS, self).__init__(name='HContractUnitR03_ConnectedLHS', num_nodes=0, edges=[])
self.add_edges([])
self['mm__'] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self['MT_constraint__'] = 'return True'
self['name'] = ''
self['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'HContractUnitR03_ConnectedLHS')
self['equations'] = []
self.add_node()
self.vs[0]['MT_pre__attr1'] = 'return True'
self.vs[0]['MT_label__'] = '1'
self.vs[0]['mm__'] = 'MT_pre__Class'
self.vs[0]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'Class')
self.add_edges([])
| -8,516,995,704,198,999,000
|
Creates the himesis graph representing the AToM3 model HContractUnitR03_ConnectedLHS
|
UML2ER/contracts/unit/HContractUnitR03_ConnectedLHS.py
|
__init__
|
levilucio/SyVOLT
|
python
|
def __init__(self):
'\n\t\t\n\t\t'
self.is_compiled = True
super(HContractUnitR03_ConnectedLHS, self).__init__(name='HContractUnitR03_ConnectedLHS', num_nodes=0, edges=[])
self.add_edges([])
self['mm__'] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self['MT_constraint__'] = 'return True'
self['name'] = ''
self['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'HContractUnitR03_ConnectedLHS')
self['equations'] = []
self.add_node()
self.vs[0]['MT_pre__attr1'] = 'return True'
self.vs[0]['MT_label__'] = '1'
self.vs[0]['mm__'] = 'MT_pre__Class'
self.vs[0]['GUID__'] = uuid.uuid3(uuid.NAMESPACE_DNS, 'Class')
self.add_edges([])
|
def rand_permute_adj_matrix(matrix):
'Randomly permute the order of vertices in the adjacency matrix, while maintaining the connectivity\n between them.'
num_vertices = matrix.shape[0]
rand_order = np.arange(num_vertices)
np.random.shuffle(rand_order)
matrix_permuted = rearrange_adj_matrix(matrix, rand_order)
return matrix_permuted
| 2,072,083,524,283,573,000
|
Randomly permute the order of vertices in the adjacency matrix, while maintaining the connectivity
between them.
|
utils/graph_utils.py
|
rand_permute_adj_matrix
|
BrunoKM/rhoana_graph_tools
|
python
|
def rand_permute_adj_matrix(matrix):
'Randomly permute the order of vertices in the adjacency matrix, while maintaining the connectivity\n between them.'
num_vertices = matrix.shape[0]
rand_order = np.arange(num_vertices)
np.random.shuffle(rand_order)
matrix_permuted = rearrange_adj_matrix(matrix, rand_order)
return matrix_permuted
|
def ged_from_adj(adj_mat_1, adj_mat_2, directed=False, ged_function=graph_edit_dist.compare):
'Calculate the graph edit distance between two graphs'
if directed:
create_using = nx.DiGraph
else:
create_using = nx.Graph
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=create_using())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=create_using())
return ged_function(g1, g2)
| -1,019,193,061,419,621,200
|
Calculate the graph edit distance between two graphs
|
utils/graph_utils.py
|
ged_from_adj
|
BrunoKM/rhoana_graph_tools
|
python
|
def ged_from_adj(adj_mat_1, adj_mat_2, directed=False, ged_function=graph_edit_dist.compare):
if directed:
create_using = nx.DiGraph
else:
create_using = nx.Graph
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=create_using())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=create_using())
return ged_function(g1, g2)
|
def ged_from_adj_nx(adj_mat_1, adj_mat_2, directed=False):
'Calculate the graph edit distance between two graphs using the networkx implementation'
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=nx.graph_edit_distance)
| -6,871,451,744,190,802,000
|
Calculate the graph edit distance between two graphs using the networkx implementation
|
utils/graph_utils.py
|
ged_from_adj_nx
|
BrunoKM/rhoana_graph_tools
|
python
|
def ged_from_adj_nx(adj_mat_1, adj_mat_2, directed=False):
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=nx.graph_edit_distance)
|
def ged_from_adj_ged4py(adj_mat_1, adj_mat_2, directed=False):
'Calculate the graph edit distance between two graphs using the ged4py implementation'
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=graph_edit_dist.compare)
| -2,015,968,644,657,250,800
|
Calculate the graph edit distance between two graphs using the ged4py implementation
|
utils/graph_utils.py
|
ged_from_adj_ged4py
|
BrunoKM/rhoana_graph_tools
|
python
|
def ged_from_adj_ged4py(adj_mat_1, adj_mat_2, directed=False):
return ged_from_adj(adj_mat_1, adj_mat_2, directed=directed, ged_function=graph_edit_dist.compare)
|
def is_isomorphic_from_adj(adj_mat_1, adj_mat_2):
'Checks whether two graphs are isomorphic taking adjacency matrices as inputs'
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=nx.DiGraph())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=nx.DiGraph())
return nx.is_isomorphic(g1, g2)
| 5,955,937,699,591,090,000
|
Checks whether two graphs are isomorphic taking adjacency matrices as inputs
|
utils/graph_utils.py
|
is_isomorphic_from_adj
|
BrunoKM/rhoana_graph_tools
|
python
|
def is_isomorphic_from_adj(adj_mat_1, adj_mat_2):
g1 = nx.from_numpy_matrix(adj_mat_1, create_using=nx.DiGraph())
g2 = nx.from_numpy_matrix(adj_mat_2, create_using=nx.DiGraph())
return nx.is_isomorphic(g1, g2)
|
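A small sketch tying the graph helpers above together. It calls nx.from_numpy_array directly (the module's nx.from_numpy_matrix spelling was removed in NetworkX 3.0); the adjacency matrices are made up.

import numpy as np
import networkx as nx

a1 = np.array([[0, 1, 0],
               [1, 0, 1],
               [0, 1, 0]])          # 3-node path graph
perm = [2, 0, 1]
a2 = a1[perm][:, perm]              # relabel vertices, keep connectivity

g1 = nx.from_numpy_array(a1)
g2 = nx.from_numpy_array(a2)
print(nx.is_isomorphic(g1, g2))         # True: only the labels changed
print(nx.graph_edit_distance(g1, g2))   # 0.0 for isomorphic graphs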
def train(self, epoch: int) -> None:
'\n Train an epoch\n\n Parameters\n ----------\n epoch : int\n Current number of epoch\n '
self.decoder.train()
self.encoder.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter(tag='loss', writer=self.writer)
top5accs = AverageMeter(tag='top5acc', writer=self.writer)
start = time.time()
for (i, (imgs, caps, caplens)) in enumerate(self.train_loader):
data_time.update((time.time() - start))
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
imgs = self.encoder(imgs)
if (self.caption_model == 'att2all'):
(scores, caps_sorted, decode_lengths, alphas, sort_ind) = self.decoder(imgs, caps, caplens)
else:
(scores, caps_sorted, decode_lengths, sort_ind) = self.decoder(imgs, caps, caplens)
targets = caps_sorted[:, 1:]
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
loss = self.loss_function(scores, targets)
if (self.caption_model == 'att2all'):
loss += (self.tau * ((1.0 - alphas.sum(dim=1)) ** 2).mean())
self.decoder_optimizer.zero_grad()
if (self.encoder_optimizer is not None):
self.encoder_optimizer.zero_grad()
loss.backward()
if (self.grad_clip is not None):
clip_gradient(self.decoder_optimizer, self.grad_clip)
if (self.encoder_optimizer is not None):
clip_gradient(self.encoder_optimizer, self.grad_clip)
self.decoder_optimizer.step()
if (self.encoder_optimizer is not None):
self.encoder_optimizer.step()
step = (((epoch - 1) * self.len_epoch) + i)
self.writer.set_step(step=step, mode='train')
top5 = accuracy(scores, targets, 5)
losses.update(loss.item(), sum(decode_lengths))
top5accs.update(top5, sum(decode_lengths))
batch_time.update((time.time() - start))
start = time.time()
if ((i % self.print_freq) == 0):
print('Epoch: [{0}][{1}/{2}]\tBatch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\tData Load Time {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tTop-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(self.train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top5=top5accs))
| 6,085,474,841,145,883,000
|
Train an epoch
Parameters
----------
epoch : int
Current epoch number
|
trainer/trainer.py
|
train
|
Renovamen/Image-Caption
|
python
|
def train(self, epoch: int) -> None:
'\n Train an epoch\n\n Parameters\n ----------\n epoch : int\n Current number of epoch\n '
self.decoder.train()
self.encoder.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter(tag='loss', writer=self.writer)
top5accs = AverageMeter(tag='top5acc', writer=self.writer)
start = time.time()
for (i, (imgs, caps, caplens)) in enumerate(self.train_loader):
data_time.update((time.time() - start))
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
imgs = self.encoder(imgs)
if (self.caption_model == 'att2all'):
(scores, caps_sorted, decode_lengths, alphas, sort_ind) = self.decoder(imgs, caps, caplens)
else:
(scores, caps_sorted, decode_lengths, sort_ind) = self.decoder(imgs, caps, caplens)
targets = caps_sorted[:, 1:]
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
loss = self.loss_function(scores, targets)
if (self.caption_model == 'att2all'):
loss += (self.tau * ((1.0 - alphas.sum(dim=1)) ** 2).mean())
self.decoder_optimizer.zero_grad()
if (self.encoder_optimizer is not None):
self.encoder_optimizer.zero_grad()
loss.backward()
if (self.grad_clip is not None):
clip_gradient(self.decoder_optimizer, self.grad_clip)
if (self.encoder_optimizer is not None):
clip_gradient(self.encoder_optimizer, self.grad_clip)
self.decoder_optimizer.step()
if (self.encoder_optimizer is not None):
self.encoder_optimizer.step()
step = (((epoch - 1) * self.len_epoch) + i)
self.writer.set_step(step=step, mode='train')
top5 = accuracy(scores, targets, 5)
losses.update(loss.item(), sum(decode_lengths))
top5accs.update(top5, sum(decode_lengths))
batch_time.update((time.time() - start))
start = time.time()
if ((i % self.print_freq) == 0):
print('Epoch: [{0}][{1}/{2}]\tBatch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\tData Load Time {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tTop-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(self.train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top5=top5accs))
|
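clip_gradient above is a project helper rather than a PyTorch builtin; a plausible sketch of what such a helper typically does (clamping each gradient element-wise across the optimizer's parameter groups) is:

def clip_gradient(optimizer, grad_clip):
    # Clamp every gradient to [-grad_clip, grad_clip] before optimizer.step().
    for group in optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)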
def validate(self) -> float:
'\n Validate an epoch.\n\n Returns\n -------\n bleu4 : float\n BLEU-4 score\n '
self.decoder.eval()
if (self.encoder is not None):
self.encoder.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top5accs = AverageMeter()
start = time.time()
ground_truth = list()
prediction = list()
with torch.no_grad():
for (i, (imgs, caps, caplens, allcaps)) in enumerate(self.val_loader):
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
if (self.encoder is not None):
imgs = self.encoder(imgs)
if (self.caption_model == 'att2all'):
(scores, caps_sorted, decode_lengths, alphas, sort_ind) = self.decoder(imgs, caps, caplens)
else:
(scores, caps_sorted, decode_lengths, sort_ind) = self.decoder(imgs, caps, caplens)
targets = caps_sorted[:, 1:]
scores_copy = scores.clone()
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
loss = self.loss_function(scores, targets)
if (self.caption_model == 'att2all'):
loss += (self.tau * ((1.0 - alphas.sum(dim=1)) ** 2).mean())
losses.update(loss.item(), sum(decode_lengths))
top5 = accuracy(scores, targets, 5)
top5accs.update(top5, sum(decode_lengths))
batch_time.update((time.time() - start))
start = time.time()
if ((i % self.print_freq) == 0):
print('Validation: [{0}/{1}]\tBatch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tTop-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})\t'.format(i, len(self.val_loader), batch_time=batch_time, loss=losses, top5=top5accs))
allcaps = allcaps[sort_ind]
for j in range(allcaps.shape[0]):
img_caps = allcaps[j].tolist()
img_captions = list(map((lambda c: [w for w in c if (w not in {self.word_map['<start>'], self.word_map['<pad>']})]), img_caps))
ground_truth.append(img_captions)
(_, preds) = torch.max(scores_copy, dim=2)
preds = preds.tolist()
temp_preds = list()
for (j, p) in enumerate(preds):
temp_preds.append(preds[j][:decode_lengths[j]])
preds = temp_preds
prediction.extend(preds)
assert (len(ground_truth) == len(prediction))
metrics = Metrics(ground_truth, prediction, self.rev_word_map)
bleu4 = metrics.belu[3]
cider = metrics.cider
print('\n * LOSS - {loss.avg:.3f}, TOP-5 ACCURACY - {top5.avg:.3f}, BLEU-4 - {bleu}, CIDEr - {cider}\n'.format(loss=losses, top5=top5accs, bleu=bleu4, cider=cider))
return bleu4
| 3,469,363,881,887,474,700
|
Validate an epoch.
Returns
-------
bleu4 : float
BLEU-4 score
|
trainer/trainer.py
|
validate
|
Renovamen/Image-Caption
|
python
|
def validate(self) -> float:
'\n Validate an epoch.\n\n Returns\n -------\n bleu4 : float\n BLEU-4 score\n '
self.decoder.eval()
if (self.encoder is not None):
self.encoder.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top5accs = AverageMeter()
start = time.time()
ground_truth = list()
prediction = list()
with torch.no_grad():
for (i, (imgs, caps, caplens, allcaps)) in enumerate(self.val_loader):
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
if (self.encoder is not None):
imgs = self.encoder(imgs)
if (self.caption_model == 'att2all'):
(scores, caps_sorted, decode_lengths, alphas, sort_ind) = self.decoder(imgs, caps, caplens)
else:
(scores, caps_sorted, decode_lengths, sort_ind) = self.decoder(imgs, caps, caplens)
targets = caps_sorted[:, 1:]
scores_copy = scores.clone()
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
loss = self.loss_function(scores, targets)
if (self.caption_model == 'att2all'):
loss += (self.tau * ((1.0 - alphas.sum(dim=1)) ** 2).mean())
losses.update(loss.item(), sum(decode_lengths))
top5 = accuracy(scores, targets, 5)
top5accs.update(top5, sum(decode_lengths))
batch_time.update((time.time() - start))
start = time.time()
if ((i % self.print_freq) == 0):
print('Validation: [{0}/{1}]\tBatch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tTop-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})\t'.format(i, len(self.val_loader), batch_time=batch_time, loss=losses, top5=top5accs))
allcaps = allcaps[sort_ind]
for j in range(allcaps.shape[0]):
img_caps = allcaps[j].tolist()
img_captions = list(map((lambda c: [w for w in c if (w not in {self.word_map['<start>'], self.word_map['<pad>']})]), img_caps))
ground_truth.append(img_captions)
(_, preds) = torch.max(scores_copy, dim=2)
preds = preds.tolist()
temp_preds = list()
for (j, p) in enumerate(preds):
temp_preds.append(preds[j][:decode_lengths[j]])
preds = temp_preds
prediction.extend(preds)
assert (len(ground_truth) == len(prediction))
metrics = Metrics(ground_truth, prediction, self.rev_word_map)
bleu4 = metrics.belu[3]
cider = metrics.cider
print('\n * LOSS - {loss.avg:.3f}, TOP-5 ACCURACY - {top5.avg:.3f}, BLEU-4 - {bleu}, CIDEr - {cider}\n'.format(loss=losses, top5=top5accs, bleu=bleu4, cider=cider))
return bleu4
|
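Both loops above rely on the same pack_padded_sequence trick: scores and targets are packed with the true decode lengths so padded positions never enter the loss. A self-contained sketch with made-up shapes:

import torch
from torch.nn.utils.rnn import pack_padded_sequence

batch, max_len, vocab = 2, 4, 7
scores = torch.randn(batch, max_len, vocab)        # decoder outputs
targets = torch.randint(vocab, (batch, max_len))   # gold word ids
decode_lengths = [4, 2]                            # sorted longest-first, as after sort_ind

packed_scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
packed_targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
print(packed_scores.shape, packed_targets.shape)   # torch.Size([6, 7]) torch.Size([6])
loss = torch.nn.functional.cross_entropy(packed_scores, packed_targets)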
def _get_all_query_string(self, changelist):
"\n If there's a default value set the all parameter needs to be provided\n however, if a default is not set the all parameter is not required.\n "
if self.default_filter_value:
return changelist.get_query_string({self.parameter_name: self.show_all_param_value})
return changelist.get_query_string(remove=[self.parameter_name])
| 7,343,347,246,114,303,000
|
If there's a default value set, the 'all' parameter needs to be provided;
however, if a default is not set, the 'all' parameter is not required.
|
djangocms_content_expiry/filters.py
|
_get_all_query_string
|
Aiky30/djangocms-content-expiry
|
python
|
def _get_all_query_string(self, changelist):
"\n If there's a default value set the all parameter needs to be provided\n however, if a default is not set the all parameter is not required.\n "
if self.default_filter_value:
return changelist.get_query_string({self.parameter_name: self.show_all_param_value})
return changelist.get_query_string(remove=[self.parameter_name])
|
@core.flake8ext
def hacking_no_locals(logical_line, physical_line, tokens, noqa):
'Do not use locals() or self.__dict__ for string formatting.\n\n Okay: \'locals()\'\n Okay: \'locals\'\n Okay: locals()\n Okay: print(locals())\n H501: print("%(something)" % locals())\n H501: LOG.info(_("%(something)") % self.__dict__)\n Okay: print("%(something)" % locals()) # noqa\n '
if noqa:
return
for_formatting = False
for (token_type, text, start, _, _) in tokens:
if ((text == '%') and (token_type == tokenize.OP)):
for_formatting = True
if (for_formatting and (token_type == tokenize.NAME)):
for (k, v) in LOCALS_TEXT_MAP.items():
if ((text == k) and (v in logical_line)):
(yield (start[1], ('H501: Do not use %s for string formatting' % v)))
| 7,383,045,247,385,087,000
|
Do not use locals() or self.__dict__ for string formatting.
Okay: 'locals()'
Okay: 'locals'
Okay: locals()
Okay: print(locals())
H501: print("%(something)" % locals())
H501: LOG.info(_("%(something)") % self.__dict__)
Okay: print("%(something)" % locals()) # noqa
|
hacking/checks/dictlist.py
|
hacking_no_locals
|
UbuntuEvangelist/hacking
|
python
|
@core.flake8ext
def hacking_no_locals(logical_line, physical_line, tokens, noqa):
'Do not use locals() or self.__dict__ for string formatting.\n\n Okay: \'locals()\'\n Okay: \'locals\'\n Okay: locals()\n Okay: print(locals())\n H501: print("%(something)" % locals())\n H501: LOG.info(_("%(something)") % self.__dict__)\n Okay: print("%(something)" % locals()) # noqa\n '
if noqa:
return
for_formatting = False
for (token_type, text, start, _, _) in tokens:
if ((text == '%') and (token_type == tokenize.OP)):
for_formatting = True
if (for_formatting and (token_type == tokenize.NAME)):
for (k, v) in LOCALS_TEXT_MAP.items():
if ((text == k) and (v in logical_line)):
(yield (start[1], ('H501: Do not use %s for string formatting' % v)))
|
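A minimal stand-alone driver (not part of hacking) showing the token stream the H501 check above walks; the check scans for a '%' OP token followed by NAME tokens that map into LOCALS_TEXT_MAP:

import io
import tokenize

line = 'print("%(something)s" % locals())'
for tok in tokenize.generate_tokens(io.StringIO(line).readline):
    # Each token carries (type, text, start, end, line), as unpacked in the check.
    print(tokenize.tok_name[tok.type], repr(tok.string))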
def deal_card():
'Return random card'
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
card = random.choice(cards)
return card
| -3,847,650,605,205,713,000
|
Return random card
|
Programs/day_11_blackjack.py
|
deal_card
|
Yunram/python_training
|
python
|
def deal_card():
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
card = random.choice(cards)
return card
|
def calculate_score(cards):
'Take a list of cards and return the score'
if ((sum(cards) == 21) and (len(cards) == 2)):
return 0
if ((11 in cards) and (sum(cards) > 21)):
cards.remove(11)
cards.append(1)
return sum(cards)
| 6,349,374,628,700,159,000
|
Take a list of cards and return the score
|
Programs/day_11_blackjack.py
|
calculate_score
|
Yunram/python_training
|
python
|
def calculate_score(cards):
if ((sum(cards) == 21) and (len(cards) == 2)):
return 0
if ((11 in cards) and (sum(cards) > 21)):
cards.remove(11)
cards.append(1)
return sum(cards)
|
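A short usage sketch for the two blackjack helpers above (assuming random is imported at module level, as in the source file). Note that calculate_score demotes only one ace per call and returns 0 as a blackjack sentinel:

import random
random.seed(0)                      # deterministic for the example

hand = [deal_card(), deal_card()]   # two-card starting hand
print(hand, calculate_score(hand))  # 0 would mean blackjack (ace + ten-card)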
def jupyterbook():
'\n Create content and TOC for building a jupyter-book version 0.8: https://jupyterbook.org/intro\n\n This function is called directly from bin/doconce\n '
if (len(sys.argv) < 2):
doconce_version()
print(docstring_jupyterbook)
print("Try 'doconce jupyterbook --help' for more information.")
sys.exit(1)
if (option('help') or ('-h' in sys.argv)):
print_help_jupyterbook()
sys.exit(1)
if (not check_command_line_options(1, option_list=(_legal_cmdline_opts_jupyterbook + _legal_command_line_options))):
_abort()
dest = option('dest=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
dest = folder_checker(dest)
dest_toc = option('dest_toc=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
dest_toc = folder_checker(dest_toc)
sep = option('sep=', default='section', option_list=_legal_cmdline_opts_jupyterbook)
sep_section = option('sep_section=', default='', option_list=_legal_cmdline_opts_jupyterbook)
globals.encoding = option('encoding=', default='')
titles_opt = option('titles=', default='auto', option_list=_legal_cmdline_opts_jupyterbook)
show_titles_opt = option('show_titles', default=False, option_list=_legal_cmdline_opts_jupyterbook)
(dirname, basename, ext, filename) = find_file_with_extensions(sys.argv[1], allowed_extensions=['.do.txt'])
if (not filename):
errwarn(('*** error: file %s does not exist' % globals.filename))
_abort()
globals.dirname = dirname
if dirname:
os.chdir(dirname)
errwarn(('*** doconce format now works in directory %s' % dirname))
dest = (os.path.relpath((dest or '.'), start=dirname) + '/')
if dest.startswith('./'):
dest = dest[2:]
dest_toc = (os.path.relpath((dest_toc or '.'), start=dirname) + '/')
if dest_toc.startswith('./'):
dest_toc = dest_toc[2:]
globals.filename = filename
globals.dofile_basename = basename
_rmdolog()
preprocessor_options = [arg for arg in sys.argv[1:] if (not arg.startswith('--'))]
format = 'pandoc'
filename_preprocessed = preprocess(globals.filename, format, preprocessor_options)
filestr = read_file(filename_preprocessed, _encoding=globals.encoding)
for tag in ('TITLE', 'AUTHOR', 'DATE'):
if re.search(('^%s:.*' % tag), filestr, re.MULTILINE):
errwarn(('*** warning : Removing heading with %s. Consider to place it in _config.yml' % tag.lower()))
filestr = re.sub(('^%s:.*' % tag), '', filestr, flags=re.MULTILINE)
tag = 'TOC'
if re.search(('^%s:.*' % tag), filestr, re.MULTILINE):
errwarn(('*** warning : Removing the %s tag' % tag.lower()))
filestr = re.sub(('^%s:.*' % tag), '', filestr, flags=re.MULTILINE)
pattern_tag = '[\\w _\\-]*'
pattern = (((('cite(?:(\\[' + pattern_tag) + '\\]))?\\{(') + pattern_tag) + ')\\}')
if re.search(pattern, filestr):
filestr = handle_index_and_bib(filestr, 'html')
m = re.search('\\A\\s*^(?:#.*\\s*|!split\\s*)*', filestr, re.MULTILINE)
if m:
filestr = filestr[m.end():]
"skip = ''\n for line in filestr.splitlines():\n if not line.strip():\n skip += line + '\n'\n elif not line.startswith('#') and not line.startswith('!'):\n break\n else:\n skip += line +'\n'\n filestr = filestr[len(skip):]\n "
chapters = split_file(filestr, INLINE_TAGS[sep])
sec_list = ([[]] * len(chapters))
sec_title_list_auto = None
if sep_section:
for (c, chap) in enumerate(chapters):
m = re.search(INLINE_TAGS[sep_section], chap, flags=re.MULTILINE)
if m:
pos_sep_section = (m.start() if m else 0)
chapters[c] = split_file(chap[:pos_sep_section], INLINE_TAGS[sep_section])[0]
sec_list[c] = split_file(chap[pos_sep_section:], INLINE_TAGS[sep_section])
(chapter_titles, sec_title_list) = read_title_file(titles_opt, chapters, sec_list)
def int_formatter(_list):
return (('%0' + str(max(2, (math.floor(math.log((len(_list) + 0.01), 10)) + 1)))) + 'd_')
chapter_formatter = int_formatter(chapters)
(chapters, chapter_titles, chapter_titles_auto) = titles_to_chunks(chapters, chapter_titles, sep=sep, chapter_formatter=chapter_formatter, tags=INLINE_TAGS)
chapter_basenames = [((chapter_formatter % (i + 1)) + basename) for i in range(len(chapters))]
sec_basename_list = ([[]] * len(chapters))
if sep_section:
sec_title_list_auto = ([[]] * len(sec_title_list))
for (c, sections) in enumerate(sec_list):
section_formatter = ((chapter_formatter % (c + 1)) + int_formatter(sections))
(sec_list[c], section_titles, section_titles_auto) = titles_to_chunks(sections, sec_title_list[c], sep=sep_section, sep2=sep, chapter_formatter=section_formatter, tags=INLINE_TAGS)
sec_title_list[c] = section_titles
sec_title_list_auto[c] = section_titles_auto
sec_basename_list[c] = [((section_formatter % (i + 1)) + basename) for i in range(len(sections))]
if show_titles_opt:
if (sep_section == ''):
print(('\n===== Titles detected using the %s separator:' % sep))
else:
print(('\n===== Titles detected using the %s and %s separators:' % (sep, sep_section)))
for c in range(len(chapter_titles_auto)):
print(chapter_titles_auto[c])
if sep_section:
for s in range(len(sec_title_list_auto[c])):
print(sec_title_list_auto[c][s])
print('=====')
all_texts = []
all_basenames = []
all_titles = []
all_nestings = []
for c in range(len(chapters)):
all_texts.append(chapters[c])
all_basenames.append(chapter_basenames[c])
all_titles.append(chapter_titles[c])
all_nestings.append(0)
for s in range(len(sec_list[c])):
all_texts.append(sec_list[c][s])
all_basenames.append(sec_basename_list[c][s])
all_titles.append(sec_title_list[c][s])
all_nestings.append(1)
all_suffix = identify_format(all_texts)
all_fnames = [(b + s) for (b, s) in zip(all_basenames, all_suffix)]
all_markings = list(map((lambda x: ('!split\n<!-- jupyter-book %s -->\n' % x)), all_fnames))
all_texts = [(m + t) for (m, t) in zip(all_markings, all_texts)]
filestr = ''.join(all_texts)
(filestr_md, bg_session) = doconce2format(filestr, 'pandoc')
(filestr_ipynb, bg_session) = doconce2format(filestr, 'ipynb')
all_texts_md = split_file(filestr_md, '<!-- !split -->\n<!-- jupyter-book .* -->\n')
all_texts_ipynb = split_ipynb(filestr_ipynb, all_fnames)
if (len(all_texts_md) != len(all_texts_ipynb)):
errwarn('*** error : the lengths of .md and .ipynb files should be the same')
_abort()
all_texts_formatted = ([[]] * len(all_fnames))
for i in range(len(all_fnames)):
all_texts_formatted[i] = all_texts_md[i]
if all_fnames[i].endswith('.ipynb'):
all_texts_formatted[i] = all_texts_ipynb[i]
all_texts_formatted = resolve_links_destinations(all_texts_formatted, all_basenames)
all_texts_formatted = [fix_media_src(t, '', dest) for t in all_texts_formatted]
for i in range(len(all_texts_formatted)):
write_file(all_texts_formatted[i], (dest + all_fnames[i]), _encoding=globals.encoding)
yml_text = create_toc_yml(all_basenames, titles=all_titles, nesting_levels=all_nestings, dest=dest, dest_toc=dest_toc)
write_file(yml_text, (dest_toc + '_toc.yml'), _encoding=globals.encoding)
print(('\nWrote _toc.yml and %d chapter files to these folders:\n %s\n %s' % (len(all_fnames), os.path.realpath(dest_toc), os.path.realpath(dest))))
| 5,549,780,974,407,250,000
|
Create content and TOC for building a jupyter-book version 0.8: https://jupyterbook.org/intro
This function is called directly from bin/doconce
|
lib/doconce/jupyterbook.py
|
jupyterbook
|
aless80/doconce
|
python
|
def jupyterbook():
'\n Create content and TOC for building a jupyter-book version 0.8: https://jupyterbook.org/intro\n\n This function is called directly from bin/doconce\n '
if (len(sys.argv) < 2):
doconce_version()
print(docstring_jupyterbook)
print("Try 'doconce jupyterbook --help' for more information.")
sys.exit(1)
if (option('help') or ('-h' in sys.argv)):
print_help_jupyterbook()
sys.exit(1)
if (not check_command_line_options(1, option_list=(_legal_cmdline_opts_jupyterbook + _legal_command_line_options))):
_abort()
dest = option('dest=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
dest = folder_checker(dest)
dest_toc = option('dest_toc=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
dest_toc = folder_checker(dest_toc)
sep = option('sep=', default='section', option_list=_legal_cmdline_opts_jupyterbook)
sep_section = option('sep_section=', default='', option_list=_legal_cmdline_opts_jupyterbook)
globals.encoding = option('encoding=', default='')
titles_opt = option('titles=', default='auto', option_list=_legal_cmdline_opts_jupyterbook)
show_titles_opt = option('show_titles', default=False, option_list=_legal_cmdline_opts_jupyterbook)
(dirname, basename, ext, filename) = find_file_with_extensions(sys.argv[1], allowed_extensions=['.do.txt'])
if (not filename):
errwarn(('*** error: file %s does not exist' % globals.filename))
_abort()
globals.dirname = dirname
if dirname:
os.chdir(dirname)
errwarn(('*** doconce format now works in directory %s' % dirname))
dest = (os.path.relpath((dest or '.'), start=dirname) + '/')
if dest.startswith('./'):
dest = dest[2:]
dest_toc = (os.path.relpath((dest_toc or '.'), start=dirname) + '/')
if dest_toc.startswith('./'):
dest_toc = dest_toc[2:]
globals.filename = filename
globals.dofile_basename = basename
_rmdolog()
preprocessor_options = [arg for arg in sys.argv[1:] if (not arg.startswith('--'))]
format = 'pandoc'
filename_preprocessed = preprocess(globals.filename, format, preprocessor_options)
filestr = read_file(filename_preprocessed, _encoding=globals.encoding)
for tag in ('TITLE', 'AUTHOR', 'DATE'):
if re.search(('^%s:.*' % tag), filestr, re.MULTILINE):
errwarn(('*** warning : Removing heading with %s. Consider to place it in _config.yml' % tag.lower()))
filestr = re.sub(('^%s:.*' % tag), '', filestr, flags=re.MULTILINE)
tag = 'TOC'
if re.search(('^%s:.*' % tag), filestr, re.MULTILINE):
errwarn(('*** warning : Removing the %s tag' % tag.lower()))
filestr = re.sub(('^%s:.*' % tag), '', filestr, flags=re.MULTILINE)
pattern_tag = '[\\w _\\-]*'
pattern = (((('cite(?:(\\[' + pattern_tag) + '\\]))?\\{(') + pattern_tag) + ')\\}')
if re.search(pattern, filestr):
filestr = handle_index_and_bib(filestr, 'html')
m = re.search('\\A\\s*^(?:#.*\\s*|!split\\s*)*', filestr, re.MULTILINE)
if m:
filestr = filestr[m.end():]
"skip = \n for line in filestr.splitlines():\n if not line.strip():\n skip += line + '\n'\n elif not line.startswith('#') and not line.startswith('!'):\n break\n else:\n skip += line +'\n'\n filestr = filestr[len(skip):]\n "
chapters = split_file(filestr, INLINE_TAGS[sep])
sec_list = ([[]] * len(chapters))
sec_title_list_auto = None
if sep_section:
for (c, chap) in enumerate(chapters):
m = re.search(INLINE_TAGS[sep_section], chap, flags=re.MULTILINE)
if m:
pos_sep_section = (m.start() if m else 0)
chapters[c] = split_file(chap[:pos_sep_section], INLINE_TAGS[sep_section])[0]
sec_list[c] = split_file(chap[pos_sep_section:], INLINE_TAGS[sep_section])
(chapter_titles, sec_title_list) = read_title_file(titles_opt, chapters, sec_list)
def int_formatter(_list):
return (('%0' + str(max(2, (math.floor(math.log((len(_list) + 0.01), 10)) + 1)))) + 'd_')
chapter_formatter = int_formatter(chapters)
(chapters, chapter_titles, chapter_titles_auto) = titles_to_chunks(chapters, chapter_titles, sep=sep, chapter_formatter=chapter_formatter, tags=INLINE_TAGS)
chapter_basenames = [((chapter_formatter % (i + 1)) + basename) for i in range(len(chapters))]
sec_basename_list = ([[]] * len(chapters))
if sep_section:
sec_title_list_auto = ([[]] * len(sec_title_list))
for (c, sections) in enumerate(sec_list):
section_formatter = ((chapter_formatter % (c + 1)) + int_formatter(sections))
(sec_list[c], section_titles, section_titles_auto) = titles_to_chunks(sections, sec_title_list[c], sep=sep_section, sep2=sep, chapter_formatter=section_formatter, tags=INLINE_TAGS)
sec_title_list[c] = section_titles
sec_title_list_auto[c] = section_titles_auto
sec_basename_list[c] = [((section_formatter % (i + 1)) + basename) for i in range(len(sections))]
if show_titles_opt:
if (sep_section == ''):
print(('\n===== Titles detected using the %s separator:' % sep))
else:
print(('\n===== Titles detected using the %s and %s separators:' % (sep, sep_section)))
for c in range(len(chapter_titles_auto)):
print(chapter_titles_auto[c])
if sep_section:
for s in range(len(sec_title_list_auto[c])):
print(sec_title_list_auto[c][s])
print('=====')
all_texts = []
all_basenames = []
all_titles = []
all_nestings = []
for c in range(len(chapters)):
all_texts.append(chapters[c])
all_basenames.append(chapter_basenames[c])
all_titles.append(chapter_titles[c])
all_nestings.append(0)
for s in range(len(sec_list[c])):
all_texts.append(sec_list[c][s])
all_basenames.append(sec_basename_list[c][s])
all_titles.append(sec_title_list[c][s])
all_nestings.append(1)
all_suffix = identify_format(all_texts)
all_fnames = [(b + s) for (b, s) in zip(all_basenames, all_suffix)]
all_markings = list(map((lambda x: ('!split\n<!-- jupyter-book %s -->\n' % x)), all_fnames))
all_texts = [(m + t) for (m, t) in zip(all_markings, all_texts)]
filestr = ''.join(all_texts)
(filestr_md, bg_session) = doconce2format(filestr, 'pandoc')
(filestr_ipynb, bg_session) = doconce2format(filestr, 'ipynb')
all_texts_md = split_file(filestr_md, '<!-- !split -->\n<!-- jupyter-book .* -->\n')
all_texts_ipynb = split_ipynb(filestr_ipynb, all_fnames)
if (len(all_texts_md) != len(all_texts_ipynb)):
errwarn('*** error : the lengths of .md and .ipynb files should be the same')
_abort()
all_texts_formatted = ([[]] * len(all_fnames))
for i in range(len(all_fnames)):
all_texts_formatted[i] = all_texts_md[i]
if all_fnames[i].endswith('.ipynb'):
all_texts_formatted[i] = all_texts_ipynb[i]
all_texts_formatted = resolve_links_destinations(all_texts_formatted, all_basenames)
all_texts_formatted = [fix_media_src(t, '', dest) for t in all_texts_formatted]
for i in range(len(all_texts_formatted)):
write_file(all_texts_formatted[i], (dest + all_fnames[i]), _encoding=globals.encoding)
yml_text = create_toc_yml(all_basenames, titles=all_titles, nesting_levels=all_nestings, dest=dest, dest_toc=dest_toc)
write_file(yml_text, (dest_toc + '_toc.yml'), _encoding=globals.encoding)
print(('\nWrote _toc.yml and %d chapter files to these folders:\n %s\n %s' % (len(all_fnames), os.path.realpath(dest_toc), os.path.realpath(dest))))
|
def split_file(filestr, separator):
"Split the text of a doconce file by a regex string.\n\n Split the text of a doconce file by a separator regex (e.g. the values of\n the INLINE_TAGS dictionary from common.py) and return the chunks of text.\n Note that the first chunk contains any text before the first separator.\n :param str filestr: text string\n :param str separator: regex text, e.g. INLINE_TAGS['chapter'], see common.py\n :return: list of text chunks\n :rtype: list[str]\n "
chunks = []
c = re.compile(separator, flags=re.MULTILINE)
if (re.search(c, filestr) is None):
print('pattern of separator not found in file')
chunks.append(filestr)
else:
pos_prev = 0
for m in re.finditer(c, filestr):
if (m.start() == 0):
continue
if (filestr[:m.start()].rfind('!bc') > filestr[:m.start()].rfind('!ec')):
errwarn('*** warning : skipped a separator, which appeared to be inside the !bc and !ec directives')
continue
chunk = filestr[pos_prev:m.start()]
chunks.append(chunk)
pos_prev = m.start()
chunk = filestr[pos_prev:]
chunks.append(chunk)
return chunks
| -3,129,595,768,777,523,700
|
Split the text of a doconce file by a regex string.
Split the text of a doconce file by a separator regex (e.g. the values of
the INLINE_TAGS dictionary from common.py) and return the chunks of text.
Note that the first chunk contains any text before the first separator.
:param str filestr: text string
:param str separator: regex text, e.g. INLINE_TAGS['chapter'], see common.py
:return: list of text chunks
:rtype: list[str]
|
lib/doconce/jupyterbook.py
|
split_file
|
aless80/doconce
|
python
|
def split_file(filestr, separator):
"Split the text of a doconce file by a regex string.\n\n Split the text of a doconce file by a separator regex (e.g. the values of\n the INLINE_TAGS dictionary from common.py) and return the chunks of text.\n Note that the first chunk contains any text before the first separator.\n :param str filestr: text string\n :param str separator: regex text, e.g. INLINE_TAGS['chapter'], see common.py\n :return: list of text chunks\n :rtype: list[str]\n "
chunks = []
c = re.compile(separator, flags=re.MULTILINE)
if (re.search(c, filestr) is None):
print('pattern of separator not found in file')
chunks.append(filestr)
else:
pos_prev = 0
for m in re.finditer(c, filestr):
if (m.start() == 0):
continue
if (filestr[:m.start()].rfind('!bc') > filestr[:m.start()].rfind('!ec')):
errwarn('*** warning : skipped a separator, which appeared to be inside the !bc and !ec directives')
continue
chunk = filestr[pos_prev:m.start()]
chunks.append(chunk)
pos_prev = m.start()
chunk = filestr[pos_prev:]
chunks.append(chunk)
return chunks
|
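An illustrative call to split_file, assuming the function above is importable. The separator is a hand-written regex in the spirit of INLINE_TAGS['section'] (seven '=' signs), not the actual pattern from common.py:

text = ("intro text\n"
        "======= First =======\n"
        "body one\n"
        "======= Second =======\n"
        "body two\n")
chunks = split_file(text, r"^={7} .*? ={7}\s*$")
print(len(chunks))   # 3: the preamble plus one chunk per section heading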
def split_ipynb(ipynb_text, filenames):
'Split a Jupyter notebook based on filenames present in its blocks\n\n Given the text of a Jupyter notebook marked with the output filename\n in comments (e.g. <!-- jupyter-book 02_mybook.ipynb -->), return a list of\n Jupyter notebooks separated accordingly.\n :param str ipynb_text: ipynb code marked with individual filenames i.e. <!-- jupyter-book 02_mybook.ipynb -->\n :param list[str] filenames: filenames\n :return: ipynb_texts with the ipynb code for each block\n :rtype: list[str]\n '
ipynb_dict = json.loads(ipynb_text)
cells = ipynb_dict.pop('cells')
ind_fname = []
block_sources = [''.join(c['source']) for c in cells]
for fname in filenames:
marking = ('<!-- jupyter-book % s -->' % fname)
for (b, block) in enumerate(block_sources):
if (block.find(marking) > (- 1)):
ind_fname.append(b)
break
if (len(ind_fname) != len(filenames)):
errwarn('*** error : could not find all markings in ipynb')
_abort()
ipynb_texts = ([''] * len(filenames))
for (i, ind_start) in enumerate(ind_fname):
ind_end = None
if ((i + 1) < len(ind_fname)):
ind_end = ind_fname[(i + 1)]
block_dict = ipynb_dict.copy()
block_dict['cells'] = cells[ind_start:ind_end]
ipynb_texts[i] = json.dumps(block_dict, indent=1, separators=(',', ':'))
return ipynb_texts
| 985,091,436,715,346,400
|
Split a Jupyter notebook based on filenames present in its blocks
Given the text of a Jupyter notebook marked with the output filename
in comments (e.g. <!-- jupyter-book 02_mybook.ipynb -->), return a list of
Jupyter notebooks separated accordingly.
:param str ipynb_text: ipynb code marked with individual filenames i.e. <!-- jupyter-book 02_mybook.ipynb -->
:param list[str] filenames: filenames
:return: ipynb_texts with the ipynb code for each block
:rtype: list[str]
|
lib/doconce/jupyterbook.py
|
split_ipynb
|
aless80/doconce
|
python
|
def split_ipynb(ipynb_text, filenames):
'Split a Jupyter notebook based on filenames present in its blocks\n\n Given the text of a Jupyter notebook marked with the output filename\n in comments (e.g. <!-- jupyter-book 02_mybook.ipynb -->), return a list of\n Jupyter notebooks separated accordingly.\n :param str ipynb_text: ipynb code marked with individual filenames i.e. <!-- jupyter-book 02_mybook.ipynb -->\n :param list[str] filenames: filenames\n :return: ipynb_texts with the ipynb code for each block\n :rtype: list[str]\n '
ipynb_dict = json.loads(ipynb_text)
cells = ipynb_dict.pop('cells')
ind_fname = []
block_sources = [''.join(c['source']) for c in cells]
for fname in filenames:
marking = ('<!-- jupyter-book % s -->' % fname)
for (b, block) in enumerate(block_sources):
if (block.find(marking) > (- 1)):
ind_fname.append(b)
break
if (len(ind_fname) != len(filenames)):
errwarn('*** error : could not find all markings in ipynb')
_abort()
ipynb_texts = ([''] * len(filenames))
for (i, ind_start) in enumerate(ind_fname):
ind_end = None
if ((i + 1) < len(ind_fname)):
ind_end = ind_fname[(i + 1)]
block_dict = ipynb_dict.copy()
block_dict['cells'] = cells[ind_start:ind_end]
ipynb_texts[i] = json.dumps(block_dict, indent=1, separators=(',', ':'))
return ipynb_texts
|
def read_title_file(titles_opt, chapters, sec_list):
"Helper function to read and process a file with titles\n\n Read the file containing titles and process them according to the number of jupyter-book chapters and sections.\n len(sec_list) should be the same as len(chapters), and its elements can be empty lists\n :param str titles_opt: 'auto' or file containing titles\n :param list[str] chapters: DocOnce texts consisting in Jupyter-book chapters\n :param list[list[str]] sec_list: DocOnce texts consisting in Jupyter-book sections.\n :return: tuple with chapter and section titles\n :rtype: (list[str], list[list[str]])\n "
chapter_titles = []
sec_title_list = ([[]] * len(chapters))
if (titles_opt != 'auto'):
chapter_titles = ([''] * len(chapters))
input_titles = read_to_list(titles_opt)
for c in range(len(chapters)):
chapter_titles[c] = (input_titles.pop(0) if len(input_titles) else '')
section = []
for _ in range(len(sec_list[c])):
section.append((input_titles.pop(0) if len(input_titles) else ''))
sec_title_list[c] = section
if len(input_titles):
errwarn('*** warning : number of titles is larger than chapters and sections detected. These titles will be ignored')
return (chapter_titles, sec_title_list)
| 1,563,216,286,263,243,000
|
Helper function to read and process a file with titles
Read the file containing titles and process them according to the number of jupyter-book chapters and sections.
len(sec_list) should be the same as len(chapters), and its elements can be empty lists
:param str titles_opt: 'auto' or file containing titles
:param list[str] chapters: DocOnce texts consisting in Jupyter-book chapters
:param list[list[str]] sec_list: DocOnce texts consisting in Jupyter-book sections.
:return: tuple with chapter and section titles
:rtype: (list[str], list[list[str]])
|
lib/doconce/jupyterbook.py
|
read_title_file
|
aless80/doconce
|
python
|
def read_title_file(titles_opt, chapters, sec_list):
"Helper function to read and process a file with titles\n\n Read the file containing titles and process them according to the number of jupyter-book chapters and sections.\n len(sec_list) should be the same as len(chapters), and its elements can be empty lists\n :param str titles_opt: 'auto' or file containing titles\n :param list[str] chapters: DocOnce texts consisting in Jupyter-book chapters\n :param list[list[str]] sec_list: DocOnce texts consisting in Jupyter-book sections.\n :return: tuple with chapter and section titles\n :rtype: (list[str], list[list[str]])\n "
chapter_titles = []
sec_title_list = ([[]] * len(chapters))
if (titles_opt != 'auto'):
chapter_titles = ([''] * len(chapters))
input_titles = read_to_list(titles_opt)
for c in range(len(chapters)):
chapter_titles[c] = (input_titles.pop(0) if len(input_titles) else '')
section = []
for _ in range(len(sec_list[c])):
section.append((input_titles.pop(0) if len(input_titles) else ''))
sec_title_list[c] = section
if len(input_titles):
errwarn('*** warning : number of titles is larger than chapters and sections detected. These titles will be ignored')
return (chapter_titles, sec_title_list)
|
def titles_to_chunks(chunks, title_list, sep, sep2=None, chapter_formatter='%02d_', tags=INLINE_TAGS):
'Helper function to extract assign titles to jupyter-book chapters/sections (here called chunks)\n\n Jupyter-book files must have a # header with the title (see doc jupyter-book >\n Types of content source files > Rules for all content types). This function\n extracts title from the title file or from the headers given by the separator\n provided in the options. If no title is found, provide a default title as e.g.\n 03_mydoconcefile.\n\n :param list[str] chunks: list of text string\n :param list[str] title_list: titles for the chunks. Empty if --titles is us\n :param str sep: separator: chapter|section|subsection\n :param str sep2: second separator in case the first fails: chapter|section|subsection\n :param dict tags: tag patterns, e.g. INLINE_TAGS from common.py\n :param str chapter_formatter: formatter for default filenames\n :return: tuple with the chunks of text having a # header, titles, titles detected\n :rtype: (list[str], list[str], list[str])\n '
title_list_out = title_list.copy()
if (not len(title_list_out)):
title_list_out = ([''] * len(chunks))
title_list_detected = ([''] * len(chunks))
for (i, chunk) in enumerate(chunks):
title = ''
if (title == ''):
(chunk, title) = create_title(chunk, sep, tags)
if ((title == '') and sep2):
(chunk, title) = create_title(chunk, sep2, tags)
if (title == ''):
title = ((chapter_formatter % (i + 1)) + globals.dofile_basename)
title_list_detected[i] = title
if (i < len(title_list)):
if title_list[i]:
title = title_list[i]
title_list_out[i] = title
chunk = ((((((('=' * 9) + ' ') + title) + ' ') + ('=' * 9)) + '\n') + chunk)
chunks[i] = chunk
return (chunks, title_list_out, title_list_detected)
| -4,218,107,978,038,146,000
|
Helper function to extract and assign titles to jupyter-book chapters/sections (here called chunks)
Jupyter-book files must have a # header with the title (see doc jupyter-book >
Types of content source files > Rules for all content types). This function
extracts title from the title file or from the headers given by the separator
provided in the options. If no title is found, provide a default title as e.g.
03_mydoconcefile.
:param list[str] chunks: list of text string
:param list[str] title_list: titles for the chunks. Empty if --titles is not used
:param str sep: separator: chapter|section|subsection
:param str sep2: second separator in case the first fails: chapter|section|subsection
:param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
:param str chapter_formatter: formatter for default filenames
:return: tuple with the chunks of text having a # header, titles, titles detected
:rtype: (list[str], list[str], list[str])
|
lib/doconce/jupyterbook.py
|
titles_to_chunks
|
aless80/doconce
|
python
|
def titles_to_chunks(chunks, title_list, sep, sep2=None, chapter_formatter='%02d_', tags=INLINE_TAGS):
'Helper function to extract assign titles to jupyter-book chapters/sections (here called chunks)\n\n Jupyter-book files must have a # header with the title (see doc jupyter-book >\n Types of content source files > Rules for all content types). This function\n extracts title from the title file or from the headers given by the separator\n provided in the options. If no title is found, provide a default title as e.g.\n 03_mydoconcefile.\n\n :param list[str] chunks: list of text string\n :param list[str] title_list: titles for the chunks. Empty if --titles is us\n :param str sep: separator: chapter|section|subsection\n :param str sep2: second separator in case the first fails: chapter|section|subsection\n :param dict tags: tag patterns, e.g. INLINE_TAGS from common.py\n :param str chapter_formatter: formatter for default filenames\n :return: tuple with the chunks of text having a # header, titles, titles detected\n :rtype: (list[str], list[str], list[str])\n '
title_list_out = title_list.copy()
if (not len(title_list_out)):
title_list_out = ([''] * len(chunks))
title_list_detected = ([''] * len(chunks))
for (i, chunk) in enumerate(chunks):
title = ''
if (title == ''):
(chunk, title) = create_title(chunk, sep, tags)
if ((title == '') and sep2):
(chunk, title) = create_title(chunk, sep2, tags)
if (title == ''):
title = ((chapter_formatter % (i + 1)) + globals.dofile_basename)
title_list_detected[i] = title
if (i < len(title_list)):
if title_list[i]:
title = title_list[i]
title_list_out[i] = title
chunk = ((((((('=' * 9) + ' ') + title) + ' ') + ('=' * 9)) + '\n') + chunk)
chunks[i] = chunk
return (chunks, title_list_out, title_list_detected)
|
def create_title(chunk, sep, tags):
"Helper function to allow doconce jupyterbook to automatically assign titles in the TOC\n\n If a chunk of text starts with the section specified in sep, lift it up\n to a chapter section. This allows doconce jupyterbook to automatically use the\n section's text as title in the TOC on the left\n\n :param str chunk: text string\n :param str sep: chapter|section|subsection\n :param dict tags: tag patterns, e.g. INLINE_TAGS from common.py\n :return: tuple with the chunk stripped of its section header, and title\n :rtype: (str, str)\n "
title = ''
m = re.search(tags[sep], chunk, flags=re.MULTILINE)
if (m and (m.start() == 0)):
name2s = {'chapter': 9, 'section': 7, 'subsection': 5, 'subsubsection': 3}
s = name2s[sep]
header_old = ('=' * s)
pattern = ('^ *%s +(.+?) +%s' % (header_old, header_old))
mt = re.match(pattern, chunk)
if mt:
title = mt.group(1)
chunk = re.sub(pattern, '', chunk, flags=re.MULTILINE, count=1)
return (chunk, title)
| 746,731,705,735,869,800
|
Helper function to allow doconce jupyterbook to automatically assign titles in the TOC
If a chunk of text starts with the section specified in sep, lift it up
to a chapter section. This allows doconce jupyterbook to automatically use the
section's text as title in the TOC on the left
:param str chunk: text string
:param str sep: chapter|section|subsection
:param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
:return: tuple with the chunk stripped of its section header, and title
:rtype: (str, str)
|
lib/doconce/jupyterbook.py
|
create_title
|
aless80/doconce
|
python
|
def create_title(chunk, sep, tags):
"Helper function to allow doconce jupyterbook to automatically assign titles in the TOC\n\n If a chunk of text starts with the section specified in sep, lift it up\n to a chapter section. This allows doconce jupyterbook to automatically use the\n section's text as title in the TOC on the left\n\n :param str chunk: text string\n :param str sep: chapter|section|subsection\n :param dict tags: tag patterns, e.g. INLINE_TAGS from common.py\n :return: tuple with the chunk stripped of its section header, and title\n :rtype: (str, str)\n "
title = ''
m = re.search(tags[sep], chunk, flags=re.MULTILINE)
if (m and (m.start() == 0)):
name2s = {'chapter': 9, 'section': 7, 'subsection': 5, 'subsubsection': 3}
s = name2s[sep]
header_old = ('=' * s)
pattern = ('^ *%s +(.+?) +%s' % (header_old, header_old))
mt = re.match(pattern, chunk)
if mt:
title = mt.group(1)
chunk = re.sub(pattern, '', chunk, flags=re.MULTILINE, count=1)
return (chunk, title)
|
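A tiny illustration of the header pattern create_title matches and strips. The chunk is made up and uses the 5-'=' 'subsection' level from name2s above:

import re

chunk = "===== A section title =====\nbody text\n"
header = "=" * 5
pattern = "^ *%s +(.+?) +%s" % (header, header)
print(re.match(pattern, chunk).group(1))                        # 'A section title'
print(re.sub(pattern, "", chunk, flags=re.MULTILINE, count=1))  # header removed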
def identify_format(text_list):
"Identify the appropriate formats to convert a list of DocOnce texts.\n\n Given a list of DocOnce texts, check if they contain code. If so, return the suffix\n '.ipynb' (for the Jupyter Notebook ipynb format), otherwise return '.md' (for\n the pandoc markdown format).\n :param list[str] text_list: list of strings using DocOnce syntax\n :return: list of formats\n :rtype: list[str]\n "
chunk_formats = ([''] * len(text_list))
for (i, text) in enumerate(text_list):
format = 'pandoc'
(_filestr, code_blocks, code_block_types, tex_blocks) = remove_code_and_tex(text, format)
if len(code_blocks):
format = 'ipynb'
chunk_formats[i] += ('.md' if (format == 'pandoc') else '.ipynb')
return chunk_formats
| -6,315,886,050,878,515,000
|
Identify the appropriate formats to convert a list of DocOnce texts.
Given a list of DocOnce texts, check if they contain code. If so, return the suffix
'.ipynb' (for the Jupyter Notebook ipynb format), otherwise return '.md' (for
the pandoc markdown format).
:param list[str] text_list: list of strings using DocOnce syntax
:return: list of formats
:rtype: list[str]
|
lib/doconce/jupyterbook.py
|
identify_format
|
aless80/doconce
|
python
|
def identify_format(text_list):
"Identify the appropriate formats to convert a list of DocOnce texts.\n\n Given a list of DocOnce texts, check if they contain code. If so, return the suffix\n '.ipynb' (for the Jupyter Notebook ipynb format), otherwise return '.md' (for\n the pandoc markdown format).\n :param list[str] text_list: list of strings using DocOnce syntax\n :return: list of formats\n :rtype: list[str]\n "
chunk_formats = ([''] * len(text_list))
for (i, text) in enumerate(text_list):
format = 'pandoc'
(_filestr, code_blocks, code_block_types, tex_blocks) = remove_code_and_tex(text, format)
if len(code_blocks):
format = 'ipynb'
chunk_formats[i] += ('.md' if (format == 'pandoc') else '.ipynb')
return chunk_formats
|
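A sketch of identify_format on made-up DocOnce snippets, assuming identify_format and its remove_code_and_tex dependency are importable as in the repo. The second text contains a !bc/!ec code block, so it should be routed to ipynb:

texts = ["Plain prose only.",
         "Some prose.\n!bc pycod\nprint('hi')\n!ec\n"]
print(identify_format(texts))   # expected: ['.md', '.ipynb']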
def create_toc_yml(basenames, nesting_levels, titles, dest='./', dest_toc='./', section_paths=None, section_titles=None):
'Create the content of a _toc.yml file\n\n Give the lists of paths, titles, and nesting levels, return the content of a _toc.yml file\n :param list[str] basenames: list of file basenames for jupyter-book chapters or sections, i.e.\n strings that can be used after the `file:` section in a _toc.yml\n :param list[str] titles: list of titles to jupyter-book chapters, i.e. strings that can be used\n after the `title:` section in a _toc.yml\n :param list[str] nesting_levels: nesting levels for basenames and titles: # 0 or 1 for jupyter-book\n chapters or sections, respectively\n :param str dest: destination folder for _toc.yml\n :param str dest_toc: destination folder for the chapter files\n :return: content of a _toc.yml file\n :rtype: str\n '
def escape_chars(title):
'Wrap title in quotes if it contains colons, asterisks, backticks'
if (re.search(':', title) or re.search('\\*', title) or re.search('\\`', title)):
title = title.replace('"', '\\"')
title = (('"' + title) + '"')
return title
relpath = os.path.relpath(dest, start=dest_toc)
if (relpath == '.'):
relpath = ''
else:
relpath += '/'
yml_text = ''
nesting_prev = 0
for (i, cfname) in enumerate(basenames):
ctitle = escape_chars(titles[i])
if ctitle:
nesting = nesting_levels[i]
if (nesting == 0):
yml_text += '\n'
yml_text += yml_titledpage((relpath + cfname), ctitle, numbered=False)
else:
if (nesting_prev == 0):
yml_text += yml_section(nesting_level=nesting)
yml_text += yml_nested_section((relpath + cfname), ctitle, nesting_level=nesting)
nesting_prev = nesting
yml_text = yml_text.strip('\n')
return yml_text
| -2,230,910,722,808,470,300
|
Create the content of a _toc.yml file
Given the lists of paths, titles, and nesting levels, return the content of a _toc.yml file
:param list[str] basenames: list of file basenames for jupyter-book chapters or sections, i.e.
strings that can be used after the `file:` section in a _toc.yml
:param list[str] titles: list of titles to jupyter-book chapters, i.e. strings that can be used
after the `title:` section in a _toc.yml
:param list[str] nesting_levels: nesting levels for basenames and titles: 0 or 1 for jupyter-book
chapters or sections, respectively
:param str dest: destination folder for _toc.yml
:param str dest_toc: destination folder for the chapter files
:return: content of a _toc.yml file
:rtype: str
|
lib/doconce/jupyterbook.py
|
create_toc_yml
|
aless80/doconce
|
python
|
def create_toc_yml(basenames, nesting_levels, titles, dest='./', dest_toc='./', section_paths=None, section_titles=None):
'Create the content of a _toc.yml file\n\n Give the lists of paths, titles, and nesting levels, return the content of a _toc.yml file\n :param list[str] basenames: list of file basenames for jupyter-book chapters or sections, i.e.\n strings that can be used after the `file:` section in a _toc.yml\n :param list[str] titles: list of titles to jupyter-book chapters, i.e. strings that can be used\n after the `title:` section in a _toc.yml\n :param list[str] nesting_levels: nesting levels for basenames and titles: # 0 or 1 for jupyter-book\n chapters or sections, respectively\n :param str dest: destination folder for _toc.yml\n :param str dest_toc: destination folder for the chapter files\n :return: content of a _toc.yml file\n :rtype: str\n '
def escape_chars(title):
'Wrap title in quotes if it contains colons, asterisks, backticks'
if (re.search(':', title) or re.search('\\*', title) or re.search('\\`', title)):
title = title.replace('"', '\\"')
title = (('"' + title) + '"')
return title
relpath = os.path.relpath(dest, start=dest_toc)
if (relpath == '.'):
relpath = ''
else:
relpath += '/'
yml_text = ''
nesting_prev = 0
for (i, cfname) in enumerate(basenames):
ctitle = escape_chars(titles[i])
if ctitle:
nesting = nesting_levels[i]
if (nesting == 0):
yml_text += '\n'
yml_text += yml_titledpage((relpath + cfname), ctitle, numbered=False)
else:
if (nesting_prev == 0):
yml_text += yml_section(nesting_level=nesting)
yml_text += yml_nested_section((relpath + cfname), ctitle, nesting_level=nesting)
nesting_prev = nesting
yml_text = yml_text.strip('\n')
return yml_text
|
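The inner escape_chars helper quotes titles that would otherwise break YAML; a quick check of its behavior (logic copied from above into a runnable snippet):

import re

def escape_chars(title):
    if (re.search(':', title) or re.search(r'\*', title) or re.search(r'`', title)):
        title = title.replace('"', '\\"')
        title = ('"' + title + '"')
    return title

print(escape_chars('Chapter 1: "Intro"'))   # "Chapter 1: \"Intro\""
print(escape_chars('Plain title'))          # Plain title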
def print_help_jupyterbook():
'Pretty print help string and command line options\n\n Help function to print help and formatted command line options for doconce jupyterbook\n '
print(docstring_jupyterbook)
print('Options:')
help_print_options(cmdline_opts=_registered_cmdline_opts_jupyterbook)
| -513,857,317,894,164,030
|
Pretty print help string and command line options
Help function to print help and formatted command line options for doconce jupyterbook
|
lib/doconce/jupyterbook.py
|
print_help_jupyterbook
|
aless80/doconce
|
python
|
def print_help_jupyterbook():
'Pretty print help string and command line options\n\n Help function to print help and formatted command line options for doconce jupyterbook\n '
print(docstring_jupyterbook)
print('Options:')
help_print_options(cmdline_opts=_registered_cmdline_opts_jupyterbook)
|
def read_to_list(file):
'Read the content of a file to list\n\n Verify the existence of a file, then read it to a list by\n stripping newlines. The function aborts the program if the file does not exist.\n\n :param str file: Path to an existing file\n :return: list of strings\n :rtype: list[str]\n '
if (not os.path.isfile(file)):
errwarn(('*** error: file "%s" does not exist!' % file))
_abort()
with open(file, 'r') as f:
out = f.read().splitlines()
return out
| -1,171,378,323,079,902,700
|
Read the content of a file into a list
Verify the existence of a file, then read it into a list by
stripping newlines. The function aborts the program if the file does not exist.
:param str file: Path to an existing file
:return: list of strings
:rtype: list[str]
|
lib/doconce/jupyterbook.py
|
read_to_list
|
aless80/doconce
|
python
|
def read_to_list(file):
    'Read the content of a file into a list\n\n Verify the existence of a file, then read it into a list by\n stripping newlines. The function aborts the program if the file does not exist.\n\n :param str file: Path to an existing file\n :return: list of strings\n :rtype: list[str]\n '
if (not os.path.isfile(file)):
errwarn(('*** error: file "%s" does not exist!' % file))
_abort()
with open(file, 'r') as f:
out = f.read().splitlines()
return out
|
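Assumed usage, with a hypothetical filename; the returned list holds the file's lines with trailing newlines stripped, and the program aborts if the file is missing:

lines = read_to_list('mybook.do.txt')   # hypothetical DocOnce source file
print(len(lines), 'lines read')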
def get_link_destinations(chunk):
'Find any target of a link in HTML code\n\n Use regex to find tags with the id or name attribute, which makes them a possible target of a link\n :param str chunk: text string\n :return: destinations, destination_tags\n :rtype: Tuple[list[str], list[str]]\n '
(destinations, destination_tags) = ([], [])
pattern_tag = '[\\w _\\-:]'
pattern_backslash = '[\\\\]'
pattern = ((((((((('<' + pattern_tag) + '+ (id|name)=') + pattern_backslash) + '["\']') + '(') + pattern_tag) + '+)') + pattern_backslash) + '["\'][^>]*>')
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(2)
destinations.append(match)
destination_tags.append(tag)
return (destinations, destination_tags)
| 6,399,748,933,904,265,000
|
Find any target of a link in HTML code
Use regex to find tags with the id or name attribute, which makes them a possible target of a link
:param str chunk: text string
:return: destinations, destination_tags
:rtype: Tuple[list[str], list[str]]
|
lib/doconce/jupyterbook.py
|
get_link_destinations
|
aless80/doconce
|
python
|
def get_link_destinations(chunk):
'Find any target of a link in HTML code\n\n Use regex to find tags with the id or name attribute, which makes them a possible target of a link\n :param str chunk: text string\n :return: destinations, destination_tags\n :rtype: Tuple[list[str], list[str]]\n '
(destinations, destination_tags) = ([], [])
pattern_tag = '[\\w _\\-:]'
pattern_backslash = '[\\\\]'
pattern = ((((((((('<' + pattern_tag) + '+ (id|name)=') + pattern_backslash) + '["\']') + '(') + pattern_tag) + '+)') + pattern_backslash) + '["\'][^>]*>')
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(2)
destinations.append(match)
destination_tags.append(tag)
return (destinations, destination_tags)
|
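Note that the pattern requires a backslash before each attribute quote, so it appears to target escaped HTML of the kind found inside notebook JSON (e.g. <h2 id=\"sec1\">) rather than plain HTML. A self-contained check of an equivalent regex on a made-up chunk:

import re

# Same pattern as above, written as a raw string; [\\] matches the literal
# backslash that precedes each quote in the escaped input.
pattern = r'<[\w _\-:]+ (id|name)=[\\]["\']([\w _\-:]+)[\\]["\'][^>]*>'
chunk = 'text <h2 id=\\"sec:intro\\">Intro</h2> more text'
for m in re.finditer(pattern, chunk):
    print(m.group(2))   # -> sec:intro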
def fix_links(chunk, tag2file):
    'Find and fix the destinations of hyperlinks using HTML or markdown syntax\n\n Fix any link in a string text so that they can target a different html document.\n First use regex on a HTML text to find any HTML or markdown hyperlinks\n (e.g. <a href="#sec1"> or [sec1](#sec1) ). Then use a dictionary to prepend the\n filename to the value of a link\'s href attribute (e.g. <a href="02_jupyterbook.html#sec1">)\n :param str chunk: text string\n :param dict tag2file: dictionary mapping a tag to a file basename e.g. tag2file[\'sec1\']=\'02_jupyterbook\'\n :return: chunk with fixed links\n :rtype: str\n '
chunk_out = chunk
pattern_tag = '[\\w _\\-:]'
pattern = (((('<' + pattern_tag) + '+ href=[\\\\]{0,2}["\']#(') + pattern_tag) + '+)[\\\\]{0,2}["\'][^>]*>')
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(1)
fixed_tag = match.replace(('#' + tag), ((tag2file.get(tag, tag) + '.html#') + tag))
chunk_out = chunk_out.replace(match, fixed_tag)
pattern = (((('\\[' + pattern_tag) + '+\\]\\(#(') + pattern_tag) + '+)\\)')
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(1)
fixed_tag = match.replace(('#' + tag), ((tag2file.get(tag, tag) + '.html#') + tag))
chunk_out = chunk_out.replace(match, fixed_tag)
return chunk_out
| 9,217,471,721,488,170,000
|
Find and fix the destinations of hyperlinks using HTML or markdown syntax
Fix any link in a text string so that it can target a different HTML document.
First use regex on a HTML text to find any HTML or markdown hyperlinks
(e.g. <a href="#sec1"> or [sec1](#sec1) ). Then use a dictionary to prepend the
filename to the value of a link's href attribute (e.g. <a href="02_jupyterbook.html#sec1">)
:param str chunk: text string
:param dict tag2file: dictionary mapping a tag to a file basename e.g. tag2file['sec1']='02_jupyterbook'
:return: chunk with fixed links
:rtype: str
|
lib/doconce/jupyterbook.py
|
fix_links
|
aless80/doconce
|
python
|
def fix_links(chunk, tag2file):
    'Find and fix the destinations of hyperlinks using HTML or markdown syntax\n\n Fix any link in a string text so that they can target a different html document.\n First use regex on a HTML text to find any HTML or markdown hyperlinks\n (e.g. <a href="#sec1"> or [sec1](#sec1) ). Then use a dictionary to prepend the\n filename to the value of a link\'s href attribute (e.g. <a href="02_jupyterbook.html#sec1">)\n :param str chunk: text string\n :param dict tag2file: dictionary mapping a tag to a file basename e.g. tag2file[\'sec1\']=\'02_jupyterbook\'\n :return: chunk with fixed links\n :rtype: str\n '
chunk_out = chunk
pattern_tag = '[\\w _\\-:]'
pattern = (((('<' + pattern_tag) + '+ href=[\\\\]{0,2}["\']#(') + pattern_tag) + '+)[\\\\]{0,2}["\'][^>]*>')
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(1)
fixed_tag = match.replace(('#' + tag), ((tag2file.get(tag, tag) + '.html#') + tag))
chunk_out = chunk_out.replace(match, fixed_tag)
pattern = (((('\\[' + pattern_tag) + '+\\]\\(#(') + pattern_tag) + '+)\\)')
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(1)
fixed_tag = match.replace(('#' + tag), ((tag2file.get(tag, tag) + '.html#') + tag))
chunk_out = chunk_out.replace(match, fixed_tag)
return chunk_out
|
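Assuming fix_links as defined above, a made-up example: once the mapping records that tag 'sec1' lives in file 02_jupyterbook, a same-page markdown link is rewritten into a cross-file one.

tag2file = {'sec1': '02_jupyterbook'}
chunk = 'See [the intro](#sec1) for details.'
print(fix_links(chunk, tag2file))
# -> See [the intro](02_jupyterbook.html#sec1) for details.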
def resolve_links_destinations(chunks, chunk_basenames):
    'Fix links in jupyter-book chapters/sections so that they can target destinations in other files\n\n Prepend a filename to all links\' destinations e.g. <a href="#Langtangen_2012"> becomes\n <a href="02_jupyterbook.html#Langtangen_2012">\n :param list[str] chunks: DocOnce texts consisting of Jupyter-book chapters/sections\n :param list[str] chunk_basenames: file basenames for jupyter-book chapters/sections\n :return: chunks with corrected links\n :rtype: list[str]\n '
def strip_end(text, suffix):
if (suffix and text.endswith(suffix)):
return text[:(- len(suffix))]
return text
all_sects = chunks
all_basenames = chunk_basenames
all_basenames = list(map((lambda fname: strip_end(fname, '.md')), all_basenames))
all_basenames = list(map((lambda fname: strip_end(fname, '.ipynb')), all_basenames))
tag2file = {}
for i in range(len(all_sects)):
(ch_destinations, ch_destination_tags) = get_link_destinations(all_sects[i])
basename_list = ([all_basenames[i]] * len(ch_destinations))
tag2file.update(zip(ch_destination_tags, basename_list))
for c in range(len(chunks)):
chunks[c] = fix_links(chunks[c], tag2file)
return chunks
| 5,405,938,629,762,071,000
|
Fix links in jupyter-book chapters/sections so that they can target destinations in other files
Prepend a filename to all links' destinations e.g. <a href="#Langtangen_2012"> becomes
<a href="02_jupyterbook.html#Langtangen_2012">
:param list[str] chunks: DocOnce texts consisting of Jupyter-book chapters/sections
:param list[str] chunk_basenames: file basenames for jupyter-book chapters/sections
:return: chunks with corrected links
:rtype: list[str]
|
lib/doconce/jupyterbook.py
|
resolve_links_destinations
|
aless80/doconce
|
python
|
def resolve_links_destinations(chunks, chunk_basenames):
    'Fix links in jupyter-book chapters/sections so that they can target destinations in other files\n\n Prepend a filename to all links\' destinations e.g. <a href="#Langtangen_2012"> becomes\n <a href="02_jupyterbook.html#Langtangen_2012">\n :param list[str] chunks: DocOnce texts consisting of Jupyter-book chapters/sections\n :param list[str] chunk_basenames: file basenames for jupyter-book chapters/sections\n :return: chunks with corrected links\n :rtype: list[str]\n '
def strip_end(text, suffix):
if (suffix and text.endswith(suffix)):
return text[:(- len(suffix))]
return text
all_sects = chunks
all_basenames = chunk_basenames
all_basenames = list(map((lambda fname: strip_end(fname, '.md')), all_basenames))
all_basenames = list(map((lambda fname: strip_end(fname, '.ipynb')), all_basenames))
tag2file = {}
for i in range(len(all_sects)):
(ch_destinations, ch_destination_tags) = get_link_destinations(all_sects[i])
basename_list = ([all_basenames[i]] * len(ch_destinations))
tag2file.update(zip(ch_destination_tags, basename_list))
for c in range(len(chunks)):
chunks[c] = fix_links(chunks[c], tag2file)
return chunks
|
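A hypothetical two-chapter run, again assuming the functions above are in scope: the anchor 'res' is defined in the second chunk, so the link in the first chunk is pointed at the second file.

chunks = ['See [results](#res).',
          'text <h2 id=\\"res\\">Results</h2>']
chunk_basenames = ['01_intro.md', '02_results.md']
fixed = resolve_links_destinations(chunks, chunk_basenames)
# fixed[0] -> 'See [results](02_results.html#res).'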
def fix_media_src(filestr, dirname, dest):
'Fix the (relative) path to any figure and movie in the DocOnce file.\n\n The generated .md and .ipynb files will be created in the path passed to `--dest`.\n This method fixes the paths of the image and movie files so that they can be found\n in generated .md and .ipynb files.\n :param str filestr: text string\n :param str dirname: Path to an existing folder\n :param str dest: directory name\n :return: filestr with new paths\n :rtype: str\n '
patterns = [movie2html['movie_regex'], '\\!\\[<p><em>(.*)</em></p>\\]\\((.*)\\)', img2ipynb['imgtag_regex'], img2ipynb['md_regex'], '<!-- (?:dom:)(FIGURE|MOVIE): \\[(.*)', '<!-- <(\\w+) src="(.*)" .*>(?=[<|\\\\n])']
filestr_out = filestr
for (i, pattern) in enumerate(patterns):
for m in re.finditer(pattern, filestr):
match = m.group()
tag = m.group(1)
src = m.group(2)
if (pattern == movie2html['movie_regex']):
                errwarn('*** warning : To make images work, consider adding this extension to _config.yml:\n', 'parse:\n myst_enable_extensions:\n - html_image\n')
if (not src.startswith('/')):
if ((dirname != '') and (not dirname.endswith('/'))):
dirname += '/'
src_new = os.path.relpath((dirname + src), start=dest)
replacement = match.replace(src, src_new, 1)
filestr_out = filestr_out.replace(match, replacement, 1)
return filestr_out
| 358,296,290,649,753,860
|
Fix the (relative) path to any figure and movie in the DocOnce file.
The generated .md and .ipynb files will be created in the path passed to `--dest`.
This method fixes the paths of the image and movie files so that they can be found
in generated .md and .ipynb files.
:param str filestr: text string
:param str dirname: Path to an existing folder
:param str dest: destination folder for the generated files
:return: filestr with new paths
:rtype: str
|
lib/doconce/jupyterbook.py
|
fix_media_src
|
aless80/doconce
|
python
|
def fix_media_src(filestr, dirname, dest):
'Fix the (relative) path to any figure and movie in the DocOnce file.\n\n The generated .md and .ipynb files will be created in the path passed to `--dest`.\n This method fixes the paths of the image and movie files so that they can be found\n in generated .md and .ipynb files.\n :param str filestr: text string\n :param str dirname: Path to an existing folder\n :param str dest: directory name\n :return: filestr with new paths\n :rtype: str\n '
patterns = [movie2html['movie_regex'], '\\!\\[<p><em>(.*)</em></p>\\]\\((.*)\\)', img2ipynb['imgtag_regex'], img2ipynb['md_regex'], '<!-- (?:dom:)(FIGURE|MOVIE): \\[(.*)', '<!-- <(\\w+) src="(.*)" .*>(?=[<|\\\\n])']
filestr_out = filestr
for (i, pattern) in enumerate(patterns):
for m in re.finditer(pattern, filestr):
match = m.group()
tag = m.group(1)
src = m.group(2)
if (pattern == movie2html['movie_regex']):
                errwarn('*** warning : To make images work, consider adding this extension to _config.yml:\n', 'parse:\n myst_enable_extensions:\n - html_image\n')
if (not src.startswith('/')):
                if ((dirname != '') and (not dirname.endswith('/'))):
dirname += '/'
src_new = os.path.relpath((dirname + src), start=dest)
replacement = match.replace(src, src_new, 1)
filestr_out = filestr_out.replace(match, replacement, 1)
return filestr_out
|
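The core of the rewrite is a single os.path.relpath call: a media path that was relative to the DocOnce source folder is re-expressed relative to the output folder. A standalone sketch with made-up folders:

import os

dirname, dest = 'doc/src/', 'doc/pub/'   # hypothetical source and output folders
src = 'fig/plot.png'
print(os.path.relpath(dirname + src, start=dest))   # -> ../src/fig/plot.png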
def escape_chars(title):
    'Wrap title in quotes if it contains colons, asterisks, or backticks'
if (re.search(':', title) or re.search('\\*', title) or re.search('\\`', title)):
title = title.replace('"', '\\"')
title = (('"' + title) + '"')
return title
| -4,069,678,415,874,223,600
|
Wrap title in quotes if it contains colons, asterisks, or backticks
|
lib/doconce/jupyterbook.py
|
escape_chars
|
aless80/doconce
|
python
|
def escape_chars(title):
if (re.search(':', title) or re.search('\\*', title) or re.search('\\`', title)):
title = title.replace('"', '\\"')
title = (('"' + title) + '"')
return title
|
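Quick behavior check, assuming escape_chars as above: a title containing a colon is wrapped in double quotes so it stays valid YAML, after any inner double quotes are escaped.

print(escape_chars('Intro: the "basics"'))
# -> "Intro: the \"basics\""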
def mae(y_true, y_pred):
    ' Implementation of mean absolute error (MAE)\n '
return K.mean(K.abs((y_true - y_pred)))
| 8,321,551,904,465,290,000
|
Implementation of mean absolute error (MAE)
|
raynet/models.py
|
mae
|
paschalidoud/raynet
|
python
|
def mae(y_true, y_pred):
' \n '
return K.mean(K.abs((y_true - y_pred)))
|
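A NumPy sanity check of the same formula, mean(|y_true - y_pred|), which avoids pulling in the Keras backend:

import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.5, 2.0, 2.0])
print(np.mean(np.abs(y_true - y_pred)))   # -> 0.5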