hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71c74c4947034e3fa1939f004fe6def695c2676 | 2,684 | py | Python | unjupyter.py | milo-trujillo/unjupyter | 2ea86f67e39060ddffb109a2ab94bd074c169fed | [
"MIT"
] | null | null | null | unjupyter.py | milo-trujillo/unjupyter | 2ea86f67e39060ddffb109a2ab94bd074c169fed | [
"MIT"
] | null | null | null | unjupyter.py | milo-trujillo/unjupyter | 2ea86f67e39060ddffb109a2ab94bd074c169fed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json, sys, os, base64, hashlib, glob
def writeSource(f, src):
    """Copy every source fragment in *src* verbatim into the open file *f*.

    Notebook cells store their source as a list of line strings (each
    usually newline-terminated); this just concatenates them onto *f*.
    """
    f.writelines(src)
def processOutputs(f, outputs):
    """Render a notebook cell's ``outputs`` list as Markdown into *f*.

    Plain-text outputs are fenced as code blocks.  Image outputs are
    base64-decoded into a file in the current directory named by the MD5
    of the decoded bytes (so re-runs are stable) and referenced from the
    Markdown.  HTML is only written raw when no plaintext fallback
    exists; any other mime type is skipped with a warning on stderr.
    """
    for output in outputs:
        if "text" in output.keys():
            f.write("```\n")
            for line in output["text"]:
                f.write(line)
            f.write("\n```\n")
        if "data" in output.keys():
            filetypes = output["data"].keys()
            for filetype in filetypes:
                # Mime types look like "image/png" or "text/plain".
                category, extension = filetype.split("/")
                if category == "image":
                    data = output["data"][filetype]
                    raw = base64.b64decode(data)
                    filename = hashlib.md5(raw).hexdigest() + "." + extension
                    with open(filename, "wb") as image:
                        image.write(raw)
                    # BUG FIX: the original formatted a placeholder-free
                    # string ("\n\n\n") with three arguments, which raises
                    # TypeError.  Emit a Markdown image reference instead
                    # (presumably the original intent — confirm rendering).
                    f.write("![%s/%s](%s)\n\n\n"
                            % (category, extension, filename))
                elif category == "text" and extension == "plain":
                    data = output["data"][filetype]
                    f.write("```\n")
                    writeSource(f, data)
                    f.write("\n```\n\n")
                elif (category == "text" and extension == "html"
                        and "text/plain" in filetypes):
                    sys.stderr.write("Info: Ignoring an 'html' output in favor of available plaintext\n")
                elif category == "text" and extension == "html":
                    sys.stderr.write("Info: Writing raw html because there is no plaintext counterpart :(\n")
                    data = output["data"][filetype]
                    writeSource(f, data)
                    f.write("\n\n")
                else:
                    sys.stderr.write("WARNING: Skipping unsupported data type '%s'\n" % (filetype))
def convertNotebook(infile, outfile):
    """Translate the Jupyter notebook *infile* into Markdown at *outfile*.

    Markdown cells are copied through; code cells become fenced blocks,
    followed by their rendered outputs when present.
    """
    with open(outfile, "w") as md, open(infile, "r") as notebook:
        parsed = json.load(notebook)
        for cell in parsed["cells"]:
            kind = cell["cell_type"]
            if kind == "markdown":
                writeSource(md, cell["source"])
                md.write("\n\n")
            elif kind == "code":
                if cell["source"]:
                    md.write("```\n")
                    writeSource(md, cell["source"])
                    md.write("\n```\n\n")
                if cell["outputs"]:
                    md.write("Output:\n\n")
                    processOutputs(md, cell["outputs"])
                    md.write("\n")
    # Flush any informational/warning messages before the success line.
    sys.stderr.flush()
    print("Notebook '%s' exported as '%s'" % (infile, outfile))
if __name__ == "__main__":
    # CLI entry point: a single argument may be a notebook (output name is
    # derived) or a directory (every *.ipynb inside is converted); two
    # arguments name the input and output explicitly.
    if len(sys.argv) == 2:
        if os.path.isdir(sys.argv[1]):
            for infile in glob.glob(sys.argv[1] + "/*.ipynb"):
                outfile = os.path.splitext(infile)[0] + ".md"
                convertNotebook(infile, outfile)
        else:
            infile = sys.argv[1]
            outfile = os.path.splitext(infile)[0] + ".md"
            convertNotebook(infile, outfile)
    elif len(sys.argv) == 3:
        infile = sys.argv[1]
        outfile = sys.argv[2]
        convertNotebook(infile, outfile)
    else:
        # BUG FIX: the usage strings contained %s placeholders but were
        # never formatted, so the literal "%s" was printed; interpolate
        # the program name.
        sys.stderr.write("USAGE: %s <infile.ipynb> [outfile.md]\n" % sys.argv[0])
        sys.stderr.write(" or: %s <directory>\n" % sys.argv[0])
        sys.exit(1)
| 33.55 | 94 | 0.604322 |
import json, sys, os, base64, hashlib, glob
def writeSource(f, src):
for line in src:
f.write(line)
def processOutputs(f, outputs):
for output in outputs:
if( "text" in output.keys() ):
f.write("```\n")
for line in output["text"]:
f.write(line)
f.write("\n```\n")
if( "data" in output.keys() ):
filetypes = output["data"].keys()
for filetype in filetypes:
category, extension = filetype.split("/")
if( category == "image" ):
data = output["data"][filetype]
raw = base64.b64decode(data)
filename = hashlib.md5(raw).hexdigest() + "." + extension
with open(filename, "wb") as image:
image.write(raw)
f.write("\n\n\n" % (category, extension, filename))
elif( category == "text" and extension == "plain" ):
data = output["data"][filetype]
f.write("```\n")
writeSource(f, data)
f.write("\n```\n\n")
elif( category == "text" and extension == "html" and "text/plain" in filetypes ):
sys.stderr.write("Info: Ignoring an 'html' output in favor of available plaintext\n")
elif( category == "text" and extension == "html" ):
sys.stderr.write("Info: Writing raw html because there is no plaintext counterpart :(\n")
data = output["data"][filetype]
writeSource(f, data)
f.write("\n\n")
else:
sys.stderr.write("WARNING: Skipping unsupported data type '%s'\n" % (filetype))
def convertNotebook(infile, outfile):
with open(outfile, "w") as md:
with open(infile, "r") as notebook:
data = json.load(notebook)
cells = data["cells"]
for cell in cells:
if( cell["cell_type"] == "markdown" ):
writeSource(md, cell["source"])
md.write("\n\n")
elif( cell["cell_type"] == "code" ):
if( len(cell["source"]) > 0 ):
md.write("```\n")
writeSource(md, cell["source"])
md.write("\n```\n\n")
if( len(cell["outputs"]) > 0 ):
md.write("Output:\n\n")
processOutputs(md, cell["outputs"])
md.write("\n")
sys.stderr.flush()
print("Notebook '%s' exported as '%s'" % (infile, outfile))
if __name__ == "__main__":
if( len(sys.argv) == 2 ):
if( os.path.isdir(sys.argv[1]) ):
for infile in glob.glob(sys.argv[1]+"/*.ipynb"):
outfile = os.path.splitext(infile)[0] + ".md"
convertNotebook(infile, outfile)
else:
infile = sys.argv[1]
outfile = os.path.splitext(infile)[0] + ".md"
convertNotebook(infile, outfile)
elif( len(sys.argv) == 3 ):
infile = sys.argv[1]
outfile = sys.argv[2]
convertNotebook(infile, outfile)
else:
sys.stderr.write("USAGE: %s <infile.ipynb> [outfile.md]\n")
sys.stderr.write(" or: %s <directory>\n")
sys.exit(1)
| true | true |
f71c7536f0d8bae32792340fd5193c009dbbeef0 | 403 | py | Python | AIC21_Backend/asgi.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | 3 | 2021-03-12T18:32:39.000Z | 2021-11-08T10:21:04.000Z | AIC21_Backend/asgi.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | null | null | null | AIC21_Backend/asgi.py | mehrbodjavadi79/AIC21-Backend | 9f4342781f0722804a2eb704b43b52984c81b40a | [
"MIT"
] | 2 | 2021-01-29T14:52:53.000Z | 2022-03-05T10:24:24.000Z | """
ASGI config for AIC21_Backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AIC21_Backend.settings')
application = get_asgi_application()
| 23.705882 | 78 | 0.791563 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AIC21_Backend.settings')
application = get_asgi_application()
| true | true |
f71c767d697d8a28a293c70fc345f6c9aac815fd | 296 | py | Python | TweetsToDB/main.py | lru-avershave/CapstoneProject | f74b4c73ffb0214a498b19f5f51481c529fa85a8 | [
"MIT"
] | 2 | 2020-01-15T06:38:34.000Z | 2020-01-22T20:42:19.000Z | TweetsToDB/main.py | lru-avershave/CapstoneProject | f74b4c73ffb0214a498b19f5f51481c529fa85a8 | [
"MIT"
] | null | null | null | TweetsToDB/main.py | lru-avershave/CapstoneProject | f74b4c73ffb0214a498b19f5f51481c529fa85a8 | [
"MIT"
] | 1 | 2020-01-15T20:11:48.000Z | 2020-01-15T20:11:48.000Z | import mongodb_setup as dbConnection
import TweetModel as TweetModel
# from watchdir import watch
from ImportText import collectTxt
class main():
try:
dbConnection
collectTxt()
# watch()
except KeyboardInterrupt:
print("Interrupted Main")
exit(0) | 21.142857 | 36 | 0.679054 | import mongodb_setup as dbConnection
import TweetModel as TweetModel
from ImportText import collectTxt
class main():
try:
dbConnection
collectTxt()
except KeyboardInterrupt:
print("Interrupted Main")
exit(0) | true | true |
f71c76b8aae27f9f54f39dc22abd7134629a2418 | 6,042 | py | Python | yateto/arch.py | ZaubererHaft/yateto | 88a02d160da9bfa7f74a4280deaf465f15cae0fb | [
"BSD-3-Clause"
] | 2 | 2021-07-01T14:23:01.000Z | 2022-01-12T01:06:24.000Z | yateto/arch.py | ZaubererHaft/yateto | 88a02d160da9bfa7f74a4280deaf465f15cae0fb | [
"BSD-3-Clause"
] | 14 | 2019-06-25T18:12:29.000Z | 2022-02-08T15:17:27.000Z | yateto/arch.py | ZaubererHaft/yateto | 88a02d160da9bfa7f74a4280deaf465f15cae0fb | [
"BSD-3-Clause"
] | 3 | 2021-05-14T13:04:28.000Z | 2021-12-24T03:15:35.000Z | ##
# @file
# This file is part of SeisSol.
#
# @author Carsten Uphoff (c.uphoff AT tum.de, http://www5.in.tum.de/wiki/index.php/Carsten_Uphoff,_M.Sc.)
#
# @section LICENSE
# Copyright (c) 2015-2018, SeisSol Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# @section DESCRIPTION
#
from .memory import DenseMemoryLayout
class Architecture(object):
    """Describes a code-generation target: its floating-point precision,
    vector-register alignment, and (for heterogeneous targets) the device
    sub-architecture and host architecture names.
    """

    def __init__(self,
                 name,
                 precision,
                 alignment,
                 enablePrefetch=False,
                 sub_name=None,
                 host_name=None):
        """
        Args:
          name (str): name of the compute (main) architecture.
          precision (str): 'd'/'s' (case-insensitive) selecting double or
            single precision.
          alignment (int): length of a vector register (unit) in bytes;
            must be a multiple of the real size.
          enablePrefetch (bool): whether the target supports data
            prefetching.
          sub_name (str): sub-architecture model (e.g. an Nvidia sm_xx);
            None for CPU targets.
          host_name (str): host (CPU) architecture name when generating
            for an accelerator; None otherwise.

        Raises:
          ValueError: if *precision* is neither 'd' nor 's'.
        """
        self.name = name
        self.sub_name = sub_name
        self.host_name = host_name
        self.precision = precision.upper()
        if self.precision == 'D':
            self.bytesPerReal, self.typename, self.epsilon = 8, 'double', 2.22e-16
        elif self.precision == 'S':
            self.bytesPerReal, self.typename, self.epsilon = 4, 'float', 1.19e-7
        else:
            raise ValueError(f'Unknown precision type {self.precision}')
        self.alignment = alignment
        assert self.alignment % self.bytesPerReal == 0
        # Number of reals that fit in one aligned vector unit.
        self.alignedReals = self.alignment // self.bytesPerReal
        self.enablePrefetch = enablePrefetch
        self.uintTypename = 'unsigned'
        self.ulongTypename = 'unsigned long'
        # Temporaries larger than this (in bytes) go on the heap.
        self._tmpStackLimit = 524288

    def setTmpStackLimit(self, tmpStackLimit):
        """Override the stack-allocation threshold (bytes)."""
        self._tmpStackLimit = tmpStackLimit

    def alignedLower(self, index):
        """Round *index* down to the nearest aligned boundary."""
        return (index // self.alignedReals) * self.alignedReals

    def alignedUpper(self, index):
        """Round *index* up to the nearest aligned boundary."""
        return index + (-index) % self.alignedReals

    def alignedShape(self, shape):
        """Return *shape* with its leading dimension padded to alignment."""
        return (self.alignedUpper(shape[0]),) + shape[1:]

    def checkAlignment(self, offset):
        """True iff *offset* (in reals) sits on an aligned boundary."""
        return offset % self.alignedReals == 0

    def formatConstant(self, constant):
        """Render a literal for C/C++ source ('f'-suffixed for floats)."""
        suffix = 'f' if self.precision == 'S' else ''
        return str(constant) + suffix

    def onHeap(self, numReals):
        """True iff a temporary of *numReals* exceeds the stack limit."""
        required = numReals * self.bytesPerReal
        return required > self._tmpStackLimit
def _get_name_and_precision(ident):
return ident[1:], ident[0].upper()
def getArchitectureIdentifiedBy(ident):
    """Build the CPU Architecture selected by *ident* (e.g. 'dhsw').

    Raises:
      KeyError: for unknown architecture names (unchanged behavior).
    """
    name, precision = _get_name_and_precision(ident)
    # (alignment in bytes, supports prefetching).  Kept as plain tuples so
    # only the requested Architecture is constructed — the original built
    # all ten objects on every call just to return one.
    known = {
        'noarch':       (16, False),
        'wsm':          (16, False),
        'snb':          (32, False),
        'hsw':          (32, False),
        'skx':          (64, True),
        'knc':          (64, False),
        'knl':          (64, True),  # Libxsmm currently supports prefetch only for KNL kernels
        'rome':         (32, False),
        'thunderx2t99': (16, False),
        'power9':       (16, False),
    }
    alignment, enablePrefetch = known[name]
    return Architecture(name, precision, alignment, enablePrefetch)
def getHeterogeneousArchitectureIdentifiedBy(compute_ident, compute_sub_arch=None, host_ident=None):
    """Build an Architecture for a device/host pair.

    Both identifiers carry a precision prefix; the precisions must agree.

    Raises:
      ValueError: if host and compute precisions differ.
      KeyError: for unsupported compute architecture names.
    """
    compute_name, compute_precision = _get_name_and_precision(compute_ident)
    host_name, host_precision = _get_name_and_precision(host_ident)

    if host_precision != compute_precision:
        raise ValueError(f'Precision of host and compute arch. must be the same. '
                         f'Given: {host_ident}, {compute_ident}')

    # Factories keyed by supported device vendor.
    supported = {
        'nvidia': lambda: Architecture(compute_name, compute_precision, 64, False,
                                       compute_sub_arch, host_name),
    }
    return supported[compute_name]()
def useArchitectureIdentifiedBy(compute_ident, compute_sub_arch=None, host_ident=None):
    """Resolve an Architecture and register it as the alignment target.

    With only *compute_ident* given, a homogeneous (CPU) architecture is
    produced; with both *compute_sub_arch* and *host_ident* given, a
    heterogeneous one.  Any other combination is rejected.

    Raises:
      ValueError: if exactly one of the GPU-related parameters is given.
    """
    if compute_sub_arch and host_ident:
        arch = getHeterogeneousArchitectureIdentifiedBy(compute_ident,
                                                        compute_sub_arch,
                                                        host_ident)
    elif not compute_sub_arch and not host_ident:
        arch = getArchitectureIdentifiedBy(compute_ident)
    else:
        raise ValueError(f'given an incomplete set of input parameters: '
                         f'{compute_ident}, {compute_sub_arch}, {host_ident}')

    # Side effect: make DenseMemoryLayout pad to this arch from now on.
    DenseMemoryLayout.setAlignmentArch(arch)
    return arch
| 38.484076 | 110 | 0.71665 |
from .memory import DenseMemoryLayout
class Architecture(object):
def __init__(self,
name,
precision,
alignment,
enablePrefetch=False,
sub_name=None,
host_name=None):
self.name = name
self.sub_name = sub_name
self.host_name = host_name
self.precision = precision.upper()
if self.precision == 'D':
self.bytesPerReal = 8
self.typename = 'double'
self.epsilon = 2.22e-16
elif self.precision == 'S':
self.bytesPerReal = 4
self.typename = 'float'
self.epsilon = 1.19e-7
else:
raise ValueError(f'Unknown precision type {self.precision}')
self.alignment = alignment
assert self.alignment % self.bytesPerReal == 0
self.alignedReals = self.alignment // self.bytesPerReal
self.enablePrefetch = enablePrefetch
self.uintTypename = 'unsigned'
self.ulongTypename = 'unsigned long'
self._tmpStackLimit = 524288
def setTmpStackLimit(self, tmpStackLimit):
self._tmpStackLimit = tmpStackLimit
def alignedLower(self, index):
return index - index % self.alignedReals
def alignedUpper(self, index):
return index + (self.alignedReals - index % self.alignedReals) % self.alignedReals
def alignedShape(self, shape):
return (self.alignedUpper(shape[0]),) + shape[1:]
def checkAlignment(self, offset):
return offset % self.alignedReals == 0
def formatConstant(self, constant):
return str(constant) + ('f' if self.precision == 'S' else '')
def onHeap(self, numReals):
return (numReals * self.bytesPerReal) > self._tmpStackLimit
def _get_name_and_precision(ident):
return ident[1:], ident[0].upper()
def getArchitectureIdentifiedBy(ident):
name, precision = _get_name_and_precision(ident)
arch = {
'noarch': Architecture(name, precision, 16, False),
'wsm': Architecture(name, precision, 16, False),
'snb': Architecture(name, precision, 32, False),
'hsw': Architecture(name, precision, 32, False),
'skx': Architecture(name, precision, 64, True),
'knc': Architecture(name, precision, 64, False),
'knl': Architecture(name, precision, 64, True),
'rome': Architecture(name, precision, 32, False),
'thunderx2t99': Architecture(name, precision, 16, False),
'power9': Architecture(name, precision, 16, False)
}
return arch[name]
def getHeterogeneousArchitectureIdentifiedBy(compute_ident, compute_sub_arch=None, host_ident=None):
compute_name, compute_precision = _get_name_and_precision(compute_ident)
host_name, host_precision = _get_name_and_precision(host_ident)
if (compute_precision != host_precision):
raise ValueError(f'Precision of host and compute arch. must be the same. '
f'Given: {host_ident}, {compute_ident}')
arch = {
'nvidia': Architecture(compute_name, compute_precision, 64, False, compute_sub_arch, host_name)
}
return arch[compute_name]
def useArchitectureIdentifiedBy(compute_ident, compute_sub_arch=None, host_ident=None):
if not (compute_sub_arch or host_ident):
arch = getArchitectureIdentifiedBy(compute_ident)
elif (compute_sub_arch and host_ident):
arch = getHeterogeneousArchitectureIdentifiedBy(compute_ident, compute_sub_arch, host_ident)
else:
raise ValueError(f'given an incomplete set of input parameters: '
f'{compute_ident}, {compute_sub_arch}, {host_ident}')
DenseMemoryLayout.setAlignmentArch(arch)
return arch
| true | true |
f71c77a35b95b5244ed1a2f4cb8314b74edffc12 | 19,222 | py | Python | lib/spack/spack/test/install.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-03-05T10:54:32.000Z | 2021-03-05T14:14:52.000Z | lib/spack/spack/test/install.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | lib/spack/spack/test/install.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-07-19T20:31:27.000Z | 2021-07-19T21:14:14.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import shutil
import pytest
import llnl.util.filesystem as fs
import spack.error
import spack.patch
import spack.repo
import spack.store
import spack.util.spack_json as sjson
from spack.package import (
InstallError,
PackageBase,
PackageStillNeededError,
_spack_build_envfile,
_spack_build_logfile,
_spack_configure_argsfile,
)
from spack.spec import Spec
def find_nothing(*args):
    """Repo stub that refuses every package lookup during tests."""
    message = 'Repo package access is disabled for test'
    raise spack.repo.UnknownPackageError(message)
def test_install_and_uninstall(install_mockery, mock_fetch, monkeypatch):
    """Install then uninstall the trivial mock package end-to-end."""
    # Get a basic concrete spec for the trivial install package.
    spec = Spec('trivial-install-test-package')
    spec.concretize()
    assert spec.concrete

    # Get the package
    pkg = spec.package

    try:
        pkg.do_install()

        # Drop the cached package and disable repo lookups so that
        # accessing spec.package must fail afterwards.
        spec._package = None
        monkeypatch.setattr(spack.repo, 'get', find_nothing)
        with pytest.raises(spack.repo.UnknownPackageError):
            spec.package

        pkg.do_uninstall()
    except Exception:
        # Leave no half-installed prefix behind on failure.
        pkg.remove_prefix()
        raise
def mock_remove_prefix(*args):
    """Stand-in for Package.remove_prefix that always fails."""
    raise MockInstallError(
        "Intentional error",
        "Mock remove_prefix method intentionally fails")
class RemovePrefixChecker(object):
    """Wraps a remove_prefix callable and records that it was invoked."""

    def __init__(self, wrapped_rm_prefix):
        # Real remove_prefix implementation to delegate to.
        self.wrapped_rm_prefix = wrapped_rm_prefix
        # Set to True once remove_prefix() has run; tests assert on this.
        self.removed = False

    def remove_prefix(self):
        """Mark the call, then delegate to the wrapped implementation."""
        self.removed = True
        self.wrapped_rm_prefix()
class MockStage(object):
    """Stage proxy that records whether destroy() was called.

    All other attribute access is forwarded to the wrapped stage.
    """

    def __init__(self, wrapped_stage):
        self.wrapped_stage = wrapped_stage
        # Flipped to True by destroy(); tests assert on it afterwards.
        self.test_destroyed = False

    def __enter__(self):
        self.create()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only clean up on a successful exit, mirroring Stage semantics.
        if exc_type is None:
            self.destroy()

    def destroy(self):
        self.test_destroyed = True
        self.wrapped_stage.destroy()

    def create(self):
        self.wrapped_stage.create()

    def __getattr__(self, attr):
        # Forward everything else to the wrapped stage.  Must not recurse
        # when 'wrapped_stage' itself is missing.
        if attr == 'wrapped_stage':
            # This attribute may not be defined at some point during unpickling
            raise AttributeError()
        return getattr(self.wrapped_stage, attr)
def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch):
    """A failed build leaves its prefix; a restaged retry removes it."""
    spec = Spec('canfail').concretized()
    pkg = spack.repo.get(spec)
    instance_rm_prefix = pkg.remove_prefix

    try:
        # First attempt: force the build to fail with a failing
        # remove_prefix, so the partial prefix survives.
        pkg.succeed = False
        pkg.remove_prefix = mock_remove_prefix
        with pytest.raises(MockInstallError):
            pkg.do_install()
        assert os.path.isdir(pkg.prefix)

        # Second attempt: track that the real prefix removal runs.
        rm_prefix_checker = RemovePrefixChecker(instance_rm_prefix)
        pkg.remove_prefix = rm_prefix_checker.remove_prefix

        # must clear failure markings for the package before re-installing it
        spack.store.db.clear_failure(spec, True)

        pkg.succeed = True
        pkg.stage = MockStage(pkg.stage)

        pkg.do_install(restage=True)
        assert rm_prefix_checker.removed
        assert pkg.stage.test_destroyed
        assert pkg.installed

    finally:
        # Restore the instance method regardless of outcome.
        pkg.remove_prefix = instance_rm_prefix
def test_dont_add_patches_to_installed_package(install_mockery, mock_fetch):
    """Patches registered after install must not change dep resolution."""
    dependency = Spec('dependency-install')
    dependency.concretize()
    dependency.package.do_install()

    dependency_hash = dependency.dag_hash()
    dependent = Spec('dependent-install ^/' + dependency_hash)
    dependent.concretize()

    # Register a patch on the already-installed dependency after the fact.
    dependency.package.patches['dependency-install'] = [
        spack.patch.UrlPatch(
            dependent.package, 'file://fake.patch', sha256='unused-hash')]

    # The dependent must still resolve to the installed (unpatched) dep.
    assert dependent['dependency-install'] == dependency
def test_installed_dependency_request_conflicts(
        install_mockery, mock_fetch, mutable_mock_repo):
    """Concretizing against an incompatible installed dep must fail."""
    dependency = Spec('dependency-install')
    dependency.concretize()
    dependency.package.do_install()

    dependency_hash = dependency.dag_hash()
    # 'conflicting-dependent' declares a conflict with this dependency.
    dependent = Spec(
        'conflicting-dependent ^/' + dependency_hash)
    with pytest.raises(spack.error.UnsatisfiableSpecError):
        dependent.concretize()
def test_install_dependency_symlinks_pkg(
        install_mockery, mock_fetch, mutable_mock_repo):
    """Test dependency flattening/symlinks mock package."""
    spec = Spec('flatten-deps')
    spec.concretize()
    pkg = spec.package
    pkg.do_install()

    # Ensure dependency directory exists after the installation.
    dependency_dir = os.path.join(pkg.prefix, 'dependency-install')
    assert os.path.isdir(dependency_dir)
def test_install_times(
        install_mockery, mock_fetch, mutable_mock_repo):
    """Test install times added."""
    spec = Spec('dev-build-test-install-phases')
    spec.concretize()
    pkg = spec.package
    pkg.do_install()

    # Ensure dependency directory exists after the installation.
    install_times = os.path.join(pkg.prefix, ".spack", 'install_times.json')
    assert os.path.isfile(install_times)

    # Ensure the phases are included
    with open(install_times, 'r') as timefile:
        times = sjson.load(timefile.read())

    # The order should be maintained
    phases = [x['name'] for x in times['phases']]
    total = sum([x['seconds'] for x in times['phases']])
    for name in ['one', 'two', 'three', 'install']:
        assert name in phases

    # Give a generous difference threshold
    assert abs(total - times['total']['seconds']) < 5
def test_flatten_deps(
        install_mockery, mock_fetch, mutable_mock_repo):
    """Explicitly test the flattening code for coverage purposes."""
    # Unfortunately, executing the 'flatten-deps' spec's installation does
    # not affect code coverage results, so be explicit here.
    spec = Spec('dependent-install')
    spec.concretize()
    pkg = spec.package
    pkg.do_install()

    # Demonstrate that the directory does not appear under the spec
    # prior to the flatten operation.
    dependency_name = 'dependency-install'
    assert dependency_name not in os.listdir(pkg.prefix)

    # Flatten the dependencies and ensure the dependency directory is there.
    spack.package.flatten_dependencies(spec, pkg.prefix)

    dependency_dir = os.path.join(pkg.prefix, dependency_name)
    assert os.path.isdir(dependency_dir)
@pytest.fixture()
def install_upstream(tmpdir_factory, gen_mock_layout, install_mockery):
    """Provides a function that installs a specified set of specs to an
    upstream database. The function returns a store which points to the
    upstream, as well as the upstream layout (for verifying that dependent
    installs are using the upstream installs).
    """
    mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
    prepared_db = spack.database.Database(mock_db_root)
    upstream_layout = gen_mock_layout('/a/')

    def _install_upstream(*specs):
        # Record each spec as installed in the upstream database only —
        # nothing is actually built.
        for spec_str in specs:
            s = spack.spec.Spec(spec_str).concretized()
            prepared_db.add(s, upstream_layout)

        # A fresh downstream store that chains to the prepared upstream.
        downstream_root = str(tmpdir_factory.mktemp('mock_downstream_db_root'))
        db_for_test = spack.database.Database(
            downstream_root, upstream_dbs=[prepared_db]
        )
        store = spack.store.Store(downstream_root)
        store.db = db_for_test
        return store, upstream_layout

    return _install_upstream
def test_installed_upstream_external(install_upstream, mock_fetch):
    """Check that when a dependency package is recorded as installed in
    an upstream database that it is not reinstalled.
    """
    s, _ = install_upstream('externaltool')
    with spack.store.use_store(s):
        dependent = spack.spec.Spec('externaltest')
        dependent.concretize()

        new_dependency = dependent['externaltool']
        assert new_dependency.external
        assert new_dependency.prefix == '/path/to/external_tool'

        dependent.package.do_install()

        # The external dependency must not have been (re)installed locally.
        assert not os.path.exists(new_dependency.prefix)
        assert os.path.exists(dependent.prefix)
def test_installed_upstream(install_upstream, mock_fetch):
    """Check that when a dependency package is recorded as installed in
    an upstream database that it is not reinstalled.
    """
    s, upstream_layout = install_upstream('dependency-install')
    with spack.store.use_store(s):
        dependency = spack.spec.Spec('dependency-install').concretized()
        dependent = spack.spec.Spec('dependent-install').concretized()

        new_dependency = dependent['dependency-install']
        assert new_dependency.package.installed_upstream
        # The dependency prefix must point into the upstream layout.
        assert (new_dependency.prefix ==
                upstream_layout.path_for_spec(dependency))

        dependent.package.do_install()

        # Only the dependent is built locally; the dep stays upstream.
        assert not os.path.exists(new_dependency.prefix)
        assert os.path.exists(dependent.prefix)
@pytest.mark.disable_clean_stage_check
def test_partial_install_keep_prefix(install_mockery, mock_fetch):
    """keep_prefix retains a failed prefix and resumes into it."""
    spec = Spec('canfail').concretized()
    pkg = spack.repo.get(spec)

    # Normally the stage should start unset, but other tests set it
    pkg._stage = None
    remove_prefix = spack.package.Package.remove_prefix
    try:
        # If remove_prefix is called at any point in this test, that is an
        # error
        pkg.succeed = False  # make the build fail
        spack.package.Package.remove_prefix = mock_remove_prefix
        with pytest.raises(spack.build_environment.ChildError):
            pkg.do_install(keep_prefix=True)
        assert os.path.exists(pkg.prefix)

        # must clear failure markings for the package before re-installing it
        spack.store.db.clear_failure(spec, True)

        pkg.succeed = True  # make the build succeed
        pkg.stage = MockStage(pkg.stage)
        pkg.do_install(keep_prefix=True)
        assert pkg.installed
        # keep_prefix means the stage must not be destroyed either.
        assert not pkg.stage.test_destroyed

    finally:
        spack.package.Package.remove_prefix = remove_prefix
def test_second_install_no_overwrite_first(install_mockery, mock_fetch):
    """Reinstalling an installed spec must not rebuild or touch the prefix."""
    spec = Spec('canfail').concretized()
    pkg = spack.repo.get(spec)
    remove_prefix = spack.package.Package.remove_prefix
    try:
        # Any call to remove_prefix during this test is an error.
        spack.package.Package.remove_prefix = mock_remove_prefix

        pkg.succeed = True
        pkg.do_install()
        assert pkg.installed

        # If Package.install is called after this point, it will fail
        pkg.succeed = False
        pkg.do_install()

    finally:
        spack.package.Package.remove_prefix = remove_prefix
def test_install_prefix_collision_fails(config, mock_fetch, mock_packages, tmpdir):
    """
    Test that different specs with coinciding install prefixes will fail
    to install.
    """
    # Force every spec to project to the same prefix.
    projections = {'all': 'all-specs-project-to-this-prefix'}
    store = spack.store.Store(str(tmpdir), projections=projections)
    with spack.store.use_store(store):
        with spack.config.override('config:checksum', False):
            pkg_a = Spec('libelf@0.8.13').concretized().package
            pkg_b = Spec('libelf@0.8.12').concretized().package
            pkg_a.do_install()

            # Second spec maps to the occupied prefix and must be refused.
            with pytest.raises(InstallError, match="Install prefix collision"):
                pkg_b.do_install()
def test_store(install_mockery, mock_fetch):
    """Smoke test: a CMake-based mock package installs into the store."""
    spec = Spec('cmake-client').concretized()
    pkg = spec.package
    pkg.do_install()
@pytest.mark.disable_clean_stage_check
def test_failing_build(install_mockery, mock_fetch, capfd):
    """A package whose install() raises surfaces as a ChildError."""
    spec = Spec('failing-build').concretized()
    pkg = spec.package

    with pytest.raises(spack.build_environment.ChildError):
        pkg.do_install()
    # The original InstallError message is forwarded to stdout.
    assert 'InstallError: Expected Failure' in capfd.readouterr()[0]
class MockInstallError(spack.error.SpackError):
    """Error raised by test mocks to simulate an install failure."""
    pass
def test_uninstall_by_spec_errors(mutable_database):
    """Test exceptional cases with the uninstall command."""

    # Try to uninstall a spec that has not been installed
    spec = Spec('dependent-install')
    spec.concretize()
    with pytest.raises(InstallError, match="is not installed"):
        PackageBase.uninstall_by_spec(spec)

    # Try an unforced uninstall of a spec with dependencies
    rec = mutable_database.get_record('mpich')
    with pytest.raises(PackageStillNeededError, match="Cannot uninstall"):
        PackageBase.uninstall_by_spec(rec.spec)
@pytest.mark.disable_clean_stage_check
def test_nosource_pkg_install(
        install_mockery, mock_fetch, mock_packages, capfd):
    """Test install phases with the nosource package."""
    spec = Spec('nosource').concretized()
    pkg = spec.package

    # Make sure install works even though there is no associated code.
    pkg.do_install()
    out = capfd.readouterr()
    assert "Installing dependency-install" in out[0]
    # A missing source id is reported on stderr, not treated as an error.
    assert "Missing a source id for nosource" in out[1]
def test_nosource_pkg_install_post_install(
        install_mockery, mock_fetch, mock_packages):
    """Test install phases with the nosource package with post-install."""
    spec = Spec('nosource-install').concretized()
    pkg = spec.package

    # Make sure both the install and post-install package methods work.
    pkg.do_install()

    # Ensure the file created in the package's `install` method exists.
    install_txt = os.path.join(spec.prefix, 'install.txt')
    assert os.path.isfile(install_txt)

    # Ensure the file created in the package's `post-install` method exists.
    post_install_txt = os.path.join(spec.prefix, 'post-install.txt')
    assert os.path.isfile(post_install_txt)
def test_pkg_build_paths(install_mockery):
    """log_path/env_path resolve to current names with legacy fallbacks."""
    # Get a basic concrete spec for the trivial install package.
    spec = Spec('trivial-install-test-package').concretized()

    log_path = spec.package.log_path
    assert log_path.endswith(_spack_build_logfile)

    env_path = spec.package.env_path
    assert env_path.endswith(_spack_build_envfile)

    # Backward compatibility checks
    log_dir = os.path.dirname(log_path)
    fs.mkdirp(log_dir)
    with fs.working_dir(log_dir):
        # Start with the older of the previous log filenames
        older_log = 'spack-build.out'
        fs.touch(older_log)
        assert spec.package.log_path.endswith(older_log)

        # Now check the newer log filename
        last_log = 'spack-build.txt'
        os.rename(older_log, last_log)
        assert spec.package.log_path.endswith(last_log)

        # Check the old environment file
        last_env = 'spack-build.env'
        os.rename(last_log, last_env)
        assert spec.package.env_path.endswith(last_env)

    # Cleanup
    shutil.rmtree(log_dir)
def test_pkg_install_paths(install_mockery):
    """Test install-time log/env/args paths and legacy filename fallbacks."""
    # Get a basic concrete spec for the trivial install package.
    spec = Spec('trivial-install-test-package').concretized()
    # Canonical locations all live under <prefix>/.spack.
    log_path = os.path.join(spec.prefix, '.spack', _spack_build_logfile)
    assert spec.package.install_log_path == log_path
    env_path = os.path.join(spec.prefix, '.spack', _spack_build_envfile)
    assert spec.package.install_env_path == env_path
    args_path = os.path.join(spec.prefix, '.spack', _spack_configure_argsfile)
    assert spec.package.install_configure_args_path == args_path
    # Backward compatibility checks
    log_dir = os.path.dirname(log_path)
    fs.mkdirp(log_dir)
    with fs.working_dir(log_dir):
        # Start with the older of the previous install log filenames
        older_log = 'build.out'
        fs.touch(older_log)
        assert spec.package.install_log_path.endswith(older_log)
        # Now check the newer install log filename
        last_log = 'build.txt'
        os.rename(older_log, last_log)
        assert spec.package.install_log_path.endswith(last_log)
        # Check the old install environment file
        last_env = 'build.env'
        os.rename(last_log, last_env)
        assert spec.package.install_env_path.endswith(last_env)
    # Cleanup
    shutil.rmtree(log_dir)
def test_log_install_without_build_files(install_mockery):
    """Test the installer log function when no build files are present."""
    # With no build log on disk, archiving the logs must fail loudly.
    pkg = Spec('trivial-install-test-package').concretized().package
    with pytest.raises(IOError, match="No such file or directory"):
        spack.installer.log(pkg)
def test_log_install_with_build_files(install_mockery, monkeypatch):
    """Test the installer's log function when have build files."""
    config_log = 'config.log'
    # Retain the original function for use in the monkey patch that is used
    # to raise an exception under the desired condition for test coverage.
    orig_install_fn = fs.install
    def _install(src, dest):
        orig_install_fn(src, dest)
        if src.endswith(config_log):
            # Simulate an archiving failure for this one file only.
            raise Exception('Mock log install error')
    monkeypatch.setattr(fs, 'install', _install)
    spec = Spec('trivial-install-test-package').concretized()
    # Set up mock build files and try again to include archive failure
    log_path = spec.package.log_path
    log_dir = os.path.dirname(log_path)
    fs.mkdirp(log_dir)
    with fs.working_dir(log_dir):
        fs.touch(log_path)
        fs.touch(spec.package.env_path)
        fs.touch(spec.package.configure_args_path)
    install_path = os.path.dirname(spec.package.install_log_path)
    fs.mkdirp(install_path)
    source = spec.package.stage.source_path
    config = os.path.join(source, 'config.log')
    fs.touchp(config)
    # 'missing' does not exist, '..' escapes the source path, and config
    # triggers the patched fs.install failure above.
    spec.package.archive_files = ['missing', '..', config]
    spack.installer.log(spec.package)
    assert os.path.exists(spec.package.install_log_path)
    assert os.path.exists(spec.package.install_env_path)
    assert os.path.exists(spec.package.install_configure_args_path)
    archive_dir = os.path.join(install_path, 'archived-files')
    source_dir = os.path.dirname(source)
    rel_config = os.path.relpath(config, source_dir)
    assert os.path.exists(os.path.join(archive_dir, rel_config))
    assert not os.path.exists(os.path.join(archive_dir, 'missing'))
    expected_errs = [
        'OUTSIDE SOURCE PATH',   # for '..'
        'FAILED TO ARCHIVE'      # for rel_config
    ]
    # errors.txt records one line per failed archive entry, in order.
    with open(os.path.join(archive_dir, 'errors.txt'), 'r') as fd:
        for ln, expected in zip(fd, expected_errs):
            assert expected in ln
    # Cleanup
    shutil.rmtree(log_dir)
def test_unconcretized_install(install_mockery, mock_fetch, mock_packages):
    """Test attempts to perform install phases with unconcretized spec."""
    # Neither install nor patch may proceed on an abstract (unconcretized)
    # spec; both raise an informative ValueError.
    abstract = Spec('trivial-install-test-package')
    with pytest.raises(ValueError, match='must have a concrete spec'):
        abstract.package.do_install()
    with pytest.raises(ValueError, match="only patch concrete packages"):
        abstract.package.do_patch()
def test_install_error():
    """Test that InstallError preserves its short and long messages.

    Uses ``pytest.raises`` so the test fails if no exception is raised;
    the previous bare try/except would have passed vacuously had the
    ``raise`` never executed.
    """
    msg = 'test install error'
    long_msg = 'this is the long version of test install error'
    with pytest.raises(InstallError) as exc_info:
        raise InstallError(msg, long_msg=long_msg)
    exc = exc_info.value
    # Verify the exception type and that both messages are retained.
    assert exc.__class__.__name__ == 'InstallError'
    assert exc.message == msg
    assert exc.long_message == long_msg
| 33.371528 | 83 | 0.70232 |
import os
import shutil
import pytest
import llnl.util.filesystem as fs
import spack.error
import spack.patch
import spack.repo
import spack.store
import spack.util.spack_json as sjson
from spack.package import (
InstallError,
PackageBase,
PackageStillNeededError,
_spack_build_envfile,
_spack_build_logfile,
_spack_configure_argsfile,
)
from spack.spec import Spec
def find_nothing(*args):
    """Repo lookup stub that always fails.

    Monkeypatched over ``spack.repo.get`` to simulate package repository
    access being unavailable during a test.
    """
    raise spack.repo.UnknownPackageError(
        'Repo package access is disabled for test')
def test_install_and_uninstall(install_mockery, mock_fetch, monkeypatch):
    """Test a full install followed by uninstall of a trivial package."""
    spec = Spec('trivial-install-test-package')
    spec.concretize()
    assert spec.concrete
    pkg = spec.package
    try:
        pkg.do_install()
        # Simulate the package repo becoming unavailable after install:
        # drop the cached package object and make repo lookups fail.
        spec._package = None
        monkeypatch.setattr(spack.repo, 'get', find_nothing)
        with pytest.raises(spack.repo.UnknownPackageError):
            spec.package
        # Uninstall must still work via the pre-existing package object.
        pkg.do_uninstall()
    except Exception:
        # Leave no install prefix behind if anything above failed.
        pkg.remove_prefix()
        raise
def mock_remove_prefix(*args):
    """Stand-in for ``remove_prefix`` that always raises MockInstallError.

    Patched over the real method in tests that must verify the prefix is
    *not* removed.
    """
    raise MockInstallError(
        "Intentional error",
        "Mock remove_prefix method intentionally fails")
class RemovePrefixChecker(object):
    """Wrapper around a ``remove_prefix`` callable that records invocation.

    Tests use this to assert the install prefix was actually removed
    while still executing the real removal logic.
    """
    def __init__(self, wrapped_rm_prefix):
        # Flipped to True once remove_prefix() has been called.
        self.removed = False
        self.wrapped_rm_prefix = wrapped_rm_prefix
    def remove_prefix(self):
        self.removed = True
        self.wrapped_rm_prefix()
class MockStage(object):
    """Stage wrapper that records whether ``destroy()`` was ever called."""
    def __init__(self, wrapped_stage):
        self.wrapped_stage = wrapped_stage
        # Set to True the first time destroy() runs.
        self.test_destroyed = False
    def __enter__(self):
        self.create()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only tear down on a clean exit; keep the stage around when an
        # exception escaped the with-body (mirrors keep-on-failure behavior).
        if exc_type is None:
            self.destroy()
    def destroy(self):
        self.test_destroyed = True
        self.wrapped_stage.destroy()
    def create(self):
        self.wrapped_stage.create()
    def __getattr__(self, attr):
        # Guard against infinite recursion when 'wrapped_stage' itself is
        # missing (e.g. before __init__ has set it).
        if attr == 'wrapped_stage':
            raise AttributeError()
        # Delegate everything else to the real stage.
        return getattr(self.wrapped_stage, attr)
def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch):
spec = Spec('canfail').concretized()
pkg = spack.repo.get(spec)
instance_rm_prefix = pkg.remove_prefix
try:
pkg.succeed = False
pkg.remove_prefix = mock_remove_prefix
with pytest.raises(MockInstallError):
pkg.do_install()
assert os.path.isdir(pkg.prefix)
rm_prefix_checker = RemovePrefixChecker(instance_rm_prefix)
pkg.remove_prefix = rm_prefix_checker.remove_prefix
spack.store.db.clear_failure(spec, True)
pkg.succeed = True
pkg.stage = MockStage(pkg.stage)
pkg.do_install(restage=True)
assert rm_prefix_checker.removed
assert pkg.stage.test_destroyed
assert pkg.installed
finally:
pkg.remove_prefix = instance_rm_prefix
def test_dont_add_patches_to_installed_package(install_mockery, mock_fetch):
dependency = Spec('dependency-install')
dependency.concretize()
dependency.package.do_install()
dependency_hash = dependency.dag_hash()
dependent = Spec('dependent-install ^/' + dependency_hash)
dependent.concretize()
dependency.package.patches['dependency-install'] = [
spack.patch.UrlPatch(
dependent.package, 'file://fake.patch', sha256='unused-hash')]
assert dependent['dependency-install'] == dependency
def test_installed_dependency_request_conflicts(
install_mockery, mock_fetch, mutable_mock_repo):
dependency = Spec('dependency-install')
dependency.concretize()
dependency.package.do_install()
dependency_hash = dependency.dag_hash()
dependent = Spec(
'conflicting-dependent ^/' + dependency_hash)
with pytest.raises(spack.error.UnsatisfiableSpecError):
dependent.concretize()
def test_install_dependency_symlinks_pkg(
install_mockery, mock_fetch, mutable_mock_repo):
spec = Spec('flatten-deps')
spec.concretize()
pkg = spec.package
pkg.do_install()
dependency_dir = os.path.join(pkg.prefix, 'dependency-install')
assert os.path.isdir(dependency_dir)
def test_install_times(
install_mockery, mock_fetch, mutable_mock_repo):
spec = Spec('dev-build-test-install-phases')
spec.concretize()
pkg = spec.package
pkg.do_install()
install_times = os.path.join(pkg.prefix, ".spack", 'install_times.json')
assert os.path.isfile(install_times)
with open(install_times, 'r') as timefile:
times = sjson.load(timefile.read())
phases = [x['name'] for x in times['phases']]
total = sum([x['seconds'] for x in times['phases']])
for name in ['one', 'two', 'three', 'install']:
assert name in phases
assert abs(total - times['total']['seconds']) < 5
def test_flatten_deps(
install_mockery, mock_fetch, mutable_mock_repo):
# not affect code coverage results, so be explicit here.
spec = Spec('dependent-install')
spec.concretize()
pkg = spec.package
pkg.do_install()
# Demonstrate that the directory does not appear under the spec
# prior to the flatten operation.
dependency_name = 'dependency-install'
assert dependency_name not in os.listdir(pkg.prefix)
# Flatten the dependencies and ensure the dependency directory is there.
spack.package.flatten_dependencies(spec, pkg.prefix)
dependency_dir = os.path.join(pkg.prefix, dependency_name)
assert os.path.isdir(dependency_dir)
@pytest.fixture()
def install_upstream(tmpdir_factory, gen_mock_layout, install_mockery):
    """Fixture returning a function that populates a mock upstream database.

    The returned callable concretizes each given spec string, records it in
    an "upstream" database, and returns a downstream store wired to use that
    database as an upstream, along with the upstream layout.
    """
    mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
    prepared_db = spack.database.Database(mock_db_root)
    upstream_layout = gen_mock_layout('/a/')
    def _install_upstream(*specs):
        # Register each spec in the upstream database (no real install).
        for spec_str in specs:
            s = spack.spec.Spec(spec_str).concretized()
            prepared_db.add(s, upstream_layout)
        downstream_root = str(tmpdir_factory.mktemp('mock_downstream_db_root'))
        db_for_test = spack.database.Database(
            downstream_root, upstream_dbs=[prepared_db]
        )
        store = spack.store.Store(downstream_root)
        store.db = db_for_test
        return store, upstream_layout
    return _install_upstream
def test_installed_upstream_external(install_upstream, mock_fetch):
s, _ = install_upstream('externaltool')
with spack.store.use_store(s):
dependent = spack.spec.Spec('externaltest')
dependent.concretize()
new_dependency = dependent['externaltool']
assert new_dependency.external
assert new_dependency.prefix == '/path/to/external_tool'
dependent.package.do_install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
def test_installed_upstream(install_upstream, mock_fetch):
s, upstream_layout = install_upstream('dependency-install')
with spack.store.use_store(s):
dependency = spack.spec.Spec('dependency-install').concretized()
dependent = spack.spec.Spec('dependent-install').concretized()
new_dependency = dependent['dependency-install']
assert new_dependency.package.installed_upstream
assert (new_dependency.prefix ==
upstream_layout.path_for_spec(dependency))
dependent.package.do_install()
assert not os.path.exists(new_dependency.prefix)
assert os.path.exists(dependent.prefix)
@pytest.mark.disable_clean_stage_check
def test_partial_install_keep_prefix(install_mockery, mock_fetch):
    """Test that ``keep_prefix=True`` preserves a failed install's prefix."""
    spec = Spec('canfail').concretized()
    pkg = spack.repo.get(spec)
    # Normally the stage should start unset, but other tests set it
    pkg._stage = None
    remove_prefix = spack.package.Package.remove_prefix
    try:
        # If remove_prefix is called at any point in this test, that is an
        # error (mock_remove_prefix raises MockInstallError).
        pkg.succeed = False  # make the build fail
        spack.package.Package.remove_prefix = mock_remove_prefix
        with pytest.raises(spack.build_environment.ChildError):
            pkg.do_install(keep_prefix=True)
        assert os.path.exists(pkg.prefix)
        # must clear failure markings for the package before re-installing it
        spack.store.db.clear_failure(spec, True)
        pkg.succeed = True  # make the build succeed
        pkg.stage = MockStage(pkg.stage)
        pkg.do_install(keep_prefix=True)
        assert pkg.installed
        # The kept stage must not have been destroyed/re-staged.
        assert not pkg.stage.test_destroyed
    finally:
        # Always restore the class-level remove_prefix patched above.
        spack.package.Package.remove_prefix = remove_prefix
def test_second_install_no_overwrite_first(install_mockery, mock_fetch):
spec = Spec('canfail').concretized()
pkg = spack.repo.get(spec)
remove_prefix = spack.package.Package.remove_prefix
try:
spack.package.Package.remove_prefix = mock_remove_prefix
pkg.succeed = True
pkg.do_install()
assert pkg.installed
# If Package.install is called after this point, it will fail
pkg.succeed = False
pkg.do_install()
finally:
spack.package.Package.remove_prefix = remove_prefix
def test_install_prefix_collision_fails(config, mock_fetch, mock_packages, tmpdir):
projections = {'all': 'all-specs-project-to-this-prefix'}
store = spack.store.Store(str(tmpdir), projections=projections)
with spack.store.use_store(store):
with spack.config.override('config:checksum', False):
pkg_a = Spec('libelf@0.8.13').concretized().package
pkg_b = Spec('libelf@0.8.12').concretized().package
pkg_a.do_install()
with pytest.raises(InstallError, match="Install prefix collision"):
pkg_b.do_install()
def test_store(install_mockery, mock_fetch):
    """Smoke test: a cmake-client spec installs through the store."""
    Spec('cmake-client').concretized().package.do_install()
@pytest.mark.disable_clean_stage_check
def test_failing_build(install_mockery, mock_fetch, capfd):
    """Test that a failing build raises ChildError and reports its message."""
    spec = Spec('failing-build').concretized()
    pkg = spec.package
    with pytest.raises(spack.build_environment.ChildError):
        pkg.do_install()
    # The original InstallError text must appear in the captured stdout.
    assert 'InstallError: Expected Failure' in capfd.readouterr()[0]
class MockInstallError(spack.error.SpackError):
    """Error raised intentionally by tests to simulate install failures."""
    pass
def test_uninstall_by_spec_errors(mutable_database):
    """Test exceptional cases of ``PackageBase.uninstall_by_spec``."""
    # Try to uninstall a spec that has not been installed
    spec = Spec('dependent-install')
    spec.concretize()
    with pytest.raises(InstallError, match="is not installed"):
        PackageBase.uninstall_by_spec(spec)
    # Try an unforced uninstall of a spec with dependencies
    rec = mutable_database.get_record('mpich')
    with pytest.raises(PackageStillNeededError, match="Cannot uninstall"):
        PackageBase.uninstall_by_spec(rec.spec)
@pytest.mark.disable_clean_stage_check
def test_nosource_pkg_install(
install_mockery, mock_fetch, mock_packages, capfd):
spec = Spec('nosource').concretized()
pkg = spec.package
# Make sure install works even though there is no associated code.
pkg.do_install()
out = capfd.readouterr()
assert "Installing dependency-install" in out[0]
assert "Missing a source id for nosource" in out[1]
def test_nosource_pkg_install_post_install(
install_mockery, mock_fetch, mock_packages):
spec = Spec('nosource-install').concretized()
pkg = spec.package
# Make sure both the install and post-install package methods work.
pkg.do_install()
# Ensure the file created in the package's `install` method exists.
install_txt = os.path.join(spec.prefix, 'install.txt')
assert os.path.isfile(install_txt)
post_install_txt = os.path.join(spec.prefix, 'post-install.txt')
assert os.path.isfile(post_install_txt)
def test_pkg_build_paths(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
log_path = spec.package.log_path
assert log_path.endswith(_spack_build_logfile)
env_path = spec.package.env_path
assert env_path.endswith(_spack_build_envfile)
# Backward compatibility checks
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
# Start with the older of the previous log filenames
older_log = 'spack-build.out'
fs.touch(older_log)
assert spec.package.log_path.endswith(older_log)
# Now check the newer log filename
last_log = 'spack-build.txt'
os.rename(older_log, last_log)
assert spec.package.log_path.endswith(last_log)
# Check the old environment file
last_env = 'spack-build.env'
os.rename(last_log, last_env)
assert spec.package.env_path.endswith(last_env)
# Cleanup
shutil.rmtree(log_dir)
def test_pkg_install_paths(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
log_path = os.path.join(spec.prefix, '.spack', _spack_build_logfile)
assert spec.package.install_log_path == log_path
env_path = os.path.join(spec.prefix, '.spack', _spack_build_envfile)
assert spec.package.install_env_path == env_path
args_path = os.path.join(spec.prefix, '.spack', _spack_configure_argsfile)
assert spec.package.install_configure_args_path == args_path
# Backward compatibility checks
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
# Start with the older of the previous install log filenames
older_log = 'build.out'
fs.touch(older_log)
assert spec.package.install_log_path.endswith(older_log)
# Now check the newer install log filename
last_log = 'build.txt'
os.rename(older_log, last_log)
assert spec.package.install_log_path.endswith(last_log)
# Check the old install environment file
last_env = 'build.env'
os.rename(last_log, last_env)
assert spec.package.install_env_path.endswith(last_env)
# Cleanup
shutil.rmtree(log_dir)
def test_log_install_without_build_files(install_mockery):
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
# Attempt installing log without the build log file
with pytest.raises(IOError, match="No such file or directory"):
spack.installer.log(spec.package)
def test_log_install_with_build_files(install_mockery, monkeypatch):
config_log = 'config.log'
# Retain the original function for use in the monkey patch that is used
# to raise an exception under the desired condition for test coverage.
orig_install_fn = fs.install
def _install(src, dest):
orig_install_fn(src, dest)
if src.endswith(config_log):
raise Exception('Mock log install error')
monkeypatch.setattr(fs, 'install', _install)
spec = Spec('trivial-install-test-package').concretized()
# Set up mock build files and try again to include archive failure
log_path = spec.package.log_path
log_dir = os.path.dirname(log_path)
fs.mkdirp(log_dir)
with fs.working_dir(log_dir):
fs.touch(log_path)
fs.touch(spec.package.env_path)
fs.touch(spec.package.configure_args_path)
install_path = os.path.dirname(spec.package.install_log_path)
fs.mkdirp(install_path)
source = spec.package.stage.source_path
config = os.path.join(source, 'config.log')
fs.touchp(config)
spec.package.archive_files = ['missing', '..', config]
spack.installer.log(spec.package)
assert os.path.exists(spec.package.install_log_path)
assert os.path.exists(spec.package.install_env_path)
assert os.path.exists(spec.package.install_configure_args_path)
archive_dir = os.path.join(install_path, 'archived-files')
source_dir = os.path.dirname(source)
rel_config = os.path.relpath(config, source_dir)
assert os.path.exists(os.path.join(archive_dir, rel_config))
assert not os.path.exists(os.path.join(archive_dir, 'missing'))
expected_errs = [
'OUTSIDE SOURCE PATH', # for '..'
'FAILED TO ARCHIVE' # for rel_config
]
with open(os.path.join(archive_dir, 'errors.txt'), 'r') as fd:
for ln, expected in zip(fd, expected_errs):
assert expected in ln
# Cleanup
shutil.rmtree(log_dir)
def test_unconcretized_install(install_mockery, mock_fetch, mock_packages):
spec = Spec('trivial-install-test-package')
with pytest.raises(ValueError, match='must have a concrete spec'):
spec.package.do_install()
with pytest.raises(ValueError, match="only patch concrete packages"):
spec.package.do_patch()
def test_install_error():
try:
msg = 'test install error'
long_msg = 'this is the long version of test install error'
raise InstallError(msg, long_msg=long_msg)
except Exception as exc:
assert exc.__class__.__name__ == 'InstallError'
assert exc.message == msg
assert exc.long_message == long_msg
| true | true |
f71c77d1c0f627d4c0d8120689ae89c7e1a43d86 | 2,577 | py | Python | agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | [
"MIT"
] | 13 | 2018-12-07T21:02:20.000Z | 2019-02-22T14:36:31.000Z | agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | [
"MIT"
] | 43 | 2018-11-30T11:31:43.000Z | 2019-04-03T16:09:06.000Z | agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | [
"MIT"
] | 13 | 2018-11-29T00:31:29.000Z | 2019-02-22T18:50:28.000Z | import json
import os
import sys
import time
from agogosml.common.abstract_streaming_client import find_streaming_clients
from agogosml.tools.sender import send
from agogosml.tools.receiver import receive
# Event Hub settings shared by both the sender and the receiver.
# All values come from the environment; missing vars yield None.
eh_base_config = {
    "EVENT_HUB_NAMESPACE": os.getenv("EVENT_HUB_NAMESPACE"),
    "EVENT_HUB_NAME": os.getenv("EVENT_HUB_NAME_INPUT"),
    "EVENT_HUB_SAS_POLICY": os.getenv("EVENT_HUB_SAS_POLICY"),
    "EVENT_HUB_SAS_KEY": os.getenv("EVENT_HUB_SAS_KEY_INPUT"),
}
# Sender additionally needs the input-side lease container.
eh_send_config = {
    **eh_base_config,
    'LEASE_CONTAINER_NAME': os.getenv('LEASE_CONTAINER_NAME_INPUT')
}
# Receiver needs storage credentials (presumably for lease/checkpointing —
# TODO confirm against the agogosml receiver implementation).
eh_receive_config = {
    **eh_base_config,
    "AZURE_STORAGE_ACCOUNT": os.getenv("AZURE_STORAGE_ACCOUNT"),
    "AZURE_STORAGE_ACCESS_KEY": os.getenv("AZURE_STORAGE_ACCESS_KEY"),
    "LEASE_CONTAINER_NAME": os.getenv("LEASE_CONTAINER_NAME_OUTPUT"),
    "EVENT_HUB_CONSUMER_GROUP": os.getenv("EVENT_HUB_CONSUMER_GROUP"),
    "TIMEOUT": 10,
}
# Kafka settings shared by sender and receiver.
kafka_base_config = {
    'KAFKA_ADDRESS': os.getenv("KAFKA_ADDRESS"),
    'TIMEOUT': os.getenv('KAFKA_TIMEOUT'),
    # These configs are specific to Event Hub Head for Kafka
    'EVENTHUB_KAFKA_CONNECTION_STRING': os.getenv('EVENTHUB_KAFKA_CONNECTION_STRING'),
    'SSL_CERT_LOCATION': os.getenv('SSL_CERT_LOCATION')  # /usr/local/etc/openssl/cert.pem
}
kafka_receive_config = {
    **kafka_base_config,
    'KAFKA_CONSUMER_GROUP': os.getenv('KAFKA_CONSUMER_GROUP'),
}
kafka_send_config = {
    **kafka_base_config,
    'KAFKA_TOPIC': os.getenv('KAFKA_TOPIC_INPUT')
}
def put_messages_on_input_queue(msg_type: str):
    """Load the canned test messages and send them to the input queue."""
    with open('test_messages.json', encoding='utf-8') as messages_file:
        payload = json.load(messages_file)
    # Merge Event Hub and Kafka settings; the chosen client picks what it needs.
    merged_config = dict(eh_send_config)
    merged_config.update(kafka_send_config)
    client_class = find_streaming_clients()[msg_type]
    send(payload, client_class, merged_config)
def receive_messages_on_queue(kafka_topic: str, msg_type: str):
    """Drain the queue/topic named by env var *kafka_topic*; return messages."""
    client_class = find_streaming_clients()[msg_type]
    # Merge receiver settings, then point the client at the requested topic.
    config = dict(eh_receive_config)
    config.update(kafka_receive_config)
    config['KAFKA_TOPIC'] = os.getenv(kafka_topic)
    return receive(sys.stdout, client_class, config)
def cli():
    """End-to-end smoke test: send canned input, then verify pipeline output."""
    msg_type = os.getenv("MESSAGING_TYPE")
    put_messages_on_input_queue(msg_type)
    time.sleep(3)  # give the broker a moment to persist the messages
    print(receive_messages_on_queue('KAFKA_TOPIC_INPUT', msg_type))
    time.sleep(20)  # allow the pipeline time to process and emit output
    output_received = receive_messages_on_queue('KAFKA_TOPIC_OUTPUT', msg_type)
    print(output_received)
    # Exit non-zero when the output queue stayed empty (pipeline failure).
    sys.exit(1 if output_received == "[]" else 0)
if __name__ == "__main__":
cli()
| 28.955056 | 109 | 0.73962 | import json
import os
import sys
import time
from agogosml.common.abstract_streaming_client import find_streaming_clients
from agogosml.tools.sender import send
from agogosml.tools.receiver import receive
eh_base_config = {
"EVENT_HUB_NAMESPACE": os.getenv("EVENT_HUB_NAMESPACE"),
"EVENT_HUB_NAME": os.getenv("EVENT_HUB_NAME_INPUT"),
"EVENT_HUB_SAS_POLICY": os.getenv("EVENT_HUB_SAS_POLICY"),
"EVENT_HUB_SAS_KEY": os.getenv("EVENT_HUB_SAS_KEY_INPUT"),
}
eh_send_config = {
**eh_base_config,
'LEASE_CONTAINER_NAME': os.getenv('LEASE_CONTAINER_NAME_INPUT')
}
eh_receive_config = {
**eh_base_config,
"AZURE_STORAGE_ACCOUNT": os.getenv("AZURE_STORAGE_ACCOUNT"),
"AZURE_STORAGE_ACCESS_KEY": os.getenv("AZURE_STORAGE_ACCESS_KEY"),
"LEASE_CONTAINER_NAME": os.getenv("LEASE_CONTAINER_NAME_OUTPUT"),
"EVENT_HUB_CONSUMER_GROUP": os.getenv("EVENT_HUB_CONSUMER_GROUP"),
"TIMEOUT": 10,
}
kafka_base_config = {
'KAFKA_ADDRESS': os.getenv("KAFKA_ADDRESS"),
'TIMEOUT': os.getenv('KAFKA_TIMEOUT'),
'EVENTHUB_KAFKA_CONNECTION_STRING': os.getenv('EVENTHUB_KAFKA_CONNECTION_STRING'),
'SSL_CERT_LOCATION': os.getenv('SSL_CERT_LOCATION')
}
kafka_receive_config = {
**kafka_base_config,
'KAFKA_CONSUMER_GROUP': os.getenv('KAFKA_CONSUMER_GROUP'),
}
kafka_send_config = {
**kafka_base_config,
'KAFKA_TOPIC': os.getenv('KAFKA_TOPIC_INPUT')
}
def put_messages_on_input_queue(msg_type: str):
with open('test_messages.json', encoding='utf-8') as f:
test_messages = json.load(f)
send_client = find_streaming_clients()[msg_type]
send_config = {**eh_send_config, **kafka_send_config}
send(test_messages, send_client, send_config)
def receive_messages_on_queue(kafka_topic: str, msg_type: str):
receive_client = find_streaming_clients()[msg_type]
receive_config = {**eh_receive_config, **kafka_receive_config, **{'KAFKA_TOPIC': os.getenv(kafka_topic)}}
return receive(sys.stdout, receive_client, receive_config)
def cli():
msg_type = os.getenv("MESSAGING_TYPE")
put_messages_on_input_queue(msg_type)
time.sleep(3)
input_received = receive_messages_on_queue('KAFKA_TOPIC_INPUT', msg_type)
print(input_received)
time.sleep(20)
output_received = receive_messages_on_queue('KAFKA_TOPIC_OUTPUT', msg_type)
print(output_received)
if output_received == "[]":
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
cli()
| true | true |
f71c78a611dd59c34a836099368a08f02076670b | 9,173 | py | Python | tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/timeseries/python/timeseries/state_space_models/varma.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Multivariate autoregressive model (vector autoregression).
Implements the following model (num_blocks = max(ar_order, ma_order + 1)):
y(t, 1) = \sum_{i=1}^{ar_order} ar_coefs[i] * y(t - 1, i)
y(t, i) = y(t - 1, i - 1) + ma_coefs[i - 1] * e(t) for 1 < i < num_blocks
y(t, num_blocks) = y(t - 1, num_blocks - 1) + e(t)
Where e(t) are Gaussian with zero mean and learned covariance.
Each element of ar_coefs and ma_coefs is a [num_features x num_features]
matrix. Each y(t, i) is a vector of length num_features. Indices in the above
equations are one-based. Initial conditions y(0, i) come from prior state (which
may either be learned or left as a constant with high prior covariance).
If ar_order > ma_order, the observation model is:
y(t, 1) + observation_noise(t)
If ma_order >= ar_order, it is (to observe the moving average component):
y(t, 1) + y(t, num_blocks) + observation_noise(t)
Where observation_noise(t) are Gaussian with zero mean and learned covariance.
This implementation uses a formulation which puts all of the autoregressive
coefficients in the transition equation for the observed component, which
enables learning using truncated backpropagation. Noise is not applied directly
to the observed component (with the exception of standard observation noise),
which further aids learning of the autoregressive coefficients when VARMA is in
an ensemble with other models (in which case having an observation noise term is
usually unavoidable).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class VARMA(state_space_model.StateSpaceModel):
"""A VARMA model implementation as a special case of the state space model."""
def __init__(self,
autoregressive_order,
moving_average_order,
configuration=state_space_model.StateSpaceModelConfiguration()):
"""Construct a VARMA model.
The size of the latent state for this model is:
num_features * max(autoregressive_order, moving_average_order + 1)
Square matrices of this size are constructed and multiplied.
Args:
autoregressive_order: The maximum autoregressive lag.
moving_average_order: The maximum moving average lag, after which
transient deviations are expected to return to their long-term mean.
configuration: A StateSpaceModelConfiguration object.
"""
self.ar_order = autoregressive_order
self.ma_order = moving_average_order
self.state_num_blocks = max(autoregressive_order, moving_average_order + 1)
super(VARMA, self).__init__(configuration=configuration)
self.state_dimension = self.state_num_blocks * self.num_features
def _define_parameters(self, observation_transition_tradeoff_log=None):
with variable_scope.variable_scope(self._variable_scope):
# TODO(allenl): Evaluate parameter transformations for AR/MA coefficients
# which improve interpretability/stability.
self.ar_coefs = variable_scope.get_variable(
name="ar_coefs",
shape=[self.num_features, self.num_features, self.ar_order],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
self.ma_coefs = variable_scope.get_variable(
name="ma_coefs",
initializer=array_ops.tile(
linalg_ops.eye(self.num_features, dtype=self.dtype)[None, :, :],
[self.ma_order, 1, 1]),
dtype=self.dtype)
super(VARMA, self)._define_parameters(
observation_transition_tradeoff_log=observation_transition_tradeoff_log)
def get_state_transition(self):
"""Construct state transition matrix from VARMA parameters.
Returns:
the state transition matrix. It has shape
[self.state_dimension, self.state_dimension].
"""
# Pad any unused AR blocks with zeros. The extra state is necessary if
# ma_order >= ar_order.
ar_coefs_padded = array_ops.reshape(
array_ops.pad(self.ar_coefs,
[[0, 0], [0, 0],
[0, self.state_num_blocks - self.ar_order]]),
[self.num_features, self.state_dimension])
shift_matrix = array_ops.pad(
linalg_ops.eye(
(self.state_num_blocks - 1) * self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features]])
return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0)
def get_noise_transform(self):
"""Construct state noise transform matrix from VARMA parameters.
Returns:
the state noise transform matrix. It has shape
[self.state_dimension, self.num_features].
"""
# Noise is broadcast, through the moving average coefficients, to
# un-observed parts of the latent state.
ma_coefs_padded = array_ops.reshape(
array_ops.pad(self.ma_coefs,
[[self.state_num_blocks - 1 - self.ma_order, 0], [0, 0],
[0, 0]]),
[(self.state_num_blocks - 1) * self.num_features, self.num_features],
name="noise_transform")
# Deterministically apply noise to the oldest component.
return array_ops.concat(
[ma_coefs_padded,
linalg_ops.eye(self.num_features, dtype=self.dtype)],
axis=0)
def get_observation_model(self, times):
"""Construct observation model matrix from VARMA parameters.
Args:
times: A [batch size] vector indicating the times observation models are
requested for. Unused.
Returns:
the observation model matrix. It has shape
[self.num_features, self.state_dimension].
"""
del times # StateSpaceModel will broadcast along the batch dimension
if self.ar_order > self.ma_order or self.state_num_blocks < 2:
return array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],
name="observation_model")
else:
# Add a second observed component which "catches" the accumulated moving
# average errors as they reach the end of the state. If ar_order >
# ma_order, this is unnecessary, since accumulated errors cycle naturally.
return array_ops.concat(
[
array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0,
self.num_features * (self.state_num_blocks - 2)]]),
linalg_ops.eye(self.num_features, dtype=self.dtype)
],
axis=1,
name="observation_model")
def get_state_transition_noise_covariance(
    self, minimum_initial_variance=1e-5):
  """Construct a learnable covariance matrix for state transition noise.

  Args:
    minimum_initial_variance: Floor applied to the empirical feature variance
      when deriving the initial (log) noise scale, keeping the log finite.

  Returns:
    A variable covariance matrix of size [noise dimension, noise dimension]
    produced by math_utils.variable_covariance_matrix, where the noise
    dimension is the second dimension of get_noise_transform()'s output.
  """
  # Most state space models use only an explicit observation noise term to
  # model deviations from expectations, and so a low initial transition noise
  # parameter is helpful there. Since deviations from expectations are also
  # modeled as transition noise in VARMA, we set its initial value based on a
  # slight over-estimate empirical observation noise.
  if self._input_statistics is not None:
    feature_variance = self._scale_variance(
        self._input_statistics.series_start_moments.variance)
    initial_transition_noise_scale = math_ops.log(
        math_ops.maximum(
            math_ops.reduce_mean(feature_variance), minimum_initial_variance))
  else:
    # No input statistics available; fall back to a neutral log-scale of 0.
    initial_transition_noise_scale = 0.
  state_noise_transform = ops.convert_to_tensor(
      self.get_noise_transform(), dtype=self.dtype)
  state_noise_dimension = tensor_shape.dimension_value(
      state_noise_transform.shape[1])
  return math_utils.variable_covariance_matrix(
      state_noise_dimension, "state_transition_noise",
      dtype=self.dtype,
      initial_overall_scale_log=initial_transition_noise_scale)
| 45.636816 | 97 | 0.692903 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class VARMA(state_space_model.StateSpaceModel):
def __init__(self,
autoregressive_order,
moving_average_order,
configuration=state_space_model.StateSpaceModelConfiguration()):
self.ar_order = autoregressive_order
self.ma_order = moving_average_order
self.state_num_blocks = max(autoregressive_order, moving_average_order + 1)
super(VARMA, self).__init__(configuration=configuration)
self.state_dimension = self.state_num_blocks * self.num_features
def _define_parameters(self, observation_transition_tradeoff_log=None):
with variable_scope.variable_scope(self._variable_scope):
self.ar_coefs = variable_scope.get_variable(
name="ar_coefs",
shape=[self.num_features, self.num_features, self.ar_order],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
self.ma_coefs = variable_scope.get_variable(
name="ma_coefs",
initializer=array_ops.tile(
linalg_ops.eye(self.num_features, dtype=self.dtype)[None, :, :],
[self.ma_order, 1, 1]),
dtype=self.dtype)
super(VARMA, self)._define_parameters(
observation_transition_tradeoff_log=observation_transition_tradeoff_log)
def get_state_transition(self):
ar_coefs_padded = array_ops.reshape(
array_ops.pad(self.ar_coefs,
[[0, 0], [0, 0],
[0, self.state_num_blocks - self.ar_order]]),
[self.num_features, self.state_dimension])
shift_matrix = array_ops.pad(
linalg_ops.eye(
(self.state_num_blocks - 1) * self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features]])
return array_ops.concat([ar_coefs_padded, shift_matrix], axis=0)
def get_noise_transform(self):
ma_coefs_padded = array_ops.reshape(
array_ops.pad(self.ma_coefs,
[[self.state_num_blocks - 1 - self.ma_order, 0], [0, 0],
[0, 0]]),
[(self.state_num_blocks - 1) * self.num_features, self.num_features],
name="noise_transform")
return array_ops.concat(
[ma_coefs_padded,
linalg_ops.eye(self.num_features, dtype=self.dtype)],
axis=0)
def get_observation_model(self, times):
del times
if self.ar_order > self.ma_order or self.state_num_blocks < 2:
return array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0, self.num_features * (self.state_num_blocks - 1)]],
name="observation_model")
else:
return array_ops.concat(
[
array_ops.pad(
linalg_ops.eye(self.num_features, dtype=self.dtype),
[[0, 0], [0,
self.num_features * (self.state_num_blocks - 2)]]),
linalg_ops.eye(self.num_features, dtype=self.dtype)
],
axis=1,
name="observation_model")
def get_state_transition_noise_covariance(
self, minimum_initial_variance=1e-5):
if self._input_statistics is not None:
feature_variance = self._scale_variance(
self._input_statistics.series_start_moments.variance)
initial_transition_noise_scale = math_ops.log(
math_ops.maximum(
math_ops.reduce_mean(feature_variance), minimum_initial_variance))
else:
initial_transition_noise_scale = 0.
state_noise_transform = ops.convert_to_tensor(
self.get_noise_transform(), dtype=self.dtype)
state_noise_dimension = tensor_shape.dimension_value(
state_noise_transform.shape[1])
return math_utils.variable_covariance_matrix(
state_noise_dimension, "state_transition_noise",
dtype=self.dtype,
initial_overall_scale_log=initial_transition_noise_scale)
| true | true |
f71c792738a6eb005cce3420d1463f363558dd6e | 898 | py | Python | Lms/migrations/versions/4b83761bf52a_users_table.py | stsl256/LMS_for_tinkoff | 5ace2a9d8f8e6c80660171502de6689f746535ed | [
"MIT"
] | null | null | null | Lms/migrations/versions/4b83761bf52a_users_table.py | stsl256/LMS_for_tinkoff | 5ace2a9d8f8e6c80660171502de6689f746535ed | [
"MIT"
] | null | null | null | Lms/migrations/versions/4b83761bf52a_users_table.py | stsl256/LMS_for_tinkoff | 5ace2a9d8f8e6c80660171502de6689f746535ed | [
"MIT"
] | 1 | 2020-12-09T00:41:26.000Z | 2020-12-09T00:41:26.000Z | """users table
Revision ID: 4b83761bf52a
Revises: 0d3bdf63aacc
Create Date: 2029-12-29 17:17:20.500426
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4b83761bf52a'
down_revision = '0d3bdf63aacc'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add city, description and phone columns to user."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('city', sa.String(length=64), nullable=True))
    op.add_column('user', sa.Column('description', sa.String(length=256), nullable=True))
    op.add_column('user', sa.Column('phone', sa.String(length=64), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the columns added by upgrade(), in reverse order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'phone')
    op.drop_column('user', 'description')
    op.drop_column('user', 'city')
# ### end Alembic commands ### | 29.933333 | 89 | 0.688196 | from alembic import op
import sqlalchemy as sa
revision = '4b83761bf52a'
down_revision = '0d3bdf63aacc'
branch_labels = None
depends_on = None
def upgrade():
add_column('user', sa.Column('phone', sa.String(length=64), nullable=True))
| true | true |
f71c7941417b4404871df8bb404ec9f2347ad2f0 | 1,254 | py | Python | var/spack/repos/builtin/packages/dpdk/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/dpdk/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/dpdk/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Dpdk(MakefilePackage):
    """DPDK is a set of libraries and drivers for fast packet processing.
    It supports many processor architectures and both FreeBSD and Linux."""

    homepage = "https://github.com/DPDK/dpdk"
    url = "https://github.com/DPDK/dpdk/archive/v19.11.tar.gz"

    # Known releases with their sha256 checksums.
    version('20.02', sha256='29e56ea8e47e30110ecb881fa5a37125a865dd2d45b61f68e93e334caaab16b7')
    version('19.11', sha256='ce1befb20a5e5c5399b326a39cfa23314a5229c0ced2553f53b09b1ae630706b')
    version('19.08', sha256='1ceff1a6f4f8d5f6f62c1682097249227ac5225ccd9638e0af09f5411c681038')
    version('19.05', sha256='5fea95cb726e6adaa506dab330e79563ccd4dacf03f126c826aabdced605d32b')
    version('19.02', sha256='04885d32c86fff5aefcfffdb8257fed405233602dbcd22f8298be13c2e285a50')

    # This recipe does not support aarch64 targets.
    conflicts('target=aarch64:', msg='DPDK is not supported on aarch64.')

    depends_on('numactl')

    def build(self, spec, prefix):
        # Generate the default build configuration, then compile.
        make('defconfig')
        make()

    def install(self, spec, prefix):
        # Copy the entire build tree into the installation prefix.
        install_tree('.', prefix)
| 39.1875 | 95 | 0.748804 |
from spack.package import *
class Dpdk(MakefilePackage):
homepage = "https://github.com/DPDK/dpdk"
url = "https://github.com/DPDK/dpdk/archive/v19.11.tar.gz"
version('20.02', sha256='29e56ea8e47e30110ecb881fa5a37125a865dd2d45b61f68e93e334caaab16b7')
version('19.11', sha256='ce1befb20a5e5c5399b326a39cfa23314a5229c0ced2553f53b09b1ae630706b')
version('19.08', sha256='1ceff1a6f4f8d5f6f62c1682097249227ac5225ccd9638e0af09f5411c681038')
version('19.05', sha256='5fea95cb726e6adaa506dab330e79563ccd4dacf03f126c826aabdced605d32b')
version('19.02', sha256='04885d32c86fff5aefcfffdb8257fed405233602dbcd22f8298be13c2e285a50')
conflicts('target=aarch64:', msg='DPDK is not supported on aarch64.')
depends_on('numactl')
def build(self, spec, prefix):
make('defconfig')
make()
def install(self, spec, prefix):
install_tree('.', prefix)
| true | true |
f71c7bece95f106b2a9bb71db5ac6017fee41c58 | 1,757 | py | Python | spdx_lint/lint.py | sthagen/verbose-pancake | f12b38c8aea8aee8f7a593a4669dfe5e0a447ba5 | [
"MIT"
] | 1 | 2021-02-28T11:39:00.000Z | 2021-02-28T11:39:00.000Z | spdx_lint/lint.py | sthagen/verbose-pancake | f12b38c8aea8aee8f7a593a4669dfe5e0a447ba5 | [
"MIT"
] | 26 | 2021-02-28T12:07:04.000Z | 2021-02-28T13:04:27.000Z | spdx_lint/lint.py | sthagen/verbose-pancake | f12b38c8aea8aee8f7a593a4669dfe5e0a447ba5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=expression-not-assigned,line-too-long
# Tag-value (TV) template for the SPDX 2.2 Document Creation Information
# section. Keys wrapped in brackets (e.g. "[CreatorComment]") are optional.
SPDX_2_2_DCI_TV = {
    "SPDXVersion": "SPDX-2.2",
    "DataLicense": "CC0-1.0",
    "SPDXID": "SPDXRef-DOCUMENT",
    "DocumentName": "$_SINGLE_LINE",
    "DocumentNamespace": "$_URI_MINUS_PART",
    "[ExternalDocumentRef]": [
        "DocumentRef-$_IDSTRING $_SPDX_DOCUMENT_URI $_PREFIX_COLON_CHECKSUM",
    ],
    "[LicenseListVersion]": "$_MAJOR.$_MINOR",
    "Creator": [
        "Person: $_PERSON_NAME [($_EMAIL)]",
        "Organization: $_ORGANIZATION [($_EMAIL)]",
        "Tool: $_TOOL_IDENTIFIED-$_VERSION",
    ],
    "Created": "%Y-%m-%dT%H:%M:%SZ",
    "[CreatorComment]": "<text>$_MULTI_LINE_TEXT</text>",
    "[DocumentComment]": "<text>$_MULTI_LINE_TEXT</text>",
}

SPDX_2_2_DCI_JSON = {  # Reverse engineered from round trip conversion - TODO(sthagen) later use json schema
    "SPDXID": "SPDXRef-DOCUMENT",
    "spdxVersion": "SPDX-2.2",
    "creationInfo": {
        "created": "%Y-%m-%dT%H:%M:%SZ",
        "creators": [
            "Person: $_PERSON_NAME [($_EMAIL)]",
            "Organization: $_ORGANIZATION [($_EMAIL)]",
            "Tool: $_TOOL_IDENTIFIED-$_VERSION",
        ]
    },
    "name": "$_SINGLE_LINE",
    "dataLicense": "CC0-1.0",
    "documentNamespace": "$_URI_MINUS_PART",
}


def spdx_dci_is_valid(sbom):
    """Shallow key level validation for DCI part of SPDX documents.

    Args:
        sbom: Parsed SPDX document (expected to be a mapping).

    Returns:
        True if every required top-level DCI key from SPDX_2_2_DCI_JSON is
        present with a truthy value; False for empty/None input, missing or
        falsy keys, or non-mapping input.
    """
    if not sbom:
        return False
    for key in SPDX_2_2_DCI_JSON:
        if key.startswith("["):
            # Bracketed keys denote optional entries; skip them.
            continue
        try:
            if not sbom.get(key):
                return False
        except AttributeError:
            # sbom has no .get(), so it is not a mapping and cannot be a
            # valid SPDX document. (Previously this printed and fell
            # through, incorrectly returning True for non-dict input.)
            return False
    return True
| 31.375 | 109 | 0.592487 |
SPDX_2_2_DCI_TV = {
"SPDXVersion": "SPDX-2.2",
"DataLicense": "CC0-1.0",
"SPDXID": "SPDXRef-DOCUMENT",
"DocumentName": "$_SINGLE_LINE",
"DocumentNamespace": "$_URI_MINUS_PART",
"[ExternalDocumentRef]": [
"DocumentRef-$_IDSTRING $_SPDX_DOCUMENT_URI $_PREFIX_COLON_CHECKSUM",
],
"[LicenseListVersion]": "$_MAJOR.$_MINOR",
"Creator": [
"Person: $_PERSON_NAME [($_EMAIL)]",
"Organization: $_ORGANIZATION [($_EMAIL)]",
"Tool: $_TOOL_IDENTIFIED-$_VERSION",
],
"Created": "%Y-%m-%dT%H:%M:%SZ",
"[CreatorComment]": "<text>$_MULTI_LINE_TEXT</text>",
"[DocumentComment]": "<text>$_MULTI_LINE_TEXT</text>",
}
SPDX_2_2_DCI_JSON = {
"SPDXID": "SPDXRef-DOCUMENT",
"spdxVersion": "SPDX-2.2",
"creationInfo": {
"created": "%Y-%m-%dT%H:%M:%SZ",
"creators": [
"Person: $_PERSON_NAME [($_EMAIL)]",
"Organization: $_ORGANIZATION [($_EMAIL)]",
"Tool: $_TOOL_IDENTIFIED-$_VERSION",
]
},
"name": "$_SINGLE_LINE",
"dataLicense": "CC0-1.0",
"documentNamespace": "$_URI_MINUS_PART",
}
def spdx_dci_is_valid(sbom):
if not sbom:
return False
for key in SPDX_2_2_DCI_JSON.keys():
if key.startswith("["):
continue
try:
if not sbom.get(key):
return False
except AttributeError as e:
print(str(sbom), e)
return True
| true | true |
f71c7c09de030a029f096f3ac1471f0f9a979e3b | 6,549 | py | Python | packages/pytea/pytest/benchmarks/transformers/examples/question-answering/run_squad_trainer.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | 1 | 2020-11-30T09:01:57.000Z | 2020-11-30T09:01:57.000Z | packages/pytea/pytest/benchmarks/transformers/examples/question-answering/run_squad_trainer.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | null | null | null | packages/pytea/pytest/benchmarks/transformers/examples/question-answering/run_squad_trainer.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for question-answering."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import transformers
from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, HfArgumentParser, SquadDataset
from transformers import SquadDataTrainingArguments as DataTrainingArguments
from transformers import Trainer, TrainingArguments
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # Required: path or huggingface.co hub identifier of the pretrained model.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # Optional override for the config; falls back to model_name_or_path.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    # Optional override for the tokenizer; falls back to model_name_or_path.
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    # Flag for fast tokenization (note: main() passes use_fast=False explicitly).
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
def main():
    """Fine-tune a pretrained model on SQuAD question answering.

    Parses model/data/training arguments (from the command line, or from a
    single JSON file argument), sets up logging, loads the pretrained
    config/tokenizer/model, builds the SQuAD train/eval datasets, and runs
    training via the Trainer, saving the model and tokenizer at the end.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Prepare Question-Answering task
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=False,  # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handling
    )
    model = AutoModelForQuestionAnswering.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    is_language_sensitive = hasattr(model.config, "lang2id")
    train_dataset = (
        SquadDataset(
            data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        SquadDataset(
            data_args,
            tokenizer=tokenizer,
            mode="dev",
            is_language_sensitive=is_language_sensitive,
            cache_dir=model_args.cache_dir,
        )
        if training_args.do_eval
        else None
    )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
def _mp_fn(index):
    """Per-process entry point for xla_spawn (TPUs); index is unused."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 37.637931 | 133 | 0.703008 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import transformers
from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, HfArgumentParser, SquadDataset
from transformers import SquadDataTrainingArguments as DataTrainingArguments
from transformers import Trainer, TrainingArguments
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=False,
)
model = AutoModelForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
is_language_sensitive = hasattr(model.config, "lang2id")
train_dataset = (
SquadDataset(
data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir
)
if training_args.do_train
else None
)
eval_dataset = (
SquadDataset(
data_args,
tokenizer=tokenizer,
mode="dev",
is_language_sensitive=is_language_sensitive,
cache_dir=model_args.cache_dir,
)
if training_args.do_eval
else None
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model()
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
def _mp_fn(index):
main()
if __name__ == "__main__":
main()
| true | true |
f71c7c547c5784ada99fdc35a9188f398ce31ecd | 123 | py | Python | adlmagics/adlmagics/__init__.py | Azure/Azure-Data-Service-Notebook | 6bd28587c9fa0a7c1f9113f638b790b1773c5585 | [
"MIT"
] | 6 | 2018-06-06T08:37:53.000Z | 2020-06-01T13:13:13.000Z | adlmagics/adlmagics/__init__.py | Azure/Azure-Data-Service-Notebook | 6bd28587c9fa0a7c1f9113f638b790b1773c5585 | [
"MIT"
] | 30 | 2018-06-08T02:47:18.000Z | 2018-07-25T07:07:07.000Z | adlmagics/adlmagics/__init__.py | Azure/Azure-Data-Service-Notebook | 6bd28587c9fa0a7c1f9113f638b790b1773c5585 | [
"MIT"
] | 5 | 2018-06-06T08:37:55.000Z | 2021-01-07T09:15:15.000Z | from adlmagics.adlmagics_main import AdlMagics
def load_ipython_extension(ipython):
ipython.register_magics(AdlMagics) | 30.75 | 46 | 0.853659 | from adlmagics.adlmagics_main import AdlMagics
def load_ipython_extension(ipython):
ipython.register_magics(AdlMagics) | true | true |
f71c7e0a03d097595b703379f84e0942a21fd206 | 4,909 | py | Python | kubernetes/client/models/v1beta1_self_subject_access_review_spec.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | [
"Apache-2.0"
] | 1 | 2019-07-12T05:38:06.000Z | 2019-07-12T05:38:06.000Z | kubernetes/client/models/v1beta1_self_subject_access_review_spec.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1beta1_self_subject_access_review_spec.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | [
"Apache-2.0"
] | 1 | 2021-05-18T12:25:56.000Z | 2021-05-18T12:25:56.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1SelfSubjectAccessReviewSpec(object):
    """Swagger model for the v1beta1 SelfSubjectAccessReview spec.

    NOTE: This class follows the swagger code generator's model contract
    (swagger_types / attribute_map / to_dict / to_str / equality); do not
    change its public interface.
    """

    # Attribute name -> swagger type name.
    swagger_types = {
        'non_resource_attributes': 'V1beta1NonResourceAttributes',
        'resource_attributes': 'V1beta1ResourceAttributes'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'non_resource_attributes': 'nonResourceAttributes',
        'resource_attributes': 'resourceAttributes'
    }

    def __init__(self, non_resource_attributes=None, resource_attributes=None):
        """Create a spec, optionally populating either attribute."""
        self._non_resource_attributes = None
        self._resource_attributes = None
        self.discriminator = None
        if non_resource_attributes is not None:
            self.non_resource_attributes = non_resource_attributes
        if resource_attributes is not None:
            self.resource_attributes = resource_attributes

    @property
    def non_resource_attributes(self):
        """Information for a non-resource access request.

        :rtype: V1beta1NonResourceAttributes
        """
        return self._non_resource_attributes

    @non_resource_attributes.setter
    def non_resource_attributes(self, non_resource_attributes):
        """Set the non-resource access request attributes."""
        self._non_resource_attributes = non_resource_attributes

    @property
    def resource_attributes(self):
        """Information for a resource access request.

        :rtype: V1beta1ResourceAttributes
        """
        return self._resource_attributes

    @resource_attributes.setter
    def resource_attributes(self, resource_attributes):
        """Set the resource access request attributes."""
        self._resource_attributes = resource_attributes

    def to_dict(self):
        """Return the model properties as a dict."""
        def _convert(value):
            # Recurse one level into lists/dicts, calling to_dict() on any
            # nested swagger models; leave plain values untouched.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {name: _convert(getattr(self, name))
                for name in self.swagger_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two specs are equal iff both are this model type with equal state."""
        if not isinstance(other, V1beta1SelfSubjectAccessReviewSpec):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 31.670968 | 111 | 0.644938 |
from pprint import pformat
from six import iteritems
import re
class V1beta1SelfSubjectAccessReviewSpec(object):
swagger_types = {
'non_resource_attributes': 'V1beta1NonResourceAttributes',
'resource_attributes': 'V1beta1ResourceAttributes'
}
attribute_map = {
'non_resource_attributes': 'nonResourceAttributes',
'resource_attributes': 'resourceAttributes'
}
def __init__(self, non_resource_attributes=None, resource_attributes=None):
self._non_resource_attributes = None
self._resource_attributes = None
self.discriminator = None
if non_resource_attributes is not None:
self.non_resource_attributes = non_resource_attributes
if resource_attributes is not None:
self.resource_attributes = resource_attributes
@property
def non_resource_attributes(self):
return self._non_resource_attributes
@non_resource_attributes.setter
def non_resource_attributes(self, non_resource_attributes):
self._non_resource_attributes = non_resource_attributes
@property
def resource_attributes(self):
return self._resource_attributes
@resource_attributes.setter
def resource_attributes(self, resource_attributes):
self._resource_attributes = resource_attributes
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1beta1SelfSubjectAccessReviewSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f71c7edc2ae9ca95fcb919548ce178feef3c1b16 | 2,805 | py | Python | st2common/tests/unit/test_triggers_registrar.py | saucetray/st2 | 8f507d6c8d9483c8371e386fe2b7998596856fd7 | [
"Apache-2.0"
] | 2 | 2021-08-04T01:04:06.000Z | 2021-08-04T01:04:08.000Z | st2common/tests/unit/test_triggers_registrar.py | saucetray/st2 | 8f507d6c8d9483c8371e386fe2b7998596856fd7 | [
"Apache-2.0"
] | 1 | 2022-03-31T03:53:22.000Z | 2022-03-31T03:53:22.000Z | st2common/tests/unit/test_triggers_registrar.py | saucetray/st2 | 8f507d6c8d9483c8371e386fe2b7998596856fd7 | [
"Apache-2.0"
] | 1 | 2019-10-11T14:42:28.000Z | 2019-10-11T14:42:28.000Z | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import st2common.bootstrap.triggersregistrar as triggers_registrar
from st2common.persistence.trigger import Trigger
from st2common.persistence.trigger import TriggerType
from st2tests.base import CleanDbTestCase
from st2tests.fixturesloader import get_fixtures_packs_base_path
__all__ = [
'TriggersRegistrarTestCase'
]
class TriggersRegistrarTestCase(CleanDbTestCase):
    """Integration tests for triggers_registrar against a clean database."""

    def test_register_all_triggers(self):
        """Registering from the fixtures base path creates the expected objects."""
        # Clean DB sanity check: no trigger types exist yet.
        trigger_type_dbs = TriggerType.get_all()
        self.assertEqual(len(trigger_type_dbs), 0)

        packs_base_path = get_fixtures_packs_base_path()
        count = triggers_registrar.register_triggers(packs_base_paths=[packs_base_path])
        self.assertEqual(count, 2)

        # Verify TriggerTypeDB and corresponding TriggerDB objects have been created
        trigger_type_dbs = TriggerType.get_all()
        trigger_dbs = Trigger.get_all()

        self.assertEqual(len(trigger_type_dbs), 2)
        self.assertEqual(len(trigger_dbs), 2)

    def test_register_triggers_from_pack(self):
        """Registering a single pack dir creates that pack's triggers with correct fields."""
        base_path = get_fixtures_packs_base_path()
        pack_dir = os.path.join(base_path, 'dummy_pack_1')

        # Clean DB sanity check: no trigger types exist yet.
        trigger_type_dbs = TriggerType.get_all()
        self.assertEqual(len(trigger_type_dbs), 0)

        count = triggers_registrar.register_triggers(pack_dir=pack_dir)
        self.assertEqual(count, 2)

        # Verify TriggerTypeDB and corresponding TriggerDB objects have been created
        trigger_type_dbs = TriggerType.get_all()
        trigger_dbs = Trigger.get_all()

        self.assertEqual(len(trigger_type_dbs), 2)
        self.assertEqual(len(trigger_dbs), 2)

        # First registered trigger type/trigger pair.
        self.assertEqual(trigger_type_dbs[0].name, 'event_handler')
        self.assertEqual(trigger_type_dbs[0].pack, 'dummy_pack_1')
        self.assertEqual(trigger_dbs[0].name, 'event_handler')
        self.assertEqual(trigger_dbs[0].pack, 'dummy_pack_1')
        self.assertEqual(trigger_dbs[0].type, 'dummy_pack_1.event_handler')

        # Second registered trigger type, including its payload schema.
        self.assertEqual(trigger_type_dbs[1].name, 'head_sha_monitor')
        self.assertEqual(trigger_type_dbs[1].pack, 'dummy_pack_1')
        self.assertEqual(trigger_type_dbs[1].payload_schema['type'], 'object')
| 40.652174 | 88 | 0.745455 |
from __future__ import absolute_import
import os
import st2common.bootstrap.triggersregistrar as triggers_registrar
from st2common.persistence.trigger import Trigger
from st2common.persistence.trigger import TriggerType
from st2tests.base import CleanDbTestCase
from st2tests.fixturesloader import get_fixtures_packs_base_path
__all__ = [
'TriggersRegistrarTestCase'
]
class TriggersRegistrarTestCase(CleanDbTestCase):
def test_register_all_triggers(self):
trigger_type_dbs = TriggerType.get_all()
self.assertEqual(len(trigger_type_dbs), 0)
packs_base_path = get_fixtures_packs_base_path()
count = triggers_registrar.register_triggers(packs_base_paths=[packs_base_path])
self.assertEqual(count, 2)
trigger_type_dbs = TriggerType.get_all()
trigger_dbs = Trigger.get_all()
self.assertEqual(len(trigger_type_dbs), 2)
self.assertEqual(len(trigger_dbs), 2)
def test_register_triggers_from_pack(self):
base_path = get_fixtures_packs_base_path()
pack_dir = os.path.join(base_path, 'dummy_pack_1')
trigger_type_dbs = TriggerType.get_all()
self.assertEqual(len(trigger_type_dbs), 0)
count = triggers_registrar.register_triggers(pack_dir=pack_dir)
self.assertEqual(count, 2)
trigger_type_dbs = TriggerType.get_all()
trigger_dbs = Trigger.get_all()
self.assertEqual(len(trigger_type_dbs), 2)
self.assertEqual(len(trigger_dbs), 2)
self.assertEqual(trigger_type_dbs[0].name, 'event_handler')
self.assertEqual(trigger_type_dbs[0].pack, 'dummy_pack_1')
self.assertEqual(trigger_dbs[0].name, 'event_handler')
self.assertEqual(trigger_dbs[0].pack, 'dummy_pack_1')
self.assertEqual(trigger_dbs[0].type, 'dummy_pack_1.event_handler')
self.assertEqual(trigger_type_dbs[1].name, 'head_sha_monitor')
self.assertEqual(trigger_type_dbs[1].pack, 'dummy_pack_1')
self.assertEqual(trigger_type_dbs[1].payload_schema['type'], 'object')
| true | true |
f71c8126e5ce154c4f9e4de6a8537b75a21c3612 | 1,486 | py | Python | examples/node_labels.py | venukarnati92/python-1 | 3fabf9ed9f4758fb5133975a58fc147471e91d9d | [
"Apache-2.0"
] | 4,417 | 2018-01-13T04:30:48.000Z | 2022-03-31T15:33:59.000Z | examples/node_labels.py | belajarqywok/python | b15bea16a87ad03136a4627941ac437582ea4657 | [
"Apache-2.0"
] | 1,414 | 2018-01-12T19:31:56.000Z | 2022-03-31T22:01:02.000Z | examples/node_labels.py | palnabarun/python | 6b01c95e1673c0787d3d688b361bfd995d62dd98 | [
"Apache-2.0"
] | 2,854 | 2018-01-14T08:57:33.000Z | 2022-03-31T01:41:56.000Z | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example demonstrates the following:
- Get a list of all the cluster nodes
- Iterate through each node list item
- Add or overwirite label "foo" with the value "bar"
- Remove the label "baz"
- Return the list of node with updated labels
"""
from kubernetes import client, config
def main():
config.load_kube_config()
api_instance = client.CoreV1Api()
body = {
"metadata": {
"labels": {
"foo": "bar",
"baz": None}
}
}
# Listing the cluster nodes
node_list = api_instance.list_node()
print("%s\t\t%s" % ("NAME", "LABELS"))
# Patching the node labels
for node in node_list.items:
api_response = api_instance.patch_node(node.metadata.name, body)
print("%s\t%s" % (node.metadata.name, node.metadata.labels))
if __name__ == '__main__':
main()
| 28.576923 | 74 | 0.662853 |
from kubernetes import client, config
def main():
config.load_kube_config()
api_instance = client.CoreV1Api()
body = {
"metadata": {
"labels": {
"foo": "bar",
"baz": None}
}
}
node_list = api_instance.list_node()
print("%s\t\t%s" % ("NAME", "LABELS"))
for node in node_list.items:
api_response = api_instance.patch_node(node.metadata.name, body)
print("%s\t%s" % (node.metadata.name, node.metadata.labels))
if __name__ == '__main__':
main()
| true | true |
f71c817e947e6fd2bca33380c21307542dc6f585 | 110,038 | py | Python | hermes/hermes_parser.py | scottfrazer/hermes | d82d916dd20da58c056b18dbb9b6c01a3700f3e1 | [
"MIT"
] | 14 | 2015-07-29T06:20:00.000Z | 2021-03-21T10:23:38.000Z | hermes/hermes_parser.py | scottfrazer/hermes | d82d916dd20da58c056b18dbb9b6c01a3700f3e1 | [
"MIT"
] | 32 | 2015-02-13T18:34:44.000Z | 2020-03-17T15:08:51.000Z | hermes/hermes_parser.py | scottfrazer/hermes | d82d916dd20da58c056b18dbb9b6c01a3700f3e1 | [
"MIT"
] | 8 | 2015-04-22T11:46:59.000Z | 2019-03-29T22:58:38.000Z |
import sys
import os
import re
import base64
import argparse
from collections import OrderedDict
# Common Code #
def parse_tree_string(parsetree, indent=None, b64_source=True, indent_level=0, debug=False):
    """Recursively render a ParseTree (or Terminal leaf) as a string.

    :param parsetree: a ParseTree or Terminal; any other value yields None.
    :param indent: spaces per nesting level; None renders everything on one line.
    :param b64_source: base64-encode terminal source text (forwarded to Terminal.dumps).
    :param indent_level: current recursion depth, used to build the leading indent.
    :param debug: include each ParseTree node's debug_str() flags in the output.
    """
    indent_str = (' ' * indent * indent_level) if indent else ''
    if isinstance(parsetree, ParseTree):
        # Render all children one level deeper, then join them either inline
        # (no indent / leaf node) or one per line.
        children = [parse_tree_string(child, indent, b64_source, indent_level+1, debug) for child in parsetree.children]
        debug_str = parsetree.debug_str() if debug else ''
        if indent is None or len(children) == 0:
            return '{0}({1}: {2}{3})'.format(indent_str, parsetree.nonterminal, debug_str, ', '.join(children))
        else:
            return '{0}({1}:{2}\n{3}\n{4})'.format(
                indent_str,
                parsetree.nonterminal,
                debug_str,
                ',\n'.join(children),
                indent_str
            )
    elif isinstance(parsetree, Terminal):
        return indent_str + parsetree.dumps(b64_source=b64_source)
def ast_string(ast, indent=None, b64_source=True, indent_level=0):
    """Recursively render an Ast node, AstList/list, or Terminal as a string.

    :param ast: an Ast, a list of AST values, or a Terminal; other values yield None.
    :param indent: spaces per nesting level; None renders everything on one line.
    :param b64_source: base64-encode terminal source text (forwarded to Terminal.dumps).
    :param indent_level: current recursion depth, used to build the leading indent.
    """
    indent_str = (' ' * indent * indent_level) if indent else ''
    next_indent_str = (' ' * indent * (indent_level+1)) if indent else ''
    if isinstance(ast, Ast):
        # Render each attribute value one level deeper, preserving attribute order.
        children = OrderedDict([(k, ast_string(v, indent, b64_source, indent_level+1)) for k, v in ast.attributes.items()])
        if indent is None:
            return '({0}: {1})'.format(
                ast.name,
                ', '.join('{0}={1}'.format(k, v) for k, v in children.items())
            )
        else:
            return '({0}:\n{1}\n{2})'.format(
                ast.name,
                ',\n'.join(['{0}{1}={2}'.format(next_indent_str, k, v) for k, v in children.items()]),
                indent_str
            )
    elif isinstance(ast, list):
        children = [ast_string(element, indent, b64_source, indent_level+1) for element in ast]
        if indent is None or len(children) == 0:
            return '[{0}]'.format(', '.join(children))
        else:
            return '[\n{1}\n{0}]'.format(
                indent_str,
                ',\n'.join(['{0}{1}'.format(next_indent_str, child) for child in children]),
            )
    elif isinstance(ast, Terminal):
        return ast.dumps(b64_source=b64_source)
class Terminal:
    """A single token produced by the lexer.

    Holds the numeric terminal id, the terminal name (`str`), the matched
    source text, and its location (resource, line, col).
    """
    def __init__(self, id, str, source_string, resource, line, col):
        # Mirror the generated original, which dumped locals() into __dict__:
        # every constructor argument (plus the 'self' self-reference) becomes
        # an instance attribute.
        self.id = id
        self.str = str
        self.source_string = source_string
        self.resource = resource
        self.line = line
        self.col = col
        self.self = self
    def getId(self):
        """Return the numeric terminal id."""
        return self.id
    def ast(self):
        """A terminal is its own AST node."""
        return self
    def dumps(self, b64_source=True, **kwargs):
        """Serialize as '<resource:line:col name "source">'.

        The matched source text is base64-encoded unless b64_source is False.
        Extra keyword arguments are accepted and ignored.
        """
        if b64_source:
            rendered_source = base64.b64encode(self.source_string.encode('utf-8')).decode('utf-8')
        else:
            rendered_source = self.source_string
        return '<{resource}:{line}:{col} {terminal} "{source}">'.format(
            resource=self.resource,
            line=self.line,
            col=self.col,
            terminal=self.str,
            source=rendered_source
        )
    def __str__(self):
        return self.dumps()
class NonTerminal():
    """A grammar nonterminal: a numeric id plus its name."""
    def __init__(self, id, str):
        # Mirror the generated original's locals() dump, including the
        # 'self' self-reference attribute.
        self.id = id
        self.str = str
        self.self = self
        self.list = False
    def __str__(self):
        return self.str
class AstTransform:
    # Abstract marker base for the AST transformations attached to parse-tree
    # nodes; see AstTransformSubstitution and AstTransformNodeCreator below.
    pass
class AstTransformSubstitution(AstTransform):
    """AST transform that substitutes the AST of the child at index `idx`."""
    def __init__(self, idx):
        # Mirror the generated original's locals() dump, including the
        # 'self' self-reference attribute.
        self.idx = idx
        self.self = self
    def __repr__(self):
        # Rendered as '$<index>', matching grammar-file syntax.
        return '$' + str(self.idx)
    def __str__(self):
        return self.__repr__()
class AstTransformNodeCreator(AstTransform):
def __init__( self, name, parameters ):
self.__dict__.update(locals())
def __repr__( self ):
return self.name + '( ' + ', '.join(['%s=$%s' % (k,str(v)) for k,v in self.parameters.items()]) + ' )'
def __str__(self):
return self.__repr__()
class AstList(list):
    """A list of parse-tree/AST elements that can convert itself to a plain
    list of ASTs."""
    def ast(self):
        """Return a plain list with each element converted via its ast()."""
        return [element.ast() for element in self]
    def dumps(self, indent=None, b64_source=True):
        """Pretty-print via the module-level ast_string helper."""
        return ast_string(self, indent=indent, b64_source=b64_source)
class ParseTree():
    """A node in the concrete parse tree produced by the parser.

    Carries the nonterminal being expanded, its child nodes, and bookkeeping
    flags used when converting expression-parser (Pratt) subtrees to ASTs.
    """
    def __init__(self, nonterminal):
        # locals() dump also stores 'nonterminal' and a 'self' self-reference.
        self.__dict__.update(locals())
        self.children = []
        # astTransform: how ast() builds the AST for this node (substitution
        # or node creation); None means "use the first child / nothing".
        self.astTransform = None
        self.isExpr = False
        self.isNud = False
        self.isPrefix = False
        self.isInfix = False
        self.nudMorphemeCount = 0
        self.isExprNud = False # true for rules like _expr := {_expr} + {...}
        # Terminal id of the list separator to skip when flattening list nodes.
        self.list_separator_id = None
        self.list = False
    def debug_str(self):
        """Render the non-default flags of this node, colorized when set.

        NOTE: imports the third-party 'xtermcolor' package lazily, so it is
        only required when debug output is requested.
        """
        from copy import deepcopy
        def h(v):
            if v == False or v is None:
                return str(v)
            from xtermcolor import colorize
            return colorize(str(v), ansi=190)
        d = deepcopy(self.__dict__)
        for key in ['self', 'nonterminal', 'children']:
            del d[key]
        # Keep only flags that differ from their falsy defaults.
        f = {k: v for k, v in d.items() if v != False and v is not None}
        return ' [{}]'.format(', '.join(['{}={}'.format(k,h(v)) for k,v in f.items()]))
    def add(self, tree):
        """Append a child node (ParseTree or Terminal)."""
        self.children.append( tree )
    def ast(self):
        """Convert this parse tree to its AST per the attached transform.

        Returns an AstList for list nodes, an Ast/Terminal for transformed
        nodes, or None when nothing applies.
        """
        if self.list == True:
            # List node: flatten children, skipping separator terminals.
            r = AstList()
            if len(self.children) == 0:
                return r
            for child in self.children:
                if isinstance(child, Terminal) and self.list_separator_id is not None and child.id == self.list_separator_id:
                    continue
                r.append(child.ast())
            return r
        elif self.isExpr:
            # Expression (Pratt) node: child indices in the transform may
            # refer into a nud subtree that was folded into children[0].
            if isinstance(self.astTransform, AstTransformSubstitution):
                return self.children[self.astTransform.idx].ast()
            elif isinstance(self.astTransform, AstTransformNodeCreator):
                parameters = OrderedDict()
                for name, idx in self.astTransform.parameters.items():
                    if idx == '$':
                        child = self.children[0]
                    elif isinstance(self.children[0], ParseTree) and \
                         self.children[0].isNud and \
                         not self.children[0].isPrefix and \
                         not self.isExprNud and \
                         not self.isInfix:
                        # Index into the nud subtree first, then continue with
                        # this node's remaining children.
                        if idx < self.children[0].nudMorphemeCount:
                            child = self.children[0].children[idx]
                        else:
                            index = idx - self.children[0].nudMorphemeCount + 1
                            child = self.children[index]
                    elif len(self.children) == 1 and not isinstance(self.children[0], ParseTree) and not isinstance(self.children[0], list):
                        # Single non-tree child: return it directly.
                        return self.children[0]
                    else:
                        child = self.children[idx]
                    parameters[name] = child.ast()
                return Ast(self.astTransform.name, parameters)
            # NOTE(review): falls through to an implicit None when isExpr is
            # set but no known transform is attached.
        else:
            if isinstance(self.astTransform, AstTransformSubstitution):
                return self.children[self.astTransform.idx].ast()
            elif isinstance(self.astTransform, AstTransformNodeCreator):
                parameters = OrderedDict()
                for name, idx in self.astTransform.parameters.items():
                    parameters[name] = self.children[idx].ast()
                return Ast(self.astTransform.name, parameters)
            elif len(self.children):
                # No transform: default to the first child's AST.
                return self.children[0].ast()
            else:
                return None
    def dumps(self, indent=None, b64_source=True, debug=False):
        """Pretty-print via the module-level parse_tree_string helper."""
        args = locals()
        del args['self']
        return parse_tree_string(self, **args)
class Ast():
    """An abstract-syntax-tree node: a node name plus an ordered mapping of
    attribute name -> AST value."""
    def __init__(self, name, attributes):
        # Mirror the generated original's locals() dump, including the
        # 'self' self-reference attribute.
        self.name = name
        self.attributes = attributes
        self.self = self
    def attr(self, attr):
        """Look up a single attribute value by name."""
        return self.attributes[attr]
    def dumps(self, indent=None, b64_source=True):
        """Pretty-print via the module-level ast_string helper."""
        return ast_string(self, indent=indent, b64_source=b64_source)
class SyntaxError(Exception):
    """A parse/lex error carrying a human-readable message.

    NOTE: intentionally shadows the builtin SyntaxError within this module,
    as in the original generated code.
    """
    def __init__(self, message):
        # Mirror the generated original's locals() dump, including the
        # 'self' self-reference attribute.
        self.message = message
        self.self = self
    def __str__(self):
        return self.message
class TokenStream(list):
    """A list of tokens with a cursor (`index`) for sequential consumption."""
    def __init__(self, arg=()):
        # Fix: the original used a mutable default argument (arg=[]).
        # An empty tuple avoids the shared-mutable-default pitfall and is
        # backward compatible, since list() accepts any iterable.
        super().__init__(arg)
        self.index = 0
    def advance(self):
        """Move the cursor forward one token; return the new current token
        (None when the stream is exhausted)."""
        self.index += 1
        return self.current()
    def last(self):
        """Return the final token in the stream (raises IndexError if empty)."""
        return self[-1]
    def current(self):
        """Return the token at the cursor, or None past the end."""
        try:
            return self[self.index]
        except IndexError:
            return None
class DefaultSyntaxErrorHandler:
    """Builds a SyntaxError for each reported problem, records it in
    `self.errors`, and returns it so the caller may raise it."""
    def __init__(self):
        self.errors = []
    def _error(self, string):
        # Central factory: every public method funnels through here so all
        # errors are recorded in order.
        err = SyntaxError(string)
        self.errors.append(err)
        return err
    def unexpected_eof(self):
        return self._error("Error: unexpected end of file")
    def excess_tokens(self):
        return self._error("Finished parsing without consuming all tokens.")
    def unexpected_symbol(self, nonterminal, actual_terminal, expected_terminals, rule):
        message = "Unexpected symbol (line {line}, col {col}) when parsing parse_{nt}. Expected {expected}, got {actual}.".format(
            line=actual_terminal.line,
            col=actual_terminal.col,
            nt=nonterminal,
            expected=', '.join(expected_terminals),
            actual=actual_terminal
        )
        return self._error(message)
    def no_more_tokens(self, nonterminal, expected_terminal, last_terminal):
        return self._error("No more tokens. Expecting " + expected_terminal)
    def invalid_terminal(self, nonterminal, invalid_terminal):
        return self._error("Invalid symbol ID: {} ({})".format(invalid_terminal.id, invalid_terminal.string))
    def unrecognized_token(self, string, line, col):
        # Show the offending source line with a caret under the bad column.
        offending_line = string.split('\n')[line - 1]
        pointer = ' ' * (col - 1) + '^'
        return self._error('Unrecognized token on line {}, column {}:\n\n{}\n{}'.format(
            line, col, offending_line, pointer
        ))
    def missing_list_items(self, method, required, found, last):
        return self._error("List for {} requires {} items but only {} were found.".format(method, required, found))
    def missing_terminator(self, method, terminator, last):
        return self._error("List for "+method+" is missing a terminator")
class ParserContext:
    """State threaded through the recursive-descent parse: the token stream,
    the error handler, and the nonterminal/rule currently being parsed."""
    def __init__(self, tokens, errors):
        # Mirror the generated original's locals() dump, including the
        # 'self' self-reference attribute.
        self.tokens = tokens
        self.errors = errors
        self.self = self
        self.nonterminal_string = None
        self.rule_string = None
# Parser Code #
# Bidirectional terminal map (generated): numeric terminal id -> terminal
# name AND terminal name -> numeric id, in one dict. Ids 0-45 are terminals;
# nonterminal ids start at 46 (see nonterminal_first below).
terminals = {
    0: 'regex_enum',
    1: 'dash',
    2: 'lbrace',
    3: 'arrow',
    4: 'unary',
    5: 'rsquare',
    6: 'infix_rule_hint',
    7: 'equals',
    8: 'stack_push',
    9: 'code_start',
    10: 'langle',
    11: 'no_group',
    12: 'expr_rule_hint',
    13: 'partials',
    14: 'regex',
    15: 'rbrace',
    16: 'code',
    17: 'identifier',
    18: 'regex_partial',
    19: 'rangle',
    20: 'language',
    21: 'integer',
    22: 'left',
    23: 'rparen',
    24: 'right',
    25: 'mixfix_rule_hint',
    26: 'colon',
    27: 'expression_divider',
    28: 'prefix_rule_hint',
    29: 'asterisk',
    30: 'll1_rule_hint',
    31: 'string',
    32: 'lexer',
    33: 'grammar',
    34: 'terminal',
    35: 'lsquare',
    36: 'parser',
    37: 'lparen',
    38: 'comma',
    39: 'action',
    40: 'pipe',
    41: 'parser_expression',
    42: 'nonterminal',
    43: 'mode',
    44: 'nonterminal_reference',
    45: 'null',
    'regex_enum': 0,
    'dash': 1,
    'lbrace': 2,
    'arrow': 3,
    'unary': 4,
    'rsquare': 5,
    'infix_rule_hint': 6,
    'equals': 7,
    'stack_push': 8,
    'code_start': 9,
    'langle': 10,
    'no_group': 11,
    'expr_rule_hint': 12,
    'partials': 13,
    'regex': 14,
    'rbrace': 15,
    'code': 16,
    'identifier': 17,
    'regex_partial': 18,
    'rangle': 19,
    'language': 20,
    'integer': 21,
    'left': 22,
    'rparen': 23,
    'right': 24,
    'mixfix_rule_hint': 25,
    'colon': 26,
    'expression_divider': 27,
    'prefix_rule_hint': 28,
    'asterisk': 29,
    'll1_rule_hint': 30,
    'string': 31,
    'lexer': 32,
    'grammar': 33,
    'terminal': 34,
    'lsquare': 35,
    'parser': 36,
    'lparen': 37,
    'comma': 38,
    'action': 39,
    'pipe': 40,
    'parser_expression': 41,
    'nonterminal': 42,
    'mode': 43,
    'nonterminal_reference': 44,
    'null': 45,
}
# LL(1) parse table (generated): table[nonterminal_id - 46][terminal_id]
# gives the index of the rule to expand (see nonterminal_rules/rule_first),
# or -1 when that terminal cannot begin the nonterminal (i.e. a syntax
# error). Rows are ordered by nonterminal id, starting at 46.
table = [
    [-1, -1, 16, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, 72, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 70, -1, 71, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 29, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 75, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 73, -1, -1, -1, -1, -1, -1, -1, 74, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 86, -1, -1, -1, -1, -1, -1, -1, -1, -1, 85, -1, -1, 84, -1, -1, -1, -1, -1, -1, -1, 83, -1, -1, 87],
    [-1, -1, -1, 49, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1],
    [-1, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1, 60, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 59, -1, -1, -1, -1, -1, -1, -1, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1],
    [7, -1, -1, -1, -1, -1, -1, -1, -1, 10, -1, -1, -1, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 40, -1, -1],
    [-1, -1, -1, 64, -1, -1, -1, -1, -1, -1, -1, -1, 64, -1, -1, 64, -1, 64, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 64, -1, -1, 64, -1, -1, -1, -1, 64, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 79, -1],
    [21, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1, -1, 4, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 23, -1, 23, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 22, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, 69, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 68, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, 63, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 61, -1, -1, 62, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 51, -1, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 51, -1, -1, -1, 51, -1, -1, -1, -1, -1, 51, -1, 51, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 80, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, 2, -1, -1, -1, -1, 2, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 38, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 37, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 24, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 47, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 47, -1, -1, -1, 47, -1, 53, -1, -1, -1, -1, 53, 47, -1, -1, 52],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 82, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 19, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [35, -1, -1, -1, -1, -1, -1, -1, 35, 35, -1, 34, -1, 35, 35, 35, -1, 35, -1, -1, -1, -1, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 35, 34, -1, -1, -1, 35, -1, -1, -1, 35, -1, 35],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 41, -1, -1, -1, -1, 42, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, 32, -1, -1, -1, -1, -1, -1, -1, -1, 31, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, -1, -1, -1, 33, -1, -1, -1, -1, -1, 39],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, 76, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 57, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 56, -1, -1, -1, -1, -1, -1, -1, -1],
]
# FIRST sets (generated): nonterminal id -> terminal ids that can begin it.
# A -1 entry marks a nonterminal that can derive the empty string (it has an
# :_empty production; see nonterminal_rules).
nonterminal_first = {
    46: [2, -1],
    47: [24, 4, 22],
    48: [34, -1],
    49: [36],
    50: [34, 42, 17],
    51: [34, 42, 21, 31, 45],
    52: [3, -1],
    53: [27, -1],
    54: [0, 13, 43, 14, 9],
    55: [43],
    56: [34, 42, -1, 17],
    57: [44, 17],
    58: [0],
    59: [32, 36, 41],
    60: [37, -1],
    61: [0, 13, 14, -1, 43, 9],
    62: [36, -1, 41, 32],
    63: [14, -1],
    64: [37],
    65: [34],
    66: [12, 37],
    67: [29, 1],
    68: [30],
    69: [37, 12, -1],
    70: [9],
    71: [28, 25, 6],
    72: [34, -1, 3, 42, 17],
    73: [17],
    74: [32, 36, 41],
    75: [-1, 17],
    76: [35, 11],
    77: [3, 34, -1, 42, 17],
    78: [37],
    79: [17],
    80: [3, 34, 36, -1, 41, 42, 17, 45],
    81: [34, -1, 39, 8, 17, 45],
    82: [34, -1, 42, 17],
    83: [-1, 17],
    84: [2],
    85: [14],
    86: [27],
    87: [29, 1],
    88: [41],
    89: [32],
    90: [17],
    91: [14, 0],
    92: [35, 11, -1],
    93: [-1, 17],
    94: [36, 41],
    95: [34, 39, 8, 17, 45],
    96: [30, -1],
    97: [33],
    98: [13],
    99: [3],
    100: [34, -1, 31, 42, 21, 45],
    101: [37, -1],
}
# FOLLOW sets (generated): nonterminal id -> terminal ids that may appear
# immediately after that nonterminal in a valid parse.
nonterminal_follow = {
    46: [3],
    47: [23],
    48: [23],
    49: [30, 32, 15, 36, 41],
    50: [12, 30, 3, 34, 15, 37, 40, 42, 17],
    51: [23, 38],
    52: [27, 12, 30, 15, 37, 40],
    53: [15, 12, 37, 3],
    54: [0, 13, 14, 15, 43, 9],
    55: [0, 13, 14, 15, 43, 9],
    56: [15, 12, 37, 3],
    57: [27, 12, 30, 15, 37, 40],
    58: [0, 13, 14, 15, 43, 9],
    59: [15, 36, 41, 32],
    60: [15, 17],
    61: [15],
    62: [15],
    63: [15],
    64: [15, 17],
    65: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
    66: [15, 12, 37],
    67: [26],
    68: [15, 30],
    69: [15],
    70: [0, 13, 14, 15, 43, 9],
    71: [15, 12, 37],
    72: [15, 30, 40],
    73: [23, 38],
    74: [15, 36, 41, 32],
    75: [15],
    76: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
    77: [15, 30],
    78: [12],
    79: [15, 17],
    80: [15, 30],
    81: [0, 13, 14, 15, 43, 9],
    82: [12, 30, 3, 15, 37, 40],
    83: [15, 23],
    84: [3],
    85: [15, 14],
    86: [15, 12, 37, 3],
    87: [23],
    88: [30, 32, 15, 36, 41],
    89: [15, 36, 41, 32],
    90: [3, 12, 30, 34, 15, 37, 40, 42, 17],
    91: [0, 13, 14, 15, 43, 9],
    92: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
    93: [23],
    94: [30, 32, 15, 36, 41],
    95: [0, 13, 14, 34, 15, 39, 43, 8, 9, 17, 45],
    96: [15],
    97: [-1],
    98: [0, 13, 14, 15, 43, 9],
    99: [27, 12, 30, 15, 37, 40],
    100: [23],
    101: [12],
}
# FIRST sets per rule (generated): rule index -> terminal ids that can begin
# that rule's production (see nonterminal_rules for the rule text). A -1
# entry marks a production that can derive the empty string.
rule_first = {
    0: [32, 36, -1, 41],
    1: [33],
    2: [32, 36, 41],
    3: [32],
    4: [36, 41],
    5: [0, 13, 14, -1, 43, 9],
    6: [32],
    7: [14, 0],
    8: [43],
    9: [13],
    10: [9],
    11: [9],
    12: [14, -1],
    13: [13],
    14: [14],
    15: [0],
    16: [2],
    17: [-1],
    18: [34, -1, 39, 8, 17, 45],
    19: [14],
    20: [-1, 17],
    21: [0],
    22: [37],
    23: [-1],
    24: [17],
    25: [-1, 17],
    26: [37],
    27: [2],
    28: [34],
    29: [34],
    30: [-1],
    31: [17],
    32: [8],
    33: [39],
    34: [35, 11],
    35: [-1],
    36: [34],
    37: [35],
    38: [11],
    39: [45],
    40: [43],
    41: [36],
    42: [41],
    43: [30, -1],
    44: [36],
    45: [30],
    46: [34, 3, -1, 17, 42],
    47: [3, 42, 34, 17, -1],
    48: [34, 42, -1, 17],
    49: [3],
    50: [-1],
    51: [34, 42, -1, 3, 17],
    52: [45],
    53: [36, 41],
    54: [12, 37, -1],
    55: [41],
    56: [37],
    57: [-1],
    58: [12, 37],
    59: [27],
    60: [-1],
    61: [25],
    62: [28],
    63: [6],
    64: [34, 42, -1, 17],
    65: [27],
    66: [37],
    67: [29, 1],
    68: [29],
    69: [1],
    70: [22],
    71: [24],
    72: [4],
    73: [34],
    74: [42],
    75: [17],
    76: [3],
    77: [-1, 17],
    78: [17],
    79: [44],
    80: [17],
    81: [31, 21, 34, -1, 42, 45],
    82: [17],
    83: [42],
    84: [34],
    85: [31],
    86: [21],
    87: [45],
}
nonterminal_rules = {
46: [
"$_gen3 = $regex_options",
"$_gen3 = :_empty",
],
47: [
"$associativity = :left",
"$associativity = :right",
"$associativity = :unary",
],
48: [
"$_gen8 = $terminal",
"$_gen8 = :_empty",
],
49: [
"$parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )",
],
50: [
"$morpheme = :terminal",
"$morpheme = :nonterminal",
"$morpheme = $macro",
],
51: [
"$macro_parameter = :nonterminal",
"$macro_parameter = :terminal",
"$macro_parameter = :string",
"$macro_parameter = :integer",
"$macro_parameter = :null",
],
52: [
"$_gen13 = $ast_transform",
"$_gen13 = :_empty",
],
53: [
"$_gen16 = $led",
"$_gen16 = :_empty",
],
54: [
"$lexer_atom = $lexer_regex",
"$lexer_atom = $lexer_mode",
"$lexer_atom = $lexer_partials",
"$lexer_atom = $lexer_code",
],
55: [
"$lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )",
],
56: [
"$nud = $_gen12",
],
57: [
"$ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )",
"$ast_transform_sub = :nonterminal_reference",
],
58: [
"$enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )",
],
59: [
"$body_element_sub = $lexer",
"$body_element_sub = $parser",
],
60: [
"$_gen6 = $regex_enumeration_options",
"$_gen6 = :_empty",
],
61: [
"$_gen1 = list($lexer_atom)",
],
62: [
"$_gen0 = list($body_element)",
],
63: [
"$_gen2 = list($regex_partial)",
],
64: [
"$regex_enumeration_options = :lparen $_gen7 :rparen -> $1",
],
65: [
"$terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )",
],
66: [
"$expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )",
],
67: [
"$binding_power_marker = :asterisk",
"$binding_power_marker = :dash",
],
68: [
"$ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )",
],
69: [
"$_gen14 = list($expression_rule)",
],
70: [
"$lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )",
],
71: [
"$expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )",
"$expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )",
"$expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )",
],
72: [
"$rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )",
],
73: [
"$ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )",
],
74: [
"$body_element = $body_element_sub",
],
75: [
"$_gen5 = list($regex_enumeration)",
],
76: [
"$match_group = :lsquare :integer :rsquare -> $1",
"$match_group = :no_group",
],
77: [
"$_gen11 = list($rule,:pipe)",
],
78: [
"$binding_power = :lparen $precedence :rparen -> $1",
],
79: [
"$regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )",
],
80: [
"$ll1_rule_rhs = $_gen11",
"$ll1_rule_rhs = :null -> NullProduction( )",
"$ll1_rule_rhs = $parser",
],
81: [
"$_gen4 = list($lexer_target)",
],
82: [
"$_gen12 = list($morpheme)",
],
83: [
"$_gen7 = list(:identifier,:comma)",
],
84: [
"$regex_options = :lbrace $_gen7 :rbrace -> $1",
],
85: [
"$regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )",
],
86: [
"$led = :expression_divider $_gen12 -> $1",
],
87: [
"$precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )",
],
88: [
"$parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )",
],
89: [
"$lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )",
],
90: [
"$macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )",
],
91: [
"$lexer_regex = $enumerated_regex",
"$lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )",
],
92: [
"$_gen9 = $match_group",
"$_gen9 = :_empty",
],
93: [
"$_gen17 = list($ast_parameter,:comma)",
],
94: [
"$parser = $parser_ll1",
"$parser = $parser_expression",
],
95: [
"$lexer_target = $terminal",
"$lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )",
"$lexer_target = :stack_push",
"$lexer_target = :action",
"$lexer_target = :null -> Null( )",
],
96: [
"$_gen10 = list($ll1_rule)",
],
97: [
"$grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )",
],
98: [
"$lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )",
],
99: [
"$ast_transform = :arrow $ast_transform_sub -> $1",
],
100: [
"$_gen18 = list($macro_parameter,:comma)",
],
101: [
"$_gen15 = $binding_power",
"$_gen15 = :_empty",
],
}
# Map from rule id to the human-readable grammar production it encodes.
# Used by the parse_* functions below for error messages and ctx.rule
# bookkeeping (each parse_* sets ctx.rule = rules[n] before consuming tokens).
rules = {
    0: "$_gen0 = list($body_element)",
    1: "$grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )",
    2: "$body_element = $body_element_sub",
    3: "$body_element_sub = $lexer",
    4: "$body_element_sub = $parser",
    5: "$_gen1 = list($lexer_atom)",
    6: "$lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )",
    7: "$lexer_atom = $lexer_regex",
    8: "$lexer_atom = $lexer_mode",
    9: "$lexer_atom = $lexer_partials",
    10: "$lexer_atom = $lexer_code",
    11: "$lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )",
    12: "$_gen2 = list($regex_partial)",
    13: "$lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )",
    14: "$regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )",
    15: "$lexer_regex = $enumerated_regex",
    16: "$_gen3 = $regex_options",
    17: "$_gen3 = :_empty",
    18: "$_gen4 = list($lexer_target)",
    19: "$lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )",
    20: "$_gen5 = list($regex_enumeration)",
    21: "$enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )",
    22: "$_gen6 = $regex_enumeration_options",
    23: "$_gen6 = :_empty",
    24: "$regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )",
    25: "$_gen7 = list(:identifier,:comma)",
    26: "$regex_enumeration_options = :lparen $_gen7 :rparen -> $1",
    27: "$regex_options = :lbrace $_gen7 :rbrace -> $1",
    28: "$lexer_target = $terminal",
    29: "$_gen8 = $terminal",
    30: "$_gen8 = :_empty",
    31: "$lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )",
    32: "$lexer_target = :stack_push",
    33: "$lexer_target = :action",
    34: "$_gen9 = $match_group",
    35: "$_gen9 = :_empty",
    36: "$terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )",
    37: "$match_group = :lsquare :integer :rsquare -> $1",
    38: "$match_group = :no_group",
    39: "$lexer_target = :null -> Null( )",
    40: "$lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )",
    41: "$parser = $parser_ll1",
    42: "$parser = $parser_expression",
    43: "$_gen10 = list($ll1_rule)",
    44: "$parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )",
    45: "$ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )",
    46: "$_gen11 = list($rule,:pipe)",
    47: "$ll1_rule_rhs = $_gen11",
    48: "$_gen12 = list($morpheme)",
    49: "$_gen13 = $ast_transform",
    50: "$_gen13 = :_empty",
    51: "$rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )",
    52: "$ll1_rule_rhs = :null -> NullProduction( )",
    53: "$ll1_rule_rhs = $parser",
    54: "$_gen14 = list($expression_rule)",
    55: "$parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )",
    56: "$_gen15 = $binding_power",
    57: "$_gen15 = :_empty",
    58: "$expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )",
    59: "$_gen16 = $led",
    60: "$_gen16 = :_empty",
    61: "$expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )",
    62: "$expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )",
    63: "$expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )",
    64: "$nud = $_gen12",
    65: "$led = :expression_divider $_gen12 -> $1",
    66: "$binding_power = :lparen $precedence :rparen -> $1",
    67: "$precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )",
    68: "$binding_power_marker = :asterisk",
    69: "$binding_power_marker = :dash",
    70: "$associativity = :left",
    71: "$associativity = :right",
    72: "$associativity = :unary",
    73: "$morpheme = :terminal",
    74: "$morpheme = :nonterminal",
    75: "$morpheme = $macro",
    76: "$ast_transform = :arrow $ast_transform_sub -> $1",
    77: "$_gen17 = list($ast_parameter,:comma)",
    78: "$ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )",
    79: "$ast_transform_sub = :nonterminal_reference",
    80: "$ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )",
    81: "$_gen18 = list($macro_parameter,:comma)",
    82: "$macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )",
    83: "$macro_parameter = :nonterminal",
    84: "$macro_parameter = :terminal",
    85: "$macro_parameter = :string",
    86: "$macro_parameter = :integer",
    87: "$macro_parameter = :null",
}
def is_terminal(id):
    """Return True when *id* is a valid terminal identifier (an int in 0..45)."""
    return isinstance(id, int) and id in range(46)
def parse(tokens, errors=None, start=None):
    """Parse a full grammar from *tokens* and return its parse tree.

    *tokens* may be a raw source string (lexed here) or an existing token
    stream.  A default error handler is created when *errors* is None.
    Raises, via the error handler, when tokens remain after the grammar
    has been consumed.
    """
    handler = DefaultSyntaxErrorHandler() if errors is None else errors
    if isinstance(tokens, str):
        tokens = lex(tokens, 'string', handler)
    ctx = ParserContext(tokens, handler)
    tree = parse_grammar(ctx)
    if tokens.current() != None:
        raise ctx.errors.excess_tokens()
    return tree
def expect(ctx, terminal_id):
    """Consume and return the current token, which must match *terminal_id*.

    Raises a syntax error (via ctx.errors) when the stream is exhausted,
    when the current token has a different id, or when the token that
    follows is not a known terminal.
    """
    token = ctx.tokens.current()
    if not token:
        raise ctx.errors.no_more_tokens(ctx.nonterminal, terminals[terminal_id], ctx.tokens.last())
    if token.id != terminal_id:
        raise ctx.errors.unexpected_symbol(ctx.nonterminal, token, [terminals[terminal_id]], ctx.rule)
    upcoming = ctx.tokens.advance()
    if upcoming and not is_terminal(upcoming.id):
        raise ctx.errors.invalid_terminal(ctx.nonterminal, upcoming)
    return token
def parse__gen18(ctx):
    """Parse list nonterminal _gen18: $macro_parameter items separated by token 38 (:comma per rule 81); may be empty."""
    tree = ParseTree(NonTerminal(100, '_gen18'))
    tree.list = True;
    tree.list_separator_id = 38
    ctx.nonterminal = "_gen18"
    # Epsilon production: current token is in FOLLOW(_gen18) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[100]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(100)):
        tree.add(parse_macro_parameter(ctx))
        ctx.nonterminal = "_gen18" # Horrible -- because parse_* can reset this
        if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
            tree.add(expect(ctx, 38));
        else:
            break
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen5(ctx):
    """Parse list nonterminal _gen5: zero or more $regex_enumeration items (rule 20), no separator."""
    tree = ParseTree(NonTerminal(75, '_gen5'))
    tree.list = True;
    ctx.nonterminal = "_gen5"
    # Epsilon production: current token is in FOLLOW(_gen5) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[75]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(75)):
        tree.add(parse_regex_enumeration(ctx))
        ctx.nonterminal = "_gen5" # Horrible -- because parse_* can reset this
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen11(ctx):
    """Parse list nonterminal _gen11: $rule items separated by token 40 (:pipe per rule 46); may be empty."""
    tree = ParseTree(NonTerminal(77, '_gen11'))
    tree.list = True;
    tree.list_separator_id = 40
    ctx.nonterminal = "_gen11"
    # Epsilon production: current token is in FOLLOW(_gen11) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[77]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(77)):
        tree.add(parse_rule(ctx))
        ctx.nonterminal = "_gen11" # Horrible -- because parse_* can reset this
        if ctx.tokens.current() is not None and ctx.tokens.current().id == 40:
            tree.add(expect(ctx, 40));
        else:
            break
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen17(ctx):
    """Parse list nonterminal _gen17: $ast_parameter items separated by token 38 (:comma per rule 77); may be empty."""
    tree = ParseTree(NonTerminal(93, '_gen17'))
    tree.list = True;
    tree.list_separator_id = 38
    ctx.nonterminal = "_gen17"
    # Epsilon production: current token is in FOLLOW(_gen17) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[93]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(93)):
        tree.add(parse_ast_parameter(ctx))
        ctx.nonterminal = "_gen17" # Horrible -- because parse_* can reset this
        if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
            tree.add(expect(ctx, 38));
        else:
            break
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen1(ctx):
    """Parse list nonterminal _gen1: zero or more $lexer_atom items (rule 5), no separator."""
    tree = ParseTree(NonTerminal(61, '_gen1'))
    tree.list = True;
    ctx.nonterminal = "_gen1"
    # Epsilon production: current token is in FOLLOW(_gen1) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[61]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(61)):
        tree.add(parse_lexer_atom(ctx))
        ctx.nonterminal = "_gen1" # Horrible -- because parse_* can reset this
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen10(ctx):
    """Parse list nonterminal _gen10: zero or more $ll1_rule items (rule 43), no separator."""
    tree = ParseTree(NonTerminal(96, '_gen10'))
    tree.list = True;
    ctx.nonterminal = "_gen10"
    # Epsilon production: current token is in FOLLOW(_gen10) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[96]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(96)):
        tree.add(parse_ll1_rule(ctx))
        ctx.nonterminal = "_gen10" # Horrible -- because parse_* can reset this
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen0(ctx):
    """Parse list nonterminal _gen0: zero or more $body_element items (rule 0), no separator."""
    tree = ParseTree(NonTerminal(62, '_gen0'))
    tree.list = True;
    ctx.nonterminal = "_gen0"
    # Epsilon production: current token is in FOLLOW(_gen0) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[62]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(62)):
        tree.add(parse_body_element(ctx))
        ctx.nonterminal = "_gen0" # Horrible -- because parse_* can reset this
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen4(ctx):
    """Parse list nonterminal _gen4: zero or more $lexer_target items (rule 18), no separator."""
    tree = ParseTree(NonTerminal(81, '_gen4'))
    tree.list = True;
    ctx.nonterminal = "_gen4"
    # Epsilon production: current token is in FOLLOW(_gen4) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[81]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(81)):
        tree.add(parse_lexer_target(ctx))
        ctx.nonterminal = "_gen4" # Horrible -- because parse_* can reset this
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen2(ctx):
    """Parse list nonterminal _gen2: zero or more $regex_partial items (rule 12), no separator."""
    tree = ParseTree(NonTerminal(63, '_gen2'))
    tree.list = True;
    ctx.nonterminal = "_gen2"
    # Epsilon production: current token is in FOLLOW(_gen2) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[63]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(63)):
        tree.add(parse_regex_partial(ctx))
        ctx.nonterminal = "_gen2" # Horrible -- because parse_* can reset this
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen12(ctx):
    """Parse list nonterminal _gen12: zero or more $morpheme items (rule 48), no separator."""
    tree = ParseTree(NonTerminal(82, '_gen12'))
    tree.list = True;
    ctx.nonterminal = "_gen12"
    # Epsilon production: current token is in FOLLOW(_gen12) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[82]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(82)):
        tree.add(parse_morpheme(ctx))
        ctx.nonterminal = "_gen12" # Horrible -- because parse_* can reset this
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen7(ctx):
    """Parse list nonterminal _gen7: :identifier tokens (id 17) separated by token 38 (:comma per rule 25); may be empty."""
    tree = ParseTree(NonTerminal(83, '_gen7'))
    tree.list = True;
    tree.list_separator_id = 38
    ctx.nonterminal = "_gen7"
    # Epsilon production: current token is in FOLLOW(_gen7) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[83]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(83)):
        tree.add(expect(ctx, 17))
        if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
            tree.add(expect(ctx, 38));
        else:
            break
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen14(ctx):
    """Parse list nonterminal _gen14: zero or more $expression_rule items (rule 54), no separator."""
    tree = ParseTree(NonTerminal(69, '_gen14'))
    tree.list = True;
    ctx.nonterminal = "_gen14"
    # Epsilon production: current token is in FOLLOW(_gen14) but not FIRST.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[69]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
          (ctx.tokens.current() is not None and \
          ctx.tokens.current().id in nonterminal_first.get(69)):
        tree.add(parse_expression_rule(ctx))
        ctx.nonterminal = "_gen14" # Horrible -- because parse_* can reset this
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen3(ctx):
    """Parse optional nonterminal _gen3: $regex_options (rule 16) or epsilon (rule 17); returns an empty tree when absent."""
    current = ctx.tokens.current()
    rule = table[0][current.id] if current else -1
    tree = ParseTree(NonTerminal(46, '_gen3'))
    ctx.nonterminal = "_gen3"
    if current != None and current.id in nonterminal_follow[46] and current.id not in nonterminal_first[46]:
        return tree
    if current == None:
        return tree
    if rule == 16: # $_gen3 = $regex_options
        ctx.rule = rules[16]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_regex_options(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_associativity(ctx):
    """Parse $associativity: one of :left, :right, :unary (rules 70-72); raises on EOF or an unexpected token."""
    current = ctx.tokens.current()
    rule = table[1][current.id] if current else -1
    tree = ParseTree(NonTerminal(47, 'associativity'))
    ctx.nonterminal = "associativity"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 70: # $associativity = :left
        ctx.rule = rules[70]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 22) # :left
        tree.add(t)
        return tree
    elif rule == 71: # $associativity = :right
        ctx.rule = rules[71]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 24) # :right
        tree.add(t)
        return tree
    elif rule == 72: # $associativity = :unary
        ctx.rule = rules[72]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 4) # :unary
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[47] if x >=0],
      rules[72]
    )
def parse__gen8(ctx):
    """Parse optional nonterminal _gen8: $terminal (rule 29) or epsilon (rule 30); returns an empty tree when absent."""
    current = ctx.tokens.current()
    rule = table[2][current.id] if current else -1
    tree = ParseTree(NonTerminal(48, '_gen8'))
    ctx.nonterminal = "_gen8"
    if current != None and current.id in nonterminal_follow[48] and current.id not in nonterminal_first[48]:
        return tree
    if current == None:
        return tree
    if rule == 29: # $_gen8 = $terminal
        ctx.rule = rules[29]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_terminal(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_parser_ll1(ctx):
    """Parse $parser_ll1 (rule 44): ':parser { ll1_rule* }' producing a Parser(rules=$2) AST node."""
    current = ctx.tokens.current()
    rule = table[3][current.id] if current else -1
    tree = ParseTree(NonTerminal(49, 'parser_ll1'))
    ctx.nonterminal = "parser_ll1"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 44: # $parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )
        ctx.rule = rules[44]
        ast_parameters = OrderedDict([
          ('rules', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Parser', ast_parameters)
        t = expect(ctx, 36) # :parser
        tree.add(t)
        t = expect(ctx, 2) # :lbrace
        tree.add(t)
        subtree = parse__gen10(ctx)
        tree.add(subtree)
        t = expect(ctx, 15) # :rbrace
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[49] if x >=0],
      rules[44]
    )
def parse_morpheme(ctx):
    """Parse $morpheme: a :terminal, a :nonterminal, or a $macro (rules 73-75)."""
    current = ctx.tokens.current()
    rule = table[4][current.id] if current else -1
    tree = ParseTree(NonTerminal(50, 'morpheme'))
    ctx.nonterminal = "morpheme"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 73: # $morpheme = :terminal
        ctx.rule = rules[73]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 34) # :terminal
        tree.add(t)
        return tree
    elif rule == 74: # $morpheme = :nonterminal
        ctx.rule = rules[74]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 42) # :nonterminal
        tree.add(t)
        return tree
    elif rule == 75: # $morpheme = $macro
        ctx.rule = rules[75]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_macro(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[50] if x >=0],
      rules[75]
    )
def parse_macro_parameter(ctx):
    """Parse $macro_parameter: one of :nonterminal, :terminal, :string, :integer, :null (rules 83-87)."""
    current = ctx.tokens.current()
    rule = table[5][current.id] if current else -1
    tree = ParseTree(NonTerminal(51, 'macro_parameter'))
    ctx.nonterminal = "macro_parameter"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 83: # $macro_parameter = :nonterminal
        ctx.rule = rules[83]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 42) # :nonterminal
        tree.add(t)
        return tree
    elif rule == 84: # $macro_parameter = :terminal
        ctx.rule = rules[84]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 34) # :terminal
        tree.add(t)
        return tree
    elif rule == 85: # $macro_parameter = :string
        ctx.rule = rules[85]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 31) # :string
        tree.add(t)
        return tree
    elif rule == 86: # $macro_parameter = :integer
        ctx.rule = rules[86]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 21) # :integer
        tree.add(t)
        return tree
    elif rule == 87: # $macro_parameter = :null
        ctx.rule = rules[87]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 45) # :null
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[51] if x >=0],
      rules[87]
    )
def parse__gen13(ctx):
    """Parse optional nonterminal _gen13: $ast_transform (rule 49) or epsilon (rule 50); returns an empty tree when absent."""
    current = ctx.tokens.current()
    rule = table[6][current.id] if current else -1
    tree = ParseTree(NonTerminal(52, '_gen13'))
    ctx.nonterminal = "_gen13"
    if current != None and current.id in nonterminal_follow[52] and current.id not in nonterminal_first[52]:
        return tree
    if current == None:
        return tree
    if rule == 49: # $_gen13 = $ast_transform
        ctx.rule = rules[49]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_ast_transform(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse__gen16(ctx):
    """Parse optional nonterminal _gen16: $led (rule 59) or epsilon (rule 60); returns an empty tree when absent."""
    current = ctx.tokens.current()
    rule = table[7][current.id] if current else -1
    tree = ParseTree(NonTerminal(53, '_gen16'))
    ctx.nonterminal = "_gen16"
    if current != None and current.id in nonterminal_follow[53] and current.id not in nonterminal_first[53]:
        return tree
    if current == None:
        return tree
    if rule == 59: # $_gen16 = $led
        ctx.rule = rules[59]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_led(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_lexer_atom(ctx):
    """Parse $lexer_atom: a $lexer_regex, $lexer_mode, $lexer_partials, or $lexer_code (rules 7-10)."""
    current = ctx.tokens.current()
    rule = table[8][current.id] if current else -1
    tree = ParseTree(NonTerminal(54, 'lexer_atom'))
    ctx.nonterminal = "lexer_atom"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 7: # $lexer_atom = $lexer_regex
        ctx.rule = rules[7]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer_regex(ctx)
        tree.add(subtree)
        return tree
    elif rule == 8: # $lexer_atom = $lexer_mode
        ctx.rule = rules[8]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer_mode(ctx)
        tree.add(subtree)
        return tree
    elif rule == 9: # $lexer_atom = $lexer_partials
        ctx.rule = rules[9]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer_partials(ctx)
        tree.add(subtree)
        return tree
    elif rule == 10: # $lexer_atom = $lexer_code
        ctx.rule = rules[10]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer_code(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[54] if x >=0],
      rules[10]
    )
def parse_lexer_mode(ctx):
    """Parse $lexer_mode (rule 40): 'mode <identifier> { lexer_atom* }' producing a Mode(name=$2, atoms=$5) AST node."""
    current = ctx.tokens.current()
    rule = table[9][current.id] if current else -1
    tree = ParseTree(NonTerminal(55, 'lexer_mode'))
    ctx.nonterminal = "lexer_mode"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 40: # $lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )
        ctx.rule = rules[40]
        ast_parameters = OrderedDict([
          ('name', 2),
          ('atoms', 5),
        ])
        tree.astTransform = AstTransformNodeCreator('Mode', ast_parameters)
        t = expect(ctx, 43) # :mode
        tree.add(t)
        t = expect(ctx, 10) # :langle
        tree.add(t)
        t = expect(ctx, 17) # :identifier
        tree.add(t)
        t = expect(ctx, 19) # :rangle
        tree.add(t)
        t = expect(ctx, 2) # :lbrace
        tree.add(t)
        subtree = parse__gen1(ctx)
        tree.add(subtree)
        t = expect(ctx, 15) # :rbrace
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[55] if x >=0],
      rules[40]
    )
def parse_nud(ctx):
    """Parse $nud (rule 64): a (possibly empty) morpheme list via _gen12; returns an empty tree when epsilon applies."""
    current = ctx.tokens.current()
    rule = table[10][current.id] if current else -1
    tree = ParseTree(NonTerminal(56, 'nud'))
    ctx.nonterminal = "nud"
    if current != None and current.id in nonterminal_follow[56] and current.id not in nonterminal_first[56]:
        return tree
    if current == None:
        return tree
    if rule == 64: # $nud = $_gen12
        ctx.rule = rules[64]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_ast_transform_sub(ctx):
    """Parse $ast_transform_sub: an AstTransformation call 'name(params)' (rule 78) or a :nonterminal_reference (rule 79)."""
    current = ctx.tokens.current()
    rule = table[11][current.id] if current else -1
    tree = ParseTree(NonTerminal(57, 'ast_transform_sub'))
    ctx.nonterminal = "ast_transform_sub"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 78: # $ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )
        ctx.rule = rules[78]
        ast_parameters = OrderedDict([
          ('name', 0),
          ('parameters', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('AstTransformation', ast_parameters)
        t = expect(ctx, 17) # :identifier
        tree.add(t)
        t = expect(ctx, 37) # :lparen
        tree.add(t)
        subtree = parse__gen17(ctx)
        tree.add(subtree)
        t = expect(ctx, 23) # :rparen
        tree.add(t)
        return tree
    elif rule == 79: # $ast_transform_sub = :nonterminal_reference
        ctx.rule = rules[79]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 44) # :nonterminal_reference
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[57] if x >=0],
      rules[79]
    )
def parse_enumerated_regex(ctx):
    """Parse $enumerated_regex (rule 21): 'regex_enum { ... } -> targets' producing an EnumeratedRegex(enums=$2, onmatch=$5) AST node."""
    current = ctx.tokens.current()
    rule = table[12][current.id] if current else -1
    tree = ParseTree(NonTerminal(58, 'enumerated_regex'))
    ctx.nonterminal = "enumerated_regex"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 21: # $enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )
        ctx.rule = rules[21]
        ast_parameters = OrderedDict([
          ('enums', 2),
          ('onmatch', 5),
        ])
        tree.astTransform = AstTransformNodeCreator('EnumeratedRegex', ast_parameters)
        t = expect(ctx, 0) # :regex_enum
        tree.add(t)
        t = expect(ctx, 2) # :lbrace
        tree.add(t)
        subtree = parse__gen5(ctx)
        tree.add(subtree)
        t = expect(ctx, 15) # :rbrace
        tree.add(t)
        t = expect(ctx, 3) # :arrow
        tree.add(t)
        subtree = parse__gen4(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[58] if x >=0],
      rules[21]
    )
def parse_body_element_sub(ctx):
    """Parse $body_element_sub: either a $lexer or a $parser section (rules 3-4)."""
    current = ctx.tokens.current()
    rule = table[13][current.id] if current else -1
    tree = ParseTree(NonTerminal(59, 'body_element_sub'))
    ctx.nonterminal = "body_element_sub"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 3: # $body_element_sub = $lexer
        ctx.rule = rules[3]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer(ctx)
        tree.add(subtree)
        return tree
    elif rule == 4: # $body_element_sub = $parser
        ctx.rule = rules[4]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_parser(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[59] if x >=0],
      rules[4]
    )
def parse__gen6(ctx):
    """Parse optional nonterminal _gen6: $regex_enumeration_options (rule 22) or epsilon (rule 23); returns an empty tree when absent."""
    current = ctx.tokens.current()
    rule = table[14][current.id] if current else -1
    tree = ParseTree(NonTerminal(60, '_gen6'))
    ctx.nonterminal = "_gen6"
    if current != None and current.id in nonterminal_follow[60] and current.id not in nonterminal_first[60]:
        return tree
    if current == None:
        return tree
    if rule == 22: # $_gen6 = $regex_enumeration_options
        ctx.rule = rules[22]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_regex_enumeration_options(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_regex_enumeration_options(ctx):
    """Parse $regex_enumeration_options (rule 26): '( identifier, ... )'; the AST substitutes the inner list ($1)."""
    current = ctx.tokens.current()
    rule = table[18][current.id] if current else -1
    tree = ParseTree(NonTerminal(64, 'regex_enumeration_options'))
    ctx.nonterminal = "regex_enumeration_options"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 26: # $regex_enumeration_options = :lparen $_gen7 :rparen -> $1
        ctx.rule = rules[26]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 37) # :lparen
        tree.add(t)
        subtree = parse__gen7(ctx)
        tree.add(subtree)
        t = expect(ctx, 23) # :rparen
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[64] if x >=0],
      rules[26]
    )
def parse_terminal(ctx):
    """Parse $terminal (rule 36): ':terminal [match_group]' producing a Terminal(name=$0, group=$1) AST node."""
    current = ctx.tokens.current()
    rule = table[19][current.id] if current else -1
    tree = ParseTree(NonTerminal(65, 'terminal'))
    ctx.nonterminal = "terminal"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 36: # $terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )
        ctx.rule = rules[36]
        ast_parameters = OrderedDict([
          ('name', 0),
          ('group', 1),
        ])
        tree.astTransform = AstTransformNodeCreator('Terminal', ast_parameters)
        t = expect(ctx, 34) # :terminal
        tree.add(t)
        subtree = parse__gen9(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[65] if x >=0],
      rules[36]
    )
def parse_expression_rule(ctx):
    """Parse $expression_rule (rule 58): '[binding_power] expr_rule_hint nonterminal = production' producing an ExpressionRule AST node."""
    current = ctx.tokens.current()
    rule = table[20][current.id] if current else -1
    tree = ParseTree(NonTerminal(66, 'expression_rule'))
    ctx.nonterminal = "expression_rule"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 58: # $expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )
        ctx.rule = rules[58]
        ast_parameters = OrderedDict([
          ('precedence', 0),
          ('nonterminal', 2),
          ('production', 4),
        ])
        tree.astTransform = AstTransformNodeCreator('ExpressionRule', ast_parameters)
        subtree = parse__gen15(ctx)
        tree.add(subtree)
        t = expect(ctx, 12) # :expr_rule_hint
        tree.add(t)
        t = expect(ctx, 42) # :nonterminal
        tree.add(t)
        t = expect(ctx, 7) # :equals
        tree.add(t)
        subtree = parse_expression_rule_production(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[66] if x >=0],
      rules[58]
    )
def parse_binding_power_marker(ctx):
    """Parse $binding_power_marker: :asterisk or :dash (rules 68-69)."""
    current = ctx.tokens.current()
    rule = table[21][current.id] if current else -1
    tree = ParseTree(NonTerminal(67, 'binding_power_marker'))
    ctx.nonterminal = "binding_power_marker"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 68: # $binding_power_marker = :asterisk
        ctx.rule = rules[68]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 29) # :asterisk
        tree.add(t)
        return tree
    elif rule == 69: # $binding_power_marker = :dash
        ctx.rule = rules[69]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 1) # :dash
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[67] if x >=0],
      rules[69]
    )
def parse_ll1_rule(ctx):
    """Parse $ll1_rule (rule 45): 'll1_rule_hint nonterminal = rhs' producing a Rule(nonterminal=$1, production=$3) AST node."""
    current = ctx.tokens.current()
    rule = table[22][current.id] if current else -1
    tree = ParseTree(NonTerminal(68, 'll1_rule'))
    ctx.nonterminal = "ll1_rule"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 45: # $ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )
        ctx.rule = rules[45]
        ast_parameters = OrderedDict([
          ('nonterminal', 1),
          ('production', 3),
        ])
        tree.astTransform = AstTransformNodeCreator('Rule', ast_parameters)
        t = expect(ctx, 30) # :ll1_rule_hint
        tree.add(t)
        t = expect(ctx, 42) # :nonterminal
        tree.add(t)
        t = expect(ctx, 7) # :equals
        tree.add(t)
        subtree = parse_ll1_rule_rhs(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[68] if x >=0],
      rules[45]
    )
def parse_lexer_code(ctx):
    """Parse $lexer_code (rule 11): ':code_start :language :code' producing a LexerCode(language=$1, code=$2) AST node."""
    current = ctx.tokens.current()
    rule = table[24][current.id] if current else -1
    tree = ParseTree(NonTerminal(70, 'lexer_code'))
    ctx.nonterminal = "lexer_code"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 11: # $lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )
        ctx.rule = rules[11]
        ast_parameters = OrderedDict([
          ('language', 1),
          ('code', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('LexerCode', ast_parameters)
        t = expect(ctx, 9) # :code_start
        tree.add(t)
        t = expect(ctx, 20) # :language
        tree.add(t)
        t = expect(ctx, 16) # :code
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[70] if x >=0],
      rules[11]
    )
def parse_expression_rule_production(ctx):
    """Parse 'expression_rule_production': one of the mixfix/prefix/infix expression-rule forms."""
    current = ctx.tokens.current()
    rule = table[25][current.id] if current else -1
    tree = ParseTree(NonTerminal(71, 'expression_rule_production'))
    ctx.nonterminal = "expression_rule_production"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 61: # $expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )
        ctx.rule = rules[61]
        ast_parameters = OrderedDict([
            ('nud', 1),
            ('nud_ast', 2),
            ('led', 3),
            ('ast', 4),
        ])
        tree.astTransform = AstTransformNodeCreator('MixfixProduction', ast_parameters)
        t = expect(ctx, 25) # :mixfix_rule_hint
        tree.add(t)
        subtree = parse_nud(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        subtree = parse__gen16(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        return tree
    elif rule == 62: # $expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )
        ctx.rule = rules[62]
        ast_parameters = OrderedDict([
            ('morphemes', 1),
            ('ast', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('PrefixProduction', ast_parameters)
        t = expect(ctx, 28) # :prefix_rule_hint
        tree.add(t)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        return tree
    elif rule == 63: # $expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )
        ctx.rule = rules[63]
        ast_parameters = OrderedDict([
            ('morphemes', 1),
            ('ast', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('InfixProduction', ast_parameters)
        t = expect(ctx, 6) # :infix_rule_hint
        tree.add(t)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[71] if x >=0],
        rules[63]
    )
def parse_rule(ctx):
    """Parse the (nullable) 'rule' nonterminal: $_gen12 $_gen13 -> Production; empty tree on epsilon."""
    current = ctx.tokens.current()
    rule = table[26][current.id] if current else -1
    tree = ParseTree(NonTerminal(72, 'rule'))
    ctx.nonterminal = "rule"
    # Epsilon production: the lookahead starts what FOLLOWS 'rule', not 'rule' itself.
    if current != None and current.id in nonterminal_follow[72] and current.id not in nonterminal_first[72]:
        return tree
    if current == None:
        return tree
    if rule == 51: # $rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )
        ctx.rule = rules[51]
        ast_parameters = OrderedDict([
            ('morphemes', 0),
            ('ast', 1),
        ])
        tree.astTransform = AstTransformNodeCreator('Production', ast_parameters)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_ast_parameter(ctx):
    """Parse 'ast_parameter': :identifier :equals :nonterminal_reference -> AstParameter."""
    current = ctx.tokens.current()
    rule = table[27][current.id] if current else -1
    tree = ParseTree(NonTerminal(73, 'ast_parameter'))
    ctx.nonterminal = "ast_parameter"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 80: # $ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )
        ctx.rule = rules[80]
        ast_parameters = OrderedDict([
            ('name', 0),
            ('index', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('AstParameter', ast_parameters)
        t = expect(ctx, 17) # :identifier
        tree.add(t)
        t = expect(ctx, 7) # :equals
        tree.add(t)
        t = expect(ctx, 44) # :nonterminal_reference
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[73] if x >=0],
        rules[80]
    )
def parse_body_element(ctx):
    """Parse the 'body_element' nonterminal: $body_element = $body_element_sub."""
    token = ctx.tokens.current()
    tree = ParseTree(NonTerminal(74, 'body_element'))
    ctx.nonterminal = "body_element"
    if token is None:
        raise ctx.errors.unexpected_eof()
    if table[28][token.id] == 2:  # $body_element = $body_element_sub
        ctx.rule = rules[2]
        tree.astTransform = AstTransformSubstitution(0)
        tree.add(parse_body_element_sub(ctx))
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[74] if x >= 0],
        rules[2]
    )
def parse_match_group(ctx):
    """Parse 'match_group': either a bracketed group index ':lsquare :integer :rsquare' or :no_group."""
    current = ctx.tokens.current()
    rule = table[30][current.id] if current else -1
    tree = ParseTree(NonTerminal(76, 'match_group'))
    ctx.nonterminal = "match_group"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 37: # $match_group = :lsquare :integer :rsquare -> $1
        ctx.rule = rules[37]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 35) # :lsquare
        tree.add(t)
        t = expect(ctx, 21) # :integer
        tree.add(t)
        t = expect(ctx, 5) # :rsquare
        tree.add(t)
        return tree
    elif rule == 38: # $match_group = :no_group
        ctx.rule = rules[38]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 11) # :no_group
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[76] if x >=0],
        rules[38]
    )
def parse_binding_power(ctx):
    """Parse 'binding_power': :lparen $precedence :rparen -> $1."""
    current = ctx.tokens.current()
    rule = table[32][current.id] if current else -1
    tree = ParseTree(NonTerminal(78, 'binding_power'))
    ctx.nonterminal = "binding_power"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 66: # $binding_power = :lparen $precedence :rparen -> $1
        ctx.rule = rules[66]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 37) # :lparen
        tree.add(t)
        subtree = parse_precedence(ctx)
        tree.add(subtree)
        t = expect(ctx, 23) # :rparen
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[78] if x >=0],
        rules[66]
    )
def parse_regex_enumeration(ctx):
    """Parse 'regex_enumeration': :identifier :colon :regex $_gen6 -> RegexEnum."""
    current = ctx.tokens.current()
    rule = table[33][current.id] if current else -1
    tree = ParseTree(NonTerminal(79, 'regex_enumeration'))
    ctx.nonterminal = "regex_enumeration"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 24: # $regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )
        ctx.rule = rules[24]
        ast_parameters = OrderedDict([
            ('language', 0),
            ('regex', 2),
            ('options', 3),
        ])
        tree.astTransform = AstTransformNodeCreator('RegexEnum', ast_parameters)
        t = expect(ctx, 17) # :identifier
        tree.add(t)
        t = expect(ctx, 26) # :colon
        tree.add(t)
        t = expect(ctx, 14) # :regex
        tree.add(t)
        subtree = parse__gen6(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[79] if x >=0],
        rules[24]
    )
def parse_ll1_rule_rhs(ctx):
    """Parse the (nullable) 'll1_rule_rhs': a production list, :null, or a nested parser block."""
    current = ctx.tokens.current()
    rule = table[34][current.id] if current else -1
    tree = ParseTree(NonTerminal(80, 'll1_rule_rhs'))
    ctx.nonterminal = "ll1_rule_rhs"
    # Epsilon production: the lookahead starts what FOLLOWS this nonterminal.
    if current != None and current.id in nonterminal_follow[80] and current.id not in nonterminal_first[80]:
        return tree
    if current == None:
        return tree
    if rule == 47: # $ll1_rule_rhs = $_gen11
        ctx.rule = rules[47]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse__gen11(ctx)
        tree.add(subtree)
        return tree
    elif rule == 52: # $ll1_rule_rhs = :null -> NullProduction(  )
        ctx.rule = rules[52]
        ast_parameters = OrderedDict([
        ])
        tree.astTransform = AstTransformNodeCreator('NullProduction', ast_parameters)
        t = expect(ctx, 45) # :null
        tree.add(t)
        return tree
    elif rule == 53: # $ll1_rule_rhs = $parser
        ctx.rule = rules[53]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_parser(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_regex_options(ctx):
    """Parse 'regex_options': :lbrace $_gen7 :rbrace -> $1."""
    current = ctx.tokens.current()
    rule = table[38][current.id] if current else -1
    tree = ParseTree(NonTerminal(84, 'regex_options'))
    ctx.nonterminal = "regex_options"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 27: # $regex_options = :lbrace $_gen7 :rbrace -> $1
        ctx.rule = rules[27]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 2) # :lbrace
        tree.add(t)
        subtree = parse__gen7(ctx)
        tree.add(subtree)
        t = expect(ctx, 15) # :rbrace
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[84] if x >=0],
        rules[27]
    )
def parse_regex_partial(ctx):
    """Parse 'regex_partial': :regex :arrow :regex_partial -> RegexPartial."""
    current = ctx.tokens.current()
    rule = table[39][current.id] if current else -1
    tree = ParseTree(NonTerminal(85, 'regex_partial'))
    ctx.nonterminal = "regex_partial"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 14: # $regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )
        ctx.rule = rules[14]
        ast_parameters = OrderedDict([
            ('regex', 0),
            ('name', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('RegexPartial', ast_parameters)
        t = expect(ctx, 14) # :regex
        tree.add(t)
        t = expect(ctx, 3) # :arrow
        tree.add(t)
        t = expect(ctx, 18) # :regex_partial
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[85] if x >=0],
        rules[14]
    )
def parse_led(ctx):
    """Parse 'led': :expression_divider $_gen12 -> $1."""
    current = ctx.tokens.current()
    rule = table[40][current.id] if current else -1
    tree = ParseTree(NonTerminal(86, 'led'))
    ctx.nonterminal = "led"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 65: # $led = :expression_divider $_gen12 -> $1
        ctx.rule = rules[65]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 27) # :expression_divider
        tree.add(t)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[86] if x >=0],
        rules[65]
    )
def parse_precedence(ctx):
    """Parse 'precedence': $binding_power_marker :colon $associativity -> Precedence."""
    current = ctx.tokens.current()
    rule = table[41][current.id] if current else -1
    tree = ParseTree(NonTerminal(87, 'precedence'))
    ctx.nonterminal = "precedence"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 67: # $precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )
        ctx.rule = rules[67]
        ast_parameters = OrderedDict([
            ('marker', 0),
            ('associativity', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Precedence', ast_parameters)
        subtree = parse_binding_power_marker(ctx)
        tree.add(subtree)
        t = expect(ctx, 26) # :colon
        tree.add(t)
        subtree = parse_associativity(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[87] if x >=0],
        rules[67]
    )
def parse_parser_expression(ctx):
    """Parse 'parser_expression': :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser."""
    current = ctx.tokens.current()
    rule = table[42][current.id] if current else -1
    tree = ParseTree(NonTerminal(88, 'parser_expression'))
    ctx.nonterminal = "parser_expression"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 55: # $parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )
        ctx.rule = rules[55]
        ast_parameters = OrderedDict([
            ('rules', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('ExpressionParser', ast_parameters)
        t = expect(ctx, 41) # :parser_expression
        tree.add(t)
        t = expect(ctx, 2) # :lbrace
        tree.add(t)
        subtree = parse__gen14(ctx)
        tree.add(subtree)
        t = expect(ctx, 15) # :rbrace
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[88] if x >=0],
        rules[55]
    )
def parse_lexer(ctx):
    """Parse 'lexer': :lexer :lbrace $_gen1 :rbrace -> Lexer."""
    current = ctx.tokens.current()
    rule = table[43][current.id] if current else -1
    tree = ParseTree(NonTerminal(89, 'lexer'))
    ctx.nonterminal = "lexer"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 6: # $lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )
        ctx.rule = rules[6]
        ast_parameters = OrderedDict([
            ('atoms', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Lexer', ast_parameters)
        t = expect(ctx, 32) # :lexer
        tree.add(t)
        t = expect(ctx, 2) # :lbrace
        tree.add(t)
        subtree = parse__gen1(ctx)
        tree.add(subtree)
        t = expect(ctx, 15) # :rbrace
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[89] if x >=0],
        rules[6]
    )
def parse_macro(ctx):
    """Parse 'macro': :identifier :lparen $_gen18 :rparen -> Macro."""
    current = ctx.tokens.current()
    rule = table[44][current.id] if current else -1
    tree = ParseTree(NonTerminal(90, 'macro'))
    ctx.nonterminal = "macro"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 82: # $macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )
        ctx.rule = rules[82]
        ast_parameters = OrderedDict([
            ('name', 0),
            ('parameters', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Macro', ast_parameters)
        t = expect(ctx, 17) # :identifier
        tree.add(t)
        t = expect(ctx, 37) # :lparen
        tree.add(t)
        subtree = parse__gen18(ctx)
        tree.add(subtree)
        t = expect(ctx, 23) # :rparen
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[90] if x >=0],
        rules[82]
    )
def parse_lexer_regex(ctx):
    """Parse 'lexer_regex': either an $enumerated_regex or ':regex $_gen3 :arrow $_gen4 -> Regex'."""
    current = ctx.tokens.current()
    rule = table[45][current.id] if current else -1
    tree = ParseTree(NonTerminal(91, 'lexer_regex'))
    ctx.nonterminal = "lexer_regex"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 15: # $lexer_regex = $enumerated_regex
        ctx.rule = rules[15]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_enumerated_regex(ctx)
        tree.add(subtree)
        return tree
    elif rule == 19: # $lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )
        ctx.rule = rules[19]
        ast_parameters = OrderedDict([
            ('regex', 0),
            ('options', 1),
            ('onmatch', 3),
        ])
        tree.astTransform = AstTransformNodeCreator('Regex', ast_parameters)
        t = expect(ctx, 14) # :regex
        tree.add(t)
        subtree = parse__gen3(ctx)
        tree.add(subtree)
        t = expect(ctx, 3) # :arrow
        tree.add(t)
        subtree = parse__gen4(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[91] if x >=0],
        rules[19]
    )
def parse__gen9(ctx):
    """Parse the optional '_gen9' nonterminal: zero or one $match_group."""
    token = ctx.tokens.current()
    tree = ParseTree(NonTerminal(92, '_gen9'))
    ctx.nonterminal = "_gen9"
    if token is None:
        return tree
    # Epsilon: the lookahead belongs to what follows _gen9, not to _gen9 itself.
    if token.id in nonterminal_follow[92] and token.id not in nonterminal_first[92]:
        return tree
    if table[46][token.id] == 34:  # $_gen9 = $match_group
        ctx.rule = rules[34]
        tree.astTransform = AstTransformSubstitution(0)
        tree.add(parse_match_group(ctx))
    return tree
def parse_parser(ctx):
    """Parse 'parser': dispatch to either $parser_ll1 or $parser_expression."""
    current = ctx.tokens.current()
    rule = table[48][current.id] if current else -1
    tree = ParseTree(NonTerminal(94, 'parser'))
    ctx.nonterminal = "parser"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 41: # $parser = $parser_ll1
        ctx.rule = rules[41]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_parser_ll1(ctx)
        tree.add(subtree)
        return tree
    elif rule == 42: # $parser = $parser_expression
        ctx.rule = rules[42]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_parser_expression(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[94] if x >=0],
        rules[42]
    )
def parse_lexer_target(ctx):
    """Parse 'lexer_target': a terminal, lexer function call, :stack_push, :action, or :null -> Null."""
    current = ctx.tokens.current()
    rule = table[49][current.id] if current else -1
    tree = ParseTree(NonTerminal(95, 'lexer_target'))
    ctx.nonterminal = "lexer_target"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 28: # $lexer_target = $terminal
        ctx.rule = rules[28]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_terminal(ctx)
        tree.add(subtree)
        return tree
    elif rule == 31: # $lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )
        ctx.rule = rules[31]
        ast_parameters = OrderedDict([
            ('name', 0),
            ('terminal', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('LexerFunctionCall', ast_parameters)
        t = expect(ctx, 17) # :identifier
        tree.add(t)
        t = expect(ctx, 37) # :lparen
        tree.add(t)
        subtree = parse__gen8(ctx)
        tree.add(subtree)
        t = expect(ctx, 23) # :rparen
        tree.add(t)
        return tree
    elif rule == 32: # $lexer_target = :stack_push
        ctx.rule = rules[32]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 8) # :stack_push
        tree.add(t)
        return tree
    elif rule == 33: # $lexer_target = :action
        ctx.rule = rules[33]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 39) # :action
        tree.add(t)
        return tree
    elif rule == 39: # $lexer_target = :null -> Null(  )
        ctx.rule = rules[39]
        ast_parameters = OrderedDict([
        ])
        tree.astTransform = AstTransformNodeCreator('Null', ast_parameters)
        t = expect(ctx, 45) # :null
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[95] if x >=0],
        rules[39]
    )
def parse_grammar(ctx):
    """Parse the top-level 'grammar': :grammar :lbrace $_gen0 :rbrace -> Grammar."""
    current = ctx.tokens.current()
    rule = table[51][current.id] if current else -1
    tree = ParseTree(NonTerminal(97, 'grammar'))
    ctx.nonterminal = "grammar"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 1: # $grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )
        ctx.rule = rules[1]
        ast_parameters = OrderedDict([
            ('body', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Grammar', ast_parameters)
        t = expect(ctx, 33) # :grammar
        tree.add(t)
        t = expect(ctx, 2) # :lbrace
        tree.add(t)
        subtree = parse__gen0(ctx)
        tree.add(subtree)
        t = expect(ctx, 15) # :rbrace
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[97] if x >=0],
        rules[1]
    )
def parse_lexer_partials(ctx):
    """Parse 'lexer_partials': :partials :lbrace $_gen2 :rbrace -> RegexPartials."""
    current = ctx.tokens.current()
    rule = table[52][current.id] if current else -1
    tree = ParseTree(NonTerminal(98, 'lexer_partials'))
    ctx.nonterminal = "lexer_partials"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 13: # $lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )
        ctx.rule = rules[13]
        ast_parameters = OrderedDict([
            ('list', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('RegexPartials', ast_parameters)
        t = expect(ctx, 13) # :partials
        tree.add(t)
        t = expect(ctx, 2) # :lbrace
        tree.add(t)
        subtree = parse__gen2(ctx)
        tree.add(subtree)
        t = expect(ctx, 15) # :rbrace
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[98] if x >=0],
        rules[13]
    )
def parse_ast_transform(ctx):
    """Parse 'ast_transform': :arrow $ast_transform_sub -> $1."""
    current = ctx.tokens.current()
    rule = table[53][current.id] if current else -1
    tree = ParseTree(NonTerminal(99, 'ast_transform'))
    ctx.nonterminal = "ast_transform"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 76: # $ast_transform = :arrow $ast_transform_sub -> $1
        ctx.rule = rules[76]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 3) # :arrow
        tree.add(t)
        subtree = parse_ast_transform_sub(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[99] if x >=0],
        rules[76]
    )
def parse__gen15(ctx):
    """Parse the optional '_gen15' nonterminal: zero or one $binding_power."""
    token = ctx.tokens.current()
    tree = ParseTree(NonTerminal(101, '_gen15'))
    ctx.nonterminal = "_gen15"
    if token is None:
        return tree
    # Epsilon: the lookahead belongs to what follows _gen15, not to _gen15 itself.
    if token.id in nonterminal_follow[101] and token.id not in nonterminal_first[101]:
        return tree
    if table[55][token.id] == 56:  # $_gen15 = $binding_power
        ctx.rule = rules[56]
        tree.astTransform = AstTransformSubstitution(0)
        tree.add(parse_binding_power(ctx))
    return tree
# Lexer Code #
# START USER CODE
# END USER CODE
def emit(ctx, terminal, source_string, line, col):
    """Append a Terminal token to ctx.tokens; a falsy terminal name emits nothing."""
    if not terminal:
        return
    ctx.tokens.append(
        Terminal(terminals[terminal], terminal, source_string, ctx.resource, line, col)
    )
def default_action(ctx, terminal, source_string, line, col):
    """Default per-match lexer action: simply emit the matched terminal."""
    emit(ctx, terminal, source_string, line, col)
def init():
    """Create a fresh, empty user context for a lex run."""
    return dict()
def destroy(context):
    """Dispose of the user context created by init(); nothing to clean up here."""
    return None
class LexerStackPush:
    """Lexer output directive: push *mode* onto the lexer's mode stack."""
    def __init__(self, mode):
        self.mode = mode
class LexerAction:
    """Lexer output directive carrying a named action (e.g. 'pop' to leave a mode)."""
    def __init__(self, action):
        self.action = action
class LexerContext:
    """Mutable state threaded through a single lex run.

    Tracks the unconsumed input, the current line/column, the lexer mode
    stack and the tokens emitted so far.
    """
    def __init__(self, string, resource, errors, user_context):
        # Explicit assignments instead of the original
        # self.__dict__.update(locals()), which also created a useless
        # circular ``self.self`` reference and hid the attribute set.
        self.string = string              # unconsumed input; trimmed as matches advance
        self.resource = resource          # resource name attached to emitted tokens
        self.errors = errors              # syntax-error handler/factory
        self.user_context = user_context  # opaque state from init()
        self.stack = ['default']          # lexer mode stack
        self.line = 1
        self.col = 1
        self.tokens = []                  # Terminal objects emitted so far
        self.re_match = None  # https://docs.python.org/3/library/re.html#match-objects
class HermesLexer:
regex = {
'default': OrderedDict([
(re.compile(r'(grammar)\s*({)'), [
# (terminal, group, function)
('grammar', 1, None),
('lbrace', 2, None),
LexerStackPush('grammar'),
]),
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
]),
'grammar': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'lexer'), [
# (terminal, group, function)
('lexer', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'parser'), [
# (terminal, group, function)
('parser', 0, None),
LexerStackPush('parser'),
]),
]),
'lexer': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'code<([a-z]+)>\s*<<\s*([a-zA-Z_]+)(?=\s)(.*?)(\2)', re.DOTALL), [
# (terminal, group, function)
('code_start', 2, None),
('language', 1, None),
('code', 3, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'<'), [
# (terminal, group, function)
('langle', 0, None),
]),
(re.compile(r'>'), [
# (terminal, group, function)
('rangle', 0, None),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r'\[\]'), [
# (terminal, group, function)
('no_group', 0, None),
]),
(re.compile(r'\['), [
# (terminal, group, function)
('lsquare', 0, None),
]),
(re.compile(r'\]'), [
# (terminal, group, function)
('rsquare', 0, None),
]),
(re.compile(r'[0-9]+'), [
# (terminal, group, function)
('integer', 0, None),
]),
(re.compile(r'(r\'(\\\'|[^\'])*\'|"(\\\"|[^\"])*")'), [
# (terminal, group, function)
('regex', 0, None),
LexerStackPush('regex_options'),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r'@([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('stack_push', 1, None),
]),
(re.compile(r'%([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('action', 1, None),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
# (terminal, group, function)
('terminal', 1, None),
]),
(re.compile(r'_[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('regex_partial', 0, None),
]),
(re.compile(r'null'), [
# (terminal, group, function)
('null', 0, None),
]),
(re.compile(r'mode'), [
# (terminal, group, function)
('mode', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'partials'), [
# (terminal, group, function)
('partials', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'enum'), [
# (terminal, group, function)
('regex_enum', 0, None),
LexerStackPush('regex_enum'),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
]),
'regex_enum': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r':'), [
# (terminal, group, function)
('colon', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r'(r\'(\\\'|[^\'])*\'|"(\\\"|[^\"])*")'), [
# (terminal, group, function)
('regex', 0, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
]),
'regex_options': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
LexerAction('pop'),
]),
]),
'parser': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'\|'), [
# (terminal, group, function)
('pipe', 0, None),
]),
(re.compile(r'='), [
# (terminal, group, function)
('equals', 0, None),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
]),
(re.compile(r'null'), [
# (terminal, group, function)
('null', 0, None),
]),
(re.compile(r'parser\s*<\s*expression\s*>\s*({)'), [
# (terminal, group, function)
('parser_expression', None, None),
('lbrace', 1, None),
LexerStackPush('parser_expr'),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
# (terminal, group, function)
('terminal', 1, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)(?=\s*\=)'), [
# (terminal, group, function)
('ll1_rule_hint', None, None),
('nonterminal', 1, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('nonterminal', 1, None),
]),
(re.compile(r'\$([0-9]+|\$)'), [
# (terminal, group, function)
('nonterminal_reference', 1, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
(re.compile(r'"[^"]+"'), [
# (terminal, group, function)
('string', 0, None),
]),
(re.compile(r'[0-9]+'), [
# (terminal, group, function)
('integer', 0, None),
]),
]),
'parser_expr': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'(\()(?=\s*[\*-])'), [
# (terminal, group, function)
('lparen', 1, None),
LexerStackPush('binding_power'),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
]),
(re.compile(r'<=>'), [
# (terminal, group, function)
('expression_divider', 0, None),
]),
(re.compile(r'\|'), [
# (terminal, group, function)
('pipe', 0, None),
]),
(re.compile(r'='), [
# (terminal, group, function)
('equals', 0, None),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
# (terminal, group, function)
('terminal', 1, None),
]),
(re.compile(r'(\$([a-zA-Z][a-zA-Z0-9_]*))[ \t]*(=)[ \t]*\1[ \t]+:([a-zA-Z][a-zA-Z0-9_]*)[ \t]+\1(?![ \t]+(:|\$))'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 2, None),
('equals', 3, None),
('infix_rule_hint', None, None),
('nonterminal', 2, None),
('terminal', 4, None),
('nonterminal', 2, None),
]),
(re.compile(r'(\$([a-zA-Z][a-zA-Z0-9_]*))[ \t]*(=)[ \t]*:([a-zA-Z][a-zA-Z0-9_]*)[ \t]+\1(?)'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 2, None),
('equals', 3, None),
('prefix_rule_hint', None, None),
('terminal', 4, None),
('nonterminal', 2, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)\s*(=)'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 1, None),
('equals', 2, None),
('mixfix_rule_hint', None, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('nonterminal', 1, None),
]),
(re.compile(r'\$([0-9]+|\$)'), [
# (terminal, group, function)
('nonterminal_reference', 1, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
(re.compile(r'"[^"]+"'), [
# (terminal, group, function)
('string', 0, None),
]),
(re.compile(r'[0-9]+'), [
# (terminal, group, function)
('integer', 0, None),
]),
]),
'binding_power': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\*'), [
# (terminal, group, function)
('asterisk', 0, None),
]),
(re.compile(r'-'), [
# (terminal, group, function)
('dash', 0, None),
]),
(re.compile(r':'), [
# (terminal, group, function)
('colon', 0, None),
]),
(re.compile(r'left'), [
# (terminal, group, function)
('left', 0, None),
]),
(re.compile(r'right'), [
# (terminal, group, function)
('right', 0, None),
]),
(re.compile(r'unary'), [
# (terminal, group, function)
('unary', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
LexerAction('pop'),
]),
]),
}
def _advance_line_col(self, string, length, line, col):
for i in range(length):
if string[i] == '\n':
line += 1
col = 1
else:
col += 1
return (line, col)
def _advance_string(self, ctx, string):
(ctx.line, ctx.col) = self._advance_line_col(string, len(string), ctx.line, ctx.col)
ctx.string = ctx.string[len(string):]
def _next(self, ctx, debug=False):
for regex, outputs in self.regex[ctx.stack[-1]].items():
if debug:
from xtermcolor import colorize
token_count = len(ctx.tokens)
print('{1} ({2}, {3}) regex: {0}'.format(
colorize(regex.pattern, ansi=40), colorize(ctx.string[:20].replace('\n', '\\n'), ansi=15), ctx.line, ctx.col)
)
match = regex.match(ctx.string)
if match:
ctx.re_match = match
for output in outputs:
if isinstance(output, tuple):
(terminal, group, function) = output
function = function if function else default_action
source_string = match.group(group) if group is not None else ''
(group_line, group_col) = self._advance_line_col(ctx.string, match.start(group) if group else 0, ctx.line, ctx.col)
function(
ctx,
terminal,
source_string,
group_line,
group_col
)
if debug:
print(' matched: {}'.format(colorize(match.group(0).replace('\n', '\\n'), ansi=3)))
for token in ctx.tokens[token_count:]:
print(' emit: [{}] [{}, {}] [{}] stack:{} context:{}'.format(
colorize(token.str, ansi=9),
colorize(str(token.line), ansi=5),
colorize(str(token.col), ansi=5),
colorize(token.source_string, ansi=3),
colorize(str(ctx.stack), ansi=4),
colorize(str(ctx.user_context), ansi=13)
))
token_count = len(ctx.tokens)
if isinstance(output, LexerStackPush):
ctx.stack.append(output.mode)
if debug:
print(' push on stack: {}'.format(colorize(output.mode, ansi=4)))
if isinstance(output, LexerAction):
if output.action == 'pop':
mode = ctx.stack.pop()
if debug:
print(' pop off stack: {}'.format(colorize(mode, ansi=4)))
self._advance_string(ctx, match.group(0))
return len(match.group(0)) > 0
return False
def lex(self, string, resource, errors=None, debug=False):
if errors is None:
errors = DefaultSyntaxErrorHandler()
string_copy = string
user_context = init()
ctx = LexerContext(string, resource, errors, user_context)
while len(ctx.string):
matched = self._next(ctx, debug)
if matched == False:
raise ctx.errors.unrecognized_token(string_copy, ctx.line, ctx.col)
destroy(ctx.user_context)
return ctx.tokens
def lex(source, resource, errors=None, debug=False):
    """Tokenize *source* with HermesLexer and wrap the tokens in a TokenStream."""
    return TokenStream(HermesLexer().lex(source, resource, errors, debug))
| 38.088612 | 189 | 0.520529 |
import sys
import os
import re
import base64
import argparse
from collections import OrderedDict
def parse_tree_string(parsetree, indent=None, b64_source=True, indent_level=0, debug=False):
    """Render a ParseTree (or Terminal leaf) as a human-readable string.

    :param parsetree: ParseTree node or Terminal leaf
    :param indent: spaces per nesting level; None renders on one line
    :param b64_source: base64-encode terminal source text (see Terminal.dumps)
    :param indent_level: current recursion depth (internal)
    :param debug: include ParseTree.debug_str() annotations
    """
    pad = (' ' * indent * indent_level) if indent else ''
    if isinstance(parsetree, Terminal):
        return pad + parsetree.dumps(b64_source=b64_source)
    if isinstance(parsetree, ParseTree):
        rendered = [
            parse_tree_string(child, indent, b64_source, indent_level + 1, debug)
            for child in parsetree.children
        ]
        extra = parsetree.debug_str() if debug else ''
        # Leaf-like or single-line rendering.
        if indent is None or not rendered:
            return '{0}({1}: {2}{3})'.format(pad, parsetree.nonterminal, extra, ', '.join(rendered))
        # Multi-line rendering with one child per line.
        return '{0}({1}:{2}\n{3}\n{4})'.format(
            pad,
            parsetree.nonterminal,
            extra,
            ',\n'.join(rendered),
            pad
        )
def ast_string(ast, indent=None, b64_source=True, indent_level=0):
    """Render an Ast node, list of nodes, or Terminal leaf as a string.

    :param ast: Ast, list, or Terminal
    :param indent: spaces per nesting level; None renders on one line
    :param b64_source: base64-encode terminal source text (see Terminal.dumps)
    :param indent_level: current recursion depth (internal)
    """
    pad = (' ' * indent * indent_level) if indent else ''
    child_pad = (' ' * indent * (indent_level + 1)) if indent else ''
    if isinstance(ast, Ast):
        rendered = OrderedDict(
            (key, ast_string(value, indent, b64_source, indent_level + 1))
            for key, value in ast.attributes.items()
        )
        if indent is None:
            body = ', '.join('{0}={1}'.format(k, v) for k, v in rendered.items())
            return '({0}: {1})'.format(ast.name, body)
        body = ',\n'.join('{0}{1}={2}'.format(child_pad, k, v) for k, v in rendered.items())
        return '({0}:\n{1}\n{2})'.format(ast.name, body, pad)
    elif isinstance(ast, list):
        rendered = [ast_string(item, indent, b64_source, indent_level + 1) for item in ast]
        if indent is None or not rendered:
            return '[{0}]'.format(', '.join(rendered))
        body = ',\n'.join('{0}{1}'.format(child_pad, item) for item in rendered)
        return '[\n{1}\n{0}]'.format(pad, body)
    elif isinstance(ast, Terminal):
        return ast.dumps(b64_source=b64_source)
class Terminal:
    """A single token produced by the lexer.

    Attributes (set from constructor args): id (int terminal id), str
    (terminal name), source_string (matched text), resource (origin name),
    line and col (1-based position).
    """
    def __init__(self, id, str, source_string, resource, line, col):
        # Module convention: mirror every constructor argument (including
        # ``self``) as an instance attribute in one shot.
        self.__dict__.update(locals())
    def getId(self):
        """Return the numeric terminal id."""
        return self.id
    def ast(self):
        """A terminal is its own AST node."""
        return self
    def dumps(self, b64_source=True, **kwargs):
        """Format as ``<resource:line:col name "source">``.

        :param b64_source: when True, base64-encode the matched text so the
            output is safe for embedding regardless of content.
        """
        if b64_source:
            source = base64.b64encode(self.source_string.encode('utf-8')).decode('utf-8')
        else:
            source = self.source_string
        return '<{resource}:{line}:{col} {terminal} "{source}">'.format(
            resource=self.resource,
            line=self.line,
            col=self.col,
            terminal=self.str,
            source=source
        )
    def __str__(self):
        return self.dumps()
class NonTerminal():
    """A grammar nonterminal symbol: numeric ``id`` plus display name ``str``."""
    def __init__(self, id, str):
        # Module convention: mirror constructor arguments (including ``self``)
        # as instance attributes.
        self.__dict__.update(locals())
        # Flagged True elsewhere for list-producing nonterminals.
        self.list = False
    def __str__(self):
        return self.str
class AstTransform:
    """Abstract marker base for AST-construction directives attached to a
    ParseTree (see AstTransformSubstitution and AstTransformNodeCreator)."""
    pass
class AstTransformSubstitution(AstTransform):
    """AST directive: substitute the child at position ``idx`` (grammar ``$N``)."""
    def __init__(self, idx):
        # Module convention: mirror constructor arguments as attributes.
        self.__dict__.update(locals())
    def __repr__(self):
        return '$' + str(self.idx)
    def __str__(self):
        return repr(self)
class AstTransformNodeCreator(AstTransform):
    """AST directive: build a named Ast node whose attributes map names to
    child indices (grammar ``Name( attr=$N, ... )``)."""
    def __init__(self, name, parameters):
        # Module convention: mirror constructor arguments as attributes.
        self.__dict__.update(locals())
    def __repr__(self):
        rendered = ', '.join('%s=$%s' % (k, str(v)) for k, v in self.parameters.items())
        return self.name + '( ' + rendered + ' )'
    def __str__(self):
        return repr(self)
class AstList(list):
    """A list of AST-bearing nodes produced by list-generating nonterminals."""
    def ast(self):
        """Return a plain list holding each element's ``ast()`` result."""
        return [element.ast() for element in self]
    def dumps(self, indent=None, b64_source=True):
        """Render via ast_string(); see that function for parameter meanings."""
        return ast_string(self, indent=indent, b64_source=b64_source)
class ParseTree():
    """A concrete parse-tree node for one nonterminal, plus the bookkeeping
    flags the expression parser needs to rebuild an AST from it.

    The flags (isExpr, isNud, isPrefix, isInfix, nudMorphemeCount, isExprNud)
    are set by the generated parse functions; ``ast()`` consults them to map
    grammar indices ($N) onto the right children.
    """
    def __init__(self, nonterminal):
        # Module convention: mirror constructor arguments (including ``self``)
        # as instance attributes.
        self.__dict__.update(locals())
        self.children = []            # child ParseTree / Terminal nodes, in order
        self.astTransform = None      # AstTransform directive applied by ast()
        self.isExpr = False           # node produced by the expression parser
        self.isNud = False            # node is a null-denotation (prefix position)
        self.isPrefix = False         # nud was a prefix operator production
        self.isInfix = False          # node is an infix operator production
        self.nudMorphemeCount = 0     # how many morphemes the nud consumed
        self.isExprNud = False        # nud of an expression nonterminal
        self.list_separator_id = None # terminal id of the list separator, if any
        self.list = False             # node collects a list of children
    def debug_str(self):
        """Return a colorized summary of the non-default flags on this node."""
        from copy import deepcopy
        def h(v):
            if v == False or v is None:
                return str(v)
            from xtermcolor import colorize
            return colorize(str(v), ansi=190)
        d = deepcopy(self.__dict__)
        # These are rendered elsewhere (or are self-references); omit them.
        for key in ['self', 'nonterminal', 'children']:
            del d[key]
        # Only show flags that differ from their defaults.
        f = {k: v for k, v in d.items() if v != False and v is not None}
        return ' [{}]'.format(', '.join(['{}={}'.format(k,h(v)) for k,v in f.items()]))
    def add(self, tree):
        """Append a child node (ParseTree or Terminal)."""
        self.children.append( tree )
    def ast(self):
        """Convert this parse tree to its AST per the attached transform.

        Three regimes: list nodes flatten children into an AstList (skipping
        separator terminals); expression nodes remap $N indices around the
        nud/led split; ordinary nodes apply the transform directly.
        """
        if self.list == True:
            r = AstList()
            if len(self.children) == 0:
                return r
            for child in self.children:
                # Drop the separator tokens (e.g. commas) from list ASTs.
                if isinstance(child, Terminal) and self.list_separator_id is not None and child.id == self.list_separator_id:
                    continue
                r.append(child.ast())
            return r
        elif self.isExpr:
            if isinstance(self.astTransform, AstTransformSubstitution):
                return self.children[self.astTransform.idx].ast()
            elif isinstance(self.astTransform, AstTransformNodeCreator):
                parameters = OrderedDict()
                for name, idx in self.astTransform.parameters.items():
                    if idx == '$':
                        # '$' refers to the whole first child.
                        child = self.children[0]
                    elif isinstance(self.children[0], ParseTree) and \
                         self.children[0].isNud and \
                         not self.children[0].isPrefix and \
                         not self.isExprNud and \
                         not self.isInfix:
                        # The nud subtree "absorbed" the first morphemes; index
                        # into it first, then fall through to our own children.
                        if idx < self.children[0].nudMorphemeCount:
                            child = self.children[0].children[idx]
                        else:
                            index = idx - self.children[0].nudMorphemeCount + 1
                            child = self.children[index]
                    elif len(self.children) == 1 and not isinstance(self.children[0], ParseTree) and not isinstance(self.children[0], list):
                        # Single non-tree child: pass it through unchanged.
                        return self.children[0]
                    else:
                        child = self.children[idx]
                    parameters[name] = child.ast()
                return Ast(self.astTransform.name, parameters)
        else:
            if isinstance(self.astTransform, AstTransformSubstitution):
                return self.children[self.astTransform.idx].ast()
            elif isinstance(self.astTransform, AstTransformNodeCreator):
                parameters = OrderedDict()
                for name, idx in self.astTransform.parameters.items():
                    parameters[name] = self.children[idx].ast()
                return Ast(self.astTransform.name, parameters)
            elif len(self.children):
                # No transform: default to the first child's AST.
                return self.children[0].ast()
            else:
                return None
    def dumps(self, indent=None, b64_source=True, debug=False):
        """Render via parse_tree_string(); see that function for parameters."""
        args = locals()
        del args['self']
        return parse_tree_string(self, **args)
class Ast():
    """An abstract-syntax-tree node: a node ``name`` plus an ordered mapping
    of attribute names to child nodes (``attributes``)."""
    def __init__(self, name, attributes):
        # Module convention: mirror constructor arguments as attributes.
        self.__dict__.update(locals())
    def attr(self, attr):
        """Look up the child stored under attribute name *attr*."""
        return self.attributes[attr]
    def dumps(self, indent=None, b64_source=True):
        """Render via ast_string(); see that function for parameter meanings."""
        return ast_string(self, indent=indent, b64_source=b64_source)
class SyntaxError(Exception):
    """Lexer/parser error carrying a preformatted message.

    NOTE: deliberately shadows the builtin ``SyntaxError`` within this module,
    matching the generated-code convention.
    """
    def __init__(self, message):
        # Module convention: mirror constructor arguments as attributes
        # (stores ``self.message``).
        self.__dict__.update(locals())
    def __str__(self):
        return self.message
class TokenStream(list):
    """A token list with a read cursor, consumed by the recursive-descent
    parse functions via current()/advance().

    ``index`` is the cursor position; current() returns None once the
    cursor runs past the end.
    """
    def __init__(self, arg=()):
        """Initialize from any iterable of tokens.

        The default was a mutable ``[]``; an immutable empty tuple is used
        instead (same behavior — list.__init__ copies it — without the
        shared-mutable-default pitfall).
        """
        super().__init__(arg)
        self.index = 0
    def advance(self):
        """Move the cursor forward one token and return the new current token."""
        self.index += 1
        return self.current()
    def last(self):
        """Return the final token in the stream, ignoring the cursor."""
        return self[-1]
    def current(self):
        """Return the token at the cursor, or None when the stream is exhausted."""
        try:
            return self[self.index]
        except IndexError:
            return None
class DefaultSyntaxErrorHandler:
    """Builds SyntaxError objects with standard messages and records each one
    in ``self.errors``; callers raise the returned error."""
    def __init__(self):
        self.errors = []
    def _error(self, string):
        """Create a SyntaxError for *string*, record it, and return it."""
        err = SyntaxError(string)
        self.errors.append(err)
        return err
    def unexpected_eof(self):
        return self._error("Error: unexpected end of file")
    def excess_tokens(self):
        return self._error("Finished parsing without consuming all tokens.")
    def unexpected_symbol(self, nonterminal, actual_terminal, expected_terminals, rule):
        message = "Unexpected symbol (line {line}, col {col}) when parsing parse_{nt}. Expected {expected}, got {actual}.".format(
            line=actual_terminal.line,
            col=actual_terminal.col,
            nt=nonterminal,
            expected=', '.join(expected_terminals),
            actual=actual_terminal
        )
        return self._error(message)
    def no_more_tokens(self, nonterminal, expected_terminal, last_terminal):
        return self._error("No more tokens. Expecting " + expected_terminal)
    def invalid_terminal(self, nonterminal, invalid_terminal):
        return self._error("Invalid symbol ID: {} ({})".format(invalid_terminal.id, invalid_terminal.string))
    def unrecognized_token(self, string, line, col):
        # Show the offending line with a caret under the bad column.
        offending_line = string.split('\n')[line-1]
        pointer = ' ' * (col - 1) + '^'
        return self._error('Unrecognized token on line {}, column {}:\n\n{}\n{}'.format(
            line, col, offending_line, pointer
        ))
    def missing_list_items(self, method, required, found, last):
        return self._error("List for {} requires {} items but only {} were found.".format(method, required, found))
    def missing_terminator(self, method, terminator, last):
        return self._error("List for "+method+" is missing a terminator")
class ParserContext:
    """Mutable state threaded through the generated parse_* functions.

    Holds the TokenStream (``tokens``), the error handler (``errors``), and
    the name of the nonterminal/rule currently being parsed, which the
    parse functions update as they descend (used in error messages).
    """
    def __init__(self, tokens, errors):
        # Module convention: mirror constructor arguments as attributes.
        self.__dict__.update(locals())
        self.nonterminal_string = None
        self.rule_string = None
terminals = {
0: 'regex_enum',
1: 'dash',
2: 'lbrace',
3: 'arrow',
4: 'unary',
5: 'rsquare',
6: 'infix_rule_hint',
7: 'equals',
8: 'stack_push',
9: 'code_start',
10: 'langle',
11: 'no_group',
12: 'expr_rule_hint',
13: 'partials',
14: 'regex',
15: 'rbrace',
16: 'code',
17: 'identifier',
18: 'regex_partial',
19: 'rangle',
20: 'language',
21: 'integer',
22: 'left',
23: 'rparen',
24: 'right',
25: 'mixfix_rule_hint',
26: 'colon',
27: 'expression_divider',
28: 'prefix_rule_hint',
29: 'asterisk',
30: 'll1_rule_hint',
31: 'string',
32: 'lexer',
33: 'grammar',
34: 'terminal',
35: 'lsquare',
36: 'parser',
37: 'lparen',
38: 'comma',
39: 'action',
40: 'pipe',
41: 'parser_expression',
42: 'nonterminal',
43: 'mode',
44: 'nonterminal_reference',
45: 'null',
'regex_enum': 0,
'dash': 1,
'lbrace': 2,
'arrow': 3,
'unary': 4,
'rsquare': 5,
'infix_rule_hint': 6,
'equals': 7,
'stack_push': 8,
'code_start': 9,
'langle': 10,
'no_group': 11,
'expr_rule_hint': 12,
'partials': 13,
'regex': 14,
'rbrace': 15,
'code': 16,
'identifier': 17,
'regex_partial': 18,
'rangle': 19,
'language': 20,
'integer': 21,
'left': 22,
'rparen': 23,
'right': 24,
'mixfix_rule_hint': 25,
'colon': 26,
'expression_divider': 27,
'prefix_rule_hint': 28,
'asterisk': 29,
'll1_rule_hint': 30,
'string': 31,
'lexer': 32,
'grammar': 33,
'terminal': 34,
'lsquare': 35,
'parser': 36,
'lparen': 37,
'comma': 38,
'action': 39,
'pipe': 40,
'parser_expression': 41,
'nonterminal': 42,
'mode': 43,
'nonterminal_reference': 44,
'null': 45,
}
table = [
[-1, -1, 16, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, 72, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 70, -1, 71, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 29, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 75, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 73, -1, -1, -1, -1, -1, -1, -1, 74, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 86, -1, -1, -1, -1, -1, -1, -1, -1, -1, 85, -1, -1, 84, -1, -1, -1, -1, -1, -1, -1, 83, -1, -1, 87],
[-1, -1, -1, 49, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1, -1, 50, -1, -1, 50, -1, -1, -1, -1, -1],
[-1, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1, 60, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 59, -1, -1, -1, -1, -1, -1, -1, -1, -1, 60, -1, -1, -1, -1, -1, -1, -1, -1],
[7, -1, -1, -1, -1, -1, -1, -1, -1, 10, -1, -1, -1, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 8, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 40, -1, -1],
[-1, -1, -1, 64, -1, -1, -1, -1, -1, -1, -1, -1, 64, -1, -1, 64, -1, 64, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 64, -1, -1, 64, -1, -1, -1, -1, 64, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 79, -1],
[21, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, -1, -1, -1, 4, -1, -1, -1, -1, 4, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 23, -1, 23, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 22, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 69, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 68, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, 63, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 61, -1, -1, 62, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 51, -1, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 51, -1, -1, -1, 51, -1, -1, -1, -1, -1, 51, -1, 51, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 80, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, 2, -1, -1, -1, -1, 2, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 38, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 37, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 24, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 47, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 47, -1, -1, -1, 47, -1, 53, -1, -1, -1, -1, 53, 47, -1, -1, 52],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 82, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 19, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[35, -1, -1, -1, -1, -1, -1, -1, 35, 35, -1, 34, -1, 35, 35, 35, -1, 35, -1, -1, -1, -1, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 35, 34, -1, -1, -1, 35, -1, -1, -1, 35, -1, 35],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 41, -1, -1, -1, -1, 42, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, 32, -1, -1, -1, -1, -1, -1, -1, -1, 31, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, -1, -1, -1, 33, -1, -1, -1, -1, -1, 39],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 76, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 57, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 56, -1, -1, -1, -1, -1, -1, -1, -1],
]
nonterminal_first = {
46: [2, -1],
47: [24, 4, 22],
48: [34, -1],
49: [36],
50: [34, 42, 17],
51: [34, 42, 21, 31, 45],
52: [3, -1],
53: [27, -1],
54: [0, 13, 43, 14, 9],
55: [43],
56: [34, 42, -1, 17],
57: [44, 17],
58: [0],
59: [32, 36, 41],
60: [37, -1],
61: [0, 13, 14, -1, 43, 9],
62: [36, -1, 41, 32],
63: [14, -1],
64: [37],
65: [34],
66: [12, 37],
67: [29, 1],
68: [30],
69: [37, 12, -1],
70: [9],
71: [28, 25, 6],
72: [34, -1, 3, 42, 17],
73: [17],
74: [32, 36, 41],
75: [-1, 17],
76: [35, 11],
77: [3, 34, -1, 42, 17],
78: [37],
79: [17],
80: [3, 34, 36, -1, 41, 42, 17, 45],
81: [34, -1, 39, 8, 17, 45],
82: [34, -1, 42, 17],
83: [-1, 17],
84: [2],
85: [14],
86: [27],
87: [29, 1],
88: [41],
89: [32],
90: [17],
91: [14, 0],
92: [35, 11, -1],
93: [-1, 17],
94: [36, 41],
95: [34, 39, 8, 17, 45],
96: [30, -1],
97: [33],
98: [13],
99: [3],
100: [34, -1, 31, 42, 21, 45],
101: [37, -1],
}
nonterminal_follow = {
46: [3],
47: [23],
48: [23],
49: [30, 32, 15, 36, 41],
50: [12, 30, 3, 34, 15, 37, 40, 42, 17],
51: [23, 38],
52: [27, 12, 30, 15, 37, 40],
53: [15, 12, 37, 3],
54: [0, 13, 14, 15, 43, 9],
55: [0, 13, 14, 15, 43, 9],
56: [15, 12, 37, 3],
57: [27, 12, 30, 15, 37, 40],
58: [0, 13, 14, 15, 43, 9],
59: [15, 36, 41, 32],
60: [15, 17],
61: [15],
62: [15],
63: [15],
64: [15, 17],
65: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
66: [15, 12, 37],
67: [26],
68: [15, 30],
69: [15],
70: [0, 13, 14, 15, 43, 9],
71: [15, 12, 37],
72: [15, 30, 40],
73: [23, 38],
74: [15, 36, 41, 32],
75: [15],
76: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
77: [15, 30],
78: [12],
79: [15, 17],
80: [15, 30],
81: [0, 13, 14, 15, 43, 9],
82: [12, 30, 3, 15, 37, 40],
83: [15, 23],
84: [3],
85: [15, 14],
86: [15, 12, 37, 3],
87: [23],
88: [30, 32, 15, 36, 41],
89: [15, 36, 41, 32],
90: [3, 12, 30, 34, 15, 37, 40, 42, 17],
91: [0, 13, 14, 15, 43, 9],
92: [0, 23, 8, 9, 13, 14, 34, 15, 39, 43, 17, 45],
93: [23],
94: [30, 32, 15, 36, 41],
95: [0, 13, 14, 34, 15, 39, 43, 8, 9, 17, 45],
96: [15],
97: [-1],
98: [0, 13, 14, 15, 43, 9],
99: [27, 12, 30, 15, 37, 40],
100: [23],
101: [12],
}
rule_first = {
0: [32, 36, -1, 41],
1: [33],
2: [32, 36, 41],
3: [32],
4: [36, 41],
5: [0, 13, 14, -1, 43, 9],
6: [32],
7: [14, 0],
8: [43],
9: [13],
10: [9],
11: [9],
12: [14, -1],
13: [13],
14: [14],
15: [0],
16: [2],
17: [-1],
18: [34, -1, 39, 8, 17, 45],
19: [14],
20: [-1, 17],
21: [0],
22: [37],
23: [-1],
24: [17],
25: [-1, 17],
26: [37],
27: [2],
28: [34],
29: [34],
30: [-1],
31: [17],
32: [8],
33: [39],
34: [35, 11],
35: [-1],
36: [34],
37: [35],
38: [11],
39: [45],
40: [43],
41: [36],
42: [41],
43: [30, -1],
44: [36],
45: [30],
46: [34, 3, -1, 17, 42],
47: [3, 42, 34, 17, -1],
48: [34, 42, -1, 17],
49: [3],
50: [-1],
51: [34, 42, -1, 3, 17],
52: [45],
53: [36, 41],
54: [12, 37, -1],
55: [41],
56: [37],
57: [-1],
58: [12, 37],
59: [27],
60: [-1],
61: [25],
62: [28],
63: [6],
64: [34, 42, -1, 17],
65: [27],
66: [37],
67: [29, 1],
68: [29],
69: [1],
70: [22],
71: [24],
72: [4],
73: [34],
74: [42],
75: [17],
76: [3],
77: [-1, 17],
78: [17],
79: [44],
80: [17],
81: [31, 21, 34, -1, 42, 45],
82: [17],
83: [42],
84: [34],
85: [31],
86: [21],
87: [45],
}
nonterminal_rules = {
46: [
"$_gen3 = $regex_options",
"$_gen3 = :_empty",
],
47: [
"$associativity = :left",
"$associativity = :right",
"$associativity = :unary",
],
48: [
"$_gen8 = $terminal",
"$_gen8 = :_empty",
],
49: [
"$parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )",
],
50: [
"$morpheme = :terminal",
"$morpheme = :nonterminal",
"$morpheme = $macro",
],
51: [
"$macro_parameter = :nonterminal",
"$macro_parameter = :terminal",
"$macro_parameter = :string",
"$macro_parameter = :integer",
"$macro_parameter = :null",
],
52: [
"$_gen13 = $ast_transform",
"$_gen13 = :_empty",
],
53: [
"$_gen16 = $led",
"$_gen16 = :_empty",
],
54: [
"$lexer_atom = $lexer_regex",
"$lexer_atom = $lexer_mode",
"$lexer_atom = $lexer_partials",
"$lexer_atom = $lexer_code",
],
55: [
"$lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )",
],
56: [
"$nud = $_gen12",
],
57: [
"$ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )",
"$ast_transform_sub = :nonterminal_reference",
],
58: [
"$enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )",
],
59: [
"$body_element_sub = $lexer",
"$body_element_sub = $parser",
],
60: [
"$_gen6 = $regex_enumeration_options",
"$_gen6 = :_empty",
],
61: [
"$_gen1 = list($lexer_atom)",
],
62: [
"$_gen0 = list($body_element)",
],
63: [
"$_gen2 = list($regex_partial)",
],
64: [
"$regex_enumeration_options = :lparen $_gen7 :rparen -> $1",
],
65: [
"$terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )",
],
66: [
"$expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )",
],
67: [
"$binding_power_marker = :asterisk",
"$binding_power_marker = :dash",
],
68: [
"$ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )",
],
69: [
"$_gen14 = list($expression_rule)",
],
70: [
"$lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )",
],
71: [
"$expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )",
"$expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )",
"$expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )",
],
72: [
"$rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )",
],
73: [
"$ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )",
],
74: [
"$body_element = $body_element_sub",
],
75: [
"$_gen5 = list($regex_enumeration)",
],
76: [
"$match_group = :lsquare :integer :rsquare -> $1",
"$match_group = :no_group",
],
77: [
"$_gen11 = list($rule,:pipe)",
],
78: [
"$binding_power = :lparen $precedence :rparen -> $1",
],
79: [
"$regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )",
],
80: [
"$ll1_rule_rhs = $_gen11",
"$ll1_rule_rhs = :null -> NullProduction( )",
"$ll1_rule_rhs = $parser",
],
81: [
"$_gen4 = list($lexer_target)",
],
82: [
"$_gen12 = list($morpheme)",
],
83: [
"$_gen7 = list(:identifier,:comma)",
],
84: [
"$regex_options = :lbrace $_gen7 :rbrace -> $1",
],
85: [
"$regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )",
],
86: [
"$led = :expression_divider $_gen12 -> $1",
],
87: [
"$precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )",
],
88: [
"$parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )",
],
89: [
"$lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )",
],
90: [
"$macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )",
],
91: [
"$lexer_regex = $enumerated_regex",
"$lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )",
],
92: [
"$_gen9 = $match_group",
"$_gen9 = :_empty",
],
93: [
"$_gen17 = list($ast_parameter,:comma)",
],
94: [
"$parser = $parser_ll1",
"$parser = $parser_expression",
],
95: [
"$lexer_target = $terminal",
"$lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )",
"$lexer_target = :stack_push",
"$lexer_target = :action",
"$lexer_target = :null -> Null( )",
],
96: [
"$_gen10 = list($ll1_rule)",
],
97: [
"$grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )",
],
98: [
"$lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )",
],
99: [
"$ast_transform = :arrow $ast_transform_sub -> $1",
],
100: [
"$_gen18 = list($macro_parameter,:comma)",
],
101: [
"$_gen15 = $binding_power",
"$_gen15 = :_empty",
],
}
rules = {
0: "$_gen0 = list($body_element)",
1: "$grammar = :grammar :lbrace $_gen0 :rbrace -> Grammar( body=$2 )",
2: "$body_element = $body_element_sub",
3: "$body_element_sub = $lexer",
4: "$body_element_sub = $parser",
5: "$_gen1 = list($lexer_atom)",
6: "$lexer = :lexer :lbrace $_gen1 :rbrace -> Lexer( atoms=$2 )",
7: "$lexer_atom = $lexer_regex",
8: "$lexer_atom = $lexer_mode",
9: "$lexer_atom = $lexer_partials",
10: "$lexer_atom = $lexer_code",
11: "$lexer_code = :code_start :language :code -> LexerCode( language=$1, code=$2 )",
12: "$_gen2 = list($regex_partial)",
13: "$lexer_partials = :partials :lbrace $_gen2 :rbrace -> RegexPartials( list=$2 )",
14: "$regex_partial = :regex :arrow :regex_partial -> RegexPartial( regex=$0, name=$2 )",
15: "$lexer_regex = $enumerated_regex",
16: "$_gen3 = $regex_options",
17: "$_gen3 = :_empty",
18: "$_gen4 = list($lexer_target)",
19: "$lexer_regex = :regex $_gen3 :arrow $_gen4 -> Regex( regex=$0, options=$1, onmatch=$3 )",
20: "$_gen5 = list($regex_enumeration)",
21: "$enumerated_regex = :regex_enum :lbrace $_gen5 :rbrace :arrow $_gen4 -> EnumeratedRegex( enums=$2, onmatch=$5 )",
22: "$_gen6 = $regex_enumeration_options",
23: "$_gen6 = :_empty",
24: "$regex_enumeration = :identifier :colon :regex $_gen6 -> RegexEnum( language=$0, regex=$2, options=$3 )",
25: "$_gen7 = list(:identifier,:comma)",
26: "$regex_enumeration_options = :lparen $_gen7 :rparen -> $1",
27: "$regex_options = :lbrace $_gen7 :rbrace -> $1",
28: "$lexer_target = $terminal",
29: "$_gen8 = $terminal",
30: "$_gen8 = :_empty",
31: "$lexer_target = :identifier :lparen $_gen8 :rparen -> LexerFunctionCall( name=$0, terminal=$2 )",
32: "$lexer_target = :stack_push",
33: "$lexer_target = :action",
34: "$_gen9 = $match_group",
35: "$_gen9 = :_empty",
36: "$terminal = :terminal $_gen9 -> Terminal( name=$0, group=$1 )",
37: "$match_group = :lsquare :integer :rsquare -> $1",
38: "$match_group = :no_group",
39: "$lexer_target = :null -> Null( )",
40: "$lexer_mode = :mode :langle :identifier :rangle :lbrace $_gen1 :rbrace -> Mode( name=$2, atoms=$5 )",
41: "$parser = $parser_ll1",
42: "$parser = $parser_expression",
43: "$_gen10 = list($ll1_rule)",
44: "$parser_ll1 = :parser :lbrace $_gen10 :rbrace -> Parser( rules=$2 )",
45: "$ll1_rule = :ll1_rule_hint :nonterminal :equals $ll1_rule_rhs -> Rule( nonterminal=$1, production=$3 )",
46: "$_gen11 = list($rule,:pipe)",
47: "$ll1_rule_rhs = $_gen11",
48: "$_gen12 = list($morpheme)",
49: "$_gen13 = $ast_transform",
50: "$_gen13 = :_empty",
51: "$rule = $_gen12 $_gen13 -> Production( morphemes=$0, ast=$1 )",
52: "$ll1_rule_rhs = :null -> NullProduction( )",
53: "$ll1_rule_rhs = $parser",
54: "$_gen14 = list($expression_rule)",
55: "$parser_expression = :parser_expression :lbrace $_gen14 :rbrace -> ExpressionParser( rules=$2 )",
56: "$_gen15 = $binding_power",
57: "$_gen15 = :_empty",
58: "$expression_rule = $_gen15 :expr_rule_hint :nonterminal :equals $expression_rule_production -> ExpressionRule( precedence=$0, nonterminal=$2, production=$4 )",
59: "$_gen16 = $led",
60: "$_gen16 = :_empty",
61: "$expression_rule_production = :mixfix_rule_hint $nud $_gen13 $_gen16 $_gen13 -> MixfixProduction( nud=$1, nud_ast=$2, led=$3, ast=$4 )",
62: "$expression_rule_production = :prefix_rule_hint $_gen12 $_gen13 -> PrefixProduction( morphemes=$1, ast=$2 )",
63: "$expression_rule_production = :infix_rule_hint $_gen12 $_gen13 -> InfixProduction( morphemes=$1, ast=$2 )",
64: "$nud = $_gen12",
65: "$led = :expression_divider $_gen12 -> $1",
66: "$binding_power = :lparen $precedence :rparen -> $1",
67: "$precedence = $binding_power_marker :colon $associativity -> Precedence( marker=$0, associativity=$2 )",
68: "$binding_power_marker = :asterisk",
69: "$binding_power_marker = :dash",
70: "$associativity = :left",
71: "$associativity = :right",
72: "$associativity = :unary",
73: "$morpheme = :terminal",
74: "$morpheme = :nonterminal",
75: "$morpheme = $macro",
76: "$ast_transform = :arrow $ast_transform_sub -> $1",
77: "$_gen17 = list($ast_parameter,:comma)",
78: "$ast_transform_sub = :identifier :lparen $_gen17 :rparen -> AstTransformation( name=$0, parameters=$2 )",
79: "$ast_transform_sub = :nonterminal_reference",
80: "$ast_parameter = :identifier :equals :nonterminal_reference -> AstParameter( name=$0, index=$2 )",
81: "$_gen18 = list($macro_parameter,:comma)",
82: "$macro = :identifier :lparen $_gen18 :rparen -> Macro( name=$0, parameters=$2 )",
83: "$macro_parameter = :nonterminal",
84: "$macro_parameter = :terminal",
85: "$macro_parameter = :string",
86: "$macro_parameter = :integer",
87: "$macro_parameter = :null",
}
def is_terminal(id):
    """Return True iff *id* is a valid terminal id (an int in 0..45)."""
    return isinstance(id, int) and id in range(46)
def parse(tokens, errors=None, start=None):
    """Top-level entry point: parse *tokens* and return the parse tree.

    *tokens* may be a token stream or a raw grammar string (which is lexed
    first).  Raises through the error handler when tokens remain unconsumed
    after the grammar has been parsed.
    NOTE(review): *start* is accepted but never used in this body.
    """
    handler = DefaultSyntaxErrorHandler() if errors is None else errors
    if isinstance(tokens, str):
        tokens = lex(tokens, 'string', handler)
    context = ParserContext(tokens, handler)
    tree = parse_grammar(context)
    if tokens.current() != None:
        raise context.errors.excess_tokens()
    return tree
def expect(ctx, terminal_id):
    """Consume and return the current token, which must carry *terminal_id*.

    Raises (via ctx.errors) on end of input, on a mismatched terminal, or
    when the token after the consumed one has an out-of-range terminal id.
    """
    token = ctx.tokens.current()
    if not token:
        raise ctx.errors.no_more_tokens(
            ctx.nonterminal, terminals[terminal_id], ctx.tokens.last())
    if token.id != terminal_id:
        raise ctx.errors.unexpected_symbol(
            ctx.nonterminal, token, [terminals[terminal_id]], ctx.rule)
    # 'upcoming' (renamed from 'next', which shadows the builtin) is only
    # inspected for validity; it is not consumed here.
    upcoming = ctx.tokens.advance()
    if upcoming and not is_terminal(upcoming.id):
        raise ctx.errors.invalid_terminal(ctx.nonterminal, upcoming)
    return token
def parse__gen18(ctx):
    """Parse list nonterminal '_gen18' (id 100): zero or more macro_parameter
    subtrees separated by terminal id 38 (grammar rule 81:
    $_gen18 = list($macro_parameter,:comma))."""
    tree = ParseTree(NonTerminal(100, '_gen18'))
    tree.list = True;
    tree.list_separator_id = 38
    ctx.nonterminal = "_gen18"
    # Zero-length list: bail out when the lookahead can FOLLOW this list but
    # cannot start an element.  NOTE(review): the FIRST test indexes
    # nonterminal_first[101] in every generated list parser here, not the
    # list's own id — generator artifact; confirm against the tables.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[100]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(100)):
        tree.add(parse_macro_parameter(ctx))
        ctx.nonterminal = "_gen18" # parse_* helpers overwrite this; restore it
        # Consume the separator; an element with no trailing comma ends the list.
        if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
            tree.add(expect(ctx, 38));
        else:
            break
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen5(ctx):
    """Parse list nonterminal '_gen5' (id 75): zero or more regex_enumeration
    subtrees, no separator."""
    tree = ParseTree(NonTerminal(75, '_gen5'))
    tree.list = True;
    ctx.nonterminal = "_gen5"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 75 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[75]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(75)):
        tree.add(parse_regex_enumeration(ctx))
        ctx.nonterminal = "_gen5" # restore after the recursive call
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen11(ctx):
    """Parse list nonterminal '_gen11' (id 77): zero or more rule subtrees
    separated by terminal id 40."""
    tree = ParseTree(NonTerminal(77, '_gen11'))
    tree.list = True;
    tree.list_separator_id = 40
    ctx.nonterminal = "_gen11"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 77 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[77]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(77)):
        tree.add(parse_rule(ctx))
        ctx.nonterminal = "_gen11" # restore after the recursive call
        # Consume the separator; no trailing separator ends the list.
        if ctx.tokens.current() is not None and ctx.tokens.current().id == 40:
            tree.add(expect(ctx, 40));
        else:
            break
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen17(ctx):
    """Parse list nonterminal '_gen17' (id 93): zero or more ast_parameter
    subtrees separated by terminal id 38 (grammar rule 77:
    $_gen17 = list($ast_parameter,:comma))."""
    tree = ParseTree(NonTerminal(93, '_gen17'))
    tree.list = True;
    tree.list_separator_id = 38
    ctx.nonterminal = "_gen17"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 93 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[93]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(93)):
        tree.add(parse_ast_parameter(ctx))
        ctx.nonterminal = "_gen17" # restore after the recursive call
        # Consume the separator; no trailing separator ends the list.
        if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
            tree.add(expect(ctx, 38));
        else:
            break
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen1(ctx):
    """Parse list nonterminal '_gen1' (id 61): zero or more lexer_atom
    subtrees, no separator."""
    tree = ParseTree(NonTerminal(61, '_gen1'))
    tree.list = True;
    ctx.nonterminal = "_gen1"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 61 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[61]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(61)):
        tree.add(parse_lexer_atom(ctx))
        ctx.nonterminal = "_gen1" # restore after the recursive call
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen10(ctx):
    """Parse list nonterminal '_gen10' (id 96): zero or more ll1_rule
    subtrees, no separator."""
    tree = ParseTree(NonTerminal(96, '_gen10'))
    tree.list = True;
    ctx.nonterminal = "_gen10"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 96 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[96]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(96)):
        tree.add(parse_ll1_rule(ctx))
        ctx.nonterminal = "_gen10" # restore after the recursive call
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen0(ctx):
    """Parse list nonterminal '_gen0' (id 62): zero or more body_element
    subtrees, no separator."""
    tree = ParseTree(NonTerminal(62, '_gen0'))
    tree.list = True;
    ctx.nonterminal = "_gen0"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 62 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[62]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(62)):
        tree.add(parse_body_element(ctx))
        ctx.nonterminal = "_gen0" # restore after the recursive call
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen4(ctx):
    """Parse list nonterminal '_gen4' (id 81): zero or more lexer_target
    subtrees, no separator."""
    tree = ParseTree(NonTerminal(81, '_gen4'))
    tree.list = True;
    ctx.nonterminal = "_gen4"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 81 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[81]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(81)):
        tree.add(parse_lexer_target(ctx))
        ctx.nonterminal = "_gen4" # restore after the recursive call
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen2(ctx):
    """Parse list nonterminal '_gen2' (id 63): zero or more regex_partial
    subtrees, no separator."""
    tree = ParseTree(NonTerminal(63, '_gen2'))
    tree.list = True;
    ctx.nonterminal = "_gen2"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 63 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[63]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(63)):
        tree.add(parse_regex_partial(ctx))
        ctx.nonterminal = "_gen2" # restore after the recursive call
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen12(ctx):
    """Parse list nonterminal '_gen12' (id 82): zero or more morpheme
    subtrees, no separator."""
    tree = ParseTree(NonTerminal(82, '_gen12'))
    tree.list = True;
    ctx.nonterminal = "_gen12"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 82 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[82]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(82)):
        tree.add(parse_morpheme(ctx))
        ctx.nonterminal = "_gen12" # restore after the recursive call
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen7(ctx):
    """Parse list nonterminal '_gen7' (id 83): zero or more tokens of
    terminal id 17, separated by terminal id 38."""
    tree = ParseTree(NonTerminal(83, '_gen7'))
    tree.list = True;
    tree.list_separator_id = 38
    ctx.nonterminal = "_gen7"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 83 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[83]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(83)):
        # Elements are single terminals (id 17), consumed directly.
        tree.add(expect(ctx, 17))
        # Consume the separator; no trailing separator ends the list.
        if ctx.tokens.current() is not None and ctx.tokens.current().id == 38:
            tree.add(expect(ctx, 38));
        else:
            break
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen14(ctx):
    """Parse list nonterminal '_gen14' (id 69): zero or more expression_rule
    subtrees (grammar rule 54: $_gen14 = list($expression_rule))."""
    tree = ParseTree(NonTerminal(69, '_gen14'))
    tree.list = True;
    ctx.nonterminal = "_gen14"
    # Zero-length list: lookahead is in FOLLOW but not FIRST, or input ended.
    # NOTE(review): FIRST test uses index 101, not 69 — generator artifact.
    if ctx.tokens.current() is not None and \
       ctx.tokens.current().id not in nonterminal_first[101] and \
       ctx.tokens.current().id in nonterminal_follow[69]:
        return tree;
    if ctx.tokens.current() is None:
        return tree
    minimum = 0;
    while minimum > 0 or \
       (ctx.tokens.current() is not None and \
       ctx.tokens.current().id in nonterminal_first.get(69)):
        tree.add(parse_expression_rule(ctx))
        ctx.nonterminal = "_gen14" # restore after the recursive call
        minimum = max(minimum - 1, 0)
    return tree
def parse__gen3(ctx):
    """Parse optional nonterminal '_gen3' (id 46): an optional regex_options
    (rule 16, LL(1) table row 0); empty tree when absent."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[0][current.id] if current else -1
    tree = ParseTree(NonTerminal(46, '_gen3'))
    ctx.nonterminal = "_gen3"
    # Nullable: return an empty tree when the lookahead can follow _gen3
    # but cannot start it, or at end of input.
    if current != None and current.id in nonterminal_follow[46] and current.id not in nonterminal_first[46]:
        return tree
    if current == None:
        return tree
    if rule == 16:
        ctx.rule = rules[16]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_regex_options(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_associativity(ctx):
    """Parse nonterminal 'associativity' (id 47, table row 1): one of
    :left | :right | :unary (grammar rules 70-72)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[1][current.id] if current else -1
    tree = ParseTree(NonTerminal(47, 'associativity'))
    ctx.nonterminal = "associativity"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 70:
        ctx.rule = rules[70]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 22)
        tree.add(t)
        return tree
    elif rule == 71:
        ctx.rule = rules[71]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 24)
        tree.add(t)
        return tree
    elif rule == 72:
        ctx.rule = rules[72]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 4)
        tree.add(t)
        return tree
    # No production predicted for this lookahead.
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[47] if x >=0],
      rules[72]
    )
def parse__gen8(ctx):
    """Parse optional nonterminal '_gen8' (id 48, table row 2): an optional
    terminal subtree (rule 29); empty tree when absent."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[2][current.id] if current else -1
    tree = ParseTree(NonTerminal(48, '_gen8'))
    ctx.nonterminal = "_gen8"
    # Nullable: return an empty tree when the lookahead can follow _gen8
    # but cannot start it, or at end of input.
    if current != None and current.id in nonterminal_follow[48] and current.id not in nonterminal_first[48]:
        return tree
    if current == None:
        return tree
    if rule == 29:
        ctx.rule = rules[29]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_terminal(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_parser_ll1(ctx):
    """Parse nonterminal 'parser_ll1' (id 49, table row 3): a brace-delimited
    block of ll1_rules (rule 44) producing a Parser AST node."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[3][current.id] if current else -1
    tree = ParseTree(NonTerminal(49, 'parser_ll1'))
    ctx.nonterminal = "parser_ll1"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 44:
        ctx.rule = rules[44]
        # 'rules' maps to the child at index 2 (the _gen10 rule list).
        ast_parameters = OrderedDict([
            ('rules', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Parser', ast_parameters)
        t = expect(ctx, 36)
        tree.add(t)
        t = expect(ctx, 2)
        tree.add(t)
        subtree = parse__gen10(ctx)
        tree.add(subtree)
        t = expect(ctx, 15)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[49] if x >=0],
      rules[44]
    )
def parse_morpheme(ctx):
    """Parse nonterminal 'morpheme' (id 50, table row 4):
    :terminal | :nonterminal | $macro (grammar rules 73-75)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[4][current.id] if current else -1
    tree = ParseTree(NonTerminal(50, 'morpheme'))
    ctx.nonterminal = "morpheme"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 73:
        ctx.rule = rules[73]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 34)
        tree.add(t)
        return tree
    elif rule == 74:
        ctx.rule = rules[74]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 42)
        tree.add(t)
        return tree
    elif rule == 75:
        ctx.rule = rules[75]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_macro(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[50] if x >=0],
      rules[75]
    )
def parse_macro_parameter(ctx):
    """Parse nonterminal 'macro_parameter' (id 51, table row 5):
    :nonterminal | :terminal | :string | :integer | :null
    (grammar rules 83-87)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[5][current.id] if current else -1
    tree = ParseTree(NonTerminal(51, 'macro_parameter'))
    ctx.nonterminal = "macro_parameter"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 83:
        ctx.rule = rules[83]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 42)
        tree.add(t)
        return tree
    elif rule == 84:
        ctx.rule = rules[84]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 34)
        tree.add(t)
        return tree
    elif rule == 85:
        ctx.rule = rules[85]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 31)
        tree.add(t)
        return tree
    elif rule == 86:
        ctx.rule = rules[86]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 21)
        tree.add(t)
        return tree
    elif rule == 87:
        ctx.rule = rules[87]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 45)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[51] if x >=0],
      rules[87]
    )
def parse__gen13(ctx):
    """Parse optional nonterminal '_gen13' (id 52, table row 6): an optional
    ast_transform (rule 49); empty tree when absent."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[6][current.id] if current else -1
    tree = ParseTree(NonTerminal(52, '_gen13'))
    ctx.nonterminal = "_gen13"
    # Nullable: return an empty tree when the lookahead can follow _gen13
    # but cannot start it, or at end of input.
    if current != None and current.id in nonterminal_follow[52] and current.id not in nonterminal_first[52]:
        return tree
    if current == None:
        return tree
    if rule == 49:
        ctx.rule = rules[49]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_ast_transform(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse__gen16(ctx):
    """Parse optional nonterminal '_gen16' (id 53, table row 7): an optional
    led (grammar rules 59-60: $_gen16 = $led | :_empty)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[7][current.id] if current else -1
    tree = ParseTree(NonTerminal(53, '_gen16'))
    ctx.nonterminal = "_gen16"
    # Nullable: return an empty tree when the lookahead can follow _gen16
    # but cannot start it, or at end of input.
    if current != None and current.id in nonterminal_follow[53] and current.id not in nonterminal_first[53]:
        return tree
    if current == None:
        return tree
    if rule == 59:
        ctx.rule = rules[59]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_led(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_lexer_atom(ctx):
    """Parse nonterminal 'lexer_atom' (id 54, table row 8): one of
    lexer_regex | lexer_mode | lexer_partials | lexer_code (rules 7-10)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[8][current.id] if current else -1
    tree = ParseTree(NonTerminal(54, 'lexer_atom'))
    ctx.nonterminal = "lexer_atom"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 7:
        ctx.rule = rules[7]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer_regex(ctx)
        tree.add(subtree)
        return tree
    elif rule == 8:
        ctx.rule = rules[8]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer_mode(ctx)
        tree.add(subtree)
        return tree
    elif rule == 9:
        ctx.rule = rules[9]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer_partials(ctx)
        tree.add(subtree)
        return tree
    elif rule == 10:
        ctx.rule = rules[10]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer_code(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[54] if x >=0],
      rules[10]
    )
def parse_lexer_mode(ctx):
    """Parse nonterminal 'lexer_mode' (id 55, table row 9): rule 40, which
    builds a Mode AST node with name (child 2) and atoms (child 5)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[9][current.id] if current else -1
    tree = ParseTree(NonTerminal(55, 'lexer_mode'))
    ctx.nonterminal = "lexer_mode"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 40:
        ctx.rule = rules[40]
        ast_parameters = OrderedDict([
            ('name', 2),
            ('atoms', 5),
        ])
        tree.astTransform = AstTransformNodeCreator('Mode', ast_parameters)
        t = expect(ctx, 43)
        tree.add(t)
        t = expect(ctx, 10)
        tree.add(t)
        t = expect(ctx, 17)
        tree.add(t)
        t = expect(ctx, 19)
        tree.add(t)
        t = expect(ctx, 2)
        tree.add(t)
        subtree = parse__gen1(ctx)
        tree.add(subtree)
        t = expect(ctx, 15)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[55] if x >=0],
      rules[40]
    )
def parse_nud(ctx):
    """Parse nullable nonterminal 'nud' (id 56, table row 10): a morpheme
    list via _gen12 (grammar rule 64: $nud = $_gen12)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[10][current.id] if current else -1
    tree = ParseTree(NonTerminal(56, 'nud'))
    ctx.nonterminal = "nud"
    # Nullable: return an empty tree when the lookahead can follow 'nud'
    # but cannot start it, or at end of input.
    if current != None and current.id in nonterminal_follow[56] and current.id not in nonterminal_first[56]:
        return tree
    if current == None:
        return tree
    if rule == 64:
        ctx.rule = rules[64]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_ast_transform_sub(ctx):
    """Parse nonterminal 'ast_transform_sub' (id 57, table row 11): either a
    named transform call (rule 78 -> AstTransformation node) or a bare
    :nonterminal_reference (rule 79)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[11][current.id] if current else -1
    tree = ParseTree(NonTerminal(57, 'ast_transform_sub'))
    ctx.nonterminal = "ast_transform_sub"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 78:
        ctx.rule = rules[78]
        ast_parameters = OrderedDict([
            ('name', 0),
            ('parameters', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('AstTransformation', ast_parameters)
        t = expect(ctx, 17)
        tree.add(t)
        t = expect(ctx, 37)
        tree.add(t)
        subtree = parse__gen17(ctx)
        tree.add(subtree)
        t = expect(ctx, 23)
        tree.add(t)
        return tree
    elif rule == 79:
        ctx.rule = rules[79]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 44)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[57] if x >=0],
      rules[79]
    )
def parse_enumerated_regex(ctx):
    """Parse nonterminal 'enumerated_regex' (id 58, table row 12): rule 21,
    building an EnumeratedRegex node (enums=child 2, onmatch=child 5)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[12][current.id] if current else -1
    tree = ParseTree(NonTerminal(58, 'enumerated_regex'))
    ctx.nonterminal = "enumerated_regex"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 21:
        ctx.rule = rules[21]
        ast_parameters = OrderedDict([
            ('enums', 2),
            ('onmatch', 5),
        ])
        tree.astTransform = AstTransformNodeCreator('EnumeratedRegex', ast_parameters)
        t = expect(ctx, 0)
        tree.add(t)
        t = expect(ctx, 2)
        tree.add(t)
        subtree = parse__gen5(ctx)
        tree.add(subtree)
        t = expect(ctx, 15)
        tree.add(t)
        t = expect(ctx, 3)
        tree.add(t)
        subtree = parse__gen4(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[58] if x >=0],
      rules[21]
    )
def parse_body_element_sub(ctx):
    """Parse nonterminal 'body_element_sub' (id 59, table row 13):
    lexer | parser (rules 3-4)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[13][current.id] if current else -1
    tree = ParseTree(NonTerminal(59, 'body_element_sub'))
    ctx.nonterminal = "body_element_sub"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 3:
        ctx.rule = rules[3]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_lexer(ctx)
        tree.add(subtree)
        return tree
    elif rule == 4:
        ctx.rule = rules[4]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_parser(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[59] if x >=0],
      rules[4]
    )
def parse__gen6(ctx):
    """Parse optional nonterminal '_gen6' (id 60, table row 14): an optional
    regex_enumeration_options (rule 22); empty tree when absent."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[14][current.id] if current else -1
    tree = ParseTree(NonTerminal(60, '_gen6'))
    ctx.nonterminal = "_gen6"
    # Nullable: return an empty tree when the lookahead can follow _gen6
    # but cannot start it, or at end of input.
    if current != None and current.id in nonterminal_follow[60] and current.id not in nonterminal_first[60]:
        return tree
    if current == None:
        return tree
    if rule == 22:
        ctx.rule = rules[22]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_regex_enumeration_options(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_regex_enumeration_options(ctx):
    """Parse nonterminal 'regex_enumeration_options' (id 64, table row 18):
    rule 26, a parenthesized _gen7 list; AST substitutes child 1."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[18][current.id] if current else -1
    tree = ParseTree(NonTerminal(64, 'regex_enumeration_options'))
    ctx.nonterminal = "regex_enumeration_options"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 26:
        ctx.rule = rules[26]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 37)
        tree.add(t)
        subtree = parse__gen7(ctx)
        tree.add(subtree)
        t = expect(ctx, 23)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[64] if x >=0],
      rules[26]
    )
def parse_terminal(ctx):
    """Parse nonterminal 'terminal' (id 65, table row 19): rule 36, building
    a Terminal AST node (name=child 0, group=child 1 via _gen9)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[19][current.id] if current else -1
    tree = ParseTree(NonTerminal(65, 'terminal'))
    ctx.nonterminal = "terminal"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 36:
        ctx.rule = rules[36]
        ast_parameters = OrderedDict([
            ('name', 0),
            ('group', 1),
        ])
        tree.astTransform = AstTransformNodeCreator('Terminal', ast_parameters)
        t = expect(ctx, 34)
        tree.add(t)
        subtree = parse__gen9(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[65] if x >=0],
      rules[36]
    )
def parse_expression_rule(ctx):
    """Parse nonterminal 'expression_rule' (id 66, table row 20): rule 58,
    building an ExpressionRule node (precedence=0, nonterminal=2,
    production=4)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[20][current.id] if current else -1
    tree = ParseTree(NonTerminal(66, 'expression_rule'))
    ctx.nonterminal = "expression_rule"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 58:
        ctx.rule = rules[58]
        ast_parameters = OrderedDict([
            ('precedence', 0),
            ('nonterminal', 2),
            ('production', 4),
        ])
        tree.astTransform = AstTransformNodeCreator('ExpressionRule', ast_parameters)
        subtree = parse__gen15(ctx)
        tree.add(subtree)
        t = expect(ctx, 12)
        tree.add(t)
        t = expect(ctx, 42)
        tree.add(t)
        t = expect(ctx, 7)
        tree.add(t)
        subtree = parse_expression_rule_production(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[66] if x >=0],
      rules[58]
    )
def parse_binding_power_marker(ctx):
    """Parse nonterminal 'binding_power_marker' (id 67, table row 21):
    :asterisk | :dash (grammar rules 68-69)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[21][current.id] if current else -1
    tree = ParseTree(NonTerminal(67, 'binding_power_marker'))
    ctx.nonterminal = "binding_power_marker"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 68:
        ctx.rule = rules[68]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 29)
        tree.add(t)
        return tree
    elif rule == 69:
        ctx.rule = rules[69]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 1)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[67] if x >=0],
      rules[69]
    )
def parse_ll1_rule(ctx):
    """Parse nonterminal 'll1_rule' (id 68, table row 22): rule 45, building
    a Rule AST node (nonterminal=child 1, production=child 3)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[22][current.id] if current else -1
    tree = ParseTree(NonTerminal(68, 'll1_rule'))
    ctx.nonterminal = "ll1_rule"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 45:
        ctx.rule = rules[45]
        ast_parameters = OrderedDict([
            ('nonterminal', 1),
            ('production', 3),
        ])
        tree.astTransform = AstTransformNodeCreator('Rule', ast_parameters)
        t = expect(ctx, 30)
        tree.add(t)
        t = expect(ctx, 42)
        tree.add(t)
        t = expect(ctx, 7)
        tree.add(t)
        subtree = parse_ll1_rule_rhs(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[68] if x >=0],
      rules[45]
    )
def parse_lexer_code(ctx):
    """Parse nonterminal 'lexer_code' (id 70, table row 24): rule 11,
    building a LexerCode AST node (language=child 1, code=child 2)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[24][current.id] if current else -1
    tree = ParseTree(NonTerminal(70, 'lexer_code'))
    ctx.nonterminal = "lexer_code"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 11:
        ctx.rule = rules[11]
        ast_parameters = OrderedDict([
            ('language', 1),
            ('code', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('LexerCode', ast_parameters)
        t = expect(ctx, 9)
        tree.add(t)
        t = expect(ctx, 20)
        tree.add(t)
        t = expect(ctx, 16)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[70] if x >=0],
      rules[11]
    )
def parse_expression_rule_production(ctx):
    """Parse nonterminal 'expression_rule_production' (id 71, table row 25):
    mixfix (rule 61 -> MixfixProduction), prefix (rule 62 ->
    PrefixProduction), or infix (rule 63 -> InfixProduction)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[25][current.id] if current else -1
    tree = ParseTree(NonTerminal(71, 'expression_rule_production'))
    ctx.nonterminal = "expression_rule_production"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 61:
        ctx.rule = rules[61]
        ast_parameters = OrderedDict([
            ('nud', 1),
            ('nud_ast', 2),
            ('led', 3),
            ('ast', 4),
        ])
        tree.astTransform = AstTransformNodeCreator('MixfixProduction', ast_parameters)
        t = expect(ctx, 25)
        tree.add(t)
        subtree = parse_nud(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        subtree = parse__gen16(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        return tree
    elif rule == 62:
        ctx.rule = rules[62]
        ast_parameters = OrderedDict([
            ('morphemes', 1),
            ('ast', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('PrefixProduction', ast_parameters)
        t = expect(ctx, 28)
        tree.add(t)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        return tree
    elif rule == 63:
        ctx.rule = rules[63]
        ast_parameters = OrderedDict([
            ('morphemes', 1),
            ('ast', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('InfixProduction', ast_parameters)
        t = expect(ctx, 6)
        tree.add(t)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[71] if x >=0],
      rules[63]
    )
def parse_rule(ctx):
    """Parse nullable nonterminal 'rule' (id 72, table row 26): rule 51,
    building a Production node (morphemes=child 0, ast=child 1)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[26][current.id] if current else -1
    tree = ParseTree(NonTerminal(72, 'rule'))
    ctx.nonterminal = "rule"
    # Nullable: return an empty tree when the lookahead can follow 'rule'
    # but cannot start it, or at end of input.
    if current != None and current.id in nonterminal_follow[72] and current.id not in nonterminal_first[72]:
        return tree
    if current == None:
        return tree
    if rule == 51:
        ctx.rule = rules[51]
        ast_parameters = OrderedDict([
            ('morphemes', 0),
            ('ast', 1),
        ])
        tree.astTransform = AstTransformNodeCreator('Production', ast_parameters)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        subtree = parse__gen13(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_ast_parameter(ctx):
    """Parse nonterminal 'ast_parameter' (id 73, table row 27): rule 80,
    building an AstParameter node (name=child 0, index=child 2)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[27][current.id] if current else -1
    tree = ParseTree(NonTerminal(73, 'ast_parameter'))
    ctx.nonterminal = "ast_parameter"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 80:
        ctx.rule = rules[80]
        ast_parameters = OrderedDict([
            ('name', 0),
            ('index', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('AstParameter', ast_parameters)
        t = expect(ctx, 17)
        tree.add(t)
        t = expect(ctx, 7)
        tree.add(t)
        t = expect(ctx, 44)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[73] if x >=0],
      rules[80]
    )
def parse_body_element(ctx):
    """Parse nonterminal 'body_element' (id 74, table row 28): rule 2,
    delegating to body_element_sub."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[28][current.id] if current else -1
    tree = ParseTree(NonTerminal(74, 'body_element'))
    ctx.nonterminal = "body_element"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 2:
        ctx.rule = rules[2]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_body_element_sub(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[74] if x >=0],
      rules[2]
    )
def parse_match_group(ctx):
    """Parse nonterminal 'match_group' (id 76, table row 30): a bracketed
    integer group (rule 37, substitutes child 1) or a single terminal
    (rule 38, substitutes child 0)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[30][current.id] if current else -1
    tree = ParseTree(NonTerminal(76, 'match_group'))
    ctx.nonterminal = "match_group"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 37:
        ctx.rule = rules[37]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 35)
        tree.add(t)
        t = expect(ctx, 21)
        tree.add(t)
        t = expect(ctx, 5)
        tree.add(t)
        return tree
    elif rule == 38:
        ctx.rule = rules[38]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 11)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[76] if x >=0],
      rules[38]
    )
def parse_binding_power(ctx):
    """Parse nonterminal 'binding_power' (id 78, table row 32): rule 66,
    a parenthesized precedence; AST substitutes child 1 (grammar rule 66:
    $binding_power = :lparen $precedence :rparen -> $1)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[32][current.id] if current else -1
    tree = ParseTree(NonTerminal(78, 'binding_power'))
    ctx.nonterminal = "binding_power"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 66:
        ctx.rule = rules[66]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 37)
        tree.add(t)
        subtree = parse_precedence(ctx)
        tree.add(subtree)
        t = expect(ctx, 23)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[78] if x >=0],
      rules[66]
    )
def parse_regex_enumeration(ctx):
    """Parse nonterminal 'regex_enumeration' (id 79, table row 33): rule 24,
    building a RegexEnum node (language=0, regex=2, options=3)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[33][current.id] if current else -1
    tree = ParseTree(NonTerminal(79, 'regex_enumeration'))
    ctx.nonterminal = "regex_enumeration"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 24:
        ctx.rule = rules[24]
        ast_parameters = OrderedDict([
            ('language', 0),
            ('regex', 2),
            ('options', 3),
        ])
        tree.astTransform = AstTransformNodeCreator('RegexEnum', ast_parameters)
        t = expect(ctx, 17)
        tree.add(t)
        t = expect(ctx, 26)
        tree.add(t)
        t = expect(ctx, 14)
        tree.add(t)
        subtree = parse__gen6(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[79] if x >=0],
      rules[24]
    )
def parse_ll1_rule_rhs(ctx):
    """Parse nullable nonterminal 'll1_rule_rhs' (id 80, table row 34):
    a rule list (rule 47), :null -> NullProduction (rule 52), or a nested
    parser (rule 53); see grammar rules 51-54."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[34][current.id] if current else -1
    tree = ParseTree(NonTerminal(80, 'll1_rule_rhs'))
    ctx.nonterminal = "ll1_rule_rhs"
    # Nullable: return an empty tree when the lookahead can follow this
    # nonterminal but cannot start it, or at end of input.
    if current != None and current.id in nonterminal_follow[80] and current.id not in nonterminal_first[80]:
        return tree
    if current == None:
        return tree
    if rule == 47:
        ctx.rule = rules[47]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse__gen11(ctx)
        tree.add(subtree)
        return tree
    elif rule == 52:
        ctx.rule = rules[52]
        # NullProduction takes no AST parameters.
        ast_parameters = OrderedDict([
        ])
        tree.astTransform = AstTransformNodeCreator('NullProduction', ast_parameters)
        t = expect(ctx, 45)
        tree.add(t)
        return tree
    elif rule == 53:
        ctx.rule = rules[53]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_parser(ctx)
        tree.add(subtree)
        return tree
    return tree
def parse_regex_options(ctx):
    """Parse nonterminal 'regex_options' (id 84, table row 38): rule 27,
    a brace-delimited _gen7 list; AST substitutes child 1."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[38][current.id] if current else -1
    tree = ParseTree(NonTerminal(84, 'regex_options'))
    ctx.nonterminal = "regex_options"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 27:
        ctx.rule = rules[27]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 2)
        tree.add(t)
        subtree = parse__gen7(ctx)
        tree.add(subtree)
        t = expect(ctx, 15)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[84] if x >=0],
      rules[27]
    )
def parse_regex_partial(ctx):
    """Parse nonterminal 'regex_partial' (id 85, table row 39): rule 14,
    building a RegexPartial node (regex=child 0, name=child 2)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[39][current.id] if current else -1
    tree = ParseTree(NonTerminal(85, 'regex_partial'))
    ctx.nonterminal = "regex_partial"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 14:
        ctx.rule = rules[14]
        ast_parameters = OrderedDict([
            ('regex', 0),
            ('name', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('RegexPartial', ast_parameters)
        t = expect(ctx, 14)
        tree.add(t)
        t = expect(ctx, 3)
        tree.add(t)
        t = expect(ctx, 18)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[85] if x >=0],
      rules[14]
    )
def parse_led(ctx):
    """Parse nonterminal 'led' (id 86, table row 40): rule 65
    ($led = :expression_divider $_gen12 -> $1); AST substitutes child 1."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[40][current.id] if current else -1
    tree = ParseTree(NonTerminal(86, 'led'))
    ctx.nonterminal = "led"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 65:
        ctx.rule = rules[65]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 27)
        tree.add(t)
        subtree = parse__gen12(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[86] if x >=0],
      rules[65]
    )
def parse_precedence(ctx):
    """Parse nonterminal 'precedence' (id 87, table row 41): rule 67,
    building a Precedence node (marker=child 0, associativity=child 2)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[41][current.id] if current else -1
    tree = ParseTree(NonTerminal(87, 'precedence'))
    ctx.nonterminal = "precedence"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 67:
        ctx.rule = rules[67]
        ast_parameters = OrderedDict([
            ('marker', 0),
            ('associativity', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Precedence', ast_parameters)
        subtree = parse_binding_power_marker(ctx)
        tree.add(subtree)
        t = expect(ctx, 26)
        tree.add(t)
        subtree = parse_associativity(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[87] if x >=0],
      rules[67]
    )
def parse_parser_expression(ctx):
    """Parse nonterminal 'parser_expression' (id 88, table row 42): rule 55,
    a brace-delimited list of expression rules producing an
    ExpressionParser node (rules=child 2)."""
    current = ctx.tokens.current()
    # Predict the production from the lookahead; -1 means no prediction.
    rule = table[42][current.id] if current else -1
    tree = ParseTree(NonTerminal(88, 'parser_expression'))
    ctx.nonterminal = "parser_expression"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 55:
        ctx.rule = rules[55]
        ast_parameters = OrderedDict([
            ('rules', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('ExpressionParser', ast_parameters)
        t = expect(ctx, 41)
        tree.add(t)
        t = expect(ctx, 2)
        tree.add(t)
        subtree = parse__gen14(ctx)
        tree.add(subtree)
        t = expect(ctx, 15)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
      ctx.nonterminal,
      ctx.tokens.current(),
      [terminals[x] for x in nonterminal_first[88] if x >=0],
      rules[55]
    )
def parse_lexer(ctx):
    """Parse the 'lexer' nonterminal into a Lexer AST node (generated rule 6)."""
    current = ctx.tokens.current()
    rule = table[43][current.id] if current else -1
    tree = ParseTree(NonTerminal(89, 'lexer'))
    ctx.nonterminal = "lexer"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 6:
        ctx.rule = rules[6]
        ast_parameters = OrderedDict([
            ('atoms', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Lexer', ast_parameters)
        t = expect(ctx, 32)
        tree.add(t)
        t = expect(ctx, 2)
        tree.add(t)
        subtree = parse__gen1(ctx)
        tree.add(subtree)
        t = expect(ctx, 15)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[89] if x >=0],
        rules[6]
    )
def parse_macro(ctx):
    """Parse the 'macro' nonterminal into a Macro AST node: name '(' parameters ')' (generated rule 82)."""
    current = ctx.tokens.current()
    rule = table[44][current.id] if current else -1
    tree = ParseTree(NonTerminal(90, 'macro'))
    ctx.nonterminal = "macro"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 82:
        ctx.rule = rules[82]
        ast_parameters = OrderedDict([
            ('name', 0),
            ('parameters', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Macro', ast_parameters)
        t = expect(ctx, 17)
        tree.add(t)
        t = expect(ctx, 37)
        tree.add(t)
        subtree = parse__gen18(ctx)
        tree.add(subtree)
        t = expect(ctx, 23)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[90] if x >=0],
        rules[82]
    )
def parse_lexer_regex(ctx):
    """Parse the 'lexer_regex' nonterminal: either an enumerated_regex (rule 15) or a Regex node (rule 19)."""
    current = ctx.tokens.current()
    rule = table[45][current.id] if current else -1
    tree = ParseTree(NonTerminal(91, 'lexer_regex'))
    ctx.nonterminal = "lexer_regex"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 15:
        ctx.rule = rules[15]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_enumerated_regex(ctx)
        tree.add(subtree)
        return tree
    elif rule == 19:
        ctx.rule = rules[19]
        ast_parameters = OrderedDict([
            ('regex', 0),
            ('options', 1),
            ('onmatch', 3),
        ])
        tree.astTransform = AstTransformNodeCreator('Regex', ast_parameters)
        t = expect(ctx, 14)
        tree.add(t)
        subtree = parse__gen3(ctx)
        tree.add(subtree)
        t = expect(ctx, 3)
        tree.add(t)
        subtree = parse__gen4(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[91] if x >=0],
        rules[19]
    )
def parse__gen9(ctx):
    """Parse the optional '_gen9' nonterminal; an empty match yields a bare tree."""
    token = ctx.tokens.current()
    tree = ParseTree(NonTerminal(92, '_gen9'))
    ctx.nonterminal = "_gen9"
    if token is None:
        return tree
    # Epsilon production: the lookahead is in FOLLOW but not in FIRST.
    if token.id in nonterminal_follow[92] and token.id not in nonterminal_first[92]:
        return tree
    if table[46][token.id] == 34:
        ctx.rule = rules[34]
        tree.astTransform = AstTransformSubstitution(0)
        tree.add(parse_match_group(ctx))
    return tree
def parse_parser(ctx):
    """Parse the 'parser' nonterminal: an LL(1) parser (rule 41) or an expression parser (rule 42)."""
    token = ctx.tokens.current()
    ctx.nonterminal = "parser"
    tree = ParseTree(NonTerminal(94, 'parser'))
    if token is None:
        raise ctx.errors.unexpected_eof()
    rule = table[48][token.id]
    dispatch = {41: parse_parser_ll1, 42: parse_parser_expression}
    if rule in dispatch:
        ctx.rule = rules[rule]
        tree.astTransform = AstTransformSubstitution(0)
        tree.add(dispatch[rule](ctx))
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[94] if x >= 0],
        rules[42]
    )
def parse_lexer_target(ctx):
    """Parse the 'lexer_target' nonterminal: a terminal (28), a LexerFunctionCall (31), a bare token (32/33), or Null (39)."""
    current = ctx.tokens.current()
    rule = table[49][current.id] if current else -1
    tree = ParseTree(NonTerminal(95, 'lexer_target'))
    ctx.nonterminal = "lexer_target"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 28:
        ctx.rule = rules[28]
        tree.astTransform = AstTransformSubstitution(0)
        subtree = parse_terminal(ctx)
        tree.add(subtree)
        return tree
    elif rule == 31:
        ctx.rule = rules[31]
        ast_parameters = OrderedDict([
            ('name', 0),
            ('terminal', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('LexerFunctionCall', ast_parameters)
        t = expect(ctx, 17)
        tree.add(t)
        t = expect(ctx, 37)
        tree.add(t)
        subtree = parse__gen8(ctx)
        tree.add(subtree)
        t = expect(ctx, 23)
        tree.add(t)
        return tree
    elif rule == 32:
        ctx.rule = rules[32]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 8)
        tree.add(t)
        return tree
    elif rule == 33:
        ctx.rule = rules[33]
        tree.astTransform = AstTransformSubstitution(0)
        t = expect(ctx, 39)
        tree.add(t)
        return tree
    elif rule == 39:
        ctx.rule = rules[39]
        ast_parameters = OrderedDict([
        ])
        tree.astTransform = AstTransformNodeCreator('Null', ast_parameters)
        t = expect(ctx, 45)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[95] if x >=0],
        rules[39]
    )
def parse_grammar(ctx):
    """Parse the top-level 'grammar' nonterminal: 'grammar' '{' body '}' (generated rule 1)."""
    current = ctx.tokens.current()
    rule = table[51][current.id] if current else -1
    tree = ParseTree(NonTerminal(97, 'grammar'))
    ctx.nonterminal = "grammar"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 1:
        ctx.rule = rules[1]
        ast_parameters = OrderedDict([
            ('body', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('Grammar', ast_parameters)
        t = expect(ctx, 33)
        tree.add(t)
        t = expect(ctx, 2)
        tree.add(t)
        subtree = parse__gen0(ctx)
        tree.add(subtree)
        t = expect(ctx, 15)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[97] if x >=0],
        rules[1]
    )
def parse_lexer_partials(ctx):
    """Parse the 'lexer_partials' nonterminal: 'partials' '{' list '}' into a RegexPartials node (rule 13)."""
    current = ctx.tokens.current()
    rule = table[52][current.id] if current else -1
    tree = ParseTree(NonTerminal(98, 'lexer_partials'))
    ctx.nonterminal = "lexer_partials"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 13:
        ctx.rule = rules[13]
        ast_parameters = OrderedDict([
            ('list', 2),
        ])
        tree.astTransform = AstTransformNodeCreator('RegexPartials', ast_parameters)
        t = expect(ctx, 13)
        tree.add(t)
        t = expect(ctx, 2)
        tree.add(t)
        subtree = parse__gen2(ctx)
        tree.add(subtree)
        t = expect(ctx, 15)
        tree.add(t)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[98] if x >=0],
        rules[13]
    )
def parse_ast_transform(ctx):
    """Parse the 'ast_transform' nonterminal: '->' ast_transform_sub (generated rule 76)."""
    current = ctx.tokens.current()
    rule = table[53][current.id] if current else -1
    tree = ParseTree(NonTerminal(99, 'ast_transform'))
    ctx.nonterminal = "ast_transform"
    if current == None:
        raise ctx.errors.unexpected_eof()
    if rule == 76:
        ctx.rule = rules[76]
        tree.astTransform = AstTransformSubstitution(1)
        t = expect(ctx, 3)
        tree.add(t)
        subtree = parse_ast_transform_sub(ctx)
        tree.add(subtree)
        return tree
    raise ctx.errors.unexpected_symbol(
        ctx.nonterminal,
        ctx.tokens.current(),
        [terminals[x] for x in nonterminal_first[99] if x >=0],
        rules[76]
    )
def parse__gen15(ctx):
    """Parse the optional '_gen15' nonterminal; an empty match yields a bare tree."""
    token = ctx.tokens.current()
    tree = ParseTree(NonTerminal(101, '_gen15'))
    ctx.nonterminal = "_gen15"
    if token is None:
        return tree
    # Epsilon production: the lookahead is in FOLLOW but not in FIRST.
    if token.id in nonterminal_follow[101] and token.id not in nonterminal_first[101]:
        return tree
    if table[55][token.id] == 56:
        ctx.rule = rules[56]
        tree.astTransform = AstTransformSubstitution(0)
        tree.add(parse_binding_power(ctx))
    return tree
def emit(ctx, terminal, source_string, line, col):
    """Append a Terminal token to ctx.tokens; no-op when *terminal* is falsy."""
    if not terminal:
        return
    ctx.tokens.append(
        Terminal(terminals[terminal], terminal, source_string, ctx.resource, line, col))
def default_action(ctx, terminal, source_string, line, col):
    """Default lexer match handler: emit the matched terminal unchanged."""
    emit(ctx, terminal, source_string, line, col)
def init():
    """Create the per-lex user context; returned dict is threaded through lexing and passed to destroy()."""
    return {}
def destroy(context):
    """Tear down the user context created by init(); default implementation has nothing to release."""
    pass
class LexerStackPush:
    """Lexer output directive: push *mode* onto the lexer's mode stack."""
    def __init__(self, mode):
        self.mode = mode
class LexerAction:
    """Lexer output directive carrying an action name (e.g. 'pop' to leave the current mode)."""
    def __init__(self, action):
        self.action = action
class LexerContext:
    """Mutable state threaded through one lexing run: remaining input string,
    (line, col) cursor, mode stack, emitted tokens, and the user context."""
    def __init__(self, string, resource, errors, user_context):
        self.__dict__.update(locals())
        self.stack = ['default']
        self.line = 1
        self.col = 1
        self.tokens = []
        self.user_context = user_context
        # Holds the re.Match object of the most recent successful regex match.
        self.re_match = None
# FIX: the line above was garbled in the original ("self.re_match = None exer:"),
# fusing the attribute assignment with this class header.  The class name is
# confirmed by the HermesLexer().lex(...) call at module level below.
class HermesLexer:
    """Stack-based modal lexer generated for the Hermes grammar language."""
regex = {
'default': OrderedDict([
(re.compile(r'(grammar)\s*({)'), [
('grammar', 1, None),
('lbrace', 2, None),
LexerStackPush('grammar'),
]),
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
]),
'grammar': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'}'), [
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'lexer'), [
('lexer', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'parser'), [
('parser', 0, None),
LexerStackPush('parser'),
]),
]),
'lexer': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'code<([a-z]+)>\s*<<\s*([a-zA-Z_]+)(?=\s)(.*?)(\2)', re.DOTALL), [
('code_start', 2, None),
('language', 1, None),
('code', 3, None),
]),
(re.compile(r'}'), [
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'{'), [
('lbrace', 0, None),
]),
(re.compile(r'<'), [
('langle', 0, None),
]),
(re.compile(r'>'), [
('rangle', 0, None),
]),
(re.compile(r'\('), [
('lparen', 0, None),
]),
(re.compile(r'\)'), [
('rparen', 0, None),
]),
(re.compile(r'\[\]'), [
('no_group', 0, None),
]),
(re.compile(r'\['), [
('lsquare', 0, None),
]),
(re.compile(r'\]'), [
('rsquare', 0, None),
]),
(re.compile(r'[0-9]+'), [
('integer', 0, None),
]),
(re.compile(r'(r\'(\\\'|[^\'])*\'|"(\\\"|[^\"])*")'), [
('regex', 0, None),
LexerStackPush('regex_options'),
]),
(re.compile(r'->'), [
('arrow', 0, None),
]),
(re.compile(r','), [
('comma', 0, None),
]),
(re.compile(r'@([a-zA-Z][a-zA-Z0-9_]*)'), [
('stack_push', 1, None),
]),
(re.compile(r'%([a-zA-Z][a-zA-Z0-9_]*)'), [
('action', 1, None),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
('terminal', 1, None),
]),
(re.compile(r'_[a-zA-Z][a-zA-Z0-9_]*'), [
('regex_partial', 0, None),
]),
(re.compile(r'null'), [
('null', 0, None),
]),
(re.compile(r'mode'), [
('mode', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'partials'), [
('partials', 0, None),
LexerStackPush('lexer'),
]),
(re.compile(r'enum'), [
('regex_enum', 0, None),
LexerStackPush('regex_enum'),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
('identifier', 0, None),
]),
]),
'regex_enum': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'}'), [
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'{'), [
('lbrace', 0, None),
]),
(re.compile(r'\('), [
('lparen', 0, None),
]),
(re.compile(r'\)'), [
('rparen', 0, None),
]),
(re.compile(r':'), [
('colon', 0, None),
]),
(re.compile(r','), [
('comma', 0, None),
]),
(re.compile(r'(r\'(\\\'|[^\'])*\'|"(\\\"|[^\"])*")'), [
('regex', 0, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
('identifier', 0, None),
]),
]),
'regex_options': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
('identifier', 0, None),
]),
(re.compile(r','), [
('comma', 0, None),
]),
(re.compile(r'{'), [
('lbrace', 0, None),
]),
(re.compile(r'}'), [
('rbrace', 0, None),
]),
(re.compile(r'->'), [
('arrow', 0, None),
LexerAction('pop'),
]),
]),
'parser': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\#.*'), [
]),
(re.compile(r'{'), [
('lbrace', 0, None),
]),
(re.compile(r'}'), [
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'\|'), [
('pipe', 0, None),
]),
(re.compile(r'='), [
('equals', 0, None),
]),
(re.compile(r'\('), [
('lparen', 0, None),
]),
(re.compile(r'\)'), [
('rparen', 0, None),
]),
(re.compile(r','), [
('comma', 0, None),
]),
(re.compile(r'->'), [
('arrow', 0, None),
]),
(re.compile(r'null'), [
('null', 0, None),
]),
(re.compile(r'parser\s*<\s*expression\s*>\s*({)'), [
('parser_expression', None, None),
('lbrace', 1, None),
LexerStackPush('parser_expr'),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
('terminal', 1, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)(?=\s*\=)'), [
('ll1_rule_hint', None, None),
('nonterminal', 1, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)'), [
('nonterminal', 1, None),
]),
(re.compile(r'\$([0-9]+|\$)'), [
('nonterminal_reference', 1, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
('identifier', 0, None),
]),
(re.compile(r'"[^"]+"'), [
# (terminal, group, function)
('string', 0, None),
]),
(re.compile(r'[0-9]+'), [
# (terminal, group, function)
('integer', 0, None),
]),
]),
'parser_expr': OrderedDict([
(re.compile(r'\s+'), [
# (terminal, group, function)
]),
(re.compile(r'\#.*'), [
# (terminal, group, function)
]),
(re.compile(r'(\()(?=\s*[\*-])'), [
# (terminal, group, function)
('lparen', 1, None),
LexerStackPush('binding_power'),
]),
(re.compile(r'->'), [
# (terminal, group, function)
('arrow', 0, None),
]),
(re.compile(r'<=>'), [
# (terminal, group, function)
('expression_divider', 0, None),
]),
(re.compile(r'\|'), [
# (terminal, group, function)
('pipe', 0, None),
]),
(re.compile(r'='), [
# (terminal, group, function)
('equals', 0, None),
]),
(re.compile(r'{'), [
# (terminal, group, function)
('lbrace', 0, None),
]),
(re.compile(r'}'), [
# (terminal, group, function)
('rbrace', 0, None),
LexerAction('pop'),
]),
(re.compile(r'\('), [
# (terminal, group, function)
('lparen', 0, None),
]),
(re.compile(r'\)'), [
# (terminal, group, function)
('rparen', 0, None),
]),
(re.compile(r','), [
# (terminal, group, function)
('comma', 0, None),
]),
(re.compile(r':([a-zA-Z][a-zA-Z0-9_]*|_empty)'), [
# (terminal, group, function)
('terminal', 1, None),
]),
(re.compile(r'(\$([a-zA-Z][a-zA-Z0-9_]*))[ \t]*(=)[ \t]*\1[ \t]+:([a-zA-Z][a-zA-Z0-9_]*)[ \t]+\1(?![ \t]+(:|\$))'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 2, None),
('equals', 3, None),
('infix_rule_hint', None, None),
('nonterminal', 2, None),
('terminal', 4, None),
('nonterminal', 2, None),
]),
(re.compile(r'(\$([a-zA-Z][a-zA-Z0-9_]*))[ \t]*(=)[ \t]*:([a-zA-Z][a-zA-Z0-9_]*)[ \t]+\1(?)'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 2, None),
('equals', 3, None),
('prefix_rule_hint', None, None),
('terminal', 4, None),
('nonterminal', 2, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)\s*(=)'), [
# (terminal, group, function)
('expr_rule_hint', None, None),
('nonterminal', 1, None),
('equals', 2, None),
('mixfix_rule_hint', None, None),
]),
(re.compile(r'\$([a-zA-Z][a-zA-Z0-9_]*)'), [
# (terminal, group, function)
('nonterminal', 1, None),
]),
(re.compile(r'\$([0-9]+|\$)'), [
# (terminal, group, function)
('nonterminal_reference', 1, None),
]),
(re.compile(r'[a-zA-Z][a-zA-Z0-9_]*'), [
# (terminal, group, function)
('identifier', 0, None),
]),
(re.compile(r'"[^"]+"'), [
('string', 0, None),
]),
(re.compile(r'[0-9]+'), [
('integer', 0, None),
]),
]),
'binding_power': OrderedDict([
(re.compile(r'\s+'), [
]),
(re.compile(r'\*'), [
('asterisk', 0, None),
]),
(re.compile(r'-'), [
('dash', 0, None),
]),
(re.compile(r':'), [
('colon', 0, None),
]),
(re.compile(r'left'), [
('left', 0, None),
]),
(re.compile(r'right'), [
('right', 0, None),
]),
(re.compile(r'unary'), [
('unary', 0, None),
]),
(re.compile(r'\)'), [
('rparen', 0, None),
LexerAction('pop'),
]),
]),
}
def _advance_line_col(self, string, length, line, col):
for i in range(length):
if string[i] == '\n':
line += 1
col = 1
else:
col += 1
return (line, col)
    def _advance_string(self, ctx, string):
        """Consume *string* from the front of ctx.string, updating ctx.line/ctx.col accordingly."""
        (ctx.line, ctx.col) = self._advance_line_col(string, len(string), ctx.line, ctx.col)
        ctx.string = ctx.string[len(string):]
    def _next(self, ctx, debug=False):
        """Attempt one lexing step in the current mode (top of ctx.stack).

        Tries each regex of the mode in declaration order against the head of
        ctx.string.  On the first match, executes its output directives —
        token emissions (tuples), mode pushes (LexerStackPush) and actions
        (LexerAction 'pop') — consumes the matched text, and returns whether
        the match made progress.  Returns False when no regex matched.
        """
        for regex, outputs in self.regex[ctx.stack[-1]].items():
            if debug:
                from xtermcolor import colorize
                token_count = len(ctx.tokens)
                print('{1} ({2}, {3}) regex: {0}'.format(
                    colorize(regex.pattern, ansi=40), colorize(ctx.string[:20].replace('\n', '\\n'), ansi=15), ctx.line, ctx.col)
                )
            match = regex.match(ctx.string)
            if match:
                ctx.re_match = match
                for output in outputs:
                    if isinstance(output, tuple):
                        (terminal, group, function) = output
                        function = function if function else default_action
                        source_string = match.group(group) if group is not None else ''
                        # Position of the captured group, not of the whole match.
                        (group_line, group_col) = self._advance_line_col(ctx.string, match.start(group) if group else 0, ctx.line, ctx.col)
                        function(
                            ctx,
                            terminal,
                            source_string,
                            group_line,
                            group_col
                        )
                        if debug:
                            print('    matched: {}'.format(colorize(match.group(0).replace('\n', '\\n'), ansi=3)))
                            for token in ctx.tokens[token_count:]:
                                print('    emit: [{}] [{}, {}] [{}] stack:{} context:{}'.format(
                                    colorize(token.str, ansi=9),
                                    colorize(str(token.line), ansi=5),
                                    colorize(str(token.col), ansi=5),
                                    colorize(token.source_string, ansi=3),
                                    colorize(str(ctx.stack), ansi=4),
                                    colorize(str(ctx.user_context), ansi=13)
                                ))
                            token_count = len(ctx.tokens)
                    if isinstance(output, LexerStackPush):
                        ctx.stack.append(output.mode)
                        if debug:
                            print('    push on stack: {}'.format(colorize(output.mode, ansi=4)))
                    if isinstance(output, LexerAction):
                        if output.action == 'pop':
                            mode = ctx.stack.pop()
                            if debug:
                                print('    pop off stack: {}'.format(colorize(mode, ansi=4)))
                self._advance_string(ctx, match.group(0))
                return len(match.group(0)) > 0
        return False
    def lex(self, string, resource, errors=None, debug=False):
        """Tokenize *string* and return the list of emitted Terminals.

        Delegates to the *errors* handler (DefaultSyntaxErrorHandler when
        None) whenever no regex matches at the current position.
        """
        if errors is None:
            errors = DefaultSyntaxErrorHandler()
        string_copy = string
        user_context = init()
        ctx = LexerContext(string, resource, errors, user_context)
        while len(ctx.string):
            matched = self._next(ctx, debug)
            if matched == False:
                raise ctx.errors.unrecognized_token(string_copy, ctx.line, ctx.col)
        destroy(ctx.user_context)
        return ctx.tokens
def lex(source, resource, errors=None, debug=False):
    """Module-level convenience wrapper: lex *source* and wrap the tokens in a TokenStream."""
    return TokenStream(HermesLexer().lex(source, resource, errors, debug))
| true | true |
f71c81a8b1726d61edd4af204b0813341e2fdc17 | 20,285 | py | Python | pkg/suggestion/v1beta1/nas/enas/service.py | Adarsh2910/katib | cd095d6a33401cfddee8188943b60cd12c950c33 | [
"Apache-2.0"
] | null | null | null | pkg/suggestion/v1beta1/nas/enas/service.py | Adarsh2910/katib | cd095d6a33401cfddee8188943b60cd12c950c33 | [
"Apache-2.0"
] | 669 | 2021-01-25T10:26:46.000Z | 2022-03-31T22:01:58.000Z | pkg/suggestion/v1beta1/nas/enas/service.py | Adarsh2910/katib | cd095d6a33401cfddee8188943b60cd12c950c33 | [
"Apache-2.0"
] | 1 | 2021-09-10T06:56:10.000Z | 2021-09-10T06:56:10.000Z | import logging
from logging import getLogger, StreamHandler, INFO
import json
import os
import tensorflow as tf
import grpc
from pkg.apis.manager.v1beta1.python import api_pb2
from pkg.apis.manager.v1beta1.python import api_pb2_grpc
from pkg.suggestion.v1beta1.nas.enas.Controller import Controller
from pkg.suggestion.v1beta1.nas.enas.Operation import SearchSpace
from pkg.suggestion.v1beta1.nas.enas.AlgorithmSettings import (
parseAlgorithmSettings, algorithmSettingsValidator, enableNoneSettingsList)
from pkg.suggestion.v1beta1.internal.base_health_service import HealthServicer
class EnasExperiment:
    """Per-experiment state for the ENAS suggestion service: parsed search
    space, algorithm settings, and the LSTM Controller living in its own
    TensorFlow graph."""
    def __init__(self, request, logger):
        self.logger = logger
        self.experiment_name = request.experiment.name
        self.experiment = request.experiment
        self.num_trials = 1
        self.tf_graph = tf.Graph()
        self.ctrl_cache_file = "ctrl_cache/{}.ckpt".format(
            self.experiment_name)
        self.suggestion_step = 0
        self.algorithm_settings = None
        self.controller = None
        self.num_layers = None
        self.input_sizes = None
        self.output_sizes = None
        self.num_operations = None
        self.search_space = None
        self.opt_direction = None
        self.objective_name = None
        self.logger.info("-" * 100 + "\nSetting Up Suggestion for Experiment {}\n".format(
            self.experiment_name) + "-" * 100)
        # Order matters: the controller is built from the parameters parsed here.
        self._get_experiment_param()
        self._setup_controller()
        self.logger.info(">>> Suggestion for Experiment {} has been initialized.\n".format(
            self.experiment_name))
    def _get_experiment_param(self):
        """Extract search space, objective and algorithm settings from the experiment spec."""
        # this function need to
        # 1) get the number of layers
        # 2) get the I/O size
        # 3) get the available operations
        # 4) get the optimization direction (i.e. minimize or maximize)
        # 5) get the objective name
        # 6) get the algorithm settings
        # Get Search Space
        self.opt_direction = self.experiment.spec.objective.type
        self.objective_name = self.experiment.spec.objective.objective_metric_name
        nas_config = self.experiment.spec.nas_config
        graph_config = nas_config.graph_config
        self.num_layers = int(graph_config.num_layers)
        self.input_sizes = list(map(int, graph_config.input_sizes))
        self.output_sizes = list(map(int, graph_config.output_sizes))
        search_space_raw = nas_config.operations
        search_space_object = SearchSpace(search_space_raw)
        self.search_space = search_space_object.search_space
        self.num_operations = search_space_object.num_operations
        self.print_search_space()
        # Get Experiment Algorithm Settings
        settings_raw = self.experiment.spec.algorithm.algorithm_settings
        self.algorithm_settings = parseAlgorithmSettings(settings_raw)
        self.print_algorithm_settings()
    def _setup_controller(self):
        """Build the LSTM Controller and its training ops inside this experiment's graph."""
        with self.tf_graph.as_default():
            self.controller = Controller(
                num_layers=self.num_layers,
                num_operations=self.num_operations,
                controller_hidden_size=self.algorithm_settings['controller_hidden_size'],
                controller_temperature=self.algorithm_settings['controller_temperature'],
                controller_tanh_const=self.algorithm_settings['controller_tanh_const'],
                controller_entropy_weight=self.algorithm_settings['controller_entropy_weight'],
                controller_baseline_decay=self.algorithm_settings['controller_baseline_decay'],
                controller_learning_rate=self.algorithm_settings["controller_learning_rate"],
                controller_skip_target=self.algorithm_settings['controller_skip_target'],
                controller_skip_weight=self.algorithm_settings['controller_skip_weight'],
                controller_name="Ctrl_" + self.experiment_name,
                logger=self.logger)
            self.controller.build_trainer()
    def print_search_space(self):
        """Log the parsed search space; warns if called before initialization."""
        if self.search_space is None:
            self.logger.warning(
                "Error! The Suggestion has not yet been initialized!")
            return
        self.logger.info(
            ">>> Search Space for Experiment {}".format(self.experiment_name))
        for opt in self.search_space:
            opt.print_op(self.logger)
        self.logger.info(
            "There are {} operations in total.\n".format(self.num_operations))
    def print_algorithm_settings(self):
        """Log the parsed algorithm settings; warns if called before initialization."""
        if self.algorithm_settings is None:
            self.logger.warning(
                "Error! The Suggestion has not yet been initialized!")
            return
        self.logger.info(">>> Parameters of LSTM Controller for Experiment {}\n".format(
            self.experiment_name))
        for spec in self.algorithm_settings:
            # Width threshold only affects tab alignment of the log output.
            if len(spec) > 22:
                self.logger.info("{}:\t{}".format(
                    spec, self.algorithm_settings[spec]))
            else:
                self.logger.info("{}:\t\t{}".format(
                    spec, self.algorithm_settings[spec]))
        self.logger.info("")
class EnasService(api_pb2_grpc.SuggestionServicer, HealthServicer):
def __init__(self, logger=None):
super(EnasService, self).__init__()
self.is_first_run = True
self.experiment = None
if logger == None:
self.logger = getLogger(__name__)
FORMAT = '%(asctime)-15s Experiment %(experiment_name)s %(message)s'
logging.basicConfig(format=FORMAT)
handler = StreamHandler()
handler.setLevel(INFO)
self.logger.setLevel(INFO)
self.logger.addHandler(handler)
self.logger.propagate = False
else:
self.logger = logger
if not os.path.exists("ctrl_cache/"):
os.makedirs("ctrl_cache/")
def ValidateAlgorithmSettings(self, request, context):
self.logger.info("Validate Algorithm Settings start")
graph_config = request.experiment.spec.nas_config.graph_config
# Validate GraphConfig
# Check InputSize
if not graph_config.input_sizes:
return self.SetValidateContextError(context, "Missing InputSizes in GraphConfig:\n{}".format(graph_config))
# Check OutputSize
if not graph_config.output_sizes:
return self.SetValidateContextError(context, "Missing OutputSizes in GraphConfig:\n{}".format(graph_config))
# Check NumLayers
if not graph_config.num_layers:
return self.SetValidateContextError(context, "Missing NumLayers in GraphConfig:\n{}".format(graph_config))
# Validate each operation
operations_list = list(
request.experiment.spec.nas_config.operations.operation)
for operation in operations_list:
# Check OperationType
if not operation.operation_type:
return self.SetValidateContextError(context, "Missing operationType in Operation:\n{}".format(operation))
# Check ParameterConfigs
if not operation.parameter_specs.parameters:
return self.SetValidateContextError(context, "Missing ParameterConfigs in Operation:\n{}".format(operation))
# Validate each ParameterConfig in Operation
parameters_list = list(operation.parameter_specs.parameters)
for parameter in parameters_list:
# Check Name
if not parameter.name:
return self.SetValidateContextError(context, "Missing Name in ParameterConfig:\n{}".format(parameter))
# Check ParameterType
if not parameter.parameter_type:
return self.SetValidateContextError(context, "Missing ParameterType in ParameterConfig:\n{}".format(parameter))
# Check List in Categorical or Discrete Type
if parameter.parameter_type == api_pb2.CATEGORICAL or parameter.parameter_type == api_pb2.DISCRETE:
if not parameter.feasible_space.list:
return self.SetValidateContextError(context, "Missing List in ParameterConfig.feasibleSpace:\n{}".format(parameter))
# Check Max, Min, Step in Int or Double Type
elif parameter.parameter_type == api_pb2.INT or parameter.parameter_type == api_pb2.DOUBLE:
if not parameter.feasible_space.min and not parameter.feasible_space.max:
return self.SetValidateContextError(context, "Missing Max and Min in ParameterConfig.feasibleSpace:\n{}".format(parameter))
if parameter.parameter_type == api_pb2.DOUBLE and (not parameter.feasible_space.step or float(parameter.feasible_space.step) <= 0):
return self.SetValidateContextError(context, "Step parameter should be > 0 in ParameterConfig.feasibleSpace:\n{}".format(parameter))
# Validate Algorithm Settings
settings_raw = request.experiment.spec.algorithm.algorithm_settings
for setting in settings_raw:
if setting.name in algorithmSettingsValidator.keys():
if setting.name in enableNoneSettingsList and setting.value == "None":
continue
setting_type = algorithmSettingsValidator[setting.name][0]
setting_range = algorithmSettingsValidator[setting.name][1]
try:
converted_value = setting_type(setting.value)
except:
return self.SetValidateContextError(context, "Algorithm Setting {} must be {} type".format(setting.name, setting_type.__name__))
if setting_type == float:
if converted_value <= setting_range[0] or (setting_range[1] != 'inf' and converted_value > setting_range[1]):
return self.SetValidateContextError(context, "Algorithm Setting {}: {} with {} type must be in range ({}, {}]".format(
setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]
))
elif converted_value < setting_range[0]:
return self.SetValidateContextError(context, "Algorithm Setting {}: {} with {} type must be in range [{}, {})".format(
setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]
))
else:
return self.SetValidateContextError(context, "Unknown Algorithm Setting name: {}".format(setting.name))
self.logger.info("All Experiment Settings are Valid")
return api_pb2.ValidateAlgorithmSettingsReply()
    def SetValidateContextError(self, context, error_message):
        """Mark the gRPC context INVALID_ARGUMENT with *error_message*, log it,
        and return an empty ValidateAlgorithmSettingsReply."""
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        context.set_details(error_message)
        self.logger.info(error_message)
        return api_pb2.ValidateAlgorithmSettingsReply()
    def GetSuggestions(self, request, context):
        """Produce architecture candidates for the next suggestion step.

        First call: sample random architectures from the untrained controller.
        Subsequent calls: restore the controller checkpoint, train it on the
        averaged trial results, then sample new architectures.  Each candidate
        is returned as 'architecture' + 'nn_config' parameter assignments.
        """
        if self.is_first_run:
            self.experiment = EnasExperiment(request, self.logger)
        experiment = self.experiment
        if request.request_number > 0:
            experiment.num_trials = request.request_number
        self.logger.info("-" * 100 + "\nSuggestion Step {} for Experiment {}\n".format(
            experiment.suggestion_step, experiment.experiment_name) + "-" * 100)
        self.logger.info("")
        self.logger.info(">>> RequestNumber:\t\t{}".format(experiment.num_trials))
        self.logger.info("")
        with experiment.tf_graph.as_default():
            saver = tf.compat.v1.train.Saver()
            ctrl = experiment.controller
            controller_ops = {
                "loss": ctrl.loss,
                "entropy": ctrl.sample_entropy,
                "grad_norm": ctrl.grad_norm,
                "baseline": ctrl.baseline,
                "skip_rate": ctrl.skip_rate,
                "train_op": ctrl.train_op,
                "train_step": ctrl.train_step,
                "sample_arc": ctrl.sample_arc,
                "child_val_accuracy": ctrl.child_val_accuracy,
            }
            if self.is_first_run:
                self.logger.info(">>> First time running suggestion for {}. Random architecture will be given.".format(
                    experiment.experiment_name))
                with tf.compat.v1.Session() as sess:
                    sess.run(tf.compat.v1.global_variables_initializer())
                    candidates = list()
                    for _ in range(experiment.num_trials):
                        candidates.append(
                            sess.run(controller_ops["sample_arc"]))
                    # TODO: will use PVC to store the checkpoint to protect against unexpected suggestion pod restart
                    saver.save(sess, experiment.ctrl_cache_file)
                self.is_first_run = False
            else:
                with tf.compat.v1.Session() as sess:
                    saver.restore(sess, experiment.ctrl_cache_file)
                    result = self.GetEvaluationResult(request.trials)
                    # TODO: (andreyvelich) I deleted this part, should it be handle by controller?
                    # Sometimes training container may fail and GetEvaluationResult() will return None
                    # In this case, the Suggestion will:
                    # 1. Firstly try to respawn the previous trials after waiting for RESPAWN_SLEEP seconds
                    # 2. If respawning the trials for RESPAWN_LIMIT times still cannot collect valid results,
                    # then fail the task because it may indicate that the training container has errors.
                    if result is None:
                        self.logger.warning(
                            ">>> Suggestion has spawned trials, but they all failed.")
                        self.logger.warning(
                            ">>> Please check whether the training container is correctly implemented")
                        self.logger.info(">>> Experiment {} failed".format(
                            experiment.experiment_name))
                        # NOTE(review): returning a bare list from a gRPC
                        # handler that normally returns GetSuggestionsReply
                        # looks suspect — confirm how the framework treats it.
                        return []
                    # This LSTM network is designed to maximize the metrics
                    # However, if the user wants to minimize the metrics, we can take the negative of the result
                    if experiment.opt_direction == api_pb2.MINIMIZE:
                        result = -result
                    self.logger.info(">>> Suggestion updated. LSTM Controller Training\n")
                    log_every = experiment.algorithm_settings["controller_log_every_steps"]
                    for ctrl_step in range(1, experiment.algorithm_settings["controller_train_steps"]+1):
                        run_ops = [
                            controller_ops["loss"],
                            controller_ops["entropy"],
                            controller_ops["grad_norm"],
                            controller_ops["baseline"],
                            controller_ops["skip_rate"],
                            controller_ops["train_op"]
                        ]
                        loss, entropy, grad_norm, baseline, skip_rate, _ = sess.run(
                            fetches=run_ops,
                            feed_dict={controller_ops["child_val_accuracy"]: result})
                        controller_step = sess.run(controller_ops["train_step"])
                        if ctrl_step % log_every == 0:
                            log_string = ""
                            log_string += "Controller Step: {} - ".format(controller_step)
                            log_string += "Loss: {:.4f} - ".format(loss)
                            log_string += "Entropy: {:.9} - ".format(entropy)
                            log_string += "Gradient Norm: {:.7f} - ".format(grad_norm)
                            log_string += "Baseline={:.4f} - ".format(baseline)
                            log_string += "Skip Rate={:.4f}".format(skip_rate)
                            self.logger.info(log_string)
                    candidates = list()
                    for _ in range(experiment.num_trials):
                        candidates.append(
                            sess.run(controller_ops["sample_arc"]))
                    saver.save(sess, experiment.ctrl_cache_file)
        organized_candidates = list()
        parameter_assignments = list()
        for i in range(experiment.num_trials):
            arc = candidates[i].tolist()
            # Split the flat arc vector into per-layer slices of growing width.
            organized_arc = [0 for _ in range(experiment.num_layers)]
            record = 0
            for l in range(experiment.num_layers):
                organized_arc[l] = arc[record: record + l + 1]
                record += l + 1
            organized_candidates.append(organized_arc)
            nn_config = dict()
            nn_config['num_layers'] = experiment.num_layers
            nn_config['input_sizes'] = experiment.input_sizes
            nn_config['output_sizes'] = experiment.output_sizes
            nn_config['embedding'] = dict()
            for l in range(experiment.num_layers):
                opt = organized_arc[l][0]
                nn_config['embedding'][opt] = experiment.search_space[opt].get_dict()
            organized_arc_json = json.dumps(organized_arc)
            nn_config_json = json.dumps(nn_config)
            organized_arc_str = str(organized_arc_json).replace('\"', '\'')
            nn_config_str = str(nn_config_json).replace('\"', '\'')
            self.logger.info(
                "\n>>> New Neural Network Architecture Candidate #{} (internal representation):".format(i))
            self.logger.info(organized_arc_json)
            self.logger.info("\n>>> Corresponding Seach Space Description:")
            self.logger.info(nn_config_str)
            parameter_assignments.append(
                api_pb2.GetSuggestionsReply.ParameterAssignments(
                    assignments=[
                        api_pb2.ParameterAssignment(
                            name="architecture",
                            value=organized_arc_str
                        ),
                        api_pb2.ParameterAssignment(
                            name="nn_config",
                            value=nn_config_str
                        )
                    ]
                )
            )
        self.logger.info("")
        self.logger.info(">>> {} Trials were created for Experiment {}".format(
            experiment.num_trials, experiment.experiment_name))
        self.logger.info("")
        experiment.suggestion_step += 1
        return api_pb2.GetSuggestionsReply(parameter_assignments=parameter_assignments)
    def GetEvaluationResult(self, trials_list):
        """Aggregate the objective metric over all finished trials.

        Args:
            trials_list: iterable of api_pb2 Trial messages.

        Returns:
            The mean objective value across SUCCEEDED trials, or
            implicitly None when no trial has succeeded yet (the caller
            in GetSuggestions checks for None and aborts the experiment).
        """
        completed_trials = dict()
        failed_trials = []
        for t in trials_list:
            if t.status.condition == api_pb2.TrialStatus.TrialConditionType.SUCCEEDED:
                target_value = None
                for metric in t.status.observation.metrics:
                    if metric.name == t.spec.objective.objective_metric_name:
                        target_value = metric.value
                        break

                # Take only the first metric value
                # In current cifar-10 training container this value is the latest
                # NOTE(review): if the objective metric is missing from the
                # observation, target_value stays None and float(None) below
                # raises TypeError — confirm the metrics collector always
                # reports the objective metric for SUCCEEDED trials.
                completed_trials[t.name] = float(target_value)

            if t.status.condition == api_pb2.TrialStatus.TrialConditionType.FAILED:
                failed_trials.append(t.name)

        n_completed = len(completed_trials)
        self.logger.info(">>> By now: {} Trials succeeded, {} Trials failed".format(
            n_completed, len(failed_trials)))
        for tname in completed_trials:
            self.logger.info("Trial: {}, Value: {}".format(
                tname, completed_trials[tname]))
        for tname in failed_trials:
            self.logger.info("Trial: {} was failed".format(tname))

        if n_completed > 0:
            avg_metrics = sum(completed_trials.values()) / n_completed
            self.logger.info("The average is {}\n".format(avg_metrics))

            return avg_metrics
| 46.956019 | 156 | 0.603944 | import logging
from logging import getLogger, StreamHandler, INFO
import json
import os
import tensorflow as tf
import grpc
from pkg.apis.manager.v1beta1.python import api_pb2
from pkg.apis.manager.v1beta1.python import api_pb2_grpc
from pkg.suggestion.v1beta1.nas.enas.Controller import Controller
from pkg.suggestion.v1beta1.nas.enas.Operation import SearchSpace
from pkg.suggestion.v1beta1.nas.enas.AlgorithmSettings import (
parseAlgorithmSettings, algorithmSettingsValidator, enableNoneSettingsList)
from pkg.suggestion.v1beta1.internal.base_health_service import HealthServicer
class EnasExperiment:
    """Per-experiment state for the ENAS suggestion service.

    Holds the parsed experiment spec, the parsed algorithm settings and a
    dedicated TensorFlow graph with the LSTM Controller that samples
    architectures.
    """

    def __init__(self, request, logger):
        self.logger = logger
        self.experiment_name = request.experiment.name
        self.experiment = request.experiment
        # Architectures to sample per GetSuggestions call; overwritten by
        # request.request_number in GetSuggestions when it is > 0.
        self.num_trials = 1
        # Dedicated graph so multiple experiments do not share TF state.
        self.tf_graph = tf.Graph()
        # Checkpoint path used to persist the controller between RPC calls.
        self.ctrl_cache_file = "ctrl_cache/{}.ckpt".format(
            self.experiment_name)
        self.suggestion_step = 0
        self.algorithm_settings = None
        self.controller = None
        self.num_layers = None
        self.input_sizes = None
        self.output_sizes = None
        self.num_operations = None
        self.search_space = None
        self.opt_direction = None
        self.objective_name = None
        self.logger.info("-" * 100 + "\nSetting Up Suggestion for Experiment {}\n".format(
            self.experiment_name) + "-" * 100)
        # Parse the spec first, then build the controller from it.
        self._get_experiment_param()
        self._setup_controller()
        self.logger.info(">>> Suggestion for Experiment {} has been initialized.\n".format(
            self.experiment_name))

    def _get_experiment_param(self):
        """Parse objective, NAS graph config, search space and settings."""
        self.opt_direction = self.experiment.spec.objective.type
        self.objective_name = self.experiment.spec.objective.objective_metric_name
        nas_config = self.experiment.spec.nas_config
        graph_config = nas_config.graph_config
        self.num_layers = int(graph_config.num_layers)
        self.input_sizes = list(map(int, graph_config.input_sizes))
        self.output_sizes = list(map(int, graph_config.output_sizes))
        search_space_raw = nas_config.operations
        search_space_object = SearchSpace(search_space_raw)
        self.search_space = search_space_object.search_space
        self.num_operations = search_space_object.num_operations
        self.print_search_space()
        settings_raw = self.experiment.spec.algorithm.algorithm_settings
        self.algorithm_settings = parseAlgorithmSettings(settings_raw)
        self.print_algorithm_settings()

    def _setup_controller(self):
        """Build the LSTM controller and its trainer inside this graph."""
        with self.tf_graph.as_default():
            self.controller = Controller(
                num_layers=self.num_layers,
                num_operations=self.num_operations,
                controller_hidden_size=self.algorithm_settings['controller_hidden_size'],
                controller_temperature=self.algorithm_settings['controller_temperature'],
                controller_tanh_const=self.algorithm_settings['controller_tanh_const'],
                controller_entropy_weight=self.algorithm_settings['controller_entropy_weight'],
                controller_baseline_decay=self.algorithm_settings['controller_baseline_decay'],
                controller_learning_rate=self.algorithm_settings["controller_learning_rate"],
                controller_skip_target=self.algorithm_settings['controller_skip_target'],
                controller_skip_weight=self.algorithm_settings['controller_skip_weight'],
                controller_name="Ctrl_" + self.experiment_name,
                logger=self.logger)

            self.controller.build_trainer()

    def print_search_space(self):
        """Log every operation in the parsed search space."""
        if self.search_space is None:
            self.logger.warning(
                "Error! The Suggestion has not yet been initialized!")
            return

        self.logger.info(
            ">>> Search Space for Experiment {}".format(self.experiment_name))
        for opt in self.search_space:
            opt.print_op(self.logger)
        self.logger.info(
            "There are {} operations in total.\n".format(self.num_operations))

    def print_algorithm_settings(self):
        """Log the controller hyper-parameters, tab-aligned by name length."""
        if self.algorithm_settings is None:
            self.logger.warning(
                "Error! The Suggestion has not yet been initialized!")
            return

        self.logger.info(">>> Parameters of LSTM Controller for Experiment {}\n".format(
            self.experiment_name))
        for spec in self.algorithm_settings:
            # Names longer than 22 chars get one tab, shorter ones two,
            # to keep the value column roughly aligned.
            if len(spec) > 22:
                self.logger.info("{}:\t{}".format(
                    spec, self.algorithm_settings[spec]))
            else:
                self.logger.info("{}:\t\t{}".format(
                    spec, self.algorithm_settings[spec]))
        self.logger.info("")
class EnasService(api_pb2_grpc.SuggestionServicer, HealthServicer):
def __init__(self, logger=None):
super(EnasService, self).__init__()
self.is_first_run = True
self.experiment = None
if logger == None:
self.logger = getLogger(__name__)
FORMAT = '%(asctime)-15s Experiment %(experiment_name)s %(message)s'
logging.basicConfig(format=FORMAT)
handler = StreamHandler()
handler.setLevel(INFO)
self.logger.setLevel(INFO)
self.logger.addHandler(handler)
self.logger.propagate = False
else:
self.logger = logger
if not os.path.exists("ctrl_cache/"):
os.makedirs("ctrl_cache/")
def ValidateAlgorithmSettings(self, request, context):
self.logger.info("Validate Algorithm Settings start")
graph_config = request.experiment.spec.nas_config.graph_config
if not graph_config.input_sizes:
return self.SetValidateContextError(context, "Missing InputSizes in GraphConfig:\n{}".format(graph_config))
if not graph_config.output_sizes:
return self.SetValidateContextError(context, "Missing OutputSizes in GraphConfig:\n{}".format(graph_config))
if not graph_config.num_layers:
return self.SetValidateContextError(context, "Missing NumLayers in GraphConfig:\n{}".format(graph_config))
operations_list = list(
request.experiment.spec.nas_config.operations.operation)
for operation in operations_list:
if not operation.operation_type:
return self.SetValidateContextError(context, "Missing operationType in Operation:\n{}".format(operation))
if not operation.parameter_specs.parameters:
return self.SetValidateContextError(context, "Missing ParameterConfigs in Operation:\n{}".format(operation))
parameters_list = list(operation.parameter_specs.parameters)
for parameter in parameters_list:
if not parameter.name:
return self.SetValidateContextError(context, "Missing Name in ParameterConfig:\n{}".format(parameter))
if not parameter.parameter_type:
return self.SetValidateContextError(context, "Missing ParameterType in ParameterConfig:\n{}".format(parameter))
if parameter.parameter_type == api_pb2.CATEGORICAL or parameter.parameter_type == api_pb2.DISCRETE:
if not parameter.feasible_space.list:
return self.SetValidateContextError(context, "Missing List in ParameterConfig.feasibleSpace:\n{}".format(parameter))
elif parameter.parameter_type == api_pb2.INT or parameter.parameter_type == api_pb2.DOUBLE:
if not parameter.feasible_space.min and not parameter.feasible_space.max:
return self.SetValidateContextError(context, "Missing Max and Min in ParameterConfig.feasibleSpace:\n{}".format(parameter))
if parameter.parameter_type == api_pb2.DOUBLE and (not parameter.feasible_space.step or float(parameter.feasible_space.step) <= 0):
return self.SetValidateContextError(context, "Step parameter should be > 0 in ParameterConfig.feasibleSpace:\n{}".format(parameter))
settings_raw = request.experiment.spec.algorithm.algorithm_settings
for setting in settings_raw:
if setting.name in algorithmSettingsValidator.keys():
if setting.name in enableNoneSettingsList and setting.value == "None":
continue
setting_type = algorithmSettingsValidator[setting.name][0]
setting_range = algorithmSettingsValidator[setting.name][1]
try:
converted_value = setting_type(setting.value)
except:
return self.SetValidateContextError(context, "Algorithm Setting {} must be {} type".format(setting.name, setting_type.__name__))
if setting_type == float:
if converted_value <= setting_range[0] or (setting_range[1] != 'inf' and converted_value > setting_range[1]):
return self.SetValidateContextError(context, "Algorithm Setting {}: {} with {} type must be in range ({}, {}]".format(
setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]
))
elif converted_value < setting_range[0]:
return self.SetValidateContextError(context, "Algorithm Setting {}: {} with {} type must be in range [{}, {})".format(
setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]
))
else:
return self.SetValidateContextError(context, "Unknown Algorithm Setting name: {}".format(setting.name))
self.logger.info("All Experiment Settings are Valid")
return api_pb2.ValidateAlgorithmSettingsReply()
def SetValidateContextError(self, context, error_message):
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(error_message)
self.logger.info(error_message)
return api_pb2.ValidateAlgorithmSettingsReply()
def GetSuggestions(self, request, context):
if self.is_first_run:
self.experiment = EnasExperiment(request, self.logger)
experiment = self.experiment
if request.request_number > 0:
experiment.num_trials = request.request_number
self.logger.info("-" * 100 + "\nSuggestion Step {} for Experiment {}\n".format(
experiment.suggestion_step, experiment.experiment_name) + "-" * 100)
self.logger.info("")
self.logger.info(">>> RequestNumber:\t\t{}".format(experiment.num_trials))
self.logger.info("")
with experiment.tf_graph.as_default():
saver = tf.compat.v1.train.Saver()
ctrl = experiment.controller
controller_ops = {
"loss": ctrl.loss,
"entropy": ctrl.sample_entropy,
"grad_norm": ctrl.grad_norm,
"baseline": ctrl.baseline,
"skip_rate": ctrl.skip_rate,
"train_op": ctrl.train_op,
"train_step": ctrl.train_step,
"sample_arc": ctrl.sample_arc,
"child_val_accuracy": ctrl.child_val_accuracy,
}
if self.is_first_run:
self.logger.info(">>> First time running suggestion for {}. Random architecture will be given.".format(
experiment.experiment_name))
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
candidates = list()
for _ in range(experiment.num_trials):
candidates.append(
sess.run(controller_ops["sample_arc"]))
saver.save(sess, experiment.ctrl_cache_file)
self.is_first_run = False
else:
with tf.compat.v1.Session() as sess:
saver.restore(sess, experiment.ctrl_cache_file)
result = self.GetEvaluationResult(request.trials)
if result is None:
self.logger.warning(
">>> Suggestion has spawned trials, but they all failed.")
self.logger.warning(
">>> Please check whether the training container is correctly implemented")
self.logger.info(">>> Experiment {} failed".format(
experiment.experiment_name))
return []
if experiment.opt_direction == api_pb2.MINIMIZE:
result = -result
self.logger.info(">>> Suggestion updated. LSTM Controller Training\n")
log_every = experiment.algorithm_settings["controller_log_every_steps"]
for ctrl_step in range(1, experiment.algorithm_settings["controller_train_steps"]+1):
run_ops = [
controller_ops["loss"],
controller_ops["entropy"],
controller_ops["grad_norm"],
controller_ops["baseline"],
controller_ops["skip_rate"],
controller_ops["train_op"]
]
loss, entropy, grad_norm, baseline, skip_rate, _ = sess.run(
fetches=run_ops,
feed_dict={controller_ops["child_val_accuracy"]: result})
controller_step = sess.run(controller_ops["train_step"])
if ctrl_step % log_every == 0:
log_string = ""
log_string += "Controller Step: {} - ".format(controller_step)
log_string += "Loss: {:.4f} - ".format(loss)
log_string += "Entropy: {:.9} - ".format(entropy)
log_string += "Gradient Norm: {:.7f} - ".format(grad_norm)
log_string += "Baseline={:.4f} - ".format(baseline)
log_string += "Skip Rate={:.4f}".format(skip_rate)
self.logger.info(log_string)
candidates = list()
for _ in range(experiment.num_trials):
candidates.append(
sess.run(controller_ops["sample_arc"]))
saver.save(sess, experiment.ctrl_cache_file)
organized_candidates = list()
parameter_assignments = list()
for i in range(experiment.num_trials):
arc = candidates[i].tolist()
organized_arc = [0 for _ in range(experiment.num_layers)]
record = 0
for l in range(experiment.num_layers):
organized_arc[l] = arc[record: record + l + 1]
record += l + 1
organized_candidates.append(organized_arc)
nn_config = dict()
nn_config['num_layers'] = experiment.num_layers
nn_config['input_sizes'] = experiment.input_sizes
nn_config['output_sizes'] = experiment.output_sizes
nn_config['embedding'] = dict()
for l in range(experiment.num_layers):
opt = organized_arc[l][0]
nn_config['embedding'][opt] = experiment.search_space[opt].get_dict()
organized_arc_json = json.dumps(organized_arc)
nn_config_json = json.dumps(nn_config)
organized_arc_str = str(organized_arc_json).replace('\"', '\'')
nn_config_str = str(nn_config_json).replace('\"', '\'')
self.logger.info(
"\n>>> New Neural Network Architecture Candidate #{} (internal representation):".format(i))
self.logger.info(organized_arc_json)
self.logger.info("\n>>> Corresponding Seach Space Description:")
self.logger.info(nn_config_str)
parameter_assignments.append(
api_pb2.GetSuggestionsReply.ParameterAssignments(
assignments=[
api_pb2.ParameterAssignment(
name="architecture",
value=organized_arc_str
),
api_pb2.ParameterAssignment(
name="nn_config",
value=nn_config_str
)
]
)
)
self.logger.info("")
self.logger.info(">>> {} Trials were created for Experiment {}".format(
experiment.num_trials, experiment.experiment_name))
self.logger.info("")
experiment.suggestion_step += 1
return api_pb2.GetSuggestionsReply(parameter_assignments=parameter_assignments)
def GetEvaluationResult(self, trials_list):
completed_trials = dict()
failed_trials = []
for t in trials_list:
if t.status.condition == api_pb2.TrialStatus.TrialConditionType.SUCCEEDED:
target_value = None
for metric in t.status.observation.metrics:
if metric.name == t.spec.objective.objective_metric_name:
target_value = metric.value
break
completed_trials[t.name] = float(target_value)
if t.status.condition == api_pb2.TrialStatus.TrialConditionType.FAILED:
failed_trials.append(t.name)
n_completed = len(completed_trials)
self.logger.info(">>> By now: {} Trials succeeded, {} Trials failed".format(
n_completed, len(failed_trials)))
for tname in completed_trials:
self.logger.info("Trial: {}, Value: {}".format(
tname, completed_trials[tname]))
for tname in failed_trials:
self.logger.info("Trial: {} was failed".format(tname))
if n_completed > 0:
avg_metrics = sum(completed_trials.values()) / n_completed
self.logger.info("The average is {}\n".format(avg_metrics))
return avg_metrics
| true | true |
f71c84d85474a8f5aa729fc1e185f9a029c9a09c | 6,121 | py | Python | Monte-Carlo-Attacks/Monte-Carlo-CIFAR_VAE/cifar10_train.py | SAP-samples/security-research-mi-gen-nn | 15627f73fcc497c87a67f41957f6b82881dff353 | [
"Apache-2.0"
] | 5 | 2020-02-21T15:13:57.000Z | 2021-08-05T15:18:40.000Z | Monte-Carlo-Attacks/Monte-Carlo-CIFAR_VAE/cifar10_train.py | SAP-samples/security-research-membership-inference-against-generative-networks | 15627f73fcc497c87a67f41957f6b82881dff353 | [
"Apache-2.0"
] | null | null | null | Monte-Carlo-Attacks/Monte-Carlo-CIFAR_VAE/cifar10_train.py | SAP-samples/security-research-membership-inference-against-generative-networks | 15627f73fcc497c87a67f41957f6b82881dff353 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import pickle
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Layer
from keras.layers import Conv2D, Conv2DTranspose
from keras.models import Model
from keras import backend as K
from keras import metrics
# import parameters
from cifar10_params import *
from utils import *
# tensorflow uses channels_last
# theano uses channels_first
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
# encoder architecture
x = Input(shape=original_img_size)
conv_1 = Conv2D(img_chns,
kernel_size=(2, 2),
padding='same', activation='relu')(x)
conv_2 = Conv2D(filters,
kernel_size=(2, 2),
padding='same', activation='relu',
strides=(2, 2))(conv_1)
conv_3 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_2)
conv_4 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)
# mean and variance for latent variables
z_mean = Dense(latent_dim)(hidden)
z_log_var = Dense(latent_dim)(hidden)
# sampling layer
def sampling(args):
    """Reparameterization trick: draw z ~ N(z_mean, sigma^2) as a
    differentiable function of (z_mean, z_log_var).

    Args:
        args: pair of tensors (z_mean, z_log_var), each of shape
            (batch, latent_dim). z_log_var is the log *variance* — this
            is how the KL term in CustomVariationalLayer interprets it
            (it uses K.exp(z_log_var) as the variance).

    Returns:
        Tensor of shape (batch, latent_dim) sampled from the latent
        distribution.
    """
    z_mean, z_log_var = args
    # One standard-normal noise draw per latent dimension.
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=epsilon_std)
    # std = exp(0.5 * log_var). The original code used K.exp(z_log_var)
    # here (i.e. treated z_log_var as log(std)), which is inconsistent
    # with the KL divergence term where K.exp(z_log_var) is the variance.
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# decoder architecture
decoder_hid = Dense(int(intermediate_dim), activation='relu')
decoder_upsample = Dense(int(filters * img_rows / 2 * img_cols / 2), activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, filters, int(img_rows / 2), int(img_cols / 2))
else:
output_shape = (batch_size, int(img_rows / 2), int(img_cols / 2), filters)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Conv2DTranspose(filters,
kernel_size=num_conv,
padding='same',
strides=1,
activation='relu')
decoder_deconv_2 = Conv2DTranspose(filters,
kernel_size=num_conv,
padding='same',
strides=1,
activation='relu')
decoder_deconv_3_upsamp = Conv2DTranspose(filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='valid',
activation='relu')
decoder_mean_squash = Conv2D(img_chns,
kernel_size=2,
padding='valid',
activation='sigmoid')
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
# Custom loss layer
class CustomVariationalLayer(Layer):
    """Identity layer whose only job is to attach the VAE training loss
    (pixel-wise cross-entropy reconstruction term + KL regularizer) via
    Keras's add_loss mechanism."""

    def __init__(self, **kwargs):
        self.is_placeholder = True
        super(CustomVariationalLayer, self).__init__(**kwargs)

    def vae_loss(self, x, x_decoded_mean_squash):
        # Flatten both images so binary cross-entropy is taken per pixel.
        flat_true = K.flatten(x)
        flat_pred = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(
            flat_true, flat_pred)
        # Closed-form KL(q(z|x) || N(0, I)); z_mean/z_log_var are the
        # module-level encoder outputs.
        kl_loss = - 0.5 * K.mean(
            1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss)

    def call(self, inputs):
        x, x_decoded_mean_squash = inputs[0], inputs[1]
        self.add_loss(self.vae_loss(x, x_decoded_mean_squash), inputs=inputs)
        # Pass the input through unchanged; only the loss side-effect matters.
        return x
y = CustomVariationalLayer()([x, x_decoded_mean_squash])
# entire model
vae = Model(x, y)
vae.compile(optimizer='rmsprop', loss=None)
vae.summary()
# load dataset
# (x_train, _), (x_test, y_test) = cifar10.load_data()
# x_train = x_train.astype('float32') / 255.
# x_train = x_train.reshape((x_train.shape[0],) + original_img_size)
# x_test = x_test.astype('float32') / 255.
# x_test = x_test.reshape((x_test.shape[0],) + original_img_size)
x_train, x_test = load_cifar10_with_validation(0.1, False)
# training
history = vae.fit(x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, None))
# encoder from learned model
encoder = Model(x, z_mean)
# generator / decoder from learned model
decoder_input = Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
generator = Model(decoder_input, _x_decoded_mean_squash)
# save all 3 models for future use - especially generator
vae.save('./models/cifar10_ld_%d_conv_%d_id_%d_e_%d_vae.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
encoder.save('./models/cifar10_ld_%d_conv_%d_id_%d_e_%d_encoder.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
generator.save('./models/cifar10_ld_%d_conv_%d_id_%d_e_%d_generator.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
# save training history
fname = './models/cifar10_ld_%d_conv_%d_id_%d_e_%d_history.pkl' % (latent_dim, num_conv, intermediate_dim, epochs)
with open(fname, 'wb') as file_pi:
pickle.dump(history.history, file_pi)
| 37.09697 | 123 | 0.663944 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import pickle
from keras.layers import Input, Dense, Lambda, Flatten, Reshape, Layer
from keras.layers import Conv2D, Conv2DTranspose
from keras.models import Model
from keras import backend as K
from keras import metrics
from cifar10_params import *
from utils import *
if K.image_data_format() == 'channels_first':
original_img_size = (img_chns, img_rows, img_cols)
else:
original_img_size = (img_rows, img_cols, img_chns)
x = Input(shape=original_img_size)
conv_1 = Conv2D(img_chns,
kernel_size=(2, 2),
padding='same', activation='relu')(x)
conv_2 = Conv2D(filters,
kernel_size=(2, 2),
padding='same', activation='relu',
strides=(2, 2))(conv_1)
conv_3 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_2)
conv_4 = Conv2D(filters,
kernel_size=num_conv,
padding='same', activation='relu',
strides=1)(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)
z_mean = Dense(latent_dim)(hidden)
z_log_var = Dense(latent_dim)(hidden)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
mean=0., stddev=epsilon_std)
return z_mean + K.exp(z_log_var) * epsilon
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
decoder_hid = Dense(int(intermediate_dim), activation='relu')
decoder_upsample = Dense(int(filters * img_rows / 2 * img_cols / 2), activation='relu')
if K.image_data_format() == 'channels_first':
output_shape = (batch_size, filters, int(img_rows / 2), int(img_cols / 2))
else:
output_shape = (batch_size, int(img_rows / 2), int(img_cols / 2), filters)
decoder_reshape = Reshape(output_shape[1:])
decoder_deconv_1 = Conv2DTranspose(filters,
kernel_size=num_conv,
padding='same',
strides=1,
activation='relu')
decoder_deconv_2 = Conv2DTranspose(filters,
kernel_size=num_conv,
padding='same',
strides=1,
activation='relu')
decoder_deconv_3_upsamp = Conv2DTranspose(filters,
kernel_size=(3, 3),
strides=(2, 2),
padding='valid',
activation='relu')
decoder_mean_squash = Conv2D(img_chns,
kernel_size=2,
padding='valid',
activation='sigmoid')
hid_decoded = decoder_hid(z)
up_decoded = decoder_upsample(hid_decoded)
reshape_decoded = decoder_reshape(up_decoded)
deconv_1_decoded = decoder_deconv_1(reshape_decoded)
deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
class CustomVariationalLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(CustomVariationalLayer, self).__init__(**kwargs)
def vae_loss(self, x, x_decoded_mean_squash):
x = K.flatten(x)
x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(xent_loss + kl_loss)
def call(self, inputs):
x = inputs[0]
x_decoded_mean_squash = inputs[1]
loss = self.vae_loss(x, x_decoded_mean_squash)
self.add_loss(loss, inputs=inputs)
return x
y = CustomVariationalLayer()([x, x_decoded_mean_squash])
vae = Model(x, y)
vae.compile(optimizer='rmsprop', loss=None)
vae.summary()
x_train, x_test = load_cifar10_with_validation(0.1, False)
history = vae.fit(x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, None))
encoder = Model(x, z_mean)
decoder_input = Input(shape=(latent_dim,))
_hid_decoded = decoder_hid(decoder_input)
_up_decoded = decoder_upsample(_hid_decoded)
_reshape_decoded = decoder_reshape(_up_decoded)
_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
generator = Model(decoder_input, _x_decoded_mean_squash)
vae.save('./models/cifar10_ld_%d_conv_%d_id_%d_e_%d_vae.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
encoder.save('./models/cifar10_ld_%d_conv_%d_id_%d_e_%d_encoder.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
generator.save('./models/cifar10_ld_%d_conv_%d_id_%d_e_%d_generator.h5' % (latent_dim, num_conv, intermediate_dim, epochs))
fname = './models/cifar10_ld_%d_conv_%d_id_%d_e_%d_history.pkl' % (latent_dim, num_conv, intermediate_dim, epochs)
with open(fname, 'wb') as file_pi:
pickle.dump(history.history, file_pi)
| true | true |
f71c8578ec45fa13ff3af1382cbd44bcc86f9bbe | 93 | py | Python | CVgallery/apps.py | siavashMehran/Portfolio | a592ec51122d96e8e336365fd3cd039a7f223221 | [
"MIT"
] | null | null | null | CVgallery/apps.py | siavashMehran/Portfolio | a592ec51122d96e8e336365fd3cd039a7f223221 | [
"MIT"
] | null | null | null | CVgallery/apps.py | siavashMehran/Portfolio | a592ec51122d96e8e336365fd3cd039a7f223221 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class CvgalleryConfig(AppConfig):
    """Django app-registry configuration for the CVgallery app."""
    # Must match the app's package name so Django can locate it.
    name = 'CVgallery'
| 15.5 | 33 | 0.763441 | from django.apps import AppConfig
class CvgalleryConfig(AppConfig):
name = 'CVgallery'
| true | true |
f71c861ea7dd94eca7c2a5bcbc500411f6590433 | 2,705 | py | Python | castle/kivy_wrapper.py | chappers/castle | 0abdb4eed91c45b443c0de8f029dff983f921363 | [
"MIT"
] | null | null | null | castle/kivy_wrapper.py | chappers/castle | 0abdb4eed91c45b443c0de8f029dff983f921363 | [
"MIT"
] | 1 | 2020-11-22T22:00:13.000Z | 2020-11-22T22:00:13.000Z | castle/kivy_wrapper.py | chappers/castle | 0abdb4eed91c45b443c0de8f029dff983f921363 | [
"MIT"
] | null | null | null | """
A simple kivy wrapper
"""
import kivy
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.clock import Clock
"""
A really simple discrete environment to test for changing policies/environment
"""
import numpy as np
import random
from gym.spaces import Box, Discrete, Dict
import gym
from gym import Wrapper
class KivyWrapper(BoxLayout):
    """Kivy root widget that renders a text-based environment.

    The whole screen is one monospace Label; keyboard input is buffered
    in ``self.action`` and consumed once per frame by update().
    """

    def __init__(self, env=None, **kwargs):
        super(KivyWrapper, self).__init__(**kwargs)
        # Environment to drive; expected to expose play()/render()/reset().
        self.env = env
        # Last key pressed, consumed (and cleared) by update().
        self.action = None
        # Monospace font keeps the ASCII board columns aligned.
        self.info = Label(text="Starting Game", font_name="RobotoMono-Regular")
        # self._trigger = Clock.schedule_interval(self.update, 1.0/60.0)
        self.add_widget(self.info)
        self._keyboard = Window.request_keyboard(self._keyboard_closed, self, "text")
        if self._keyboard.widget:
            # If it exists, this widget is a VKeyboard object which you can use
            # to change the keyboard layout.
            pass
        self._keyboard.bind(on_key_down=self._on_keyboard_down)

    def show_screen(self, board, info, update):
        """Render *board* (iterable of lines) plus *info* lines into the label."""
        text = ""
        if update and board is not None:
            text += "\n".join(board)
            text += "\n"
        text += "\n".join(info)
        self.info.text = text

    def update(self, dt):
        """Per-frame tick: apply any buffered key press to the environment."""
        # Keys "0".."9" arrive as strings; convert to integer action codes.
        for idx in range(10):
            if self.action == str(idx):
                self.action = idx
        if self.action is not None:
            # NOTE(review): `done` is ignored here — confirm game-over
            # handling happens inside env.play itself.
            text_render, info, done = self.env.play(self.action)
        else:
            text_render, info = self.env.render()
        self.show_screen(text_render, info, True)
        # Each key press is consumed exactly once.
        self.action = None

    def _keyboard_closed(self):
        # print('My keyboard have been closed!')
        self._keyboard.unbind(on_key_down=self._on_keyboard_down)
        self._keyboard = None

    def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Buffer the pressed key; Escape releases the keyboard."""
        key_register = modifiers + [text]
        # print("Key input received is:\n{}".format(key_register))
        self.action = text
        # Keycode is composed of an integer + a string
        # If we hit escape, release the keyboard
        if keycode[1] == "escape":
            keyboard.release()

        # Return True to accept the key. Otherwise, it will be used by
        # the system.
        return True
def app_wrapper(env):
    """Build a Kivy App subclass that hosts *env* in a KivyWrapper.

    Returns the class itself (not an instance) so the caller decides
    when to instantiate and ``run()`` it. The inner class name is kept
    as ``KivyApp`` because Kivy derives the .kv filename from it.
    """
    class KivyApp(App):
        def build(self):
            # Root widget renders the environment; reset before frame one.
            root = KivyWrapper(env=env)
            root.env.reset()
            # Drive the game loop at ~60 FPS.
            Clock.schedule_interval(root.update, 1.0 / 60.0)
            return root

    return KivyApp
| 29.725275 | 85 | 0.629945 |
import kivy
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.clock import Clock
import numpy as np
import random
from gym.spaces import Box, Discrete, Dict
import gym
from gym import Wrapper
class KivyWrapper(BoxLayout):
def __init__(self, env=None, **kwargs):
super(KivyWrapper, self).__init__(**kwargs)
self.env = env
self.action = None
self.info = Label(text="Starting Game", font_name="RobotoMono-Regular")
self.add_widget(self.info)
self._keyboard = Window.request_keyboard(self._keyboard_closed, self, "text")
if self._keyboard.widget:
pass
self._keyboard.bind(on_key_down=self._on_keyboard_down)
def show_screen(self, board, info, update):
text = ""
if update and board is not None:
text += "\n".join(board)
text += "\n"
text += "\n".join(info)
self.info.text = text
def update(self, dt):
for idx in range(10):
if self.action == str(idx):
self.action = idx
if self.action is not None:
text_render, info, done = self.env.play(self.action)
else:
text_render, info = self.env.render()
self.show_screen(text_render, info, True)
self.action = None
def _keyboard_closed(self):
self._keyboard.unbind(on_key_down=self._on_keyboard_down)
self._keyboard = None
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
key_register = modifiers + [text]
self.action = text
if keycode[1] == "escape":
keyboard.release()
return True
def app_wrapper(env):
class KivyApp(App):
def build(self):
game = KivyWrapper(env=env)
game.env.reset()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
return KivyApp
| true | true |
f71c862ef26b8cf209313fbb5ff5c086291c53ca | 1,093 | py | Python | python/analysis/TargetScanDB.py | mjoppich/miRExplore | 32760d88d65e7bc23b2bfb49415efcd0a7c7c5e1 | [
"Apache-2.0"
] | null | null | null | python/analysis/TargetScanDB.py | mjoppich/miRExplore | 32760d88d65e7bc23b2bfb49415efcd0a7c7c5e1 | [
"Apache-2.0"
] | null | null | null | python/analysis/TargetScanDB.py | mjoppich/miRExplore | 32760d88d65e7bc23b2bfb49415efcd0a7c7c5e1 | [
"Apache-2.0"
] | null | null | null | import re
from collections import defaultdict
from openpyxl import load_workbook
class TargetScanDB:
    """In-memory view of a TargetScan target-prediction TSV export."""

    def __init__(self):
        # Flat list of (gene, mirna, score, percentile) tuples.
        self.elems = []
        # Filled on demand by make_dictionary(): gene symbol -> records.
        self.gene2mirnas = defaultdict(list)

    def make_dictionary(self):
        """Index self.elems by gene symbol into self.gene2mirnas."""
        for record in self.elems:
            self.gene2mirnas[record[0]].append(record)

    @classmethod
    def from_tsv(cls, filelocation="/mnt/c/ownCloud/data/miRExplore/targetscan/targetscan_ws_85.tsv"):
        """Parse a TargetScan TSV file (gene, mirna, score, percentile).

        The first line is treated as a column header and skipped; gene
        symbols are upper-cased and the species prefix is stripped from
        the miRNA identifier.
        """
        tsdb = TargetScanDB()
        with open(filelocation, 'r') as fin:
            for lineno, line in enumerate(fin):
                if lineno == 0:
                    continue  # column header

                fields = line.strip().split('\t')
                gene = fields[0].upper()
                # Drop the species prefix from the miRNA name.
                mirna = fields[1].replace('mmu-', '').replace('hsa-', '')
                tsdb.elems.append(
                    (gene, mirna, float(fields[2]), int(fields[3])))

        return tsdb
if __name__ == '__main__':
    # Smoke test: parse the default TargetScan export and dump each record.
    tsdb = TargetScanDB.from_tsv()

    for x in tsdb.elems:
        print(x)
| 19.517857 | 102 | 0.548948 | import re
from collections import defaultdict
from openpyxl import load_workbook
class TargetScanDB :
def __init__(self):
self.elems = []
self.gene2mirnas = defaultdict(list)
def make_dictionary(self):
for elem in self.elems:
self.gene2mirnas[elem[0]].append(elem)
@classmethod
def from_tsv(cls, filelocation="/mnt/c/ownCloud/data/miRExplore/targetscan/targetscan_ws_85.tsv"):
tsdb = TargetScanDB()
with open(filelocation, 'r') as fin:
for idx, row in enumerate(fin):
if idx == 0:
continue
arow = row.strip().split('\t')
gene = arow[0].upper()
mirna = arow[1]
score = float(arow[2])
percentile = int(arow[3])
mirna = mirna.replace('mmu-', '').replace('hsa-', '')
tsdb.elems.append((gene, mirna, score, percentile))
return tsdb
if __name__ == '__main__':
tsdb = TargetScanDB.from_tsv()
for x in tsdb.elems:
print(x)
| true | true |
f71c86d03bc2eedb4697b0730ac3f051ebb54808 | 15,522 | py | Python | rasa_nlu/project.py | osmanbaskaya/rasa_nlu | 4f0b5d0fd0d058e437e7d74369cef212fd0a345b | [
"Apache-2.0"
] | null | null | null | rasa_nlu/project.py | osmanbaskaya/rasa_nlu | 4f0b5d0fd0d058e437e7d74369cef212fd0a345b | [
"Apache-2.0"
] | 6 | 2020-09-26T00:52:34.000Z | 2022-02-10T01:37:38.000Z | rasa_nlu/project.py | esrel/rasa_nlu | 53840788e41b2daf957ec5d488281f70e238730f | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import tempfile
import zipfile
from threading import Lock, Thread
from typing import Text, List
import six
import time
from builtins import object
from requests.exceptions import InvalidURL, RequestException
from rasa_nlu import utils
from rasa_nlu.classifiers.keyword_intent_classifier import \
KeywordIntentClassifier
from rasa_nlu.model import Metadata, Interpreter
from rasa_nlu.utils import is_url, EndpointConfig
if six.PY2:
from StringIO import StringIO as IOReader
else:
from io import BytesIO as IOReader
logger = logging.getLogger(__name__)
MODEL_NAME_PREFIX = "model_"
FALLBACK_MODEL_NAME = "fallback"
DEFAULT_REQUEST_TIMEOUT = 60 * 5 # 5 minutes
def load_from_server(component_builder=None,  # type: Optional[Text]
                     project=None,  # type: Optional[Text]
                     project_dir=None,  # type: Optional[Text]
                     remote_storage=None,  # type: Optional[Text]
                     model_server=None,  # type: Optional[EndpointConfig]
                     wait_time_between_pulls=None,  # type: Optional[int]
                     ):
    # type: (...) -> Project
    """Load a persisted model from a server.

    Builds a Project, pulls the current model from ``model_server`` once,
    and — when ``wait_time_between_pulls`` is set — starts a daemon thread
    that keeps re-pulling at that interval.

    NOTE(review): the ``Optional[Text]`` type comment on
    ``component_builder`` looks wrong — the value is forwarded to
    Project/Interpreter.create as a component builder object; confirm.
    """

    project = Project(component_builder=component_builder,
                      project=project,
                      project_dir=project_dir,
                      remote_storage=remote_storage)

    _update_model_from_server(model_server, project)

    if wait_time_between_pulls:
        # continuously pull the model every `wait_time_between_pulls` seconds
        start_model_pulling_in_worker(model_server,
                                      wait_time_between_pulls,
                                      project)
    return project
def _update_model_from_server(model_server, project):
    # type: (EndpointConfig, Project) -> None
    """Load a zipped Rasa NLU model from a URL and update the passed
    project."""

    if not is_url(model_server.url):
        raise InvalidURL(model_server)

    # Fresh directory per pull; the project takes ownership of the
    # unzipped model that _pull_model_and_fingerprint extracts into it.
    model_directory = tempfile.mkdtemp()

    # A falsy fingerprint means the server reported no newer model (204),
    # found nothing (404), or the request failed — leave the project as-is.
    new_model_fingerprint, filename = _pull_model_and_fingerprint(
        model_server, model_directory, project.fingerprint)
    if new_model_fingerprint:
        model_name = _get_remote_model_name(filename)
        project.fingerprint = new_model_fingerprint
        project.update_model_from_dir_and_unload_others(model_directory,
                                                        model_name)
    else:
        logger.debug("No new model found at URL {}".format(model_server.url))
def _get_remote_model_name(filename):
# type: (Optional[Text]) -> Text
"""Get the name to save a model under that was fetched from a
remote server."""
if filename is not None: # use the filename header if present
return filename.strip(".zip")
else: # or else use a timestamp
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
return MODEL_NAME_PREFIX + timestamp
def _pull_model_and_fingerprint(model_server, model_directory, fingerprint):
    # type: (EndpointConfig, Text, Optional[Text]) -> (Optional[Text], Optional[Text])
    """Queries the model server and returns a tuple containing the
    response's <ETag> header (the model hash) and the <filename> header
    (the model name).

    Returns (None, None) when the request fails, the server has no model
    (404), or an unexpected status is returned.  On a 200 response the
    zipped model is extracted into ``model_directory`` first.
    """
    # If-None-Match lets the server answer 204 when our model is current.
    header = {"If-None-Match": fingerprint}

    try:
        logger.debug("Requesting model from server {}..."
                     "".format(model_server.url))
        response = model_server.request(method="GET",
                                        headers=header,
                                        timeout=DEFAULT_REQUEST_TIMEOUT)
    except RequestException as e:
        logger.warning("Tried to fetch model from server, but couldn't reach "
                       "server. We'll retry later... Error: {}."
                       "".format(e))
        return None, None

    if response.status_code == 204:
        logger.debug("Model server returned 204 status code, indicating "
                     "that no new model is available. "
                     "Current fingerprint: {}".format(fingerprint))
        return response.headers.get("ETag"), response.headers.get("filename")
    elif response.status_code == 404:
        logger.debug("Model server didn't find a model for our request. "
                     "Probably no one did train a model for the project "
                     "and tag combination yet.")
        return None, None
    elif response.status_code != 200:
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning("Tried to fetch model from server, but server response "
                       "status code is {}. We'll retry later..."
                       "".format(response.status_code))
        return None, None

    # Close the archive deterministically instead of leaking the handle.
    with zipfile.ZipFile(IOReader(response.content)) as zip_ref:
        zip_ref.extractall(model_directory)
    logger.debug("Unzipped model to {}"
                 "".format(os.path.abspath(model_directory)))

    # get the new fingerprint and filename
    return response.headers.get("ETag"), response.headers.get("filename")
def _run_model_pulling_worker(model_server, wait_time_between_pulls, project):
    # type: (Text, int, Project) -> None
    """Poll the model server forever, sleeping between pulls.

    Runs on a daemon thread (see start_model_pulling_in_worker), so the
    infinite loop ends only when the process exits.
    """
    while True:
        _update_model_from_server(model_server, project)
        time.sleep(wait_time_between_pulls)
def start_model_pulling_in_worker(model_server, wait_time_between_pulls,
                                  project):
    # type: (Text, int, Project) -> None
    """Start a daemon thread that re-pulls the model from ``model_server``
    every ``wait_time_between_pulls`` seconds."""

    worker = Thread(target=_run_model_pulling_worker,
                    args=(model_server, wait_time_between_pulls, project))
    # Assigning .daemon replaces the deprecated setDaemon() call; the
    # attribute exists on both Python 2 and 3 (this module supports six/PY2).
    worker.daemon = True
    worker.start()
class Project(object):
    """All NLU models of a single project, with thread-safe access.

    Interpreters are cached in ``self._models`` (model name -> Interpreter,
    or None for a model that is known but not yet loaded).  A readers-writer
    lock lets many concurrent parse() calls share the cache while updates
    are exclusive; ``_loader_lock`` additionally serializes lazy loading.
    """

    def __init__(self,
                 component_builder=None,
                 project=None,
                 project_dir=None,
                 remote_storage=None,
                 fingerprint=None):
        self._component_builder = component_builder
        # model name -> Interpreter (None = discovered but not loaded yet).
        self._models = {}
        self.status = 0
        self.current_training_processes = 0
        self._reader_lock = Lock()
        self._loader_lock = Lock()
        self._writer_lock = Lock()
        self._readers_count = 0
        self._path = None
        self._project = project
        self.remote_storage = remote_storage
        # ETag of the model most recently fetched from a model server.
        self.fingerprint = fingerprint

        if project and project_dir:
            self._path = os.path.join(project_dir, project)
        self._search_for_models()

    def _begin_read(self):
        # Readers-writer lock basic double mutex implementation
        self._reader_lock.acquire()
        self._readers_count += 1
        if self._readers_count == 1:
            # First reader in blocks writers until the last reader leaves.
            self._writer_lock.acquire()
        self._reader_lock.release()

    def _end_read(self):
        self._reader_lock.acquire()
        self._readers_count -= 1
        if self._readers_count == 0:
            self._writer_lock.release()
        self._reader_lock.release()

    def _load_local_model(self, requested_model_name=None):
        """Resolve a model name against the in-memory cache only.

        Returns the latest known model when no name is requested, the
        requested name if it is cached, or None when resolution fails.
        """
        if requested_model_name is None:  # user wants the latest model
            # NOTE: for parse performance we do NOT refresh the model list
            # from disk/cloud here, even though the caller asked for
            # "latest" — refreshing is slow.  A caller can pass the latest
            # model's name explicitly to have it cached; that is the
            # current workaround for picking up newly trained models.
            # If a real refresh is ever wanted, implementing it in
            # `_latest_project_model()` would be a good place.
            logger.debug("No model specified. Using default")
            return self._latest_project_model()
        elif requested_model_name in self._models:  # model exists in cache
            return requested_model_name
        return None  # local model loading failed!

    def _dynamic_load_model(self, requested_model_name=None):
        # type: (Text) -> Text
        """Resolve a model name, refreshing from disk/cloud on a cache miss.

        Falls back to the latest known model when the requested name
        cannot be found even after refreshing.
        """
        # first try to resolve against the local cache
        local_model = self._load_local_model(requested_model_name)
        if local_model:
            return local_model

        # Cache miss: refresh the model list from local disk and cloud.
        # NOTE: a malicious user requesting many non-existent models would
        # trigger repeated cloud listings, which is time-consuming —
        # a potential performance issue.
        self._search_for_models()

        # retry after refreshing the model cache
        local_model = self._load_local_model(requested_model_name)
        if local_model:
            return local_model

        # Requested model still not found — fall back to the latest model.
        logger.warn("Invalid model requested. Using default")
        return self._latest_project_model()

    def parse(self, text, time=None, requested_model_name=None):
        """Parse *text* with the requested (or latest) model.

        NOTE(review): the ``time`` parameter shadows the module-level
        ``time`` import inside this method.
        """
        self._begin_read()

        model_name = self._dynamic_load_model(requested_model_name)

        self._loader_lock.acquire()
        try:
            # Lazily instantiate the interpreter on first use.
            if not self._models.get(model_name):
                interpreter = self._interpreter_for_model(model_name)
                self._models[model_name] = interpreter
        finally:
            self._loader_lock.release()

        response = self._models[model_name].parse(text, time)
        response['project'] = self._project
        response['model'] = model_name

        self._end_read()

        return response

    def load_model(self):
        """Eagerly load the latest model; returns True if a load happened."""
        self._begin_read()
        status = False
        model_name = self._dynamic_load_model()
        logger.debug('Loading model %s', model_name)

        self._loader_lock.acquire()
        try:
            if not self._models.get(model_name):
                interpreter = self._interpreter_for_model(model_name)
                self._models[model_name] = interpreter
                status = True
        finally:
            self._loader_lock.release()

        self._end_read()

        return status

    def update_model_from_dir_and_unload_others(self,
                                                model_dir,  # type: Text
                                                model_name  # type: Text
                                                ):
        """Load a model from *model_dir* and evict all other loaded models.

        Used by the model-server pull loop so only the freshly pulled
        model stays resident.
        """
        # unload all loaded models
        for model in self._list_loaded_models():
            self.unload(model)

        self._begin_read()
        status = False
        logger.debug('Loading model {} from directory {}'.format(
            model_name, model_dir))

        self._loader_lock.acquire()
        try:
            interpreter = self._interpreter_for_model(
                model_name, model_dir)
            self._models[model_name] = interpreter
            status = True
        finally:
            self._loader_lock.release()

        self._end_read()

        return status

    def update(self, model_name):
        """Drop the cached interpreter so the model is reloaded on next use."""
        self._writer_lock.acquire()
        self._models[model_name] = None
        self._writer_lock.release()

    def unload(self, model_name):
        """Free the interpreter for *model_name*, keeping the name known."""
        self._writer_lock.acquire()
        try:
            del self._models[model_name]
            self._models[model_name] = None
            return model_name
        finally:
            self._writer_lock.release()

    def _latest_project_model(self):
        """Retrieves the latest trained model for a project.

        Model names are expected to embed a %Y%m%d-%H%M%S timestamp after
        MODEL_NAME_PREFIX; falls back to the keyword-classifier model when
        no such model exists.
        """
        models = {model[len(MODEL_NAME_PREFIX):]: model
                  for model in self._models.keys()
                  if model.startswith(MODEL_NAME_PREFIX)}
        if models:
            time_list = [datetime.datetime.strptime(time, '%Y%m%d-%H%M%S')
                         for time, model in models.items()]
            return models[max(time_list).strftime('%Y%m%d-%H%M%S')]
        else:
            return FALLBACK_MODEL_NAME

    def _fallback_model(self):
        """Build a minimal keyword-classifier interpreter as a fallback."""
        meta = Metadata({"pipeline": [{
            "name": "intent_classifier_keyword",
            "class": utils.module_path_from_object(KeywordIntentClassifier())
        }]}, "")
        return Interpreter.create(meta, self._component_builder)

    def _search_for_models(self):
        """Refresh the set of known model names from disk and cloud storage."""
        model_names = (self._list_models_in_dir(self._path) +
                       self._list_models_in_cloud())
        if not model_names:
            if FALLBACK_MODEL_NAME not in self._models:
                self._models[FALLBACK_MODEL_NAME] = self._fallback_model()
        else:
            for model in set(model_names):
                if model not in self._models:
                    # Register as known but not yet loaded.
                    self._models[model] = None

    def _interpreter_for_model(self, model_name, model_dir=None):
        metadata = self._read_model_metadata(model_name, model_dir)
        return Interpreter.create(metadata, self._component_builder)

    def _read_model_metadata(self, model_name, model_dir):
        """Locate a model's directory (fetching from cloud if needed) and
        load its metadata; a None model name yields default metadata."""
        if model_name is None:
            data = Project._default_model_metadata()
            return Metadata(data, model_name)
        else:
            if model_dir is not None:
                path = model_dir
            elif not os.path.isabs(model_name) and self._path:
                path = os.path.join(self._path, model_name)
            else:
                path = model_name

            # download model from cloud storage if needed and possible
            if not os.path.isdir(path):
                self._load_model_from_cloud(model_name, path)

            return Metadata.load(path)

    def as_dict(self):
        """Return a JSON-serializable status summary of this project."""
        return {'status': 'training' if self.status else 'ready',
                'current_training_processes': self.current_training_processes,
                'available_models': list(self._models.keys()),
                'loaded_models': self._list_loaded_models()}

    def _list_loaded_models(self):
        """Names of models whose interpreter is currently instantiated."""
        models = []
        for model, interpreter in self._models.items():
            if interpreter is not None:
                models.append(model)
        return models

    def _list_models_in_cloud(self):
        # type: () -> List[Text]
        """List this project's models in remote storage; [] on any failure."""

        try:
            from rasa_nlu.persistor import get_persistor
            p = get_persistor(self.remote_storage)
            if p is not None:
                return p.list_models(self._project)
            else:
                return []
        except Exception as e:
            logger.warn("Failed to list models of project {}. "
                        "{}".format(self._project, e))
            return []

    def _load_model_from_cloud(self, model_name, target_path):
        """Download a model from remote storage into *target_path*."""
        try:
            from rasa_nlu.persistor import get_persistor
            p = get_persistor(self.remote_storage)
            if p is not None:
                p.retrieve(model_name, self._project, target_path)
            else:
                raise RuntimeError("Unable to initialize persistor")
        except Exception as e:
            logger.warn("Using default interpreter, couldn't fetch "
                        "model: {}".format(e))
            raise  # re-raise this exception because nothing we can do now

    @staticmethod
    def _default_model_metadata():
        return {
            "language": None,
        }

    @staticmethod
    def _list_models_in_dir(path):
        """Subdirectories of *path*, relative to it; [] if path is missing."""
        if not path or not os.path.isdir(path):
            return []
        else:
            return [os.path.relpath(model, path)
                    for model in utils.list_subdirectories(path)]
| 36.097674 | 86 | 0.616029 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import tempfile
import zipfile
from threading import Lock, Thread
from typing import Text, List
import six
import time
from builtins import object
from requests.exceptions import InvalidURL, RequestException
from rasa_nlu import utils
from rasa_nlu.classifiers.keyword_intent_classifier import \
KeywordIntentClassifier
from rasa_nlu.model import Metadata, Interpreter
from rasa_nlu.utils import is_url, EndpointConfig
if six.PY2:
from StringIO import StringIO as IOReader
else:
from io import BytesIO as IOReader
logger = logging.getLogger(__name__)
MODEL_NAME_PREFIX = "model_"
FALLBACK_MODEL_NAME = "fallback"
DEFAULT_REQUEST_TIMEOUT = 60 * 5
def load_from_server(component_builder=None,
project=None,
project_dir=None,
remote_storage=None,
model_server=None,
wait_time_between_pulls=None,
):
project = Project(component_builder=component_builder,
project=project,
project_dir=project_dir,
remote_storage=remote_storage)
_update_model_from_server(model_server, project)
if wait_time_between_pulls:
start_model_pulling_in_worker(model_server,
wait_time_between_pulls,
project)
return project
def _update_model_from_server(model_server, project):
if not is_url(model_server.url):
raise InvalidURL(model_server)
model_directory = tempfile.mkdtemp()
new_model_fingerprint, filename = _pull_model_and_fingerprint(
model_server, model_directory, project.fingerprint)
if new_model_fingerprint:
model_name = _get_remote_model_name(filename)
project.fingerprint = new_model_fingerprint
project.update_model_from_dir_and_unload_others(model_directory,
model_name)
else:
logger.debug("No new model found at URL {}".format(model_server.url))
def _get_remote_model_name(filename):
if filename is not None:
return filename.strip(".zip")
else:
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
return MODEL_NAME_PREFIX + timestamp
def _pull_model_and_fingerprint(model_server, model_directory, fingerprint):
header = {"If-None-Match": fingerprint}
try:
logger.debug("Requesting model from server {}..."
"".format(model_server.url))
response = model_server.request(method="GET",
headers=header,
timeout=DEFAULT_REQUEST_TIMEOUT)
except RequestException as e:
logger.warning("Tried to fetch model from server, but couldn't reach "
"server. We'll retry later... Error: {}."
"".format(e))
return None, None
if response.status_code == 204:
logger.debug("Model server returned 204 status code, indicating "
"that no new model is available. "
"Current fingerprint: {}".format(fingerprint))
return response.headers.get("ETag"), response.headers.get("filename")
elif response.status_code == 404:
logger.debug("Model server didn't find a model for our request. "
"Probably no one did train a model for the project "
"and tag combination yet.")
return None, None
elif response.status_code != 200:
logger.warn("Tried to fetch model from server, but server response "
"status code is {}. We'll retry later..."
"".format(response.status_code))
return None, None
zip_ref = zipfile.ZipFile(IOReader(response.content))
zip_ref.extractall(model_directory)
logger.debug("Unzipped model to {}"
"".format(os.path.abspath(model_directory)))
return response.headers.get("ETag"), response.headers.get("filename")
def _run_model_pulling_worker(model_server, wait_time_between_pulls, project):
while True:
_update_model_from_server(model_server, project)
time.sleep(wait_time_between_pulls)
def start_model_pulling_in_worker(model_server, wait_time_between_pulls,
project):
worker = Thread(target=_run_model_pulling_worker,
args=(model_server, wait_time_between_pulls, project))
worker.setDaemon(True)
worker.start()
class Project(object):
def __init__(self,
component_builder=None,
project=None,
project_dir=None,
remote_storage=None,
fingerprint=None):
self._component_builder = component_builder
self._models = {}
self.status = 0
self.current_training_processes = 0
self._reader_lock = Lock()
self._loader_lock = Lock()
self._writer_lock = Lock()
self._readers_count = 0
self._path = None
self._project = project
self.remote_storage = remote_storage
self.fingerprint = fingerprint
if project and project_dir:
self._path = os.path.join(project_dir, project)
self._search_for_models()
def _begin_read(self):
self._reader_lock.acquire()
self._readers_count += 1
if self._readers_count == 1:
self._writer_lock.acquire()
self._reader_lock.release()
def _end_read(self):
self._reader_lock.acquire()
self._readers_count -= 1
if self._readers_count == 0:
self._writer_lock.release()
self._reader_lock.release()
def _load_local_model(self, requested_model_name=None):
if requested_model_name is None:
logger.debug("No model specified. Using default")
return self._latest_project_model()
elif requested_model_name in self._models:
return requested_model_name
return None
def _dynamic_load_model(self, requested_model_name=None):
local_model = self._load_local_model(requested_model_name)
if local_model:
return local_model
self._search_for_models()
local_model = self._load_local_model(requested_model_name)
if local_model:
return local_model
logger.warn("Invalid model requested. Using default")
return self._latest_project_model()
def parse(self, text, time=None, requested_model_name=None):
self._begin_read()
model_name = self._dynamic_load_model(requested_model_name)
self._loader_lock.acquire()
try:
if not self._models.get(model_name):
interpreter = self._interpreter_for_model(model_name)
self._models[model_name] = interpreter
finally:
self._loader_lock.release()
response = self._models[model_name].parse(text, time)
response['project'] = self._project
response['model'] = model_name
self._end_read()
return response
def load_model(self):
self._begin_read()
status = False
model_name = self._dynamic_load_model()
logger.debug('Loading model %s', model_name)
self._loader_lock.acquire()
try:
if not self._models.get(model_name):
interpreter = self._interpreter_for_model(model_name)
self._models[model_name] = interpreter
status = True
finally:
self._loader_lock.release()
self._end_read()
return status
def update_model_from_dir_and_unload_others(self,
model_dir,
model_name
):
for model in self._list_loaded_models():
self.unload(model)
self._begin_read()
status = False
logger.debug('Loading model {} from directory {}'.format(
model_name, model_dir))
self._loader_lock.acquire()
try:
interpreter = self._interpreter_for_model(
model_name, model_dir)
self._models[model_name] = interpreter
status = True
finally:
self._loader_lock.release()
self._end_read()
return status
def update(self, model_name):
self._writer_lock.acquire()
self._models[model_name] = None
self._writer_lock.release()
def unload(self, model_name):
self._writer_lock.acquire()
try:
del self._models[model_name]
self._models[model_name] = None
return model_name
finally:
self._writer_lock.release()
def _latest_project_model(self):
models = {model[len(MODEL_NAME_PREFIX):]: model
for model in self._models.keys()
if model.startswith(MODEL_NAME_PREFIX)}
if models:
time_list = [datetime.datetime.strptime(time, '%Y%m%d-%H%M%S')
for time, model in models.items()]
return models[max(time_list).strftime('%Y%m%d-%H%M%S')]
else:
return FALLBACK_MODEL_NAME
def _fallback_model(self):
meta = Metadata({"pipeline": [{
"name": "intent_classifier_keyword",
"class": utils.module_path_from_object(KeywordIntentClassifier())
}]}, "")
return Interpreter.create(meta, self._component_builder)
def _search_for_models(self):
model_names = (self._list_models_in_dir(self._path) +
self._list_models_in_cloud())
if not model_names:
if FALLBACK_MODEL_NAME not in self._models:
self._models[FALLBACK_MODEL_NAME] = self._fallback_model()
else:
for model in set(model_names):
if model not in self._models:
self._models[model] = None
def _interpreter_for_model(self, model_name, model_dir=None):
metadata = self._read_model_metadata(model_name, model_dir)
return Interpreter.create(metadata, self._component_builder)
def _read_model_metadata(self, model_name, model_dir):
if model_name is None:
data = Project._default_model_metadata()
return Metadata(data, model_name)
else:
if model_dir is not None:
path = model_dir
elif not os.path.isabs(model_name) and self._path:
path = os.path.join(self._path, model_name)
else:
path = model_name
if not os.path.isdir(path):
self._load_model_from_cloud(model_name, path)
return Metadata.load(path)
def as_dict(self):
return {'status': 'training' if self.status else 'ready',
'current_training_processes': self.current_training_processes,
'available_models': list(self._models.keys()),
'loaded_models': self._list_loaded_models()}
def _list_loaded_models(self):
models = []
for model, interpreter in self._models.items():
if interpreter is not None:
models.append(model)
return models
def _list_models_in_cloud(self):
try:
from rasa_nlu.persistor import get_persistor
p = get_persistor(self.remote_storage)
if p is not None:
return p.list_models(self._project)
else:
return []
except Exception as e:
logger.warn("Failed to list models of project {}. "
"{}".format(self._project, e))
return []
def _load_model_from_cloud(self, model_name, target_path):
try:
from rasa_nlu.persistor import get_persistor
p = get_persistor(self.remote_storage)
if p is not None:
p.retrieve(model_name, self._project, target_path)
else:
raise RuntimeError("Unable to initialize persistor")
except Exception as e:
logger.warn("Using default interpreter, couldn't fetch "
"model: {}".format(e))
raise # re-raise this exception because nothing we can do now
@staticmethod
def _default_model_metadata():
return {
"language": None,
}
@staticmethod
def _list_models_in_dir(path):
if not path or not os.path.isdir(path):
return []
else:
return [os.path.relpath(model, path)
for model in utils.list_subdirectories(path)]
| true | true |
f71c881a51efe3fd38a5ddad27bb876a0a24ab7d | 8,497 | py | Python | pytype/tests/test_namedtuple.py | ashwinprasadme/pytype | fed209c73aacfcab15efc33deef3b4016a67cfe5 | [
"Apache-2.0"
] | null | null | null | pytype/tests/test_namedtuple.py | ashwinprasadme/pytype | fed209c73aacfcab15efc33deef3b4016a67cfe5 | [
"Apache-2.0"
] | null | null | null | pytype/tests/test_namedtuple.py | ashwinprasadme/pytype | fed209c73aacfcab15efc33deef3b4016a67cfe5 | [
"Apache-2.0"
] | null | null | null | """Tests for the namedtuple implementation in collections_overlay.py."""
import textwrap
from pytype import file_utils
from pytype.overlays import collections_overlay
from pytype.pytd import escape
from pytype.pytd import pytd_utils
from pytype.tests import test_base
class NamedtupleTests(test_base.TargetIndependentTest):
  """Tests for collections.namedtuple."""

  def _namedtuple_ast(self, name, fields):
    """Build the expected pytd AST for a generated namedtuple class."""
    return collections_overlay.namedtuple_ast(name, fields, self.python_version)

  def _namedtuple_def(self, suffix="", **kws):
    """Generate the expected pyi for a simple namedtuple definition.

    Args:
      suffix: Optionally, extra text to append to the pyi.
      **kws: Must contain exactly one argument of the form
        alias=(name, [<fields>]). For example, to generate a definition for
        X = namedtuple("_X", "y z"), the method call should be
        _namedtuple_def(X=("_X", ["y", "z"])).

    Returns:
      The expected pyi for the namedtuple instance.
    """
    (alias, (name, fields)), = kws.items()  # pylint: disable=unbalanced-tuple-unpacking
    name = escape.pack_namedtuple(name, fields)
    suffix += textwrap.dedent("""
      collections = ... # type: module
      {alias} = {name}""").format(alias=alias, name=name)
    return pytd_utils.Print(self._namedtuple_ast(name, fields)) + "\n" + suffix

  def test_basic_namedtuple(self):
    """A namedtuple and its instance infer to the generated class."""
    ty = self.Infer("""
      import collections

      X = collections.namedtuple("X", ["y", "z"])
      a = X(y=1, z=2)
      """, deep=False)
    self.assertTypesMatchPytd(ty, self._namedtuple_def(
        X=("X", ["y", "z"]), suffix="a = ... # type: X"))

  def test_no_fields(self):
    """A namedtuple with an empty field list still works."""
    ty = self.Infer("""
      import collections

      F = collections.namedtuple("F", [])
      a = F()
      """, deep=False)
    self.assertTypesMatchPytd(
        ty, self._namedtuple_def(F=("F", []), suffix="a = ... # type: F"))

  def test_str_args(self):
    """Fields may be given as a single space-separated string."""
    ty = self.Infer("""
      import collections

      S = collections.namedtuple("S", "a b c")
      b = S(1, 2, 3)
      """, deep=False)
    self.assertTypesMatchPytd(ty, self._namedtuple_def(
        S=("S", ["a", "b", "c"]), suffix="b = ... # type: S"))

  def test_str_args2(self):
    """Comma and mixed comma/space separators are all accepted."""
    self.Check("""
        import collections
        collections.namedtuple("_", "a,b,c")
        """)
    self.Check("""
        import collections
        collections.namedtuple("_", "a, b, c")
        """)
    self.Check("""
        import collections
        collections.namedtuple("_", "a ,b")
        """)

  def test_bad_fieldnames(self):
    """Keywords, underscores, digits, and duplicates are rejected."""
    self.InferWithErrors("""
        import collections
        collections.namedtuple("_", ["abc", "def", "ghi"])  # invalid-namedtuple-arg
        collections.namedtuple("_", "_")  # invalid-namedtuple-arg
        collections.namedtuple("_", "a, 1")  # invalid-namedtuple-arg
        collections.namedtuple("_", "a, !")  # invalid-namedtuple-arg
        collections.namedtuple("_", "a, b, c, a")  # invalid-namedtuple-arg
        collections.namedtuple("1", "")  # invalid-namedtuple-arg
        """)

  def test_rename(self):
    """rename=True replaces invalid/duplicate fields with _<index> names."""
    ty = self.Infer("""
        import collections
        S = collections.namedtuple("S", "abc def ghi abc", rename=True)
        """, deep=False)
    self.assertTypesMatchPytd(
        ty, self._namedtuple_def(S=("S", ["abc", "_1", "ghi", "_3"])))

  def test_bad_initialize(self):
    """Missing and unknown constructor arguments are flagged."""
    self.InferWithErrors("""
        from collections import namedtuple

        X = namedtuple("X", "y z")
        a = X(1)  # missing-parameter
        b = X(y = 2)  # missing-parameter
        c = X(w = 3)  # wrong-keyword-args
        d = X(y = "hello", z = 4j)  # works
        """)

  def test_class_name(self):
    """The generated class is named after the string arg, not the alias."""
    ty = self.Infer(
        """
        import collections
        F = collections.namedtuple("S", ['a', 'b', 'c'])
        """)
    self.assertTypesMatchPytd(
        ty, self._namedtuple_def(F=("S", ["a", "b", "c"])))

  def test_constructors(self):
    """Both the class constructor and _make (with extra args) typecheck."""
    self.Check("""
        import collections
        X = collections.namedtuple("X", "a b c")
        g = X(1, 2, 3)
        i = X._make((7, 8, 9))
        j = X._make((10, 11, 12), tuple.__new__, len)
        """)

  def test_instance_types(self):
    """_make returns an instance of the generated class."""
    ty = self.Infer(
        """
        import collections
        X = collections.namedtuple("X", "a b c")
        a = X._make((1, 2, 3))
        """)
    self.assertTypesMatchPytd(ty, self._namedtuple_def(
        X=("X", ["a", "b", "c"]), suffix="a = ... # type: X"))

  def test_instantiate_pyi_namedtuple(self):
    """Constructor arg checking also works for a NamedTuple from a pyi."""
    with file_utils.Tempdir() as d:
      d.create_file("foo.pyi", """
        class X(NamedTuple('X', [('y', str), ('z', int)])): ...
      """)
      _, errors = self.InferWithErrors("""
        import foo
        foo.X()  # missing-parameter[e1]
        foo.X(0, "")  # wrong-arg-types[e2]
        foo.X(z="", y=0)  # wrong-arg-types[e3]
        foo.X("", 0)
        foo.X(y="", z=0)
      """, pythonpath=[d.path])
      self.assertErrorRegexes(
          errors, {"e1": r"y", "e2": r"str.*int", "e3": r"str.*int"})

  def test_use_pyi_namedtuple(self):
    """Methods like _replace exist; unknown attributes are errors."""
    with file_utils.Tempdir() as d:
      d.create_file("foo.pyi", """
        class X(NamedTuple("X", [])): ...
      """)
      _, errors = self.InferWithErrors("""
        import foo
        foo.X()._replace()
        foo.X().nonsense  # attribute-error[e]
      """, pythonpath=[d.path])
      self.assertErrorRegexes(errors, {"e": r"nonsense.*X"})

  def test_subclass_pyi_namedtuple(self):
    """A subclass may call super().__new__ with the tuple's fields."""
    with file_utils.Tempdir() as d:
      d.create_file("foo.pyi", """
        class X(NamedTuple("X", [("y", int)])): ...
      """)
      self.Check("""
        import foo
        class Y(foo.X):
          def __new__(cls):
            return super(Y, cls).__new__(cls, 0)
        Y()
      """, pythonpath=[d.path])

  def test_varargs(self):
    self.Check("""
        import collections
        X = collections.namedtuple("X", [])
        args = None  # type: list
        X(*args)
        """)

  def test_kwargs(self):
    self.Check("""
        import collections
        X = collections.namedtuple("X", [])
        kwargs = None  # type: dict
        X(**kwargs)
        """)

  def test_name_conflict(self):
    """Same name + same fields share one generated class; different
    fields get a distinct one."""
    ty = self.Infer("""
      import collections

      X = collections.namedtuple("_", [])
      Y = collections.namedtuple("_", [])
      Z = collections.namedtuple("_", "a")
      """, deep=False)
    name_x = escape.pack_namedtuple("_", [])
    name_z = escape.pack_namedtuple("_", ["a"])
    ast_x = self._namedtuple_ast(name_x, [])
    ast_z = self._namedtuple_ast(name_z, ["a"])
    ast = pytd_utils.Concat(ast_x, ast_z)
    expected = pytd_utils.Print(ast) + textwrap.dedent("""
      collections = ... # type: module
      X = {name_x}
      Y = {name_x}
      Z = {name_z}""").format(name_x=name_x, name_z=name_z)
    self.assertTypesMatchPytd(ty, expected)

  def test_subclass(self):
    """__new__ in a subclass is typed with a bound TypeVar on cls."""
    ty = self.Infer("""
      import collections

      class X(collections.namedtuple("X", [])):
        def __new__(cls, _):
          return super(X, cls).__new__(cls)
      """)
    name = escape.pack_namedtuple("X", [])
    ast = self._namedtuple_ast(name, [])
    expected = pytd_utils.Print(ast) + textwrap.dedent("""
      collections = ... # type: module
      _TX = TypeVar("_TX", bound=X)

      class X({name}):
        def __new__(cls: Type[_TX], _) -> _TX: ...""").format(name=name)
    self.assertTypesMatchPytd(ty, expected)

  def test_subclass_replace(self):
    """_replace on a subclass instance preserves the subclass type."""
    ty = self.Infer("""
      import collections
      X = collections.namedtuple("X", "a")
      class Y(X): pass
      z = Y(1)._replace(a=2)
    """)
    self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y")

  def test_subclass_make(self):
    """_make on a subclass returns the subclass type."""
    ty = self.Infer("""
      import collections
      X = collections.namedtuple("X", "a")
      class Y(X): pass
      z = Y._make([1])
    """)
    self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y")

  def test_unpacking(self):
    """Tuple unpacking of a pyi NamedTuple recovers the field types."""
    with file_utils.Tempdir() as d:
      d.create_file("foo.pyi", """
        from typing import NamedTuple
        X = NamedTuple("X", [('a', str), ('b', int)])
      """)
      ty = self.Infer("""
        import foo
        v = None  # type: foo.X
        a, b = v
      """, deep=False, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        foo = ... # type: module
        v = ... # type: foo.namedtuple_X_0
        a = ... # type: str
        b = ... # type: int
      """)
# Register the tests (and run them when executed as a script).
test_base.main(globals(), __name__ == "__main__")
| 31.354244 | 88 | 0.564905 |
import textwrap
from pytype import file_utils
from pytype.overlays import collections_overlay
from pytype.pytd import escape
from pytype.pytd import pytd_utils
from pytype.tests import test_base
class NamedtupleTests(test_base.TargetIndependentTest):
def _namedtuple_ast(self, name, fields):
return collections_overlay.namedtuple_ast(name, fields, self.python_version)
def _namedtuple_def(self, suffix="", **kws):
(alias, (name, fields)), = kws.items()
name = escape.pack_namedtuple(name, fields)
suffix += textwrap.dedent("""
collections = ... # type: module
{alias} = {name}""").format(alias=alias, name=name)
return pytd_utils.Print(self._namedtuple_ast(name, fields)) + "\n" + suffix
def test_basic_namedtuple(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", ["y", "z"])
a = X(y=1, z=2)
""", deep=False)
self.assertTypesMatchPytd(ty, self._namedtuple_def(
X=("X", ["y", "z"]), suffix="a = ... # type: X"))
def test_no_fields(self):
ty = self.Infer("""
import collections
F = collections.namedtuple("F", [])
a = F()
""", deep=False)
self.assertTypesMatchPytd(
ty, self._namedtuple_def(F=("F", []), suffix="a = ... # type: F"))
def test_str_args(self):
ty = self.Infer("""
import collections
S = collections.namedtuple("S", "a b c")
b = S(1, 2, 3)
""", deep=False)
self.assertTypesMatchPytd(ty, self._namedtuple_def(
S=("S", ["a", "b", "c"]), suffix="b = ... # type: S"))
def test_str_args2(self):
self.Check("""
import collections
collections.namedtuple("_", "a,b,c")
""")
self.Check("""
import collections
collections.namedtuple("_", "a, b, c")
""")
self.Check("""
import collections
collections.namedtuple("_", "a ,b")
""")
def test_bad_fieldnames(self):
self.InferWithErrors("""
import collections
collections.namedtuple("_", ["abc", "def", "ghi"]) # invalid-namedtuple-arg
collections.namedtuple("_", "_") # invalid-namedtuple-arg
collections.namedtuple("_", "a, 1") # invalid-namedtuple-arg
collections.namedtuple("_", "a, !") # invalid-namedtuple-arg
collections.namedtuple("_", "a, b, c, a") # invalid-namedtuple-arg
collections.namedtuple("1", "") # invalid-namedtuple-arg
""")
def test_rename(self):
ty = self.Infer("""
import collections
S = collections.namedtuple("S", "abc def ghi abc", rename=True)
""", deep=False)
self.assertTypesMatchPytd(
ty, self._namedtuple_def(S=("S", ["abc", "_1", "ghi", "_3"])))
def test_bad_initialize(self):
self.InferWithErrors("""
from collections import namedtuple
X = namedtuple("X", "y z")
a = X(1) # missing-parameter
b = X(y = 2) # missing-parameter
c = X(w = 3) # wrong-keyword-args
d = X(y = "hello", z = 4j) # works
""")
def test_class_name(self):
ty = self.Infer(
"""
import collections
F = collections.namedtuple("S", ['a', 'b', 'c'])
""")
self.assertTypesMatchPytd(
ty, self._namedtuple_def(F=("S", ["a", "b", "c"])))
def test_constructors(self):
self.Check("""
import collections
X = collections.namedtuple("X", "a b c")
g = X(1, 2, 3)
i = X._make((7, 8, 9))
j = X._make((10, 11, 12), tuple.__new__, len)
""")
def test_instance_types(self):
ty = self.Infer(
"""
import collections
X = collections.namedtuple("X", "a b c")
a = X._make((1, 2, 3))
""")
self.assertTypesMatchPytd(ty, self._namedtuple_def(
X=("X", ["a", "b", "c"]), suffix="a = ... # type: X"))
def test_instantiate_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple('X', [('y', str), ('z', int)])): ...
""")
_, errors = self.InferWithErrors("""
import foo
foo.X() # missing-parameter[e1]
foo.X(0, "") # wrong-arg-types[e2]
foo.X(z="", y=0) # wrong-arg-types[e3]
foo.X("", 0)
foo.X(y="", z=0)
""", pythonpath=[d.path])
self.assertErrorRegexes(
errors, {"e1": r"y", "e2": r"str.*int", "e3": r"str.*int"})
def test_use_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple("X", [])): ...
""")
_, errors = self.InferWithErrors("""
import foo
foo.X()._replace()
foo.X().nonsense # attribute-error[e]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {"e": r"nonsense.*X"})
def test_subclass_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple("X", [("y", int)])): ...
""")
self.Check("""
import foo
class Y(foo.X):
def __new__(cls):
return super(Y, cls).__new__(cls, 0)
Y()
""", pythonpath=[d.path])
def test_varargs(self):
self.Check("""
import collections
X = collections.namedtuple("X", [])
args = None # type: list
X(*args)
""")
def test_kwargs(self):
self.Check("""
import collections
X = collections.namedtuple("X", [])
kwargs = None # type: dict
X(**kwargs)
""")
def test_name_conflict(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("_", [])
Y = collections.namedtuple("_", [])
Z = collections.namedtuple("_", "a")
""", deep=False)
name_x = escape.pack_namedtuple("_", [])
name_z = escape.pack_namedtuple("_", ["a"])
ast_x = self._namedtuple_ast(name_x, [])
ast_z = self._namedtuple_ast(name_z, ["a"])
ast = pytd_utils.Concat(ast_x, ast_z)
expected = pytd_utils.Print(ast) + textwrap.dedent("""
collections = ... # type: module
X = {name_x}
Y = {name_x}
Z = {name_z}""").format(name_x=name_x, name_z=name_z)
self.assertTypesMatchPytd(ty, expected)
def test_subclass(self):
ty = self.Infer("""
import collections
class X(collections.namedtuple("X", [])):
def __new__(cls, _):
return super(X, cls).__new__(cls)
""")
name = escape.pack_namedtuple("X", [])
ast = self._namedtuple_ast(name, [])
expected = pytd_utils.Print(ast) + textwrap.dedent("""
collections = ... # type: module
_TX = TypeVar("_TX", bound=X)
class X({name}):
def __new__(cls: Type[_TX], _) -> _TX: ...""").format(name=name)
self.assertTypesMatchPytd(ty, expected)
def test_subclass_replace(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", "a")
class Y(X): pass
z = Y(1)._replace(a=2)
""")
self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y")
def test_subclass_make(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", "a")
class Y(X): pass
z = Y._make([1])
""")
self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y")
def test_unpacking(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import NamedTuple
X = NamedTuple("X", [('a', str), ('b', int)])
""")
ty = self.Infer("""
import foo
v = None # type: foo.X
a, b = v
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
v = ... # type: foo.namedtuple_X_0
a = ... # type: str
b = ... # type: int
""")
test_base.main(globals(), __name__ == "__main__")
| true | true |
f71c885784aeccc154dd5cca2413ad6060ae4e6b | 3,087 | py | Python | tests/tests_hrv.py | raimonpv/NeuroKit | cb37d83ee20d6a13a91c4848aa435f41e979e203 | [
"MIT"
] | 1 | 2021-11-14T21:18:43.000Z | 2021-11-14T21:18:43.000Z | tests/tests_hrv.py | raimonpv/NeuroKit | cb37d83ee20d6a13a91c4848aa435f41e979e203 | [
"MIT"
] | null | null | null | tests/tests_hrv.py | raimonpv/NeuroKit | cb37d83ee20d6a13a91c4848aa435f41e979e203 | [
"MIT"
] | 1 | 2021-11-14T21:18:48.000Z | 2021-11-14T21:18:48.000Z | import numpy as np
import neurokit2 as nk
def test_hrv_time():
ecg_slow = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=70, random_state=42)
ecg_fast = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks_slow = nk.ecg_process(ecg_slow, sampling_rate=1000)
_, peaks_fast = nk.ecg_process(ecg_fast, sampling_rate=1000)
hrv_slow = nk.hrv_time(peaks_slow, sampling_rate=1000)
hrv_fast = nk.hrv_time(peaks_fast, sampling_rate=1000)
assert np.all(hrv_fast["HRV_RMSSD"] < hrv_slow["HRV_RMSSD"])
assert np.all(hrv_fast["HRV_MeanNN"] < hrv_slow["HRV_MeanNN"])
assert np.all(hrv_fast["HRV_SDNN"] < hrv_slow["HRV_SDNN"])
assert np.all(hrv_fast["HRV_CVNN"] < hrv_slow["HRV_CVNN"])
assert np.all(hrv_fast["HRV_CVSD"] < hrv_slow["HRV_CVSD"])
assert np.all(hrv_fast["HRV_MedianNN"] < hrv_slow["HRV_MedianNN"])
assert np.all(hrv_fast["HRV_MadNN"] < hrv_slow["HRV_MadNN"])
assert np.all(hrv_fast["HRV_MCVNN"] < hrv_slow["HRV_MCVNN"])
assert np.all(hrv_fast["HRV_pNN50"] == hrv_slow["HRV_pNN50"])
assert np.all(hrv_fast["HRV_pNN20"] < hrv_slow["HRV_pNN20"])
assert np.all(hrv_fast["HRV_TINN"] < hrv_slow["HRV_TINN"])
assert np.all(hrv_fast["HRV_HTI"] > hrv_slow["HRV_HTI"])
def test_hrv_frequency():
# Test frequency domain
ecg1 = nk.ecg_simulate(duration=60, sampling_rate=2000, heart_rate=70, random_state=42)
_, peaks1 = nk.ecg_process(ecg1, sampling_rate=2000)
hrv1 = nk.hrv_frequency(peaks1, sampling_rate=2000)
ecg2 = nk.signal_resample(ecg1, sampling_rate=2000, desired_sampling_rate=500)
_, peaks2 = nk.ecg_process(ecg2, sampling_rate=500)
hrv2 = nk.hrv_frequency(peaks2, sampling_rate=500)
assert np.allclose(hrv1["HRV_HF"] - hrv2["HRV_HF"], 0, atol=1.5)
assert np.isnan(hrv1["HRV_LF"][0])
assert np.isnan(hrv2["HRV_LF"][0])
assert np.isnan(hrv1["HRV_VLF"][0])
assert np.isnan(hrv2["HRV_LF"][0])
def test_hrv():
ecg = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks = nk.ecg_process(ecg, sampling_rate=1000)
ecg_hrv = nk.hrv(peaks, sampling_rate=1000)
columns = ['HRV_RMSSD', 'HRV_MeanNN', 'HRV_SDNN', 'HRV_SDSD', 'HRV_CVNN',
'HRV_CVSD', 'HRV_MedianNN', 'HRV_MadNN', 'HRV_MCVNN', 'HRV_IQRNN',
'HRV_pNN50', 'HRV_pNN20', 'HRV_TINN', 'HRV_HTI', 'HRV_ULF',
'HRV_VLF', 'HRV_LF', 'HRV_HF', 'HRV_VHF', 'HRV_LFHF', 'HRV_LFn',
'HRV_HFn', 'HRV_LnHF', 'HRV_SD1', 'HRV_SD2', 'HRV_SD1SD2', 'HRV_S',
'HRV_CSI', 'HRV_CVI', 'HRV_CSI_Modified', 'HRV_PIP', 'HRV_IALS',
'HRV_PSS', 'HRV_PAS', 'HRV_GI', 'HRV_SI', 'HRV_AI', 'HRV_PI',
'HRV_C1d', 'HRV_C1a', 'HRV_SD1d',
'HRV_SD1a', 'HRV_C2d',
'HRV_C2a', 'HRV_SD2d', 'HRV_SD2a',
'HRV_Cd', 'HRV_Ca', 'HRV_SDNNd',
'HRV_SDNNa', 'HRV_ApEn', 'HRV_SampEn']
assert all(elem in np.array(ecg_hrv.columns.values, dtype=object) for elem
in columns) | 44.73913 | 96 | 0.661808 | import numpy as np
import neurokit2 as nk
def test_hrv_time():
ecg_slow = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=70, random_state=42)
ecg_fast = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks_slow = nk.ecg_process(ecg_slow, sampling_rate=1000)
_, peaks_fast = nk.ecg_process(ecg_fast, sampling_rate=1000)
hrv_slow = nk.hrv_time(peaks_slow, sampling_rate=1000)
hrv_fast = nk.hrv_time(peaks_fast, sampling_rate=1000)
assert np.all(hrv_fast["HRV_RMSSD"] < hrv_slow["HRV_RMSSD"])
assert np.all(hrv_fast["HRV_MeanNN"] < hrv_slow["HRV_MeanNN"])
assert np.all(hrv_fast["HRV_SDNN"] < hrv_slow["HRV_SDNN"])
assert np.all(hrv_fast["HRV_CVNN"] < hrv_slow["HRV_CVNN"])
assert np.all(hrv_fast["HRV_CVSD"] < hrv_slow["HRV_CVSD"])
assert np.all(hrv_fast["HRV_MedianNN"] < hrv_slow["HRV_MedianNN"])
assert np.all(hrv_fast["HRV_MadNN"] < hrv_slow["HRV_MadNN"])
assert np.all(hrv_fast["HRV_MCVNN"] < hrv_slow["HRV_MCVNN"])
assert np.all(hrv_fast["HRV_pNN50"] == hrv_slow["HRV_pNN50"])
assert np.all(hrv_fast["HRV_pNN20"] < hrv_slow["HRV_pNN20"])
assert np.all(hrv_fast["HRV_TINN"] < hrv_slow["HRV_TINN"])
assert np.all(hrv_fast["HRV_HTI"] > hrv_slow["HRV_HTI"])
def test_hrv_frequency():
ecg1 = nk.ecg_simulate(duration=60, sampling_rate=2000, heart_rate=70, random_state=42)
_, peaks1 = nk.ecg_process(ecg1, sampling_rate=2000)
hrv1 = nk.hrv_frequency(peaks1, sampling_rate=2000)
ecg2 = nk.signal_resample(ecg1, sampling_rate=2000, desired_sampling_rate=500)
_, peaks2 = nk.ecg_process(ecg2, sampling_rate=500)
hrv2 = nk.hrv_frequency(peaks2, sampling_rate=500)
assert np.allclose(hrv1["HRV_HF"] - hrv2["HRV_HF"], 0, atol=1.5)
assert np.isnan(hrv1["HRV_LF"][0])
assert np.isnan(hrv2["HRV_LF"][0])
assert np.isnan(hrv1["HRV_VLF"][0])
assert np.isnan(hrv2["HRV_LF"][0])
def test_hrv():
ecg = nk.ecg_simulate(duration=60, sampling_rate=1000, heart_rate=110, random_state=42)
_, peaks = nk.ecg_process(ecg, sampling_rate=1000)
ecg_hrv = nk.hrv(peaks, sampling_rate=1000)
columns = ['HRV_RMSSD', 'HRV_MeanNN', 'HRV_SDNN', 'HRV_SDSD', 'HRV_CVNN',
'HRV_CVSD', 'HRV_MedianNN', 'HRV_MadNN', 'HRV_MCVNN', 'HRV_IQRNN',
'HRV_pNN50', 'HRV_pNN20', 'HRV_TINN', 'HRV_HTI', 'HRV_ULF',
'HRV_VLF', 'HRV_LF', 'HRV_HF', 'HRV_VHF', 'HRV_LFHF', 'HRV_LFn',
'HRV_HFn', 'HRV_LnHF', 'HRV_SD1', 'HRV_SD2', 'HRV_SD1SD2', 'HRV_S',
'HRV_CSI', 'HRV_CVI', 'HRV_CSI_Modified', 'HRV_PIP', 'HRV_IALS',
'HRV_PSS', 'HRV_PAS', 'HRV_GI', 'HRV_SI', 'HRV_AI', 'HRV_PI',
'HRV_C1d', 'HRV_C1a', 'HRV_SD1d',
'HRV_SD1a', 'HRV_C2d',
'HRV_C2a', 'HRV_SD2d', 'HRV_SD2a',
'HRV_Cd', 'HRV_Ca', 'HRV_SDNNd',
'HRV_SDNNa', 'HRV_ApEn', 'HRV_SampEn']
assert all(elem in np.array(ecg_hrv.columns.values, dtype=object) for elem
in columns) | true | true |
f71c887dca4cf691587ab051359773359de7010e | 3,226 | bzl | Python | build_tools/bazel/iree_lit_test.bzl | smit-hinsu/iree | a385d311b701cdc06cb825000ddb34c8a11c6eef | [
"Apache-2.0"
] | 1 | 2022-02-13T15:27:08.000Z | 2022-02-13T15:27:08.000Z | build_tools/bazel/iree_lit_test.bzl | iree-github-actions-bot/iree | 9982f10090527a1a86cd280b4beff9a579b96b38 | [
"Apache-2.0"
] | 1 | 2022-01-27T18:10:51.000Z | 2022-01-27T18:10:51.000Z | build_tools/bazel/iree_lit_test.bzl | iree-github-actions-bot/iree | 9982f10090527a1a86cd280b4beff9a579b96b38 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Bazel macros for running lit tests."""
load(":lit_test.bzl", "lit_test", "lit_test_suite")
def iree_lit_test(
name,
cfg = "//iree:lit.cfg.py",
tools = None,
env = None,
**kwargs):
"""A thin wrapper around lit_test with some opinionated settings.
See the base lit_test for more details on argument meanings.
Args:
name: name for the test.
cfg: string. lit config file.
tools: label_list. tools that should be included on the PATH.
llvm-symbolizer is added by default.
env: string_dict. Environment variables available to the test at runtime.
FILECHECK_OPTS=--enable-var-scope is added if FILECHECK_OPTS is not
already set.
**kwargs: additional keyword args to forward to the underyling lit_test.
"""
tools = tools or []
env = env or {}
# Always include llvm-symbolizer so we get useful stack traces. Maybe it
# would be better to force everyone to do this explicitly, but since
# forgetting wouldn't cause the test to fail, only make debugging harder
# when it does, I think better to hardcode it here.
llvm_symbolizer = "@llvm-project//llvm:llvm-symbolizer"
if llvm_symbolizer not in tools:
tools.append(llvm_symbolizer)
filecheck_env_var = "FILECHECK_OPTS"
if filecheck_env_var not in env:
env[filecheck_env_var] = "--enable-var-scope"
lit_test(
name = name,
cfg = cfg,
tools = tools,
env = env,
**kwargs
)
def iree_lit_test_suite(
name,
cfg = "//iree:lit.cfg.py",
tools = None,
env = None,
**kwargs):
"""A thin wrapper around lit_test_suite with some opinionated settings.
See the base lit_test for more details on argument meanings.
Args:
name: name for the test suite.
cfg: string. lit config file.
tools: label_list. tools that should be included on the PATH.
llvm-symbolizer is added by default.
env: string_dict. Environment variables available to the test at runtime.
FILECHECK_OPTS=--enable-var-scope is added if FILECHECK_OPTS is not
already set.
**kwargs: additional keyword args to forward to the underyling
lit_test_suite.
"""
tools = tools or []
env = env or {}
# Always include llvm-symbolizer so we get useful stack traces. Maybe it
# would be better to force everyone to do this explicitly, but since
# forgetting wouldn't cause the test to fail, only make debugging harder
# when it does, I think better to hardcode it here.
llvm_symbolizer = "@llvm-project//llvm:llvm-symbolizer"
if llvm_symbolizer not in tools:
tools.append(llvm_symbolizer)
filecheck_env_var = "FILECHECK_OPTS"
if filecheck_env_var not in env:
env[filecheck_env_var] = "--enable-var-scope"
lit_test_suite(
name = name,
cfg = cfg,
tools = tools,
env = env,
**kwargs
)
| 32.918367 | 79 | 0.6584 |
load(":lit_test.bzl", "lit_test", "lit_test_suite")
def iree_lit_test(
name,
cfg = "//iree:lit.cfg.py",
tools = None,
env = None,
**kwargs):
tools = tools or []
env = env or {}
# when it does, I think better to hardcode it here.
llvm_symbolizer = "@llvm-project//llvm:llvm-symbolizer"
if llvm_symbolizer not in tools:
tools.append(llvm_symbolizer)
filecheck_env_var = "FILECHECK_OPTS"
if filecheck_env_var not in env:
env[filecheck_env_var] = "--enable-var-scope"
lit_test(
name = name,
cfg = cfg,
tools = tools,
env = env,
**kwargs
)
def iree_lit_test_suite(
name,
cfg = "//iree:lit.cfg.py",
tools = None,
env = None,
**kwargs):
tools = tools or []
env = env or {}
# Always include llvm-symbolizer so we get useful stack traces. Maybe it
# would be better to force everyone to do this explicitly, but since
# forgetting wouldn't cause the test to fail, only make debugging harder
llvm_symbolizer = "@llvm-project//llvm:llvm-symbolizer"
if llvm_symbolizer not in tools:
tools.append(llvm_symbolizer)
filecheck_env_var = "FILECHECK_OPTS"
if filecheck_env_var not in env:
env[filecheck_env_var] = "--enable-var-scope"
lit_test_suite(
name = name,
cfg = cfg,
tools = tools,
env = env,
**kwargs
)
| true | true |
f71c8959b58f25069e1143ec6f69c7935fd4843b | 8,176 | py | Python | safe/view.py | s-a-f-e/backend | 6018f51466df9abd58f25729d91856842eee9509 | [
"MIT"
] | 1 | 2019-05-06T19:40:43.000Z | 2019-05-06T19:40:43.000Z | safe/view.py | s-a-f-e/backend | 6018f51466df9abd58f25729d91856842eee9509 | [
"MIT"
] | 9 | 2019-12-04T22:57:46.000Z | 2022-02-10T07:15:11.000Z | safe/view.py | s-a-f-e/backend | 6018f51466df9abd58f25729d91856842eee9509 | [
"MIT"
] | 3 | 2019-05-01T20:41:33.000Z | 2019-10-03T20:57:00.000Z | from people.models import Village, Mother, Driver, HealthCenter, MotherDriverConnection
from django.http import JsonResponse, Http404
from django.core import serializers
from decouple import config
from .geokdbush.geokdbush import around, distance
import requests
import json
import time
FRONTLINE_KEY = config('FRONTLINESMS_SECRET')
MASTER_PHONE = config('MASTER_PHONE')
def village(request, id):
try:
v_obj = Village.objects.get(pk=id)
data = {
'name': v_obj.name,
'latitude': v_obj.latitude,
'longitude': v_obj.longitude,
}
except Village.DoesNotExist:
raise Http404("Village does not exist")
return JsonResponse(data)
def healthcenter(request, id):
try:
v_obj = HealthCenter.objects.get(pk=id)
data = {
'name': v_obj.name,
'latitude': v_obj.latitude,
'longitude': v_obj.longitude,
}
except HealthCenter.DoesNotExist:
raise Http404("HealthCenter does not exist")
return JsonResponse(data)
def mother(request, id):
try:
v_obj = Mother.objects.get(phone=id)
mom_lat = v_obj.latitude
mom_lon = v_obj.longitude
# get all the drivers registered
drivers = Driver.objects.values()
# build the list of drivers
driversLocList = []
for d in drivers:
if d["available"]:
driversLocList.append({
"name": d["name"],
"phone": d["phone"],
"lat": d["latitude"],
"lon": d["longitude"]
})
momloc = {"lon": mom_lon, "lat": mom_lat}
driversList = []
for d in driversLocList:
dist = distance(momloc["lon"], momloc["lat"], d["lon"], d["lat"])
driversList.append((d["name"], d["phone"], dist))
# time to sort the list - sort by 3rd item (distance)
def getKey(item):
return item[2]
closestList = sorted(driversList, key=getKey)
data = {
'name': v_obj.name,
'phone': v_obj.phone,
'village': v_obj.village,
'latitude': v_obj.latitude,
'longitude': v_obj.longitude,
"Drivers": closestList
}
except Mother.DoesNotExist:
register_msg = "No entry found for " + id + \
"\nPlease reply with 'village' and your village name.\nFor example, 'village Iganga'"
url = 'https://cloud.frontlinesms.com/api/1/webhook'
payload = {"apiKey": FRONTLINE_KEY, "payload": {
"message": register_msg, "recipients": [{"type": "mobile", "value": id}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse({"data": register_msg})
# raise Http404("Mother does not exist")
print("MOTHER phone number", v_obj.phone)
# Populate many-to-many table (MotherDriverConnection)
MotherDriverConnection.objects.create(motherPhoneNumber=v_obj.phone, motherName=v_obj.name, motherVillage=v_obj.village, driverPhoneNumber=closestList[0][1], driverIsComing=False)
# ping the SMS server with closest driver
url = 'https://cloud.frontlinesms.com/api/1/webhook'
pickup_msg = "Can you pick up a mother at "+ data["village"] + " village. " \
"\nIf yes, reply with '1', if no, reply with '2'."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": pickup_msg,
"recipients": [{"type": "mobile", "value": closestList[0][1]}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse(data)
def regMother(request, id):
parsed = id.split('&', 1)
momPhone = parsed[0]
momVillage = parsed[1]
# see if village send via SMS is in the database
villages = Village.objects.values()
listVillages = list(villages)
try:
village = list(
filter(lambda v: v["name"].lower() == momVillage.lower(), listVillages))
except:
print("NOT FOUND VILLAGE")
return JsonResponse({"msg": "village " + momVillage + " not found."})
momObject = {
"name": "a mother",
"phone": momPhone,
"village": village[0]["name"],
"latitude": village[0]["latitude"],
"longitude": village[0]["longitude"],
}
# enter this mom into database
try:
query = Mother(name="mom", phone=momPhone,
village=village[0]["name"],
latitude=village[0]["latitude"],
longitude=village[0]["longitude"],)
query.save()
except:
# ToDo: send a text to person monitoring the system
return JsonResponse({"msg": "Error adding new mom to db"})
url = 'https://cloud.frontlinesms.com/api/1/webhook'
mom_msg = "You are registered. Please text 'driver' to request a pickup."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": mom_msg,
"recipients": [{"type": "mobile", "value": momPhone}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse(momObject)
def driverOnOffDuty(request, id, onDutyFlag):
try:
m_obj = MotherDriverConnection.objects.filter(driverPhoneNumber=id).values()
json_res = []
for key in m_obj:
m_json = dict(key)
json_res.append(m_json)
if onDutyFlag == 1:
Driver.objects.filter(phone=id).update(available = False)
# build YES url to
url = 'https://cloud.frontlinesms.com/api/1/webhook'
pickup_msg = "Please pick up " + \
json_res[0]["motherName"] + " at " + json_res[0]["motherVillage"] + \
" village. Her number is " + \
json_res[0]["motherPhoneNumber"] + "\nPlease text her to let her know you are on the way."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": pickup_msg,
"recipients": [{"type": "mobile", "value": json_res[0]["driverPhoneNumber"]}]}}
r = requests.post(url, data=json.dumps(payload))
# delete connection
MotherDriverConnection.objects.filter(driverPhoneNumber=id).delete()
return JsonResponse({"data": pickup_msg})
if onDutyFlag == 2:
flag = False
Driver.objects.filter(phone=id).update(available = flag)
# delete this connection
MotherDriverConnection.objects.filter(driverPhoneNumber=id).delete()
# API call here to get next driver/make new connection
mother(request, json_res[0]["motherPhoneNumber"])
except Driver.DoesNotExist:
raise Http404("Driver does not exist")
return JsonResponse({"Driver":"Successfully updated"})
def driverOnline(request, id, onlineFlag):
try:
if onlineFlag == "online":
Driver.objects.filter(phone=id).update(available = True)
# build online url
url = 'https://cloud.frontlinesms.com/api/1/webhook'
online_msg = "You are now online. Reply with 'offline' to go offline."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": online_msg,
"recipients": [{"type": "mobile", "value": id}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse({"data": online_msg})
if onlineFlag == "offline":
Driver.objects.filter(phone=id).update(available = False)
# build offline url
url = 'https://cloud.frontlinesms.com/api/1/webhook'
online_msg = "You are now offline. Reply with 'online' to go online."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": online_msg,
"recipients": [{"type": "mobile", "value": id}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse({"data": online_msg})
except Driver.DoesNotExist:
raise Http404("Driver does not exist")
| 40.676617 | 183 | 0.57999 | from people.models import Village, Mother, Driver, HealthCenter, MotherDriverConnection
from django.http import JsonResponse, Http404
from django.core import serializers
from decouple import config
from .geokdbush.geokdbush import around, distance
import requests
import json
import time
FRONTLINE_KEY = config('FRONTLINESMS_SECRET')
MASTER_PHONE = config('MASTER_PHONE')
def village(request, id):
try:
v_obj = Village.objects.get(pk=id)
data = {
'name': v_obj.name,
'latitude': v_obj.latitude,
'longitude': v_obj.longitude,
}
except Village.DoesNotExist:
raise Http404("Village does not exist")
return JsonResponse(data)
def healthcenter(request, id):
try:
v_obj = HealthCenter.objects.get(pk=id)
data = {
'name': v_obj.name,
'latitude': v_obj.latitude,
'longitude': v_obj.longitude,
}
except HealthCenter.DoesNotExist:
raise Http404("HealthCenter does not exist")
return JsonResponse(data)
def mother(request, id):
try:
v_obj = Mother.objects.get(phone=id)
mom_lat = v_obj.latitude
mom_lon = v_obj.longitude
drivers = Driver.objects.values()
driversLocList = []
for d in drivers:
if d["available"]:
driversLocList.append({
"name": d["name"],
"phone": d["phone"],
"lat": d["latitude"],
"lon": d["longitude"]
})
momloc = {"lon": mom_lon, "lat": mom_lat}
driversList = []
for d in driversLocList:
dist = distance(momloc["lon"], momloc["lat"], d["lon"], d["lat"])
driversList.append((d["name"], d["phone"], dist))
def getKey(item):
return item[2]
closestList = sorted(driversList, key=getKey)
data = {
'name': v_obj.name,
'phone': v_obj.phone,
'village': v_obj.village,
'latitude': v_obj.latitude,
'longitude': v_obj.longitude,
"Drivers": closestList
}
except Mother.DoesNotExist:
register_msg = "No entry found for " + id + \
"\nPlease reply with 'village' and your village name.\nFor example, 'village Iganga'"
url = 'https://cloud.frontlinesms.com/api/1/webhook'
payload = {"apiKey": FRONTLINE_KEY, "payload": {
"message": register_msg, "recipients": [{"type": "mobile", "value": id}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse({"data": register_msg})
print("MOTHER phone number", v_obj.phone)
MotherDriverConnection.objects.create(motherPhoneNumber=v_obj.phone, motherName=v_obj.name, motherVillage=v_obj.village, driverPhoneNumber=closestList[0][1], driverIsComing=False)
url = 'https://cloud.frontlinesms.com/api/1/webhook'
pickup_msg = "Can you pick up a mother at "+ data["village"] + " village. " \
"\nIf yes, reply with '1', if no, reply with '2'."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": pickup_msg,
"recipients": [{"type": "mobile", "value": closestList[0][1]}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse(data)
def regMother(request, id):
parsed = id.split('&', 1)
momPhone = parsed[0]
momVillage = parsed[1]
villages = Village.objects.values()
listVillages = list(villages)
try:
village = list(
filter(lambda v: v["name"].lower() == momVillage.lower(), listVillages))
except:
print("NOT FOUND VILLAGE")
return JsonResponse({"msg": "village " + momVillage + " not found."})
momObject = {
"name": "a mother",
"phone": momPhone,
"village": village[0]["name"],
"latitude": village[0]["latitude"],
"longitude": village[0]["longitude"],
}
try:
query = Mother(name="mom", phone=momPhone,
village=village[0]["name"],
latitude=village[0]["latitude"],
longitude=village[0]["longitude"],)
query.save()
except:
return JsonResponse({"msg": "Error adding new mom to db"})
url = 'https://cloud.frontlinesms.com/api/1/webhook'
mom_msg = "You are registered. Please text 'driver' to request a pickup."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": mom_msg,
"recipients": [{"type": "mobile", "value": momPhone}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse(momObject)
def driverOnOffDuty(request, id, onDutyFlag):
try:
m_obj = MotherDriverConnection.objects.filter(driverPhoneNumber=id).values()
json_res = []
for key in m_obj:
m_json = dict(key)
json_res.append(m_json)
if onDutyFlag == 1:
Driver.objects.filter(phone=id).update(available = False)
url = 'https://cloud.frontlinesms.com/api/1/webhook'
pickup_msg = "Please pick up " + \
json_res[0]["motherName"] + " at " + json_res[0]["motherVillage"] + \
" village. Her number is " + \
json_res[0]["motherPhoneNumber"] + "\nPlease text her to let her know you are on the way."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": pickup_msg,
"recipients": [{"type": "mobile", "value": json_res[0]["driverPhoneNumber"]}]}}
r = requests.post(url, data=json.dumps(payload))
MotherDriverConnection.objects.filter(driverPhoneNumber=id).delete()
return JsonResponse({"data": pickup_msg})
if onDutyFlag == 2:
flag = False
Driver.objects.filter(phone=id).update(available = flag)
MotherDriverConnection.objects.filter(driverPhoneNumber=id).delete()
mother(request, json_res[0]["motherPhoneNumber"])
except Driver.DoesNotExist:
raise Http404("Driver does not exist")
return JsonResponse({"Driver":"Successfully updated"})
def driverOnline(request, id, onlineFlag):
try:
if onlineFlag == "online":
Driver.objects.filter(phone=id).update(available = True)
url = 'https://cloud.frontlinesms.com/api/1/webhook'
online_msg = "You are now online. Reply with 'offline' to go offline."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": online_msg,
"recipients": [{"type": "mobile", "value": id}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse({"data": online_msg})
if onlineFlag == "offline":
Driver.objects.filter(phone=id).update(available = False)
url = 'https://cloud.frontlinesms.com/api/1/webhook'
online_msg = "You are now offline. Reply with 'online' to go online."
payload = {"apiKey": FRONTLINE_KEY, "payload": {"message": online_msg,
"recipients": [{"type": "mobile", "value": id}]}}
r = requests.post(url, data=json.dumps(payload))
return JsonResponse({"data": online_msg})
except Driver.DoesNotExist:
raise Http404("Driver does not exist")
| true | true |
f71c8a133ef8994968d105d86d6a4f81b0c891b8 | 3,888 | py | Python | examples/python_service/pyservice.py | laungcisin/skein | 7f023239dcdee1482774466032bd63468cc7e42f | [
"BSD-3-Clause"
] | 124 | 2018-04-21T23:26:57.000Z | 2022-01-24T14:34:26.000Z | examples/python_service/pyservice.py | laungcisin/skein | 7f023239dcdee1482774466032bd63468cc7e42f | [
"BSD-3-Clause"
] | 144 | 2018-05-21T13:57:01.000Z | 2022-03-31T13:07:42.000Z | examples/python_service/pyservice.py | laungcisin/skein | 7f023239dcdee1482774466032bd63468cc7e42f | [
"BSD-3-Clause"
] | 36 | 2018-07-01T19:09:42.000Z | 2022-03-31T16:04:47.000Z | import argparse
import os
import tempfile
from getpass import getuser
import skein
from skein.tornado import SimpleAuthMixin, KerberosAuthMixin, init_kerberos
from tornado import web, ioloop
# An argument parser for configuring the application
parser = argparse.ArgumentParser(
description="A web service for submitting python scripts to YARN."
)
parser.add_argument(
"--keytab", default=None,
help=("The location of a keytab file. If not specified, 'simple' "
"authentication will be used")
)
parser.add_argument(
"--principal", default=None,
help=("The principal to use if using kerberos. Defaults to the "
"current user name.")
)
parser.add_argument(
"--port", default=8888, type=int,
help="The port to serve from. Default is 8888."
)
args = parser.parse_args()
if args.keytab:
# Use the kerberos auth mixin, and initialize kerberos for HTTP auth
AuthMixin = KerberosAuthMixin
init_kerberos(keytab=args.keytab)
# Also create the skein client with keytab and principal specified
skein_client = skein.Client(
keytab=args.keytab,
principal=args.principal or getuser()
)
else:
# Use the simple auth mixin
AuthMixin = SimpleAuthMixin
skein_client = skein.Client()
# Read in the `index.html` source
thisdir = os.path.dirname(__file__)
with open(os.path.join(thisdir, "index.html")) as f:
INDEX_HTML = f.read()
class LaunchHandler(AuthMixin, web.RequestHandler):
    """Web handler that lets an authenticated user submit a python script
    to YARN through a shared skein client."""

    @property
    def client(self):
        # Shared skein client stored on the tornado application settings
        return self.settings['client']

    @web.authenticated
    def get(self):
        # Main page just displays the web form
        self.write(INDEX_HTML)

    @web.authenticated
    async def post(self):
        """Validate the form, build an ApplicationSpec, submit it and
        redirect to the YARN tracking page."""
        # Extract request parameters
        queue = self.get_argument('queue') or 'default'
        memory = float(self.get_argument('memory'))
        vcores = int(self.get_argument('vcores'))
        try:
            script = self.request.files['script'][0]
        except (IndexError, KeyError):
            raise web.HTTPError(400, reason="Missing script")
        # Check memory and vcores are in bounds.
        # BUGFIX: HTTPError takes a numeric status code first; passing the
        # message string as the status code raises a TypeError/500 instead
        # of the intended 400 Bad Request.
        if memory < 0.5 or memory > 8:
            raise web.HTTPError(400, reason="0.5 <= memory <= 8 required")
        if vcores < 1 or vcores > 4:
            raise web.HTTPError(400, reason="1 <= vcores <= 4 required")
        # We need to write the script temporarily to disk so Skein can upload it
        with tempfile.NamedTemporaryFile() as f:
            f.write(script['body'])
            f.file.flush()
            # ** Construct the application specification **
            # Note that we specify the user as user logged in to the web page.
            # If kerberos authentication was used, this would match the user's
            # principal.
            spec = skein.ApplicationSpec(
                name="pyscript",
                queue=queue,
                user=self.current_user,
                master=skein.Master(
                    resources=skein.Resources(
                        memory="%f GiB" % memory,
                        vcores=vcores
                    ),
                    files={script['filename']: f.name},
                    script="python %s" % script['filename']
                )
            )
            # Submit the application and get a report; run the blocking skein
            # calls in a worker thread so the event loop is not stalled.
            report = await ioloop.IOLoop.current().run_in_executor(
                None, self.submit_and_report, spec
            )
        # Redirect the user to the application's tracking url
        self.redirect(report.tracking_url)

    def submit_and_report(self, spec):
        """Blocking helper: submit *spec* and return its application report."""
        app_id = self.client.submit(spec)
        report = self.client.application_report(app_id)
        return report
# Start the application and serve on the specified port
app = web.Application([("/", LaunchHandler)], client=skein_client)
app.listen(args.port)
ioloop.IOLoop.current().start()
| 31.868852 | 80 | 0.626286 | import argparse
import os
import tempfile
from getpass import getuser
import skein
from skein.tornado import SimpleAuthMixin, KerberosAuthMixin, init_kerberos
from tornado import web, ioloop
parser = argparse.ArgumentParser(
description="A web service for submitting python scripts to YARN."
)
parser.add_argument(
"--keytab", default=None,
help=("The location of a keytab file. If not specified, 'simple' "
"authentication will be used")
)
parser.add_argument(
"--principal", default=None,
help=("The principal to use if using kerberos. Defaults to the "
"current user name.")
)
parser.add_argument(
"--port", default=8888, type=int,
help="The port to serve from. Default is 8888."
)
args = parser.parse_args()
if args.keytab:
AuthMixin = KerberosAuthMixin
init_kerberos(keytab=args.keytab)
skein_client = skein.Client(
keytab=args.keytab,
principal=args.principal or getuser()
)
else:
AuthMixin = SimpleAuthMixin
skein_client = skein.Client()
thisdir = os.path.dirname(__file__)
with open(os.path.join(thisdir, "index.html")) as f:
INDEX_HTML = f.read()
class LaunchHandler(AuthMixin, web.RequestHandler):
@property
def client(self):
return self.settings['client']
@web.authenticated
def get(self):
self.write(INDEX_HTML)
@web.authenticated
async def post(self):
queue = self.get_argument('queue') or 'default'
memory = float(self.get_argument('memory'))
vcores = int(self.get_argument('vcores'))
try:
script = self.request.files['script'][0]
except (IndexError, KeyError):
raise web.HTTPError(400, reason="Missing script")
if memory < 0.5 or memory > 8:
raise web.HTTPError("0.5 <= memory <= 8 required")
if vcores < 1 or vcores > 4:
raise web.HTTPError("1 <= vcores <= 4 required")
with tempfile.NamedTemporaryFile() as f:
f.write(script['body'])
f.file.flush()
# principal.
spec = skein.ApplicationSpec(
name="pyscript",
queue=queue,
user=self.current_user,
master=skein.Master(
resources=skein.Resources(
memory="%f GiB" % memory,
vcores=vcores
),
files={script['filename']: f.name},
script="python %s" % script['filename']
)
)
# Submit the application and get a report
report = await ioloop.IOLoop.current().run_in_executor(
None, self.submit_and_report, spec
)
# Redirect the user to the application's tracking url
self.redirect(report.tracking_url)
def submit_and_report(self, spec):
app_id = self.client.submit(spec)
report = self.client.application_report(app_id)
return report
app = web.Application([("/", LaunchHandler)], client=skein_client)
app.listen(args.port)
ioloop.IOLoop.current().start()
| true | true |
f71c8b37ee651e199c6b02d5bd122d3d43661a14 | 2,874 | py | Python | scripts/product.py | etherisc/gif-contracts | 9bc09787a19bd79a0576e46856405cff7fdee15c | [
"Apache-2.0"
] | null | null | null | scripts/product.py | etherisc/gif-contracts | 9bc09787a19bd79a0576e46856405cff7fdee15c | [
"Apache-2.0"
] | null | null | null | scripts/product.py | etherisc/gif-contracts | 9bc09787a19bd79a0576e46856405cff7fdee15c | [
"Apache-2.0"
] | null | null | null | from web3 import Web3
from brownie import Contract
from brownie.convert import to_bytes
from brownie.network import accounts
from brownie.network.account import Account
from brownie import (
Wei,
Contract,
# Registry,
# RegistryController,
License,
LicenseController,
Policy,
PolicyController,
QueryController,
ProductService,
OracleService,
ComponentOwnerService,
PolicyFlowDefault,
InstanceOperatorService,
TestOracle,
TestProduct,
)
from scripts.const import (
ORACLE_INPUT_FORMAT,
ORACLE_OUTPUT_FORMAT,
ORACLE_NAME,
PRODUCT_NAME,
)
from scripts.util import (
get_account,
encode_function_data,
# s2h,
s2b32,
deployGifModule,
deployGifService,
)
from scripts.instance import (
GifInstance,
)
class GifTestOracle(object):
    """Deploys a TestOracle contract and walks it through the full GIF
    component lifecycle: role grant, deployment, proposal and approval."""
    def __init__(self, instance: GifInstance, oracleOwner: Account):
        operatorService = instance.getInstanceOperatorService()
        componentOwnerService = instance.getComponentOwnerService()
        oracleService = instance.getOracleService()
        # 1) add oracle provider role to owner
        opRole = operatorService.oracleProviderRole()
        operatorService.addRoleToAccount(oracleOwner, opRole)
        # 2) oracle owner creates oracle
        self.oracle = TestOracle.deploy(
            s2b32(ORACLE_NAME),
            instance.getRegistry(),
            {'from': oracleOwner})
        # 3) oracle owner proposes oracle to instance
        componentOwnerService.propose(
            self.oracle,
            {'from': oracleOwner})
        # 4) instance operator approves oracle
        operatorService.approveOracle(
            self.oracle.getId(),
            {'from': instance.getOwner()})
    def getOracleId(self) -> int:
        """Return the component id assigned to the oracle by the instance."""
        return self.oracle.getId()
    def getOracleContract(self) -> TestOracle:
        """Return the deployed brownie contract handle."""
        return self.oracle
class GifTestProduct(object):
def __init__(self, instance: GifInstance, oracle: GifTestOracle, productOwner: Account):
self.policyController = instance.getPolicyController()
operatorService = instance.getInstanceOperatorService()
productService = instance.getProductService()
self.product = TestProduct.deploy(
productService,
s2b32(PRODUCT_NAME),
oracle.getOracleId(),
{'from': productOwner})
operatorService.approveProduct(
self.product.getId(),
{'from': instance.getOwner()})
def getProductId(self) -> int:
return self.product.getId()
def getProductContract(self) -> TestProduct:
return self.product
def getPolicy(self, policyId: str):
return self.policyController.getPolicy(policyId) | 27.113208 | 93 | 0.641267 | from web3 import Web3
from brownie import Contract
from brownie.convert import to_bytes
from brownie.network import accounts
from brownie.network.account import Account
from brownie import (
Wei,
Contract,
License,
LicenseController,
Policy,
PolicyController,
QueryController,
ProductService,
OracleService,
ComponentOwnerService,
PolicyFlowDefault,
InstanceOperatorService,
TestOracle,
TestProduct,
)
from scripts.const import (
ORACLE_INPUT_FORMAT,
ORACLE_OUTPUT_FORMAT,
ORACLE_NAME,
PRODUCT_NAME,
)
from scripts.util import (
get_account,
encode_function_data,
s2b32,
deployGifModule,
deployGifService,
)
from scripts.instance import (
GifInstance,
)
class GifTestOracle(object):
def __init__(self, instance: GifInstance, oracleOwner: Account):
operatorService = instance.getInstanceOperatorService()
componentOwnerService = instance.getComponentOwnerService()
oracleService = instance.getOracleService()
opRole = operatorService.oracleProviderRole()
operatorService.addRoleToAccount(oracleOwner, opRole)
self.oracle = TestOracle.deploy(
s2b32(ORACLE_NAME),
instance.getRegistry(),
{'from': oracleOwner})
componentOwnerService.propose(
self.oracle,
{'from': oracleOwner})
operatorService.approveOracle(
self.oracle.getId(),
{'from': instance.getOwner()})
def getOracleId(self) -> int:
return self.oracle.getId()
def getOracleContract(self) -> TestOracle:
return self.oracle
class GifTestProduct(object):
def __init__(self, instance: GifInstance, oracle: GifTestOracle, productOwner: Account):
self.policyController = instance.getPolicyController()
operatorService = instance.getInstanceOperatorService()
productService = instance.getProductService()
self.product = TestProduct.deploy(
productService,
s2b32(PRODUCT_NAME),
oracle.getOracleId(),
{'from': productOwner})
operatorService.approveProduct(
self.product.getId(),
{'from': instance.getOwner()})
def getProductId(self) -> int:
return self.product.getId()
def getProductContract(self) -> TestProduct:
return self.product
def getPolicy(self, policyId: str):
return self.policyController.getPolicy(policyId) | true | true |
f71c8bb35951957eb8062c9ab9ba757124ceaade | 1,056 | py | Python | database/creds.py | LaudateCorpus1/n-view | 8f474e40344c9a48e1d6ad43a4cfcb7de641219c | [
"Apache-2.0"
] | null | null | null | database/creds.py | LaudateCorpus1/n-view | 8f474e40344c9a48e1d6ad43a4cfcb7de641219c | [
"Apache-2.0"
] | null | null | null | database/creds.py | LaudateCorpus1/n-view | 8f474e40344c9a48e1d6ad43a4cfcb7de641219c | [
"Apache-2.0"
] | null | null | null | # (C) Copyright 2019 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __author__ = "@netwookie"
# __credits__ = ["Rick Kauffman"]
# __license__ = "Apache2.0"
# __version__ = "1.0.0"
# __maintainer__ = "Rick Kauffman"
# __email__ = "rick.a.kauffman@hpe.com"
from mongoengine import signals
from application import db
class Creds(db.Document):
    """MongoDB document storing login credentials for a target host.
    The db_field values keep the stored keys short ("h", "u", "p")."""
    hostip = db.StringField(db_field="h", required=True)
    username= db.StringField(db_field="u", required=True)
    password = db.StringField(db_field="p", required=True)
| 35.2 | 74 | 0.749053 |
from mongoengine import signals
from application import db
class Creds(db.Document):
hostip = db.StringField(db_field="h", required=True)
username= db.StringField(db_field="u", required=True)
password = db.StringField(db_field="p", required=True)
| true | true |
f71c8c2ec884cd59a6a4294250c173594ed45b44 | 2,351 | py | Python | bin/ssa-end-to-end-testing/modules/github_service.py | adriaandens/security_content | f1f2f8370ce0f0986804ea9f89555de307a49d66 | [
"Apache-2.0"
] | 1 | 2021-06-17T05:23:19.000Z | 2021-06-17T05:23:19.000Z | bin/ssa-end-to-end-testing/modules/github_service.py | adriaandens/security_content | f1f2f8370ce0f0986804ea9f89555de307a49d66 | [
"Apache-2.0"
] | null | null | null | bin/ssa-end-to-end-testing/modules/github_service.py | adriaandens/security_content | f1f2f8370ce0f0986804ea9f89555de307a49d66 | [
"Apache-2.0"
] | null | null | null |
import git
import os
import logging
import glob
# Logger
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
LOGGER = logging.getLogger(__name__)
SECURITY_CONTENT_URL = "https://github.com/splunk/security_content"
class GithubService:
    """Wrapper around a local clone of splunk/security_content that can
    compute which SSA test files changed relative to ``develop``."""
    def __init__(self, security_content_branch):
        # Branch under test; "develop" triggers the full nightly file list
        self.security_content_branch = security_content_branch
        self.security_content_repo_obj = self.clone_project(SECURITY_CONTENT_URL, f"security_content", f"develop")
        self.security_content_repo_obj.git.checkout(security_content_branch)
    def clone_project(self, url, project, branch):
        """Clone *url* at *branch* into directory *project* and return the repo."""
        LOGGER.info(f"Clone Security Content Project")
        repo_obj = git.Repo.clone_from(url, project, branch=branch)
        return repo_obj
    def get_changed_test_files_ssa(self):
        """Return the list of SSA test file paths to run.

        On a feature branch this is derived from ``git diff`` against
        develop; on develop itself every tests/*/ssa*.yml file is returned.
        """
        branch1 = self.security_content_branch
        branch2 = 'develop'
        g = git.Git('security_content')
        changed_ssa_test_files = []
        if branch1 != 'develop':
            differ = g.diff('--name-only', branch1, branch2)
            changed_files = differ.splitlines()
            for file_path in changed_files:
                # added or changed test files
                if file_path.startswith('tests'):
                    if os.path.basename(file_path).startswith('ssa'):
                        if file_path not in changed_ssa_test_files:
                            changed_ssa_test_files.append(file_path)
                # changed detections: map detections/*.yml -> tests/*.test.yml
                if file_path.startswith('detections'):
                    if os.path.basename(file_path).startswith('ssa'):
                        file_path_base = os.path.splitext(file_path)[0].replace('detections', 'tests') + '.test'
                        file_path_new = file_path_base + '.yml'
                        if file_path_new not in changed_ssa_test_files:
                            changed_ssa_test_files.append(file_path_new)
        # all SSA test files for nightly build
        else:
            changed_files = sorted(glob.glob('security_content/tests/*/*.yml'))
            for file_path in changed_files:
                # strip the clone directory prefix so paths are repo-relative
                file_path = file_path.replace('security_content/','')
                if os.path.basename(file_path).startswith('ssa'):
                    changed_ssa_test_files.append(file_path)
        return changed_ssa_test_files
| 36.169231 | 114 | 0.632071 |
import git
import os
import logging
import glob
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
LOGGER = logging.getLogger(__name__)
SECURITY_CONTENT_URL = "https://github.com/splunk/security_content"
class GithubService:
def __init__(self, security_content_branch):
self.security_content_branch = security_content_branch
self.security_content_repo_obj = self.clone_project(SECURITY_CONTENT_URL, f"security_content", f"develop")
self.security_content_repo_obj.git.checkout(security_content_branch)
def clone_project(self, url, project, branch):
LOGGER.info(f"Clone Security Content Project")
repo_obj = git.Repo.clone_from(url, project, branch=branch)
return repo_obj
def get_changed_test_files_ssa(self):
branch1 = self.security_content_branch
branch2 = 'develop'
g = git.Git('security_content')
changed_ssa_test_files = []
if branch1 != 'develop':
differ = g.diff('--name-only', branch1, branch2)
changed_files = differ.splitlines()
for file_path in changed_files:
if file_path.startswith('tests'):
if os.path.basename(file_path).startswith('ssa'):
if file_path not in changed_ssa_test_files:
changed_ssa_test_files.append(file_path)
if file_path.startswith('detections'):
if os.path.basename(file_path).startswith('ssa'):
file_path_base = os.path.splitext(file_path)[0].replace('detections', 'tests') + '.test'
file_path_new = file_path_base + '.yml'
if file_path_new not in changed_ssa_test_files:
changed_ssa_test_files.append(file_path_new)
else:
changed_files = sorted(glob.glob('security_content/tests/*/*.yml'))
for file_path in changed_files:
file_path = file_path.replace('security_content/','')
if os.path.basename(file_path).startswith('ssa'):
changed_ssa_test_files.append(file_path)
return changed_ssa_test_files
| true | true |
f71c8ce9b3d8ee3617835b4bd38ad01e0b6f17d2 | 2,725 | py | Python | gumpy/split.py | gumpy-bci/gumpy | abd8230dc50bd8b0a2348c6e08a1bba1c0ed3146 | [
"MIT"
] | 55 | 2018-02-20T14:17:06.000Z | 2022-03-22T06:33:31.000Z | gumpy/gumpy/split.py | Tizzio/gumpy-project | c51ee75ddf1eaa58813b493282014da6f31f5591 | [
"MIT"
] | 5 | 2018-02-17T06:54:55.000Z | 2019-07-16T15:18:25.000Z | gumpy/gumpy/split.py | Tizzio/gumpy-project | c51ee75ddf1eaa58813b493282014da6f31f5591 | [
"MIT"
] | 23 | 2018-02-17T06:45:56.000Z | 2022-03-04T06:01:07.000Z | import sklearn.model_selection
import numpy as np
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit, cross_val_score, StratifiedKFold
def normal(X, labels, test_size):
    """Randomly partition features and labels into train and test subsets.

    Args:
        X (numpy.ndarray): 2D feature matrix
        labels: label vector aligned with the rows of X
        test_size: fraction (or absolute count) of samples held out

    Returns:
        Tuple (X_train, X_test, Y_train, Y_test); deterministic because a
        fixed random_state of 0 is used.
    """
    split = sklearn.model_selection.train_test_split(
        X, labels, test_size=test_size, random_state=0)
    X_train, X_test, Y_train, Y_test = split
    return X_train, X_test, Y_train, Y_test
def time_series_split(features, labels, n_splits):
    """Split a dataset into n time-ordered folds.

    NOTE(review): the loop rebinds the arrays on every fold, so only the
    train/test arrays of the *last* split are returned -- confirm this is
    the intended behavior (it matches the other splitters in this module).
    """
    xx = sklearn.model_selection.TimeSeriesSplit(n_splits)
    for train_index, test_index in xx.split(features):
        X_train, X_test = features[train_index], features[test_index]
        y_train, y_test = labels[train_index], labels[test_index]
    return X_train, X_test, y_train, y_test
def stratified_KFold(features, labels, n_splits):
    """Stratified K-Folds cross-validator.

    Stratification is the process of rearranging the data as to ensure each
    fold is a good representative of the whole and by also keeping the
    balance of classes.

    NOTE(review): only the train/test arrays of the last fold are returned,
    consistent with the other splitters in this module.
    """
    skf = StratifiedKFold(n_splits)
    skf.get_n_splits(features, labels)
    for train_index, test_index in skf.split(features, labels):
        X_train, X_test = features[train_index], features[test_index]
        Y_train, Y_test = labels[train_index], labels[test_index]
    return X_train, X_test, Y_train, Y_test
# Stratified ShuffleSplit cross-validator
def stratified_shuffle_Split(features, labels, n_splits,test_size,random_state):
    """Stratified ShuffleSplit cross-validator.

    Returns the train/test arrays of the last generated split.
    """
    cv = StratifiedShuffleSplit(n_splits, test_size, random_state=random_state)
    for train_index, test_index in cv.split(features,labels):
        X_train = features[train_index]
        X_test = features[test_index]
        Y_train = labels[train_index]
        Y_test = labels[test_index]
    return X_train, X_test, Y_train, Y_test
# Random permutation cross-validator
def shuffle_Split(features, labels, n_splits,test_size,random_state):
    """ShuffleSplit: random permutation cross-validator.

    Returns the train/test arrays of the last generated split.
    """
    cv = ShuffleSplit(n_splits, test_size, random_state=random_state)
    for train_index, test_index in cv.split(features):
        X_train = features[train_index]
        X_test = features[test_index]
        Y_train = labels[train_index]
        Y_test = labels[test_index]
    return X_train, X_test, Y_train, Y_test
| 36.333333 | 119 | 0.693945 | import sklearn.model_selection
import numpy as np
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit, cross_val_score, StratifiedKFold
def normal(X, labels, test_size):
Y = labels
X_train, X_test, Y_train, Y_test = \
sklearn.model_selection.train_test_split(X, Y,
test_size=test_size,
random_state=0)
return X_train, X_test, Y_train, Y_test
def time_series_split(features, labels, n_splits):
xx = sklearn.model_selection.TimeSeriesSplit(n_splits)
for train_index, test_index in xx.split(features):
X_train, X_test = features[train_index], features[test_index]
y_train, y_test = labels[train_index], labels[test_index]
return X_train, X_test, y_train, y_test
def stratified_KFold(features, labels, n_splits):
skf = StratifiedKFold(n_splits)
skf.get_n_splits(features, labels)
for train_index, test_index in skf.split(features, labels):
X_train, X_test = features[train_index], features[test_index]
Y_train, Y_test = labels[train_index], labels[test_index]
return X_train, X_test, Y_train, Y_test
def stratified_shuffle_Split(features, labels, n_splits,test_size,random_state):
cv = StratifiedShuffleSplit(n_splits, test_size, random_state=random_state)
for train_index, test_index in cv.split(features,labels):
X_train = features[train_index]
X_test = features[test_index]
Y_train = labels[train_index]
Y_test = labels[test_index]
return X_train, X_test, Y_train, Y_test
def shuffle_Split(features, labels, n_splits,test_size,random_state):
cv = ShuffleSplit(n_splits, test_size, random_state=random_state)
for train_index, test_index in cv.split(features):
X_train = features[train_index]
X_test = features[test_index]
Y_train = labels[train_index]
Y_test = labels[test_index]
return X_train, X_test, Y_train, Y_test
| true | true |
f71c8d37ae326e29cdf957282fbbe1c51cf54ac4 | 1,004 | py | Python | src/slack.py | villoro/airflow_tasks | 81bd892744a9bbbf6e01903649b6c3786a955a5a | [
"MIT"
] | null | null | null | src/slack.py | villoro/airflow_tasks | 81bd892744a9bbbf6e01903649b6c3786a955a5a | [
"MIT"
] | 4 | 2020-10-09T15:59:09.000Z | 2020-11-18T08:34:44.000Z | src/slack.py | villoro/airflow_tasks | 81bd892744a9bbbf6e01903649b6c3786a955a5a | [
"MIT"
] | null | null | null | import json
import requests
from utils import get_secret
from utils import is_pro
def send_slack(text="", channel="test", blocks=None):
    """Post a message to a Slack incoming webhook.

    Args:
        text: plain-text message body.
        channel: logical channel name; one of "test", "events", "general".
        blocks: optional Slack Block Kit payload.

    Raises:
        ValueError: if *channel* is not a known channel.
        requests.HTTPError: if Slack rejects the request.
    """
    # Validate with an explicit exception: ``assert`` is stripped when
    # Python runs with -O, which would silently allow bad channel names.
    if channel not in ("test", "events", "general"):
        raise ValueError(f"unknown slack channel: {channel!r}")

    # One webhook secret per channel
    webhook = get_secret(f"SLACK_WEBHOOK_{channel.upper()}")

    data = {"text": text}
    if blocks:
        data["blocks"] = blocks

    res = requests.post(
        webhook, data=json.dumps(data), headers={"Content-Type": "application/json"}
    )
    res.raise_for_status()
def slack_state_handler(task, old_state, new_state):
    """State handler that notifies Slack once a task finishes.

    NOTE(review): assumes *task*/*new_state* follow the prefect API
    (``is_finished``, ``is_failed``, ``task.name``, ``task.duration``) --
    confirm against the registering caller.
    """
    if not new_state.is_finished():
        return new_state
    failure = new_state.is_failed()
    # Prepare message
    if failure:
        msg = f"*{task.name}:* :x:"
    else:
        msg = f"*{task.name}:* {task.duration} :heavy_check_mark:"
    # Notify result
    send_slack(msg, channel="events" if is_pro() else "test")
    # In pro notify about failures in general
    if failure and is_pro():
        send_slack(msg, channel="general")
    return new_state
| 21.361702 | 84 | 0.644422 | import json
import requests
from utils import get_secret
from utils import is_pro
def send_slack(text="", channel="test", blocks=None):
assert channel in ["test", "events", "general"]
webhook = get_secret(f"SLACK_WEBHOOK_{channel.upper()}")
data = {"text": text}
if blocks:
data["blocks"] = blocks
res = requests.post(
webhook, data=json.dumps(data), headers={"Content-Type": "application/json"}
)
res.raise_for_status()
def slack_state_handler(task, old_state, new_state):
if not new_state.is_finished():
return new_state
failure = new_state.is_failed()
if failure:
msg = f"*{task.name}:* :x:"
else:
msg = f"*{task.name}:* {task.duration} :heavy_check_mark:"
send_slack(msg, channel="events" if is_pro() else "test")
if failure and is_pro():
send_slack(msg, channel="general")
return new_state
| true | true |
f71c8d87b4e0910142ebc974a5c242cbc32868ab | 798 | py | Python | tree/b_my_solution.py | master-cim/algorithm | a57f473ceb32b96240989e31ac33154e55c00724 | [
"MIT"
] | 1 | 2022-03-31T07:30:53.000Z | 2022-03-31T07:30:53.000Z | tree/b_my_solution.py | master-cim/algorithm | a57f473ceb32b96240989e31ac33154e55c00724 | [
"MIT"
] | null | null | null | tree/b_my_solution.py | master-cim/algorithm | a57f473ceb32b96240989e31ac33154e55c00724 | [
"MIT"
] | 2 | 2022-03-04T09:42:03.000Z | 2022-03-30T14:51:32.000Z | # B. Сбалансированное дерево
# ID успешной посылки 66593272
class Node:
    """Binary-tree node holding a value and optional left/right children."""
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.right = right
        self.left = left
def height(root):
    """Return the node count of the longest root-to-leaf path (0 if empty)."""
    if root is None:
        return 0
    left_depth = height(root.left)
    right_depth = height(root.right)
    return 1 + (left_depth if left_depth > right_depth else right_depth)
def solution(root):
    """Return True if the binary tree rooted at *root* is height-balanced.

    A tree is balanced when, at every node, the heights of the two
    subtrees differ by at most 1.  Unlike the naive version that calls
    ``height`` at every node (O(n^2) worst case), this computes height
    and balance in a single O(n) traversal and needs no helper function
    defined elsewhere.
    """
    def check(node):
        # Return the subtree height, or -1 as soon as an imbalance is found
        # so the traversal can short-circuit.
        if node is None:
            return 0
        left = check(node.left)
        if left == -1:
            return -1
        right = check(node.right)
        if right == -1:
            return -1
        if abs(left - right) > 1:
            return -1
        return max(left, right) + 1
    return check(root) != -1
def test():
    """Smoke test: the sample tree (subtree heights 2 and 1) must be balanced."""
    node1 = Node(1)
    node2 = Node(-5)
    node3 = Node(3, node1, node2)
    node4 = Node(10)
    node5 = Node(2, node3, node4)
    assert solution(node5)
| 21.567568 | 57 | 0.616541 |
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.right = right
self.left = left
def height(root):
if root is None:
return 0
return max(height(root.left), height(root.right)) + 1
def solution(root):
if root is None:
return True
left_height = height(root.left)
right_height = height(root.right)
if ((abs(left_height - right_height) <= 1)
and solution(root.left) is True
and solution(root.right) is True):
return True
return False
def test():
node1 = Node(1)
node2 = Node(-5)
node3 = Node(3, node1, node2)
node4 = Node(10)
node5 = Node(2, node3, node4)
assert solution(node5)
| true | true |
f71c8d946e5ae29a441cb944deb2a30473a80d7d | 21,205 | py | Python | py/desispec/scripts/stdstars.py | segasai/desispec | 4786347a8ad44effa4985671423f7ba0129ba6c3 | [
"BSD-3-Clause"
] | null | null | null | py/desispec/scripts/stdstars.py | segasai/desispec | 4786347a8ad44effa4985671423f7ba0129ba6c3 | [
"BSD-3-Clause"
] | null | null | null | py/desispec/scripts/stdstars.py | segasai/desispec | 4786347a8ad44effa4985671423f7ba0129ba6c3 | [
"BSD-3-Clause"
] | null | null | null |
"""
Get the normalized best template to do flux calibration.
"""
#- TODO: refactor algorithmic code into a separate module/function
import argparse
import sys
import numpy as np
from astropy.io import fits
from astropy import units
from astropy.table import Table
from desispec import io
from desispec.fluxcalibration import match_templates,normalize_templates,isStdStar
from desispec.interpolation import resample_flux
from desiutil.log import get_logger
from desispec.parallel import default_nproc
from desispec.io.filters import load_legacy_survey_filter
from desiutil.dust import ext_odonnell,extinction_total_to_selective_ratio
from desispec.fiberbitmasking import get_fiberbitmasked_frame
def parse(options=None):
    """Parse command-line options for standard-star fitting.

    Args:
        options: list of argument strings; defaults to ``sys.argv`` when None.

    Returns:
        argparse.Namespace with the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Fit of standard star spectra in frames.")
    parser.add_argument('--frames', type = str, default = None, required=True, nargs='*',
                        help = 'list of path to DESI frame fits files (needs to be same exposure, spectro)')
    parser.add_argument('--skymodels', type = str, default = None, required=True, nargs='*',
                        help = 'list of path to DESI sky model fits files (needs to be same exposure, spectro)')
    parser.add_argument('--fiberflats', type = str, default = None, required=True, nargs='*',
                        help = 'list of path to DESI fiberflats fits files (needs to be same exposure, spectro)')
    parser.add_argument('--starmodels', type = str, help = 'path of spectro-photometric stellar spectra fits')
    parser.add_argument('-o','--outfile', type = str, help = 'output file for normalized stdstar model flux')
    parser.add_argument('--ncpu', type = int, default = default_nproc, required = False, help = 'use ncpu for multiprocessing')
    parser.add_argument('--delta-color', type = float, default = 0.2, required = False, help = 'max delta-color for the selection of standard stars (on top of meas. errors)')
    parser.add_argument('--color', type = str, default = "G-R", choices=['G-R', 'R-Z'], required = False, help = 'color for selection of standard stars')
    parser.add_argument('--z-max', type = float, default = 0.008, required = False, help = 'max peculiar velocity (blue/red)shift range')
    parser.add_argument('--z-res', type = float, default = 0.00002, required = False, help = 'dz grid resolution')
    parser.add_argument('--template-error', type = float, default = 0.1, required = False, help = 'fractional template error used in chi2 computation (about 0.1 for BOSS b1)')
    parser.add_argument('--maxstdstars', type=int, default=30, \
        help='Maximum number of stdstars to include')
    log = get_logger()
    args = None
    # Log the full command line so the provenance of a run can be traced
    if options is None:
        args = parser.parse_args()
        cmd = ' '.join(sys.argv)
    else:
        args = parser.parse_args(options)
        cmd = 'desi_fit_stdstars ' + ' '.join(options)
    log.info('RUNNING {}'.format(cmd))
    return args
def safe_read_key(header, key):
    """Read *key* from a FITS-like header, falling back to the 8-character
    blank-padded upper-case form of the keyword when the first lookup is
    missing or yields None.

    The fallback lookup is deliberately unguarded, so a keyword absent in
    both forms still raises KeyError, as before.
    """
    try:
        value = header[key]
    except KeyError:
        value = None
    if value is None:
        # FITS keywords are stored blank-padded to 8 chars, upper-case
        value = header[key.ljust(8).upper()]
    return value
def dust_transmission(wave,ebv) :
    """Return the dust transmission fraction at wavelengths *wave* for a
    given E(B-V), using the O'Donnell extinction curve with Rv=3.1."""
    Rv = 3.1
    extinction = ext_odonnell(wave,Rv=Rv)
    # transmission = 10^(-A_lambda/2.5) with A_lambda = Rv * k(lambda) * E(B-V)
    return 10**(-Rv*extinction*ebv/2.5)
def main(args) :
""" finds the best models of all standard stars in the frame
and normlize the model flux. Output is written to a file and will be called for calibration.
"""
log = get_logger()
log.info("mag delta %s = %f (for the pre-selection of stellar models)"%(args.color,args.delta_color))
log.info('multiprocess parallelizing with {} processes'.format(args.ncpu))
# READ DATA
############################################
# First loop through and group by exposure and spectrograph
frames_by_expid = {}
for filename in args.frames :
log.info("reading %s"%filename)
frame=io.read_frame(filename)
expid = safe_read_key(frame.meta,"EXPID")
camera = safe_read_key(frame.meta,"CAMERA").strip().lower()
spec = camera[1]
uniq_key = (expid,spec)
if uniq_key in frames_by_expid.keys():
frames_by_expid[uniq_key][camera] = frame
else:
frames_by_expid[uniq_key] = {camera: frame}
frames={}
flats={}
skies={}
spectrograph=None
starfibers=None
starindices=None
fibermap=None
# For each unique expid,spec pair, get the logical OR of the FIBERSTATUS for all
# cameras and then proceed with extracting the frame information
# once we modify the fibermap FIBERSTATUS
for (expid,spec),camdict in frames_by_expid.items():
fiberstatus = None
for frame in camdict.values():
if fiberstatus is None:
fiberstatus = frame.fibermap['FIBERSTATUS'].data.copy()
else:
fiberstatus |= frame.fibermap['FIBERSTATUS']
for camera,frame in camdict.items():
frame.fibermap['FIBERSTATUS'] |= fiberstatus
# Set fibermask flagged spectra to have 0 flux and variance
frame = get_fiberbitmasked_frame(frame,bitmask='stdstars',ivar_framemask=True)
frame_fibermap = frame.fibermap
frame_starindices = np.where(isStdStar(frame_fibermap))[0]
#- Confirm that all fluxes have entries but trust targeting bits
#- to get basic magnitude range correct
keep = np.ones(len(frame_starindices), dtype=bool)
for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2?
keep &= frame_fibermap[colname][frame_starindices] > 10**((22.5-30)/2.5)
keep &= frame_fibermap[colname][frame_starindices] < 10**((22.5-0)/2.5)
frame_starindices = frame_starindices[keep]
if spectrograph is None :
spectrograph = frame.spectrograph
fibermap = frame_fibermap
starindices=frame_starindices
starfibers=fibermap["FIBER"][starindices]
elif spectrograph != frame.spectrograph :
log.error("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
raise ValueError("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
elif starindices.size != frame_starindices.size or np.sum(starindices!=frame_starindices)>0 :
log.error("incompatible fibermap")
raise ValueError("incompatible fibermap")
if not camera in frames :
frames[camera]=[]
frames[camera].append(frame)
# possibly cleanup memory
del frames_by_expid
for filename in args.skymodels :
log.info("reading %s"%filename)
sky=io.read_sky(filename)
camera=safe_read_key(sky.header,"CAMERA").strip().lower()
if not camera in skies :
skies[camera]=[]
skies[camera].append(sky)
for filename in args.fiberflats :
log.info("reading %s"%filename)
flat=io.read_fiberflat(filename)
camera=safe_read_key(flat.header,"CAMERA").strip().lower()
# NEED TO ADD MORE CHECKS
if camera in flats:
log.warning("cannot handle several flats of same camera (%s), will use only the first one"%camera)
#raise ValueError("cannot handle several flats of same camera (%s)"%camera)
else :
flats[camera]=flat
if starindices.size == 0 :
log.error("no STD star found in fibermap")
raise ValueError("no STD star found in fibermap")
log.info("found %d STD stars"%starindices.size)
# log.warning("Not using flux errors for Standard Star fits!")
# DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA
############################################
# since poping dict, we need to copy keys to iterate over to avoid
# RuntimeError due to changing dict
frame_cams = list(frames.keys())
for cam in frame_cams:
if not cam in skies:
log.warning("Missing sky for %s"%cam)
frames.pop(cam)
continue
if not cam in flats:
log.warning("Missing flat for %s"%cam)
frames.pop(cam)
continue
flat=flats[cam]
for frame,sky in zip(frames[cam],skies[cam]) :
frame.flux = frame.flux[starindices]
frame.ivar = frame.ivar[starindices]
frame.ivar *= (frame.mask[starindices] == 0)
frame.ivar *= (sky.ivar[starindices] != 0)
frame.ivar *= (sky.mask[starindices] == 0)
frame.ivar *= (flat.ivar[starindices] != 0)
frame.ivar *= (flat.mask[starindices] == 0)
frame.flux *= ( frame.ivar > 0) # just for clean plots
for star in range(frame.flux.shape[0]) :
ok=np.where((frame.ivar[star]>0)&(flat.fiberflat[star]!=0))[0]
if ok.size > 0 :
frame.flux[star] = frame.flux[star]/flat.fiberflat[star] - sky.flux[star]
frame.resolution_data = frame.resolution_data[starindices]
nframes=len(frames[cam])
if nframes>1 :
# optimal weights for the coaddition = ivar*throughput, not directly ivar,
# we estimate the relative throughput with median fluxes at this stage
medflux=np.zeros(nframes)
for i,frame in enumerate(frames[cam]) :
if np.sum(frame.ivar>0) == 0 :
log.error("ivar=0 for all std star spectra in frame {}-{:08d}".format(cam,frame.meta["EXPID"]))
else :
medflux[i] = np.median(frame.flux[frame.ivar>0])
log.debug("medflux = {}".format(medflux))
medflux *= (medflux>0)
if np.sum(medflux>0)==0 :
log.error("mean median flux = 0, for all stars in fibers {}".format(list(frames[cam][0].fibermap["FIBER"][starindices])))
sys.exit(12)
mmedflux = np.mean(medflux[medflux>0])
weights=medflux/mmedflux
log.info("coadding {} exposures in cam {}, w={}".format(nframes,cam,weights))
sw=np.zeros(frames[cam][0].flux.shape)
swf=np.zeros(frames[cam][0].flux.shape)
swr=np.zeros(frames[cam][0].resolution_data.shape)
for i,frame in enumerate(frames[cam]) :
sw += weights[i]*frame.ivar
swf += weights[i]*frame.ivar*frame.flux
swr += weights[i]*frame.ivar[:,None,:]*frame.resolution_data
coadded_frame = frames[cam][0]
coadded_frame.ivar = sw
coadded_frame.flux = swf/(sw+(sw==0))
coadded_frame.resolution_data = swr/((sw+(sw==0))[:,None,:])
frames[cam] = [ coadded_frame ]
# CHECK S/N
############################################
# for each band in 'brz', record quadratic sum of median S/N across wavelength
snr=dict()
for band in ['b','r','z'] :
snr[band]=np.zeros(starindices.size)
for cam in frames :
band=cam[0].lower()
for frame in frames[cam] :
msnr = np.median( frame.flux * np.sqrt( frame.ivar ) / np.sqrt(np.gradient(frame.wave)) , axis=1 ) # median SNR per sqrt(A.)
msnr *= (msnr>0)
snr[band] = np.sqrt( snr[band]**2 + msnr**2 )
log.info("SNR(B) = {}".format(snr['b']))
###############################
max_number_of_stars = 50
min_blue_snr = 4.
###############################
indices=np.argsort(snr['b'])[::-1][:max_number_of_stars]
validstars = np.where(snr['b'][indices]>min_blue_snr)[0]
#- TODO: later we filter on models based upon color, thus throwing
#- away very blue stars for which we don't have good models.
log.info("Number of stars with median stacked blue S/N > {} /sqrt(A) = {}".format(min_blue_snr,validstars.size))
if validstars.size == 0 :
log.error("No valid star")
sys.exit(12)
validstars = indices[validstars]
for band in ['b','r','z'] :
snr[band]=snr[band][validstars]
log.info("BLUE SNR of selected stars={}".format(snr['b']))
for cam in frames :
for frame in frames[cam] :
frame.flux = frame.flux[validstars]
frame.ivar = frame.ivar[validstars]
frame.resolution_data = frame.resolution_data[validstars]
starindices = starindices[validstars]
starfibers = starfibers[validstars]
nstars = starindices.size
fibermap = Table(fibermap[starindices])
# MASK OUT THROUGHPUT DIP REGION
############################################
mask_throughput_dip_region = True
if mask_throughput_dip_region :
wmin=4300.
wmax=4500.
log.warning("Masking out the wavelength region [{},{}]A in the standard star fit".format(wmin,wmax))
for cam in frames :
for frame in frames[cam] :
ii=np.where( (frame.wave>=wmin)&(frame.wave<=wmax) )[0]
if ii.size>0 :
frame.ivar[:,ii] = 0
# READ MODELS
############################################
log.info("reading star models in %s"%args.starmodels)
stdwave,stdflux,templateid,teff,logg,feh=io.read_stdstar_templates(args.starmodels)
# COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG
############################################
#- Support older fibermaps
if 'PHOTSYS' not in fibermap.colnames:
log.warning('Old fibermap format; using defaults for missing columns')
log.warning(" PHOTSYS = 'S'")
log.warning(" EBV = 0.0")
fibermap['PHOTSYS'] = 'S'
fibermap['EBV'] = 0.0
model_filters = dict()
for band in ["G","R","Z"] :
for photsys in np.unique(fibermap['PHOTSYS']) :
model_filters[band+photsys] = load_legacy_survey_filter(band=band,photsys=photsys)
log.info("computing model mags for %s"%sorted(model_filters.keys()))
model_mags = dict()
fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom
for filter_name, filter_response in model_filters.items():
model_mags[filter_name] = filter_response.get_ab_magnitude(stdflux*fluxunits,stdwave)
log.info("done computing model mags")
# LOOP ON STARS TO FIND BEST MODEL
############################################
linear_coefficients=np.zeros((nstars,stdflux.shape[0]))
chi2dof=np.zeros((nstars))
redshift=np.zeros((nstars))
normflux=[]
star_mags = dict()
star_unextincted_mags = dict()
photometric_systems = np.unique(fibermap['PHOTSYS'])
for band in ['G', 'R', 'Z']:
star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band])
star_unextincted_mags[band] = np.zeros(star_mags[band].shape)
for photsys in photometric_systems :
r_band = extinction_total_to_selective_ratio(band , photsys) # dimensionless
# r_band = a_band / E(B-V)
# E(B-V) is a difference of magnitudes (dimensionless)
# a_band = -2.5*log10(effective dust transmission) , dimensionless
# effective dust transmission =
# integral( SED(lambda) * filter_transmission(lambda,band) * milkyway_dust_transmission(lambda,E(B-V)) dlamdba)
# / integral( SED(lambda) * filter_transmission(lambda,band) dlamdba)
selection = (fibermap['PHOTSYS'] == photsys)
a_band = r_band * fibermap['EBV'][selection] # dimensionless
star_unextincted_mags[band][selection] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band][selection]) - a_band
star_colors = dict()
star_colors['G-R'] = star_mags['G'] - star_mags['R']
star_colors['R-Z'] = star_mags['R'] - star_mags['Z']
star_unextincted_colors = dict()
star_unextincted_colors['G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R']
star_unextincted_colors['R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z']
fitted_model_colors = np.zeros(nstars)
for star in range(nstars) :
log.info("finding best model for observed star #%d"%star)
# np.array of wave,flux,ivar,resol
wave = {}
flux = {}
ivar = {}
resolution_data = {}
for camera in frames :
for i,frame in enumerate(frames[camera]) :
identifier="%s-%d"%(camera,i)
wave[identifier]=frame.wave
flux[identifier]=frame.flux[star]
ivar[identifier]=frame.ivar[star]
resolution_data[identifier]=frame.resolution_data[star]
# preselect models based on magnitudes
photsys=fibermap['PHOTSYS'][star]
if not args.color in ['G-R','R-Z'] :
raise ValueError('Unknown color {}'.format(args.color))
bands=args.color.split("-")
model_colors = model_mags[bands[0]+photsys] - model_mags[bands[1]+photsys]
color_diff = model_colors - star_unextincted_colors[args.color][star]
selection = np.abs(color_diff) < args.delta_color
if np.sum(selection) == 0 :
log.warning("no model in the selected color range for this star")
continue
# smallest cube in parameter space including this selection (needed for interpolation)
new_selection = (teff>=np.min(teff[selection]))&(teff<=np.max(teff[selection]))
new_selection &= (logg>=np.min(logg[selection]))&(logg<=np.max(logg[selection]))
new_selection &= (feh>=np.min(feh[selection]))&(feh<=np.max(feh[selection]))
selection = np.where(new_selection)[0]
log.info("star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d"%(
star, starfibers[star], args.color, star_unextincted_colors[args.color][star],
selection.size, stdflux.shape[0]))
# Match unextincted standard stars to data
coefficients, redshift[star], chi2dof[star] = match_templates(
wave, flux, ivar, resolution_data,
stdwave, stdflux[selection],
teff[selection], logg[selection], feh[selection],
ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res,
template_error=args.template_error
)
linear_coefficients[star,selection] = coefficients
log.info('Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}'.format(
starfibers[star],
np.inner(teff,linear_coefficients[star]),
np.inner(logg,linear_coefficients[star]),
np.inner(feh,linear_coefficients[star]),
redshift[star],
chi2dof[star])
)
# Apply redshift to original spectrum at full resolution
model=np.zeros(stdwave.size)
redshifted_stdwave = stdwave*(1+redshift[star])
for i,c in enumerate(linear_coefficients[star]) :
if c != 0 :
model += c*np.interp(stdwave,redshifted_stdwave,stdflux[i])
# Apply dust extinction to the model
log.info("Applying MW dust extinction to star {} with EBV = {}".format(star,fibermap['EBV'][star]))
model *= dust_transmission(stdwave, fibermap['EBV'][star])
# Compute final color of dust-extincted model
photsys=fibermap['PHOTSYS'][star]
if not args.color in ['G-R','R-Z'] :
raise ValueError('Unknown color {}'.format(args.color))
bands=args.color.split("-")
model_mag1 = model_filters[bands[0]+photsys].get_ab_magnitude(model*fluxunits, stdwave)
model_mag2 = model_filters[bands[1]+photsys].get_ab_magnitude(model*fluxunits, stdwave)
fitted_model_colors[star] = model_mag1 - model_mag2
if bands[0]=="R" :
model_magr = model_mag1
elif bands[1]=="R" :
model_magr = model_mag2
#- TODO: move this back into normalize_templates, at the cost of
#- recalculating a model magnitude?
# Normalize the best model using reported magnitude
scalefac=10**((model_magr - star_mags['R'][star])/2.5)
log.info('scaling R mag {:.3f} to {:.3f} using scale {}'.format(model_magr, star_mags['R'][star], scalefac))
normflux.append(model*scalefac)
# Now write the normalized flux for all best models to a file
normflux=np.array(normflux)
fitted_stars = np.where(chi2dof != 0)[0]
if fitted_stars.size == 0 :
log.error("No star has been fit.")
sys.exit(12)
data={}
data['LOGG']=linear_coefficients[fitted_stars,:].dot(logg)
data['TEFF']= linear_coefficients[fitted_stars,:].dot(teff)
data['FEH']= linear_coefficients[fitted_stars,:].dot(feh)
data['CHI2DOF']=chi2dof[fitted_stars]
data['REDSHIFT']=redshift[fitted_stars]
data['COEFF']=linear_coefficients[fitted_stars,:]
data['DATA_%s'%args.color]=star_colors[args.color][fitted_stars]
data['MODEL_%s'%args.color]=fitted_model_colors[fitted_stars]
data['BLUE_SNR'] = snr['b'][fitted_stars]
data['RED_SNR'] = snr['r'][fitted_stars]
data['NIR_SNR'] = snr['z'][fitted_stars]
io.write_stdstar_models(args.outfile,normflux,stdwave,starfibers[fitted_stars],data)
| 43.01217 | 175 | 0.614902 |
import argparse
import sys
import numpy as np
from astropy.io import fits
from astropy import units
from astropy.table import Table
from desispec import io
from desispec.fluxcalibration import match_templates,normalize_templates,isStdStar
from desispec.interpolation import resample_flux
from desiutil.log import get_logger
from desispec.parallel import default_nproc
from desispec.io.filters import load_legacy_survey_filter
from desiutil.dust import ext_odonnell,extinction_total_to_selective_ratio
from desispec.fiberbitmasking import get_fiberbitmasked_frame
def parse(options=None):
    """Parse command-line options for the standard-star fitting script.

    Parameters
    ----------
    options : list of str, optional
        Arguments to parse instead of ``sys.argv[1:]`` (handy for testing).

    Returns
    -------
    argparse.Namespace
        The parsed options; the reconstructed command line is logged
        as a side effect.
    """
    ap = argparse.ArgumentParser(description="Fit of standard star spectra in frames.")
    # Input products: frames, sky models and fiberflats are expected to come
    # from the same exposure and spectrograph so they can be matched per camera.
    ap.add_argument('--frames', type=str, default=None, required=True, nargs='*',
                    help='list of path to DESI frame fits files (needs to be same exposure, spectro)')
    ap.add_argument('--skymodels', type=str, default=None, required=True, nargs='*',
                    help='list of path to DESI sky model fits files (needs to be same exposure, spectro)')
    ap.add_argument('--fiberflats', type=str, default=None, required=True, nargs='*',
                    help='list of path to DESI fiberflats fits files (needs to be same exposure, spectro)')
    ap.add_argument('--starmodels', type=str,
                    help='path of spectro-photometric stellar spectra fits')
    ap.add_argument('-o', '--outfile', type=str,
                    help='output file for normalized stdstar model flux')
    ap.add_argument('--ncpu', type=int, default=default_nproc, required=False,
                    help='use ncpu for multiprocessing')
    # Controls for model pre-selection and the template fit itself.
    ap.add_argument('--delta-color', type=float, default=0.2, required=False,
                    help='max delta-color for the selection of standard stars (on top of meas. errors)')
    ap.add_argument('--color', type=str, default="G-R", choices=['G-R', 'R-Z'], required=False,
                    help='color for selection of standard stars')
    ap.add_argument('--z-max', type=float, default=0.008, required=False,
                    help='max peculiar velocity (blue/red)shift range')
    ap.add_argument('--z-res', type=float, default=0.00002, required=False,
                    help='dz grid resolution')
    ap.add_argument('--template-error', type=float, default=0.1, required=False,
                    help='fractional template error used in chi2 computation (about 0.1 for BOSS b1)')
    ap.add_argument('--maxstdstars', type=int, default=30,
                    help='Maximum number of stdstars to include')

    log = get_logger()
    if options is None:
        parsed = ap.parse_args()
        cmd = ' '.join(sys.argv)
    else:
        parsed = ap.parse_args(options)
        cmd = 'desi_fit_stdstars ' + ' '.join(options)
    log.info('RUNNING {}'.format(cmd))
    return parsed
def safe_read_key(header, key):
    """Return ``header[key]``, falling back to the FITS-style keyword form.

    If the plain key is missing (KeyError) or maps to ``None``, the lookup is
    retried with the key left-justified to 8 characters and upper-cased,
    e.g. ``'camera'`` -> ``'CAMERA  '``.  A KeyError propagates if neither
    form is present.
    """
    padded = key.ljust(8).upper()
    try:
        found = header[key]
    except KeyError:
        found = None
    if found is not None:
        return found
    # Retry with the canonical 8-character uppercase FITS keyword.
    return header[padded]
def dust_transmission(wave, ebv):
    """Milky Way dust transmission fraction at the given wavelengths.

    Uses the O'Donnell extinction curve with Rv = 3.1 and converts the
    extinction in magnitudes, A(lambda) = Rv * k(lambda) * E(B-V), into a
    multiplicative transmission factor 10**(-A/2.5).

    Parameters are the wavelength array and the E(B-V) reddening value.
    """
    rv = 3.1
    # ext_odonnell returns the extinction curve k(lambda) for this Rv.
    a_lambda = rv * ext_odonnell(wave, Rv=rv) * ebv
    return 10 ** (-a_lambda / 2.5)
def main(args) :
log = get_logger()
log.info("mag delta %s = %f (for the pre-selection of stellar models)"%(args.color,args.delta_color))
log.info('multiprocess parallelizing with {} processes'.format(args.ncpu))
for camera,frame in camdict.items():
frame.fibermap['FIBERSTATUS'] |= fiberstatus
frame = get_fiberbitmasked_frame(frame,bitmask='stdstars',ivar_framemask=True)
frame_fibermap = frame.fibermap
frame_starindices = np.where(isStdStar(frame_fibermap))[0]
keep = np.ones(len(frame_starindices), dtype=bool)
for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']:
keep &= frame_fibermap[colname][frame_starindices] > 10**((22.5-30)/2.5)
keep &= frame_fibermap[colname][frame_starindices] < 10**((22.5-0)/2.5)
frame_starindices = frame_starindices[keep]
if spectrograph is None :
spectrograph = frame.spectrograph
fibermap = frame_fibermap
starindices=frame_starindices
starfibers=fibermap["FIBER"][starindices]
elif spectrograph != frame.spectrograph :
log.error("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
raise ValueError("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
elif starindices.size != frame_starindices.size or np.sum(starindices!=frame_starindices)>0 :
log.error("incompatible fibermap")
raise ValueError("incompatible fibermap")
if not camera in frames :
frames[camera]=[]
frames[camera].append(frame)
del frames_by_expid
for filename in args.skymodels :
log.info("reading %s"%filename)
sky=io.read_sky(filename)
camera=safe_read_key(sky.header,"CAMERA").strip().lower()
if not camera in skies :
skies[camera]=[]
skies[camera].append(sky)
for filename in args.fiberflats :
log.info("reading %s"%filename)
flat=io.read_fiberflat(filename)
camera=safe_read_key(flat.header,"CAMERA").strip().lower()
if camera in flats:
log.warning("cannot handle several flats of same camera (%s), will use only the first one"%camera)
else :
flats[camera]=flat
if starindices.size == 0 :
log.error("no STD star found in fibermap")
raise ValueError("no STD star found in fibermap")
log.info("found %d STD stars"%starindices.size)
erflat[star]!=0))[0]
if ok.size > 0 :
frame.flux[star] = frame.flux[star]/flat.fiberflat[star] - sky.flux[star]
frame.resolution_data = frame.resolution_data[starindices]
nframes=len(frames[cam])
if nframes>1 :
medflux=np.zeros(nframes)
for i,frame in enumerate(frames[cam]) :
if np.sum(frame.ivar>0) == 0 :
log.error("ivar=0 for all std star spectra in frame {}-{:08d}".format(cam,frame.meta["EXPID"]))
else :
medflux[i] = np.median(frame.flux[frame.ivar>0])
log.debug("medflux = {}".format(medflux))
medflux *= (medflux>0)
if np.sum(medflux>0)==0 :
log.error("mean median flux = 0, for all stars in fibers {}".format(list(frames[cam][0].fibermap["FIBER"][starindices])))
sys.exit(12)
mmedflux = np.mean(medflux[medflux>0])
weights=medflux/mmedflux
log.info("coadding {} exposures in cam {}, w={}".format(nframes,cam,weights))
sw=np.zeros(frames[cam][0].flux.shape)
swf=np.zeros(frames[cam][0].flux.shape)
swr=np.zeros(frames[cam][0].resolution_data.shape)
for i,frame in enumerate(frames[cam]) :
sw += weights[i]*frame.ivar
swf += weights[i]*frame.ivar*frame.flux
swr += weights[i]*frame.ivar[:,None,:]*frame.resolution_data
coadded_frame = frames[cam][0]
coadded_frame.ivar = sw
coadded_frame.flux = swf/(sw+(sw==0))
coadded_frame.resolution_data = swr/((sw+(sw==0))[:,None,:])
frames[cam] = [ coadded_frame ]
:,ii] = 0
# READ MODELS
############################################
log.info("reading star models in %s"%args.starmodels)
stdwave,stdflux,templateid,teff,logg,feh=io.read_stdstar_templates(args.starmodels)
# COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG
############################################
#- Support older fibermaps
if 'PHOTSYS' not in fibermap.colnames:
log.warning('Old fibermap format; using defaults for missing columns')
log.warning(" PHOTSYS = 'S'")
log.warning(" EBV = 0.0")
fibermap['PHOTSYS'] = 'S'
fibermap['EBV'] = 0.0
model_filters = dict()
for band in ["G","R","Z"] :
for photsys in np.unique(fibermap['PHOTSYS']) :
model_filters[band+photsys] = load_legacy_survey_filter(band=band,photsys=photsys)
log.info("computing model mags for %s"%sorted(model_filters.keys()))
model_mags = dict()
fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom
for filter_name, filter_response in model_filters.items():
model_mags[filter_name] = filter_response.get_ab_magnitude(stdflux*fluxunits,stdwave)
log.info("done computing model mags")
# LOOP ON STARS TO FIND BEST MODEL
############################################
linear_coefficients=np.zeros((nstars,stdflux.shape[0]))
chi2dof=np.zeros((nstars))
redshift=np.zeros((nstars))
normflux=[]
star_mags = dict()
star_unextincted_mags = dict()
photometric_systems = np.unique(fibermap['PHOTSYS'])
for band in ['G', 'R', 'Z']:
star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band])
star_unextincted_mags[band] = np.zeros(star_mags[band].shape)
for photsys in photometric_systems :
r_band = extinction_total_to_selective_ratio(band , photsys) # dimensionless
# r_band = a_band / E(B-V)
# E(B-V) is a difference of magnitudes (dimensionless)
# a_band = -2.5*log10(effective dust transmission) , dimensionless
# effective dust transmission =
# integral( SED(lambda) * filter_transmission(lambda,band) * milkyway_dust_transmission(lambda,E(B-V)) dlamdba)
# / integral( SED(lambda) * filter_transmission(lambda,band) dlamdba)
selection = (fibermap['PHOTSYS'] == photsys)
a_band = r_band * fibermap['EBV'][selection] # dimensionless
star_unextincted_mags[band][selection] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band][selection]) - a_band
star_colors = dict()
star_colors['G-R'] = star_mags['G'] - star_mags['R']
star_colors['R-Z'] = star_mags['R'] - star_mags['Z']
star_unextincted_colors = dict()
star_unextincted_colors['G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R']
star_unextincted_colors['R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z']
fitted_model_colors = np.zeros(nstars)
for star in range(nstars) :
log.info("finding best model for observed star #%d"%star)
# np.array of wave,flux,ivar,resol
wave = {}
flux = {}
ivar = {}
resolution_data = {}
for camera in frames :
for i,frame in enumerate(frames[camera]) :
identifier="%s-%d"%(camera,i)
wave[identifier]=frame.wave
flux[identifier]=frame.flux[star]
ivar[identifier]=frame.ivar[star]
resolution_data[identifier]=frame.resolution_data[star]
# preselect models based on magnitudes
photsys=fibermap['PHOTSYS'][star]
if not args.color in ['G-R','R-Z'] :
raise ValueError('Unknown color {}'.format(args.color))
bands=args.color.split("-")
model_colors = model_mags[bands[0]+photsys] - model_mags[bands[1]+photsys]
color_diff = model_colors - star_unextincted_colors[args.color][star]
selection = np.abs(color_diff) < args.delta_color
if np.sum(selection) == 0 :
log.warning("no model in the selected color range for this star")
continue
# smallest cube in parameter space including this selection (needed for interpolation)
new_selection = (teff>=np.min(teff[selection]))&(teff<=np.max(teff[selection]))
new_selection &= (logg>=np.min(logg[selection]))&(logg<=np.max(logg[selection]))
new_selection &= (feh>=np.min(feh[selection]))&(feh<=np.max(feh[selection]))
selection = np.where(new_selection)[0]
log.info("star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d"%(
star, starfibers[star], args.color, star_unextincted_colors[args.color][star],
selection.size, stdflux.shape[0]))
# Match unextincted standard stars to data
coefficients, redshift[star], chi2dof[star] = match_templates(
wave, flux, ivar, resolution_data,
stdwave, stdflux[selection],
teff[selection], logg[selection], feh[selection],
ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res,
template_error=args.template_error
)
linear_coefficients[star,selection] = coefficients
log.info('Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}'.format(
starfibers[star],
np.inner(teff,linear_coefficients[star]),
np.inner(logg,linear_coefficients[star]),
np.inner(feh,linear_coefficients[star]),
redshift[star],
chi2dof[star])
)
# Apply redshift to original spectrum at full resolution
model=np.zeros(stdwave.size)
redshifted_stdwave = stdwave*(1+redshift[star])
for i,c in enumerate(linear_coefficients[star]) :
if c != 0 :
model += c*np.interp(stdwave,redshifted_stdwave,stdflux[i])
# Apply dust extinction to the model
log.info("Applying MW dust extinction to star {} with EBV = {}".format(star,fibermap['EBV'][star]))
model *= dust_transmission(stdwave, fibermap['EBV'][star])
# Compute final color of dust-extincted model
photsys=fibermap['PHOTSYS'][star]
if not args.color in ['G-R','R-Z'] :
raise ValueError('Unknown color {}'.format(args.color))
bands=args.color.split("-")
model_mag1 = model_filters[bands[0]+photsys].get_ab_magnitude(model*fluxunits, stdwave)
model_mag2 = model_filters[bands[1]+photsys].get_ab_magnitude(model*fluxunits, stdwave)
fitted_model_colors[star] = model_mag1 - model_mag2
if bands[0]=="R" :
model_magr = model_mag1
elif bands[1]=="R" :
model_magr = model_mag2
#- TODO: move this back into normalize_templates, at the cost of
#- recalculating a model magnitude?
# Normalize the best model using reported magnitude
scalefac=10**((model_magr - star_mags['R'][star])/2.5)
log.info('scaling R mag {:.3f} to {:.3f} using scale {}'.format(model_magr, star_mags['R'][star], scalefac))
normflux.append(model*scalefac)
# Now write the normalized flux for all best models to a file
normflux=np.array(normflux)
fitted_stars = np.where(chi2dof != 0)[0]
if fitted_stars.size == 0 :
log.error("No star has been fit.")
sys.exit(12)
data={}
data['LOGG']=linear_coefficients[fitted_stars,:].dot(logg)
data['TEFF']= linear_coefficients[fitted_stars,:].dot(teff)
data['FEH']= linear_coefficients[fitted_stars,:].dot(feh)
data['CHI2DOF']=chi2dof[fitted_stars]
data['REDSHIFT']=redshift[fitted_stars]
data['COEFF']=linear_coefficients[fitted_stars,:]
data['DATA_%s'%args.color]=star_colors[args.color][fitted_stars]
data['MODEL_%s'%args.color]=fitted_model_colors[fitted_stars]
data['BLUE_SNR'] = snr['b'][fitted_stars]
data['RED_SNR'] = snr['r'][fitted_stars]
data['NIR_SNR'] = snr['z'][fitted_stars]
io.write_stdstar_models(args.outfile,normflux,stdwave,starfibers[fitted_stars],data)
| true | true |
f71c8ece5ac79d8215cf3897a8f8aec003849358 | 33,179 | py | Python | ros/src/tl_detector/light_classification/protos/box_predictor_pb2.py | allaydesai/SDCND_system_integration | 078c1f77ea0c5f09af42f7974d9b49a4000f10d7 | [
"MIT"
] | 13 | 2020-03-04T10:16:28.000Z | 2022-01-06T11:14:29.000Z | ros/src/tl_detector/light_classification/protos/box_predictor_pb2.py | allaydesai/SDCND_system_integration | 078c1f77ea0c5f09af42f7974d9b49a4000f10d7 | [
"MIT"
] | 5 | 2020-01-28T23:04:54.000Z | 2022-02-10T00:23:36.000Z | ros/src/tl_detector/light_classification/protos/box_predictor_pb2.py | allaydesai/SDCND_system_integration | 078c1f77ea0c5f09af42f7974d9b49a4000f10d7 | [
"MIT"
] | 6 | 2019-10-22T12:43:40.000Z | 2021-09-18T08:10:31.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/box_predictor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/box_predictor.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n+object_detection/protos/box_predictor.proto\x12\x17object_detection.protos\x1a)object_detection/protos/hyperparams.proto\"\x90\x03\n\x0c\x42oxPredictor\x12Y\n\x1b\x63onvolutional_box_predictor\x18\x01 \x01(\x0b\x32\x32.object_detection.protos.ConvolutionalBoxPredictorH\x00\x12P\n\x17mask_rcnn_box_predictor\x18\x02 \x01(\x0b\x32-.object_detection.protos.MaskRCNNBoxPredictorH\x00\x12G\n\x12rfcn_box_predictor\x18\x03 \x01(\x0b\x32).object_detection.protos.RfcnBoxPredictorH\x00\x12s\n)weight_shared_convolutional_box_predictor\x18\x04 \x01(\x0b\x32>.object_detection.protos.WeightSharedConvolutionalBoxPredictorH\x00\x42\x15\n\x13\x62ox_predictor_oneof\"\x90\x03\n\x19\x43onvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x14\n\tmin_depth\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\tmax_depth\x18\x03 \x01(\x05:\x01\x30\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x19\n\x0buse_dropout\x18\x05 \x01(\x08:\x04true\x12%\n\x18\x64ropout_keep_probability\x18\x06 \x01(\x02:\x03\x30.8\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x31\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12&\n\x17\x61pply_sigmoid_to_scores\x18\t \x01(\x08:\x05\x66\x61lse\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1c\n\ruse_depthwise\x18\x0b \x01(\x08:\x05\x66\x61lse\"\xcc\x05\n%WeightSharedConvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x10\n\x05\x64\x65pth\x18\x02 \x01(\x05:\x01\x30\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x33\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1a\n\x0buse_dropout\x18\x0b \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x0c \x01(\x02:\x03\x30.8\x12%\n\x16share_prediction_tower\x18\r 
\x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x0e \x01(\x08:\x05\x66\x61lse\x12p\n\x0fscore_converter\x18\x10 \x01(\x0e\x32M.object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter:\x08IDENTITY\x12v\n\x18\x62ox_encodings_clip_range\x18\x11 \x01(\x0b\x32T.object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange\x1a\x31\n\x15\x42oxEncodingsClipRange\x12\x0b\n\x03min\x18\x01 \x01(\x02\x12\x0b\n\x03max\x18\x02 \x01(\x02\"+\n\x0eScoreConverter\x12\x0c\n\x08IDENTITY\x10\x00\x12\x0b\n\x07SIGMOID\x10\x01\"\xbf\x04\n\x14MaskRCNNBoxPredictor\x12<\n\x0e\x66\x63_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x1a\n\x0buse_dropout\x18\x02 \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x03 \x01(\x02:\x03\x30.5\x12\x18\n\rbox_code_size\x18\x04 \x01(\x05:\x01\x34\x12>\n\x10\x63onv_hyperparams\x18\x05 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12%\n\x16predict_instance_masks\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\'\n\x1amask_prediction_conv_depth\x18\x07 \x01(\x05:\x03\x32\x35\x36\x12 \n\x11predict_keypoints\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0bmask_height\x18\t \x01(\x05:\x02\x31\x35\x12\x16\n\nmask_width\x18\n \x01(\x05:\x02\x31\x35\x12*\n\x1fmask_prediction_num_conv_layers\x18\x0b \x01(\x05:\x01\x32\x12\'\n\x18masks_are_class_agnostic\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\'\n\x18share_box_across_classes\x18\r \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x63onvolve_then_upsample_masks\x18\x0e \x01(\x08:\x05\x66\x61lse\"\xf9\x01\n\x10RfcnBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\"\n\x17num_spatial_bins_height\x18\x02 \x01(\x05:\x01\x33\x12!\n\x16num_spatial_bins_width\x18\x03 \x01(\x05:\x01\x33\x12\x13\n\x05\x64\x65pth\x18\x04 \x01(\x05:\x04\x31\x30\x32\x34\x12\x18\n\rbox_code_size\x18\x05 \x01(\x05:\x01\x34\x12\x17\n\x0b\x63rop_height\x18\x06 \x01(\x05:\x02\x31\x32\x12\x16\n\ncrop_width\x18\x07 
\x01(\x05:\x02\x31\x32')
,
dependencies=[object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER = _descriptor.EnumDescriptor(
name='ScoreConverter',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IDENTITY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1595,
serialized_end=1638,
)
_sym_db.RegisterEnumDescriptor(_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER)
_BOXPREDICTOR = _descriptor.Descriptor(
name='BoxPredictor',
full_name='object_detection.protos.BoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.convolutional_box_predictor', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_rcnn_box_predictor', full_name='object_detection.protos.BoxPredictor.mask_rcnn_box_predictor', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rfcn_box_predictor', full_name='object_detection.protos.BoxPredictor.rfcn_box_predictor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_shared_convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.weight_shared_convolutional_box_predictor', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='box_predictor_oneof', full_name='object_detection.protos.BoxPredictor.box_predictor_oneof',
index=0, containing_type=None, fields=[]),
],
serialized_start=116,
serialized_end=516,
)
_CONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor(
name='ConvolutionalBoxPredictor',
full_name='object_detection.protos.ConvolutionalBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.ConvolutionalBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.min_depth', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.max_depth', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_layers_before_predictor', full_name='object_detection.protos.ConvolutionalBoxPredictor.num_layers_before_predictor', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_dropout', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.ConvolutionalBoxPredictor.dropout_keep_probability', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.kernel_size', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.box_code_size', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='apply_sigmoid_to_scores', full_name='object_detection.protos.ConvolutionalBoxPredictor.apply_sigmoid_to_scores', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='class_prediction_bias_init', full_name='object_detection.protos.ConvolutionalBoxPredictor.class_prediction_bias_init', index=9,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_depthwise', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_depthwise', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=519,
serialized_end=919,
)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE = _descriptor.Descriptor(
name='BoxEncodingsClipRange',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.min', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.max', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1544,
serialized_end=1593,
)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor(
name='WeightSharedConvolutionalBoxPredictor',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_layers_before_predictor', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.num_layers_before_predictor', index=1,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='depth', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.depth', index=2,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.kernel_size', index=3,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_code_size', index=4,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='class_prediction_bias_init', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.class_prediction_bias_init', index=5,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_dropout', index=6,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.dropout_keep_probability', index=7,
number=12, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_prediction_tower', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.share_prediction_tower', index=8,
number=13, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_depthwise', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_depthwise', index=9,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score_converter', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.score_converter', index=10,
number=16, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_encodings_clip_range', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_encodings_clip_range', index=11,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, ],
enum_types=[
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=922,
serialized_end=1638,
)
_MASKRCNNBOXPREDICTOR = _descriptor.Descriptor(
name='MaskRCNNBoxPredictor',
full_name='object_detection.protos.MaskRCNNBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fc_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.fc_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.MaskRCNNBoxPredictor.use_dropout', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.MaskRCNNBoxPredictor.dropout_keep_probability', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.MaskRCNNBoxPredictor.box_code_size', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.conv_hyperparams', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_instance_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_instance_masks', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_prediction_conv_depth', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_conv_depth', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=256,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_keypoints', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_keypoints', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_height', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_height', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_width', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_width', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_prediction_num_conv_layers', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_num_conv_layers', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='masks_are_class_agnostic', full_name='object_detection.protos.MaskRCNNBoxPredictor.masks_are_class_agnostic', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_box_across_classes', full_name='object_detection.protos.MaskRCNNBoxPredictor.share_box_across_classes', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='convolve_then_upsample_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.convolve_then_upsample_masks', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1641,
serialized_end=2216,
)
_RFCNBOXPREDICTOR = _descriptor.Descriptor(
name='RfcnBoxPredictor',
full_name='object_detection.protos.RfcnBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.RfcnBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_spatial_bins_height', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_spatial_bins_width', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='depth', full_name='object_detection.protos.RfcnBoxPredictor.depth', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1024,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.RfcnBoxPredictor.box_code_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_height', full_name='object_detection.protos.RfcnBoxPredictor.crop_height', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_width', full_name='object_detection.protos.RfcnBoxPredictor.crop_width', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2219,
serialized_end=2468,
)
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].message_type = _CONVOLUTIONALBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].message_type = _MASKRCNNBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].message_type = _RFCNBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'])
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'])
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'])
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'])
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_CONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['score_converter'].enum_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['box_encodings_clip_range'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_MASKRCNNBOXPREDICTOR.fields_by_name['fc_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_MASKRCNNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_RFCNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
DESCRIPTOR.message_types_by_name['BoxPredictor'] = _BOXPREDICTOR
DESCRIPTOR.message_types_by_name['ConvolutionalBoxPredictor'] = _CONVOLUTIONALBOXPREDICTOR
DESCRIPTOR.message_types_by_name['WeightSharedConvolutionalBoxPredictor'] = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
DESCRIPTOR.message_types_by_name['MaskRCNNBoxPredictor'] = _MASKRCNNBOXPREDICTOR
DESCRIPTOR.message_types_by_name['RfcnBoxPredictor'] = _RFCNBOXPREDICTOR
BoxPredictor = _reflection.GeneratedProtocolMessageType('BoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _BOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.BoxPredictor)
))
_sym_db.RegisterMessage(BoxPredictor)
ConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('ConvolutionalBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _CONVOLUTIONALBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ConvolutionalBoxPredictor)
))
_sym_db.RegisterMessage(ConvolutionalBoxPredictor)
WeightSharedConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('WeightSharedConvolutionalBoxPredictor', (_message.Message,), dict(
BoxEncodingsClipRange = _reflection.GeneratedProtocolMessageType('BoxEncodingsClipRange', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange)
))
,
DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor)
))
_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor)
_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange)
MaskRCNNBoxPredictor = _reflection.GeneratedProtocolMessageType('MaskRCNNBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _MASKRCNNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.MaskRCNNBoxPredictor)
))
_sym_db.RegisterMessage(MaskRCNNBoxPredictor)
RfcnBoxPredictor = _reflection.GeneratedProtocolMessageType('RfcnBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _RFCNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.RfcnBoxPredictor)
))
_sym_db.RegisterMessage(RfcnBoxPredictor)
# @@protoc_insertion_point(module_scope)
| 53.0864 | 3,991 | 0.777811 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/box_predictor.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n+object_detection/protos/box_predictor.proto\x12\x17object_detection.protos\x1a)object_detection/protos/hyperparams.proto\"\x90\x03\n\x0c\x42oxPredictor\x12Y\n\x1b\x63onvolutional_box_predictor\x18\x01 \x01(\x0b\x32\x32.object_detection.protos.ConvolutionalBoxPredictorH\x00\x12P\n\x17mask_rcnn_box_predictor\x18\x02 \x01(\x0b\x32-.object_detection.protos.MaskRCNNBoxPredictorH\x00\x12G\n\x12rfcn_box_predictor\x18\x03 \x01(\x0b\x32).object_detection.protos.RfcnBoxPredictorH\x00\x12s\n)weight_shared_convolutional_box_predictor\x18\x04 \x01(\x0b\x32>.object_detection.protos.WeightSharedConvolutionalBoxPredictorH\x00\x42\x15\n\x13\x62ox_predictor_oneof\"\x90\x03\n\x19\x43onvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x14\n\tmin_depth\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\tmax_depth\x18\x03 \x01(\x05:\x01\x30\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x19\n\x0buse_dropout\x18\x05 \x01(\x08:\x04true\x12%\n\x18\x64ropout_keep_probability\x18\x06 \x01(\x02:\x03\x30.8\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x31\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12&\n\x17\x61pply_sigmoid_to_scores\x18\t \x01(\x08:\x05\x66\x61lse\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1c\n\ruse_depthwise\x18\x0b \x01(\x08:\x05\x66\x61lse\"\xcc\x05\n%WeightSharedConvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x10\n\x05\x64\x65pth\x18\x02 \x01(\x05:\x01\x30\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x33\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1a\n\x0buse_dropout\x18\x0b \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x0c \x01(\x02:\x03\x30.8\x12%\n\x16share_prediction_tower\x18\r 
\x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x0e \x01(\x08:\x05\x66\x61lse\x12p\n\x0fscore_converter\x18\x10 \x01(\x0e\x32M.object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter:\x08IDENTITY\x12v\n\x18\x62ox_encodings_clip_range\x18\x11 \x01(\x0b\x32T.object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange\x1a\x31\n\x15\x42oxEncodingsClipRange\x12\x0b\n\x03min\x18\x01 \x01(\x02\x12\x0b\n\x03max\x18\x02 \x01(\x02\"+\n\x0eScoreConverter\x12\x0c\n\x08IDENTITY\x10\x00\x12\x0b\n\x07SIGMOID\x10\x01\"\xbf\x04\n\x14MaskRCNNBoxPredictor\x12<\n\x0e\x66\x63_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x1a\n\x0buse_dropout\x18\x02 \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x03 \x01(\x02:\x03\x30.5\x12\x18\n\rbox_code_size\x18\x04 \x01(\x05:\x01\x34\x12>\n\x10\x63onv_hyperparams\x18\x05 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12%\n\x16predict_instance_masks\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\'\n\x1amask_prediction_conv_depth\x18\x07 \x01(\x05:\x03\x32\x35\x36\x12 \n\x11predict_keypoints\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0bmask_height\x18\t \x01(\x05:\x02\x31\x35\x12\x16\n\nmask_width\x18\n \x01(\x05:\x02\x31\x35\x12*\n\x1fmask_prediction_num_conv_layers\x18\x0b \x01(\x05:\x01\x32\x12\'\n\x18masks_are_class_agnostic\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\'\n\x18share_box_across_classes\x18\r \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x63onvolve_then_upsample_masks\x18\x0e \x01(\x08:\x05\x66\x61lse\"\xf9\x01\n\x10RfcnBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\"\n\x17num_spatial_bins_height\x18\x02 \x01(\x05:\x01\x33\x12!\n\x16num_spatial_bins_width\x18\x03 \x01(\x05:\x01\x33\x12\x13\n\x05\x64\x65pth\x18\x04 \x01(\x05:\x04\x31\x30\x32\x34\x12\x18\n\rbox_code_size\x18\x05 \x01(\x05:\x01\x34\x12\x17\n\x0b\x63rop_height\x18\x06 \x01(\x05:\x02\x31\x32\x12\x16\n\ncrop_width\x18\x07 
\x01(\x05:\x02\x31\x32')
,
dependencies=[object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER = _descriptor.EnumDescriptor(
name='ScoreConverter',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IDENTITY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1595,
serialized_end=1638,
)
_sym_db.RegisterEnumDescriptor(_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER)
_BOXPREDICTOR = _descriptor.Descriptor(
name='BoxPredictor',
full_name='object_detection.protos.BoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.convolutional_box_predictor', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_rcnn_box_predictor', full_name='object_detection.protos.BoxPredictor.mask_rcnn_box_predictor', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rfcn_box_predictor', full_name='object_detection.protos.BoxPredictor.rfcn_box_predictor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_shared_convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.weight_shared_convolutional_box_predictor', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='box_predictor_oneof', full_name='object_detection.protos.BoxPredictor.box_predictor_oneof',
index=0, containing_type=None, fields=[]),
],
serialized_start=116,
serialized_end=516,
)
_CONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor(
name='ConvolutionalBoxPredictor',
full_name='object_detection.protos.ConvolutionalBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.ConvolutionalBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.min_depth', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.max_depth', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_layers_before_predictor', full_name='object_detection.protos.ConvolutionalBoxPredictor.num_layers_before_predictor', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_dropout', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.ConvolutionalBoxPredictor.dropout_keep_probability', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.kernel_size', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.box_code_size', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='apply_sigmoid_to_scores', full_name='object_detection.protos.ConvolutionalBoxPredictor.apply_sigmoid_to_scores', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='class_prediction_bias_init', full_name='object_detection.protos.ConvolutionalBoxPredictor.class_prediction_bias_init', index=9,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_depthwise', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_depthwise', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=519,
serialized_end=919,
)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE = _descriptor.Descriptor(
name='BoxEncodingsClipRange',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.min', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.max', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1544,
serialized_end=1593,
)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor(
name='WeightSharedConvolutionalBoxPredictor',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_layers_before_predictor', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.num_layers_before_predictor', index=1,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='depth', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.depth', index=2,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.kernel_size', index=3,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_code_size', index=4,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='class_prediction_bias_init', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.class_prediction_bias_init', index=5,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_dropout', index=6,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.dropout_keep_probability', index=7,
number=12, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_prediction_tower', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.share_prediction_tower', index=8,
number=13, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_depthwise', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_depthwise', index=9,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score_converter', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.score_converter', index=10,
number=16, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_encodings_clip_range', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_encodings_clip_range', index=11,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, ],
enum_types=[
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=922,
serialized_end=1638,
)
_MASKRCNNBOXPREDICTOR = _descriptor.Descriptor(
name='MaskRCNNBoxPredictor',
full_name='object_detection.protos.MaskRCNNBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fc_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.fc_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.MaskRCNNBoxPredictor.use_dropout', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.MaskRCNNBoxPredictor.dropout_keep_probability', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.MaskRCNNBoxPredictor.box_code_size', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.conv_hyperparams', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_instance_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_instance_masks', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_prediction_conv_depth', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_conv_depth', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=256,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_keypoints', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_keypoints', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_height', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_height', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_width', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_width', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_prediction_num_conv_layers', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_num_conv_layers', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='masks_are_class_agnostic', full_name='object_detection.protos.MaskRCNNBoxPredictor.masks_are_class_agnostic', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_box_across_classes', full_name='object_detection.protos.MaskRCNNBoxPredictor.share_box_across_classes', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='convolve_then_upsample_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.convolve_then_upsample_masks', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1641,
serialized_end=2216,
)
_RFCNBOXPREDICTOR = _descriptor.Descriptor(
name='RfcnBoxPredictor',
full_name='object_detection.protos.RfcnBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.RfcnBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_spatial_bins_height', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_spatial_bins_width', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='depth', full_name='object_detection.protos.RfcnBoxPredictor.depth', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1024,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.RfcnBoxPredictor.box_code_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_height', full_name='object_detection.protos.RfcnBoxPredictor.crop_height', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_width', full_name='object_detection.protos.RfcnBoxPredictor.crop_width', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2219,
serialized_end=2468,
)
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].message_type = _CONVOLUTIONALBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].message_type = _MASKRCNNBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].message_type = _RFCNBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'])
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'])
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'])
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'])
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_CONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['score_converter'].enum_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['box_encodings_clip_range'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_MASKRCNNBOXPREDICTOR.fields_by_name['fc_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_MASKRCNNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_RFCNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
DESCRIPTOR.message_types_by_name['BoxPredictor'] = _BOXPREDICTOR
DESCRIPTOR.message_types_by_name['ConvolutionalBoxPredictor'] = _CONVOLUTIONALBOXPREDICTOR
DESCRIPTOR.message_types_by_name['WeightSharedConvolutionalBoxPredictor'] = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
DESCRIPTOR.message_types_by_name['MaskRCNNBoxPredictor'] = _MASKRCNNBOXPREDICTOR
DESCRIPTOR.message_types_by_name['RfcnBoxPredictor'] = _RFCNBOXPREDICTOR
BoxPredictor = _reflection.GeneratedProtocolMessageType('BoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _BOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.BoxPredictor)
))
_sym_db.RegisterMessage(BoxPredictor)
ConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('ConvolutionalBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _CONVOLUTIONALBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ConvolutionalBoxPredictor)
))
_sym_db.RegisterMessage(ConvolutionalBoxPredictor)
WeightSharedConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('WeightSharedConvolutionalBoxPredictor', (_message.Message,), dict(
BoxEncodingsClipRange = _reflection.GeneratedProtocolMessageType('BoxEncodingsClipRange', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange)
))
,
DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor)
))
_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor)
_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange)
MaskRCNNBoxPredictor = _reflection.GeneratedProtocolMessageType('MaskRCNNBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _MASKRCNNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.MaskRCNNBoxPredictor)
))
_sym_db.RegisterMessage(MaskRCNNBoxPredictor)
RfcnBoxPredictor = _reflection.GeneratedProtocolMessageType('RfcnBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _RFCNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.RfcnBoxPredictor)
))
_sym_db.RegisterMessage(RfcnBoxPredictor)
# @@protoc_insertion_point(module_scope)
| true | true |
f71c8eebfa69486d737645f22b74bf824a91eeee | 1,479 | py | Python | beerxml/picobrew_parser.py | rryanburton/PicobrewServerDjango | 24e616677a8543638204889bfe19062b9d16c7ae | [
"MIT"
] | 5 | 2017-07-25T04:32:47.000Z | 2020-10-10T14:27:16.000Z | beerxml/picobrew_parser.py | rryanburton/PicobrewServerDjango | 24e616677a8543638204889bfe19062b9d16c7ae | [
"MIT"
] | 3 | 2020-02-11T23:53:22.000Z | 2021-06-10T19:29:52.000Z | beerxml/picobrew_parser.py | rryanburton/PicobrewServerDjango | 24e616677a8543638204889bfe19062b9d16c7ae | [
"MIT"
] | 1 | 2018-12-23T08:57:34.000Z | 2018-12-23T08:57:34.000Z | from pybeerxml.parser import Parser
from .picobrew_recipe import PicoBrewRecipe
from .picobrew_program_step import PicoBrewProgramStep
from xml.etree import ElementTree
class PicoBrewParser(Parser):
def parse(self, xml_file):
# Parse the BeerXML file
recipes = super(PicoBrewParser, self).parse(xml_file)
# include the recipe filename in the parsed recipes for id creation
for recipe in recipes:
recipe.filename = xml_file
# Cast all recipes to PicoBrewRcipes
recipes = [PicoBrewRecipe(recipe) for recipe in recipes]
# Parse the PicoBrew Program Steps
programs = self.parse_program_steps(xml_file)
# merge the parsed recipes with the PicoBrew program steps
for (recipe, steps) in zip(recipes, programs):
recipe.steps = steps
return recipes
def parse_program_steps(self, xml_file):
programs = []
with open(xml_file, "rt") as f:
tree = ElementTree.parse(f)
for programNode in tree.iterfind(".//PROGRAM"):
steps = []
for stepNode in list(programNode):
tag_name = self.to_lower(stepNode.tag)
if tag_name == "step":
step = PicoBrewProgramStep()
self.nodes_to_object(stepNode, step)
steps.append(step)
programs.append(steps)
return programs
| 29.58 | 75 | 0.610548 | from pybeerxml.parser import Parser
from .picobrew_recipe import PicoBrewRecipe
from .picobrew_program_step import PicoBrewProgramStep
from xml.etree import ElementTree
class PicoBrewParser(Parser):
def parse(self, xml_file):
recipes = super(PicoBrewParser, self).parse(xml_file)
for recipe in recipes:
recipe.filename = xml_file
recipes = [PicoBrewRecipe(recipe) for recipe in recipes]
programs = self.parse_program_steps(xml_file)
for (recipe, steps) in zip(recipes, programs):
recipe.steps = steps
return recipes
def parse_program_steps(self, xml_file):
programs = []
with open(xml_file, "rt") as f:
tree = ElementTree.parse(f)
for programNode in tree.iterfind(".//PROGRAM"):
steps = []
for stepNode in list(programNode):
tag_name = self.to_lower(stepNode.tag)
if tag_name == "step":
step = PicoBrewProgramStep()
self.nodes_to_object(stepNode, step)
steps.append(step)
programs.append(steps)
return programs
| true | true |
f71c8f6aa2a62ab271f35e5e3080e58ef457c6cb | 782 | py | Python | examples/kmeansHeightWeight.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | examples/kmeansHeightWeight.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | examples/kmeansHeightWeight.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import matplotlib.pyplot as pl
import numpy as np
from utils import util
from sklearn.cluster import KMeans
from utils.util import save_fig
data = util.load_mat('heightWeight/heightWeight')
data = data['heightWeightData']
markers = 'Dox'
colors = 'rgb'
for i in range(3):
KM_model = KMeans(init='k-means++', n_clusters=i+1)
labels = KM_model.fit_predict(data[:, [1, 2]])
labels_unique = np.unique(labels)
fig = pl.figure(i)
for j in range(len(labels_unique)):
data_chosen = data[labels == labels_unique[j]]
pl.scatter(data_chosen[:, 1], data_chosen[:, 2],
marker=markers[j],
color=colors[j])
pl.title('k = %s' % (i+1))
save_fig('kmeansHeightWeight_%s.png' % (i+1))
pl.show()
| 28.962963 | 56 | 0.644501 |
import matplotlib.pyplot as pl
import numpy as np
from utils import util
from sklearn.cluster import KMeans
from utils.util import save_fig
data = util.load_mat('heightWeight/heightWeight')
data = data['heightWeightData']
markers = 'Dox'
colors = 'rgb'
for i in range(3):
KM_model = KMeans(init='k-means++', n_clusters=i+1)
labels = KM_model.fit_predict(data[:, [1, 2]])
labels_unique = np.unique(labels)
fig = pl.figure(i)
for j in range(len(labels_unique)):
data_chosen = data[labels == labels_unique[j]]
pl.scatter(data_chosen[:, 1], data_chosen[:, 2],
marker=markers[j],
color=colors[j])
pl.title('k = %s' % (i+1))
save_fig('kmeansHeightWeight_%s.png' % (i+1))
pl.show()
| true | true |
f71c8fc259c0697f53a0ebace9290263e205e66d | 2,867 | py | Python | testsuite/N806.py | ramnes/pep8-naming | 9d2004fcd28d2434bcceeed843cd353a2e8808e2 | [
"MIT"
] | null | null | null | testsuite/N806.py | ramnes/pep8-naming | 9d2004fcd28d2434bcceeed843cd353a2e8808e2 | [
"MIT"
] | null | null | null | testsuite/N806.py | ramnes/pep8-naming | 9d2004fcd28d2434bcceeed843cd353a2e8808e2 | [
"MIT"
] | null | null | null | #: Okay
def test():
good = 1
#: Okay
def test():
def test2():
good = 1
#: Okay
GOOD = 1
#: Okay
class Test(object):
GOOD = 1
#: N806
def test():
Bad = 1
#: N806
def test():
VERY = 2
#: N806
def test():
def test2():
class Foo(object):
def test3(self):
Bad = 3
#: Okay(--ignore-names=Bad)
def test():
Bad = 1
#: Okay
def good():
global Bad
Bad = 1
#: N806
def bad():
global Bad
def foo():
Bad = 1
#: Okay
def test():
# namedtuples are often CamelCased since we treat them a bit like classes
import collections
Thing = collections.namedtuple('Thing', 'a b c')
from collections import namedtuple
ThingTwo = namedtuple('ThingTwo', 'a b c')
#: N806
def bad():
# Currently don't support aliased imports of namedtuple
from collections import namedtuple as nt
Thing = nt('Thing', 'a b c')
#: N806
def unpacking_into_tuple():
Var1, Var2 = range(2)
#: Okay
def unpacking_into_tuple():
var1, var2 = range(2)
#: N806
def unpacking_into_list():
[Var1, Var2] = range(2)
#: Okay
def unpacking_into_list():
[var1, var2] = range(2)
#: Okay
a, [b, c] = [1, [2, 3]]
#: N806
def recursive_unpack():
a, [bB, c] = [1, [2, 3]]
#: Okay
def assingnment_to_attribute():
a.b = 1
#: N806
def f():
with Foo(), Bar() as Bad:
pass
#: Okay
def f():
with FOO() as foo, bar() as bar:
pass
#: Okay
def f():
with suppress(E):
pass
with contextlib.suppress(E):
pass
#: Okay
with Test() as bar:
pass
#: N806
def f():
with Test() as BAD:
pass
#: Okay
def f():
with C() as [a, b, c]:
pass
#: N806
def f():
with C() as [a, Bad, c]:
pass
#: N806
def f():
with C() as (a, b, baD):
pass
#: Okay
def f():
for i in iterator:
pass
#: N806:2:9
def f():
for Bad in iterator:
pass
#: Okay
def f():
for a, b in enumerate(iterator):
pass
#: N806
def f():
for index, ITEM in enumerate(iterator):
pass
#: N806
def f():
try:
f()
except Exception as Bad:
pass
#: Okay
def f():
try:
f()
except Exception as good:
pass
#: Okay
def f():
try:
f()
except:
pass
#: Okay
def f():
try:
f()
except good:
pass
#: N806
def f():
try:
f()
except RuntimeError as good:
pass
except IndexError as BAD:
pass
#: Okay
def f():
return [i for i in range(3)]
#: N806:2:22
def t():
return [ITEM for ITEM in range(3)]
#: N806:2:24
def d():
return {AA: BB for AA, BB in {}}
#: N806:2:22
def s():
return {Item for Item in range(3)}
#: N806:2:57
def n():
return (good + BAD for good in range(3) if good for BAD in range(3) if BAD)
#: N806:2:26
def e():
return tuple(BaD for BaD in range(2))
| 16.668605 | 79 | 0.536798 |
def test():
good = 1
def test():
def test2():
good = 1
GOOD = 1
class Test(object):
GOOD = 1
def test():
Bad = 1
def test():
VERY = 2
def test():
def test2():
class Foo(object):
def test3(self):
Bad = 3
def test():
Bad = 1
def good():
global Bad
Bad = 1
def bad():
global Bad
def foo():
Bad = 1
def test():
import collections
Thing = collections.namedtuple('Thing', 'a b c')
from collections import namedtuple
ThingTwo = namedtuple('ThingTwo', 'a b c')
def bad():
from collections import namedtuple as nt
Thing = nt('Thing', 'a b c')
#: N806
def unpacking_into_tuple():
Var1, Var2 = range(2)
#: Okay
def unpacking_into_tuple():
var1, var2 = range(2)
#: N806
def unpacking_into_list():
[Var1, Var2] = range(2)
#: Okay
def unpacking_into_list():
[var1, var2] = range(2)
#: Okay
a, [b, c] = [1, [2, 3]]
#: N806
def recursive_unpack():
a, [bB, c] = [1, [2, 3]]
#: Okay
def assingnment_to_attribute():
a.b = 1
#: N806
def f():
with Foo(), Bar() as Bad:
pass
#: Okay
def f():
with FOO() as foo, bar() as bar:
pass
#: Okay
def f():
with suppress(E):
pass
with contextlib.suppress(E):
pass
#: Okay
with Test() as bar:
pass
#: N806
def f():
with Test() as BAD:
pass
#: Okay
def f():
with C() as [a, b, c]:
pass
#: N806
def f():
with C() as [a, Bad, c]:
pass
#: N806
def f():
with C() as (a, b, baD):
pass
#: Okay
def f():
for i in iterator:
pass
#: N806:2:9
def f():
for Bad in iterator:
pass
#: Okay
def f():
for a, b in enumerate(iterator):
pass
#: N806
def f():
for index, ITEM in enumerate(iterator):
pass
#: N806
def f():
try:
f()
except Exception as Bad:
pass
#: Okay
def f():
try:
f()
except Exception as good:
pass
#: Okay
def f():
try:
f()
except:
pass
#: Okay
def f():
try:
f()
except good:
pass
#: N806
def f():
try:
f()
except RuntimeError as good:
pass
except IndexError as BAD:
pass
#: Okay
def f():
return [i for i in range(3)]
#: N806:2:22
def t():
return [ITEM for ITEM in range(3)]
#: N806:2:24
def d():
return {AA: BB for AA, BB in {}}
#: N806:2:22
def s():
return {Item for Item in range(3)}
#: N806:2:57
def n():
return (good + BAD for good in range(3) if good for BAD in range(3) if BAD)
#: N806:2:26
def e():
return tuple(BaD for BaD in range(2))
| true | true |
f71c91a3f54d9b713dc013f6441b683eae4ab3e6 | 6,683 | py | Python | graphnas_variants/micro_graphnas/micro_search_space.py | mhnnunes/nas_gnn | 91092acfee9fdbbef3e22252040b80aa96143311 | [
"Apache-2.0"
] | 13 | 2020-07-29T12:45:22.000Z | 2022-03-07T06:26:02.000Z | graphnas_variants/micro_graphnas/micro_search_space.py | mhnnunes/nas_gnn | 91092acfee9fdbbef3e22252040b80aa96143311 | [
"Apache-2.0"
] | null | null | null | graphnas_variants/micro_graphnas/micro_search_space.py | mhnnunes/nas_gnn | 91092acfee9fdbbef3e22252040b80aa96143311 | [
"Apache-2.0"
] | 3 | 2020-09-27T06:43:17.000Z | 2020-11-26T08:43:35.000Z | import torch
import torch.nn.functional as F
from torch.nn import Module
from torch_geometric.nn.conv import *
gnn_list = [
"gat_8", # GAT with 8 heads
"gat_6", # GAT with 6 heads
"gat_4", # GAT with 4 heads
"gat_2", # GAT with 2 heads
"gat_1", # GAT with 1 heads
"gcn", # GCN
"cheb", # chebnet
"sage", # sage
"arma",
"sg", # simplifying gcn
"linear", # skip connection
"zero", # skip connection
]
act_list = [
# "sigmoid", "tanh", "relu", "linear",
# "softplus", "leaky_relu", "relu6", "elu"
"sigmoid", "tanh", "relu", "linear", "elu"
]
def act_map(act):
if act == "linear":
return lambda x: x
elif act == "elu":
return F.elu
elif act == "sigmoid":
return torch.sigmoid
elif act == "tanh":
return torch.tanh
elif act == "relu":
return torch.nn.functional.relu
elif act == "relu6":
return torch.nn.functional.relu6
elif act == "softplus":
return torch.nn.functional.softplus
elif act == "leaky_relu":
return torch.nn.functional.leaky_relu
else:
raise Exception("wrong activate function")
def gnn_map(gnn_name, in_dim, out_dim, concat=False, bias=True) -> Module:
'''
:param gnn_name:
:param in_dim:
:param out_dim:
:param concat: for gat, concat multi-head output or not
:return: GNN model
'''
if gnn_name == "gat_8":
return GATConv(in_dim, out_dim, 8, concat=concat, bias=bias)
elif gnn_name == "gat_6":
return GATConv(in_dim, out_dim, 6, concat=concat, bias=bias)
elif gnn_name == "gat_4":
return GATConv(in_dim, out_dim, 4, concat=concat, bias=bias)
elif gnn_name == "gat_2":
return GATConv(in_dim, out_dim, 2, concat=concat, bias=bias)
elif gnn_name in ["gat_1", "gat"]:
return GATConv(in_dim, out_dim, 1, concat=concat, bias=bias)
elif gnn_name == "gcn":
return GCNConv(in_dim, out_dim)
elif gnn_name == "cheb":
return ChebConv(in_dim, out_dim, K=2, bias=bias)
elif gnn_name == "sage":
return SAGEConv(in_dim, out_dim, bias=bias)
elif gnn_name == "gated":
return GatedGraphConv(in_dim, out_dim, bias=bias)
elif gnn_name == "arma":
return ARMAConv(in_dim, out_dim, bias=bias)
elif gnn_name == "sg":
return SGConv(in_dim, out_dim, bias=bias)
elif gnn_name == "linear":
return LinearConv(in_dim, out_dim, bias=bias)
elif gnn_name == "zero":
return ZeroConv(in_dim, out_dim, bias=bias)
class LinearConv(Module):
def __init__(self,
in_channels,
out_channels,
bias=True):
super(LinearConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.linear = torch.nn.Linear(in_channels, out_channels, bias)
def forward(self, x, edge_index, edge_weight=None):
return self.linear(x)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
class ZeroConv(Module):
def __init__(self,
in_channels,
out_channels,
bias=True):
super(ZeroConv, self).__init__()
self.out_dim = out_channels
def forward(self, x, edge_index, edge_weight=None):
return torch.zeros([x.size(0), self.out_dim]).to(x.device)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
class SearchSpace(object):
def __init__(self, search_space=None):
if search_space:
self.search_space = search_space
else:
self.search_space = {}
self.search_space["act"] = act_list # activate function
self.search_space["gnn"] = gnn_list # gnn type
# 0 means history, 1 means current,
# each layer contains two input
self.search_space["self_index"] = [0, 1]
# same as self_index,
self.search_space["concat_type"] = ["add",
"product",
"concat"]
self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]
self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,
0.5, 0.6, 0.7, 0.8, 0.9]
self.search_space['weight_decay'] = [0, 1e-3, 1e-4,
1e-5, 5e-5, 5e-4]
self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]
pass
def get_search_space(self):
return self.search_space
@staticmethod
def generate_action_list(cell=4):
action_list = []
for i in range(cell):
action_list += ["self_index", "gnn"]
action_list += ["act", "concat_type"]
return action_list
class IncrementSearchSpace(object):
def __init__(self, search_space=None, max_cell=10):
if search_space:
self.search_space = search_space
else:
self.search_space = {}
self.search_space["act"] = act_list # activate function
self.search_space["gnn"] = gnn_list # gnn type
for i in range(max_cell):
self.search_space[f"self_index_{i}"] = list(range(2 + i))
# 0 means history, 1 means current,
# each layer contains two input
self.search_space["concat_type"] = ["add",
"product",
"concat"]
# same as self_index,
self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]
self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,
0.5, 0.6, 0.7, 0.8, 0.9]
self.search_space['weight_decay'] = [0, 1e-3, 1e-4,
1e-5, 5e-5, 5e-4]
self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]
pass
def get_search_space(self):
return self.search_space
@staticmethod
def generate_action_list(cell=4):
action_list = []
for i in range(cell):
action_list += [f"self_index_{i}", "gnn"]
action_list += ["act", "concat_type"]
return action_list
if __name__ == "__main__":
obj = IncrementSearchSpace()
print(obj.generate_action_list())
print(obj.get_search_space())
| 34.448454 | 79 | 0.5511 | import torch
import torch.nn.functional as F
from torch.nn import Module
from torch_geometric.nn.conv import *
gnn_list = [
"gat_8",
"gat_6",
"gat_4",
"gat_2",
"gat_1",
"gcn",
"cheb",
"sage",
"arma",
"sg",
"linear",
"zero",
]
act_list = [
"sigmoid", "tanh", "relu", "linear", "elu"
]
def act_map(act):
if act == "linear":
return lambda x: x
elif act == "elu":
return F.elu
elif act == "sigmoid":
return torch.sigmoid
elif act == "tanh":
return torch.tanh
elif act == "relu":
return torch.nn.functional.relu
elif act == "relu6":
return torch.nn.functional.relu6
elif act == "softplus":
return torch.nn.functional.softplus
elif act == "leaky_relu":
return torch.nn.functional.leaky_relu
else:
raise Exception("wrong activate function")
def gnn_map(gnn_name, in_dim, out_dim, concat=False, bias=True) -> Module:
if gnn_name == "gat_8":
return GATConv(in_dim, out_dim, 8, concat=concat, bias=bias)
elif gnn_name == "gat_6":
return GATConv(in_dim, out_dim, 6, concat=concat, bias=bias)
elif gnn_name == "gat_4":
return GATConv(in_dim, out_dim, 4, concat=concat, bias=bias)
elif gnn_name == "gat_2":
return GATConv(in_dim, out_dim, 2, concat=concat, bias=bias)
elif gnn_name in ["gat_1", "gat"]:
return GATConv(in_dim, out_dim, 1, concat=concat, bias=bias)
elif gnn_name == "gcn":
return GCNConv(in_dim, out_dim)
elif gnn_name == "cheb":
return ChebConv(in_dim, out_dim, K=2, bias=bias)
elif gnn_name == "sage":
return SAGEConv(in_dim, out_dim, bias=bias)
elif gnn_name == "gated":
return GatedGraphConv(in_dim, out_dim, bias=bias)
elif gnn_name == "arma":
return ARMAConv(in_dim, out_dim, bias=bias)
elif gnn_name == "sg":
return SGConv(in_dim, out_dim, bias=bias)
elif gnn_name == "linear":
return LinearConv(in_dim, out_dim, bias=bias)
elif gnn_name == "zero":
return ZeroConv(in_dim, out_dim, bias=bias)
class LinearConv(Module):
def __init__(self,
in_channels,
out_channels,
bias=True):
super(LinearConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.linear = torch.nn.Linear(in_channels, out_channels, bias)
def forward(self, x, edge_index, edge_weight=None):
return self.linear(x)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
class ZeroConv(Module):
def __init__(self,
in_channels,
out_channels,
bias=True):
super(ZeroConv, self).__init__()
self.out_dim = out_channels
def forward(self, x, edge_index, edge_weight=None):
return torch.zeros([x.size(0), self.out_dim]).to(x.device)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
class SearchSpace(object):
def __init__(self, search_space=None):
if search_space:
self.search_space = search_space
else:
self.search_space = {}
self.search_space["act"] = act_list
self.search_space["gnn"] = gnn_list
self.search_space["self_index"] = [0, 1]
self.search_space["concat_type"] = ["add",
"product",
"concat"]
self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]
self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,
0.5, 0.6, 0.7, 0.8, 0.9]
self.search_space['weight_decay'] = [0, 1e-3, 1e-4,
1e-5, 5e-5, 5e-4]
self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]
pass
def get_search_space(self):
return self.search_space
@staticmethod
def generate_action_list(cell=4):
action_list = []
for i in range(cell):
action_list += ["self_index", "gnn"]
action_list += ["act", "concat_type"]
return action_list
class IncrementSearchSpace(object):
def __init__(self, search_space=None, max_cell=10):
if search_space:
self.search_space = search_space
else:
self.search_space = {}
self.search_space["act"] = act_list
self.search_space["gnn"] = gnn_list
for i in range(max_cell):
self.search_space[f"self_index_{i}"] = list(range(2 + i))
self.search_space["concat_type"] = ["add",
"product",
"concat"]
self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]
self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,
0.5, 0.6, 0.7, 0.8, 0.9]
self.search_space['weight_decay'] = [0, 1e-3, 1e-4,
1e-5, 5e-5, 5e-4]
self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]
pass
def get_search_space(self):
return self.search_space
@staticmethod
def generate_action_list(cell=4):
action_list = []
for i in range(cell):
action_list += [f"self_index_{i}", "gnn"]
action_list += ["act", "concat_type"]
return action_list
if __name__ == "__main__":
obj = IncrementSearchSpace()
print(obj.generate_action_list())
print(obj.get_search_space())
| true | true |
f71c945e6058577857c0b8a5868cd8a7b234044b | 2,412 | py | Python | jupyter_server_mathjax/app.py | minrk/jupyter_server_mathjax | 4dfbcf70ee00de3776cd2acf1debdc790e56f64e | [
"BSD-3-Clause"
] | null | null | null | jupyter_server_mathjax/app.py | minrk/jupyter_server_mathjax | 4dfbcf70ee00de3776cd2acf1debdc790e56f64e | [
"BSD-3-Clause"
] | null | null | null | jupyter_server_mathjax/app.py | minrk/jupyter_server_mathjax | 4dfbcf70ee00de3776cd2acf1debdc790e56f64e | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from pathlib import Path
from traitlets import default, observe, Unicode
from tornado.web import RedirectHandler
from jupyter_server.extension.application import ExtensionApp
from jupyter_server.utils import url_path_join
from jupyter_server.transutils import _
STATIC_ASSETS_PATH = Path(__file__).parent / "static"
class DeprecatedRedirectHandler(RedirectHandler):
def get(self, *args, **kwargs):
import warnings
warnings.warn(
"Redirecting old Notebook MathJax URL to new one. This will be removed in a future release.",
PendingDeprecationWarning,
)
super().get(*args, **kwargs)
class MathJaxExtension(ExtensionApp):
name = "jupyter_server_mathjax"
# By listing the path to the assets here, jupyter_server
# automatically creates a static file handler at
# /static/jupyter_server_mathjax/...
static_paths = [str(STATIC_ASSETS_PATH)]
mathjax_config = Unicode(
"TeX-AMS-MML_HTMLorMML-full,Safe",
config=True,
help=_("""The MathJax.js configuration file that is to be used."""),
)
@observe("mathjax_config")
def _update_mathjax_config(self, change):
self.log.info(_("Using MathJax configuration file: %s"), change["new"])
def initialize_settings(self):
# Add settings specific to this extension to the
# tornado webapp settings.
self.settings.update({
"mathjax_config": self.mathjax_config,
"mathjax_url": "/static/jupyter_server_mathjax/MathJax.js"
})
def initialize_handlers(self):
webapp = self.serverapp.web_app
base_url = self.serverapp.base_url
host_pattern = ".*$"
# Add a deprecated redirect for all MathJax paths from the classic
# notebook to the static endpoint created for this extension.
webapp.add_handlers(
host_pattern,
[
(
url_path_join(base_url, "/static/components/MathJax/(.*)"),
DeprecatedRedirectHandler,
{
"url": url_path_join(
self.static_url_prefix, "/{0}" # {0} = group 0 in url path
)
},
)
],
)
| 31.736842 | 105 | 0.625622 |
from pathlib import Path
from traitlets import default, observe, Unicode
from tornado.web import RedirectHandler
from jupyter_server.extension.application import ExtensionApp
from jupyter_server.utils import url_path_join
from jupyter_server.transutils import _
STATIC_ASSETS_PATH = Path(__file__).parent / "static"
class DeprecatedRedirectHandler(RedirectHandler):
def get(self, *args, **kwargs):
import warnings
warnings.warn(
"Redirecting old Notebook MathJax URL to new one. This will be removed in a future release.",
PendingDeprecationWarning,
)
super().get(*args, **kwargs)
class MathJaxExtension(ExtensionApp):
name = "jupyter_server_mathjax"
static_paths = [str(STATIC_ASSETS_PATH)]
mathjax_config = Unicode(
"TeX-AMS-MML_HTMLorMML-full,Safe",
config=True,
help=_("""The MathJax.js configuration file that is to be used."""),
)
@observe("mathjax_config")
def _update_mathjax_config(self, change):
self.log.info(_("Using MathJax configuration file: %s"), change["new"])
def initialize_settings(self):
self.settings.update({
"mathjax_config": self.mathjax_config,
"mathjax_url": "/static/jupyter_server_mathjax/MathJax.js"
})
def initialize_handlers(self):
webapp = self.serverapp.web_app
base_url = self.serverapp.base_url
host_pattern = ".*$"
webapp.add_handlers(
host_pattern,
[
(
url_path_join(base_url, "/static/components/MathJax/(.*)"),
DeprecatedRedirectHandler,
{
"url": url_path_join(
self.static_url_prefix, "/{0}"
)
},
)
],
)
| true | true |
f71c94ef510848605c979ad6aae3be1a96a86bcd | 5,538 | py | Python | src/movies/management/commands/add_kp_movie.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | 10 | 2021-01-10T09:39:16.000Z | 2022-02-05T06:40:47.000Z | src/movies/management/commands/add_kp_movie.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | null | null | null | src/movies/management/commands/add_kp_movie.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | 1 | 2021-01-11T17:04:06.000Z | 2021-01-11T17:04:06.000Z | import asyncio
import os
from datetime import date
from os import getenv
from django.core.files.images import ImageFile
from django.core.management.base import BaseCommand
from movies.models import Poster, Movie, Genre
from person.models import Person, Photo, PersonRole
from parser.formatter import get_formatted_movie_fields, get_formatted_person_fields, get_formatted_role_fields
from parser.kinopoisk_api import KP
from argparse import ArgumentParser
class Command(BaseCommand):
help = 'Get full film info from kinopoisk and add to database'
def add_arguments(self, parser: ArgumentParser):
parser.add_argument('movie_id', type=int)
parser.add_argument('-k', '--api-key', default=getenv('KP_API_KEY'))
async def _get_movie_info(self, kp: KP, movie_id: int):
movie, persons = await kp.get_full_film_info(movie_id)
posters = await kp.get_film_photo(movie_id)
kp.REQUESTS_LIMIT = 50
photos_tasks = [asyncio.create_task(kp.get_person_photo(person["kp_id"])) for person in persons]
photos = await asyncio.gather(*photos_tasks)
return {
'movie': movie,
'posters': posters,
'persons': persons,
'photos': photos
}
def _get_kp_id_from_image_data(self, image_data: dict):
filename: str = next(iter(image_data))
return int(filename.removesuffix('.jpg').removeprefix('person_').removeprefix('movie_'))
@staticmethod
def safe_mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def add_person(self, raw_person_data: dict, photos) -> tuple[int, Person]:
kp_id = int(raw_person_data.get('kp_id'))
person_data = get_formatted_person_fields(raw_person_data)
person_data['birth_date'] = date(*map(int, birth_date.split('-'))) \
if (birth_date := person_data['birth_date']) else None
person_data['death'] = date(*map(int, birth_date.split('-'))) \
if (birth_date := person_data['death']) else None
person: Person = Person.objects.get_or_create(**person_data)[0]
if not person.photos.exists() and (image_bin := next(iter(photos[kp_id].values()))):
self.safe_mkdir('temp')
file_path = os.path.join('temp', next(iter(photos[kp_id])))
with open(file_path, 'wb') as f:
f.write(image_bin)
try:
Photo(image=ImageFile(open(file_path, 'rb')),
person=person,
orientation=Photo.OrientationType.VERTICAL.name,
format=Photo.FormatType.MEDIUM.name).save()
finally:
os.remove(file_path)
return kp_id, person
def handle(self, *args, **options):
movie_id = options['movie_id']
self.main(movie_id, options['api_key'])
def main(self, movie_id, api_key):
print(api_key)
kinopoisk = KP(api_key)
self.stdout.write("Collect data")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
future = asyncio.ensure_future(self._get_movie_info(kinopoisk, movie_id))
loop.run_until_complete(future)
full_movie_info: dict = future.result()
self.stdout.write(self.style.SUCCESS("Data received"))
movie_info: dict = full_movie_info['movie']
genres = [Genre.objects.get_or_create(title=genre)[0] for genre in movie_info['genres']]
formatted_movie_info = get_formatted_movie_fields(movie_info)
# movie = Movie.objects.filter(**formatted_movie_info).first()
if Movie.objects.filter(**formatted_movie_info).exists():
self.stdout.write(self.style.WARNING(f"Movie {movie_id} exists in this database"))
return
formatted_movie_info['movie_type_id'] = formatted_movie_info.pop('movie_type')
movie: Movie = Movie(**formatted_movie_info)
movie.save()
self.stdout.write(f"Movie {movie} created")
for genre in genres:
movie.genres.add(genre)
self.stdout.write(self.style.SUCCESS("Movie saved"))
photos = {self._get_kp_id_from_image_data(image_data): image_data for image_data in full_movie_info['photos']}
persons_kp_id_map = {}
raw_person_data: dict
for raw_person_data in full_movie_info['persons']:
kp_id, person = self.add_person(raw_person_data, photos)
persons_kp_id_map[kp_id] = person
self.stdout.write(self.style.SUCCESS("Persons saved"))
for role in movie_info['roles']:
PersonRole(**get_formatted_role_fields(role, movie, persons_kp_id_map[int(role['kp_id'])])).save()
self.stdout.write(self.style.SUCCESS("Roles saved"))
for filename, image_bin in full_movie_info['posters'].items():
if not image_bin:
continue
self.safe_mkdir('temp')
file_path = os.path.join('temp', filename)
with open(file_path, 'wb') as f:
f.write(image_bin)
try:
Poster(movie=movie,
image=ImageFile(open(file_path, 'rb')),
orientation=Poster.OrientationType.VERTICAL.name,
format=Poster.FormatType.LARGE.name if '_small' in filename else Poster.FormatType.LARGE.name). \
save()
finally:
os.remove(file_path)
os.rmdir('temp')
self.stdout.write(self.style.SUCCESS("Posters saved"))
| 43.606299 | 120 | 0.639581 | import asyncio
import os
from datetime import date
from os import getenv
from django.core.files.images import ImageFile
from django.core.management.base import BaseCommand
from movies.models import Poster, Movie, Genre
from person.models import Person, Photo, PersonRole
from parser.formatter import get_formatted_movie_fields, get_formatted_person_fields, get_formatted_role_fields
from parser.kinopoisk_api import KP
from argparse import ArgumentParser
class Command(BaseCommand):
help = 'Get full film info from kinopoisk and add to database'
def add_arguments(self, parser: ArgumentParser):
parser.add_argument('movie_id', type=int)
parser.add_argument('-k', '--api-key', default=getenv('KP_API_KEY'))
async def _get_movie_info(self, kp: KP, movie_id: int):
movie, persons = await kp.get_full_film_info(movie_id)
posters = await kp.get_film_photo(movie_id)
kp.REQUESTS_LIMIT = 50
photos_tasks = [asyncio.create_task(kp.get_person_photo(person["kp_id"])) for person in persons]
photos = await asyncio.gather(*photos_tasks)
return {
'movie': movie,
'posters': posters,
'persons': persons,
'photos': photos
}
def _get_kp_id_from_image_data(self, image_data: dict):
filename: str = next(iter(image_data))
return int(filename.removesuffix('.jpg').removeprefix('person_').removeprefix('movie_'))
@staticmethod
def safe_mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def add_person(self, raw_person_data: dict, photos) -> tuple[int, Person]:
kp_id = int(raw_person_data.get('kp_id'))
person_data = get_formatted_person_fields(raw_person_data)
person_data['birth_date'] = date(*map(int, birth_date.split('-'))) \
if (birth_date := person_data['birth_date']) else None
person_data['death'] = date(*map(int, birth_date.split('-'))) \
if (birth_date := person_data['death']) else None
person: Person = Person.objects.get_or_create(**person_data)[0]
if not person.photos.exists() and (image_bin := next(iter(photos[kp_id].values()))):
self.safe_mkdir('temp')
file_path = os.path.join('temp', next(iter(photos[kp_id])))
with open(file_path, 'wb') as f:
f.write(image_bin)
try:
Photo(image=ImageFile(open(file_path, 'rb')),
person=person,
orientation=Photo.OrientationType.VERTICAL.name,
format=Photo.FormatType.MEDIUM.name).save()
finally:
os.remove(file_path)
return kp_id, person
def handle(self, *args, **options):
movie_id = options['movie_id']
self.main(movie_id, options['api_key'])
def main(self, movie_id, api_key):
print(api_key)
kinopoisk = KP(api_key)
self.stdout.write("Collect data")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
future = asyncio.ensure_future(self._get_movie_info(kinopoisk, movie_id))
loop.run_until_complete(future)
full_movie_info: dict = future.result()
self.stdout.write(self.style.SUCCESS("Data received"))
movie_info: dict = full_movie_info['movie']
genres = [Genre.objects.get_or_create(title=genre)[0] for genre in movie_info['genres']]
formatted_movie_info = get_formatted_movie_fields(movie_info)
if Movie.objects.filter(**formatted_movie_info).exists():
self.stdout.write(self.style.WARNING(f"Movie {movie_id} exists in this database"))
return
formatted_movie_info['movie_type_id'] = formatted_movie_info.pop('movie_type')
movie: Movie = Movie(**formatted_movie_info)
movie.save()
self.stdout.write(f"Movie {movie} created")
for genre in genres:
movie.genres.add(genre)
self.stdout.write(self.style.SUCCESS("Movie saved"))
photos = {self._get_kp_id_from_image_data(image_data): image_data for image_data in full_movie_info['photos']}
persons_kp_id_map = {}
raw_person_data: dict
for raw_person_data in full_movie_info['persons']:
kp_id, person = self.add_person(raw_person_data, photos)
persons_kp_id_map[kp_id] = person
self.stdout.write(self.style.SUCCESS("Persons saved"))
for role in movie_info['roles']:
PersonRole(**get_formatted_role_fields(role, movie, persons_kp_id_map[int(role['kp_id'])])).save()
self.stdout.write(self.style.SUCCESS("Roles saved"))
for filename, image_bin in full_movie_info['posters'].items():
if not image_bin:
continue
self.safe_mkdir('temp')
file_path = os.path.join('temp', filename)
with open(file_path, 'wb') as f:
f.write(image_bin)
try:
Poster(movie=movie,
image=ImageFile(open(file_path, 'rb')),
orientation=Poster.OrientationType.VERTICAL.name,
format=Poster.FormatType.LARGE.name if '_small' in filename else Poster.FormatType.LARGE.name). \
save()
finally:
os.remove(file_path)
os.rmdir('temp')
self.stdout.write(self.style.SUCCESS("Posters saved"))
| true | true |
f71c9666f42e0445cb30a86089bfe762d8443e53 | 1,292 | py | Python | archspee/presenters/log.py | wangpy/archspee | 97855f903106fba567ffda8cdc25b061cd8bdf5e | [
"MIT"
] | 8 | 2019-01-22T13:03:40.000Z | 2021-12-30T22:11:12.000Z | archspee/presenters/log.py | wangpy/archspee | 97855f903106fba567ffda8cdc25b061cd8bdf5e | [
"MIT"
] | null | null | null | archspee/presenters/log.py | wangpy/archspee | 97855f903106fba567ffda8cdc25b061cd8bdf5e | [
"MIT"
] | null | null | null | from archspee.presenters import PresenterBase
from archspee.listeners import ListenerStatus
_LOG_LEVEL = None
class LogPresenter(PresenterBase):
def __init__(self, action_callback, **kwargs):
self.__log_level = _LOG_LEVEL
super(LogPresenter, self).__init__(action_callback)
self.status = ListenerStatus.standby
self.disabled = False
def on_listener_status(self, trigger_id, status, is_disabled):
if status != self.status or is_disabled != self.disabled:
self.logger.info('Status changed: status=%s, disabled=%d' % (repr(status), is_disabled))
self.status = status
self.disabled = is_disabled
def on_recognization_started(self, trigger_id):
self.logger.info('Recognization started')
def on_intent_handled(self, trigger_id, spoken_text, intent, entities, summary, body, level):
self.logger.info('Intent handled: %s, %s (%s)' % (summary, body, level))
def on_error_handled(self, trigger_id, status_code, response_text, summary, body, level):
self.logger.info('Error handled: %s, %s (%s)' % (summary, body, level))
def start(self):
self.logger.info('Log presenter started.');
def terminate(self):
self.logger.info('Log presenter terminated.');
| 39.151515 | 100 | 0.687307 | from archspee.presenters import PresenterBase
from archspee.listeners import ListenerStatus
_LOG_LEVEL = None
class LogPresenter(PresenterBase):
def __init__(self, action_callback, **kwargs):
self.__log_level = _LOG_LEVEL
super(LogPresenter, self).__init__(action_callback)
self.status = ListenerStatus.standby
self.disabled = False
def on_listener_status(self, trigger_id, status, is_disabled):
if status != self.status or is_disabled != self.disabled:
self.logger.info('Status changed: status=%s, disabled=%d' % (repr(status), is_disabled))
self.status = status
self.disabled = is_disabled
def on_recognization_started(self, trigger_id):
self.logger.info('Recognization started')
def on_intent_handled(self, trigger_id, spoken_text, intent, entities, summary, body, level):
self.logger.info('Intent handled: %s, %s (%s)' % (summary, body, level))
def on_error_handled(self, trigger_id, status_code, response_text, summary, body, level):
self.logger.info('Error handled: %s, %s (%s)' % (summary, body, level))
def start(self):
self.logger.info('Log presenter started.');
def terminate(self):
self.logger.info('Log presenter terminated.');
| true | true |
f71c96af05ee8e95f66b314c7abe60dd75cb2846 | 14,146 | py | Python | python/oneflow/nn/optimizer/optimizer.py | butterluo/oneflow | cf2ce575d80f89642b71bee2248e69b09213007d | [
"Apache-2.0"
] | null | null | null | python/oneflow/nn/optimizer/optimizer.py | butterluo/oneflow | cf2ce575d80f89642b71bee2248e69b09213007d | [
"Apache-2.0"
] | null | null | null | python/oneflow/nn/optimizer/optimizer.py | butterluo/oneflow | cf2ce575d80f89642b71bee2248e69b09213007d | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import functools
import warnings
from copy import deepcopy
from itertools import chain
from typing import Any, Callable, Dict, Union

import oneflow as flow
from oneflow.framework.tensor import Tensor
from oneflow.nn.graph.block import TensorBlock
from oneflow.nn.parameter import Parameter
from oneflow.nn.utils.clip_grad import clip_grad_norm_
class ParamGroup(object):
    """One optimizer parameter group: a list of tensors plus the options
    (learning rate, clip settings, ...) that apply to them.

    Built from a dict of the form ``{"params": [tensor, ...], option: value}``;
    options that are not supplied fall back to ``default_options``.
    """

    def __init__(
        self, parameters: Dict[str, Any], default_options: Dict,
    ):
        # Only the dict form is accepted here; plain tensors / iterators are
        # normalized beforehand by Optimizer._parse_input_parameters.
        assert isinstance(parameters, dict) and "params" in parameters
        assert not isinstance(parameters["params"], (Parameter, Tensor))
        self._parameters = list()
        for entry in parameters["params"]:
            if isinstance(entry, (Parameter, Tensor)):
                self._parameters.append(entry)
            elif isinstance(entry, TensorBlock):
                # nn.Graph blocks contribute the tensor they wrap.
                self._parameters.append(entry.origin)
            else:
                raise ValueError(
                    "parameters in ParamGroup must be Tensor or TensorBlock."
                )
        # Start from the defaults, then overlay per-group overrides for the
        # keys the defaults define.
        self._options = deepcopy(default_options)
        self._options.update(
            {name: parameters[name] for name in self._options if name in parameters}
        )
        # Gradient clipping is active only when both settings were supplied.
        self._enable_clip_grad = (
            "clip_grad_max_norm" in parameters
            and "clip_grad_norm_type" in parameters
        )
        if self._enable_clip_grad:
            self._options["clip_grad_max_norm"] = parameters["clip_grad_max_norm"]
            self._options["clip_grad_norm_type"] = parameters["clip_grad_norm_type"]

    def __getitem__(self, key):
        return self._options[key]

    def __setitem__(self, key, value):
        self._options[key] = value

    def __contains__(self, key):
        return key in self._options

    def setdefault(self, key, value):
        self._options.setdefault(key, value)

    def items(self):
        # Iterates the *instance attributes* (consumed by
        # Optimizer.state_dict), not the options dict.
        return self.__dict__.items()

    @property
    def options(self):
        return self._options

    @property
    def parameters(self):
        return self._parameters
class _SourceOpOnlyResourceDependenceMode:
def __init__(self):
self.guard_ = None
def __enter__(self):
self.guard = (
flow._oneflow_internal.eager.multi_client.SourceOpOnlyResourceDependenceModeGuard()
)
def __exit__(self, *args, **kwargs):
del self.guard
def _decorate_step(step):
def decorated_step(*args, **kwargs):
with _SourceOpOnlyResourceDependenceMode():
return step(*args, **kwargs)
return decorated_step
class Optimizer(object):
    """Base class of all oneflow optimizers.

    Manages parameter groups and the optimizer state and provides the
    shared machinery for state (de)serialization, gradient clipping and
    gradient zeroing.  Concrete optimizers implement :meth:`step`.
    """

    def __init__(self, parameters, options):
        self.param_groups = list()
        self._default_options = options
        self._state = dict()
        self._state["step"] = 0
        self._parse_input_parameters(parameters)
        # Every step() invocation runs under the source-op-only resource
        # dependence mode (see _decorate_step).
        self.step = _decorate_step(self.step)

    def add_param_group(self, param_group) -> None:
        """Add a parameter group; implemented by concrete optimizers."""
        raise NotImplementedError()

    def load_state_dict(self, state_dict) -> None:
        r"""
        Load the state of the optimizer which is created by `state_dict` function.

        It almost copied from: https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.load_state_dict
        """
        # Validate the state_dict
        groups = self.param_groups
        saved_groups = state_dict["param_groups"]

        if len(groups) != len(saved_groups):
            raise ValueError(
                "loaded state dict has a different number of parameter groups"
            )
        param_lens = (len(g._parameters) for g in groups)
        saved_lens = (len(g["params"]) for g in saved_groups)
        if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
            raise ValueError(
                "loaded state dict contains a parameter group "
                "that doesn't match the size of optimizer's group"
            )

        # Update the state: saved groups refer to parameters by index, map
        # those indices back to the live parameter tensors.
        id_map = {
            old_id: p
            for old_id, p in zip(
                chain.from_iterable((g["params"] for g in saved_groups)),
                chain.from_iterable((g._parameters for g in groups)),
            )
        }

        def cast(param, value):
            r"""Make a deep copy of value, casting all tensors to device or placement of param."""
            if isinstance(value, Tensor):
                if value.is_local:
                    value = value.to(param.device)
                else:
                    value = value.to_consistent(
                        placement=param.placement, sbp=param.sbp
                    )
                return value
            elif isinstance(value, dict):
                return {k: cast(param, v) for k, v in value.items()}
            elif isinstance(value, collections.abc.Iterable):
                # BUGFIX: ``collections.Iterable`` was a deprecated alias and
                # was removed in Python 3.10; the ABC lives in collections.abc.
                return type(value)(cast(param, v) for v in value)
            else:
                return value

        # Copy state assigned to params (and cast tensors to appropriate types).
        # State that is not assigned to params is copied as is (needed for
        # backward compatibility).
        state = dict()
        for k, v in state_dict["state"].items():
            if k in id_map:
                param = id_map[k]
                state[param] = cast(param, v)
            else:
                state[k] = v
        self._state = state

        # Update parameter groups, setting their 'params' value
        def update_group(group, new_group):
            group._options = deepcopy(new_group["_options"])
            group._enable_clip_grad = new_group["_enable_clip_grad"]
            return group

        param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
        self.param_groups = param_groups

    def state_dict(self):
        r"""
        Returns the state of the optimizer as a :class:`dict`.

        It contains two entries:

        * state - a dict holding current optimization state. Its content
            differs between optimizer classes.
        * param_group - a dict containing all parameter groups.

        It almost copied from: https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.state_dict
        """
        # Save order indices instead of Tensors
        param_mappings = {}
        start_index = 0

        def pack_group(group):
            nonlocal start_index
            packed = {k: v for k, v in group.items() if k != "_parameters"}
            param_mappings.update(
                {
                    id(p): i
                    for i, p in enumerate(group._parameters, start_index)
                    if id(p) not in param_mappings
                }
            )
            packed["params"] = [param_mappings[id(p)] for p in group._parameters]
            start_index += len(packed["params"])
            return packed

        param_groups = [pack_group(g) for g in self.param_groups]
        # Remap state to use order indices as keys
        packed_state = {
            (param_mappings[id(k)] if isinstance(k, Tensor) else k): v
            for k, v in self._state.items()
        }
        return {
            "state": packed_state,
            "param_groups": param_groups,
        }

    def step(self, closure: Union[Callable, None] = None) -> Union[Tensor, None]:
        """Perform one optimization step; implemented by subclasses."""
        raise NotImplementedError()

    def clip_grad(self):
        r"""Clips gradient norm of an iterable of parameters.

        The norm is computed over all gradients together, as if they were concatenated into a single vector.
        You can set the max_norm and norm_type.
        For more details, you can refer to the documentation of each optimizer(like Adam, SGD and so on).

        You can also refer the code in :func:`oneflow.nn.utils.clip_grad_norm_`
        """
        for param_group in self.param_groups:
            if param_group._enable_clip_grad:
                clip_grad_norm_(
                    param_group.parameters,
                    param_group["clip_grad_max_norm"],
                    param_group["clip_grad_norm_type"],
                    True,
                )
            else:
                warnings.warn(
                    "To enable clip_grad, passing the `clip_grad_max_norm` and `clip_grad_norm_type` parameters when instantializing the Optimizer."
                )

    def zero_grad(self, set_to_none: bool = False):
        """Sets the gradients of all optimized Tensors to zero.

        Args:
            set_to_none (bool): instead of setting to zero, set the grads to None.
                This will in general have lower memory footprint, and can modestly
                improve performance. However, it changes certain behaviors.
        For example:
            1. When the user tries to access a gradient and perform manual ops on
            it, a None attribute or a Tensor full of 0s will behave differently.

            2. If the user requests zero_grad(set_to_none=True) followed by a
            backward pass, grads are guaranteed to be None for params that did not
            receive a gradient.

            3. Optimizers have a different behavior if the gradient is 0 or None
            (in one case it does the step with a gradient of 0 and in the other
            it skips the step altogether).
        """
        for param_group in self.param_groups:
            for param in param_group.parameters:
                if param.grad is not None:
                    if set_to_none:
                        param.grad = None
                    else:
                        param.grad.zeros_()

    def _parse_input_parameters(self, parameters):
        """
        Supports such parameters:
            1. Iterator: flow.optim.SGD(module.parameters(), lr=0.1)
            2. List[Dict]: flow.optim.SGD([{"params": module1.parameters()}, {"params": module2.parameters()}])
            3. List[Parameter or Tensor]: flow.optim.SGD([module.weight, module.bias])
        """
        if isinstance(parameters, collections.abc.Iterator):
            # Iterator
            self.param_groups.append(
                ParamGroup({"params": list(parameters)}, self._default_options)
            )
        elif isinstance(parameters, collections.abc.Iterable):
            # List[Dict]
            if isinstance(parameters[0], dict):
                for param in parameters:
                    assert isinstance(param, dict)
                    self.param_groups.append(ParamGroup(param, self._default_options))
            # List[Parameter or Tensor]
            else:
                self.param_groups.append(
                    ParamGroup({"params": parameters}, self._default_options)
                )
        else:
            raise TypeError(
                f"params argument given to the optimizer should be an iterable of Tensors or dicts, but got {type(parameters)}"
            )

    def _generate_grad_clip_conf_for_optim_conf(self, param_group, optimizer_conf):
        # nn.Graph only supports one specific clipping configuration; any
        # other setting is ignored with a warning.
        if param_group._enable_clip_grad:
            if (
                param_group["clip_grad_max_norm"] == 1.0
                and param_group["clip_grad_norm_type"] == 2.0
            ):
                optimizer_conf.mutable_clip_conf().mutable_clip_by_global_norm().set_clip_norm(
                    param_group["clip_grad_max_norm"]
                )
            else:
                warnings.warn(
                    "For now, nn.Graph only support clip grad with `clip_grad_max_norm == 1.0` and `clip_grad_norm_type == 2.0`."
                )

    @property
    def support_sparse(self):
        """Whether the optimizer supports sparse (indexed-slices) updates."""
        return False

    def _check_variables_in_graph(self, vars_conf):
        """Verify that every trainable parameter is known to the nn.Graph."""
        for param_group in self.param_groups:
            for param in param_group.parameters:
                if not param.requires_grad:
                    continue
                if param not in vars_conf:
                    raise ValueError(
                        f"Parameter <{param}> is not in the corresponding nn.Graph/nn.Module."
                        " Please make sure you call the module's to(..)/to_consistent(...) method first,"
                        " then add the module's parameters into an optimizer."
                    )

    def _check_variables_optimizer_bound(self, vars_conf):
        """Bind each trainable parameter to this optimizer, rejecting
        parameters already bound to a different one."""
        for param_group in self.param_groups:
            for param in param_group.parameters:
                if not param.requires_grad:
                    continue
                if vars_conf[param].bound_optimizer is None:
                    vars_conf[param].bound_optimizer = self
                elif vars_conf[param].bound_optimizer is not self:
                    raise ValueError(
                        f"<{vars_conf[param].name}> is already bound to another optimizer."
                    )

    def _generate_indexed_slices_optimizer_conf(self, job_conf, vars_conf):
        """Register trainable variables for sparse updating in the job conf."""
        if not self.support_sparse:
            raise ValueError(f"{self.__class__} does not support sparse updating.")
        for param_group in self.param_groups:
            for param in param_group.parameters:
                if not param.requires_grad:
                    continue
                sparse_opt_conf = job_conf.mutable_indexed_slices_optimizer_conf()
                sparse_variable_op_names = sparse_opt_conf.mutable_include_op_names()
                sparse_variable_op_names.add_op_name(vars_conf[param].name)
| 38.336043 | 148 | 0.600028 | import collections
import warnings
from copy import deepcopy
from itertools import chain
from typing import Any, Callable, Dict, Union
from oneflow.framework.tensor import Tensor
from oneflow.nn.graph.block import TensorBlock
from oneflow.nn.parameter import Parameter
from oneflow.nn.utils.clip_grad import clip_grad_norm_
import oneflow as flow
class ParamGroup(object):
def __init__(
self, parameters: Dict[str, Any], default_options: Dict,
):
assert isinstance(parameters, dict) and "params" in parameters
assert not isinstance(parameters["params"], (Parameter, Tensor))
self._parameters = list()
for p in parameters["params"]:
if isinstance(p, (Parameter, Tensor)):
self._parameters.append(p)
elif isinstance(p, TensorBlock):
self._parameters.append(p.origin)
else:
raise ValueError(
"parameters in ParamGroup must be Tensor or TensorBlock."
)
self._options = deepcopy(default_options)
for key in self._options:
if key in parameters:
self._options[key] = parameters[key]
self._enable_clip_grad = False
if "clip_grad_max_norm" in parameters and "clip_grad_norm_type" in parameters:
self._enable_clip_grad = True
self._options["clip_grad_max_norm"] = parameters["clip_grad_max_norm"]
self._options["clip_grad_norm_type"] = parameters["clip_grad_norm_type"]
def __getitem__(self, key):
return self._options[key]
def __setitem__(self, key, value):
self._options[key] = value
def __contains__(self, key):
return self._options.__contains__(key)
def setdefault(self, key, value):
if key not in self._options:
self._options[key] = value
def items(self):
return self.__dict__.items()
@property
def options(self):
return self._options
@property
def parameters(self):
return self._parameters
class _SourceOpOnlyResourceDependenceMode:
def __init__(self):
self.guard_ = None
def __enter__(self):
self.guard = (
flow._oneflow_internal.eager.multi_client.SourceOpOnlyResourceDependenceModeGuard()
)
def __exit__(self, *args, **kwargs):
del self.guard
def _decorate_step(step):
def decorated_step(*args, **kwargs):
with _SourceOpOnlyResourceDependenceMode():
return step(*args, **kwargs)
return decorated_step
class Optimizer(object):
def __init__(self, parameters, options):
self.param_groups = list()
self._default_options = options
self._state = dict()
self._state["step"] = 0
self._parse_input_parameters(parameters)
self.step = _decorate_step(self.step)
def add_param_group(self, param_group) -> None:
raise NotImplementedError()
def load_state_dict(self, state_dict) -> None:
groups = self.param_groups
saved_groups = state_dict["param_groups"]
if len(groups) != len(saved_groups):
raise ValueError(
"loaded state dict has a different number of parameter groups"
)
param_lens = (len(g._parameters) for g in groups)
saved_lens = (len(g["params"]) for g in saved_groups)
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
raise ValueError(
"loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group"
)
id_map = {
old_id: p
for old_id, p in zip(
chain.from_iterable((g["params"] for g in saved_groups)),
chain.from_iterable((g._parameters for g in groups)),
)
}
def cast(param, value):
if isinstance(value, Tensor):
if value.is_local:
value = value.to(param.device)
else:
value = value.to_consistent(
placement=param.placement, sbp=param.sbp
)
return value
elif isinstance(value, dict):
return {k: cast(param, v) for k, v in value.items()}
elif isinstance(value, collections.Iterable):
return type(value)(cast(param, v) for v in value)
else:
return value
state = dict()
for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
state[param] = cast(param, v)
else:
state[k] = v
self._state = state
def update_group(group, new_group):
group._options = deepcopy(new_group["_options"])
group._enable_clip_grad = new_group["_enable_clip_grad"]
return group
param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
self.param_groups = param_groups
def state_dict(self):
param_mappings = {}
start_index = 0
def pack_group(group):
nonlocal start_index
packed = {k: v for k, v in group.items() if k != "_parameters"}
param_mappings.update(
{
id(p): i
for i, p in enumerate(group._parameters, start_index)
if id(p) not in param_mappings
}
)
packed["params"] = [param_mappings[id(p)] for p in group._parameters]
start_index += len(packed["params"])
return packed
param_groups = [pack_group(g) for g in self.param_groups]
packed_state = {
(param_mappings[id(k)] if isinstance(k, Tensor) else k): v
for k, v in self._state.items()
}
return {
"state": packed_state,
"param_groups": param_groups,
}
def step(self, closure: Union[Callable, None] = None) -> Union[Tensor, None]:
raise NotImplementedError()
def clip_grad(self):
for param_group in self.param_groups:
if param_group._enable_clip_grad:
clip_grad_norm_(
param_group.parameters,
param_group["clip_grad_max_norm"],
param_group["clip_grad_norm_type"],
True,
)
else:
warnings.warn(
"To enable clip_grad, passing the `clip_grad_max_norm` and `clip_grad_norm_type` parameters when instantializing the Optimizer."
)
def zero_grad(self, set_to_none: bool = False):
for param_group in self.param_groups:
for param in param_group.parameters:
if param.grad is not None:
if set_to_none:
param.grad = None
else:
param.grad.zeros_()
def _parse_input_parameters(self, parameters):
if isinstance(parameters, collections.abc.Iterator):
self.param_groups.append(
ParamGroup({"params": list(parameters)}, self._default_options)
)
elif isinstance(parameters, collections.abc.Iterable):
if isinstance(parameters[0], dict):
for param in parameters:
assert isinstance(param, dict)
self.param_groups.append(ParamGroup(param, self._default_options))
else:
self.param_groups.append(
ParamGroup({"params": parameters}, self._default_options)
)
else:
raise TypeError(
f"params argument given to the optimizer should be an iterable of Tensors or dicts, but got {type(parameters)}"
)
def _generate_grad_clip_conf_for_optim_conf(self, param_group, optimizer_conf):
if param_group._enable_clip_grad:
if (
param_group["clip_grad_max_norm"] == 1.0
and param_group["clip_grad_norm_type"] == 2.0
):
optimizer_conf.mutable_clip_conf().mutable_clip_by_global_norm().set_clip_norm(
param_group["clip_grad_max_norm"]
)
else:
warnings.warn(
"For now, nn.Graph only support clip grad with `clip_grad_max_norm == 1.0` and `clip_grad_norm_type == 2.0`."
)
@property
def support_sparse(self):
return False
def _check_variables_in_graph(self, vars_conf):
for param_group in self.param_groups:
for param in param_group.parameters:
if not param.requires_grad:
continue
if param not in vars_conf:
raise ValueError(
f"Parameter <{param}> is not in the corresponding nn.Graph/nn.Module."
" Please make sure you call the module's to(..)/to_consistent(...) method first,"
" then add the module's parameters into an optimizer."
)
def _check_variables_optimizer_bound(self, vars_conf):
for param_group in self.param_groups:
for param in param_group.parameters:
if not param.requires_grad:
continue
if vars_conf[param].bound_optimizer is None:
vars_conf[param].bound_optimizer = self
elif vars_conf[param].bound_optimizer is not self:
raise ValueError(
f"<{vars_conf[param].name}> is already bound to another optimizer."
)
def _generate_indexed_slices_optimizer_conf(self, job_conf, vars_conf):
if not self.support_sparse:
raise ValueError(f"{self.__class__} does not support sparse updating.")
for param_group in self.param_groups:
for param in param_group.parameters:
if not param.requires_grad:
continue
sparse_opt_conf = job_conf.mutable_indexed_slices_optimizer_conf()
sparse_variable_op_names = sparse_opt_conf.mutable_include_op_names()
sparse_variable_op_names.add_op_name(vars_conf[param].name)
| true | true |
f71c971bf4dd805103974078d53aae515b91c0a1 | 1,361 | py | Python | petastorm/cache.py | cclauss/petastorm | 12fc6542005c6dc7c99997604b939536cca79fa9 | [
"Apache-2.0"
] | 1 | 2018-09-25T10:59:29.000Z | 2018-09-25T10:59:29.000Z | petastorm/cache.py | cclauss/petastorm | 12fc6542005c6dc7c99997604b939536cca79fa9 | [
"Apache-2.0"
] | null | null | null | petastorm/cache.py | cclauss/petastorm | 12fc6542005c6dc7c99997604b939536cca79fa9 | [
"Apache-2.0"
] | 1 | 2018-09-25T10:59:32.000Z | 2018-09-25T10:59:32.000Z | # Copyright (c) 2017-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class CacheBase(object):
    """Abstract interface for cache backends.

    Implementations decide where and how values are stored; callers
    interact with the cache only through :meth:`get`.
    (``six.add_metaclass`` keeps the ABC usable under both Python 2 and 3.)
    """

    @abc.abstractmethod
    def get(self, key, fill_cache_func):
        """Gets an entry from the cache implementation.

        If there is a cache miss, ``fill_cache_func()`` will be evaluated to get the value.

        :param key: A key identifying cache entry
        :param fill_cache_func: This function will be evaluated (``fill_cache_func()``) to populate cache, if no
          value is present in the cache.
        :return: A value from cache
        """
        pass
class NullCache(CacheBase):
    """Cache implementation that never stores anything.

    Every lookup is a miss, so the value-producing function runs on each
    call.  Useful as a no-op default when caching is disabled.
    """

    def get(self, key, fill_cache_func):
        """Ignore *key* and simply evaluate ``fill_cache_func``."""
        del key  # nothing is ever cached, so the key is irrelevant
        return fill_cache_func()
| 33.195122 | 112 | 0.709772 |
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class CacheBase(object):
@abc.abstractmethod
def get(self, key, fill_cache_func):
pass
class NullCache(CacheBase):
def get(self, key, fill_cache_func):
return fill_cache_func()
| true | true |
f71c98221a39db59c80de17a016146f0be85cd00 | 6,266 | py | Python | nicos_mlz/mira/devices/stargate.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/mira/devices/stargate.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/mira/devices/stargate.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Tobias Weber <tweber@frm2.tum.de>
#
# *****************************************************************************
"""Mira-Stargate.
This is the shielding of the analyzer with 11 blocks. The att axis does not
move any elements under the blocks, so we can move to a new block state at
any time (in this implementation, before starting the axis).
Only 0, 1 or 2 blocks may be opened at a time. The first and last block should
not be opened since they are stationary.
The blocks are controlled via a Festo valve arrangement of 11 stable valves
represented by two bits that can be moved into open (01) or closed (10)
positions.
Festo uses Modbus, and the 22 needed output bits are distributed in the lower 8
bits of three consecutive 16-bit holding registers (offset_out). Readback is
done in three different holding registers with addresses n, n+2, n+4.
"""
from time import monotonic
from nicos.core import SIMULATION, Attach, InvalidValueError, Param, listof, \
status
from nicos.devices import entangle
from nicos_mlz.mira.devices.axis import HoveringAxis
class Stargate(entangle.DigitalOutput):
    """Device for controlling the MIRA-Stargate blocks.

    The value is a list of 11 ints (one per shielding block):
    1 = open, 0 = closed.  See the module docstring for the Festo valve
    encoding (two bits per block: 01 = open, 10 = closed).
    """

    valuetype = listof(int)

    parameters = {
        'offset_in': Param('Offset of digital input values',
                           type=int, mandatory=True),
        'offset_out': Param('Offset of digital output values',
                            type=int, mandatory=True),
        'chevron_att_angles': Param('att angle for shielding elements',
                                    type=listof(listof(int)),
                                    mandatory=True),
    }

    # Timestamp of the last doStart(); used to report a short BUSY phase
    # since the valves give no direct motion feedback.
    _started = 0

    def doRead(self, maxage=0):
        # Readback registers are at offsets n, n+2, n+4 (see module docstring);
        # ReadOutputWords fetches 5 consecutive words, of which 0/2/4 are used.
        words = self._dev.ReadOutputWords([self.offset_in, 5])
        bitvals = [words[0], words[2], words[4]]
        chevrons = []
        for bitval in bitvals:
            for _ in range(4):
                # Lowest bit pair 0b01 means "open".
                chevrons.append(int(bitval & 0b11 == 0b01))
                bitval >>= 2
        # 3 registers x 4 bit-pairs = 12 entries, but only 11 blocks exist.
        return chevrons[:11]

    def doStatus(self, maxage=0):
        # Report BUSY for 3 seconds after a start to let the valves settle.
        if self._started and self._started + 3 > monotonic():
            return status.BUSY, 'moving/waiting'
        return status.OK, ''

    def doStart(self, target):
        # Encode the 11 target states into three registers, two bits per
        # block: bit "bitidx" opens, bit "bitidx+1" closes.
        bitvals = [0, 0, 0]
        for curidx in range(len(target)):
            curval = target[curidx]
            byteidx = curidx // 4
            bitidx = (curidx % 4) * 2
            if curval:
                bitvals[byteidx] |= (1 << bitidx)
            else:
                bitvals[byteidx] |= (1 << (bitidx+1))
        self._dev.WriteOutputWords([self.offset_out] + bitvals)
        self._started = monotonic()

    def doIsAllowed(self, value):
        # Enforce the constraints from the module docstring: at most two
        # blocks open (or all of them), never the stationary end blocks.
        if len(value) != 11:
            raise InvalidValueError(self, 'list must have 11 entries')
        # map everything to 0 or 1
        value = [bool(v) for v in value]
        # check allowed positions
        if value == [True] * 11:
            # open everything is allowed
            return True, ''
        if sum(value) > 2:
            return False, 'cannot open more than 2 chevrons'
        if value[0] or value[10]:
            return False, 'cannot open first or last chevron'
        return True, ''

    def doReadFmtstr(self):
        return '[' + ', '.join(['%d'] * 11) + ']'

    def get_chevrons_for_att(self, att):
        # Compute which blocks must be open for the given att angle, using
        # the per-block [max, min] angle windows from chevron_att_angles.
        chevrons = []
        for curidx in range(len(self.chevron_att_angles)):
            maxmin = self.chevron_att_angles[curidx]
            if len(maxmin) < 2:
                # No window configured: this block stays closed.
                chevrons.append(0)
                continue
            if maxmin[1] < att < maxmin[0]:
                chevrons.append(1)
            else:
                chevrons.append(0)
        return chevrons
class ATT(HoveringAxis):
    """The att axis, coupled to the stargate shielding.

    Before the axis moves, the stargate blocks matching the target angle
    are opened (the att axis does not move anything under the blocks, so
    the blocks can be switched before starting the axis).
    """

    attached_devices = {
        'stargate': Attach('stargate switch device', Stargate),
    }

    parameters = {
        'movestargate': Param('Whether to move the stargate with the axis',
                              type=bool, settable=True, default=True),
    }

    def _move_stargate(self):
        # Open the blocks required for the target angle; skipped (with a
        # warning) when coupling is disabled via the movestargate parameter.
        if self.movestargate:
            self._attached_stargate.start(
                self._attached_stargate.get_chevrons_for_att(self.target))
        else:
            self.log.warning('moving stargate blocks is disabled')

    def _preMoveAction(self):
        # Switch the shielding first, then the normal pre-move handling.
        self._move_stargate()
        HoveringAxis._preMoveAction(self)

    def doStart(self, target):
        # Since the _preMoveAction is not executed in simulation mode,
        # we have to move the stargate here too.
        if self._mode == SIMULATION:
            self._move_stargate()
        HoveringAxis.doStart(self, target)

    def doStatus(self, maxage=0):
        # Combine axis and stargate state: busy while either moves, and
        # ERROR if the blocks do not match the current att angle.
        if not self.movestargate:
            return HoveringAxis.doStatus(self, maxage)
        sgstat = self._attached_stargate.status(maxage)
        if sgstat[0] == status.BUSY:
            return status.BUSY, 'stargate moving'
        axstat = HoveringAxis.doStatus(self, maxage)
        if axstat[0] == status.BUSY:
            return axstat
        axvalue = HoveringAxis.doRead(self, maxage)
        chevrons = list(self._attached_stargate.read(maxage))
        if chevrons != self._attached_stargate.get_chevrons_for_att(axvalue):
            return status.ERROR, 'invalid stargate position for att angle'
        return axstat
| 34.811111 | 79 | 0.608682 |
from time import monotonic
from nicos.core import SIMULATION, Attach, InvalidValueError, Param, listof, \
status
from nicos.devices import entangle
from nicos_mlz.mira.devices.axis import HoveringAxis
class Stargate(entangle.DigitalOutput):
valuetype = listof(int)
parameters = {
'offset_in': Param('Offset of digital input values',
type=int, mandatory=True),
'offset_out': Param('Offset of digital output values',
type=int, mandatory=True),
'chevron_att_angles': Param('att angle for shielding elements',
type=listof(listof(int)),
mandatory=True),
}
_started = 0
def doRead(self, maxage=0):
words = self._dev.ReadOutputWords([self.offset_in, 5])
bitvals = [words[0], words[2], words[4]]
chevrons = []
for bitval in bitvals:
for _ in range(4):
chevrons.append(int(bitval & 0b11 == 0b01))
bitval >>= 2
return chevrons[:11]
def doStatus(self, maxage=0):
if self._started and self._started + 3 > monotonic():
return status.BUSY, 'moving/waiting'
return status.OK, ''
def doStart(self, target):
bitvals = [0, 0, 0]
for curidx in range(len(target)):
curval = target[curidx]
byteidx = curidx // 4
bitidx = (curidx % 4) * 2
if curval:
bitvals[byteidx] |= (1 << bitidx)
else:
bitvals[byteidx] |= (1 << (bitidx+1))
self._dev.WriteOutputWords([self.offset_out] + bitvals)
self._started = monotonic()
def doIsAllowed(self, value):
if len(value) != 11:
raise InvalidValueError(self, 'list must have 11 entries')
value = [bool(v) for v in value]
if value == [True] * 11:
return True, ''
if sum(value) > 2:
return False, 'cannot open more than 2 chevrons'
if value[0] or value[10]:
return False, 'cannot open first or last chevron'
return True, ''
def doReadFmtstr(self):
return '[' + ', '.join(['%d'] * 11) + ']'
def get_chevrons_for_att(self, att):
chevrons = []
for curidx in range(len(self.chevron_att_angles)):
maxmin = self.chevron_att_angles[curidx]
if len(maxmin) < 2:
chevrons.append(0)
continue
if maxmin[1] < att < maxmin[0]:
chevrons.append(1)
else:
chevrons.append(0)
return chevrons
class ATT(HoveringAxis):
    """Attenuator axis that keeps the attached stargate chevrons in sync."""

    attached_devices = {
        'stargate': Attach('stargate switch device', Stargate),
    }
    parameters = {
        'movestargate': Param('Whether to move the stargate with the axis',
                              type=bool, settable=True, default=True),
    }
    def _move_stargate(self):
        # Drive the stargate to the chevron pattern required by the target
        # angle; if disabled, only warn so the axis can still move.
        if self.movestargate:
            self._attached_stargate.start(
                self._attached_stargate.get_chevrons_for_att(self.target))
        else:
            self.log.warning('moving stargate blocks is disabled')
    def _preMoveAction(self):
        self._move_stargate()
        HoveringAxis._preMoveAction(self)
    def doStart(self, target):
        # NOTE(review): the stargate is moved here only in SIMULATION mode,
        # presumably because _preMoveAction is not invoked then -- confirm
        # against HoveringAxis semantics.
        if self._mode == SIMULATION:
            self._move_stargate()
        HoveringAxis.doStart(self, target)
    def doStatus(self, maxage=0):
        """Combine axis status with a stargate/angle consistency check."""
        if not self.movestargate:
            return HoveringAxis.doStatus(self, maxage)
        sgstat = self._attached_stargate.status(maxage)
        if sgstat[0] == status.BUSY:
            return status.BUSY, 'stargate moving'
        axstat = HoveringAxis.doStatus(self, maxage)
        if axstat[0] == status.BUSY:
            return axstat
        # both idle: verify the chevron pattern matches the current angle
        axvalue = HoveringAxis.doRead(self, maxage)
        chevrons = list(self._attached_stargate.read(maxage))
        if chevrons != self._attached_stargate.get_chevrons_for_att(axvalue):
            return status.ERROR, 'invalid stargate position for att angle'
        return axstat
| true | true |
f71c98c738d67bea14753699412d0bb3f45ce1c4 | 237 | py | Python | jina/types/arrays/__init__.py | slettner/jina | 4140961c62359e3acd540a6d88931665c6313824 | [
"Apache-2.0"
] | null | null | null | jina/types/arrays/__init__.py | slettner/jina | 4140961c62359e3acd540a6d88931665c6313824 | [
"Apache-2.0"
] | null | null | null | jina/types/arrays/__init__.py | slettner/jina | 4140961c62359e3acd540a6d88931665c6313824 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from .document import DocumentArray
from .querylang import QueryLangArray
from .chunk import ChunkArray
from .match import MatchArray
| 29.625 | 74 | 0.801688 | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from .document import DocumentArray
from .querylang import QueryLangArray
from .chunk import ChunkArray
from .match import MatchArray
| true | true |
f71c9a76519602baf175d90363655dc76c65ea28 | 512 | py | Python | MobileRevelator/python/postbank_finanzassistent_decrypt.py | ohunecker/MR | b0c93436c7964d87a0b8154f8b7662b1731124b9 | [
"MIT"
] | 98 | 2019-02-03T22:50:24.000Z | 2022-03-17T12:50:56.000Z | MobileRevelator/python/postbank_finanzassistent_decrypt.py | cewatkins/MR | 5ba553fd0eb4c1d80842074a553119486f005822 | [
"MIT"
] | 10 | 2019-03-14T20:12:10.000Z | 2020-05-23T10:37:54.000Z | MobileRevelator/python/postbank_finanzassistent_decrypt.py | cewatkins/MR | 5ba553fd0eb4c1d80842074a553119486f005822 | [
"MIT"
] | 30 | 2019-02-03T22:50:27.000Z | 2022-03-30T12:37:30.000Z | #Filename="finanzassistent"
#Type=Prerun
import os
def main():
    """Decrypt the Postbank Finanzassistent database with a fixed key.

    Relies on the hosting tool injecting ``ctx`` (plugin API object) and
    ``filename`` (path of the encrypted SQLCipher db) into the globals.
    """
    ctx.gui_setMainLabel("Postbank Finanzassistent: Extracting key");
    error=""
    # Static SQLCipher key hard-coded by the app; no per-device extraction.
    dbkey="73839EC3A528910B235859947CC8424543D7B686"
    ctx.gui_setMainLabel("Postbank: Key extracted: " + dbkey)
    if not (ctx.fs_sqlcipher_decrypt(filename, filename + ".dec", dbkey)):
        error="Error: Wrong key for decryption."
    if (error==""):
        return "Postbank Finanzassistent: Decryption of database successful."
    # NOTE(review): the error text above is never surfaced -- on failure an
    # empty string is returned; presumably the framework treats "" as
    # failure. Confirm before changing.
    return ""
import os
def main():
ctx.gui_setMainLabel("Postbank Finanzassistent: Extracting key");
error=""
dbkey="73839EC3A528910B235859947CC8424543D7B686"
ctx.gui_setMainLabel("Postbank: Key extracted: " + dbkey)
if not (ctx.fs_sqlcipher_decrypt(filename, filename + ".dec", dbkey)):
error="Error: Wrong key for decryption."
if (error==""):
return "Postbank Finanzassistent: Decryption of database successful."
return "" | true | true |
f71c9ac104ae461bd523cc38b814d19111b44e47 | 1,166 | py | Python | google/ads/googleads/v10/enums/types/feed_item_target_device.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/enums/types/feed_item_target_device.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/enums/types/feed_item_target_device.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"FeedItemTargetDeviceEnum",},
)
# proto-plus wrapper message; the nested enum mirrors the Google Ads API
# FeedItemTargetDevice values. Presumably generated -- avoid hand edits.
class FeedItemTargetDeviceEnum(proto.Message):
    r"""Container for enum describing possible data types for a feed
    item target device.
    """

    class FeedItemTargetDevice(proto.Enum):
        r"""Possible data types for a feed item target device."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        MOBILE = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| 29.15 | 74 | 0.716123 |
import proto
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"FeedItemTargetDeviceEnum",},
)
class FeedItemTargetDeviceEnum(proto.Message):
class FeedItemTargetDevice(proto.Enum):
UNSPECIFIED = 0
UNKNOWN = 1
MOBILE = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
f71c9b79db447996719fed63c8fac35684923c7b | 3,915 | py | Python | nova/scheduler/ironic_host_manager.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | null | null | null | nova/scheduler/ironic_host_manager.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | null | null | null | nova/scheduler/ironic_host_manager.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic host manager.
This host manager will consume all cpu's, disk space, and
ram from a host / node as it is supporting Baremetal hosts, which can not be
subdivided into multiple instances.
"""
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova.openstack.common import log as logging
import nova.scheduler.base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
host_manager_opts = [
cfg.ListOpt('baremetal_scheduler_default_filters',
default=[
'RetryFilter',
'AvailabilityZoneFilter',
'ComputeFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter',
'ExactRamFilter',
'ExactDiskFilter',
'ExactCoreFilter',
],
help='Which filter class names to use for filtering '
'baremetal hosts when not specified in the request.'),
cfg.BoolOpt('scheduler_use_baremetal_filters',
default=False,
help='Flag to decide whether to use '
'baremetal_scheduler_default_filters or not.'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
class IronicNodeState(bbhm.BaseBaremetalNodeState):
    """Mutable and immutable information tracked for a host.

    This is an attempt to remove the ad-hoc data structures
    previously used and lock down access.
    """

    def update_from_compute_node(self, compute):
        """Update information about a host from its compute_node info."""
        super(IronicNodeState, self).update_from_compute_node(compute)

        self.total_usable_disk_gb = compute['local_gb']
        for attr in ('hypervisor_type', 'hypervisor_version',
                     'hypervisor_hostname', 'cpu_info'):
            setattr(self, attr, compute.get(attr))

        supported = compute.get('supported_instances')
        if supported:
            self.supported_instances = jsonutils.loads(supported)

        self.updated = compute['updated_at']

    def consume_from_instance(self, instance):
        """Consume nodes entire resources regardless of instance request."""
        super(IronicNodeState, self).consume_from_instance(instance)
        self.updated = timeutils.utcnow()
class IronicHostManager(bbhm.BaseBaremetalHostManager):
    """Ironic HostManager class."""

    def __init__(self):
        super(IronicHostManager, self).__init__()
        # Optionally swap in the baremetal-specific filter list.
        if CONF.scheduler_use_baremetal_filters:
            CONF.scheduler_default_filters = (
                CONF.baremetal_scheduler_default_filters)

    def host_state_cls(self, host, node, **kwargs):
        """Factory function/property to create a new HostState."""
        compute = kwargs.get('compute')
        is_baremetal = compute and compute.get('cpu_info') == 'baremetal cpu'
        if is_baremetal:
            return IronicNodeState(host, node, **kwargs)
        return host_manager.HostState(host, node, **kwargs)
| 38.382353 | 78 | 0.676373 |
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova.openstack.common import log as logging
import nova.scheduler.base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
host_manager_opts = [
cfg.ListOpt('baremetal_scheduler_default_filters',
default=[
'RetryFilter',
'AvailabilityZoneFilter',
'ComputeFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter',
'ExactRamFilter',
'ExactDiskFilter',
'ExactCoreFilter',
],
help='Which filter class names to use for filtering '
'baremetal hosts when not specified in the request.'),
cfg.BoolOpt('scheduler_use_baremetal_filters',
default=False,
help='Flag to decide whether to use '
'baremetal_scheduler_default_filters or not.'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
class IronicNodeState(bbhm.BaseBaremetalNodeState):
def update_from_compute_node(self, compute):
super(IronicNodeState, self).update_from_compute_node(compute)
self.total_usable_disk_gb = compute['local_gb']
self.hypervisor_type = compute.get('hypervisor_type')
self.hypervisor_version = compute.get('hypervisor_version')
self.hypervisor_hostname = compute.get('hypervisor_hostname')
self.cpu_info = compute.get('cpu_info')
if compute.get('supported_instances'):
self.supported_instances = jsonutils.loads(
compute.get('supported_instances'))
self.updated = compute['updated_at']
def consume_from_instance(self, instance):
super(IronicNodeState, self).consume_from_instance(instance)
self.updated = timeutils.utcnow()
class IronicHostManager(bbhm.BaseBaremetalHostManager):
def __init__(self):
super(IronicHostManager, self).__init__()
if CONF.scheduler_use_baremetal_filters:
baremetal_default = CONF.baremetal_scheduler_default_filters
CONF.scheduler_default_filters = baremetal_default
def host_state_cls(self, host, node, **kwargs):
compute = kwargs.get('compute')
if compute and compute.get('cpu_info') == 'baremetal cpu':
return IronicNodeState(host, node, **kwargs)
else:
return host_manager.HostState(host, node, **kwargs)
| true | true |
f71c9cd673a863c06787408e99e849774b777b45 | 931 | py | Python | main.py | flatman123/device_auto_config_v0.0.1 | b6335e07735f937089c528130c4b50a6bd32641d | [
"MIT"
] | null | null | null | main.py | flatman123/device_auto_config_v0.0.1 | b6335e07735f937089c528130c4b50a6bd32641d | [
"MIT"
] | null | null | null | main.py | flatman123/device_auto_config_v0.0.1 | b6335e07735f937089c528130c4b50a6bd32641d | [
"MIT"
] | 1 | 2020-10-09T14:43:21.000Z | 2020-10-09T14:43:21.000Z | from decrypt_file import decrypt
from get_commands import fetch_commands
import netmiko
import os
import concurrent.futures
hosts = decrypt(f'{os.getcwd()}/device_json.gpg')
def send_commands(connection, host, commands):
    """Push *commands* as a config set over an open netmiko *connection*.

    *host* is accepted for symmetry with the caller but is not used here.
    """
    connection.send_config_set(commands)
def run(ip_address):
    """Open an SSH session to *ip_address* and push each host entry's commands.

    NOTE(review): this iterates over every entry in the module-level ``hosts``
    dict but always connects to *ip_address*, so a single address receives
    the command sets of all entries -- confirm this is intended.
    """
    for device in hosts:
        # hosts[device] layout (inferred from the indices used here and in
        # __main__): [username, password, ip, ..., device_type, command-file]
        # -- TODO confirm against decrypt()'s output.
        device_info = {
            "username": hosts[device][0],
            "port": 22,
            "device_type": hosts[device][-2],
            "host": ip_address,
            "verbose": True,
            "password": hosts[device][1]
        }
        connect = netmiko.ConnectHandler(**device_info)
        commands = fetch_commands(hosts[device][-1])
        send_commands(connect, device_info['host'], commands)
    return
if __name__ == '__main__':
    # Fan out one run() per host address; threads overlap the SSH waits.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        host_addresses = [hosts[ip][2] for ip in hosts]
        executor.map(run, host_addresses)
| 26.6 | 61 | 0.654135 | from decrypt_file import decrypt
from get_commands import fetch_commands
import netmiko
import os
import concurrent.futures
hosts = decrypt(f'{os.getcwd()}/device_json.gpg')
def send_commands(connection, host, commands):
connection.send_config_set(commands)
return
def run(ip_address):
for device in hosts:
device_info = {
"username": hosts[device][0],
"port": 22,
"device_type": hosts[device][-2],
"host": ip_address,
"verbose": True,
"password": hosts[device][1]
}
connect = netmiko.ConnectHandler(**device_info)
commands = fetch_commands(hosts[device][-1])
send_commands(connect, device_info['host'], commands)
return
if __name__ == '__main__':
with concurrent.futures.ThreadPoolExecutor() as executor:
host_addresses = [hosts[ip][2] for ip in hosts]
executor.map(run, host_addresses)
| true | true |
f71c9dde7d847171940268a4386ef04e1c81c1ea | 20,567 | py | Python | tmmPCECalc.py | NREL/PVwindow | df7091c9d1ebd280aca53c50015e3b1ee7a3183e | [
"BSD-2-Clause"
] | null | null | null | tmmPCECalc.py | NREL/PVwindow | df7091c9d1ebd280aca53c50015e3b1ee7a3183e | [
"BSD-2-Clause"
] | null | null | null | tmmPCECalc.py | NREL/PVwindow | df7091c9d1ebd280aca53c50015e3b1ee7a3183e | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 12:29:21 2021
@author: aduell
"""
#import numpy as np
from numpy import pi, linspace, array, exp
#import tmm
from tmm import inc_tmm, inc_absorp_in_each_layer, inf
#import pandas as pd
#import tmm_vw as tmm
#import matplotlib.pyplot as plt
from matplotlib.pyplot import plot,figure,xlabel,ylabel,show,ylim,legend
from wpv import Layer, Stack
#import scipy.interpolate, scipy.integrate, pandas, sys
from scipy.interpolate import interp1d
from scipy.integrate import quad, trapz
from scipy.optimize import fsolve#, Bounds
import scipy.optimize
from pandas import read_excel
import sys
#import scipy
#from numericalunits import W, K, nm, m, cm, s, eV, meV, V, mA, c0, hPlanck, kB, e, A, ohm
#import sympy
#import sympy.solvers.solvers
assert sys.version_info >= (3,6), 'Requires Python 3.6+'
from pvlib.pvsystem import singlediode
import tmmPVColor as pvc
import CalculateVLTFromSpectrum as cvs
from CalculateVLTFromSpectrum import AM15G, cieplf
import vegas
# This whole thing uses microns for length
'''We determine the incident angle of the sun shining on the cell. Input is in degrees'''
def giveincangle(angle):
    """Convert the sun's incidence angle from degrees to radians."""
    return angle * (pi / 180)
inc_angle = giveincangle(0)
'''We determine the size and scaling of the photon wavelength scale. Units are um'''
num_lams = 500
lams = linspace(0.3,2.5,num=num_lams) #um
'''We are constants and help control units'''
q = 1.602176634e-19 #coulombs. elementary charge
c0 = 299792458 #m/s #Speed of light
hPlanck = 6.62607015e-34 #J*s 4.135667516e-15 #eV*s
kB = 1.380649e-23 #J/K 8.61733034e-5 #eV/K
'''Some units and terms'''
'''Tcell, Ti, To are cell temperature, inside temp and outside temp. Always in kelvin'''
'''Ui and Uo are overall heat-transfer coefficient ofr in side and outside. W/(m**2 *K)'''
'''AbsorberLayer is a number indicating the photoactive layer. If the fourth layer is the PV layer, input is 4'''
''''Rs is series resistance, Rsh is shunt resistance in ohms. See pveducation.org for more info'''
'''eta is the electron-hole pair extraction efficiency term. eta times all absorbed light in the PV layer gives the EQE'''
'''n = diode ideality factor. Used in singlediode equation
Ns = number of cells in series. Used in singlediode equation'''
'''Rtot is total thermal resistance of the window'''
'''We are all the different materials currently available
Thickness is in microns'''
# Factory functions for each supported material: each returns a wpv.Layer
# built from the matching nk data file, with 'i' (incoherent) or 'c'
# (coherent) propagation. Default thicknesses are in microns.
def Glass(Thickness = 6000):
    return Layer(Thickness,'nkLowFeGlass','i')
def TiO2(Thickness = 0.050):
    return Layer(Thickness,'nkTiO2','c')
def FTO(Thickness = 0.250):
    return Layer(Thickness,'nkFTO','c')
def MAPI(Thickness = 0.130):
    return Layer(Thickness,'nkMAPI','c')
def AZO(Thickness = 0.200):
    return Layer(Thickness,'nkAZO','c')
def ITO(Thickness = 0.200):
    return Layer(Thickness,'nkITO','c')
def ITOlowE(Thickness = 0.075):
    return Layer(Thickness,'nkITO','c')
def SnO2(Thickness = 0.05):
    return Layer(Thickness,'nkSnO2','c')
def SnO2lowE(Thickness = 0.030):
    return Layer(Thickness,'nkSnO2','c')
def SnO2lowEfat(Thickness = 0.050):
    return Layer(Thickness,'nkSnO2','c')
def SiO2(Thickness = 0.024):
    return Layer(Thickness,'nkSiO2','c')
def NiO(Thickness = 0.050):
    return Layer(Thickness,'nkNiO','c')
def Ag(Thickness = 0.015):
    return Layer(Thickness,'nkAg','c')
def TiO2lowE(Thickness = 0.030):
    return Layer(Thickness,'nkTiO2','c')
def TiO2lowEfat(Thickness = 0.060):
    return Layer(Thickness,'nkTiO2','c')
def Bleach(Thickness = 0.370):
    return Layer(Thickness,'nkBleach','c')
def ClAlPc(Thickness = 0.300):
    return Layer(Thickness,'nkClAlPc','c')
def C60(Thickness = 0.200):
    return Layer(Thickness,'nkC60','c')
def IR(Thickness = 0.060):
    return Layer(Thickness,'nkPTB7_ThIEICO_4F','c')
def MAPBr(Thickness = 0.500):
    return Layer(Thickness,'nkMAPbBr3','c')
def EVA(Thickness = 3000):
    return Layer(Thickness,'nkEVA','i')
'''We are boundary conditions corresponding to each material type
Can be changed to tune optimization range'''
GlassBound = (5999,6001)
TiO2Bound = (0.025,.1)
FTOBound = (0.1,0.5)
MAPIBound = (.06,.260)
AZOBound = (.1,.4)
ITOBound = (.1,.4)
ITOlowEBound = (0.03,.15)
SnO2Bound = (.025,.1)
SnO2lowEBound = (.015,.06)
SnO2lowEfatBound = (0.025,.1)
SiO2Bound = (.012,.05)
NiOBound = (.025,.1)
AgBound = (.0149, .0151)
TiO2lowEBound = (.015, .070)
TiO2lowEfatBound = (.03,.12)
BleachBound = (.180, .500)
ClAlPcBound = (.150, .600)
C60Bound = (.100,.400)
IRBound = (.030, .12)
MAPBrBound = (.250,1)
EVABound = (2999,3001)
'''I assemble a list of layer objects using Thicknesses and Materials'''
def GiveLayers(Thickness,Materials):
    """Build a list of Layer objects by calling each material factory with
    its thickness. Raises ValueError when the two lists differ in length."""
    if len(Materials) != len(Thickness):
        raise ValueError ('layers and Thickness lengths do not match')
    return [factory(th) for factory, th in zip(Materials, Thickness)]
'''I give a list of boundaries from a list of materials. Dict is a dictionary containing the boundary conditions
All items in the dicitonary are labelled as 'Material'+'Bound' '''
'''
def GiveBounds(Materials, DictBound):
x = len(Materials)
Bounds = []
for i in range(x):
Bounds.append(DictBound[Materials[i].__name__ + 'Bound'])
Bounds = array(Bounds)
return Bounds
'''
'''I produce a Bounds object that defines the boundary conditions for optimization
The version above can be used to produce a list of bounds rather than an object'''
def GiveBounds(Materials, DictBound):
    """Assemble a scipy.optimize.Bounds object for the stack.

    DictBound maps '<MaterialName>Bound' -> (lower, upper) thickness.
    """
    pairs = [DictBound[mat.__name__ + 'Bound'] for mat in Materials]
    lb = [low for low, _ in pairs]
    ub = [high for _, high in pairs]
    return scipy.optimize.Bounds(lb, ub)
'''I give a list of thicknesses from a list of materials. Dict is a dictionary containing the thickness values
All items in the dicitonary are labelled as 'Material'+'Th' '''
def GiveThicks(Materials, DictTh):
    """Look up a thickness per material; DictTh keys are '<MaterialName>Th'."""
    return [DictTh[mat.__name__ + 'Th'] for mat in Materials]
'''Calculates Spectra Based on the layers of the cell
AbsorberLayer is an integer giving the position of the PV layer in the stack. Currently supports 1 PV layer'''
def Spectra(layers, AbsorberLayer):
    """Run incoherent TMM over all wavelengths and collect the spectra.

    *layers* is the stack (list of Layer); *AbsorberLayer* is the 1-based
    index of the PV layer inside the stack. Returns a dict of arrays over
    ``lams``: absorptance in the PV layer, transmission, front/back
    reflection, total absorptance and the R+A+T sanity check.
    """
    # Build thickness / coherence lists with semi-infinite ambient on both ends.
    thicks = [inf]
    iorcs = ['i']
    for layer in layers:
        thicks.append(layer.d)
        iorcs.append(layer.i_or_c)
    thicks.append(inf)
    iorcs.append('i')
    # Reversed copies are used to compute the back-side reflection.
    thicks_bw = thicks[::-1]
    iorcs_bw = iorcs[::-1]
    Ts = []
    Rfs = []
    Rbs = []
    AbsByAbsorbers = []
    #EQEs2 = []
    #IREQEs = []
    layerchoice = AbsorberLayer
    #layerchoice2 = 5
    for lam in lams:
        # Refractive indices at this wavelength, ambient (n=1) on both sides.
        nks = [1]
        for layer in layers:
            nks.append(layer.nk(lam))
        nks.append(1)
        nks_bw = nks[::-1]
        front_spol = inc_tmm('s',nks,thicks,iorcs,inc_angle,lam)
        front_ppol = inc_tmm('p',nks,thicks,iorcs,inc_angle,lam)
        back_spol = inc_tmm('s',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
        back_ppol = inc_tmm('p',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
        # Unpolarized results: average of s- and p-polarization.
        AbsByAbsorber_spol = inc_absorp_in_each_layer(front_spol)[layerchoice]
        AbsByAbsorber_ppol = inc_absorp_in_each_layer(front_ppol)[layerchoice]
        AbsByAbsorbers.append( (AbsByAbsorber_spol + AbsByAbsorber_ppol) / 2. )
        Rfs.append( (front_spol['R']+front_ppol['R']) / 2.)
        Rbs.append( (back_spol['R']+back_ppol['R']) / 2.)
        Ts.append( (front_spol['T']+front_ppol['T']) / 2. )
    Ts = array(Ts)
    Rfs = array(Rfs)
    Rbs = array(Rbs)
    # Total absorptance by energy conservation; sanity check should be ~1.
    As = 1-Ts-Rfs
    sanities = Ts+Rfs+As
    AbsByAbsorbers = array(AbsByAbsorbers)
    Spectra = {'AbsByAbsorbers':AbsByAbsorbers, 'Ts':Ts,'Rfs':Rfs,'Rbs':Rbs,'As':As,'Total':sanities}
    return Spectra
''' Here I calculate VLT and spit it out to the screen'''
'''Gives a spectrum of VLT. Used for plotting'''
def VLTSpectrum(layers):
    """Wrap *layers* in a wpv.Stack so its spectral methods can be used (plotting)."""
    return Stack(layers)
'''Gives VLT as a single number'''
def VLT(layers):
    """Visible light transmittance of the stack as a single number."""
    return Stack(layers).get_visible_light_transmission(lams, inc_angle)
'''This gives VLT as a single number. eliminates
need to recalculate AM15G and cieplf every iteration. Unclear if this will work for
optimization'''
def getFancyVLT(layers):#,lamrange,inc_angle):
    """VLT via vegas Monte-Carlo integration.

    Avoids recomputing AM15G and cieplf on every optimizer iteration;
    returns the mean of the vegas estimate (a plain float).
    """
    integ = vegas.Integrator([lams])
    Trans=Stack(layers)
    # get_RAT(...)[2] is used as the transmission term here.
    numerator = integ(lambda lam: AM15G(lam)*cieplf(lam)*Trans.get_RAT(lam,inc_angle)[2], nitn=10, neval=100)[0]
    denominator = integ(lambda lam: AM15G(lam)*cieplf(lam), nitn=10, neval=100)[0]
    VLT = numerator/denominator
    return VLT.mean
'''Gives minimum and maximum VLT based exclusively on the PV layer.
Only useful for judging VLT constraint for a given PV material
Requires input of single absorber layer with a tuple of (lb,ub)'''
def GiveMinMaxVLT(AbsorberType, Bounds):
    """Min/max VLT attainable from the PV layer alone.

    *Bounds* is a (lower, upper) thickness tuple; the thickest absorber
    yields the lowest VLT and vice versa.
    """
    lo_th, hi_th = Bounds
    thin = GiveLayers([lo_th], [AbsorberType])
    thick = GiveLayers([hi_th], [AbsorberType])
    return {'Material': AbsorberType.__name__,
            'minVLT': VLT(thick), 'maxVLT': VLT(thin),
            'minThick': lo_th, 'maxThick': hi_th}
'''Gives minimum and maximum VLT based exclusively on the PV layer.
Requires list of materials, absorbing layer, and absorber bounds'''
def GiveMinMaxVLTFromMaterials(Materials, AbsorberLayer, Bounds):
    """Min/max VLT from the PV layer alone, picked out of a materials list.

    *AbsorberLayer* is the 1-based index of the PV layer in *Materials*;
    *Bounds* is its (lower, upper) thickness tuple.
    """
    AbsorberType = Materials[AbsorberLayer-1]
    lo_th, hi_th = Bounds
    thin = GiveLayers([lo_th], [AbsorberType])
    thick = GiveLayers([hi_th], [AbsorberType])
    return {'Material': AbsorberType.__name__,
            'minVLT': VLT(thick), 'maxVLT': VLT(thin),
            'minThick': lo_th, 'maxThick': hi_th}
# ******************** Here I add PCE calculation *********************#
'''This stuff imports a spreadsheet of the solar spectrum'''
#worksheet = pandas.read_excel('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')
worksheet = read_excel('./Data/ASTMG173.xls')#('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')
#worksheet = pandas.read_excel('/Users/lwheeler/Code/pv-window-bem/Data/astmg173.xls')
downloaded_array = array(worksheet)
# Wavelength is in column 0, AM1.5G data is column 2
AM15 = downloaded_array[1:, [0,2]]
# The first line should be 280.0 , 4.7309E-23
# The last line should be 4000.0, 7.1043E-03
# print(AM15)
# Interpolate to get a continuous function which I will be able to do integrals on:
'''Interpolated solar spectrum
when using, inputs must be within 300-2500 nm'''
AM15interp = interp1d(AM15[:,0]/1000, AM15[:,1])
# Here’s the plot, it looks correct:
'''Plot of the solar spectrum for verification'''
'''
y_values = np.array([AM15interp(x) for x in lams])
figure()
plot(lams , y_values)
xlabel("Wavelength (nm)")
ylabel("Spectral intensity (W/m$^2$/nm)")
title("Light from the sun");
show()
'''
'''I convert wavelength to energy. E_min and max are used for integration limits '''
Ephoton = hPlanck * c0 / lams *1e6 #J
E_min = min(Ephoton) #J energy units from hPlanck
E_max = max(Ephoton) #J energy units from hPlanck
'''I give the number of photons per......'''
def SPhotonsPerTEA(Ephoton):
    """Solar photon flux per unit time, photon energy and area.

    *Ephoton* is in joules; the AM1.5G intensity (tabulated per wavelength)
    is converted to per-energy and from power to photon count.
    """
    λ = hPlanck * c0 / Ephoton *1e6 #um
    return AM15interp(λ) * (1 / Ephoton) * (hPlanck * c0 / Ephoton**2) * 1e9
'''I give the power for each......'''
def PowerPerTEA(Ephoton):
    """Solar power per unit time, photon energy and area: flux times energy."""
    return Ephoton * SPhotonsPerTEA(Ephoton)
'''I give the solar constant which is the W/m*2 emitted by the sun. Should be ~1000'''
def Solar_Constant(Ephoton):
    """Integrated AM1.5G power (W/m^2); should come out near 1000."""
    # full_output=1 suppresses quad's poor-accuracy warnings.
    return quad(PowerPerTEA, E_min, E_max, full_output=1)[0]
# quad() is ordinary integration; full_output=1 is (surprisingly) how you hide
# the messages warning about poor accuracy in integrating.
'''This is the solar constant value. It is called by optimization and used in a variety of functions here
Should always be ~1000'''
solar_constant = Solar_Constant(Ephoton)
'''I return an interpolated function of a spectrum relative to photon wavelength. Used for plotting'''
def GivelamsInterp(Parameter):
    """Interpolate a spectral array as a function of wavelength (um); for plotting."""
    Curve = Parameter.round(8)
    return interp1d(lams, Curve)
'''I return an interpolated function of a spectrum relative to photon energy'''
def GiveEInterp(Parameter):
    """Interpolate a spectral array as a function of photon energy (J)."""
    Curve = Parameter.round(8)
    return interp1d(Ephoton, Curve)
'''I give Q based on a given spectrum. Units are W/m^2
Input is a spectrum interpolated with respect to energy, E
eta should only be used if looking at a PV layer. Otherwise it is set to 1'''
def GiveQ(Spectra, eta = 1):#Spectra must be an interpolated function
    """Power (W/m^2) carried by an interpolated spectrum *Spectra*(E),
    scaled by *eta* (leave eta=1 except for the PV layer)."""
    return quad(lambda E: eta * Spectra(E) * PowerPerTEA(E),
                E_min, E_max, full_output=1)[0]
'''
#trapz calcs
def GiveQ(Spectra, eta = 1):#Spectra must be an array
integrand = eta*Spectra*PowerPerTEA(Ephoton)
return -np.trapz(integrand, Ephoton)
'''
'''
def GivePhotons(Spectra, eta):#Spectra must be an interpolated function
def integrand(E):
return eta * Spectra(E) * SPhotonsPerTEA(E)
return quad(integrand, E_min, E_max)[0]
'''
# Here I input the spectrum of photons absorbed by the absorber material (Absorbed)
# and the electron-hole pair extraction efficiency (eta). EQE = eta * Absorbed
'''I give the rate of recombination for the solar cell, Units are photons/(s*m**2)'''
def RR0(eta,Absorbed,Tcell):
    """Equilibrium radiative recombination rate, photons/(s*m^2).

    *Absorbed* is the interpolated PV-layer absorptance vs energy,
    *eta* the extraction efficiency, *Tcell* the cell temperature (K).
    """
    def integrand(E):
        return eta * Absorbed(E) * E**2 / (exp(E / (kB * Tcell)) - 1)
    prefactor = (2 * pi) / (c0**2 * hPlanck**3)
    return prefactor * quad(integrand, E_min, E_max, full_output=1)[0]
#units = photons/(s*m**2)
'''I give the amount of energy converted to electricity in terms of photons, units are photons(s/m**2)'''
def Generated(eta,Absorbed):
    """Photon flux converted to carriers in the PV layer, photons/(s*m^2)."""
    def integrand(E):
        return eta * Absorbed(E) * SPhotonsPerTEA(E)
    return quad(integrand, E_min, E_max, full_output=1)[0]
#units photons/(s*m**2)
'''
#Using trapezoidal rule for integration instaed of quad
#AbsByAbsorbers is an aray of intensities, not an interpolated function.
def RR0(eta,Absorbed,Tcell):
AbsByAbsorbers = AbsByAbsorbers.round(8)
integrand = eta * AbsByAbsorbers * (Ephoton)**2 / (np.exp(Ephoton / (kB * Tcell)) - 1)
integral = trapz(integrand, Ephoton)
return ((2 * np.pi) / (c0**2 * hPlanck**3)) * integral
def Generated(eta,Absorbed):
Absorbed = Absorbed.round(8)
integrand = eta * Absorbed * SPhotonsPerTEA(Ephoton)
# integral = quad(integrand, E_min, E_max, full_output=1)[0]
return np.trapz(integrand, Ephoton)
'''
'''I use the single diode equation to return the max power of the cell in watts
Check PVlib documentation for details'''
def Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell, n = 1, Ns = 1):
    """Maximum power point (W/m^2) from pvlib's single-diode model.

    Photocurrent and saturation current come from Generated() and RR0()
    (photon fluxes times the elementary charge q); Rs/Rsh in ohm, *n* is
    the diode ideality factor, *Ns* the number of cells in series.
    """
    data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed,Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
    return data['p_mp']
'''I calculate equilibrium tmperature of the cell assuming the cell is infinitely thin
TotalAbs is the full absorptance of the stack as an array of intensities, uninterpolated.
Absorbed is PV layer absorptance interpolated
Temperature calculation is implicit so the numerical solver fsolve is used.
This equation is derived from Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows'''
def TcellCalc(TotalAbs, eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh):
    """Equilibrium cell temperature (K) from an energy balance.

    *TotalAbs* is the stack's total absorptance array (uninterpolated);
    *Absorbed* the interpolated PV-layer absorptance; Ui/Uo the inside/
    outside heat-transfer coefficients. The balance is implicit in Tcell
    (Pmp depends on temperature), so fsolve is used, seeded at 300 K.
    """
    AbsTotal = GiveEInterp(TotalAbs)
    Qabs = GiveQ(AbsTotal)
    Temp = lambda Tcell: (Qabs - Give_Pmp(eta,Absorbed,Rs,Rsh, Tcell) + Ui*Ti + Uo*To)/(Ui + Uo)-Tcell
    return fsolve(Temp, 300)[0]
'''I use the single diode equation to produce an IV curve and power plot
I also return related values such as Voc, Isc, and Pmp in units volts, amps, and watts
See pvlib singlediode equation for more information'''
def GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1):
    """Solve the single-diode model, plot the IV and power curves, and
    return pvlib's result dict (i_sc, v_oc, i_mp, v_mp, p_mp, v, i, ...).

    Parameters mirror Give_Pmp: *Absorbed* is the interpolated PV-layer
    absorptance, Rs/Rsh series/shunt resistance (ohm), *n* the diode
    ideality factor, *Ns* the number of cells in series.
    """
    data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed, Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
    Vvalues = array(data['v'])
    Ivalues = array(data['i'])

    figure()
    plot(Vvalues, Ivalues, label = 'IV')
    xlabel('Voltage, (V)')
    # Fix: a first ylabel('Current (A) or Power (W/m^2)') call was dead code
    # -- it was immediately overwritten by the label below.
    ylabel('Power (W/m^2)')
    P_values = array([Ivalues * Vvalues])
    plot(Vvalues , P_values.T, label = 'Power')
    ylim(-1, 150)
    legend(loc = 'upper right')
    show()
    return data
'''I give the solar heat gain coefficient. unitless numebr between 0 and 1
Ts is the transmission spectra. Must be a list of intensities, not an interpolated function
This equation comes form a combination of Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows
and equation 3.18 from Fundamentals of Heat and Mass Transfer 6ed Incropera'''
def SHGC(Ts, Ti, To, Tcell, Ui):
    """Solar heat gain coefficient (0..1).

    Transmitted solar power plus the inward-flowing share of absorbed heat,
    minus the baseline conduction, normalized by the solar constant. *Ts*
    is the transmission spectrum as an array (not interpolated).
    """
    #Tcell = TcellCalc(As,Ti,To,eta,Absorbed)
    Rtot = 1/Ui #This is approximate because Ui is assumed
    #Included in GiveQ for simplicity but should not be used for calculating SHGC
    TransTotal = GiveEInterp(Ts)
    Qtrans = GiveQ(TransTotal,1)
    return (Qtrans + Ui*(Tcell-Ti) - ((To-Ti)/Rtot))/solar_constant
'''I give max efficiency also called PCE'''
'''Absorbed must be an interpolated function of the absorption spectrum of the PV layer'''
def max_efficiency(eta,Absorbed,Tcell, Rs, Rsh):
    """Power conversion efficiency (PCE): max-power output / incident solar power."""
    #Tcell = TcellCalc(As,Ti,To,eta,Absorbed)
    return Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell) / solar_constant
'''I give important info about a solar cell such as PCE, SHGC, Temperature, etc'''
def GiveImportantInfo(Thickness, Materials,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle=0):
    """Compute and plot the key figures of merit for a window stack.

    Returns a dict with PCE, VLT, SHGC, Tcell and the IV quantities.
    NOTE: mutates the module-global ``inc_angle`` as a side effect, so all
    later spectral calls use *Angle*.
    """
    global inc_angle
    inc_angle = giveincangle(Angle)
    layers = GiveLayers(Thickness,Materials)
    spectra = Spectra(layers ,AbsorberLayer)
    AbsByAbsorbers = spectra['AbsByAbsorbers']
    Ts = spectra['Ts']
    Rfs = spectra['Rfs']
    Rbs = spectra['Rbs']
    As = spectra['As']
    sanities = spectra['Total']
    Absorbed = GiveEInterp(AbsByAbsorbers)
    VLTcalc = cvs.getVLT(Ts,lams)#VLT(layers)
    Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)
    #Absorbed = tpc.GiveEInterp(tpc.Spectra(tpc.GiveLayers(Thickness, Materials),4)['AbsByAbsorbers'])
    data = GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1)
    Isc = data['i_sc']
    Voc = data['v_oc']
    Imp = data['i_mp']
    Vmp = data['v_mp']
    Pmp = data['p_mp']
    SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)
    PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)
    # Spectral curves vs wavelength, plus the photopic sensitivity curve.
    figure()
    plot(lams,Rfs,color='magenta',marker=None,label="$R_f$")
    plot(lams,Ts,color='green',marker=None,label="$T$")
    plot(lams,Rbs,color='purple',marker=None,label="$R_b$")
    plot(lams,As,color='black',marker=None,label="A")
    plot(lams,AbsByAbsorbers,color='black',linestyle='--',marker=None,label="AbsByAbsorber")
    plot(lams,sanities,color='gold',marker=None,label="R+A+T")
    plot(lams,VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
    xlabel('wavelength, $\mu$m')
    ylabel('Intensity')
    legend(loc = 'upper right')
    show()
    # Same spectra vs photon energy (J -> eV conversion factor).
    EphotoneV = Ephoton*6.241509e+18
    figure()
    plot(EphotoneV, Ts, color='magenta',marker=None,label="$T$")
    plot(EphotoneV, Rfs,color='green',marker=None,label="$R_f$")
    plot(EphotoneV, Rbs,color='orange',marker=None,label="$R_b$")
    plot(EphotoneV, AbsByAbsorbers,color='black',marker=None,label="Abs")
    #plot(Ephoton,tpc.VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
    legend(loc = 'upper right')
    xlabel('Energy, eV')
    ylabel('Intensity')
    show()
    # Color rendering of the transmitted/reflected spectra.
    pvc.GiveColorSwatch(Ts, Rfs)
    pvc.plot_xy_on_fin(Ts, Rfs)
    print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)#,'time to calculate PCE from scratch in seconds = ', TimePCE, 'Time to run optimizer in minutes = ',TimeOptimize/60)
    return {'PCE':PCE, 'VLT':VLTcalc, 'SHGC':SHGCcalc, 'Tcell':Tcell,'Isc':Isc, 'Voc': Voc, 'Imp': Imp, 'Vmp': Vmp,'Pmp': Pmp}
| 37.876611 | 196 | 0.685516 |
from numpy import pi, linspace, array, exp
from tmm import inc_tmm, inc_absorp_in_each_layer, inf
from matplotlib.pyplot import plot,figure,xlabel,ylabel,show,ylim,legend
from wpv import Layer, Stack
from scipy.interpolate import interp1d
from scipy.integrate import quad, trapz
from scipy.optimize import fsolve
import scipy.optimize
from pandas import read_excel
import sys
assert sys.version_info >= (3,6), 'Requires Python 3.6+'
from pvlib.pvsystem import singlediode
import tmmPVColor as pvc
import CalculateVLTFromSpectrum as cvs
from CalculateVLTFromSpectrum import AM15G, cieplf
import vegas
def giveincangle(angle):
    """Convert an incidence angle from degrees to radians."""
    return angle * (pi / 180)
inc_angle = giveincangle(0)
num_lams = 500
lams = linspace(0.3,2.5,num=num_lams)
# Physical constants (SI units).  The stripped source fused these lines;
# hPlanck and kB are reconstructed because they are used below (Ephoton,
# RR0) and the "607015e-34" fragment matches the CODATA Planck constant.
q = 1.602176634e-19       # elementary charge [C]
c0 = 299792458            # speed of light [m/s]
hPlanck = 6.62607015e-34  # Planck constant [J*s]
kB = 1.380649e-23         # Boltzmann constant [J/K]
def Glass(Thickness = 6000):
    """Layer factory: low-iron glass substrate, incoherent propagation."""
    return Layer(Thickness,'nkLowFeGlass','i')
def TiO2(Thickness = 0.050):
return Layer(Thickness,'nkTiO2','c')
def FTO(Thickness = 0.250):
return Layer(Thickness,'nkFTO','c')
def MAPI(Thickness = 0.130):
return Layer(Thickness,'nkMAPI','c')
def AZO(Thickness = 0.200):
return Layer(Thickness,'nkAZO','c')
def ITO(Thickness = 0.200):
return Layer(Thickness,'nkITO','c')
def ITOlowE(Thickness = 0.075):
return Layer(Thickness,'nkITO','c')
def SnO2(Thickness = 0.05):
return Layer(Thickness,'nkSnO2','c')
def SnO2lowE(Thickness = 0.030):
return Layer(Thickness,'nkSnO2','c')
def SnO2lowEfat(Thickness = 0.050):
return Layer(Thickness,'nkSnO2','c')
def SiO2(Thickness = 0.024):
return Layer(Thickness,'nkSiO2','c')
def NiO(Thickness = 0.050):
return Layer(Thickness,'nkNiO','c')
def Ag(Thickness = 0.015):
return Layer(Thickness,'nkAg','c')
def TiO2lowE(Thickness = 0.030):
return Layer(Thickness,'nkTiO2','c')
def TiO2lowEfat(Thickness = 0.060):
return Layer(Thickness,'nkTiO2','c')
def Bleach(Thickness = 0.370):
return Layer(Thickness,'nkBleach','c')
def ClAlPc(Thickness = 0.300):
return Layer(Thickness,'nkClAlPc','c')
def C60(Thickness = 0.200):
return Layer(Thickness,'nkC60','c')
def IR(Thickness = 0.060):
return Layer(Thickness,'nkPTB7_ThIEICO_4F','c')
def MAPBr(Thickness = 0.500):
return Layer(Thickness,'nkMAPbBr3','c')
def EVA(Thickness = 3000):
return Layer(Thickness,'nkEVA','i')
GlassBound = (5999,6001)
TiO2Bound = (0.025,.1)
FTOBound = (0.1,0.5)
MAPIBound = (.06,.260)
AZOBound = (.1,.4)
ITOBound = (.1,.4)
ITOlowEBound = (0.03,.15)
SnO2Bound = (.025,.1)
SnO2lowEBound = (.015,.06)
SnO2lowEfatBound = (0.025,.1)
SiO2Bound = (.012,.05)
NiOBound = (.025,.1)
AgBound = (.0149, .0151)
TiO2lowEBound = (.015, .070)
TiO2lowEfatBound = (.03,.12)
BleachBound = (.180, .500)
ClAlPcBound = (.150, .600)
C60Bound = (.100,.400)
IRBound = (.030, .12)
MAPBrBound = (.250,1)
EVABound = (2999,3001)
def GiveLayers(Thickness,Materials):
    """Instantiate one layer per material.

    Thickness -- sequence of thicknesses, paired positionally with Materials.
    Materials -- sequence of layer-factory callables (e.g. Glass, FTO).
    Raises ValueError when the two sequences differ in length.
    """
    if len(Materials) != len(Thickness):
        raise ValueError('layers and Thickness lengths do not match')
    return [factory(t) for factory, t in zip(Materials, Thickness)]
def GiveBounds(Materials, DictBound):
    """Build a scipy.optimize.Bounds object for the given material stack.

    For each factory in Materials, the '<name>Bound' entry of DictBound
    supplies the (lower, upper) thickness pair; the pairs are collected
    into a single Bounds object in the same order as Materials.
    """
    keys = [m.__name__ + 'Bound' for m in Materials]
    lowers = [DictBound[k][0] for k in keys]
    uppers = [DictBound[k][1] for k in keys]
    return scipy.optimize.Bounds(lowers, uppers)
def GiveThicks(Materials, DictTh):
    """Collect the '<name>Th' thickness entry of DictTh for each factory,
    in the same order as Materials."""
    return [DictTh[m.__name__ + 'Th'] for m in Materials]
def Spectra(layers, AbsorberLayer):
thicks = [inf]
iorcs = ['i']
for layer in layers:
thicks.append(layer.d)
iorcs.append(layer.i_or_c)
thicks.append(inf)
iorcs.append('i')
thicks_bw = thicks[::-1]
iorcs_bw = iorcs[::-1]
Ts = []
Rfs = []
Rbs = []
AbsByAbsorbers = []
layerchoice = AbsorberLayer
for lam in lams:
nks = [1]
for layer in layers:
nks.append(layer.nk(lam))
nks.append(1)
nks_bw = nks[::-1]
front_spol = inc_tmm('s',nks,thicks,iorcs,inc_angle,lam)
front_ppol = inc_tmm('p',nks,thicks,iorcs,inc_angle,lam)
back_spol = inc_tmm('s',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
back_ppol = inc_tmm('p',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
AbsByAbsorber_spol = inc_absorp_in_each_layer(front_spol)[layerchoice]
AbsByAbsorber_ppol = inc_absorp_in_each_layer(front_ppol)[layerchoice]
AbsByAbsorbers.append( (AbsByAbsorber_spol + AbsByAbsorber_ppol) / 2. )
Rfs.append( (front_spol['R']+front_ppol['R']) / 2.)
Rbs.append( (back_spol['R']+back_ppol['R']) / 2.)
Ts.append( (front_spol['T']+front_ppol['T']) / 2. )
Ts = array(Ts)
Rfs = array(Rfs)
Rbs = array(Rbs)
As = 1-Ts-Rfs
sanities = Ts+Rfs+As
AbsByAbsorbers = array(AbsByAbsorbers)
Spectra = {'AbsByAbsorbers':AbsByAbsorbers, 'Ts':Ts,'Rfs':Rfs,'Rbs':Rbs,'As':As,'Total':sanities}
return Spectra
def VLTSpectrum(layers):
return Stack(layers)
def VLT(layers):
VLTstack=Stack(layers)
return VLTstack.get_visible_light_transmission(lams,inc_angle)
def getFancyVLT(layers):
integ = vegas.Integrator([lams])
Trans=Stack(layers)
numerator = integ(lambda lam: AM15G(lam)*cieplf(lam)*Trans.get_RAT(lam,inc_angle)[2], nitn=10, neval=100)[0]
denominator = integ(lambda lam: AM15G(lam)*cieplf(lam), nitn=10, neval=100)[0]
VLT = numerator/denominator
return VLT.mean
def GiveMinMaxVLT(AbsorberType, Bounds):
minThick = GiveLayers([Bounds[0]], [AbsorberType])
maxThick = GiveLayers([Bounds[1]], [AbsorberType])
minimum = VLT(maxThick)
maximum = VLT(minThick)
return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],
'maxThick':Bounds[1]}
def GiveMinMaxVLTFromMaterials(Materials, AbsorberLayer, Bounds):
AbsorberType = Materials[AbsorberLayer-1]
minThick = GiveLayers([Bounds[0]], [AbsorberType])
maxThick = GiveLayers([Bounds[1]], [AbsorberType])
minimum = VLT(maxThick)
maximum = VLT(minThick)
return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],
'maxThick':Bounds[1]}
worksheet = read_excel('./Data/ASTMG173.xls')
downloaded_array = array(worksheet)
AM15 = downloaded_array[1:, [0,2]]
AM15interp = interp1d(AM15[:,0]/1000, AM15[:,1])
Ephoton = hPlanck * c0 / lams *1e6
E_min = min(Ephoton)
E_max = max(Ephoton)
def SPhotonsPerTEA(Ephoton):
λ = hPlanck * c0 / Ephoton *1e6
return AM15interp(λ) * (1 / Ephoton) * (hPlanck * c0 / Ephoton**2) * 1e9
def PowerPerTEA(Ephoton):
return Ephoton * SPhotonsPerTEA(Ephoton)
def Solar_Constant(Ephoton):
return quad(PowerPerTEA,E_min,E_max, full_output=1)[0]
solar_constant = Solar_Constant(Ephoton)
def GivelamsInterp(Parameter):
Curve = Parameter.round(8)
return interp1d(lams, Curve)
def GiveEInterp(Parameter):
Curve = Parameter.round(8)
return interp1d(Ephoton, Curve)
def GiveQ(Spectra, eta = 1):
def integrand(E):
return eta * Spectra(E) * PowerPerTEA(E)
return quad(integrand, E_min, E_max, full_output=1)[0]
def RR0(eta,Absorbed,Tcell):
    """Detailed-balance radiative recombination rate at temperature Tcell.

    Integrates the absorbed blackbody photon flux over the spectrum range;
    the prefactor is 2*pi / (c0^2 * hPlanck^3).
    """
    integrand = lambda E : eta * Absorbed(E) * (E)**2 / (exp(E / (kB * Tcell)) - 1)
    integral = quad(integrand, E_min, E_max, full_output=1)[0]
    # The stripped source fused the return with the next def; restored here.
    return ((2 * pi) / (c0**2 * hPlanck**3)) * integral

def Generated(eta,Absorbed):
    """Photogenerated carrier flux: absorbed photon flux integrated over
    the AM1.5G spectrum (SPhotonsPerTEA)."""
    integrand = lambda E : eta * Absorbed(E) * SPhotonsPerTEA(E)
    return quad(integrand, E_min, E_max, full_output=1)[0]
def Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell, n = 1, Ns = 1):
data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed,Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
return data['p_mp']
def TcellCalc(TotalAbs, eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh):
AbsTotal = GiveEInterp(TotalAbs)
Qabs = GiveQ(AbsTotal)
Temp = lambda Tcell: (Qabs - Give_Pmp(eta,Absorbed,Rs,Rsh, Tcell) + Ui*Ti + Uo*To)/(Ui + Uo)-Tcell
return fsolve(Temp, 300)[0]
def GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1):
data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed, Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
Isc = data['i_sc']
Voc = data['v_oc']
Imp = data['i_mp']
Vmp = data['v_mp']
Pmp = data['p_mp']
Vvalues = array(data['v'])
Ivalues = array(data['i'])
figure()
plot(Vvalues,Ivalues, label = 'IV')
xlabel('Voltage, (V)')
ylabel('Current (A) or Power (W/m^2)')
ylabel('Power (W/m^2)')
P_values = array([Ivalues * Vvalues])
plot(Vvalues , P_values.T, label = 'Power')
ylim(-1, 150)
legend(loc = 'upper right')
show()
return data
def SHGC(Ts, Ti, To, Tcell, Ui):
Rtot = 1/Ui
TransTotal = GiveEInterp(Ts)
Qtrans = GiveQ(TransTotal,1)
return (Qtrans + Ui*(Tcell-Ti) - ((To-Ti)/Rtot))/solar_constant
def max_efficiency(eta,Absorbed,Tcell, Rs, Rsh):
return Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell) / solar_constant
def GiveImportantInfo(Thickness, Materials,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle=0):
global inc_angle
inc_angle = giveincangle(Angle)
layers = GiveLayers(Thickness,Materials)
spectra = Spectra(layers ,AbsorberLayer)
AbsByAbsorbers = spectra['AbsByAbsorbers']
Ts = spectra['Ts']
Rfs = spectra['Rfs']
Rbs = spectra['Rbs']
As = spectra['As']
sanities = spectra['Total']
Absorbed = GiveEInterp(AbsByAbsorbers)
VLTcalc = cvs.getVLT(Ts,lams)
Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)
data = GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1)
Isc = data['i_sc']
Voc = data['v_oc']
Imp = data['i_mp']
Vmp = data['v_mp']
Pmp = data['p_mp']
SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)
PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)
figure()
plot(lams,Rfs,color='magenta',marker=None,label="$R_f$")
plot(lams,Ts,color='green',marker=None,label="$T$")
plot(lams,Rbs,color='purple',marker=None,label="$R_b$")
plot(lams,As,color='black',marker=None,label="A")
plot(lams,AbsByAbsorbers,color='black',linestyle='--',marker=None,label="AbsByAbsorber")
plot(lams,sanities,color='gold',marker=None,label="R+A+T")
plot(lams,VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
xlabel('wavelength, $\mu$m')
ylabel('Intensity')
legend(loc = 'upper right')
show()
EphotoneV = Ephoton*6.241509e+18
figure()
plot(EphotoneV, Ts, color='magenta',marker=None,label="$T$")
plot(EphotoneV, Rfs,color='green',marker=None,label="$R_f$")
plot(EphotoneV, Rbs,color='orange',marker=None,label="$R_b$")
plot(EphotoneV, AbsByAbsorbers,color='black',marker=None,label="Abs")
legend(loc = 'upper right')
xlabel('Energy, eV')
ylabel('Intensity')
show()
pvc.GiveColorSwatch(Ts, Rfs)
pvc.plot_xy_on_fin(Ts, Rfs)
print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)
return {'PCE':PCE, 'VLT':VLTcalc, 'SHGC':SHGCcalc, 'Tcell':Tcell,'Isc':Isc, 'Voc': Voc, 'Imp': Imp, 'Vmp': Vmp,'Pmp': Pmp}
| true | true |
f71c9f9d367cb8155ed384c51b60c4ecac3f16c3 | 447 | py | Python | plan_marker/migrations/0003_auto_20150829_1529.py | oskgeek/tdl_fitness | e61da8b4b216147ba1e5d9b64db75f2cf8568759 | [
"Apache-2.0"
] | null | null | null | plan_marker/migrations/0003_auto_20150829_1529.py | oskgeek/tdl_fitness | e61da8b4b216147ba1e5d9b64db75f2cf8568759 | [
"Apache-2.0"
] | null | null | null | plan_marker/migrations/0003_auto_20150829_1529.py | oskgeek/tdl_fitness | e61da8b4b216147ba1e5d9b64db75f2cf8568759 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Relax UserProfile.plan_created to allow NULL/blank values
    (CharField, max_length 255)."""
    dependencies = [
        ('plan_marker', '0002_userprofile_plan_created'),
    ]
    operations = [
        # Profiles without a stored plan must validate, hence null/blank.
        migrations.AlterField(
            model_name='userprofile',
            name='plan_created',
            field=models.CharField(max_length=255, null=True, blank=True),
        ),
    ]
| 22.35 | 74 | 0.630872 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('plan_marker', '0002_userprofile_plan_created'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='plan_created',
field=models.CharField(max_length=255, null=True, blank=True),
),
]
| true | true |
f71ca0e23cd8fb822e78350418aeea8241322271 | 1,142 | py | Python | sequenceplot/__init__.py | kickingvegas/SequencePlot | 82514e0dc1a3e670ea727041219dc7a69fd9e96b | [
"Apache-2.0"
] | 3 | 2017-07-23T22:32:22.000Z | 2020-05-03T20:16:36.000Z | sequenceplot/__init__.py | kickingvegas/SequencePlot | 82514e0dc1a3e670ea727041219dc7a69fd9e96b | [
"Apache-2.0"
] | null | null | null | sequenceplot/__init__.py | kickingvegas/SequencePlot | 82514e0dc1a3e670ea727041219dc7a69fd9e96b | [
"Apache-2.0"
] | 1 | 2021-09-10T08:45:39.000Z | 2021-09-10T08:45:39.000Z | #!/usr/bin/env python
# Copyright 2012 Yummy Melon Software LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Charles Y. Choi
#
"""
sequenceplot is a module that generates UML sequence diagrams using the UMLGraph package.
"""
__version__ = '0.4'
class SyntaxError(Exception):
    """Raised on malformed sequence-diagram input.

    NOTE(review): this deliberately shadows the builtin ``SyntaxError``
    within this module; renaming would change the public API, so the
    shadowing is only flagged here.
    """
    def __init__(self, value):
        # value: the offending input, echoed (repr'd) by __str__.
        self.value = value
    def __str__(self):
        return repr(self.value)
def picEscapeString(buf):
    """Escape double quotes in *buf* so it can be embedded in a GNU pic
    string literal."""
    return buf.replace('"', '\\"')
from SequenceObject import SequenceObject
from Placeholder import Placeholder
from Actor import Actor
from SequenceDiagram import SequenceDiagram
| 26.55814 | 89 | 0.738179 |
__version__ = '0.4'
class SyntaxError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def picEscapeString(buf):
result = buf.replace('"', '\\"')
return result
from SequenceObject import SequenceObject
from Placeholder import Placeholder
from Actor import Actor
from SequenceDiagram import SequenceDiagram
| true | true |
f71ca16a0d7d9c01229a650639558eb2857cf6b5 | 681 | py | Python | python/discord.py/example-bot.py | martian17/Community-Bin | e7a1471571227fdda3929a9cdd9a3cce743156df | [
"MIT"
] | null | null | null | python/discord.py/example-bot.py | martian17/Community-Bin | e7a1471571227fdda3929a9cdd9a3cce743156df | [
"MIT"
] | null | null | null | python/discord.py/example-bot.py | martian17/Community-Bin | e7a1471571227fdda3929a9cdd9a3cce743156df | [
"MIT"
] | null | null | null | # This is an example of a very basic discord bot in python
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix=".", description="A basic discord bot")
@bot.event
async def on_ready():
    # Fired once the gateway connection is up and the bot cache is ready.
    print("I'm online!")
@commands.command(name="ping")
async def _ping(ctx):
    """Reply with the bot's current gateway latency in milliseconds."""
    latency_ms = bot.latency * 1000  # discord.py reports latency in seconds
    reply = discord.Embed(
        title="Pong!",
        description=f"My latency is {latency_ms:.2f}ms",
    )
    await ctx.send(embed=reply)
bot.add_command(_ping)
if __name__ == "__main__": # make sure the file isn't being imported
bot.run("YOUR_TOKEN_HERE") # put your own bot token in here
| 23.482759 | 73 | 0.678414 |
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix=".", description="A basic discord bot")
@bot.event
async def on_ready():
print("I'm online!")
@commands.command(name="ping")
async def _ping(ctx):
latency = bot.latency * 1000 # convert to ms
embed = discord.Embed(
title="Pong!", # make an embed to send
description=f"My latency is {latency:.2f}ms",
)
await ctx.send(embed=embed)
bot.add_command(_ping)
if __name__ == "__main__": # make sure the file isn't being imported
bot.run("YOUR_TOKEN_HERE")
| true | true |
f71ca30466bc275ef559c5fc42e0c93a4703385c | 1,407 | py | Python | csvkit/convert/__init__.py | tthibo/csvkit | fb12c7df32504b51b9def6e3cff41c36147616cf | [
"MIT"
] | 2 | 2015-03-06T15:22:02.000Z | 2016-03-11T13:35:48.000Z | csvkit/convert/__init__.py | tthibo/csvkit | fb12c7df32504b51b9def6e3cff41c36147616cf | [
"MIT"
] | null | null | null | csvkit/convert/__init__.py | tthibo/csvkit | fb12c7df32504b51b9def6e3cff41c36147616cf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from csvitself import csv2csv
from fixed import fixed2csv
from js import json2csv
from xls import xls2csv
SUPPORTED_FORMATS = ['fixed', 'xls', 'csv']
def convert(f, format, schema=None, key=None, **kwargs):
    """
    Convert a file of a specified format to CSV.

    f -- open file-like object to convert.
    format -- one of 'fixed', 'xls', 'js', 'csv'.
    schema -- schema file, required when format is 'fixed'.
    key -- key to extract rows from, used by the JSON ('js') converter.
    Raises ValueError for missing arguments or unsupported formats.

    NOTE(review): 'js' is handled here but absent from SUPPORTED_FORMATS
    above -- confirm which list is authoritative.
    """
    if not f:
        raise ValueError('f must not be None')

    if not format:
        raise ValueError('format must not be None')

    if format == 'fixed':
        if not schema:
            raise ValueError('schema must not be null when format is "fixed"')

        return fixed2csv(f, schema, **kwargs)
    elif format == 'xls':
        return xls2csv(f, **kwargs)
    elif format == 'js':
        return json2csv(f, key, **kwargs)
    elif format == 'csv':
        return csv2csv(f, **kwargs)
    else:
        raise ValueError('format "%s" is not supported' % format)
def guess_format(filename):
    """
    Try to guess a file's format based on its extension (or lack thereof).

    Returns 'fixed' for extensionless names, the extension itself for
    'xls'/'csv'/'fixed', 'js' for 'json'/'js', and None otherwise.
    """
    _, dot, extension = filename.rpartition('.')
    if not dot:
        # No extension: assume fixed-width
        return 'fixed'
    if extension in ('xls', 'csv', 'fixed'):
        return extension
    if extension in ('json', 'js'):
        return 'js'
    return None
| 25.125 | 78 | 0.606254 |
from csvitself import csv2csv
from fixed import fixed2csv
from js import json2csv
from xls import xls2csv
SUPPORTED_FORMATS = ['fixed', 'xls', 'csv']
def convert(f, format, schema=None, key=None, **kwargs):
if not f:
raise ValueError('f must not be None')
if not format:
raise ValueError('format must not be None')
if format == 'fixed':
if not schema:
raise ValueError('schema must not be null when format is "fixed"')
return fixed2csv(f, schema, **kwargs)
elif format == 'xls':
return xls2csv(f, **kwargs)
elif format == 'js':
return json2csv(f, key, **kwargs)
elif format == 'csv':
return csv2csv(f, **kwargs)
else:
raise ValueError('format "%s" is not supported' % format)
def guess_format(filename):
last_period = filename.rfind('.')
if last_period == -1:
return 'fixed'
extension = filename[last_period + 1:]
if extension == 'xls':
return extension
elif extension in ['json', 'js']:
return 'js'
elif extension == 'csv':
return extension
elif extension == 'fixed':
return extension
return None
| true | true |
f71ca381286ae5e3aa87acbe71537fe119e50954 | 4,491 | py | Python | demoNN.py | zelhar/mg21 | f8392aba7deb63aa85f3d137ef81dea1bb742b41 | [
"MIT"
] | null | null | null | demoNN.py | zelhar/mg21 | f8392aba7deb63aa85f3d137ef81dea1bb742b41 | [
"MIT"
] | null | null | null | demoNN.py | zelhar/mg21 | f8392aba7deb63aa85f3d137ef81dea1bb742b41 | [
"MIT"
] | null | null | null | import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset, TensorDataset
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
import torch.distributions as D
import torch.nn.functional as F
# Download training data from open datasets.
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor(),
)
# Download test data from open datasets.
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor(),
)
batch_size = 64
# Create data loaders.
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
for X, y in test_dataloader:
print("Shape of X [N, C, H, W]: ", X.shape)
print("Shape of y: ", y.shape, y.dtype)
break
# testing synthetic dataset
x = torch.randn((100,3,28,28))
d = TensorDataset(x)
z = d.__getitem__(2) # retuns 1-tuple of tensor (no label)
z[0].shape
# with labels
y = torch.randint(low=0, high=1, size=(100,))
d = TensorDataset(x,y)
z = d.__getitem__(2) # retuns 1-tuple of tensor (no label)
z[0].shape
z[1].shape
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
# Define model
class NeuralNetwork(nn.Module):
    """Fully-connected classifier for 28x28 single-channel images.

    Architecture: flatten -> 784-512-512-10 MLP with ReLU activations.
    ``forward`` returns raw logits (no softmax), suitable for
    ``nn.CrossEntropyLoss``.
    """

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        flat = self.flatten(x)
        return self.linear_relu_stack(flat)
model = NeuralNetwork().to(device)
print(model)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
    """Run one epoch of optimization over *dataloader*.

    Moves each batch to the module-level ``device``, takes one optimizer
    step per batch, and logs the running loss every 100 batches.
    """
    n_samples = len(dataloader.dataset)
    model.train()
    for batch_idx, (inputs, targets) in enumerate(dataloader):
        inputs, targets = inputs.to(device), targets.to(device)

        # Forward pass and loss.
        loss = loss_fn(model(inputs), targets)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            seen = batch_idx * len(inputs)
            print(f"loss: {loss.item():>7f} [{seen:>5d}/{n_samples:>5d}]")
def test(dataloader, model, loss_fn):
    """Evaluate *model* on *dataloader*; print accuracy and mean loss."""
    n_samples = len(dataloader.dataset)
    n_batches = len(dataloader)
    model.eval()
    total_loss = 0
    n_correct = 0
    with torch.no_grad():  # gradients are not needed for evaluation
        for inputs, targets in dataloader:
            inputs, targets = inputs.to(device), targets.to(device)
            logits = model(inputs)
            total_loss += loss_fn(logits, targets).item()
            n_correct += (logits.argmax(1) == targets).type(torch.float).sum().item()
    avg_loss = total_loss / n_batches
    accuracy = n_correct / n_samples
    print(f"Test Error: \n Accuracy: {(100*accuracy):>0.1f}%, Avg loss: {avg_loss:>8f} \n")
epochs = 5
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train(train_dataloader, model, loss_fn, optimizer)
test(test_dataloader, model, loss_fn)
print("Done!")
bce = nn.BCELoss(reduction="none")
x = torch.tensor(0.5)
y = torch.tensor(0.7)
bce(x,y)
f = lambda x, y: y * torch.log(x) + (1-y) * torch.log(1-x)
f(x,y)
torch.softmax(torch.tensor([1,2,3]), 0, torch.float64)
# generate mixed distributions
m = D.OneHotCategorical(torch.tensor([1,2,3,6]))
m.sample()
m.sample_n(10)
m.sample((3,4))
m = D.Normal(torch.tensor([0,10.0]), torch.tensor([1.0,2]))
m.sample((3,4))
# Example of target with class indices
loss = nn.CrossEntropyLoss()
input = torch.randn(3, 5, requires_grad=True)
target = torch.empty(3, dtype=torch.long).random_(5)
output = loss(input, target)
output.backward()
# Example of target with class probabilities
input = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5).softmax(dim=1)
output = loss(input, target)
output.backward()
input = torch.randn((3, 2), requires_grad=True)
target = torch.rand((3, 2), requires_grad=False)
loss = F.binary_cross_entropy(F.sigmoid(input), target)
loss.backward()
loss = nn.BCELoss(reduction="none")
x = torch.tensor([0,0.25,0.5,0.75,1])
F.binary_cross_entropy(x,x,reduction="none")
loss(x,x)
x = torch.tensor([0,25,0.5,0.75,1])
y = torch.tensor([0,0.25,0.5,0.75,1])
loss(x,y)
| 25.959538 | 91 | 0.649521 | import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset, TensorDataset
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
import torch.distributions as D
import torch.nn.functional as F
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor(),
)
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor(),
)
batch_size = 64
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
for X, y in test_dataloader:
print("Shape of X [N, C, H, W]: ", X.shape)
print("Shape of y: ", y.shape, y.dtype)
break
x = torch.randn((100,3,28,28))
d = TensorDataset(x)
z = d.__getitem__(2)
z[0].shape
y = torch.randint(low=0, high=1, size=(100,))
d = TensorDataset(x,y)
z = d.__getitem__(2)
z[0].shape
z[1].shape
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10)
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
model = NeuralNetwork().to(device)
print(model)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
model.train()
for batch, (X, y) in enumerate(dataloader):
X, y = X.to(device), y.to(device)
pred = model(X)
loss = loss_fn(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test(dataloader, model, loss_fn):
size = len(dataloader.dataset)
num_batches = len(dataloader)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
X, y = X.to(device), y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= num_batches
correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
epochs = 5
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train(train_dataloader, model, loss_fn, optimizer)
test(test_dataloader, model, loss_fn)
print("Done!")
bce = nn.BCELoss(reduction="none")
x = torch.tensor(0.5)
y = torch.tensor(0.7)
bce(x,y)
f = lambda x, y: y * torch.log(x) + (1-y) * torch.log(1-x)
f(x,y)
torch.softmax(torch.tensor([1,2,3]), 0, torch.float64)
m = D.OneHotCategorical(torch.tensor([1,2,3,6]))
m.sample()
m.sample_n(10)
m.sample((3,4))
m = D.Normal(torch.tensor([0,10.0]), torch.tensor([1.0,2]))
m.sample((3,4))
loss = nn.CrossEntropyLoss()
input = torch.randn(3, 5, requires_grad=True)
target = torch.empty(3, dtype=torch.long).random_(5)
output = loss(input, target)
output.backward()
input = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5).softmax(dim=1)
output = loss(input, target)
output.backward()
input = torch.randn((3, 2), requires_grad=True)
target = torch.rand((3, 2), requires_grad=False)
loss = F.binary_cross_entropy(F.sigmoid(input), target)
loss.backward()
loss = nn.BCELoss(reduction="none")
x = torch.tensor([0,0.25,0.5,0.75,1])
F.binary_cross_entropy(x,x,reduction="none")
loss(x,x)
x = torch.tensor([0,25,0.5,0.75,1])
y = torch.tensor([0,0.25,0.5,0.75,1])
loss(x,y)
| true | true |
f71ca389de2acdd4122644dc61a4fb411c6d4bf0 | 4,451 | py | Python | geoist/snoopy/algorithms/correlator_algorithms/cross_correlator.py | CHEN-Zhaohui/geoist | 06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b | [
"MIT"
] | 53 | 2018-11-17T03:29:55.000Z | 2022-03-18T02:36:25.000Z | geoist/snoopy/algorithms/correlator_algorithms/cross_correlator.py | CHEN-Zhaohui/geoist | 06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b | [
"MIT"
] | 3 | 2018-11-28T11:37:51.000Z | 2019-01-30T01:52:45.000Z | geoist/snoopy/algorithms/correlator_algorithms/cross_correlator.py | CHEN-Zhaohui/geoist | 06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b | [
"MIT"
] | 35 | 2018-11-17T03:29:57.000Z | 2022-03-23T17:57:06.000Z | # coding=utf-8
from geoist.snoopy.algorithms.correlator_algorithms import CorrelatorAlgorithm
from geoist.snoopy.modules.correlation_result import CorrelationResult
from geoist.snoopy.constants import (DEFAULT_SHIFT_IMPACT,
DEFAULT_ALLOWED_SHIFT_SECONDS)
class CrossCorrelator(CorrelatorAlgorithm):
    """
    Method 1: CrossCorrelation algorithm.
    Ideas come from Paul Bourke(http://paulbourke.net/miscellaneous/correlate/).
    """
    def __init__(self, time_series_a, time_series_b, max_shift_seconds=None, shift_impact=None):
        """
        Initializer
        :param TimeSeries time_series_a: TimeSeries a.
        :param TimeSeries time_series_b: TimeSeries b.
        :param int max_shift_seconds: allowed maximal shift in seconds.
        :param float shift_impact: weight applied to the shift bonus in the
            "shifted correlation coefficient".
        """
        super(CrossCorrelator, self).__init__(self.__class__.__name__, time_series_a, time_series_b)
        self.shift_impact = shift_impact or DEFAULT_SHIFT_IMPACT
        if max_shift_seconds is not None:
            # NOTE(review): the parameter is named in seconds but is stored
            # unconverted into `max_shift_milliseconds`, while the default
            # below multiplies by 1000.  Preserved as-is for backward
            # compatibility; confirm the intended unit with callers.
            self.max_shift_milliseconds = max_shift_seconds
        else:
            self.max_shift_milliseconds = DEFAULT_ALLOWED_SHIFT_SECONDS * 1000

    def _detect_correlation(self):
        """
        Detect correlation by computing correlation coefficients for all allowed shift steps,
        then take the maximum.
        """
        correlations = []
        shifted_correlations = []
        self.time_series_a.normalize()
        self.time_series_b.normalize()
        a, b = self.time_series_a.align(self.time_series_b)
        a_values, b_values = a.values, b.values
        a_avg, b_avg = a.average(), b.average()
        a_stdev, b_stdev = a.stdev(), b.stdev()
        n = len(a)
        denom = a_stdev * b_stdev * n
        # Find the maximal shift steps according to the maximal shift seconds.
        allowed_shift_step = self._find_allowed_shift(a.timestamps)
        if allowed_shift_step:
            shift_upper_bound = allowed_shift_step
            shift_lower_bound = -allowed_shift_step
        else:
            shift_upper_bound = 1
            shift_lower_bound = 0
        for delay in range(shift_lower_bound, shift_upper_bound):
            delay_in_seconds = a.timestamps[abs(delay)] - a.timestamps[0]
            if delay < 0:
                delay_in_seconds = -delay_in_seconds
            # Pearson-style cross-product sum at this lag; out-of-range
            # indices are simply skipped.
            s = 0
            for i in range(n):
                j = i + delay
                if j < 0 or j >= n:
                    continue
                else:
                    s += ((a_values[i] - a_avg) * (b_values[j] - b_avg))
            r = s / denom if denom != 0 else s
            correlations.append([delay_in_seconds, r])
            # Take shift into account to create a "shifted correlation coefficient".
            if self.max_shift_milliseconds:
                shifted_correlations.append(r * (1 + float(delay_in_seconds) / self.max_shift_milliseconds * self.shift_impact))
            else:
                shifted_correlations.append(r)
        max_correlation = list(max(correlations, key=lambda k: k[1]))
        max_shifted_correlation = max(shifted_correlations)
        max_correlation.append(max_shifted_correlation)
        self.correlation_result = CorrelationResult(*max_correlation)

    def _find_allowed_shift(self, timestamps):
        """
        Find the maximum allowed shift steps based on max_shift_milliseconds.
        param list timestamps: timestamps of a time series.
        """
        init_ts = timestamps[0]
        residual_timestamps = [ts - init_ts for ts in timestamps]
        n = len(residual_timestamps)
        return self._find_first_bigger(residual_timestamps, self.max_shift_milliseconds, 0, n)

    def _find_first_bigger(self, timestamps, target, lower_bound, upper_bound):
        """
        Return the index of the first element of ``timestamps`` in
        [lower_bound, upper_bound) whose value is strictly bigger than
        ``target``, clamped to ``upper_bound - 1`` so callers always
        receive a usable index.

        Fixed: the previous implementation returned the last probed
        midpoint, which is not necessarily the first element bigger than
        the target (e.g. [0, 10, 20] with target 5 returned index 0
        instead of 1), and raised UnboundLocalError on an empty range.
        """
        lo, hi = lower_bound, upper_bound
        while lo < hi:
            mid = (lo + hi) // 2
            if timestamps[mid] > target:
                hi = mid
            else:
                lo = mid + 1
        # `lo` is now the first index with timestamps[lo] > target (or
        # upper_bound if none); clamp so the caller's shift step stays a
        # valid index into the timestamp list.
        return min(lo, upper_bound - 1)
| 43.637255 | 128 | 0.642103 |
from geoist.snoopy.algorithms.correlator_algorithms import CorrelatorAlgorithm
from geoist.snoopy.modules.correlation_result import CorrelationResult
from geoist.snoopy.constants import (DEFAULT_SHIFT_IMPACT,
DEFAULT_ALLOWED_SHIFT_SECONDS)
class CrossCorrelator(CorrelatorAlgorithm):
    """
    Cross-correlation between two time series, allowing a bounded time shift
    between them. The best (possibly shifted) correlation is stored in
    ``self.correlation_result``.
    """
    def __init__(self, time_series_a, time_series_b, max_shift_seconds=None, shift_impact=None):
        """
        :param time_series_a: first time series.
        :param time_series_b: second time series.
        :param int max_shift_seconds: maximal allowed shift between the series.
        :param float shift_impact: penalty factor applied to shifted correlations.
        """
        super(CrossCorrelator, self).__init__(self.__class__.__name__, time_series_a, time_series_b)
        self.shift_impact = shift_impact or DEFAULT_SHIFT_IMPACT
        if max_shift_seconds is not None:
            self.max_shift_milliseconds = max_shift_seconds
        else:
            self.max_shift_milliseconds = DEFAULT_ALLOWED_SHIFT_SECONDS * 1000
    def _detect_correlation(self):
        """
        Compute Pearson correlation for every allowed shift (delay) and keep
        the best one; a shift-penalized variant is tracked in parallel.
        """
        correlations = []
        shifted_correlations = []
        self.time_series_a.normalize()
        self.time_series_b.normalize()
        # Align both series on common timestamps before correlating.
        a, b = self.time_series_a.align(self.time_series_b)
        a_values, b_values = a.values, b.values
        a_avg, b_avg = a.average(), b.average()
        a_stdev, b_stdev = a.stdev(), b.stdev()
        n = len(a)
        denom = a_stdev * b_stdev * n
        allowed_shift_step = self._find_allowed_shift(a.timestamps)
        if allowed_shift_step:
            shift_upper_bound = allowed_shift_step
            shift_lower_bound = -allowed_shift_step
        else:
            # No shift allowed: evaluate delay 0 only.
            shift_upper_bound = 1
            shift_lower_bound = 0
        for delay in range(shift_lower_bound, shift_upper_bound):
            delay_in_seconds = a.timestamps[abs(delay)] - a.timestamps[0]
            if delay < 0:
                delay_in_seconds = -delay_in_seconds
            s = 0
            for i in range(n):
                j = i + delay
                # Pairs shifted outside the series are skipped.
                if j < 0 or j >= n:
                    continue
                else:
                    s += ((a_values[i] - a_avg) * (b_values[j] - b_avg))
            r = s / denom if denom != 0 else s
            correlations.append([delay_in_seconds, r])
            # Penalize (negative delays reward) the correlation by the shift size.
            if self.max_shift_milliseconds:
                shifted_correlations.append(r * (1 + float(delay_in_seconds) / self.max_shift_milliseconds * self.shift_impact))
            else:
                shifted_correlations.append(r)
        max_correlation = list(max(correlations, key=lambda k: k[1]))
        max_shifted_correlation = max(shifted_correlations)
        max_correlation.append(max_shifted_correlation)
        self.correlation_result = CorrelationResult(*max_correlation)
    def _find_allowed_shift(self, timestamps):
        """
        Find the maximum allowed shift steps based on max_shift_milliseconds.

        param list timestamps: timestamps of a time series.
        """
        init_ts = timestamps[0]
        residual_timestamps = [ts - init_ts for ts in timestamps]
        n = len(residual_timestamps)
        return self._find_first_bigger(residual_timestamps, self.max_shift_milliseconds, 0, n)
    def _find_first_bigger(self, timestamps, target, lower_bound, upper_bound):
        """
        Find the first element in timestamps whose value is bigger than target,
        by binary search.

        param list timestamps: list of timestamps (epoch numbers).
        param target: target value.
        param lower_bound: lower bound for binary search.
        param upper_bound: upper bound for binary search.
        """
        # NOTE(review): returns the last probed `pos`, not `lower_bound`; this
        # can be one below the first element > target — confirm intent.
        while lower_bound < upper_bound:
            pos = lower_bound + (upper_bound - lower_bound) / 2
            pos = int(pos)
            if timestamps[pos] > target:
                upper_bound = pos
            else:
                lower_bound = pos + 1
        return pos
| true | true |
f71ca44defb36643ad8a93f4726f956b8b913e57 | 346 | py | Python | Algorithms/746/min-cost-climbing-stairs.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | Algorithms/746/min-cost-climbing-stairs.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | Algorithms/746/min-cost-climbing-stairs.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
    def minCostClimbingStairs(self, cost: List[int]) -> int:
        """Return the minimum total cost to climb past the last stair.

        You may start at step 0 or 1 and advance one or two steps at a time,
        paying cost[i] when stepping off stair i. Rolling two-variable
        dynamic programming: O(n) time, O(1) space.
        """
        below, here = 0, 0
        for price in cost:
            # Cheapest way to step off the current stair.
            below, here = here, min(below, here) + price
        return min(below, here)
class Solution:
    def minCostClimbingStairs(self, cost: List[int]) -> int:
        """Minimum cost to reach the top of the staircase (index len(cost)).

        best[i] is the cheapest cost of standing on step i; each step is
        reached from one or two steps below.
        """
        total = len(cost)
        best = [0] * (total + 1)
        for i in range(2, total + 1):
            one_step = best[i - 1] + cost[i - 1]
            two_steps = best[i - 2] + cost[i - 2]
            best[i] = one_step if one_step < two_steps else two_steps
        return best[total]
f71ca45c2a4d1c7deaea184b4a83e5e006c32425 | 90 | py | Python | regtests/str/mul.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
] | 319 | 2015-01-02T11:34:16.000Z | 2022-03-25T00:43:33.000Z | regtests/str/mul.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
] | 10 | 2015-02-03T02:33:09.000Z | 2021-11-09T21:41:00.000Z | regtests/str/mul.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
] | 61 | 2015-01-02T12:01:56.000Z | 2021-12-08T07:16:16.000Z | """string multiplication"""
def main():
    # String multiplication: 'hi' * 2 repeats the string, giving 'hihi'.
    a = 'hi'
    b = a * 2
    TestError( b == 'hihi' )
| 10 | 27 | 0.522222 |
def main():
    """Regression test: string repetition with the ``*`` operator."""
    doubled = 'hi' * 2
    TestError( doubled == 'hihi' )
| true | true |
f71ca4a04ecbc21aada0d63286c6160730dff7df | 1,204 | py | Python | pyro/distributions/reflected.py | ajrcampbell/pyro | 37680e6d08f20cda95729427143f17875484b21d | [
"MIT"
] | null | null | null | pyro/distributions/reflected.py | ajrcampbell/pyro | 37680e6d08f20cda95729427143f17875484b21d | [
"MIT"
] | null | null | null | pyro/distributions/reflected.py | ajrcampbell/pyro | 37680e6d08f20cda95729427143f17875484b21d | [
"MIT"
] | null | null | null | from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from pyro.distributions.torch import TransformedDistribution
class ReflectedDistribution(TransformedDistribution):
    """
    Equivalent to ``TransformedDistribution(base_dist, AbsTransform())``,
    but additionally supports :meth:`log_prob` .

    :param ~torch.distributions.Distribution base_dist: The distribution to
        reflect.
    """
    support = constraints.positive

    def __init__(self, base_dist, validate_args=None):
        # Reflection is only well defined for univariate distributions.
        if base_dist.event_shape:
            raise ValueError("Only univariate distributions can be reflected.")
        super().__init__(base_dist, AbsTransform(), validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new instance with batch dimensions expanded to ``batch_shape``."""
        new = self._get_checked_instance(type(self), _instance)
        return super().expand(batch_shape, _instance=new)

    def log_prob(self, value):
        """
        Log density of the reflected variable: log(p(value) + p(-value)),
        computed stably via ``logsumexp`` over the pair (+value, -value).
        """
        if self._validate_args:
            self._validate_sample(value)
        dim = max(len(self.batch_shape), value.dim())
        # Signs stacked along a new leading axis so both reflections broadcast
        # against value in a single log_prob call.
        plus_minus = value.new_tensor([1., -1.]).reshape((2,) + (1,) * dim)
        return self.base_dist.log_prob(plus_minus * value).logsumexp(0)
| 37.625 | 79 | 0.709302 | from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from pyro.distributions.torch import TransformedDistribution
class ReflectedDistribution(TransformedDistribution):
    """
    Distribution of ``|X|`` for a univariate base distribution: a
    ``TransformedDistribution`` with :class:`AbsTransform` that also
    implements :meth:`log_prob`.
    """
    support = constraints.positive
    def __init__(self, base_dist, validate_args=None):
        # Only univariate bases are supported (event_shape must be empty).
        if base_dist.event_shape:
            raise ValueError("Only univariate distributions can be reflected.")
        super().__init__(base_dist, AbsTransform(), validate_args)
    def expand(self, batch_shape, _instance=None):
        # Standard torch.distributions expand protocol.
        new = self._get_checked_instance(type(self), _instance)
        return super().expand(batch_shape, _instance=new)
    def log_prob(self, value):
        # Density of |X| at v is p(v) + p(-v); evaluate both signs at once
        # and reduce with logsumexp over the leading (sign) axis.
        if self._validate_args:
            self._validate_sample(value)
        dim = max(len(self.batch_shape), value.dim())
        plus_minus = value.new_tensor([1., -1.]).reshape((2,) + (1,) * dim)
        return self.base_dist.log_prob(plus_minus * value).logsumexp(0)
| true | true |
f71ca57230e7a9c4e629ca823816dd4a71bdd7a4 | 572 | py | Python | localflavor/in_/models.py | stephendwolff/django-localflavor | 082d8539d2797c431bec38fe85e7894ea74b07ac | [
"BSD-3-Clause"
] | null | null | null | localflavor/in_/models.py | stephendwolff/django-localflavor | 082d8539d2797c431bec38fe85e7894ea74b07ac | [
"BSD-3-Clause"
] | null | null | null | localflavor/in_/models.py | stephendwolff/django-localflavor | 082d8539d2797c431bec38fe85e7894ea74b07ac | [
"BSD-3-Clause"
] | null | null | null | from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from .in_states import STATE_CHOICES
class INStateField(CharField):
    """
    A model field that forms represent as a ``forms.INStateField`` field and
    stores the two-letter Indian state abbreviation in the database.
    """
    description = _("Indian state (two uppercase letters)")

    def __init__(self, *args, **kwargs):
        # The choice set and width are fixed; any caller-supplied values
        # for these two kwargs are overridden.
        kwargs['choices'] = STATE_CHOICES
        kwargs['max_length'] = 2
        super(INStateField, self).__init__(*args, **kwargs)
| 31.777778 | 76 | 0.708042 | from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from .in_states import STATE_CHOICES
class INStateField(CharField):
    """CharField storing a two-letter Indian state abbreviation."""

    description = _("Indian state (two uppercase letters)")

    def __init__(self, *args, **kwargs):
        # Force the fixed choice set and width regardless of caller kwargs.
        kwargs.update(choices=STATE_CHOICES, max_length=2)
        super(INStateField, self).__init__(*args, **kwargs)
| true | true |
f71ca6f85ce1ce3a97c314e0b3fd3109c786d615 | 31,747 | py | Python | bot/orders/models.py | psemdel/py-trading-bot | 69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019 | [
"MIT"
] | null | null | null | bot/orders/models.py | psemdel/py-trading-bot | 69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019 | [
"MIT"
] | 1 | 2022-02-07T21:13:55.000Z | 2022-02-07T21:13:55.000Z | bot/orders/models.py | psemdel/py-trading-bot | 69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from django.db.models import Q
import asyncio
from ib_insync import IB, Stock, MarketOrder, util
from core.common import empty_append
from core.indicators import rel_dif
import vectorbtpro as vbt
import sys
import math
import pandas as pd
import numpy as np
from trading_bot.settings import (PERFORM_ORDER, USE_IB_FOR_DATA,DIC_PERFORM_ORDER,
IB_LOCALHOST, IB_PORT)
### Interactive brockers and data retrieval ###
'''
Contains:
- Communication with Interactive brokers
- Retrieval of live data (Interactive brokers or YFinance)
- Performing order
- Models for financial products, stock exchanges...
Note: for some reasons, it does not work if myIB class is not in models
'''
## All symbols must be from same stock exchange
def retrieve_data(symbols,period,**kwargs):
    """
    Retrieve OHLCV data for the given symbols and for the reference index of
    their exchange. Uses Interactive Brokers when configured and supported
    for the exchange, otherwise Yahoo Finance through vectorbt.

    All symbols must be from the same stock exchange.

    :param list symbols: YF symbols to retrieve.
    :param str period: amount of history, e.g. "10d", "3mo", "1y".
    :param kwargs:
        index (bool): look the symbols up as indexes instead of actions.
        useIB (bool): force IB even if USE_IB_FOR_DATA is False.
        interval (str): bar size, e.g. "15m", "1h", "1d" (default one day).
    :return: (high, low, close, open, volume) frames for the actions followed
        by the same five series for the index; None on exception.
    """
    try:
        IBok=True
        for symbol in symbols:
            if kwargs.get("index",False):
                action=Index.objects.get(symbol=symbol)
            else:
                action=Action.objects.get(symbol=symbol)

            # IB data retrieval is not supported for this exchange.
            if action.stock_ex.ib_ticker in ["BVME.ETF"]:
                IBok=False
                break
        # Reference index of the exchange of the (last inspected) action.
        index_symbol=exchange_to_symbol(action)

        if (USE_IB_FOR_DATA and IBok) or kwargs.get("useIB",False):
            # Translate e.g. "10d" -> "10 D" for IB's durationStr.
            fig= ''.join(x for x in period if x.isdigit())
            if period.find("d")!=-1:
                period_ib=fig +" D"
            elif period.find("mo")!=-1:
                period_ib=fig +" M"
            elif period.find("y")!=-1:
                period_ib=fig +" Y"

            # Bar size; IB expects e.g. '1 min', '1 hour', '1 day'.
            if kwargs.get("interval",False):
                interval_str=kwargs.get("interval")
                fig= ''.join(x for x in interval_str if x.isdigit())
                # BUG FIX: the unit was previously read from `period` instead
                # of the interval string, and `interval` could stay unbound.
                if interval_str.find("m")!=-1:
                    interval=fig +" mins"
                elif interval_str.find("h")!=-1:
                    interval=fig +" hours"
                elif interval_str.find("d")!=-1:
                    interval=fig +" day"
                else:
                    interval='1 day'
            else:
                interval='1 day'

            open_=[]
            close=[]
            low=[]
            high=[]
            volume=[]  # BUG FIX: volume was never initialized before the loop

            myIB=MyIB()
            for symbol in symbols:
                action=Action.objects.get(symbol=symbol)
                contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
                bars = myIB.ib.reqHistoricalData(
                        contract,
                        endDateTime='',
                        durationStr=period_ib, #"10 D","1 M"
                        barSizeSetting=interval, #"1 day", "1 min"
                        whatToShow='TRADES',
                        useRTH=True,
                        formatDate=1)
                df=util.df(bars)
                open_=empty_append(open_,df["open"].values,axis=1)
                close=empty_append(close,df["close"].values,axis=1)
                high=empty_append(high,df["high"].values,axis=1)
                low=empty_append(low,df["low"].values,axis=1)
                # BUG FIX: volume was appended to `low` instead of `volume`.
                volume=empty_append(volume,df["volume"].values,axis=1)

            cours_open=pd.DataFrame(data=open_,index=df["date"],columns=symbols)
            cours_close=pd.DataFrame(data=close,index=df["date"],columns=symbols)
            cours_low=pd.DataFrame(data=low,index=df["date"],columns=symbols)
            cours_high=pd.DataFrame(data=high,index=df["date"],columns=symbols)
            cours_volume=pd.DataFrame(data=volume,index=df["date"],columns=symbols)

            # Same request for the index of the exchange.
            action=Action.objects.get(symbol=index_symbol)
            contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
            bars = myIB.ib.reqHistoricalData(
                    contract,
                    endDateTime='',
                    durationStr=period_ib, #"10 D","1 M"
                    barSizeSetting=interval, #"1 day", "1 min"
                    whatToShow='TRADES',
                    useRTH=True,
                    formatDate=1)
            df=util.df(bars)
            cours_open_ind=df["open"]
            cours_close_ind=df["close"]
            cours_high_ind=df["high"]
            cours_low_ind=df["low"]
            cours_volume_ind=df["volume"]

            if len(cours_close_ind)!=len(cours_close):
                print("cours index is different from cours length")
            myIB.disconnect()
        else:
            # Yahoo Finance fallback: fetch actions and index in one request
            # so all series share the same (dropped-NaN) index.
            all_symbols=symbols+[index_symbol]
            cours=vbt.YFData.fetch(all_symbols, period=period,missing_index='drop',**kwargs)
            cours_action=cours.select(symbols)
            cours_open =cours_action.get('Open')
            cours_high=cours_action.get('High')
            cours_low=cours_action.get('Low')
            cours_close=cours_action.get('Close')
            cours_volume=cours_action.get('Volume')
            print("number of days retrieved: " + str(np.shape(cours_close)[0]))

            cours_index=cours.select(index_symbol)
            cours_open_ind =cours_index.get('Open')
            cours_high_ind=cours_index.get('High')
            cours_low_ind=cours_index.get('Low')
            cours_close_ind=cours_index.get('Close')
            cours_volume_ind=cours_index.get('Volume')

            debug=False
            if debug:
                for symbol in all_symbols:
                    data=vbt.YFData.fetch(symbol, period=period,**kwargs)
                    #knowing what we drop
                    close_debug=data.get("Close")
                    for ii in range(len(close_debug)):
                        if math.isnan(close_debug.values[ii]):
                            print(symbol)
                            print("dropping at least " + str(close_debug.index[ii]))

        return cours_high, cours_low, cours_close, cours_open, cours_volume, \
               cours_high_ind, cours_low_ind, cours_close_ind, cours_open_ind,\
               cours_volume_ind

    except Exception as msg:
        print(msg)
        print("exception in " + __name__)
        _, e_, exc_tb = sys.exc_info()
        print("line " + str(exc_tb.tb_lineno))
        print(msg)
def exchange_to_symbol(action):
    """Map an action's exchange (IB ticker) to the YF symbol of its reference index."""
    index_by_exchange = {
        "SBF": "^FCHI",
        "IBIS": "^GDAXI",
        "NASDAQ": "^IXIC",
        "BVME.ETF": "^IXIC",  # it is only ETF anyhow
    }
    # Unknown exchanges yield None, as in the original if/elif chain.
    return index_by_exchange.get(action.stock_ex.ib_ticker)
def get_exchange_actions(exchange):
    """Return the symbols of all actions (category "ACT") listed on the named exchange."""
    cat=ActionCategory.objects.get(short="ACT")
    stockEx=StockEx.objects.get(name=exchange)
    c1 = Q(category=cat)
    c2 = Q(stock_ex=stockEx)
    actions=Action.objects.filter(c1 & c2)
    return [ob.symbol for ob in actions]
def retrieve_ib_pf():
    """
    Read the current portfolio from the live IB positions.

    :return: (long_symbols, short_symbols), split on the sign of each position.
    """
    myIB=MyIB()
    pf=[]
    pf_short=[]

    for pos in myIB.ib.positions():
        contract=pos.contract
        # NOTE(review): Action declares ib_ticker both as a CharField and as a
        # method; the method shadows the field, so this field lookup may fail
        # — verify against the model definition.
        action=Action.objects.get(ib_ticker=contract.localSymbol)
        if pos.position>0:
            pf.append(action.symbol)
        else:
            pf_short.append(action.symbol)

    myIB.disconnect()
    return pf, pf_short
#for SL check
def get_last_price(symbol,**kwargs):
    """
    Return the present price of a product (used e.g. for stop-loss checks).

    :param str symbol: YF symbol of the product.
    :param kwargs: index (bool) — look the symbol up as an index.
    :return: last price, or None if an exception occurred.
    """
    try:
        if kwargs.get("index",False):
            action=Index.objects.get(symbol=symbol)
        else:
            action=Action.objects.get(symbol=symbol)

        if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in ["BVME.ETF"]:
            # Live price over the IB API.
            myIB=MyIB()
            contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
            cours_pres=myIB.get_last_price(contract)
            myIB.disconnect()
        else: #YF
            # Fallback: last close of a two-day Yahoo Finance fetch.
            cours=vbt.YFData.fetch([symbol], period="2d")
            cours_close=cours.get("Close")
            cours_pres=cours_close[symbol].iloc[-1]

        return cours_pres

    except Exception as msg:
        print(symbol)
        print("exception in " + __name__)
        print(msg)
def get_ratio(symbol,**kwargs):
    """
    Return the percentage variation of a product between the reference close
    (previous day) and the present price (or today's open with opening=True).

    :param str symbol: YF symbol of the product.
    :param kwargs:
        index (bool): look the symbol up as an index.
        opening (bool): use the opening price instead of the present price.
    :return: relative difference in percent, or None on exception.
    """
    try:
        if kwargs.get("index",False):
            action=Index.objects.get(symbol=symbol)
        else:
            action=Action.objects.get(symbol=symbol)

        if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in ["BVME.ETF"]:
            myIB=MyIB()
            contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
            cours_pres=myIB.get_last_price(contract)
            cours_ref, cours_open=myIB.get_past_closing_price(contract)
            if kwargs.get("opening",False):
                cours_pres=cours_open
            myIB.disconnect()
        else: #YF
            cours=vbt.YFData.fetch([symbol], period="2d")
            cours_close=cours.get("Close")
            # Reference: first close of the two-day window (previous day).
            cours_ref=cours_close[symbol].iloc[0]

            if kwargs.get("opening",False):
                cours_open=cours.get("Open")
                cours_pres=cours_open[symbol].iloc[-1]
            else:
                cours_pres=cours_close[symbol].iloc[-1]

        return rel_dif(cours_pres,
                       cours_ref
                       )*100

    except Exception as msg:
        print(symbol)
        print("exception in " + __name__)
        print(msg)
class MyIB():
    """
    Thin wrapper around an Interactive Brokers (ib_insync) connection:
    data retrieval, order placement, and portfolio bookkeeping in the DB.
    """
    def __init__(self):
        # ib_insync needs an asyncio event loop in the current thread.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.ib = IB()
        self.ib.connect(host=IB_LOCALHOST, port=IB_PORT, clientId=1)

    def cash_balance(self):
        """Return the account cash balance, or 0 if it cannot be read."""
        try:
            for v in self.ib.accountSummary():
                if v.tag == 'CashBalance':
                    return float(v.value)
        except:
            # NOTE(review): returns None (not 0) when the tag is absent and
            # no exception is raised — confirm callers handle that.
            return 0

    def test(self,symbol):
        """Debug helper: qualify the contract of a symbol and print the result."""
        action=Action.objects.get(symbol=symbol)
        contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
        print(self.ib.qualifyContracts(contract))

    def retrieve(self,contract,period):
        """Retrieve hourly historical bars for a contract as a DataFrame."""
        bars = self.ib.reqHistoricalData(
                contract,
                endDateTime='',
                durationStr=period, #"10 D","1 M"
                barSizeSetting='1 hour', #"1 day", "1 min"
                whatToShow='TRADES',
                useRTH=True,
                formatDate=1)
        return util.df(bars)

    def get_last_price(self,contract):
        """Return the last market price of a contract (blocks until data arrives)."""
        m_data = self.ib.reqMktData(contract)
        # NaN != NaN: loop until `last` is a real number.
        while m_data.last != m_data.last:  #Wait until data is in.
            self.ib.sleep(0.01)
        self.ib.cancelMktData(contract)
        return m_data.last

    def get_past_closing_price(self,contract):
        """Return (previous close, today's open) from a two-day daily history."""
        period="2 D"
        bars = self.ib.reqHistoricalData(
                contract,
                endDateTime='',
                durationStr=period, #"10 D","1 M"
                barSizeSetting='1 day', #"1 day", "1 min"
                whatToShow='TRADES',
                useRTH=True,
                formatDate=1)
        df=util.df(bars)
        return df.iloc[0]["close"], df.iloc[-1]["open"]

    def place(self,buy,ticker,currency,exchange,**kwargs): #quantity in euros
        """
        Place a market order: buy sized by kwargs order_size (cash amount),
        sell sized by kwargs quantity (number of shares).

        :return: (text, price, quantity) when filled.
        """
        # NOTE(review): the return arity is inconsistent — ("", 0) for the
        # placeholder ticker, a 3-tuple when filled, and implicit None when
        # not filled; callers unpack 3 values, so the last two paths raise.
        if ticker=="AAA":
            print("ticker not found")
            return "", 0
        else:
            contract = Stock(ticker, exchange, currency)
            self.ib.qualifyContracts(contract)

            if buy:
                order_size=kwargs.get("order_size",0)
                last_price=self.get_last_price(contract)
                quantity=math.floor(order_size/last_price)
                order = MarketOrder('BUY', quantity)
            else:
                quantity=kwargs.get("quantity",0)
                order = MarketOrder('SELL', quantity)
            trade = self.ib.placeOrder(contract, order)

            self.ib.sleep(1.0)
            if trade.orderStatus.status == 'Filled':
                fill = trade.fills[-1]
                txt=f'{fill.time} - {fill.execution.side} {fill.contract.symbol} {fill.execution.shares} @ {fill.execution.avgPrice}'
                price=fill.execution.avgPrice
                return txt, price, quantity

    def exit_order(self,symbol,strategy, exchange,short,**kwargs):
        """
        Sell a product held by the strategy's portfolio, record exit price and
        profit on the Order, free one order slot and update the portfolio.

        :return: True on success, False if not held / no active order,
            None on exception.
        """
        #type check necessary for indexes
        try:
            pf= get_pf(strategy, exchange,short)
            ocap=get_order_capital(strategy, exchange,short)

            if kwargs.get("index",False):
                index=Index.objects.get(symbol=symbol) #actually should be more complex
                if short:
                    action=index.etf_short
                else:
                    action=index.etf_long
            else:
                action=Action.objects.get(symbol=symbol)

            if symbol in pf.retrieve():
                c1 = Q(action=action)
                c2 = Q(active=True)
                order=Order.objects.filter(c1 & c2)

                #profit
                if len(order)>0:
                    txt, order[0].exiting_price, quantity= self.place(False,
                                           action.ib_ticker(),
                                           action.currency.symbol,
                                           action.stock_ex.ib_ticker,
                                           quantity=order[0].quantity)
                    order[0].exiting_date=timezone.now()

                    if order[0].entering_price is not None:
                        order[0].profit=order[0].exiting_price-order[0].entering_price
                        order[0].profit_percent=(order[0].exiting_price/order[0].entering_price-1)*100

                    order[0].active=False
                    order[0].save()

                    ocap.capital+=1
                    ocap.save()
                    pf.remove(symbol)
                    pf.save()

                    return True
                else:
                    print("order not found " + symbol)
                    return False
            return False

        except Exception as msg:
            print("exception in exit")
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def entry_order(self,symbol,strategy, exchange,short,**kwargs):
        """
        Buy a product for the strategy's portfolio if it is not already held,
        not excluded, an order slot is free and cash is sufficient.

        :param kwargs: sl (float) — stop-loss fraction below the entry price.
        :return: True when the order was placed, False otherwise, None on exception.
        """
        try:
            #type check necessary for indexes
            pf= get_pf(strategy, exchange,short)
            order_size=5000
            ocap=get_order_capital(strategy, exchange,short)
            #accountSummary

            if kwargs.get("index",False):
                index=Index.objects.get(symbol=symbol)
                if short:
                    action=index.etf_short
                else:
                    action=index.etf_long
            else:
                action=Action.objects.get(symbol=symbol)

            excluded=Excluded.objects.get(name="all") #list of actions completely excluded from entries

            if (symbol not in pf.retrieve() and
                symbol not in excluded.retrieve() and
                ocap.capital>0 and
                order_size<=self.cash_balance()):

                order=Order(action=action, pf=pf)
                txt, order.entering_price, order.quantity= self.place(True,
                                       action.ib_ticker(),
                                       action.currency.symbol,
                                       action.stock_ex.ib_ticker,
                                       order_size=order_size)
                if kwargs.get("sl",False):
                    sl=kwargs.get("sl")
                    order.sl_threshold=order.entering_price*(1-sl)
                order.save()

                pf.append(symbol)
                pf.save()
                ocap.capital-=1
                ocap.save()

                return True
            return False

        except Exception as msg:
            print("exception in " + __name__)
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def disconnect(self):
        """Close the IB connection."""
        self.ib.disconnect()
def check_hold_duration(symbol,strategy, exchange,short,**kwargs):
    """
    Return the number of days the symbol has been held in the strategy's
    portfolio (0 if not held, no active order, or on exception).
    """
    #type check necessary for indexes
    try:
        pf= get_pf(strategy, exchange,short)
        #accountSummary

        if kwargs.get("index",False):
            index=Index.objects.get(symbol=symbol)
            if short:
                action=index.etf_short
            else:
                action=index.etf_long
        else:
            action=Action.objects.get(symbol=symbol)

        if symbol in pf.retrieve():
            c1 = Q(action=action)
            c2 = Q(active=True)
            order=Order.objects.filter(c1 & c2)
            if len(order)>0:
                delta=timezone.now()-order[0].entering_date
                return delta.days

        return 0
    except Exception as msg:
        print("exception in " + __name__)
        print(msg)
        return 0
def entry_order(symbol,strategy, exchange,short,**kwargs):
    """
    Dispatch a buy: real IB order when order performing is enabled for this
    strategy, otherwise a simulated (DB-only) order.

    :return: (order_result, real_order_performed).
    """
    if PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]:
        myIB=MyIB()
        return myIB.entry_order(symbol,strategy, exchange,short,**kwargs), True
    else:
        return entry_order_test(symbol,strategy, exchange,short,**kwargs), False
def exit_order(symbol,strategy, exchange,short,**kwargs):
    """
    Dispatch a sell: real IB order when order performing is enabled for this
    strategy, otherwise a simulated (DB-only) order.

    :return: (order_result, real_order_performed).
    """
    if PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]:
        myIB=MyIB()
        return myIB.exit_order(symbol,strategy, exchange,short,**kwargs), True
    else:
        return exit_order_test(symbol,strategy, exchange,short,**kwargs), False
def entry_order_test(symbol,strategy, exchange,short,**kwargs):
    """
    Simulated entry: register the order in the database (entry price fixed to
    1.0) without contacting the broker.

    :return: True when recorded, False otherwise, None on exception.
    """
    try:
        #type check necessary for indexes
        pf= get_pf(strategy, exchange,short)
        ocap=get_order_capital(strategy, exchange,short)

        if kwargs.get("index",False):
            index=Index.objects.get(symbol=symbol)
            if short:
                action=index.etf_short
            else:
                action=index.etf_long
        else:
            action=Action.objects.get(symbol=symbol)
        symbol2=action.symbol

        excluded=Excluded.objects.get(name="all") #list of actions completely excluded from entries

        if (symbol2 not in pf.retrieve() and
            symbol2 not in excluded.retrieve() and
            ocap.capital>0):

            order=Order(action=action, pf=pf)
            order.entering_price=1.0
            order.save()
            #post telegram
            pf.append(symbol2)
            pf.save()
            ocap.capital-=1 #also for short
            ocap.save()

            return True
        return False
    except Exception as msg:
        print("exception in " + __name__)
        print(msg)
        _, e_, exc_tb = sys.exc_info()
        print("line " + str(exc_tb.tb_lineno))
        pass
def exit_order_test(symbol,strategy, exchange,short,**kwargs):
    """
    Simulated exit: deactivate the active order in the database and update the
    portfolio, without contacting the broker.

    :return: True when the product was held, False otherwise, None on exception.
    """
    try:
        pf= get_pf(strategy, exchange,short)
        ocap=get_order_capital(strategy, exchange,short)

        if kwargs.get("index",False):
            index=Index.objects.get(symbol=symbol) #actually should be more complex
            if short:
                action=index.etf_short
            else:
                action=index.etf_long
        else:
            action=Action.objects.get(symbol=symbol)
        symbol2=action.symbol

        if symbol2 in pf.retrieve():
            c1 = Q(action=action)
            c2 = Q(active=True)
            order=Order.objects.filter(c1 & c2)
            #post telegram
            #price
            #profit
            if len(order)>0:
                order[0].exiting_date=timezone.now()
                order[0].active=False
                order[0].save()

            ocap.capital+=1 #also for short
            ocap.save()
            pf.remove(symbol2)
            pf.save()

            return True
        return False
    except Exception as msg:
        print("exception in " + __name__)
        print(msg)
        _, e_, exc_tb = sys.exc_info()
        print("line " + str(exc_tb.tb_lineno))
        pass
class Currency(models.Model):
    """A trading currency (e.g. EUR), identified by name and symbol."""
    name=models.CharField(max_length=100, blank=False)
    symbol=models.CharField(max_length=100, blank=False,default="A")

    def __str__(self):
        return self.name
class Fees(models.Model):
    """Fee model of an exchange: a fixed part plus a percentage part."""
    name=models.CharField(max_length=100, blank=False, default="fee")
    fixed=models.DecimalField(max_digits=100, decimal_places=5)
    percent=models.DecimalField(max_digits=100, decimal_places=5)

    def __str__(self):
        return self.name
class StockEx(models.Model):
    """A stock exchange: fee model, IB identifier and trading hours."""
    name=models.CharField(max_length=100, blank=False)
    fees=models.ForeignKey('Fees',on_delete=models.CASCADE)
    ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
    opening_time=models.TimeField(default="09:00:00")
    closing_time=models.TimeField(default="17:00:00")

    def __str__(self):
        return self.name
class Strategy(models.Model):
    """A trading strategy, identified by name."""
    name=models.CharField(max_length=100, blank=False)

    def __str__(self):
        return self.name
### Index is like action, but it had to be separated, as an index cannot be bought directly
class Index(models.Model):
    """
    An index (like an action, but not directly buyable): it is traded through
    its associated long/short ETFs.
    """
    symbol=models.CharField(max_length=15, blank=False, primary_key=True)
    ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
    name=models.CharField(max_length=100, blank=False)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)
    currency=models.ForeignKey('Currency',on_delete=models.CASCADE)
    etf_long=models.ForeignKey('Action',on_delete=models.PROTECT,default=0,related_name='etf_long')
    etf_short=models.ForeignKey('Action',on_delete=models.PROTECT, default=0,related_name='etf_short')

    class Meta:
        ordering = ["name"]

    def ib_ticker(self):
        """Return the IB ticker derived from the symbol (without exchange suffix).

        BUG FIX: the original returned ``self.ib_ticker`` — i.e. this bound
        method itself, since the ``def`` shadows the CharField of the same
        name at class level — never a ticker string. Derive it from the
        symbol, consistent with ``Action.ib_ticker()``.
        NOTE(review): the shadowed CharField is never registered as a model
        field; consider renaming one of the two.
        """
        t=self.symbol.split(".")
        return t[0]

    def __str__(self):
        return self.name
class Action(models.Model):
    """A tradable financial product (stock or ETF)."""
    symbol=models.CharField(max_length=15, blank=False, primary_key=True)
    ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
    name=models.CharField(max_length=100, blank=False)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)
    currency=models.ForeignKey('Currency',on_delete=models.CASCADE)
    category=models.ForeignKey('ActionCategory',on_delete=models.CASCADE,blank=True)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)

    class Meta:
        ordering = ["name"]

    # NOTE(review): this method shadows the ib_ticker CharField declared above
    # (the later class attribute wins), so that field is never registered as a
    # model field — consider renaming one of the two.
    def ib_ticker(self):
        # IB ticker = symbol without the exchange suffix (e.g. "AIR.PA" -> "AIR").
        t=self.symbol.split(".")
        return t[0]

    def __str__(self):
        return self.name
class Order(models.Model):
    """A single buy/sell round-trip of an action within a portfolio."""
    action=models.ForeignKey('Action',on_delete=models.CASCADE)
    pf=models.ForeignKey('PF',on_delete=models.SET_NULL,blank=True,null=True)
    # True between entry and exit.
    active=models.BooleanField(blank=False,default=True)
    entering_date=models.DateTimeField(null=False, blank=False, auto_now_add=True)#default=timezone.now())
    exiting_date=models.DateTimeField(null=True, blank=True)
    entering_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    exiting_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    # Absolute stop-loss price level (entry price reduced by the sl fraction).
    sl_threshold=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    profit=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    profit_percent=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    quantity=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)

    def __str__(self):
        return self.action.name + " "+ str(self.entering_date)
def pf_retrieve_all(**kwargs):
    """
    Return the distinct symbols of all portfolios for one direction,
    optionally restricted to exchanges opening at a given hour.

    :param kwargs:
        short (bool): select short portfolios (default long).
        opening (str): "9h" -> Paris/XETRA only, "15h" -> Nasdaq only.
    """
    arr=[]

    for pf in PF.objects.filter(short=kwargs.get("short",False)):
        cat=ActionCategory.objects.get(short="ACT")
        c1 = Q(category=cat)

        if kwargs.get("opening")=="9h":
            stockEx1=StockEx.objects.filter(name="Paris")
            stockEx2=StockEx.objects.filter(name="XETRA")
            c2 = Q(stock_ex=stockEx1[0])
            c3 = Q(stock_ex=stockEx2[0])
            actions=pf.actions.filter(c1 & (c2|c3))
        elif kwargs.get("opening")=="15h":
            stockEx1=StockEx.objects.filter(name="Nasdaq")
            c2 = Q(stock_ex=stockEx1[0])
            actions=pf.actions.filter(c1 & c2)
        else:
            actions=pf.actions.filter(c1)

        for action in actions:
            # Deduplicate across portfolios.
            if not action.symbol in arr:
                arr.append(action.symbol)
    return arr
### Portfolio for a given strategy (used as name presently)
class PF(models.Model):
    """Portfolio of actions held by one strategy on one exchange and direction."""
    # can be replaced with ib.positions() or ib.portfolio()
    name=models.CharField(max_length=100, blank=False)
    actions=models.ManyToManyField(Action,blank=True)
    short=models.BooleanField(blank=False,default=False)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)

    def __str__(self):
        return self.name

    def retrieve(self):
        """Return the symbols of the portfolio as a list."""
        arr=[]
        for action in self.actions.all():
            arr.append(action.symbol)
        return arr

    def remove(self,symbol):
        """Remove the action with this symbol from the portfolio (best effort)."""
        a = Action.objects.get(symbol=symbol)

        try:
            self.actions.remove(a)
            self.save()
        except Exception as msg:
            print("exception in remove_symbol")
            print(symbol)
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def append(self,symbol):
        """Add the action with this symbol to the portfolio (best effort)."""
        try:
            a = Action.objects.get(symbol=symbol)
            self.actions.add(a)
            self.save()
        except Exception as msg:
            print("exception in " + __name__)
            print(symbol)
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass
def get_pf(strategy, exchange,short):
    """Return the PF (portfolio) for a strategy name, exchange name and direction."""
    s=Strategy.objects.get(name=strategy)
    e=StockEx.objects.get(name=exchange)

    c1 = Q(stock_ex=e)
    c2 = Q(strategy=s)
    c3 = Q(short=short)

    return PF.objects.get(c1 & c2 & c3)
### To distinguish between ETF, actions, indexes...
class ActionCategory(models.Model):
    """Product category (ETF, action, index...), keyed by a short code like "ACT"."""
    short=models.CharField(max_length=15, blank=False, default="AAA", primary_key=True)
    name=models.CharField(max_length=100, blank=False)

    def __str__(self):
        return self.name
###To define the capital assigned to one strategy.
###Not used presently
class Capital(models.Model):
    """Capital assigned to one strategy on one exchange. Not used presently."""
    #self.ib.accountSummary()
    capital=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    name=models.CharField(max_length=100, blank=False,default="")
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)

    def __str__(self):
        return self.name
def get_capital(strategy, exchange,short):
    """Return the Capital for a strategy name, exchange name and direction."""
    s=Strategy.objects.get(name=strategy)
    e=StockEx.objects.get(name=exchange)

    c1 = Q(stock_ex=e)
    c2 = Q(strategy=s)
    # NOTE(review): Capital has no `short` field in this file; this Q filters
    # on an attribute the model does not declare here — verify.
    c3 = Q(short=short)

    return Capital.objects.get(c1 & c2 & c3)
###To define the number of orders assigned to one strategy
###1 means that only one action can be owned at a time using this strategy
class OrderCapital(models.Model):
    """
    Number of orders assigned to one strategy; a capital of 1 means only one
    action can be owned at a time with this strategy.
    """
    capital=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    name=models.CharField(max_length=100, blank=False,default="")
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)

    def __str__(self):
        return self.name
def get_order_capital(strategy, exchange,short):
    """
    Return the OrderCapital for a strategy name and exchange name.

    The `short` argument is accepted for symmetry with get_pf/get_capital but
    is not used: OrderCapital has no direction field.
    """
    s=Strategy.objects.get(name=strategy)
    e=StockEx.objects.get(name=exchange)

    c1 = Q(stock_ex=e)
    c2 = Q(strategy=s)

    return OrderCapital.objects.get(c1 & c2)
###For strategy using two time frame, in the slow one (10 days) candidates are defined
###And on daily basis the other strategy decides which of the candidate is really bought or sold
class Candidates(models.Model):
    """
    Candidate actions pre-selected by a slow (e.g. 10-day) strategy; a faster
    daily strategy then decides which candidates are actually bought or sold.
    """
    name=models.CharField(max_length=100, blank=False)
    actions=models.ManyToManyField(Action,blank=True)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=1)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)

    def reset(self):
        """Remove all candidates."""
        for a in self.actions.all():
            self.actions.remove(a)
            self.save()

    def append(self,symbol): #so we can name as for list
        """Add the action with this symbol to the candidates."""
        a = Action.objects.get(symbol=symbol)
        self.actions.add(a)
        self.save()

    def retrieve(self):
        """Return the candidate symbols as a list."""
        arr=[]
        for action in self.actions.all():
            arr.append(action.symbol)
        return arr

    def __str__(self):
        return self.name
def get_candidates(strategy, exchange):
    """Return the Candidates for a strategy name and exchange name."""
    s=Strategy.objects.get(name=strategy)
    e=StockEx.objects.get(name=exchange)

    c1 = Q(stock_ex=e)
    c2 = Q(strategy=s)

    return Candidates.objects.get(c1 & c2)
### List of actions provisory excluded for a strategy as it risks to perform bad
class Excluded(models.Model):
    """Actions provisionally excluded from entries for a strategy (risk of bad performance)."""
    name=models.CharField(max_length=100, blank=False)
    actions=models.ManyToManyField(Action,blank=True)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)

    def reset(self):
        """Remove all excluded actions."""
        for a in self.actions.all():
            self.actions.remove(a)
            self.save()

    def append(self,symbol):
        """Add the action with this symbol to the exclusion list."""
        a = Action.objects.get(symbol=symbol)
        self.actions.add(a)
        self.save()

    def remove(self,symbol):
        """Remove the action with this symbol from the exclusion list (best effort)."""
        a = Action.objects.get(symbol=symbol)

        try:
            self.actions.remove(a)
            self.save()
        except Exception as msg:
            print("exception in " + __name__)
            print(symbol)
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def retrieve(self):
        """Return the excluded symbols as a list."""
        arr=[]
        for action in self.actions.all():
            arr.append(action.symbol)
        return arr

    def __str__(self):
        return self.name
### Define a list of actions and indexes that can be traded using the defined strategy
class StratCandidates(models.Model):
    """Actions and indexes that may be traded using the given strategy."""
    name=models.CharField(max_length=100, blank=False)
    actions=models.ManyToManyField(Action,blank=True)
    indexes=models.ManyToManyField(Index,blank=True)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)

    def retrieve(self):
        """Return the symbols of the associated actions."""
        arr=[]
        for action in self.actions.all():
            arr.append(action.symbol)
        return arr

    def __str__(self):
        return self.name
from django.utils import timezone
from django.db.models import Q
import asyncio
from ib_insync import IB, Stock, MarketOrder, util
from core.common import empty_append
from core.indicators import rel_dif
import vectorbtpro as vbt
import sys
import math
import pandas as pd
import numpy as np
from trading_bot.settings import (PERFORM_ORDER, USE_IB_FOR_DATA,DIC_PERFORM_ORDER,
IB_LOCALHOST, IB_PORT)
e):
action=Index.objects.get(symbol=symbol)
else:
action=Action.objects.get(symbol=symbol)
if action.stock_ex.ib_ticker in ["BVME.ETF"]:
IBok=False
break
index_symbol=exchange_to_symbol(action)
if (USE_IB_FOR_DATA and IBok) or kwargs.get("useIB",False):
fig= ''.join(x for x in period if x.isdigit())
if period.find("d")!=-1:
period_ib=fig +" D"
elif period.find("mo")!=-1:
period_ib=fig +" M"
elif period.find("y")!=-1:
period_ib=fig +" Y"
if kwargs.get("interval",False):
fig= ''.join(x for x in kwargs.get("interval") if x.isdigit())
if period.find("m")!=-1:
interval=fig +" mins"
elif period.find("h")!=-1:
interval=fig +" hours"
elif period.find("d")!=-1:
interval=fig +" day"
else:
interval='1 day'
open_=[]
close=[]
low=[]
high=[]
myIB=MyIB()
for symbol in symbols:
action=Action.objects.get(symbol=symbol)
contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
bars = myIB.ib.reqHistoricalData(
contract,
endDateTime='',
durationStr=period_ib,
barSizeSetting=interval,
whatToShow='TRADES',
useRTH=True,
formatDate=1)
df=util.df(bars)
open_=empty_append(open_,df["open"].values,axis=1)
close=empty_append(close,df["close"].values,axis=1)
high=empty_append(high,df["high"].values,axis=1)
low=empty_append(low,df["low"].values,axis=1)
volume=empty_append(low,df["volume"].values,axis=1)
cours_open=pd.DataFrame(data=open_,index=df["date"],columns=symbols)
cours_close=pd.DataFrame(data=close,index=df["date"],columns=symbols)
cours_low=pd.DataFrame(data=low,index=df["date"],columns=symbols)
cours_high=pd.DataFrame(data=high,index=df["date"],columns=symbols)
cours_volume=pd.DataFrame(data=volume,index=df["date"],columns=symbols)
action=Action.objects.get(symbol=index_symbol)
contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
bars = myIB.ib.reqHistoricalData(
contract,
endDateTime='',
durationStr=period_ib,
barSizeSetting=interval,
whatToShow='TRADES',
useRTH=True,
formatDate=1)
df=util.df(bars)
cours_open_ind=df["open"]
cours_close_ind=df["close"]
cours_high_ind=df["high"]
cours_low_ind=df["low"]
cours_volume_ind=df["volume"]
if len(cours_close_ind)!=len(cours_close):
print("cours index is different from cours length")
myIB.disconnect()
else:
all_symbols=symbols+[index_symbol]
cours=vbt.YFData.fetch(all_symbols, period=period,missing_index='drop',**kwargs)
cours_action=cours.select(symbols)
cours_open =cours_action.get('Open')
cours_high=cours_action.get('High')
cours_low=cours_action.get('Low')
cours_close=cours_action.get('Close')
cours_volume=cours_action.get('Volume')
print("number of days retrieved: " + str(np.shape(cours_close)[0]))
cours_index=cours.select(index_symbol)
cours_open_ind =cours_index.get('Open')
cours_high_ind=cours_index.get('High')
cours_low_ind=cours_index.get('Low')
cours_close_ind=cours_index.get('Close')
cours_volume_ind=cours_index.get('Volume')
debug=False
if debug:
for symbol in all_symbols:
data=vbt.YFData.fetch(symbol, period=period,**kwargs)
close_debug=data.get("Close")
for ii in range(len(close_debug)):
if math.isnan(close_debug.values[ii]):
print(symbol)
print("dropping at least " + str(close_debug.index[ii]))
return cours_high, cours_low, cours_close, cours_open, cours_volume, \
cours_high_ind, cours_low_ind, cours_close_ind, cours_open_ind,\
cours_volume_ind
except Exception as msg:
print(msg)
print("exception in " + __name__)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
print(msg)
def exchange_to_symbol(action):
if action.stock_ex.ib_ticker=="SBF":
return "^FCHI"
elif action.stock_ex.ib_ticker=="IBIS":
return "^GDAXI"
elif action.stock_ex.ib_ticker=="NASDAQ":
return "^IXIC"
elif action.stock_ex.ib_ticker=="BVME.ETF":
return "^IXIC"
def get_exchange_actions(exchange):
cat=ActionCategory.objects.get(short="ACT")
stockEx=StockEx.objects.get(name=exchange)
c1 = Q(category=cat)
c2 = Q(stock_ex=stockEx)
actions=Action.objects.filter(c1 & c2)
return [ob.symbol for ob in actions]
def retrieve_ib_pf():
myIB=MyIB()
pf=[]
pf_short=[]
for pos in myIB.ib.positions():
contract=pos.contract
action=Action.objects.get(ib_ticker=contract.localSymbol)
if pos.position>0:
pf.append(action.symbol)
else:
pf_short.append(action.symbol)
myIB.disconnect()
return pf, pf_short
def get_last_price(symbol,**kwargs):
try:
if kwargs.get("index",False):
action=Index.objects.get(symbol=symbol)
else:
action=Action.objects.get(symbol=symbol)
if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in ["BVME.ETF"]:
myIB=MyIB()
contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
cours_pres=myIB.get_last_price(contract)
myIB.disconnect()
else:
cours=vbt.YFData.fetch([symbol], period="2d")
cours_close=cours.get("Close")
cours_pres=cours_close[symbol].iloc[-1]
return cours_pres
except Exception as msg:
print(symbol)
print("exception in " + __name__)
print(msg)
def get_ratio(symbol,**kwargs):
try:
if kwargs.get("index",False):
action=Index.objects.get(symbol=symbol)
else:
action=Action.objects.get(symbol=symbol)
if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in ["BVME.ETF"]:
myIB=MyIB()
contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
cours_pres=myIB.get_last_price(contract)
cours_ref, cours_open=myIB.get_past_closing_price(contract)
if kwargs.get("opening",False):
cours_pres=cours_open
myIB.disconnect()
else:
cours=vbt.YFData.fetch([symbol], period="2d")
cours_close=cours.get("Close")
cours_ref=cours_close[symbol].iloc[0]
if kwargs.get("opening",False):
cours_open=cours.get("Open")
cours_pres=cours_open[symbol].iloc[-1]
else:
cours_pres=cours_close[symbol].iloc[-1]
return rel_dif(cours_pres,
cours_ref
)*100
except Exception as msg:
print(symbol)
print("exception in " + __name__)
print(msg)
class MyIB():
def __init__(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.ib = IB()
self.ib.connect(host=IB_LOCALHOST, port=IB_PORT, clientId=1)
def cash_balance(self):
try:
for v in self.ib.accountSummary():
if v.tag == 'CashBalance':
return float(v.value)
except:
return 0
def test(self,symbol):
action=Action.objects.get(symbol=symbol)
contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
print(self.ib.qualifyContracts(contract))
def retrieve(self,contract,period):
bars = self.ib.reqHistoricalData(
contract,
endDateTime='',
durationStr=period,
barSizeSetting='1 hour',
whatToShow='TRADES',
useRTH=True,
formatDate=1)
return util.df(bars)
def get_last_price(self,contract):
m_data = self.ib.reqMktData(contract)
while m_data.last != m_data.last:
self.ib.sleep(0.01)
self.ib.cancelMktData(contract)
return m_data.last
def get_past_closing_price(self,contract):
period="2 D"
bars = self.ib.reqHistoricalData(
contract,
endDateTime='',
durationStr=period,
barSizeSetting='1 day',
whatToShow='TRADES',
useRTH=True,
formatDate=1)
df=util.df(bars)
return df.iloc[0]["close"], df.iloc[-1]["open"]
def place(self,buy,ticker,currency,exchange,**kwargs):
if ticker=="AAA":
print("ticker not found")
return "", 0
else:
contract = Stock(ticker, exchange, currency)
self.ib.qualifyContracts(contract)
if buy:
order_size=kwargs.get("order_size",0)
last_price=self.get_last_price(contract)
quantity=math.floor(order_size/last_price)
order = MarketOrder('BUY', quantity)
else:
quantity=kwargs.get("quantity",0)
order = MarketOrder('SELL', quantity)
trade = self.ib.placeOrder(contract, order)
self.ib.sleep(1.0)
if trade.orderStatus.status == 'Filled':
fill = trade.fills[-1]
txt=f'{fill.time} - {fill.execution.side} {fill.contract.symbol} {fill.execution.shares} @ {fill.execution.avgPrice}'
price=fill.execution.avgPrice
return txt, price, quantity
def exit_order(self,symbol,strategy, exchange,short,**kwargs):
try:
pf= get_pf(strategy, exchange,short)
ocap=get_order_capital(strategy, exchange,short)
if kwargs.get("index",False):
index=Index.objects.get(symbol=symbol)
if short:
action=index.etf_short
else:
action=index.etf_long
else:
action=Action.objects.get(symbol=symbol)
if symbol in pf.retrieve():
c1 = Q(action=action)
c2 = Q(active=True)
order=Order.objects.filter(c1 & c2)
if len(order)>0:
txt, order[0].exiting_price, quantity= self.place(False,
action.ib_ticker(),
action.currency.symbol,
action.stock_ex.ib_ticker,
quantity=order[0].quantity)
order[0].exiting_date=timezone.now()
if order[0].entering_price is not None:
order[0].profit=order[0].exiting_price-order[0].entering_price
order[0].profit_percent=(order[0].exiting_price/order[0].entering_price-1)*100
order[0].active=False
order[0].save()
ocap.capital+=1
ocap.save()
pf.remove(symbol)
pf.save()
return True
else:
print("order not found " + symbol)
return False
return False
except Exception as msg:
print("exception in exit")
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
def entry_order(self,symbol,strategy, exchange,short,**kwargs):
try:
pf= get_pf(strategy, exchange,short)
order_size=5000
ocap=get_order_capital(strategy, exchange,short)
if kwargs.get("index",False):
index=Index.objects.get(symbol=symbol)
if short:
action=index.etf_short
else:
action=index.etf_long
else:
action=Action.objects.get(symbol=symbol)
excluded=Excluded.objects.get(name="all")
if (symbol not in pf.retrieve() and
symbol not in excluded.retrieve() and
ocap.capital>0 and
order_size<=self.cash_balance()):
order=Order(action=action, pf=pf)
txt, order.entering_price, order.quantity= self.place(True,
action.ib_ticker(),
action.currency.symbol,
action.stock_ex.ib_ticker,
order_size=order_size)
if kwargs.get("sl",False):
sl=kwargs.get("sl")
order.sl_threshold=order.entering_price*(1-sl)
order.save()
pf.append(symbol)
pf.save()
ocap.capital-=1
ocap.save()
return True
return False
except Exception as msg:
print("exception in " + __name__)
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
def disconnect(self):
self.ib.disconnect()
def check_hold_duration(symbol,strategy, exchange,short,**kwargs):
try:
pf= get_pf(strategy, exchange,short)
if kwargs.get("index",False):
index=Index.objects.get(symbol=symbol)
if short:
action=index.etf_short
else:
action=index.etf_long
else:
action=Action.objects.get(symbol=symbol)
if symbol in pf.retrieve():
c1 = Q(action=action)
c2 = Q(active=True)
order=Order.objects.filter(c1 & c2)
if len(order)>0:
delta=timezone.now()-order[0].entering_date
return delta.days
return 0
except Exception as msg:
print("exception in " + __name__)
print(msg)
return 0
def entry_order(symbol,strategy, exchange,short,**kwargs):
if PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]:
myIB=MyIB()
return myIB.entry_order(symbol,strategy, exchange,short,**kwargs), True
else:
return entry_order_test(symbol,strategy, exchange,short,**kwargs), False
def exit_order(symbol,strategy, exchange,short,**kwargs):
if PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]:
myIB=MyIB()
return myIB.exit_order(symbol,strategy, exchange,short,**kwargs), True
else:
return exit_order_test(symbol,strategy, exchange,short,**kwargs), False
def entry_order_test(symbol,strategy, exchange,short,**kwargs):
try:
pf= get_pf(strategy, exchange,short)
ocap=get_order_capital(strategy, exchange,short)
if kwargs.get("index",False):
index=Index.objects.get(symbol=symbol)
if short:
action=index.etf_short
else:
action=index.etf_long
else:
action=Action.objects.get(symbol=symbol)
symbol2=action.symbol
excluded=Excluded.objects.get(name="all")
if (symbol2 not in pf.retrieve() and
symbol2 not in excluded.retrieve() and
ocap.capital>0):
order=Order(action=action, pf=pf)
order.entering_price=1.0
order.save()
pf.append(symbol2)
pf.save()
ocap.capital-=1
ocap.save()
return True
return False
except Exception as msg:
print("exception in " + __name__)
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
def exit_order_test(symbol,strategy, exchange,short,**kwargs):
try:
pf= get_pf(strategy, exchange,short)
ocap=get_order_capital(strategy, exchange,short)
if kwargs.get("index",False):
index=Index.objects.get(symbol=symbol)
if short:
action=index.etf_short
else:
action=index.etf_long
else:
action=Action.objects.get(symbol=symbol)
symbol2=action.symbol
if symbol2 in pf.retrieve():
c1 = Q(action=action)
c2 = Q(active=True)
order=Order.objects.filter(c1 & c2)
if len(order)>0:
order[0].exiting_date=timezone.now()
order[0].active=False
order[0].save()
ocap.capital+=1
ocap.save()
pf.remove(symbol2)
pf.save()
return True
return False
except Exception as msg:
print("exception in " + __name__)
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
class Currency(models.Model):
name=models.CharField(max_length=100, blank=False)
symbol=models.CharField(max_length=100, blank=False,default="A")
def __str__(self):
return self.name
class Fees(models.Model):
name=models.CharField(max_length=100, blank=False, default="fee")
fixed=models.DecimalField(max_digits=100, decimal_places=5)
percent=models.DecimalField(max_digits=100, decimal_places=5)
def __str__(self):
return self.name
class StockEx(models.Model):
name=models.CharField(max_length=100, blank=False)
fees=models.ForeignKey('Fees',on_delete=models.CASCADE)
ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
opening_time=models.TimeField(default="09:00:00")
closing_time=models.TimeField(default="17:00:00")
def __str__(self):
return self.name
class Strategy(models.Model):
name=models.CharField(max_length=100, blank=False)
def __str__(self):
return self.name
ame=models.CharField(max_length=100, blank=False)
stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)
currency=models.ForeignKey('Currency',on_delete=models.CASCADE)
etf_long=models.ForeignKey('Action',on_delete=models.PROTECT,default=0,related_name='etf_long')
etf_short=models.ForeignKey('Action',on_delete=models.PROTECT, default=0,related_name='etf_short')
class Meta:
ordering = ["name"]
def ib_ticker(self):
return self.ib_ticker
def __str__(self):
return self.name
class Action(models.Model):
symbol=models.CharField(max_length=15, blank=False, primary_key=True)
ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
name=models.CharField(max_length=100, blank=False)
stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)
currency=models.ForeignKey('Currency',on_delete=models.CASCADE)
category=models.ForeignKey('ActionCategory',on_delete=models.CASCADE,blank=True)
strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)
class Meta:
ordering = ["name"]
def ib_ticker(self):
t=self.symbol.split(".")
return t[0]
def __str__(self):
return self.name
class Order(models.Model):
action=models.ForeignKey('Action',on_delete=models.CASCADE)
pf=models.ForeignKey('PF',on_delete=models.SET_NULL,blank=True,null=True)
active=models.BooleanField(blank=False,default=True)
entering_date=models.DateTimeField(null=False, blank=False, auto_now_add=True)
exiting_date=models.DateTimeField(null=True, blank=True)
entering_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
exiting_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
sl_threshold=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
profit=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
profit_percent=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
quantity=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
def __str__(self):
return self.action.name + " "+ str(self.entering_date)
def pf_retrieve_all(**kwargs):
arr=[]
for pf in PF.objects.filter(short=kwargs.get("short",False)):
cat=ActionCategory.objects.get(short="ACT")
c1 = Q(category=cat)
if kwargs.get("opening")=="9h":
stockEx1=StockEx.objects.filter(name="Paris")
stockEx2=StockEx.objects.filter(name="XETRA")
c2 = Q(stock_ex=stockEx1[0])
c3 = Q(stock_ex=stockEx2[0])
actions=pf.actions.filter(c1 & (c2|c3))
elif kwargs.get("opening")=="15h":
stockEx1=StockEx.objects.filter(name="Nasdaq")
c2 = Q(stock_ex=stockEx1[0])
actions=pf.actions.filter(c1 & c2)
else:
actions=pf.actions.filter(c1)
for action in actions:
if not action.symbol in arr:
arr.append(action.symbol)
return arr
ield(Action,blank=True)
short=models.BooleanField(blank=False,default=False)
strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)
def __str__(self):
return self.name
def retrieve(self):
arr=[]
for action in self.actions.all():
arr.append(action.symbol)
return arr
def remove(self,symbol):
a = Action.objects.get(symbol=symbol)
try:
self.actions.remove(a)
self.save()
except Exception as msg:
print("exception in remove_symbol")
print(symbol)
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
def append(self,symbol):
try:
a = Action.objects.get(symbol=symbol)
self.actions.add(a)
self.save()
except Exception as msg:
print("exception in " + __name__)
print(symbol)
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
def get_pf(strategy, exchange,short):
s=Strategy.objects.get(name=strategy)
e=StockEx.objects.get(name=exchange)
c1 = Q(stock_ex=e)
c2 = Q(strategy=s)
c3 = Q(short=short)
return PF.objects.get(c1 & c2 & c3)
="AAA", primary_key=True)
name=models.CharField(max_length=100, blank=False)
def __str__(self):
return self.name
harField(max_length=100, blank=False,default="")
strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)
def __str__(self):
return self.name
def get_capital(strategy, exchange,short):
s=Strategy.objects.get(name=strategy)
e=StockEx.objects.get(name=exchange)
c1 = Q(stock_ex=e)
c2 = Q(strategy=s)
c3 = Q(short=short)
return Capital.objects.get(c1 & c2 & c3)
lank=True)
stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)
def __str__(self):
return self.name
def get_order_capital(strategy, exchange,short):
s=Strategy.objects.get(name=strategy)
e=StockEx.objects.get(name=exchange)
c1 = Q(stock_ex=e)
c2 = Q(strategy=s)
return OrderCapital.objects.get(c1 & c2)
for a in self.actions.all():
self.actions.remove(a)
self.save()
def append(self,symbol):
a = Action.objects.get(symbol=symbol)
self.actions.add(a)
self.save()
def retrieve(self):
arr=[]
for action in self.actions.all():
arr.append(action.symbol)
return arr
def __str__(self):
return self.name
def get_candidates(strategy, exchange):
s=Strategy.objects.get(name=strategy)
e=StockEx.objects.get(name=exchange)
c1 = Q(stock_ex=e)
c2 = Q(strategy=s)
return Candidates.objects.get(c1 & c2)
egy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
def reset(self):
for a in self.actions.all():
self.actions.remove(a)
self.save()
def append(self,symbol):
a = Action.objects.get(symbol=symbol)
self.actions.add(a)
self.save()
def remove(self,symbol):
a = Action.objects.get(symbol=symbol)
try:
self.actions.remove(a)
self.save()
except Exception as msg:
print("exception in " + __name__)
print(symbol)
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
def retrieve(self):
arr=[]
for action in self.actions.all():
arr.append(action.symbol)
return arr
def __str__(self):
return self.name
.ManyToManyField(Index,blank=True)
strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)
def retrieve(self):
arr=[]
for action in self.actions.all():
arr.append(action.symbol)
return arr
def __str__(self):
return self.name | true | true |
f71ca7306894b8080e9f8813e913c2b35a942d36 | 851 | py | Python | src/lib/enums.py | BlackParure/AI-StarCraft-II | 7feee4addff9881b3c735791f4a43421f813fcfc | [
"Apache-2.0"
] | 7 | 2019-01-17T16:46:24.000Z | 2020-09-09T06:35:26.000Z | src/lib/enums.py | BlackParure/AI-StarCraft-II | 7feee4addff9881b3c735791f4a43421f813fcfc | [
"Apache-2.0"
] | null | null | null | src/lib/enums.py | BlackParure/AI-StarCraft-II | 7feee4addff9881b3c735791f4a43421f813fcfc | [
"Apache-2.0"
] | null | null | null | from easydict import EasyDict as edict
# the corresponding semantics to the index of
# obs.observation.feature_minimap and obs.observation.feature_screen
feature_mini_id = edict()
feature_mini_id.HEIGHT_MAP = 0
feature_mini_id.VISIBILITY = 1
feature_mini_id.CREEP = 2
feature_mini_id.CAMERA = 3
feature_mini_id.PLAYER_ID = 4
feature_mini_id.PLAYER_RELATIVE = 5
feature_mini_id.PLAYER_SELECTED = 6
feature_screen_id = edict()
feature_screen_id.HEIGHT_MAP = 0
feature_screen_id.VISIBILITY = 1
feature_screen_id.CREEP = 2
feature_screen_id.POWER = 3
feature_screen_id.PLAYER_ID = 4
feature_screen_id.PLAYER_RELATIVE = 5
feature_screen_id.UNIT_TYPE = 6
feature_screen_id.SELECTED = 7
feature_screen_id.HIT_POINTS = 8
feature_screen_id.ENERGY = 9
feature_screen_id.SHIELDS = 10
feature_screen_id.UNIT_DENSITY = 11
feature_screen_id.UNIT_DENSITY_AA = 12
| 29.344828 | 68 | 0.836663 | from easydict import EasyDict as edict
feature_mini_id = edict()
feature_mini_id.HEIGHT_MAP = 0
feature_mini_id.VISIBILITY = 1
feature_mini_id.CREEP = 2
feature_mini_id.CAMERA = 3
feature_mini_id.PLAYER_ID = 4
feature_mini_id.PLAYER_RELATIVE = 5
feature_mini_id.PLAYER_SELECTED = 6
feature_screen_id = edict()
feature_screen_id.HEIGHT_MAP = 0
feature_screen_id.VISIBILITY = 1
feature_screen_id.CREEP = 2
feature_screen_id.POWER = 3
feature_screen_id.PLAYER_ID = 4
feature_screen_id.PLAYER_RELATIVE = 5
feature_screen_id.UNIT_TYPE = 6
feature_screen_id.SELECTED = 7
feature_screen_id.HIT_POINTS = 8
feature_screen_id.ENERGY = 9
feature_screen_id.SHIELDS = 10
feature_screen_id.UNIT_DENSITY = 11
feature_screen_id.UNIT_DENSITY_AA = 12
| true | true |
f71ca8df5ac6d2ef263acfbbb27f84f925bf74a8 | 455 | py | Python | projects_api/migrations/0032_user.py | sorianos/profile-rest-api | 453b326cf067a07455772c32050a17c31b5dc71a | [
"MIT"
] | null | null | null | projects_api/migrations/0032_user.py | sorianos/profile-rest-api | 453b326cf067a07455772c32050a17c31b5dc71a | [
"MIT"
] | 5 | 2021-03-19T11:56:51.000Z | 2022-02-10T14:08:09.000Z | projects_api/migrations/0032_user.py | sorianos/profile-rest-api | 453b326cf067a07455772c32050a17c31b5dc71a | [
"MIT"
] | 1 | 2020-10-29T17:41:34.000Z | 2020-10-29T17:41:34.000Z | # Generated by Django 2.2 on 2021-01-12 07:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects_api', '0031_auto_20201217_2330'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| 22.75 | 114 | 0.589011 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects_api', '0031_auto_20201217_2330'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| true | true |
f71ca96e2c4377bd676e8a3d35dfed029ac7363e | 16,669 | py | Python | web2py/applications/ControleEstoque/languages/fr.py | GuizaoBR/Controle-Estoque | b4d7e3c665a14ea77224fa448aaf7e3d4d6fe4ed | [
"Apache-2.0"
] | null | null | null | web2py/applications/ControleEstoque/languages/fr.py | GuizaoBR/Controle-Estoque | b4d7e3c665a14ea77224fa448aaf7e3d4d6fe4ed | [
"Apache-2.0"
] | null | null | null | web2py/applications/ControleEstoque/languages/fr.py | GuizaoBR/Controle-Estoque | b4d7e3c665a14ea77224fa448aaf7e3d4d6fe4ed | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
{
'!langcode!': 'fr',
'!langname!': 'Français',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%d/%m/%Y': '%d/%m/%Y',
'%d/%m/%Y %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'%s %%{row} deleted': '%s lignes supprimées',
'%s %%{row} updated': '%s lignes mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'?': '?',
'@markmin\x01(**%.0d MB**)': '(**%.0d MB**)',
'@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'A new password was emailed to you': 'A new password was emailed to you',
'about': 'à propos',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'admin': 'admin',
'Admin language': 'Admin language',
'Administrative Interface': "Interface d'administration",
'Administrative interface': "Interface d'administration",
'administrative interface': 'administrative interface',
'Ajax Recipes': 'Recettes Ajax',
'An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Apply changes': 'Apply changes',
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Authentication code': 'Authentication code',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Change password': 'Change password',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Vider le CACHE?',
'Clear DISK': 'Vider le DISQUE',
'Clear RAM': 'Vider la RAM',
'Click on the link %(link)s to reset your password': 'Click on the link %(link)s to reset your password',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Composants et Plugiciels',
'Config.ini': 'Config.ini',
'Controller': 'Contrôleur',
'Copyright': "Droit d'auteur",
'Created By': 'Créé par',
'created by': 'created by',
'Created On': 'Créé le',
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s selectionnée',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'db',
'DB Model': 'Modèle BD',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement',
'Description': 'Description',
'design': 'design',
'Design': 'Design',
'direction: ltr': 'direction: ltr',
'DISK': 'DISQUE',
'Disk Cache Keys': 'Clés de cache du disque',
'Disk Cleared': 'Disque vidé',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Documentation': 'Documentation',
"Don't know what to do?": 'Vous ne savez pas quoi faire?',
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'Courriel',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Courriel et texto',
'Email sent': 'Email sent',
'Email verification': 'Email verification',
'Email verified': 'Email verified',
'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'entrez un entier entre %(min)g et %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'FAQ',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Graph Model': 'Représentation graphique du modèle',
'Group %(group_id)s created': '%(group_id)s groupe créé',
'Group %(group_id)s deleted': 'Group %(group_id)s deleted',
'Group ID': 'ID du groupe',
'Group uniquely assigned to user %(id)s': "Groupe unique attribué à l'utilisateur %(id)s",
'Groups': 'Groupes',
'Hello World': 'Bonjour le monde',
'Helping web2py': 'Aider web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Home': 'Accueil',
'How did you get here?': 'How did you get here?',
'import': 'importer',
'Import/Export': 'Importer/Exporter',
'Incorrect code. {0} more attempt(s) remaining.': 'Incorrect code. {0} more attempt(s) remaining.',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Insufficient privileges': 'Insufficient privileges',
'Internal State': 'État interne',
'Introduction': 'Présentation',
'Invalid email': 'Courriel invalide',
'Invalid key': 'Invalid key',
'Invalid login': 'Invalid login',
'Invalid password': 'Invalid password',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Invalid reset password': 'Invalid reset password',
'Invalid user': 'Invalid user',
'Invalid username': 'Invalid username',
'Invitation to join %(site)s': 'Invitation to join %(site)s',
'Is Active': 'Est actif',
'Key': 'Clé',
'Key verified': 'Key verified',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Plugins de mise en page',
'Layouts': 'Mises en page',
'Live chat': 'Clavardage en direct',
'Live Chat': 'Clavardage en direct',
'Loading...': 'Chargement...',
'loading...': 'chargement...',
'Log In': 'Connexion',
'Logged in': 'Connecté',
'Logged out': 'Logged out',
'login': 'connexion',
'Login': 'Connexion',
'Login disabled by administrator': 'Login disabled by administrator',
'logout': 'déconnexion',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'Lost password?': 'Mot de passe perdu?',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Gérer le Cache',
'Memberships': 'Memberships',
'Menu Model': 'Menu modèle',
'Modified By': 'Modifié par',
'Modified On': 'Modifié le',
'My Sites': 'Mes sites',
'Name': 'Nom',
'New password': 'New password',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next %s rows': '%s prochaine lignes',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'no package selected': 'no package selected',
'Number of entries: **%s**': 'Number of entries: **%s**',
'Object or table name': 'Objet ou nom de table',
'Old password': 'Old password',
'Online book': 'Online book',
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Autres Plugiciels',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'password': 'mot de passe',
'Password': 'Mot de passe',
'Password changed': 'Password changed',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'Password reset': 'Password reset',
'Password retrieve': 'Password retrieve',
'Permission': 'Permission',
'Permissions': 'Permissions',
'please input your password again': "S'il vous plaît entrer votre mot de passe à nouveau",
'Plugins': 'Plugiciels',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous %s rows': '%s lignes précédentes',
'previous 100 rows': '100 lignes précédentes',
'profile': 'profil',
'Profile updated': 'Profile updated',
'pygraphviz library not found': 'Bibliothèque pygraphviz introuvable',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Exemples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram vidée',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'Record %(id)s created': 'Enregistrement %(id)s créé',
'Record %(id)s deleted': 'Record %(id)s deleted',
'Record %(id)s read': 'Record %(id)s read',
'Record %(id)s updated': 'Enregistrement %(id)s modifié',
'Record Created': 'Enregistrement créé',
'Record Deleted': 'Record Deleted',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID de l'enregistrement",
'Record id': "id de l'enregistrement",
'Record Updated': 'Enregistrement modifié',
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration identifier': "Identifiant d'inscription",
'Registration is pending approval': 'Registration is pending approval',
'Registration key': "Clé d'enregistrement",
'Registration needs verification': 'Registration needs verification',
'Registration successful': 'Inscription réussie',
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Roles': 'Rôles',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Save model as...': 'Enregistrer le modèle sous...',
'Semantic': 'Sémantique',
'Services': 'Services',
'Sign Up': "S'inscrire",
'Size of cache:': 'Taille de la mémoire cache:',
'state': 'état',
'Statistics': 'Statistiques',
'Stylesheet': 'Feuille de style',
'submit': 'soumettre',
'Submit': 'Soumettre',
'Support': 'Soutien',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "requête" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This code was emailed to you and is required for login.': 'This code was emailed to you and is required for login.',
'This email already has an account': 'This email already has an account',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Temps en Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Traceback': 'Traceback',
'Twitter': 'Twitter',
'Two-step Login Authentication Code': 'Two-step Login Authentication Code',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Unable to send email': 'Unable to send email',
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT afin de construire des requêtes plus complexes.',
'User': 'User',
'User %(id)s is impersonating %(other_id)s': 'User %(id)s is impersonating %(other_id)s',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Password changed': 'User %(id)s Password changed',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Password retrieved': 'User %(id)s Password retrieved',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User %(id)s Username retrieved': 'User %(id)s Username retrieved',
'User %(id)s Verification email sent': 'User %(id)s Verification email sent',
'User %(id)s verified registration key': 'User %(id)s verified registration key',
'User ID': 'ID utilisateur',
'User Voice': "Voix de l'utilisateur",
'Username': 'Username',
'Username already taken': 'Username already taken',
'Username retrieve': 'Username retrieve',
'Users': 'Users',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenue',
'Welcome %(username)s! Click on the link %(link)s to verify your email': 'Welcome %(username)s! Click on the link %(link)s to verify your email',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Bienvenue à web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'Working...': 'Working...',
'You are successfully running web2py': 'Vous exécutez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You have been invited to join %(site)s, click %(link)s to complete the process': 'You have been invited to join %(site)s, click %(link)s to complete the process',
'You visited the url %s': "Vous avez visité l'URL %s",
'Your password is: %(password)s': 'Your password is: %(password)s',
'Your temporary login code is {0}': 'Your temporary login code is {0}',
'Your username is: %(username)s': 'Your username is: %(username)s',
'Your username was emailed to you': 'Your username was emailed to you',
}
| 52.91746 | 293 | 0.669866 |
{
'!langcode!': 'fr',
'!langname!': 'Français',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%d/%m/%Y': '%d/%m/%Y',
'%d/%m/%Y %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'%s %%{row} deleted': '%s lignes supprimées',
'%s %%{row} updated': '%s lignes mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'?': '?',
'@markmin\x01(**%.0d MB**)': '(**%.0d MB**)',
'@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'A new password was emailed to you': 'A new password was emailed to you',
'about': 'à propos',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'admin': 'admin',
'Admin language': 'Admin language',
'Administrative Interface': "Interface d'administration",
'Administrative interface': "Interface d'administration",
'administrative interface': 'administrative interface',
'Ajax Recipes': 'Recettes Ajax',
'An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Apply changes': 'Apply changes',
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Authentication code': 'Authentication code',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Change password': 'Change password',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Vider le CACHE?',
'Clear DISK': 'Vider le DISQUE',
'Clear RAM': 'Vider la RAM',
'Click on the link %(link)s to reset your password': 'Click on the link %(link)s to reset your password',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Composants et Plugiciels',
'Config.ini': 'Config.ini',
'Controller': 'Contrôleur',
'Copyright': "Droit d'auteur",
'Created By': 'Créé par',
'created by': 'created by',
'Created On': 'Créé le',
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s selectionnée',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'db',
'DB Model': 'Modèle BD',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement',
'Description': 'Description',
'design': 'design',
'Design': 'Design',
'direction: ltr': 'direction: ltr',
'DISK': 'DISQUE',
'Disk Cache Keys': 'Clés de cache du disque',
'Disk Cleared': 'Disque vidé',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Documentation': 'Documentation',
"Don't know what to do?": 'Vous ne savez pas quoi faire?',
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'Courriel',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Courriel et texto',
'Email sent': 'Email sent',
'Email verification': 'Email verification',
'Email verified': 'Email verified',
'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'entrez un entier entre %(min)g et %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'FAQ',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Graph Model': 'Représentation graphique du modèle',
'Group %(group_id)s created': '%(group_id)s groupe créé',
'Group %(group_id)s deleted': 'Group %(group_id)s deleted',
'Group ID': 'ID du groupe',
'Group uniquely assigned to user %(id)s': "Groupe unique attribué à l'utilisateur %(id)s",
'Groups': 'Groupes',
'Hello World': 'Bonjour le monde',
'Helping web2py': 'Aider web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Home': 'Accueil',
'How did you get here?': 'How did you get here?',
'import': 'importer',
'Import/Export': 'Importer/Exporter',
'Incorrect code. {0} more attempt(s) remaining.': 'Incorrect code. {0} more attempt(s) remaining.',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Insufficient privileges': 'Insufficient privileges',
'Internal State': 'État interne',
'Introduction': 'Présentation',
'Invalid email': 'Courriel invalide',
'Invalid key': 'Invalid key',
'Invalid login': 'Invalid login',
'Invalid password': 'Invalid password',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Invalid reset password': 'Invalid reset password',
'Invalid user': 'Invalid user',
'Invalid username': 'Invalid username',
'Invitation to join %(site)s': 'Invitation to join %(site)s',
'Is Active': 'Est actif',
'Key': 'Clé',
'Key verified': 'Key verified',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Plugins de mise en page',
'Layouts': 'Mises en page',
'Live chat': 'Clavardage en direct',
'Live Chat': 'Clavardage en direct',
'Loading...': 'Chargement...',
'loading...': 'chargement...',
'Log In': 'Connexion',
'Logged in': 'Connecté',
'Logged out': 'Logged out',
'login': 'connexion',
'Login': 'Connexion',
'Login disabled by administrator': 'Login disabled by administrator',
'logout': 'déconnexion',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'Lost password?': 'Mot de passe perdu?',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Gérer le Cache',
'Memberships': 'Memberships',
'Menu Model': 'Menu modèle',
'Modified By': 'Modifié par',
'Modified On': 'Modifié le',
'My Sites': 'Mes sites',
'Name': 'Nom',
'New password': 'New password',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next %s rows': '%s prochaine lignes',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'no package selected': 'no package selected',
'Number of entries: **%s**': 'Number of entries: **%s**',
'Object or table name': 'Objet ou nom de table',
'Old password': 'Old password',
'Online book': 'Online book',
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Autres Plugiciels',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'password': 'mot de passe',
'Password': 'Mot de passe',
'Password changed': 'Password changed',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'Password reset': 'Password reset',
'Password retrieve': 'Password retrieve',
'Permission': 'Permission',
'Permissions': 'Permissions',
'please input your password again': "S'il vous plaît entrer votre mot de passe à nouveau",
'Plugins': 'Plugiciels',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous %s rows': '%s lignes précédentes',
'previous 100 rows': '100 lignes précédentes',
'profile': 'profil',
'Profile updated': 'Profile updated',
'pygraphviz library not found': 'Bibliothèque pygraphviz introuvable',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Exemples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram vidée',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'Record %(id)s created': 'Enregistrement %(id)s créé',
'Record %(id)s deleted': 'Record %(id)s deleted',
'Record %(id)s read': 'Record %(id)s read',
'Record %(id)s updated': 'Enregistrement %(id)s modifié',
'Record Created': 'Enregistrement créé',
'Record Deleted': 'Record Deleted',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID de l'enregistrement",
'Record id': "id de l'enregistrement",
'Record Updated': 'Enregistrement modifié',
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration identifier': "Identifiant d'inscription",
'Registration is pending approval': 'Registration is pending approval',
'Registration key': "Clé d'enregistrement",
'Registration needs verification': 'Registration needs verification',
'Registration successful': 'Inscription réussie',
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Roles': 'Rôles',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Save model as...': 'Enregistrer le modèle sous...',
'Semantic': 'Sémantique',
'Services': 'Services',
'Sign Up': "S'inscrire",
'Size of cache:': 'Taille de la mémoire cache:',
'state': 'état',
'Statistics': 'Statistiques',
'Stylesheet': 'Feuille de style',
'submit': 'soumettre',
'Submit': 'Soumettre',
'Support': 'Soutien',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "requête" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This code was emailed to you and is required for login.': 'This code was emailed to you and is required for login.',
'This email already has an account': 'This email already has an account',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Temps en Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Traceback': 'Traceback',
'Twitter': 'Twitter',
'Two-step Login Authentication Code': 'Two-step Login Authentication Code',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Unable to send email': 'Unable to send email',
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT afin de construire des requêtes plus complexes.',
'User': 'User',
'User %(id)s is impersonating %(other_id)s': 'User %(id)s is impersonating %(other_id)s',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Password changed': 'User %(id)s Password changed',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Password retrieved': 'User %(id)s Password retrieved',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User %(id)s Username retrieved': 'User %(id)s Username retrieved',
'User %(id)s Verification email sent': 'User %(id)s Verification email sent',
'User %(id)s verified registration key': 'User %(id)s verified registration key',
'User ID': 'ID utilisateur',
'User Voice': "Voix de l'utilisateur",
'Username': 'Username',
'Username already taken': 'Username already taken',
'Username retrieve': 'Username retrieve',
'Users': 'Users',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenue',
'Welcome %(username)s! Click on the link %(link)s to verify your email': 'Welcome %(username)s! Click on the link %(link)s to verify your email',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Bienvenue à web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'Working...': 'Working...',
'You are successfully running web2py': 'Vous exécutez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You have been invited to join %(site)s, click %(link)s to complete the process': 'You have been invited to join %(site)s, click %(link)s to complete the process',
'You visited the url %s': "Vous avez visité l'URL %s",
'Your password is: %(password)s': 'Your password is: %(password)s',
'Your temporary login code is {0}': 'Your temporary login code is {0}',
'Your username is: %(username)s': 'Your username is: %(username)s',
'Your username was emailed to you': 'Your username was emailed to you',
}
| true | true |
f71ca9b490a0a319f83ff81055834fce51a392e2 | 701 | py | Python | tests/ext/test_envconfig.py | Zipmatch/zipmatch-content | ead1caca63aaa4acdb092747ed03203670b50e63 | [
"BSD-3-Clause"
] | null | null | null | tests/ext/test_envconfig.py | Zipmatch/zipmatch-content | ead1caca63aaa4acdb092747ed03203670b50e63 | [
"BSD-3-Clause"
] | null | null | null | tests/ext/test_envconfig.py | Zipmatch/zipmatch-content | ead1caca63aaa4acdb092747ed03203670b50e63 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from content.ext.envconfig import EnvConfig
@pytest.mark.parametrize('use_init_app', [True, False])
def test_ext_init(app, mocker, use_init_app):
mock_init_app = mocker.patch.object(EnvConfig, 'init_app')
if use_init_app:
ext = EnvConfig()
ext.init_app(app)
else:
EnvConfig(app)
assert mock_init_app.called_with(app)
@pytest.mark.parametrize('value, expected', [
(1, 1),
('x', 'x'),
('[1, "x"]', [1, 'x']),
('123abc', '123abc')
])
def test_envconfig(app, monkeypatch, value, expected):
monkeypatch.setenv('APP_TEST_VALUE', value)
env = EnvConfig()
env.init_app(app)
assert app.config['TEST_VALUE'] == expected
| 25.035714 | 62 | 0.653352 | import pytest
from content.ext.envconfig import EnvConfig
@pytest.mark.parametrize('use_init_app', [True, False])
def test_ext_init(app, mocker, use_init_app):
mock_init_app = mocker.patch.object(EnvConfig, 'init_app')
if use_init_app:
ext = EnvConfig()
ext.init_app(app)
else:
EnvConfig(app)
assert mock_init_app.called_with(app)
@pytest.mark.parametrize('value, expected', [
(1, 1),
('x', 'x'),
('[1, "x"]', [1, 'x']),
('123abc', '123abc')
])
def test_envconfig(app, monkeypatch, value, expected):
monkeypatch.setenv('APP_TEST_VALUE', value)
env = EnvConfig()
env.init_app(app)
assert app.config['TEST_VALUE'] == expected
| true | true |
f71ca9df83a8f9e1e8cf5e848d1ced2172679a2a | 8,631 | py | Python | 2019/07_AmplificationCircuit/amp.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 1 | 2021-01-03T23:09:28.000Z | 2021-01-03T23:09:28.000Z | 2019/07_AmplificationCircuit/amp.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 6 | 2020-12-26T21:02:42.000Z | 2020-12-26T21:02:52.000Z | 2019/07_AmplificationCircuit/amp.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | null | null | null | # ======================================================================
# Amplification Circuit
# Advent of Code 2019 Day 07 -- Eric Wastl -- https://adventofcode.com
#
# Computer simulation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# u o m a p . p y
# ======================================================================
"Amps for Amplification Circuit problem for Advent of Code 2019 Day 07"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
from __future__ import print_function
from itertools import permutations
import intcode
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
PHASES = '01234'
FEEDBACK = '56789'
LETTERS = 'ABCDE'
# ======================================================================
# Amps
# ======================================================================
class Amps(object):
"""Object representing a series of amplifiers"""
def __init__(self, num=5, inp=0, text=None, feedback=False):
# 1. Start with no amplifiers
self.amps = []
self.num = num
self.inp = inp
self.text = text
self.output = 0
self.phases = None
self.feedback = feedback
#print("Creating amplifiers feedback=%s" % (feedback))
# 2. Create as many amplifiers as needed
assert num <= 5
for indx in range(num):
# 3. Create an amplifier and add it to the list
self.amps.append(Amp(letter=LETTERS[indx], text=text))
def find_best(self, watch=False):
"Find the ordering of phases to maximize output"
#print("find_best feedback=%s watch=%s" % (self.feedback, watch))
# 1. Start with a very poor output
best_output = 0
# 2. loop for all of the permutations of the phases
if self.feedback:
phase_numbers = FEEDBACK
else:
phase_numbers = PHASES
for phases in list(permutations(phase_numbers)):
# 3, Run this set of phases
if self.feedback:
output = self.run_feedback(phases=phases, inp=self.inp, watch=watch)
else:
output = self.run_series(phases=phases, inp=self.inp)
# 4. If this is better that what we had before, save it
if output > best_output:
best_output = output
self.output = output
self.phases = phases
if watch:
print("Setting best to %d for phase %s" % (output, phases))
# 5. Return the best output
return best_output
def run_series(self, phases=PHASES, inp=None):
"Run all the amplifiers in series"
# 1. Start with no final output and the initial input
self.output = None
if inp is None:
inp = self.inp
# 2. Run all the amplifiers in turn
for indx in range(self.num):
# 3. Run one amplifier
output = self.amps[indx].run(inp=inp, phase=int(phases[indx]))
# 4. If there was a problem exit
if output is None:
break
# 5. Set up to run the next amplifier
inp = output
# 6. Return the result from the last amplifier run
return output
def run_feedback(self, phases=FEEDBACK, inp=None, watch=False):
"Run all the amplifiers in series with a feedback loop"
# 1. Start with no final output and the initial input
self.output = None
inputs = [0, 0, 0, 0, 0, 0]
status = [intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,]
outputs = [0, 0, 0, 0, 0, 0]
if inp is None:
inputs[0] = self.inp
else:
inputs[0] = inp
# 2. Reset all of the amplifiers
for indx in range(self.num):
self.amps[indx].computer = None
# 3. Run amplifiers until done:
while status[0] != intcode.STOP_HLT:
if watch:
print('Starting feedback loop with input=%s' % (inputs[0]))
# 4. Run all the amplifiers in turn
for indx in range(self.num):
# 5. Run one amplifier
output = self.amps[indx].fb_run(inp=inputs[indx], phase=int(phases[indx]))
# 6. If there was a problem exit
if output is None:
return None
# 7. Set up to run the next amplifier
if watch:
print("phases=%s, amp %s output=%s" % (phases, indx, output))
status[indx] = output[0]
output = output[1]
outputs[indx] = output
inputs[0] = output
inputs[indx+1] = output
# 8. Return the result from the last amplifier run
return output
# ======================================================================
# Amp
# ======================================================================
class Amp(object): #pylint: disable=R0903
"""Object representing a series of amplifier"""
def __init__(self, letter='Z', text=None):
# 1. Store the values
self.letter = letter
self.text = text
self.computer = None
def run(self, phase=0, inp=0):
"Return the result of running the computer with inputs phase and inp"
# 1. Create a computer with the program from text
self.computer = intcode.IntCode(text=self.text)
# 3. Run the computer with inputs
result = self.computer.run(inp=[phase, inp])
# 4. Make sure it ended with a halt instruction
if result != intcode.STOP_HLT:
print("amplifier %s input=[%d,%d] ended with %d" %
(self.letter, phase, inp, result))
return None
# 5. Return the output
output = self.computer.outputs()
if len(output) != 1:
print("amplifier %s input=[%d,%d] ended produced %d outputs" %
(self.letter, phase, inp, len(output)))
return None
return output[0]
def fb_run(self, phase=0, inp=0):
"Return the status and output of running the amplifier with inputs phase and inp"
# 1. Create a computer with the program from text (if not already created)
if self.computer is None:
self.computer = intcode.IntCode(text=self.text)
inp = [phase, inp]
else:
inp = [inp]
# 3. Run the computer with inputs
#print("Running computer with input = %s, counter=%s" % (inp, self.computer.counter))
result = self.computer.run(inp=inp)
# 4. Make sure it ended with a halt instruction or input instruction
if result not in (intcode.STOP_HLT, intcode.STOP_INP):
print("amplifier %s input=%s ended with %d" % (self.letter, inp, result))
return None
# 5. Return the result and output
output = self.computer.outputs()
if len(output) != 1:
print("amplifier %s input=%s ended produced %d outputs" %
(self.letter, inp, len(output)))
return None
return (result, output[0])
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
pass
# ======================================================================
# end u o m a p . p y end
# ======================================================================
| 36.884615 | 94 | 0.446993 |
from __future__ import print_function
from itertools import permutations
import intcode
PHASES = '01234'
FEEDBACK = '56789'
LETTERS = 'ABCDE'
class Amps(object):
def __init__(self, num=5, inp=0, text=None, feedback=False):
self.amps = []
self.num = num
self.inp = inp
self.text = text
self.output = 0
self.phases = None
self.feedback = feedback
assert num <= 5
for indx in range(num):
self.amps.append(Amp(letter=LETTERS[indx], text=text))
def find_best(self, watch=False):
best_output = 0
if self.feedback:
phase_numbers = FEEDBACK
else:
phase_numbers = PHASES
for phases in list(permutations(phase_numbers)):
if self.feedback:
output = self.run_feedback(phases=phases, inp=self.inp, watch=watch)
else:
output = self.run_series(phases=phases, inp=self.inp)
if output > best_output:
best_output = output
self.output = output
self.phases = phases
if watch:
print("Setting best to %d for phase %s" % (output, phases))
return best_output
def run_series(self, phases=PHASES, inp=None):
self.output = None
if inp is None:
inp = self.inp
for indx in range(self.num):
output = self.amps[indx].run(inp=inp, phase=int(phases[indx]))
if output is None:
break
inp = output
return output
def run_feedback(self, phases=FEEDBACK, inp=None, watch=False):
self.output = None
inputs = [0, 0, 0, 0, 0, 0]
status = [intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,]
outputs = [0, 0, 0, 0, 0, 0]
if inp is None:
inputs[0] = self.inp
else:
inputs[0] = inp
for indx in range(self.num):
self.amps[indx].computer = None
while status[0] != intcode.STOP_HLT:
if watch:
print('Starting feedback loop with input=%s' % (inputs[0]))
for indx in range(self.num):
output = self.amps[indx].fb_run(inp=inputs[indx], phase=int(phases[indx]))
if output is None:
return None
if watch:
print("phases=%s, amp %s output=%s" % (phases, indx, output))
status[indx] = output[0]
output = output[1]
outputs[indx] = output
inputs[0] = output
inputs[indx+1] = output
return output
class Amp(object):
def __init__(self, letter='Z', text=None):
self.letter = letter
self.text = text
self.computer = None
def run(self, phase=0, inp=0):
self.computer = intcode.IntCode(text=self.text)
result = self.computer.run(inp=[phase, inp])
if result != intcode.STOP_HLT:
print("amplifier %s input=[%d,%d] ended with %d" %
(self.letter, phase, inp, result))
return None
output = self.computer.outputs()
if len(output) != 1:
print("amplifier %s input=[%d,%d] ended produced %d outputs" %
(self.letter, phase, inp, len(output)))
return None
return output[0]
def fb_run(self, phase=0, inp=0):
if self.computer is None:
self.computer = intcode.IntCode(text=self.text)
inp = [phase, inp]
else:
inp = [inp]
result = self.computer.run(inp=inp)
if result not in (intcode.STOP_HLT, intcode.STOP_INP):
print("amplifier %s input=%s ended with %d" % (self.letter, inp, result))
return None
output = self.computer.outputs()
if len(output) != 1:
print("amplifier %s input=%s ended produced %d outputs" %
(self.letter, inp, len(output)))
return None
return (result, output[0])
if __name__ == '__main__':
pass
| true | true |
f71caa1994d573bc106273e8c7f0d7dd6210d086 | 61,405 | py | Python | configure.py | luyangny/Cat-detection | 6bdf989520ca6aba4cde30e48a6ea869db6eeee6 | [
"Apache-2.0"
] | null | null | null | configure.py | luyangny/Cat-detection | 6bdf989520ca6aba4cde30e48a6ea869db6eeee6 | [
"Apache-2.0"
] | null | null | null | configure.py | luyangny/Cat-detection | 6bdf989520ca6aba4cde30e48a6ea869db6eeee6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
# Default toolkit versions offered when the user accepts the prompts.
_DEFAULT_CUDA_VERSION = '9.0'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_NCCL_VERSION = '2.2'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
# Default install locations probed for the CUDA toolkit, per platform.
_DEFAULT_CUDA_PATH = '/usr/local/cuda'
_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
                          'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
# Android NDK releases this script knows how to configure.
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16]

# How many times interactive prompts retry before giving up with an error.
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10

# Paths of the generated bazelrc and workspace files, rooted at this script.
_TF_WORKSPACE_ROOT = os.path.abspath(os.path.dirname(__file__))
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
_TF_WORKSPACE = os.path.join(_TF_WORKSPACE_ROOT, 'WORKSPACE')

# TensorRT's default library directory depends on the host architecture.
if platform.machine() == 'ppc64le':
  _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/powerpc64le-linux-gnu/'
else:
  _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
class UserInputError(Exception):
  """Raised when the user repeatedly provides invalid configuration input."""
  pass
def is_windows():
  """Return True when the configure script runs on Windows."""
  system = platform.system()
  return system == 'Windows'
def is_linux():
  """Return True when the configure script runs on Linux."""
  system = platform.system()
  return system == 'Linux'
def is_macos():
  """Return True when the configure script runs on macOS (Darwin)."""
  system = platform.system()
  return system == 'Darwin'
def is_ppc64le():
  """Return True when running on a ppc64le (POWER little-endian) machine."""
  machine = platform.machine()
  return machine == 'ppc64le'
def is_cygwin():
  """Return True when running under a Cygwin environment on Windows."""
  system = platform.system()
  return system.startswith('CYGWIN_NT')
def get_input(question):
  """Prompt the user with `question` and return their raw answer.

  Works on both Python 2 (raw_input) and Python 3 (input). End-of-input
  on stdin (e.g. a non-interactive run) yields an empty string instead of
  raising.
  """
  try:
    try:
      return raw_input(question)
    except NameError:
      # Python 3 has no raw_input; its input() has the same semantics.
      return input(question)  # pylint: disable=bad-builtin
  except EOFError:
    return ''
def symlink_force(target, link_name):
  """Force symlink, equivalent of 'ln -sf'.

  Args:
    target: items to link to.
    link_name: name of the link.
  """
  try:
    os.symlink(target, link_name)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise e
    # A file already sits at link_name: remove it and link again,
    # mirroring `ln -sf`.
    os.remove(link_name)
    os.symlink(target, link_name)
def sed_in_place(filename, old, new):
  """Replace old string with new string in file.

  Args:
    filename: string for filename.
    old: string to replace.
    new: new string to replace to.
  """
  with open(filename, 'r') as f:
    contents = f.read()
  with open(filename, 'w') as f:
    f.write(contents.replace(old, new))
def write_to_bazelrc(line):
  """Append a single configuration line to the generated TF bazelrc file."""
  with open(_TF_BAZELRC, 'a') as bazelrc:
    bazelrc.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
  """Record an environment variable as a bazel --action_env setting."""
  action_env_line = 'build --action_env %s="%s"' % (var_name, str(var))
  write_to_bazelrc(action_env_line)
def run_shell(cmd, allow_non_zero=False):
  """Run a command and return its stdout, decoded as UTF-8 and stripped.

  Args:
    cmd: list of strings, the command and its arguments.
    allow_non_zero: if True, a non-zero exit status is tolerated and the
      output captured from the failed command is returned; otherwise a
      subprocess.CalledProcessError propagates.

  Returns:
    The command's standard output as a stripped string.
  """
  if not allow_non_zero:
    return subprocess.check_output(cmd).decode('UTF-8').strip()
  try:
    output = subprocess.check_output(cmd)
  except subprocess.CalledProcessError as e:
    output = e.output
  return output.decode('UTF-8').strip()
def cygpath(path):
  """Convert path from posix to windows."""
  absolute = os.path.abspath(path)
  return absolute.replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
  """Get the python site package paths.

  Args:
    environ_cp: copy of the os.environ.
    python_bin_path: path of the python interpreter to query.

  Returns:
    List of existing directories collected from PYTHONPATH and the
    interpreter's site-packages locations.
  """
  python_paths = []
  if environ_cp.get('PYTHONPATH'):
    # NOTE(review): PYTHONPATH is split on ':' (POSIX separator); on Windows
    # the separator is ';' -- confirm whether that case matters here.
    python_paths = environ_cp.get('PYTHONPATH').split(':')
  try:
    # Ask the target interpreter itself for its site-packages directories.
    library_paths = run_shell([
        python_bin_path, '-c',
        'import site; print("\\n".join(site.getsitepackages()))'
    ]).split('\n')
  except subprocess.CalledProcessError:
    # Fall back to distutils' single library directory, presumably for
    # interpreters where site.getsitepackages is unavailable (e.g. some
    # virtualenvs) -- verify against the supported interpreter set.
    library_paths = [
        run_shell([
            python_bin_path, '-c',
            'from distutils.sysconfig import get_python_lib;'
            'print(get_python_lib())'
        ])
    ]
  all_paths = set(python_paths + library_paths)

  # Keep only entries that actually exist as directories.
  paths = []
  for path in all_paths:
    if os.path.isdir(path):
      paths.append(path)
  return paths
def get_python_major_version(python_bin_path):
  """Get the python major version."""
  version_cmd = [python_bin_path, '-c', 'import sys; print(sys.version[0])']
  return run_shell(version_cmd)
def setup_python(environ_cp):
  """Setup python related env variables.

  Resolves PYTHON_BIN_PATH and PYTHON_LIB_PATH (from the environment or by
  prompting the user), records them in the generated bazelrc, and writes
  tools/python_bin_path.sh.

  Args:
    environ_cp: copy of the os.environ.
  """
  # Get PYTHON_BIN_PATH, default is the current running python.
  default_python_bin_path = sys.executable
  ask_python_bin_path = ('Please specify the location of python. [Default is '
                         '%s]: ') % default_python_bin_path
  while True:
    python_bin_path = get_from_env_or_user_or_default(
        environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
        default_python_bin_path)
    # Check if the path is valid
    if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
      break
    elif not os.path.exists(python_bin_path):
      print('Invalid python path: %s cannot be found.' % python_bin_path)
    else:
      print('%s is not executable. Is it the python binary?' % python_bin_path)
    # Clear the preset value so the next loop iteration prompts again.
    environ_cp['PYTHON_BIN_PATH'] = ''

  # Convert python path to Windows style before checking lib and version
  if is_windows() or is_cygwin():
    python_bin_path = cygpath(python_bin_path)

  # Get PYTHON_LIB_PATH
  python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
  if not python_lib_path:
    python_lib_paths = get_python_path(environ_cp, python_bin_path)
    if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
      python_lib_path = python_lib_paths[0]
    else:
      print('Found possible Python library paths:\n %s' %
            '\n '.join(python_lib_paths))
      default_python_lib_path = python_lib_paths[0]
      python_lib_path = get_input(
          'Please input the desired Python library path to use. '
          'Default is [%s]\n' % python_lib_paths[0])
      if not python_lib_path:
        python_lib_path = default_python_lib_path
    environ_cp['PYTHON_LIB_PATH'] = python_lib_path

  # Result is unused here; presumably kept for its side effect of verifying
  # the chosen interpreter actually runs -- confirm before removing.
  python_major_version = get_python_major_version(python_bin_path)

  # Convert python path to Windows style before writing into bazel.rc
  if is_windows() or is_cygwin():
    python_lib_path = cygpath(python_lib_path)

  # Set-up env variables used by python_configure.bzl
  write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
  write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
  write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
  environ_cp['PYTHON_BIN_PATH'] = python_bin_path

  # Write tools/python_bin_path.sh
  with open(
      os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
      'w') as f:
    f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc(workspace_path):
  """Reset file that contains customized config settings.

  Truncates the generated .tf_configure.bazelrc and rewrites the
  workspace's .bazelrc so it contains exactly one import of it.

  Args:
    workspace_path: directory containing the .bazelrc to update.
  """
  # Truncate the generated file so a re-run starts from a clean slate.
  open(_TF_BAZELRC, 'w').close()
  bazelrc_path = os.path.join(workspace_path, '.bazelrc')

  data = []
  if os.path.exists(bazelrc_path):
    with open(bazelrc_path, 'r') as f:
      data = f.read().splitlines()
  with open(bazelrc_path, 'w') as f:
    # Copy every line except previous imports of the generated file,
    # then append a single fresh import at the end.
    for l in data:
      if _TF_BAZELRC_FILENAME in l:
        continue
      f.write('%s\n' % l)
    if is_windows():
      # Bazel expects forward slashes even on Windows.
      tf_bazelrc_path = _TF_BAZELRC.replace('\\', '/')
    else:
      tf_bazelrc_path = _TF_BAZELRC
    f.write('import %s\n' % tf_bazelrc_path)
def cleanup_makefile():
  """Delete any leftover BUILD files from the Makefile build.

  These files could interfere with Bazel parsing.
  """
  download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow', 'contrib',
                              'makefile', 'downloads')
  if not os.path.isdir(download_dir):
    return
  for dirpath, _, files in os.walk(download_dir):
    for name in files:
      if name.endswith('BUILD'):
        os.remove(os.path.join(dirpath, name))
def get_var(environ_cp,
            var_name,
            query_item,
            enabled_by_default,
            question=None,
            yes_reply=None,
            no_reply=None):
  """Get boolean input from user.

  If var_name is not set in env, ask user to enable query_item or not. If the
  response is empty, use the default.

  Args:
    environ_cp: copy of the os.environ.
    var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
    query_item: string for feature related to the variable, e.g. "Hadoop File
      System".
    enabled_by_default: boolean for default behavior.
    question: optional string for how to ask for user input.
    yes_reply: optional string for reply when feature is enabled.
    no_reply: optional string for reply when feature is disabled.

  Returns:
    boolean value of the variable.

  Raises:
    UserInputError: if an environment variable is set, but it cannot be
      interpreted as a boolean indicator, assume that the user has made a
      scripting error, and will continue to provide invalid input.
      Raise the error to avoid infinitely looping.
  """
  # Build default prompt and reply strings for callers that did not pass them.
  if not question:
    question = 'Do you wish to build TensorFlow with %s support?' % query_item
  if not yes_reply:
    yes_reply = '%s support will be enabled for TensorFlow.' % query_item
  if not no_reply:
    no_reply = 'No %s' % yes_reply

  yes_reply += '\n'
  no_reply += '\n'

  # Show which answer an empty input selects ([Y/n] vs [y/N]).
  if enabled_by_default:
    question += ' [Y/n]: '
  else:
    question += ' [y/N]: '

  var = environ_cp.get(var_name)
  if var is not None:
    # The variable is preset in the environment: it must parse as a boolean,
    # otherwise we raise instead of looping on scripted (non-interactive) input.
    var_content = var.strip().lower()
    true_strings = ('1', 't', 'true', 'y', 'yes')
    false_strings = ('0', 'f', 'false', 'n', 'no')
    if var_content in true_strings:
      var = True
    elif var_content in false_strings:
      var = False
    else:
      raise UserInputError(
          'Environment variable %s must be set as a boolean indicator.\n'
          'The following are accepted as TRUE : %s.\n'
          'The following are accepted as FALSE: %s.\n'
          'Current value is %s.' % (var_name, ', '.join(true_strings),
                                    ', '.join(false_strings), var))

  # Not preset: keep prompting until we get y / n / empty (default).
  while var is None:
    user_input_origin = get_input(question)
    user_input = user_input_origin.strip().lower()
    if user_input == 'y':
      print(yes_reply)
      var = True
    elif user_input == 'n':
      print(no_reply)
      var = False
    elif not user_input:
      # Empty answer selects the default choice.
      if enabled_by_default:
        print(yes_reply)
        var = True
      else:
        print(no_reply)
        var = False
    else:
      print('Invalid selection: %s' % user_input_origin)
  return var
def set_build_var(environ_cp,
                  var_name,
                  query_item,
                  option_name,
                  enabled_by_default,
                  bazel_config_name=None):
  """Set if query_item will be enabled for the build.

  Ask user if query_item will be enabled. Default is used if no input is given.
  Set subprocess environment variable and write to .bazelrc if enabled.

  Args:
    environ_cp: copy of the os.environ.
    var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
    query_item: string for feature related to the variable, e.g. "Hadoop File
      System".
    option_name: string for option to define in .bazelrc.
    enabled_by_default: boolean for default behavior.
    bazel_config_name: Name for Bazel --config argument to enable build feature.
  """

  # Normalize the boolean answer to '0'/'1' so it round-trips through env vars.
  var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
  environ_cp[var_name] = var
  if var == '1':
    # Enabled unconditionally for every build.
    write_to_bazelrc('build --define %s=true' % option_name)
  elif bazel_config_name is not None:
    # TODO(mikecase): Migrate all users of configure.py to use --config Bazel
    # options and not to set build configs through environment variables.
    # Disabled by default, but still reachable via `--config=<name>`.
    write_to_bazelrc(
        'build:%s --define %s=true' % (bazel_config_name, option_name))
def set_action_env_var(environ_cp,
                       var_name,
                       query_item,
                       enabled_by_default,
                       question=None,
                       yes_reply=None,
                       no_reply=None):
  """Set boolean action_env variable.

  Ask user if query_item will be enabled. Default is used if no input is given.
  Set environment variable and write to .bazelrc.

  Args:
    environ_cp: copy of the os.environ.
    var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
    query_item: string for feature related to the variable, e.g. "Hadoop File
      System".
    enabled_by_default: boolean for default behavior.
    question: optional string for how to ask for user input.
    yes_reply: optional string for reply when feature is enabled.
    no_reply: optional string for reply when feature is disabled.
  """
  # Convert the boolean answer to 0/1 for the --action_env assignment.
  var = int(
      get_var(environ_cp, var_name, query_item, enabled_by_default, question,
              yes_reply, no_reply))

  write_action_env_to_bazelrc(var_name, var)
  environ_cp[var_name] = str(var)
def convert_version_to_int(version):
  """Convert a version number to a integer that can be used to compare.

  Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
  'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.

  Args:
    version: a version to be converted

  Returns:
    An integer if converted successfully, otherwise return None.
  """
  # Drop any '-suffix' qualifier, then split into numeric segments.
  segments = version.split('-')[0].split('.')
  if not all(segment.isdigit() for segment in segments):
    return None
  # Zero-pad each segment to three digits so that concatenating them
  # yields an integer whose ordering matches version ordering.
  return int(''.join('%03d' % int(segment) for segment in segments))
def check_bazel_version(min_version):
  """Check installed bazel version is at least min_version.

  Exits the process when bazel is missing or older than min_version.

  Args:
    min_version: string for minimum bazel version.

  Returns:
    The bazel version detected.
  """
  if which('bazel') is None:
    print('Cannot find bazel. Please install bazel.')
    sys.exit(0)
  # --batch/--bazelrc=/dev/null keep the query free of user configuration.
  curr_version = run_shell(
      ['bazel', '--batch', '--bazelrc=/dev/null', 'version'])

  # Extract the release number from the 'Build label: X.Y.Z' line.
  for line in curr_version.split('\n'):
    if 'Build label: ' in line:
      curr_version = line.split('Build label: ')[1]
      break

  min_version_int = convert_version_to_int(min_version)
  curr_version_int = convert_version_to_int(curr_version)

  # Check if current bazel version can be detected properly.
  if not curr_version_int:
    # Non-release builds (e.g. built from source) have no parseable label;
    # warn but let the user proceed.
    print('WARNING: current bazel installation is not a release version.')
    print('Make sure you are running at least bazel %s' % min_version)
    return curr_version

  print('You have bazel %s installed.' % curr_version)

  if curr_version_int < min_version_int:
    print('Please upgrade your bazel installation to version %s or higher to '
          'build TensorFlow!' % min_version)
    sys.exit(0)
  return curr_version
def set_cc_opt_flags(environ_cp):
  """Set up architecture-dependent optimization flags.

  Also append CC optimization flags to bazel.rc..

  Args:
    environ_cp: copy of the os.environ.
  """
  if is_ppc64le():
    # gcc on ppc64le does not support -march, use mcpu instead
    default_cc_opt_flags = '-mcpu=native'
  elif is_windows():
    default_cc_opt_flags = '/arch:AVX'
  else:
    default_cc_opt_flags = '-march=native'
  question = ('Please specify optimization flags to use during compilation when'
              ' bazel option "--config=opt" is specified [Default is %s]: '
             ) % default_cc_opt_flags
  cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
                                                 question, default_cc_opt_flags)
  # Each whitespace-separated flag becomes its own --copt line under
  # the 'opt' config.
  for opt in cc_opt_flags.split():
    write_to_bazelrc('build:opt --copt=%s' % opt)
  # It should be safe on the same build host.
  if not is_ppc64le() and not is_windows():
    write_to_bazelrc('build:opt --host_copt=-march=native')
  write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
  """set TF_CUDA_CLANG action_env.

  Asks whether clang (rather than nvcc) should compile CUDA code and
  records the answer as the TF_CUDA_CLANG action_env variable.

  Args:
    environ_cp: copy of the os.environ.
  """
  set_action_env_var(
      environ_cp,
      'TF_CUDA_CLANG',
      None,
      False,
      question='Do you want to use clang as CUDA compiler?',
      yes_reply='Clang will be used as CUDA compiler.',
      no_reply='nvcc will be used as CUDA compiler.')
def set_tf_download_clang(environ_cp):
  """Set TF_DOWNLOAD_CLANG action_env.

  Asks whether a fresh clang release should be downloaded for the build
  and records the answer as the TF_DOWNLOAD_CLANG action_env variable.
  """
  set_action_env_var(
      environ_cp,
      'TF_DOWNLOAD_CLANG',
      None,
      False,
      question='Do you wish to download a fresh release of clang? (Experimental)',
      yes_reply='Clang will be downloaded and used to compile tensorflow.',
      no_reply='Clang will not be downloaded.')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
                                    var_default):
  """Get var_name either from env, or user or default.

  If var_name has been set as environment variable, use the preset value, else
  ask for user input. If no input is provided, the default is used.

  Args:
    environ_cp: copy of the os.environ.
    var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
    ask_for_var: string for how to ask for user input.
    var_default: default value string.

  Returns:
    string value for var_name
  """
  preset = environ_cp.get(var_name)
  if preset:
    return preset
  answer = get_input(ask_for_var)
  print('\n')
  return answer if answer else var_default
def set_clang_cuda_compiler_path(environ_cp):
  """Set CLANG_CUDA_COMPILER_PATH.

  Prompts until an existing path is supplied, then records it both in
  environ_cp and in the generated bazelrc.

  Args:
    environ_cp: copy of the os.environ.
  """
  # Default to whichever clang is on PATH, if any.
  default_clang_path = which('clang') or ''
  ask_clang_path = ('Please specify which clang should be used as device and '
                    'host compiler. [Default is %s]: ') % default_clang_path

  while True:
    clang_cuda_compiler_path = get_from_env_or_user_or_default(
        environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
        default_clang_path)
    if os.path.exists(clang_cuda_compiler_path):
      break

    # Reset and retry
    print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
    environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''

  # Set CLANG_CUDA_COMPILER_PATH
  environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
  write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
                              clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
                                 var_name,
                                 var_default,
                                 ask_for_var,
                                 check_success,
                                 error_msg,
                                 suppress_default_error=False,
                                 n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
  """Loop over user prompts for an ENV param until receiving a valid response.

  For the env param var_name, read from the environment or verify user input
  until receiving valid input. When done, set var_name in the environ_cp to its
  new value.

  Args:
    environ_cp: (Dict) copy of the os.environ.
    var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
    var_default: (String) default value string.
    ask_for_var: (String) string for how to ask for user input.
    check_success: (Function) function that takes one argument and returns a
      boolean. Should return True if the value provided is considered valid. May
      contain a complex error message if error_msg does not provide enough
      information. In that case, set suppress_default_error to True.
    error_msg: (String) String with one and only one '%s'. Formatted with each
      invalid response upon check_success(input) failure.
    suppress_default_error: (Bool) Suppress the above error message in favor of
      one from the check_success function.
    n_ask_attempts: (Integer) Number of times to query for valid input before
      raising an error and quitting.

  Returns:
    [String] The value of var_name after querying for input.

  Raises:
    UserInputError: if a query has been attempted n_ask_attempts times without
      success, assume that the user has made a scripting error, and will
      continue to provide invalid input. Raise the error to avoid infinitely
      looping.
  """
  # A value preset in the environment takes precedence over var_default.
  default = environ_cp.get(var_name) or var_default
  full_query = '%s [Default is %s]: ' % (
      ask_for_var,
      default,
  )

  for _ in range(n_ask_attempts):
    val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
                                          default)
    if check_success(val):
      break
    if not suppress_default_error:
      print(error_msg % val)
    # Clear the env copy so the next attempt prompts instead of re-reading
    # the same invalid preset value.
    environ_cp[var_name] = ''
  else:
    # for-else: only reached when every attempt failed validation.
    raise UserInputError(
        'Invalid %s setting was provided %d times in a row. '
        'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))

  environ_cp[var_name] = val
  return val
def create_android_ndk_rule(environ_cp):
  """Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule.

  Prompts for (or reads from the environment) a valid NDK installation
  path and records ANDROID_NDK_HOME plus the detected API level in the
  generated bazelrc.

  Args:
    environ_cp: copy of the os.environ.
  """
  # Platform-specific default locations used by the Android SDK manager.
  if is_windows() or is_cygwin():
    default_ndk_path = cygpath(
        '%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA'])
  elif is_macos():
    default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
  else:
    default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']

  def valid_ndk_path(path):
    # An NDK install is identified by its 'source.properties' metadata file.
    return (os.path.exists(path) and
            os.path.exists(os.path.join(path, 'source.properties')))

  android_ndk_home_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_NDK_HOME',
      var_default=default_ndk_path,
      ask_for_var='Please specify the home path of the Android NDK to use.',
      check_success=valid_ndk_path,
      error_msg=('The path %s or its child file "source.properties" '
                 'does not exist.'))

  write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
  # check_ndk_level is defined elsewhere in this file; presumably it parses
  # the NDK revision from source.properties -- verify against its definition.
  write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL',
                              check_ndk_level(android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
  """Set Android variables and write Android SDK WORKSPACE rule.

  Args:
    environ_cp: copy of the os.environ.
  """
  # Platform-specific guess at the conventional SDK install location.
  if is_windows() or is_cygwin():
    sdk_default = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
  elif is_macos():
    sdk_default = '%s/library/Android/Sdk' % environ_cp['HOME']
  else:
    sdk_default = '%s/Android/Sdk' % environ_cp['HOME']

  def valid_sdk_path(path):
    # An SDK root must contain both 'platforms' and 'build-tools'.
    return all(
        os.path.exists(p) for p in (path, os.path.join(path, 'platforms'),
                                    os.path.join(path, 'build-tools')))

  android_sdk_home_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_SDK_HOME',
      var_default=sdk_default,
      ask_for_var='Please specify the home path of the Android SDK to use.',
      check_success=valid_sdk_path,
      error_msg=('Either %s does not exist, or it does not contain the '
                 'subdirectories "platforms" and "build-tools".'))

  # Directory names look like 'android-27'; present just the level numbers.
  platforms_dir = os.path.join(android_sdk_home_path, 'platforms')
  api_levels = [
      entry.replace('android-', '')
      for entry in sorted(os.listdir(platforms_dir))
  ]

  def valid_api_level(api_level):
    return os.path.exists(os.path.join(platforms_dir, 'android-' + api_level))

  android_api_level = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_API_LEVEL',
      var_default=api_levels[-1],
      ask_for_var=('Please specify the Android SDK API level to use. '
                   '[Available levels: %s]') % api_levels,
      check_success=valid_api_level,
      error_msg='Android-%s is not present in the SDK path.')

  build_tools_dir = os.path.join(android_sdk_home_path, 'build-tools')
  build_tools_versions = sorted(os.listdir(build_tools_dir))

  def valid_build_tools(version):
    return os.path.exists(os.path.join(build_tools_dir, version))

  android_build_tools_version = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_BUILD_TOOLS_VERSION',
      var_default=build_tools_versions[-1],
      ask_for_var=('Please specify an Android build tools version to use. '
                   '[Available versions: %s]') % build_tools_versions,
      check_success=valid_build_tools,
      error_msg=('The selected SDK does not have build-tools version %s '
                 'available.'))

  write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
                              android_build_tools_version)
  write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
  write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def check_ndk_level(android_ndk_home_path):
  """Check the revision number of an Android NDK path.

  Args:
    android_ndk_home_path: path to the NDK installation root.

  Returns:
    The NDK major revision, as a string.

  Raises:
    Exception: if source.properties contains no parseable revision.
  """
  properties_path = '%s/source.properties' % android_ndk_home_path
  if is_windows() or is_cygwin():
    properties_path = cygpath(properties_path)
  with open(properties_path, 'r') as f:
    filedata = f.read()

  match = re.search(r'Pkg.Revision = (\d+)', filedata)
  if not match:
    raise Exception('Unable to parse NDK revision.')
  ndk_api_level = match.group(1)

  # Warn (but do not fail) on revisions Bazel does not officially support.
  if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
    print('WARNING: The API level of the NDK in %s is %s, which is not '
          'supported by Bazel (officially supported versions: %s). Please use '
          'another version. Compiling Android targets may result in confusing '
          'errors.\n' % (android_ndk_home_path, ndk_api_level,
                         _SUPPORTED_ANDROID_NDK_VERSIONS))
  return ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
  """Set GCC_HOST_COMPILER_PATH."""
  # Prefer the gcc that the CUDA toolkit itself symlinks to, if present.
  cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
  if os.path.islink(cuda_bin_symlink):
    # os.readlink is only available in linux
    default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
  else:
    default_gcc_host_compiler_path = which('gcc') or ''

  gcc_host_compiler_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='GCC_HOST_COMPILER_PATH',
      var_default=default_gcc_host_compiler_path,
      ask_for_var=
      'Please specify which gcc should be used by nvcc as the host compiler.',
      check_success=os.path.exists,
      error_msg='Invalid gcc path. %s cannot be found.',
  )

  write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
  """Reformat the version string to have the given number of sequences.

  For example:
  Given (7, 2) -> 7.0
        (7.0.1, 2) -> 7.0
        (5, 1) -> 5
        (5.0.3.2, 1) -> 5

  Args:
    version_str: String, the version string.
    sequence_count: int, an integer.

  Returns:
    string, reformatted version string.
  """
  parts = version_str.split('.')
  # Pad with '0' components until there are enough sequences, then truncate.
  parts.extend('0' for _ in range(sequence_count - len(parts)))
  return '.'.join(parts[:sequence_count])
def set_tf_cuda_version(environ_cp):
  """Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION.

  Repeatedly prompts (up to _DEFAULT_PROMPT_ASK_ATTEMPTS times) for a CUDA
  version and toolkit path until a matching cudart library is found on disk.

  Args:
    environ_cp: copy of the os.environ.

  Raises:
    UserInputError: if no valid version/path pair is provided after all
      attempts (the for/else below fires only when the loop never breaks).
  """
  ask_cuda_version = (
      'Please specify the CUDA SDK version you want to use. '
      '[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    # Configure the Cuda SDK version to use.
    tf_cuda_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
    # Normalize to exactly two components, e.g. '9' -> '9.0'.
    tf_cuda_version = reformat_version_sequence(str(tf_cuda_version), 2)

    # Find out where the CUDA toolkit is installed
    default_cuda_path = _DEFAULT_CUDA_PATH
    if is_windows() or is_cygwin():
      default_cuda_path = cygpath(
          environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN))
    elif is_linux():
      # If the default doesn't exist, try an alternative default.
      if (not os.path.exists(default_cuda_path)
         ) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
        default_cuda_path = _DEFAULT_CUDA_PATH_LINUX
    ask_cuda_path = ('Please specify the location where CUDA %s toolkit is'
                     ' installed. Refer to README.md for more details. '
                     '[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
    cuda_toolkit_path = get_from_env_or_user_or_default(
        environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
    if is_windows() or is_cygwin():
      cuda_toolkit_path = cygpath(cuda_toolkit_path)

    # Candidate cudart library locations, relative to the toolkit root.
    # NOTE(review): on a platform that is none of Windows/Linux/macOS,
    # cuda_rt_lib_paths would be unbound here and raise NameError — confirm
    # only supported platforms reach this code.
    if is_windows():
      cuda_rt_lib_paths = ['lib/x64/cudart.lib']
    elif is_linux():
      cuda_rt_lib_paths = [
          '%s/libcudart.so.%s' % (x, tf_cuda_version) for x in [
              'lib64',
              'lib/powerpc64le-linux-gnu',
              'lib/x86_64-linux-gnu',
          ]
      ]
    elif is_macos():
      cuda_rt_lib_paths = ['lib/libcudart.%s.dylib' % tf_cuda_version]

    cuda_toolkit_paths_full = [
        os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths
    ]
    # Accept the configuration as soon as any expected cudart file exists.
    if any([os.path.exists(x) for x in cuda_toolkit_paths_full]):
      break

    # Reset and retry
    print('Invalid path to CUDA %s toolkit. %s cannot be found' %
          (tf_cuda_version, cuda_toolkit_paths_full))
    environ_cp['TF_CUDA_VERSION'] = ''
    environ_cp['CUDA_TOOLKIT_PATH'] = ''

  else:
    raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION
  environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
  write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path)
  environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
  write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version)
def set_tf_cudnn_version(environ_cp):
  """Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION.

  Prompts (up to _DEFAULT_PROMPT_ASK_ATTEMPTS times) for a cuDNN version and
  install path, validating that the matching libcudnn file exists; on Linux
  additionally falls back to locations reported by ldconfig.

  Args:
    environ_cp: copy of the os.environ.

  Raises:
    UserInputError: if all attempts produce an invalid configuration.
  """
  ask_cudnn_version = (
      'Please specify the cuDNN version you want to use. '
      '[Leave empty to default to cuDNN %s.0]: ') % _DEFAULT_CUDNN_VERSION

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    tf_cudnn_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version,
        _DEFAULT_CUDNN_VERSION)
    # cuDNN is matched by major version only.
    tf_cudnn_version = reformat_version_sequence(str(tf_cudnn_version), 1)

    default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH')
    ask_cudnn_path = (r'Please specify the location where cuDNN %s library is '
                      'installed. Refer to README.md for more details. [Default'
                      ' is %s]: ') % (tf_cudnn_version, default_cudnn_path)
    cudnn_install_path = get_from_env_or_user_or_default(
        environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path)

    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    cudnn_install_path = os.path.realpath(
        os.path.expanduser(cudnn_install_path))
    if is_windows() or is_cygwin():
      cudnn_install_path = cygpath(cudnn_install_path)

    # Primary and alternate library locations per platform.
    if is_windows():
      cuda_dnn_lib_path = 'lib/x64/cudnn.lib'
      cuda_dnn_lib_alt_path = 'lib/x64/cudnn.lib'
    elif is_linux():
      cuda_dnn_lib_path = 'lib64/libcudnn.so.%s' % tf_cudnn_version
      cuda_dnn_lib_alt_path = 'libcudnn.so.%s' % tf_cudnn_version
    elif is_macos():
      cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version
      cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version

    cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path)
    cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path,
                                              cuda_dnn_lib_alt_path)
    if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists(
        cuda_dnn_lib_alt_path_full):
      break

    # Try another alternative for Linux
    if is_linux():
      ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
      cudnn_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
      cudnn_path_from_ldconfig = re.search('.*libcudnn.so .* => (.*)',
                                           cudnn_path_from_ldconfig)
      if cudnn_path_from_ldconfig:
        cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1)
        if os.path.exists(
            '%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version)):
          cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig)
          break

    # Reset and Retry
    print(
        'Invalid path to cuDNN %s toolkit. None of the following files can be '
        'found:' % tf_cudnn_version)
    print(cuda_dnn_lib_path_full)
    print(cuda_dnn_lib_alt_path_full)
    if is_linux():
      # NOTE(review): if the ldconfig regex above did not match,
      # cudnn_path_from_ldconfig is None here and this prints 'None.<ver>' —
      # confirm whether that is intended.
      print('%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version))

    environ_cp['TF_CUDNN_VERSION'] = ''
  else:
    raise UserInputError('Invalid TF_CUDNN setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION
  environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path
  write_action_env_to_bazelrc('CUDNN_INSTALL_PATH', cudnn_install_path)
  environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
  write_action_env_to_bazelrc('TF_CUDNN_VERSION', tf_cudnn_version)
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
  """Check compatibility between given library and cudnn/cudart libraries."""
  ldd_bin = which('ldd') or '/usr/bin/ldd'
  ldd_lines = run_shell([ldd_bin, lib], True).split(os.linesep)
  cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
  cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')

  cudnn_match = None
  cudart_match = None
  # A library with no cudnn/cudart dependency is compatible by default;
  # seeing a dependency flips the flag to False until its version checks out.
  cudnn_ok = True
  cuda_ok = True
  for line in ldd_lines:
    if 'libcudnn.so' in line:
      cudnn_match = cudnn_pattern.search(line)
      cudnn_ok = False
    elif 'libcudart.so' in line:
      cudart_match = cuda_pattern.search(line)
      cuda_ok = False

  # Only a match with a non-empty version string can re-validate the flag.
  if cudnn_match and len(cudnn_match.group(1)):
    cudnn_ok = convert_version_to_int(cudnn_match.group(1)) == cudnn_ver
  if cudart_match and len(cudart_match.group(1)):
    cuda_ok = convert_version_to_int(cudart_match.group(1)) == cuda_ver
  return cudnn_ok and cuda_ok
def set_tf_tensorrt_install_path(environ_cp):
  """Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION.

  Adapted from code contributed by Sami Kama (https://github.com/samikama).

  Args:
    environ_cp: copy of the os.environ.

  Raises:
    ValueError: if this method was called under non-Linux platform.
    UserInputError: if user has provided invalid input multiple times.
  """
  if not is_linux():
    raise ValueError('Currently TensorRT is only supported on Linux platform.')

  # Ask user whether to add TensorRT support.
  if str(int(get_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT',
             False))) != '1':
    return

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    ask_tensorrt_path = (r'Please specify the location where TensorRT is '
                         'installed. [Default is %s]:') % (
                             _DEFAULT_TENSORRT_PATH_LINUX)
    trt_install_path = get_from_env_or_user_or_default(
        environ_cp, 'TENSORRT_INSTALL_PATH', ask_tensorrt_path,
        _DEFAULT_TENSORRT_PATH_LINUX)

    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    trt_install_path = os.path.realpath(os.path.expanduser(trt_install_path))

    def find_libs(search_path):
      """Search for libnvinfer.so in "search_path"."""
      fl = set()
      if os.path.exists(search_path) and os.path.isdir(search_path):
        fl.update([
            os.path.realpath(os.path.join(search_path, x))
            for x in os.listdir(search_path)
            if 'libnvinfer.so' in x
        ])
      return fl

    # Collect candidate libnvinfer files from the root, lib/ and lib64/.
    possible_files = find_libs(trt_install_path)
    possible_files.update(find_libs(os.path.join(trt_install_path, 'lib')))
    possible_files.update(find_libs(os.path.join(trt_install_path, 'lib64')))
    cuda_ver = convert_version_to_int(environ_cp['TF_CUDA_VERSION'])
    cudnn_ver = convert_version_to_int(environ_cp['TF_CUDNN_VERSION'])
    nvinfer_pattern = re.compile('.*libnvinfer.so.?(.*)$')
    # [best_version_int, best_version_str, best_file]; the int form is only
    # used for comparison.
    highest_ver = [0, None, None]

    # Pick the highest-versioned candidate compatible with the chosen
    # CUDA/cuDNN installation.
    for lib_file in possible_files:
      if is_cuda_compatible(lib_file, cuda_ver, cudnn_ver):
        matches = nvinfer_pattern.search(lib_file)
        if len(matches.groups()) == 0:
          continue
        ver_str = matches.group(1)
        ver = convert_version_to_int(ver_str) if len(ver_str) else 0
        if ver > highest_ver[0]:
          highest_ver = [ver, ver_str, lib_file]
    if highest_ver[1] is not None:
      trt_install_path = os.path.dirname(highest_ver[2])
      tf_tensorrt_version = highest_ver[1]
      break

    # Try another alternative from ldconfig.
    ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
    ldconfig_output = run_shell([ldconfig_bin, '-p'])
    search_result = re.search('.*libnvinfer.so\\.?([0-9.]*).* => (.*)',
                              ldconfig_output)
    if search_result:
      libnvinfer_path_from_ldconfig = search_result.group(2)
      if os.path.exists(libnvinfer_path_from_ldconfig):
        if is_cuda_compatible(libnvinfer_path_from_ldconfig, cuda_ver,
                              cudnn_ver):
          trt_install_path = os.path.dirname(libnvinfer_path_from_ldconfig)
          tf_tensorrt_version = search_result.group(1)
          break

    # Reset and Retry
    if possible_files:
      # Candidates existed but none was compatible.
      print('TensorRT libraries found in one the following directories',
            'are not compatible with selected cuda and cudnn installations')
      print(trt_install_path)
      print(os.path.join(trt_install_path, 'lib'))
      print(os.path.join(trt_install_path, 'lib64'))
      if search_result:
        print(libnvinfer_path_from_ldconfig)
    else:
      print(
          'Invalid path to TensorRT. None of the following files can be found:')
      print(trt_install_path)
      print(os.path.join(trt_install_path, 'lib'))
      print(os.path.join(trt_install_path, 'lib64'))
      if search_result:
        print(libnvinfer_path_from_ldconfig)

  else:
    # for/else: reached only when no attempt succeeded (loop never broke).
    raise UserInputError('Invalid TF_TENSORRT setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION
  environ_cp['TENSORRT_INSTALL_PATH'] = trt_install_path
  write_action_env_to_bazelrc('TENSORRT_INSTALL_PATH', trt_install_path)
  environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
  write_action_env_to_bazelrc('TF_TENSORRT_VERSION', tf_tensorrt_version)
def set_tf_nccl_install_path(environ_cp):
  """Set NCCL_INSTALL_PATH and TF_NCCL_VERSION.

  Args:
    environ_cp: copy of the os.environ.

  Raises:
    ValueError: if this method was called under non-Linux platform.
    UserInputError: if user has provided invalid input multiple times.
  """
  if not is_linux():
    raise ValueError('Currently NCCL is only supported on Linux platforms.')

  ask_nccl_version = (
      'Please specify the NCCL version you want to use. If NCCL %s is not '
      'installed, then you can use version 1.3 that can be fetched '
      'automatically but it may have worse performance with multiple GPUs. '
      '[Default is %s]: ') % (_DEFAULT_NCCL_VERSION, _DEFAULT_NCCL_VERSION)

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    tf_nccl_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_NCCL_VERSION', ask_nccl_version, _DEFAULT_NCCL_VERSION)
    # NCCL is matched by major version only.
    tf_nccl_version = reformat_version_sequence(str(tf_nccl_version), 1)

    if tf_nccl_version == '1':
      break  # No need to get install path, NCCL 1 is a GitHub repo.

    # TODO(csigg): Look with ldconfig first if we can find the library in paths
    # like /usr/lib/x86_64-linux-gnu and the header file in the corresponding
    # include directory. This is where the NCCL .deb packages install them.
    # Then ask the user if we should use that. Instead of a single
    # NCCL_INSTALL_PATH, pass separate NCCL_LIB_PATH and NCCL_HDR_PATH to
    # nccl_configure.bzl
    default_nccl_path = environ_cp.get('CUDA_TOOLKIT_PATH')
    ask_nccl_path = (r'Please specify the location where NCCL %s library is '
                     'installed. Refer to README.md for more details. [Default '
                     'is %s]:') % (tf_nccl_version, default_nccl_path)
    nccl_install_path = get_from_env_or_user_or_default(
        environ_cp, 'NCCL_INSTALL_PATH', ask_nccl_path, default_nccl_path)

    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    nccl_install_path = os.path.realpath(os.path.expanduser(nccl_install_path))
    if is_windows() or is_cygwin():
      nccl_install_path = cygpath(nccl_install_path)

    # Expected library location per platform.
    if is_windows():
      nccl_lib_path = 'lib/x64/nccl.lib'
    elif is_linux():
      nccl_lib_path = 'lib/libnccl.so.%s' % tf_nccl_version
    elif is_macos():
      nccl_lib_path = 'lib/libnccl.%s.dylib' % tf_nccl_version

    nccl_lib_path = os.path.join(nccl_install_path, nccl_lib_path)
    nccl_hdr_path = os.path.join(nccl_install_path, 'include/nccl.h')
    # Both the library and the header must be present to accept the path.
    if os.path.exists(nccl_lib_path) and os.path.exists(nccl_hdr_path):
      # Set NCCL_INSTALL_PATH
      environ_cp['NCCL_INSTALL_PATH'] = nccl_install_path
      write_action_env_to_bazelrc('NCCL_INSTALL_PATH', nccl_install_path)
      break

    # Reset and Retry
    print('Invalid path to NCCL %s toolkit, %s or %s not found. Please use the '
          'O/S agnostic package of NCCL 2' % (tf_nccl_version, nccl_lib_path,
                                              nccl_hdr_path))

    environ_cp['TF_NCCL_VERSION'] = ''
  else:
    # for/else: reached only when no attempt succeeded (loop never broke).
    raise UserInputError('Invalid TF_NCCL setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set TF_NCCL_VERSION
  environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
  write_action_env_to_bazelrc('TF_NCCL_VERSION', tf_nccl_version)
def get_native_cuda_compute_capabilities(environ_cp):
  """Get native cuda compute capabilities.

  Args:
    environ_cp: copy of the os.environ.

  Returns:
    string of native cuda compute capabilities, separated by comma.
  """
  device_query_bin = os.path.join(
      environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
  # Without an executable deviceQuery binary there is nothing to probe.
  if not (os.path.isfile(device_query_bin) and
          os.access(device_query_bin, os.X_OK)):
    return ''
  try:
    lines = run_shell(device_query_bin).split('\n')
    pattern = re.compile('[0-9]*\\.[0-9]*')
    found = [pattern.search(line) for line in lines if 'Capability' in line]
    return ','.join(m.group() for m in found if m is not None)
  except subprocess.CalledProcessError:
    return ''
def set_tf_cuda_compute_capabilities(environ_cp):
  """Set TF_CUDA_COMPUTE_CAPABILITIES.

  Uses the native GPUs (when deviceQuery is available) for the default, then
  prompts until the user supplies a valid comma-separated list of compute
  capabilities of the form "X.Y" with major version >= 3.

  Args:
    environ_cp: copy of the os.environ.
  """
  while True:
    native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
        environ_cp)
    if not native_cuda_compute_capabilities:
      default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
    else:
      default_cuda_compute_capabilities = native_cuda_compute_capabilities

    ask_cuda_compute_capabilities = (
        'Please specify a list of comma-separated '
        'Cuda compute capabilities you want to '
        'build with.\nYou can find the compute '
        'capability of your device at: '
        'https://developer.nvidia.com/cuda-gpus.\nPlease'
        ' note that each additional compute '
        'capability significantly increases your '
        'build time and binary size. [Default is: %s]: ' %
        default_cuda_compute_capabilities)
    tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
        ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
    # Check whether all capabilities from the input is valid
    all_valid = True
    # Remove all whitespace characters before splitting the string
    # that users may insert by accident, as this will result in error
    tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
    for compute_capability in tf_cuda_compute_capabilities.split(','):
      # BUGFIX: escape the dot — the previous pattern '[0-9]+.[0-9]+' let '.'
      # match any character, so malformed entries like '3x5' passed.
      m = re.match('[0-9]+\\.[0-9]+', compute_capability)
      if not m:
        # BUGFIX: the original format string had no '%s' placeholder, so this
        # line raised "TypeError: not all arguments converted" instead of
        # printing the message.
        print('Invalid compute capability: %s' % compute_capability)
        all_valid = False
      else:
        ver = int(m.group(0).split('.')[0])
        if ver < 3:
          print('Only compute capabilities 3.0 or higher are supported.')
          all_valid = False

    if all_valid:
      break

    # Reset and Retry
    environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''

  # Set TF_CUDA_COMPUTE_CAPABILITIES
  environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
  write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
                              tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
  """Set other CUDA related variables."""
  # With CUDA enabled, both build and test always run on GPU; pick the
  # clang-based config when TF_CUDA_CLANG is requested.
  if environ_cp.get('TF_CUDA_CLANG') == '1':
    cuda_config = 'cuda_clang'
  else:
    cuda_config = 'cuda'
  write_to_bazelrc('build --config=%s' % cuda_config)
  write_to_bazelrc('test --config=%s' % cuda_config)
def set_host_cxx_compiler(environ_cp):
  """Set HOST_CXX_COMPILER."""
  # Default to whatever g++ is on PATH, if any.
  gxx_default = which('g++') or ''
  host_cxx_compiler = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='HOST_CXX_COMPILER',
      var_default=gxx_default,
      ask_for_var=('Please specify which C++ compiler should be used as the '
                   'host C++ compiler.'),
      check_success=os.path.exists,
      error_msg='Invalid C++ compiler path. %s cannot be found.',
  )
  write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
  """Set HOST_C_COMPILER."""
  # Default to whatever gcc is on PATH, if any.
  gcc_default = which('gcc') or ''
  host_c_compiler = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='HOST_C_COMPILER',
      var_default=gcc_default,
      ask_for_var=('Please specify which C compiler should be used as the host '
                   'C compiler.'),
      check_success=os.path.exists,
      error_msg='Invalid C compiler path. %s cannot be found.',
  )
  write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_computecpp_toolkit_path(environ_cp):
  """Set COMPUTECPP_TOOLKIT_PATH."""

  def toolkit_exists(toolkit_path):
    """Check if a computecpp toolkit path is valid."""
    # On Linux the runtime library must be present; elsewhere only the
    # directory itself is checked.
    sycl_rt_lib_path = 'lib/libComputeCpp.so' if is_linux() else ''
    sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
    if os.path.exists(sycl_rt_lib_path_full):
      return True
    print('Invalid SYCL %s library path. %s cannot be found' %
          (_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
    return False

  computecpp_toolkit_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='COMPUTECPP_TOOLKIT_PATH',
      var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
      ask_for_var=(
          'Please specify the location where ComputeCpp for SYCL %s is '
          'installed.' % _TF_OPENCL_VERSION),
      check_success=toolkit_exists,
      error_msg='Invalid SYCL compiler path. %s cannot be found.',
      suppress_default_error=True)

  write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
                              computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
  """Set TRISYCL_INCLUDE_DIR."""
  ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
                             'include directory. (Use --config=sycl_trisycl '
                             'when building with Bazel) '
                             '[Default is %s]: ') % (
                                 _DEFAULT_TRISYCL_INCLUDE_DIR)

  # Keep asking until an existing directory is supplied.
  while True:
    candidate_dir = get_from_env_or_user_or_default(
        environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
        _DEFAULT_TRISYCL_INCLUDE_DIR)
    if os.path.exists(candidate_dir):
      break
    print('Invalid triSYCL include directory, %s cannot be found' %
          (candidate_dir))

  trisycl_include_dir = candidate_dir
  # Set TRISYCL_INCLUDE_DIR
  environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
  write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def set_mpi_home(environ_cp):
  """Set MPI_HOME.

  Derives a default from the location of mpirun/mpiexec (two directories up,
  i.e. the toolkit root above bin/), then prompts until the chosen directory
  contains both 'include' and 'lib' subdirectories.

  Args:
    environ_cp: copy of the os.environ.
  """
  default_mpi_home = which('mpirun') or which('mpiexec') or ''
  default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))

  def valid_mpi_path(mpi_home):
    exists = (
        os.path.exists(os.path.join(mpi_home, 'include')) and
        os.path.exists(os.path.join(mpi_home, 'lib')))
    if not exists:
      # BUGFIX: the second placeholder previously printed the boolean result
      # of os.path.exists(...) instead of the missing 'lib' path itself.
      print('Invalid path to the MPI Toolkit. %s or %s cannot be found' %
            (os.path.join(mpi_home, 'include'),
             os.path.join(mpi_home, 'lib')))
    return exists

  _ = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='MPI_HOME',
      var_default=default_mpi_home,
      ask_for_var='Please specify the MPI toolkit folder.',
      check_success=valid_mpi_path,
      error_msg='',
      suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
  """Set other MPI related variables.

  Symlinks the MPI headers and library from MPI_HOME into third_party/mpi and
  records in mpi.bzl whether the installation is OpenMPI or MVAPICH/MPICH.

  Args:
    environ_cp: copy of the os.environ; MPI_HOME must already be set.

  Raises:
    ValueError: if MPI_HOME contains no lib/libmpi.so.
  """
  # Link the MPI header files
  mpi_home = environ_cp.get('MPI_HOME')
  symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')

  # Determine if we use OpenMPI or MVAPICH, these require different header files
  # to be included here to make bazel dependency checker happy
  if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
    # mpi_portable_platform.h is specific to OpenMPI installations.
    symlink_force(
        os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
        'third_party/mpi/mpi_portable_platform.h')
    # TODO(gunan): avoid editing files in configure
    sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
                 'MPI_LIB_IS_OPENMPI=True')
  else:
    # MVAPICH / MPICH
    symlink_force(
        os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
    symlink_force(
        os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
    # TODO(gunan): avoid editing files in configure
    sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
                 'MPI_LIB_IS_OPENMPI=False')

  if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
    symlink_force(
        os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
  else:
    raise ValueError('Cannot find the MPI library file in %s/lib' % mpi_home)
def set_system_libs_flag(environ_cp):
  """Forward TF_SYSTEM_LIBS and install-dir overrides into the bazelrc."""
  syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
  if syslibs:
    # Normalize to a sorted comma-separated list; accept either comma- or
    # whitespace-separated input.
    delimiter = ',' if ',' in syslibs else None
    syslibs = ','.join(sorted(syslibs.split(delimiter)))
    write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)

  if 'PREFIX' in environ_cp:
    write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
  if 'LIBDIR' in environ_cp:
    write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
  if 'INCLUDEDIR' in environ_cp:
    write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def set_windows_build_flags(environ_cp):
  """Set Windows specific build options."""
  # Only the monolithic build is currently supported on Windows.
  write_to_bazelrc('build --config monolithic')
  # Silence compiler warnings.
  write_to_bazelrc('build --copt=-w --host_copt=-w')
  # Surface detailed diagnostics when a build step fails.
  write_to_bazelrc('build --verbose_failures')
  # Host and target platforms coincide on Windows, so sharing a single
  # configuration avoids building identical targets twice.
  write_to_bazelrc('build --distinct_host_configuration=false')
  # Work around Windows path-length limits with shortened object file paths.
  # TODO(pcloudy): Remove this flag when upgrading Bazel to 0.16.0, where
  # short object file paths are enabled by default.
  write_to_bazelrc('build --experimental_shortened_obj_file_path=true')
  # Exclude dependencies when zipping py_binary/py_test targets. This:
  # 1. lets python tests run against the system-installed TF pip package, and
  # 2. keeps //tensorflow/tools/pip_package:simple_console_windows (a
  #    py_binary used while creating the TF pip package) free of redundancy.
  # See https://github.com/tensorflow/tensorflow/issues/22390
  write_to_bazelrc('build --define=no_tensorflow_py_deps=true')

  override_eigen = get_var(
      environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
      True, ('Would you like to override eigen strong inline for some C++ '
             'compilation to reduce the compilation time?'),
      'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
      'some compilations could take more than 20 mins.')
  if override_eigen:
    # A known MSVC issue (https://github.com/tensorflow/tensorflow/issues/10521)
    # makes some Eigen-heavy files extremely slow to compile. Overriding strong
    # inline cuts ~20 minutes off conv_grad_ops_3d.cc and conv_ops_3d.cc at a
    # runtime-performance cost, so the user chooses.
    write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
  """Print one formatted help line for a Bazel --config option."""
  line = '\t--config=%-12s\t# %s' % (name, help_text)
  print(line)
def main():
  """Interactively generate .tf_configure.bazelrc for a TensorFlow build."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--workspace',
      type=str,
      default=_TF_WORKSPACE_ROOT,
      help='The absolute path to your active Bazel workspace.')
  args = parser.parse_args()

  # Make a copy of os.environ to be clear when functions and getting and setting
  # environment variables.
  environ_cp = dict(os.environ)

  check_bazel_version('0.15.0')

  reset_tf_configure_bazelrc(args.workspace)
  cleanup_makefile()
  setup_python(environ_cp)

  # Pre-answer prompts for features unsupported on Windows.
  if is_windows():
    environ_cp['TF_NEED_AWS'] = '0'
    environ_cp['TF_NEED_GCP'] = '0'
    environ_cp['TF_NEED_HDFS'] = '0'
    environ_cp['TF_NEED_JEMALLOC'] = '0'
    environ_cp['TF_NEED_KAFKA'] = '0'
    environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
    environ_cp['TF_NEED_COMPUTECPP'] = '0'
    environ_cp['TF_NEED_OPENCL'] = '0'
    environ_cp['TF_CUDA_CLANG'] = '0'
    environ_cp['TF_NEED_TENSORRT'] = '0'
    # TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
    # Windows.
    environ_cp['TF_DOWNLOAD_CLANG'] = '0'
    environ_cp['TF_ENABLE_XLA'] = '0'
    environ_cp['TF_NEED_MPI'] = '0'
    environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'

  # Pre-answer prompts for features unsupported on macOS.
  if is_macos():
    environ_cp['TF_NEED_JEMALLOC'] = '0'
    environ_cp['TF_NEED_TENSORRT'] = '0'

  # The numpy package on ppc64le uses OpenBLAS which has multi-threading
  # issues that lead to incorrect answers.  Set OMP_NUM_THREADS=1 at
  # runtime to allow the Tensorflow testcases which compare numpy
  # results to Tensorflow results to succeed.
  if is_ppc64le():
    write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)

  # Optional feature prompts; each writes its own build flag when enabled.
  set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc',
                'with_jemalloc', True)
  set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform',
                'with_gcp_support', True, 'gcp')
  set_build_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System',
                'with_hdfs_support', True, 'hdfs')
  set_build_var(environ_cp, 'TF_NEED_AWS', 'Amazon AWS Platform',
                'with_aws_support', True, 'aws')
  set_build_var(environ_cp, 'TF_NEED_KAFKA', 'Apache Kafka Platform',
                'with_kafka_support', True, 'kafka')
  set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
                False, 'xla')

  # OpenCL SYCL configuration: ComputeCpp toolchain or triSYCL headers.
  set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False)
  if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
    set_host_cxx_compiler(environ_cp)
    set_host_c_compiler(environ_cp)
    set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
    if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
      set_computecpp_toolkit_path(environ_cp)
    else:
      set_trisycl_include_dir(environ_cp)

  set_action_env_var(environ_cp, 'TF_NEED_ROCM', 'ROCm', False)
  if (environ_cp.get('TF_NEED_ROCM') == '1' and
      'LD_LIBRARY_PATH' in environ_cp and
      environ_cp.get('LD_LIBRARY_PATH') != '1'):
    write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                environ_cp.get('LD_LIBRARY_PATH'))

  # CUDA configuration; skipped when a pre-built TF_CUDA_CONFIG_REPO is given.
  set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
  if (environ_cp.get('TF_NEED_CUDA') == '1' and
      'TF_CUDA_CONFIG_REPO' not in environ_cp):
    set_tf_cuda_version(environ_cp)
    set_tf_cudnn_version(environ_cp)
    if is_linux():
      set_tf_tensorrt_install_path(environ_cp)
      set_tf_nccl_install_path(environ_cp)

    set_tf_cuda_compute_capabilities(environ_cp)
    if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
        'LD_LIBRARY_PATH') != '1':
      write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                  environ_cp.get('LD_LIBRARY_PATH'))

    set_tf_cuda_clang(environ_cp)
    if environ_cp.get('TF_CUDA_CLANG') == '1':
      # Ask whether we should download the clang toolchain.
      set_tf_download_clang(environ_cp)
      if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
        # Set up which clang we should use as the cuda / host compiler.
        set_clang_cuda_compiler_path(environ_cp)
      else:
        # Use downloaded LLD for linking.
        write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
        write_to_bazelrc('test:cuda_clang --config=download_clang_use_lld')
    else:
      # Set up which gcc nvcc should use as the host compiler
      # No need to set this on Windows
      if not is_windows():
        set_gcc_host_compiler_path(environ_cp)
    set_other_cuda_vars(environ_cp)
  else:
    # CUDA not required. Ask whether we should download the clang toolchain and
    # use it for the CPU build.
    set_tf_download_clang(environ_cp)
    if environ_cp.get('TF_DOWNLOAD_CLANG') == '1':
      write_to_bazelrc('build --config=download_clang')
      write_to_bazelrc('test --config=download_clang')

  # SYCL / ROCm / CUDA are mutually exclusive.
  # At most 1 GPU platform can be configured.
  gpu_platform_count = 0
  if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
    gpu_platform_count += 1
  if environ_cp.get('TF_NEED_ROCM') == '1':
    gpu_platform_count += 1
  if environ_cp.get('TF_NEED_CUDA') == '1':
    gpu_platform_count += 1
  if gpu_platform_count >= 2:
    raise UserInputError('SYCL / CUDA / ROCm are mututally exclusive. '
                         'At most 1 GPU platform can be configured.')

  set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
  if environ_cp.get('TF_NEED_MPI') == '1':
    set_mpi_home(environ_cp)
    set_other_mpi_vars(environ_cp)

  set_cc_opt_flags(environ_cp)
  set_system_libs_flag(environ_cp)
  if is_windows():
    set_windows_build_flags(environ_cp)

  # Add a config option to build TensorFlow 2.0 API.
  write_to_bazelrc('build:v2 --define=tf_api_version=2')

  if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
             ('Would you like to interactively configure ./WORKSPACE for '
              'Android builds?'), 'Searching for NDK and SDK installations.',
             'Not configuring the WORKSPACE for Android builds.'):
    create_android_ndk_rule(environ_cp)
    create_android_sdk_rule(environ_cp)

  # On Windows, we don't have MKL support and the build is always monolithic.
  # So no need to print the following message.
  # TODO(pcloudy): remove the following if check when they make sense on Windows
  if not is_windows():
    print('Preconfigured Bazel build configs. You can use any of the below by '
          'adding "--config=<>" to your build command. See tools/bazel.rc for '
          'more details.')
    config_info_line('mkl', 'Build with MKL support.')
    config_info_line('monolithic', 'Config for mostly static monolithic build.')
    config_info_line('gdr', 'Build with GDR support.')
    config_info_line('verbs', 'Build with libverbs support.')
    config_info_line('ngraph', 'Build with Intel nGraph support.')
# Script entry point: run the interactive configuration when executed directly.
if __name__ == '__main__':
  main()
| 37.442073 | 80 | 0.689651 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# Default toolkit/library versions offered when the user accepts the prompt
# default.
_DEFAULT_CUDA_VERSION = '9.0'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_NCCL_VERSION = '2.2'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
# Candidate CUDA install locations per platform.
_DEFAULT_CUDA_PATH = '/usr/local/cuda'
_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
                          'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
# SYCL / OpenCL defaults.
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
# Android NDK major revisions accepted without a warning (see check_ndk_level).
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16]
# Number of times an interactive prompt is retried before giving up.
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10

# Generated bazelrc fragment and WORKSPACE file, rooted at this script's
# directory.
_TF_WORKSPACE_ROOT = os.path.abspath(os.path.dirname(__file__))
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
_TF_WORKSPACE = os.path.join(_TF_WORKSPACE_ROOT, 'WORKSPACE')

# The TensorRT system library directory differs on ppc64le.
if platform.machine() == 'ppc64le':
  _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/powerpc64le-linux-gnu/'
else:
  _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
class UserInputError(Exception):
  """Raised when interactive configuration receives unusable input."""
  pass
def is_windows():
  """Return True when running on a native Windows Python."""
  return 'Windows' == platform.system()
def is_linux():
  """Return True when running on Linux."""
  return 'Linux' == platform.system()
def is_macos():
  """Return True when running on macOS (Darwin)."""
  return 'Darwin' == platform.system()
def is_ppc64le():
  """Return True on a ppc64le (POWER little-endian) machine."""
  return 'ppc64le' == platform.machine()
def is_cygwin():
  """Return True under a Cygwin-provided Python (system name 'CYGWIN_NT*')."""
  sysname = platform.system()
  return sysname.startswith('CYGWIN_NT')
def get_input(question):
  """Prompt the user with `question` and return the raw answer.

  Python 2/3 compatible: tries raw_input first and falls back to input when
  raw_input is undefined (Python 3). EOF on stdin yields an empty string so
  non-interactive runs do not crash.
  """
  try:
    try:
      answer = raw_input(question)
    except NameError:
      # Python 3 has no raw_input.
      answer = input(question)
  except EOFError:
    answer = ''
  return answer
def symlink_force(target, link_name):
  """Create symlink link_name -> target, replacing link_name if it exists."""
  try:
    os.symlink(target, link_name)
  except OSError as err:
    # Only "already exists" is recoverable; anything else propagates.
    if err.errno != errno.EEXIST:
      raise err
    os.remove(link_name)
    os.symlink(target, link_name)
def sed_in_place(filename, old, new):
  """Replace every occurrence of `old` with `new` inside `filename`."""
  with open(filename, 'r') as infile:
    contents = infile.read()
  with open(filename, 'w') as outfile:
    outfile.write(contents.replace(old, new))
def write_to_bazelrc(line):
  """Append a single line to the generated .tf_configure.bazelrc."""
  with open(_TF_BAZELRC, 'a') as bazelrc:
    bazelrc.write('%s\n' % line)
def write_action_env_to_bazelrc(var_name, var):
  """Record an --action_env variable assignment in the generated bazelrc."""
  entry = 'build --action_env %s="%s"' % (var_name, str(var))
  write_to_bazelrc(entry)
def run_shell(cmd, allow_non_zero=False):
  """Run `cmd` (an argv list) and return its stdout, UTF-8 decoded & stripped.

  When allow_non_zero is True, a failing command's captured output is
  returned instead of raising CalledProcessError.
  """
  if not allow_non_zero:
    output = subprocess.check_output(cmd)
  else:
    try:
      output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as err:
      output = err.output
  return output.decode('UTF-8').strip()
def cygpath(path):
  """Return the absolute form of `path` using forward slashes only."""
  absolute = os.path.abspath(path)
  return absolute.replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
  """Get the python site-packages directory paths for python_bin_path.

  Combines any user-supplied PYTHONPATH entries with the site package
  directories reported by the interpreter itself, keeping only those that
  exist on disk.

  Args:
    environ_cp: copy of os.environ.
    python_bin_path: path of the python interpreter to query.

  Returns:
    List of existing library directories (order is unspecified because a set
    is used for de-duplication).
  """
  python_paths = []
  if environ_cp.get('PYTHONPATH'):
    # BUGFIX: PYTHONPATH entries are separated by os.pathsep (';' on Windows,
    # ':' elsewhere); the previous hard-coded ':' broke Windows paths.
    python_paths = environ_cp.get('PYTHONPATH').split(os.pathsep)
  try:
    library_paths = run_shell([
        python_bin_path, '-c',
        'import site; print("\\n".join(site.getsitepackages()))'
    ]).split('\n')
  except subprocess.CalledProcessError:
    # Some interpreters (e.g. inside virtualenvs) lack site.getsitepackages;
    # fall back to distutils' single library directory.
    library_paths = [
        run_shell([
            python_bin_path, '-c',
            'from distutils.sysconfig import get_python_lib;'
            'print(get_python_lib())'
        ])
    ]
  all_paths = set(python_paths + library_paths)

  paths = []
  for path in all_paths:
    if os.path.isdir(path):
      paths.append(path)
  return paths
def get_python_major_version(python_bin_path):
  """Return the given interpreter's major version ('2' or '3') as a string."""
  return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
  """Setup python-related environment variables and config files.

  Prompts for PYTHON_BIN_PATH (looping until an executable file is given) and
  PYTHON_LIB_PATH, then records both in environ_cp, the generated bazelrc,
  and tools/python_bin_path.sh.
  """
  # Get PYTHON_BIN_PATH, defaulting to the currently running interpreter.
  default_python_bin_path = sys.executable
  ask_python_bin_path = ('Please specify the location of python. [Default is '
                         '%s]: ') % default_python_bin_path
  while True:
    python_bin_path = get_from_env_or_user_or_default(
        environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
        default_python_bin_path)
    # Accept only an existing, executable file; otherwise clear the env var so
    # the next loop iteration re-prompts instead of reusing the bad value.
    if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
      break
    elif not os.path.exists(python_bin_path):
      print('Invalid python path: %s cannot be found.' % python_bin_path)
    else:
      print('%s is not executable. Is it the python binary?' % python_bin_path)
    environ_cp['PYTHON_BIN_PATH'] = ''

  # Convert python path to Windows style before checking lib and version.
  if is_windows() or is_cygwin():
    python_bin_path = cygpath(python_bin_path)

  # Get PYTHON_LIB_PATH; if unset, either auto-pick the first candidate
  # (USE_DEFAULT_PYTHON_LIB_PATH=1) or ask the user to choose.
  python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
  if not python_lib_path:
    python_lib_paths = get_python_path(environ_cp, python_bin_path)
    if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
      python_lib_path = python_lib_paths[0]
    else:
      print('Found possible Python library paths:\n %s' %
            '\n '.join(python_lib_paths))
      default_python_lib_path = python_lib_paths[0]
      python_lib_path = get_input(
          'Please input the desired Python library path to use. '
          'Default is [%s]\n' % python_lib_paths[0])
      if not python_lib_path:
        python_lib_path = default_python_lib_path
    environ_cp['PYTHON_LIB_PATH'] = python_lib_path

  # NOTE(review): the result is unused in this function — TODO confirm the
  # call can be removed.
  python_major_version = get_python_major_version(python_bin_path)

  # Convert python path to Windows style before writing into bazelrc.
  if is_windows() or is_cygwin():
    python_lib_path = cygpath(python_lib_path)

  # Set up env variables used by python_configure.bzl.
  write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
  write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
  write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
  environ_cp['PYTHON_BIN_PATH'] = python_bin_path

  # Write tools/python_bin_path.sh so shell tooling sees the same choice.
  with open(
      os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
      'w') as f:
    f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc(workspace_path):
  """Reset the generated bazelrc and re-link it from the user .bazelrc.

  Truncates the .tf_configure.bazelrc fragment, then rewrites
  <workspace_path>/.bazelrc with any stale import of that fragment removed
  and a fresh `import` line appended at the end.
  """
  # Truncate the generated fragment.
  open(_TF_BAZELRC, 'w').close()
  bazelrc_path = os.path.join(workspace_path, '.bazelrc')

  data = []
  if os.path.exists(bazelrc_path):
    with open(bazelrc_path, 'r') as f:
      data = f.read().splitlines()
  with open(bazelrc_path, 'w') as f:
    for l in data:
      # Drop any previous import line referencing the fragment.
      if _TF_BAZELRC_FILENAME in l:
        continue
      f.write('%s\n' % l)
    if is_windows():
      # Bazel expects forward slashes in import paths on Windows.
      tf_bazelrc_path = _TF_BAZELRC.replace('\\', '/')
    else:
      tf_bazelrc_path = _TF_BAZELRC
    f.write('import %s\n' % tf_bazelrc_path)
def cleanup_makefile():
  """Delete stray BUILD files under tensorflow/contrib/makefile/downloads.

  Leftover BUILD files from a makefile build would otherwise be picked up by
  Bazel.
  """
  download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow', 'contrib',
                              'makefile', 'downloads')
  if not os.path.isdir(download_dir):
    return
  for root, _, filenames in os.walk(download_dir):
    for name in filenames:
      if name.endswith('BUILD'):
        os.remove(os.path.join(root, name))
def get_var(environ_cp,
            var_name,
            query_item,
            enabled_by_default,
            question=None,
            yes_reply=None,
            no_reply=None):
  """Resolve a boolean configuration flag from the environment or the user.

  If environ_cp[var_name] is set it must hold a recognizable boolean string;
  otherwise the user is prompted (repeatedly, until a valid answer or a bare
  Enter, which selects the default).

  Args:
    environ_cp: copy of os.environ.
    var_name: environment variable to consult.
    query_item: human-readable feature name used in the default wording.
    enabled_by_default: boolean default for the prompt.
    question: optional custom prompt text.
    yes_reply: optional text printed on an affirmative answer.
    no_reply: optional text printed on a negative answer.

  Returns:
    bool: the chosen value.

  Raises:
    UserInputError: if the environment variable holds an unrecognized value.
  """
  if not question:
    question = 'Do you wish to build TensorFlow with %s support?' % query_item
  if not yes_reply:
    yes_reply = '%s support will be enabled for TensorFlow.' % query_item
  if not no_reply:
    no_reply = 'No %s' % yes_reply
  yes_reply += '\n'
  no_reply += '\n'
  question += ' [Y/n]: ' if enabled_by_default else ' [y/N]: '

  env_value = environ_cp.get(var_name)
  if env_value is not None:
    normalized = env_value.strip().lower()
    true_strings = ('1', 't', 'true', 'y', 'yes')
    false_strings = ('0', 'f', 'false', 'n', 'no')
    if normalized in true_strings:
      return True
    if normalized in false_strings:
      return False
    raise UserInputError(
        'Environment variable %s must be set as a boolean indicator.\n'
        'The following are accepted as TRUE : %s.\n'
        'The following are accepted as FALSE: %s.\n'
        'Current value is %s.' % (var_name, ', '.join(true_strings),
                                  ', '.join(false_strings), env_value))

  while True:
    user_input_origin = get_input(question)
    answer = user_input_origin.strip().lower()
    if answer == 'y' or (not answer and enabled_by_default):
      print(yes_reply)
      return True
    if answer == 'n' or (not answer and not enabled_by_default):
      print(no_reply)
      return False
    print('Invalid selection: %s' % user_input_origin)
def set_build_var(environ_cp,
                  var_name,
                  query_item,
                  option_name,
                  enabled_by_default,
                  bazel_config_name=None):
  """Resolve a boolean flag and emit the matching bazel --define.

  Stores '0'/'1' back into environ_cp[var_name]. When enabled, the define is
  written unconditionally; when disabled and bazel_config_name is given, it
  is written under that named config instead.
  """
  enabled = get_var(environ_cp, var_name, query_item, enabled_by_default)
  var = str(int(enabled))
  environ_cp[var_name] = var
  if var == '1':
    write_to_bazelrc('build --define %s=true' % option_name)
  elif bazel_config_name is not None:
    write_to_bazelrc(
        'build:%s --define %s=true' % (bazel_config_name, option_name))
def set_action_env_var(environ_cp,
                       var_name,
                       query_item,
                       enabled_by_default,
                       question=None,
                       yes_reply=None,
                       no_reply=None):
  """Resolve a boolean flag and mirror it as a bazel --action_env value.

  The resolved 0/1 value is written to the generated bazelrc and stored back
  into environ_cp[var_name] as a string.
  """
  enabled = get_var(environ_cp, var_name, query_item, enabled_by_default,
                    question, yes_reply, no_reply)
  var = int(enabled)
  write_action_env_to_bazelrc(var_name, var)
  environ_cp[var_name] = str(var)
def convert_version_to_int(version):
  """Convert a dotted version string to one integer (e.g. '2.1.4' -> 2001004).

  Each dot-separated segment is zero-padded to three digits and concatenated;
  any '-suffix' (e.g. '-rc1') is dropped first. Returns None when a segment
  is not purely numeric.
  """
  segments = version.split('-')[0].split('.')
  if any(not segment.isdigit() for segment in segments):
    return None
  return int(''.join('%03d' % int(segment) for segment in segments))
def check_bazel_version(min_version):
  """Check that the installed bazel is at least min_version.

  Exits the process when bazel is missing or too old; returns the detected
  version string (also when it cannot be parsed as a release version).
  """
  if which('bazel') is None:
    print('Cannot find bazel. Please install bazel.')
    sys.exit(0)
  curr_version = run_shell(
      ['bazel', '--batch', '--bazelrc=/dev/null', 'version'])

  # Extract the version number from the 'Build label:' line of the output.
  for line in curr_version.split('\n'):
    if 'Build label: ' in line:
      curr_version = line.split('Build label: ')[1]
      break

  min_version_int = convert_version_to_int(min_version)
  curr_version_int = convert_version_to_int(curr_version)

  # A non-release build (e.g. built from source) has no parseable version;
  # warn and accept it.
  if not curr_version_int:
    print('WARNING: current bazel installation is not a release version.')
    print('Make sure you are running at least bazel %s' % min_version)
    return curr_version

  print('You have bazel %s installed.' % curr_version)

  if curr_version_int < min_version_int:
    print('Please upgrade your bazel installation to version %s or higher to '
          'build TensorFlow!' % min_version)
    sys.exit(0)
  return curr_version
def set_cc_opt_flags(environ_cp):
  """Prompt for compiler optimization flags and record them for --config=opt."""
  if is_ppc64le():
    # gcc on ppc64le does not support -march; -mcpu is the equivalent.
    default_flags = '-mcpu=native'
  elif is_windows():
    default_flags = '/arch:AVX'
  else:
    default_flags = '-march=native'
  question = ('Please specify optimization flags to use during compilation when'
              ' bazel option "--config=opt" is specified [Default is %s]: '
             ) % default_flags
  cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
                                                 question, default_flags)
  for opt in cc_opt_flags.split():
    write_to_bazelrc('build:opt --copt=%s' % opt)
  # Keep host and target builds consistent, except where -march is
  # unsupported (ppc64le) or not applicable (Windows).
  if not is_ppc64le() and not is_windows():
    write_to_bazelrc('build:opt --host_copt=-march=native')
  write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
  """Record whether clang (rather than nvcc) should compile CUDA code."""
  set_action_env_var(
      environ_cp,
      'TF_CUDA_CLANG',
      None,
      False,
      question='Do you want to use clang as CUDA compiler?',
      yes_reply='Clang will be used as CUDA compiler.',
      no_reply='nvcc will be used as CUDA compiler.')
def set_tf_download_clang(environ_cp):
  """Record whether a prebuilt clang release should be downloaded."""
  set_action_env_var(
      environ_cp,
      'TF_DOWNLOAD_CLANG',
      None,
      False,
      question='Do you wish to download a fresh release of clang? (Experimental)',
      yes_reply='Clang will be downloaded and used to compile tensorflow.',
      no_reply='Clang will not be downloaded.')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
                                    var_default):
  """Return environ_cp[var_name] if truthy, else prompt, else var_default."""
  value = environ_cp.get(var_name)
  if value:
    return value
  value = get_input(ask_for_var)
  print('\n')
  return value if value else var_default
def set_clang_cuda_compiler_path(environ_cp):
  """Set CLANG_CUDA_COMPILER_PATH, looping until an existing path is given."""
  default_clang_path = which('clang') or ''
  ask_clang_path = ('Please specify which clang should be used as device and '
                    'host compiler. [Default is %s]: ') % default_clang_path

  while True:
    clang_cuda_compiler_path = get_from_env_or_user_or_default(
        environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
        default_clang_path)
    if os.path.exists(clang_cuda_compiler_path):
      break

    # Clear the env var so the next iteration actually re-prompts.
    print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
    environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''

  # Persist the accepted path for bazel.
  environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
  write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
                              clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
                                 var_name,
                                 var_default,
                                 ask_for_var,
                                 check_success,
                                 error_msg,
                                 suppress_default_error=False,
                                 n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
  """Loop over user prompts for an env var until receiving a valid response.

  Args:
    environ_cp: (Dict) copy of os.environ.
    var_name: (String) environment variable to read first and set on success.
    var_default: (String) default offered when the variable is unset.
    ask_for_var: (String) question shown on each attempt.
    check_success: (Function) predicate deciding whether an answer is valid.
    error_msg: (String) %-template printed for an invalid answer.
    suppress_default_error: (Bool) skip printing error_msg (used when
      check_success prints its own diagnostics).
    n_ask_attempts: (Integer) attempts before giving up.

  Returns:
    (String) the first value accepted by check_success.

  Raises:
    UserInputError: after n_ask_attempts invalid answers.
  """
  default = environ_cp.get(var_name) or var_default
  full_query = '%s [Default is %s]: ' % (
      ask_for_var,
      default,
  )

  for _ in range(n_ask_attempts):
    val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
                                          default)
    if check_success(val):
      break
    if not suppress_default_error:
      print(error_msg % val)
    # Clear the env var so the next iteration actually re-prompts.
    environ_cp[var_name] = ''
  else:
    # for/else: reached only when the loop never hit `break`.
    raise UserInputError(
        'Invalid %s setting was provided %d times in a row. '
        'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))

  environ_cp[var_name] = val
  return val
def create_android_ndk_rule(environ_cp):
  """Set ANDROID_NDK_HOME and the derived NDK API level in the bazelrc."""
  if is_windows() or is_cygwin():
    default_ndk_path = cygpath(
        '%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA'])
  elif is_macos():
    default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
  else:
    default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']

  def valid_ndk_path(path):
    # An NDK install is recognized by its source.properties metadata file.
    return (os.path.exists(path) and
            os.path.exists(os.path.join(path, 'source.properties')))

  android_ndk_home_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_NDK_HOME',
      var_default=default_ndk_path,
      ask_for_var='Please specify the home path of the Android NDK to use.',
      check_success=valid_ndk_path,
      error_msg=('The path %s or its child file "source.properties" '
                 'does not exist.'))

  write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
  write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL',
                              check_ndk_level(android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
  """Set Android SDK home, API level, and build-tools version in the bazelrc."""
  if is_windows() or is_cygwin():
    default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
  elif is_macos():
    default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
  else:
    default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']

  def valid_sdk_path(path):
    # A usable SDK has both installed platforms and build tools.
    return (os.path.exists(path) and
            os.path.exists(os.path.join(path, 'platforms')) and
            os.path.exists(os.path.join(path, 'build-tools')))

  android_sdk_home_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_SDK_HOME',
      var_default=default_sdk_path,
      ask_for_var='Please specify the home path of the Android SDK to use.',
      check_success=valid_sdk_path,
      error_msg=('Either %s does not exist, or it does not contain the '
                 'subdirectories "platforms" and "build-tools".'))

  # Offer the installed platform directories (named 'android-<level>') as
  # the available API levels, defaulting to the highest one.
  platforms = os.path.join(android_sdk_home_path, 'platforms')
  api_levels = sorted(os.listdir(platforms))
  api_levels = [x.replace('android-', '') for x in api_levels]

  def valid_api_level(api_level):
    return os.path.exists(
        os.path.join(android_sdk_home_path, 'platforms',
                     'android-' + api_level))

  android_api_level = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_API_LEVEL',
      var_default=api_levels[-1],
      ask_for_var=('Please specify the Android SDK API level to use. '
                   '[Available levels: %s]') % api_levels,
      check_success=valid_api_level,
      error_msg='Android-%s is not present in the SDK path.')

  # Same pattern for build-tools versions.
  build_tools = os.path.join(android_sdk_home_path, 'build-tools')
  versions = sorted(os.listdir(build_tools))

  def valid_build_tools(version):
    return os.path.exists(
        os.path.join(android_sdk_home_path, 'build-tools', version))

  android_build_tools_version = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_BUILD_TOOLS_VERSION',
      var_default=versions[-1],
      ask_for_var=('Please specify an Android build tools version to use. '
                   '[Available versions: %s]') % versions,
      check_success=valid_build_tools,
      error_msg=('The selected SDK does not have build-tools version %s '
                 'available.'))

  write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
                              android_build_tools_version)
  write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
  write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def check_ndk_level(android_ndk_home_path):
  """Return the NDK major revision parsed from source.properties.

  Prints a warning (but still returns the value) when the revision is not in
  _SUPPORTED_ANDROID_NDK_VERSIONS. Raises a generic Exception when the file
  has no parseable 'Pkg.Revision' entry.
  """
  properties_path = '%s/source.properties' % android_ndk_home_path
  if is_windows() or is_cygwin():
    properties_path = cygpath(properties_path)
  with open(properties_path, 'r') as f:
    filedata = f.read()

  # The major component of Pkg.Revision is treated as the NDK API level.
  revision = re.search(r'Pkg.Revision = (\d+)', filedata)
  if revision:
    ndk_api_level = revision.group(1)
  else:
    raise Exception('Unable to parse NDK revision.')
  if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
    print('WARNING: The API level of the NDK in %s is %s, which is not '
          'supported by Bazel (officially supported versions: %s). Please use '
          'another version. Compiling Android targets may result in confusing '
          'errors.\n' % (android_ndk_home_path, ndk_api_level,
                         _SUPPORTED_ANDROID_NDK_VERSIONS))
  return ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
  """Ask for, validate, and record GCC_HOST_COMPILER_PATH for nvcc."""
  gcc_default = which('gcc') or ''
  # Prefer the gcc that the CUDA toolkit itself symlinks to, if any.
  candidate_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
  if os.path.islink(candidate_symlink):
    gcc_default = os.path.realpath(candidate_symlink)

  gcc_host_compiler_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='GCC_HOST_COMPILER_PATH',
      var_default=gcc_default,
      ask_for_var=
      'Please specify which gcc should be used by nvcc as the host compiler.',
      check_success=os.path.exists,
      error_msg='Invalid gcc path. %s cannot be found.',
  )

  write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
  """Normalize version_str to exactly sequence_count dotted segments.

  Missing segments are padded with '0'; extra segments are truncated.
  E.g. ('9', 2) -> '9.0', ('9.1.85', 2) -> '9.1'.
  """
  segments = version_str.split('.')
  segments += ['0'] * (sequence_count - len(segments))
  return '.'.join(segments[:sequence_count])
def set_tf_cuda_version(environ_cp):
  """Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION.

  Repeatedly prompts (up to _DEFAULT_PROMPT_ASK_ATTEMPTS times) for a CUDA
  version and toolkit directory until a matching cudart runtime library is
  found, then records both values in environ_cp and the generated bazelrc.

  Raises:
    UserInputError: after too many invalid attempts.
  """
  ask_cuda_version = (
      'Please specify the CUDA SDK version you want to use. '
      '[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    # Configure the CUDA SDK version to use, normalized to 'major.minor'.
    tf_cuda_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
    tf_cuda_version = reformat_version_sequence(str(tf_cuda_version), 2)

    # Pick a platform-appropriate default install location.
    default_cuda_path = _DEFAULT_CUDA_PATH
    if is_windows() or is_cygwin():
      default_cuda_path = cygpath(
          environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN))
    elif is_linux():
      # Fall back to /opt/cuda when /usr/local/cuda is absent.
      if (not os.path.exists(default_cuda_path)
         ) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
        default_cuda_path = _DEFAULT_CUDA_PATH_LINUX
    ask_cuda_path = ('Please specify the location where CUDA %s toolkit is'
                     ' installed. Refer to README.md for more details. '
                     '[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
    cuda_toolkit_path = get_from_env_or_user_or_default(
        environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
    if is_windows() or is_cygwin():
      cuda_toolkit_path = cygpath(cuda_toolkit_path)

    # Per-platform candidate locations of the cudart runtime library; the
    # toolkit path is accepted when any of them exists.
    if is_windows():
      cuda_rt_lib_paths = ['lib/x64/cudart.lib']
    elif is_linux():
      cuda_rt_lib_paths = [
          '%s/libcudart.so.%s' % (x, tf_cuda_version) for x in [
              'lib64',
              'lib/powerpc64le-linux-gnu',
              'lib/x86_64-linux-gnu',
          ]
      ]
    elif is_macos():
      cuda_rt_lib_paths = ['lib/libcudart.%s.dylib' % tf_cuda_version]

    cuda_toolkit_paths_full = [
        os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths
    ]
    if any([os.path.exists(x) for x in cuda_toolkit_paths_full]):
      break

    # Reset and retry
    print('Invalid path to CUDA %s toolkit. %s cannot be found' %
          (tf_cuda_version, cuda_toolkit_paths_full))
    environ_cp['TF_CUDA_VERSION'] = ''
    environ_cp['CUDA_TOOLKIT_PATH'] = ''

  else:
    # for/else: every attempt failed.
    raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION
  environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
  write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path)
  environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
  write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version)
def set_tf_cudnn_version(environ_cp):
  """Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION.

  Looks for libcudnn under the user-supplied directory in platform-specific
  layouts and, on Linux, additionally falls back to whatever ldconfig knows.

  Raises:
    UserInputError: after too many invalid attempts.
  """
  ask_cudnn_version = (
      'Please specify the cuDNN version you want to use. '
      '[Leave empty to default to cuDNN %s.0]: ') % _DEFAULT_CUDNN_VERSION

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    tf_cudnn_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version,
        _DEFAULT_CUDNN_VERSION)
    tf_cudnn_version = reformat_version_sequence(str(tf_cudnn_version), 1)

    # cuDNN commonly lives inside the CUDA toolkit directory.
    default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH')
    ask_cudnn_path = (r'Please specify the location where cuDNN %s library is '
                      'installed. Refer to README.md for more details. [Default'
                      ' is %s]: ') % (tf_cudnn_version, default_cudnn_path)
    cudnn_install_path = get_from_env_or_user_or_default(
        environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path)

    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    cudnn_install_path = os.path.realpath(
        os.path.expanduser(cudnn_install_path))
    if is_windows() or is_cygwin():
      cudnn_install_path = cygpath(cudnn_install_path)

    # Primary and alternative library locations per platform.
    if is_windows():
      cuda_dnn_lib_path = 'lib/x64/cudnn.lib'
      cuda_dnn_lib_alt_path = 'lib/x64/cudnn.lib'
    elif is_linux():
      cuda_dnn_lib_path = 'lib64/libcudnn.so.%s' % tf_cudnn_version
      cuda_dnn_lib_alt_path = 'libcudnn.so.%s' % tf_cudnn_version
    elif is_macos():
      cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version
      cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version

    cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path)
    cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path,
                                              cuda_dnn_lib_alt_path)
    if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists(
        cuda_dnn_lib_alt_path_full):
      break

    # Try another alternative for Linux
    if is_linux():
      ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
      cudnn_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
      cudnn_path_from_ldconfig = re.search('.*libcudnn.so .* => (.*)',
                                           cudnn_path_from_ldconfig)
      if cudnn_path_from_ldconfig:
        cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1)
        if os.path.exists(
            '%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version)):
          cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig)
          break

    # Reset and Retry
    print(
        'Invalid path to cuDNN %s toolkit. None of the following files can be '
        'found:' % tf_cudnn_version)
    print(cuda_dnn_lib_path_full)
    print(cuda_dnn_lib_alt_path_full)
    if is_linux():
      # NOTE(review): cudnn_path_from_ldconfig may be None here (when the
      # ldconfig output did not match), in which case this prints 'None.<ver>'.
      print('%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version))

    environ_cp['TF_CUDNN_VERSION'] = ''
  else:
    # for/else: every attempt failed.
    raise UserInputError('Invalid TF_CUDNN setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION
  environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path
  write_action_env_to_bazelrc('CUDNN_INSTALL_PATH', cudnn_install_path)
  environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
  write_action_env_to_bazelrc('TF_CUDNN_VERSION', tf_cudnn_version)
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
  """Check whether a shared library links against matching CUDA/cuDNN.

  Runs `ldd` on `lib` and compares any libcudart / libcudnn versions it
  depends on against cuda_ver / cudnn_ver (both convert_version_to_int
  integers). A library with no such dependency counts as compatible.
  """
  ldd_bin = which('ldd') or '/usr/bin/ldd'

  ldd_out = run_shell([ldd_bin, lib], True)
  ldd_out = ldd_out.split(os.linesep)
  cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
  cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
  cudnn = None
  cudart = None
  cudnn_ok = True  # assume no cudnn dependency by default
  cuda_ok = True  # assume no cuda dependency by default
  for line in ldd_out:
    if 'libcudnn.so' in line:
      cudnn = cudnn_pattern.search(line)
      cudnn_ok = False
    elif 'libcudart.so' in line:
      cudart = cuda_pattern.search(line)
      cuda_ok = False
  # Only a versioned dependency (non-empty capture group) is converted to an
  # int for comparison; otherwise the *_ok flag set above stands.
  if cudnn and len(cudnn.group(1)):
    cudnn = convert_version_to_int(cudnn.group(1))
  if cudart and len(cudart.group(1)):
    cudart = convert_version_to_int(cudart.group(1))
  if cudnn is not None:
    cudnn_ok = (cudnn == cudnn_ver)
  if cudart is not None:
    cuda_ok = (cudart == cuda_ver)
  return cudnn_ok and cuda_ok
def set_tf_tensorrt_install_path(environ_cp):
  """Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION.

  Searches the user-supplied directory (and its lib/lib64 subdirectories),
  then ldconfig, for the newest libnvinfer linked against the previously
  selected CUDA and cuDNN versions.

  Raises:
    ValueError: when called on a non-Linux platform.
    UserInputError: after too many invalid attempts.
  """
  if not is_linux():
    raise ValueError('Currently TensorRT is only supported on Linux platform.')

  # Ask user whether to add TensorRT support.
  if str(int(get_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT',
                     False))) != '1':
    return

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    ask_tensorrt_path = (r'Please specify the location where TensorRT is '
                         'installed. [Default is %s]:') % (
                             _DEFAULT_TENSORRT_PATH_LINUX)
    trt_install_path = get_from_env_or_user_or_default(
        environ_cp, 'TENSORRT_INSTALL_PATH', ask_tensorrt_path,
        _DEFAULT_TENSORRT_PATH_LINUX)

    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    trt_install_path = os.path.realpath(os.path.expanduser(trt_install_path))

    def find_libs(search_path):
      """Return all libnvinfer.so* files directly under search_path."""
      fl = set()
      if os.path.exists(search_path) and os.path.isdir(search_path):
        fl.update([
            os.path.realpath(os.path.join(search_path, x))
            for x in os.listdir(search_path)
            if 'libnvinfer.so' in x
        ])
      return fl

    possible_files = find_libs(trt_install_path)
    possible_files.update(find_libs(os.path.join(trt_install_path, 'lib')))
    possible_files.update(find_libs(os.path.join(trt_install_path, 'lib64')))
    cuda_ver = convert_version_to_int(environ_cp['TF_CUDA_VERSION'])
    cudnn_ver = convert_version_to_int(environ_cp['TF_CUDNN_VERSION'])
    # BUGFIX: dots are now escaped so the pattern matches a literal '.so.'
    # instead of any two characters.
    nvinfer_pattern = re.compile('.*libnvinfer\\.so\\.?(.*)$')
    highest_ver = [0, None, None]  # [version int, version string, lib path]

    # Keep the highest CUDA/cuDNN-compatible libnvinfer version found.
    for lib_file in possible_files:
      if is_cuda_compatible(lib_file, cuda_ver, cudnn_ver):
        matches = nvinfer_pattern.search(lib_file)
        if len(matches.groups()) == 0:
          continue
        ver_str = matches.group(1)
        ver = convert_version_to_int(ver_str) if len(ver_str) else 0
        if ver > highest_ver[0]:
          highest_ver = [ver, ver_str, lib_file]
    if highest_ver[1] is not None:
      trt_install_path = os.path.dirname(highest_ver[2])
      tf_tensorrt_version = highest_ver[1]
      break

    # Try another alternative from ldconfig.
    ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
    ldconfig_output = run_shell([ldconfig_bin, '-p'])
    search_result = re.search('.*libnvinfer.so\\.?([0-9.]*).* => (.*)',
                              ldconfig_output)
    if search_result:
      libnvinfer_path_from_ldconfig = search_result.group(2)
      if os.path.exists(libnvinfer_path_from_ldconfig):
        if is_cuda_compatible(libnvinfer_path_from_ldconfig, cuda_ver,
                              cudnn_ver):
          trt_install_path = os.path.dirname(libnvinfer_path_from_ldconfig)
          tf_tensorrt_version = search_result.group(1)
          break

    # Reset and Retry
    if possible_files:
      # BUGFIX: message previously read "found in one the following".
      print('TensorRT libraries found in one of the following directories',
            'are not compatible with selected cuda and cudnn installations')
      print(trt_install_path)
      print(os.path.join(trt_install_path, 'lib'))
      print(os.path.join(trt_install_path, 'lib64'))
      if search_result:
        print(libnvinfer_path_from_ldconfig)
    else:
      print(
          'Invalid path to TensorRT. None of the following files can be found:')
      print(trt_install_path)
      print(os.path.join(trt_install_path, 'lib'))
      print(os.path.join(trt_install_path, 'lib64'))
      if search_result:
        print(libnvinfer_path_from_ldconfig)

  else:
    # for/else: every attempt failed.
    raise UserInputError('Invalid TF_TENSORRT setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION
  environ_cp['TENSORRT_INSTALL_PATH'] = trt_install_path
  write_action_env_to_bazelrc('TENSORRT_INSTALL_PATH', trt_install_path)
  environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
  write_action_env_to_bazelrc('TF_TENSORRT_VERSION', tf_tensorrt_version)
def set_tf_nccl_install_path(environ_cp):
  """Set NCCL_INSTALL_PATH and TF_NCCL_VERSION.

  NCCL 1 is fetched automatically from GitHub, so only version 2+ requires a
  local install path to be located and validated (libnccl + nccl.h).

  Raises:
    ValueError: when called on a non-Linux platform.
    UserInputError: after too many invalid attempts.
  """
  if not is_linux():
    raise ValueError('Currently NCCL is only supported on Linux platforms.')

  ask_nccl_version = (
      'Please specify the NCCL version you want to use. If NCCL %s is not '
      'installed, then you can use version 1.3 that can be fetched '
      'automatically but it may have worse performance with multiple GPUs. '
      '[Default is %s]: ') % (_DEFAULT_NCCL_VERSION, _DEFAULT_NCCL_VERSION)

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    tf_nccl_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_NCCL_VERSION', ask_nccl_version, _DEFAULT_NCCL_VERSION)
    tf_nccl_version = reformat_version_sequence(str(tf_nccl_version), 1)

    if tf_nccl_version == '1':
      break  # No need to get install path, NCCL 1 is a GitHub repo.

    # TODO(csigg): Look with ldconfig first if we can find the library in paths
    # like /usr/lib/x86_64-linux-gnu and the header file in the corresponding
    # include directory. This is where the NCCL .deb packages install them.
    # Then ask the user if we should use that. Instead of a single
    # NCCL_INSTALL_PATH, pass separate NCCL_LIB_PATH and NCCL_HDR_PATH to
    # nccl_configure.bzl
    default_nccl_path = environ_cp.get('CUDA_TOOLKIT_PATH')
    ask_nccl_path = (r'Please specify the location where NCCL %s library is '
                     'installed. Refer to README.md for more details. [Default '
                     'is %s]:') % (tf_nccl_version, default_nccl_path)
    nccl_install_path = get_from_env_or_user_or_default(
        environ_cp, 'NCCL_INSTALL_PATH', ask_nccl_path, default_nccl_path)

    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    nccl_install_path = os.path.realpath(os.path.expanduser(nccl_install_path))
    if is_windows() or is_cygwin():
      nccl_install_path = cygpath(nccl_install_path)

    # Platform-specific library filename to validate.
    if is_windows():
      nccl_lib_path = 'lib/x64/nccl.lib'
    elif is_linux():
      nccl_lib_path = 'lib/libnccl.so.%s' % tf_nccl_version
    elif is_macos():
      nccl_lib_path = 'lib/libnccl.%s.dylib' % tf_nccl_version

    nccl_lib_path = os.path.join(nccl_install_path, nccl_lib_path)
    nccl_hdr_path = os.path.join(nccl_install_path, 'include/nccl.h')
    if os.path.exists(nccl_lib_path) and os.path.exists(nccl_hdr_path):
      # Set NCCL_INSTALL_PATH
      environ_cp['NCCL_INSTALL_PATH'] = nccl_install_path
      write_action_env_to_bazelrc('NCCL_INSTALL_PATH', nccl_install_path)
      break

    # Reset and Retry
    print('Invalid path to NCCL %s toolkit, %s or %s not found. Please use the '
          'O/S agnostic package of NCCL 2' % (tf_nccl_version, nccl_lib_path,
                                              nccl_hdr_path))
    environ_cp['TF_NCCL_VERSION'] = ''
  else:
    # for/else: every attempt failed.
    raise UserInputError('Invalid TF_NCCL setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set TF_NCCL_VERSION
  environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
  write_action_env_to_bazelrc('TF_NCCL_VERSION', tf_nccl_version)
def get_native_cuda_compute_capabilities(environ_cp):
  """Detect the local GPUs' compute capabilities via deviceQuery.

  Runs the CUDA demo_suite deviceQuery binary (when present and executable)
  and extracts a comma-separated list of 'major.minor' capability strings
  from its output. Returns '' when the binary is missing or fails.
  """
  device_query_bin = os.path.join(
      environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
  if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
    try:
      output = run_shell(device_query_bin).split('\n')
      # Keep only the 'Capability' lines and pull the numeric version out.
      pattern = re.compile('[0-9]*\\.[0-9]*')
      output = [pattern.search(x) for x in output if 'Capability' in x]
      output = ','.join(x.group() for x in output if x is not None)
    except subprocess.CalledProcessError:
      output = ''
  else:
    output = ''
  return output
def set_tf_cuda_compute_capabilities(environ_cp):
  """Set TF_CUDA_COMPUTE_CAPABILITIES.

  Prompts (or reads from the environment) for a comma-separated list of CUDA
  compute capabilities, validates every entry, and records the accepted list
  in both environ_cp and the generated bazelrc. The default is detected from
  the local GPUs when possible.
  """
  while True:
    native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
        environ_cp)
    if not native_cuda_compute_capabilities:
      default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
    else:
      default_cuda_compute_capabilities = native_cuda_compute_capabilities

    ask_cuda_compute_capabilities = (
        'Please specify a list of comma-separated '
        'Cuda compute capabilities you want to '
        'build with.\nYou can find the compute '
        'capability of your device at: '
        'https://developer.nvidia.com/cuda-gpus.\nPlease'
        ' note that each additional compute '
        'capability significantly increases your '
        'build time and binary size. [Default is: %s]: ' %
        default_cuda_compute_capabilities)
    tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
        ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
    # Check whether all capabilities from the input is valid
    all_valid = True
    # Remove all whitespace characters before splitting the string
    # that users may insert by accident, as this will result in error
    tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
    for compute_capability in tf_cuda_compute_capabilities.split(','):
      # BUGFIX: the dot is now escaped — the previous pattern '[0-9]+.[0-9]+'
      # accepted strings like '3x5', which then crashed int() below.
      m = re.match('[0-9]+\\.[0-9]+', compute_capability)
      if not m:
        # BUGFIX: the format string was missing its %s placeholder, so this
        # line raised TypeError instead of printing the offending value.
        print('Invalid compute capability: %s' % compute_capability)
        all_valid = False
      else:
        ver = int(m.group(0).split('.')[0])
        if ver < 3:
          print('Only compute capabilities 3.0 or higher are supported.')
          all_valid = False

    if all_valid:
      break

    # Reset and Retry
    environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''

  # Set TF_CUDA_COMPUTE_CAPABILITIES
  environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
  write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
                              tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
  """Select the bazel config for CUDA builds and tests.

  If CUDA is enabled, always use GPU during build and test; the config name
  depends on whether clang was chosen as the CUDA compiler.
  """
  config = 'cuda_clang' if environ_cp.get('TF_CUDA_CLANG') == '1' else 'cuda'
  write_to_bazelrc('build --config=%s' % config)
  write_to_bazelrc('test --config=%s' % config)
def set_host_cxx_compiler(environ_cp):
    """Ask for (or load from env) HOST_CXX_COMPILER and record it in the bazelrc."""
    chosen = prompt_loop_or_load_from_env(
        environ_cp,
        var_name='HOST_CXX_COMPILER',
        # Default to whatever g++ is on PATH, if any.
        var_default=which('g++') or '',
        ask_for_var=('Please specify which C++ compiler should be used as the '
                     'host C++ compiler.'),
        check_success=os.path.exists,
        error_msg='Invalid C++ compiler path. %s cannot be found.',
    )
    write_action_env_to_bazelrc('HOST_CXX_COMPILER', chosen)
def set_host_c_compiler(environ_cp):
    """Ask for (or load from env) HOST_C_COMPILER and record it in the bazelrc."""
    chosen = prompt_loop_or_load_from_env(
        environ_cp,
        var_name='HOST_C_COMPILER',
        # Default to whatever gcc is on PATH, if any.
        var_default=which('gcc') or '',
        ask_for_var=('Please specify which C compiler should be used as the host '
                     'C compiler.'),
        check_success=os.path.exists,
        error_msg='Invalid C compiler path. %s cannot be found.',
    )
    write_action_env_to_bazelrc('HOST_C_COMPILER', chosen)
def set_computecpp_toolkit_path(environ_cp):
    """Ask for (or load from env) COMPUTECPP_TOOLKIT_PATH and record it."""

    def toolkit_exists(toolkit_path):
        # On Linux the toolkit must ship the ComputeCpp runtime library;
        # on other platforms only the directory itself is probed.
        lib_rel_path = 'lib/libComputeCpp.so' if is_linux() else ''
        lib_path = os.path.join(toolkit_path, lib_rel_path)
        if os.path.exists(lib_path):
            return True
        print('Invalid SYCL %s library path. %s cannot be found' %
              (_TF_OPENCL_VERSION, lib_path))
        return False

    computecpp_toolkit_path = prompt_loop_or_load_from_env(
        environ_cp,
        var_name='COMPUTECPP_TOOLKIT_PATH',
        var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
        ask_for_var=(
            'Please specify the location where ComputeCpp for SYCL %s is '
            'installed.' % _TF_OPENCL_VERSION),
        check_success=toolkit_exists,
        error_msg='Invalid SYCL compiler path. %s cannot be found.',
        suppress_default_error=True)
    write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
                                computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
    """Prompt until a valid triSYCL include directory is given, then record it."""
    prompt = ('Please specify the location of the triSYCL '
              'include directory. (Use --config=sycl_trisycl '
              'when building with Bazel) '
              '[Default is %s]: ') % (_DEFAULT_TRISYCL_INCLUDE_DIR)
    while True:
        trisycl_include_dir = get_from_env_or_user_or_default(
            environ_cp, 'TRISYCL_INCLUDE_DIR', prompt,
            _DEFAULT_TRISYCL_INCLUDE_DIR)
        if os.path.exists(trisycl_include_dir):
            break
        # Keep asking until the directory actually exists.
        print('Invalid triSYCL include directory, %s cannot be found' %
              (trisycl_include_dir))
    # Persist the answer both in the environment copy and the bazelrc.
    environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
    write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def set_mpi_home(environ_cp):
    """Prompt for (or load) MPI_HOME and validate the chosen toolkit folder.

    The default is derived from the location of ``mpirun``/``mpiexec`` on
    PATH (two directories up from the binary), or '' if neither is found.
    """
    default_mpi_home = which('mpirun') or which('mpiexec') or ''
    default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))

    def valid_mpi_path(mpi_home):
        # A usable MPI toolkit must provide both include/ and lib/.
        exists = (
            os.path.exists(os.path.join(mpi_home, 'include')) and
            os.path.exists(os.path.join(mpi_home, 'lib')))
        if not exists:
            # Bug fix: the original printed the *boolean* result of
            # os.path.exists() for the lib directory instead of the path.
            print('Invalid path to the MPI Toolkit. %s or %s cannot be found' %
                  (os.path.join(mpi_home, 'include'),
                   os.path.join(mpi_home, 'lib')))
        return exists

    _ = prompt_loop_or_load_from_env(
        environ_cp,
        var_name='MPI_HOME',
        var_default=default_mpi_home,
        ask_for_var='Please specify the MPI toolkit folder.',
        check_success=valid_mpi_path,
        error_msg='',
        suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
    """Expose the chosen MPI installation to the build tree via symlinks.

    Raises:
        ValueError: if lib/libmpi.so is missing from MPI_HOME.
    """
    mpi_home = environ_cp.get('MPI_HOME')
    # Every MPI flavour provides mpi.h.
    symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
    # OpenMPI and MVAPICH/MPICH need different extra headers linked in to
    # make bazel's dependency checker happy.
    if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
        # OpenMPI layout.
        symlink_force(
            os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
            'third_party/mpi/mpi_portable_platform.h')
        # TODO(gunan): avoid editing files in configure
        sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
                     'MPI_LIB_IS_OPENMPI=True')
    else:
        # MVAPICH / MPICH layout.
        symlink_force(
            os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
        symlink_force(
            os.path.join(mpi_home, 'include/mpicxx.h'),
            'third_party/mpi/mpicxx.h')
        # TODO(gunan): avoid editing files in configure
        sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
                     'MPI_LIB_IS_OPENMPI=False')
    lib_path = os.path.join(mpi_home, 'lib/libmpi.so')
    if not os.path.exists(lib_path):
        raise ValueError('Cannot find the MPI library file in %s/lib' % mpi_home)
    symlink_force(lib_path, 'third_party/mpi/libmpi.so')
def set_system_libs_flag(environ_cp):
    """Record TF_SYSTEM_LIBS and install-prefix defines in the bazelrc."""
    syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
    if syslibs:
        # Accept either a comma- or whitespace-separated list and normalise
        # it to a sorted comma-separated list.
        sep = ',' if ',' in syslibs else None
        write_action_env_to_bazelrc(
            'TF_SYSTEM_LIBS', ','.join(sorted(syslibs.split(sep))))
    # Forward conventional install-prefix variables to the build, if set.
    for var in ('PREFIX', 'LIBDIR', 'INCLUDEDIR'):
        if var in environ_cp:
            write_to_bazelrc('build --define=%s=%s' % (var, environ_cp[var]))
def set_windows_build_flags(environ_cp):
    """Emit the Windows-specific options into the generated bazelrc."""
    for line in (
        # Only the monolithic build is supported on Windows so far.
        'build --config monolithic',
        # Suppress compiler warning messages.
        'build --copt=-w --host_copt=-w',
        # Output more verbose information when something goes wrong.
        'build --verbose_failures',
        # Host and target platforms are identical on Windows, so a distinct
        # host configuration is unnecessary.
        'build --distinct_host_configuration=false',
        'build --experimental_shortened_obj_file_path=true',
        # Don't pull TensorFlow python deps into the build. This is for:
        # 1. Running python tests against the system installed TF pip package.
        # 2. Avoiding redundant files in
        #    //tensorflow/tools/pip_package:simple_console_windows,
        #    which is a py_binary used during creating TF pip package.
        # See https://github.com/tensorflow/tensorflow/issues/22390
        'build --define=no_tensorflow_py_deps=true',
    ):
        write_to_bazelrc(line)
    if get_var(
        environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
        True, ('Would you like to override eigen strong inline for some C++ '
               'compilation to reduce the compilation time?'),
        'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
        'some compilations could take more than 20 mins.'):
        # Due to a known MSVC compiler issue
        # https://github.com/tensorflow/tensorflow/issues/10521
        # overriding Eigen strong inline speeds up compiling
        # conv_grad_ops_3d.cc and conv_ops_3d.cc by ~20 minutes, at some
        # run-time performance cost. Let users decide what they want.
        write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
    """Print one '--config=<name>' helper line for the post-configure summary.

    The print line was truncated mid-string-literal in this copy of the
    file; restored to the tab-aligned '--config=<name>  # <help>' form
    used by the summary printed at the end of main().
    """
    print('\t--config=%-12s\t# %s' % (name, help_text))
def main():
    """Interactive TensorFlow configure entry point.

    Copies os.environ, asks the user (or reads TF_* environment variables)
    for every build option, and writes the answers into the generated
    bazelrc files in the chosen workspace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--workspace',
        type=str,
        default=_TF_WORKSPACE_ROOT,
        help='The absolute path to your active Bazel workspace.')
    args = parser.parse_args()
    # Make a copy of os.environ to be clear when functions and getting and
    # setting environment variables.
    environ_cp = dict(os.environ)
    check_bazel_version('0.15.0')
    reset_tf_configure_bazelrc(args.workspace)
    cleanup_makefile()
    setup_python(environ_cp)
    if is_windows():
        # Features not supported on Windows are forced off up front so the
        # user is never asked about them.
        environ_cp['TF_NEED_AWS'] = '0'
        environ_cp['TF_NEED_GCP'] = '0'
        environ_cp['TF_NEED_HDFS'] = '0'
        environ_cp['TF_NEED_JEMALLOC'] = '0'
        environ_cp['TF_NEED_KAFKA'] = '0'
        environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
        environ_cp['TF_NEED_COMPUTECPP'] = '0'
        environ_cp['TF_NEED_OPENCL'] = '0'
        environ_cp['TF_CUDA_CLANG'] = '0'
        environ_cp['TF_NEED_TENSORRT'] = '0'
        # TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler
        # on Windows.
        environ_cp['TF_DOWNLOAD_CLANG'] = '0'
        environ_cp['TF_ENABLE_XLA'] = '0'
        environ_cp['TF_NEED_MPI'] = '0'
        environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
    if is_macos():
        environ_cp['TF_NEED_JEMALLOC'] = '0'
        environ_cp['TF_NEED_TENSORRT'] = '0'
    # The numpy package on ppc64le uses OpenBLAS which has multi-threading
    # issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
    # runtime to allow the Tensorflow testcases which compare numpy
    # results to Tensorflow results to succeed.
    if is_ppc64le():
        write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)

    # Optional feature toggles.
    set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc',
                  'with_jemalloc', True)
    set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform',
                  'with_gcp_support', True, 'gcp')
    set_build_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System',
                  'with_hdfs_support', True, 'hdfs')
    set_build_var(environ_cp, 'TF_NEED_AWS', 'Amazon AWS Platform',
                  'with_aws_support', True, 'aws')
    set_build_var(environ_cp, 'TF_NEED_KAFKA', 'Apache Kafka Platform',
                  'with_kafka_support', True, 'kafka')
    set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
                  False, 'xla')

    # OpenCL SYCL: needs host compilers plus either ComputeCpp or triSYCL.
    set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False)
    if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
        set_host_cxx_compiler(environ_cp)
        set_host_c_compiler(environ_cp)
        set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
        if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
            set_computecpp_toolkit_path(environ_cp)
        else:
            set_trisycl_include_dir(environ_cp)

    set_action_env_var(environ_cp, 'TF_NEED_ROCM', 'ROCm', False)
    if (environ_cp.get('TF_NEED_ROCM') == '1' and
            'LD_LIBRARY_PATH' in environ_cp and
            environ_cp.get('LD_LIBRARY_PATH') != '1'):
        write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                    environ_cp.get('LD_LIBRARY_PATH'))

    set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
    if (environ_cp.get('TF_NEED_CUDA') == '1' and
            'TF_CUDA_CONFIG_REPO' not in environ_cp):
        set_tf_cuda_version(environ_cp)
        set_tf_cudnn_version(environ_cp)
        if is_linux():
            set_tf_tensorrt_install_path(environ_cp)
            set_tf_nccl_install_path(environ_cp)
        set_tf_cuda_compute_capabilities(environ_cp)
        if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
                'LD_LIBRARY_PATH') != '1':
            write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                        environ_cp.get('LD_LIBRARY_PATH'))
        set_tf_cuda_clang(environ_cp)
        if environ_cp.get('TF_CUDA_CLANG') == '1':
            # Ask whether we should download the clang toolchain.
            set_tf_download_clang(environ_cp)
            if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
                # Set up which clang we should use as the cuda / host compiler.
                set_clang_cuda_compiler_path(environ_cp)
            else:
                # Use downloaded LLD for linking.
                write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
                write_to_bazelrc('test:cuda_clang --config=download_clang_use_lld')
        else:
            # Set up which gcc nvcc should use as the host compiler
            # No need to set this on Windows
            if not is_windows():
                set_gcc_host_compiler_path(environ_cp)
        set_other_cuda_vars(environ_cp)
    else:
        # CUDA not required. Ask whether we should download the clang
        # toolchain and use it for the CPU build.
        set_tf_download_clang(environ_cp)
        if environ_cp.get('TF_DOWNLOAD_CLANG') == '1':
            write_to_bazelrc('build --config=download_clang')
            write_to_bazelrc('test --config=download_clang')

    # SYCL / ROCm / CUDA are mutually exclusive.
    # At most 1 GPU platform can be configured.
    gpu_platform_count = 0
    if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
        gpu_platform_count += 1
    if environ_cp.get('TF_NEED_ROCM') == '1':
        gpu_platform_count += 1
    if environ_cp.get('TF_NEED_CUDA') == '1':
        gpu_platform_count += 1
    if gpu_platform_count >= 2:
        raise UserInputError('SYCL / CUDA / ROCm are mututally exclusive. '
                             'At most 1 GPU platform can be configured.')

    set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
    if environ_cp.get('TF_NEED_MPI') == '1':
        set_mpi_home(environ_cp)
        set_other_mpi_vars(environ_cp)

    set_cc_opt_flags(environ_cp)
    set_system_libs_flag(environ_cp)
    if is_windows():
        set_windows_build_flags(environ_cp)

    # Add a config option to build TensorFlow 2.0 API.
    write_to_bazelrc('build:v2 --define=tf_api_version=2')

    if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
               ('Would you like to interactively configure ./WORKSPACE for '
                'Android builds?'), 'Searching for NDK and SDK installations.',
               'Not configuring the WORKSPACE for Android builds.'):
        create_android_ndk_rule(environ_cp)
        create_android_sdk_rule(environ_cp)

    # On Windows, we don't have MKL support and the build is always monolithic.
    if not is_windows():
        print('Preconfigured Bazel build configs. You can use any of the below by '
              'adding "--config=<>" to your build command. See tools/bazel.rc for '
              'more details.')
        config_info_line('mkl', 'Build with MKL support.')
        config_info_line('monolithic', 'Config for mostly static monolithic build.')
        config_info_line('gdr', 'Build with GDR support.')
        config_info_line('verbs', 'Build with libverbs support.')
        config_info_line('ngraph', 'Build with Intel nGraph support.')


if __name__ == '__main__':
    main()
| true | true |
f71caae6f9c23667ccfce560a4892f8c3a10bf60 | 7,955 | py | Python | utils/dataset_preprocess.py | eliasyin/LCF-ATEPC | 83ae8a729b617ae34f562e5f52b62cb366dcc103 | [
"MIT"
] | 137 | 2019-12-18T15:38:18.000Z | 2022-03-26T15:26:19.000Z | utils/dataset_preprocess.py | eliasyin/LCF-ATEPC | 83ae8a729b617ae34f562e5f52b62cb366dcc103 | [
"MIT"
] | 45 | 2019-12-20T08:24:12.000Z | 2022-03-31T12:43:19.000Z | utils/dataset_preprocess.py | eliasyin/LCF-ATEPC | 83ae8a729b617ae34f562e5f52b62cb366dcc103 | [
"MIT"
] | 34 | 2020-01-03T02:59:18.000Z | 2022-03-30T01:44:09.000Z | import os
import copy
def is_similar(s1, s2):
    """Return True when the two sentences overlap enough to be 'the same'.

    Counts how many space-separated tokens of s1 occur (as substrings) in
    s2, and requires that count to reach 70% of the token count of BOTH
    sentences.
    """
    shared = sum(1 for token in s1.split(' ') if token in s2)
    return (shared / len(s1.split(' ')) >= 0.7 and
            shared / len(s2.split(' ')) >= 0.7)
def assemble_aspects(fname):
    """Read a 3-line-per-record .seg file and merge same-sentence records.

    Each record is (sentence with '$T$' placeholder, aspect term, polarity).
    Consecutive records whose sentences are similar (per is_similar) are
    merged into per-token annotations.

    :param fname: path to the .seg file
    :return: list of [text, IOB tags, per-token polarities] samples
    """
    fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
    lines = fin.readlines()
    fin.close()
    # Normalise the aspect placeholder and strip surrounding whitespace.
    for i in range(len(lines)):
        lines[i] = lines[i].replace('$ t $', '$T$').strip()

    def unify_same_samples(same_samples):
        # Rebuild the full sentence from the first record's template + aspect.
        text = same_samples[0][0].replace('$T$', same_samples[0][1])
        polarities = [-1] * len(text.split())
        # NOTE(review): `tags` is shared by every sample produced for this
        # sentence, so later aspects also appear in earlier samples' tags —
        # confirm this accumulation is intended.
        tags = ['O'] * len(text.split())
        samples = []
        for sample in same_samples:
            polarities_tmp = copy.deepcopy(polarities)
            try:
                asp_begin = (sample[0].split().index('$T$'))
                asp_end = sample[0].split().index('$T$') + len(sample[1].split())
                # Mark the aspect span: B-ASP on the first token, I-ASP after.
                for i in range(asp_begin, asp_end):
                    polarities_tmp[i] = int(sample[2]) + 1
                    if i - sample[0].split().index('$T$') < 1:
                        tags[i] = 'B-ASP'
                    else:
                        tags[i] = 'I-ASP'
                samples.append([text, tags, polarities_tmp])
            except:
                # NOTE(review): bare except silently drops malformed records
                # (e.g. missing '$T$'); consider logging.
                pass
        return samples

    samples = []
    aspects_in_one_sentence = []
    # Records come in blocks of three lines: sentence / aspect / polarity.
    for i in range(0, len(lines), 3):
        if len(aspects_in_one_sentence) == 0:
            aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
            continue
        if is_similar(aspects_in_one_sentence[-1][0], lines[i]):
            # Same sentence as the previous record: keep collecting.
            aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
        else:
            # New sentence: flush the collected records first.
            # NOTE(review): the final group is never flushed after the loop,
            # so the last sentence of the file is dropped — confirm intended.
            samples.extend(unify_same_samples(aspects_in_one_sentence))
            aspects_in_one_sentence = []
            aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
    return samples
def split_aspects(sentence):
    """Split a multi-aspect record into (context, aspect, polarity) triples.

    `sentence` is [template, aspects, polarities] where the template marks
    aspect slots with '$A$' and the last two fields are '|'-separated.
    """
    triples = []
    aspects = sentence[1].split("|")
    polarity = sentence[2].split("|")
    pre_position = 0
    aspect_contex = sentence[0]
    for i, aspect in enumerate(aspects):
        # Fill the next '$A$' slot with the current aspect term.
        aspect_contex = aspect_contex.replace("$A$", aspect, 1)
        triples.append(
            (aspect_contex[pre_position:aspect_contex.find("$A$")],
             aspect, polarity[i]))
        pre_position = aspect_contex.find(aspect) + len(aspect) + 1
    return triples
# Cut the aspects out of a dataset and write token-per-row output.
def refactor_dataset(fname, dist_fname):
    """Convert aspect samples from `fname` into 'token label polarity' rows."""
    out_rows = []
    for text, labels, polarities in assemble_aspects(fname):
        tokens = text.split()
        for idx in range(len(labels)):
            out_rows.append(tokens[idx] + " " + labels[idx] + " " + str(polarities[idx]))
        # Blank line separates sentences.
        out_rows.append('\n')
    # Overwrite any previous output file.
    if os.path.exists(dist_fname):
        os.remove(dist_fname)
    fout = open(dist_fname, 'w', encoding='utf8')
    for row in out_rows:
        fout.writelines((row + '\n').replace('\n\n', '\n'))
    fout.close()
# Cut the aspects out of the (Chinese) dataset and emit train/test files.
def refactor_chinese_dataset(fname, train_fname, test_fname):
    """Split a Chinese .seg dataset 1/5 test, 4/5 train in token-row format.

    :param fname: source .seg file
    :param train_fname: destination training file
    :param test_fname: destination test file
    """
    lines = []
    samples = assemble_aspects(fname)
    positive = 0
    negative = 0
    sum = 0
    # refactor testset: first fifth of the samples.
    for sample in samples[:int(len(samples) / 5)]:
        for token_index in range(len(sample[1])):
            token, label, polarty = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarty))
        lines.append('\n')
        if 1 in sample[2]:
            positive += 1
        else:
            negative += 1
        sum += 1
    print(train_fname + f"sum={sum} positive={positive} negative={negative}")
    if os.path.exists(test_fname):
        os.remove(test_fname)
    fout = open(test_fname, 'w', encoding='utf8')
    for line in lines:
        fout.writelines((line + '\n').replace('\n\n', '\n'))
    fout.close()
    positive = 0
    negative = 0
    sum = 0
    # refactor trainset: remaining samples.
    # NOTE(review): `lines` is not cleared here, so the training file is
    # written with the test rows still at its front — confirm whether this
    # test/train overlap is intended.
    for sample in samples[int(len(samples) / 5):]:
        for token_index in range(len(sample[1])):
            tokens = sample[0].split()
            token, label, polarty = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarty))
        lines.append('\n')
        if 1 in sample[2]:
            positive += 1
        else:
            negative += 1
        sum += 1
    print(train_fname + f"sum={sum} positive={positive} negative={negative}")
    if os.path.exists(train_fname):
        os.remove(train_fname)
    fout = open(train_fname, 'w', encoding='utf8')
    for line in lines:
        fout.writelines((line + '\n').replace('\n\n', '\n'))
    fout.close()
def detect_error_in_dataset(dataset):
    """Scan a 3-line-per-record .seg file and print suspicious neighbours.

    Two consecutive records whose sentences are similar (per is_similar)
    but whose reconstructed token counts differ are printed for manual
    inspection.
    """
    # Bug fix: the original never closed the file handle; use a context
    # manager so the file is released deterministically.
    with open(dataset, 'r', encoding='utf8') as f:
        lines = f.readlines()
    for i in range(0, len(lines), 3):
        if i + 3 < len(lines):
            if is_similar(lines[i], lines[i + 3]) and len((lines[i] + " " + lines[i + 1]).split()) != len((lines[i + 3] + " " + lines[i + 4]).split()):
                print(lines[i].replace('$T$', lines[i + 1].replace('\n', '')))
                print(lines[i + 3].replace('$T$', lines[i + 4].replace('\n', '')))
if __name__ == "__main__":
    # Chinese datasets (camera / car / notebook / phone) — enable as needed:
    # refactor_chinese_dataset(
    #     r"chinese_atepc_dataset/camera_output.txt",
    #     r"chinese_atepc_datasets/camera.atepc.train.dat",
    #     r"chinese_atepc_datasets/camera.atepc.test.dat",
    # )
    # refactor_chinese_dataset(
    #     r"chinese_atepc_datasets/car_output.txt",
    #     r"chinese_atepc_datasets/car.atepc.train.dat",
    #     r"chinese_atepc_datasets/car.atepc.test.dat",
    # )
    # refactor_chinese_dataset(
    #     r"chinese_atepc_datasets/notebook_output.txt",
    #     r"chinese_atepc_datasets/notebook.atepc.train.dat",
    #     r"chinese_atepc_datasets/notebook.atepc.test.dat",
    # )
    # refactor_chinese_dataset(
    #     r"chinese_atepc_datasets/phone_output.txt",
    #     r"chinese_atepc_datasets/phone.atepc.train.dat",
    #     r"chinese_atepc_datasets/phone.atepc.test.dat",
    # )

    # Dataset sanity checks — enable as needed:
    # detect_error_in_dataset(r"../datasets/semeval14/Laptops_Train.xml.seg")
    # detect_error_in_dataset(r"../datasets/semeval14/Laptops_Test_Gold.xml.seg")
    # detect_error_in_dataset(r"../datasets/semeval14/Restaurants_Train.xml.seg")
    # detect_error_in_dataset(r"../datasets/semeval14/Restaurants_Test_Gold.xml.seg")
    # detect_error_in_dataset(r"../datasets/acl-14-short-data/train.raw")

    # Laptop dataset:
    # refactor_dataset(
    #     r"../datasets/semeval14/Laptops_Train.xml.seg",
    #     r"../atepc_datasets/laptop/Laptops.atepc.train.dat",
    # )
    # refactor_dataset(
    #     r"../datasets/semeval14/Laptops_Test_Gold.xml.seg",
    #     r"../atepc_datasets/laptop/Laptops.atepc.test.dat",
    # )

    # Restaurant dataset (SemEval-14):
    refactor_dataset(
        r"../datasets/semeval14/Restaurants_Train.xml.seg",
        r"../atepc_datasets/restaurant/Restaurants.atepc.train.dat",
    )
    refactor_dataset(
        r"../datasets/semeval14/Restaurants_Test_Gold.xml.seg",
        r"../atepc_datasets/restaurant/Restaurants.atepc.test.dat",
    )

    # Twitter dataset:
    # refactor_dataset(
    #     r"../datasets/acl-14-short-data/train.raw",
    #     r"../atepc_datasets/twitter/twitter.atepc.train.dat",
    # )
    # refactor_dataset(
    #     r"../datasets/acl-14-short-data/test.raw",
    #     r"../atepc_datasets/twitter/twitter.atepc.test.dat",
# ) | 36.828704 | 136 | 0.595726 | import os
import copy
def is_similar(s1, s2):
    """Return True when >=70% of each sentence's tokens appear in the other."""
    count = 0.0
    for token in s1.split(' '):
        # Substring containment against the raw sentence, not a token match.
        if token in s2:
            count += 1
    if count / len(s1.split(' ')) >= 0.7 and count / len(s2.split(' ')) >= 0.7:
        return True
    else:
        return False


def assemble_aspects(fname):
    """Read a 3-line-per-record .seg file and merge same-sentence records."""
    fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
    lines = fin.readlines()
    fin.close()
    # Normalise the aspect placeholder and strip surrounding whitespace.
    for i in range(len(lines)):
        lines[i] = lines[i].replace('$ t $', '$T$').strip()

    def unify_same_samples(same_samples):
        # Rebuild the sentence from the first record's template + aspect.
        text = same_samples[0][0].replace('$T$', same_samples[0][1])
        polarities = [-1] * len(text.split())
        # NOTE(review): `tags` is shared by all samples of this sentence.
        tags = ['O'] * len(text.split())
        samples = []
        for sample in same_samples:
            polarities_tmp = copy.deepcopy(polarities)
            try:
                asp_begin = (sample[0].split().index('$T$'))
                asp_end = sample[0].split().index('$T$') + len(sample[1].split())
                for i in range(asp_begin, asp_end):
                    polarities_tmp[i] = int(sample[2]) + 1
                    if i - sample[0].split().index('$T$') < 1:
                        tags[i] = 'B-ASP'
                    else:
                        tags[i] = 'I-ASP'
                samples.append([text, tags, polarities_tmp])
            except:
                # Malformed records (e.g. missing '$T$') are silently skipped.
                pass
        return samples

    samples = []
    aspects_in_one_sentence = []
    # Records come in blocks of three lines: sentence / aspect / polarity.
    for i in range(0, len(lines), 3):
        if len(aspects_in_one_sentence) == 0:
            aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
            continue
        if is_similar(aspects_in_one_sentence[-1][0], lines[i]):
            aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
        else:
            samples.extend(unify_same_samples(aspects_in_one_sentence))
            aspects_in_one_sentence = []
            aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
    return samples


def split_aspects(sentence):
    """Split a '$A$'-templated record into (context, aspect, polarity) triples."""
    single_aspect_with_contex = []
    aspect_num = len(sentence[1].split("|"))
    aspects = sentence[1].split("|")
    polarity = sentence[2].split("|")
    pre_position = 0
    aspect_contex = sentence[0]
    for i in range(aspect_num):
        aspect_contex = aspect_contex.replace("$A$", aspects[i], 1)
        single_aspect_with_contex.append(
            (aspect_contex[pre_position:aspect_contex.find("$A$")], aspects[i], polarity[i]))
        pre_position = aspect_contex.find(aspects[i]) + len(aspects[i]) + 1
    return single_aspect_with_contex


def refactor_dataset(fname, dist_fname):
    """Write per-token 'token label polarity' rows for every sample."""
    lines = []
    samples = assemble_aspects(fname)
    for sample in samples:
        for token_index in range(len(sample[1])):
            token, label, polarty = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarty))
        lines.append('\n')
    # Overwrite any existing destination file.
    if os.path.exists(dist_fname):
        os.remove(dist_fname)
    fout = open(dist_fname, 'w', encoding='utf8')
    for line in lines:
        fout.writelines((line + '\n').replace('\n\n', '\n'))
    fout.close()


def refactor_chinese_dataset(fname, train_fname, test_fname):
    """Split a Chinese dataset into 1/5 test and 4/5 train token-row files."""
    lines = []
    samples = assemble_aspects(fname)
    positive = 0
    negative = 0
    sum = 0
    # First fifth of the samples: test set.
    for sample in samples[:int(len(samples) / 5)]:
        for token_index in range(len(sample[1])):
            token, label, polarty = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarty))
        lines.append('\n')
        if 1 in sample[2]:
            positive += 1
        else:
            negative += 1
        sum += 1
    print(train_fname + f"sum={sum} positive={positive} negative={negative}")
    if os.path.exists(test_fname):
        os.remove(test_fname)
    fout = open(test_fname, 'w', encoding='utf8')
    for line in lines:
        fout.writelines((line + '\n').replace('\n\n', '\n'))
    fout.close()
    positive = 0
    negative = 0
    sum = 0
    # Remaining samples: train set.
    # NOTE(review): `lines` still holds the test rows here, so they are
    # written into the train file as well — confirm intended.
    for sample in samples[int(len(samples) / 5):]:
        for token_index in range(len(sample[1])):
            tokens = sample[0].split()
            token, label, polarty = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarty))
        lines.append('\n')
        if 1 in sample[2]:
            positive += 1
        else:
            negative += 1
        sum += 1
    print(train_fname + f"sum={sum} positive={positive} negative={negative}")
    if os.path.exists(train_fname):
        os.remove(train_fname)
    fout = open(train_fname, 'w', encoding='utf8')
    for line in lines:
        fout.writelines((line + '\n').replace('\n\n', '\n'))
    fout.close()


def detect_error_in_dataset(dataset):
    """Print adjacent similar records whose token counts disagree."""
    f = open(dataset, 'r', encoding='utf8')
    lines = f.readlines()
    for i in range(0, len(lines), 3):
        if i + 3 < len(lines):
            if is_similar(lines[i], lines[i + 3]) and len((lines[i] + " " + lines[i + 1]).split()) != len((lines[i + 3] + " " + lines[i + 4]).split()):
                print(lines[i].replace('$T$', lines[i + 1].replace('\n', '')))
                print(lines[i + 3].replace('$T$', lines[i + 4].replace('\n', '')))


if __name__ == "__main__":
    # Restaurant dataset (SemEval-14).
    refactor_dataset(
        r"../datasets/semeval14/Restaurants_Train.xml.seg",
        r"../atepc_datasets/restaurant/Restaurants.atepc.train.dat",
    )
    refactor_dataset(
        r"../datasets/semeval14/Restaurants_Test_Gold.xml.seg",
        r"../atepc_datasets/restaurant/Restaurants.atepc.test.dat",
    )
| true | true |
f71cab1867cc22a6cea57f7a9832a1702c206111 | 2,746 | py | Python | makbe/expanders/tca9555.py | kazhida/makbe-py | b2840251118959a826fe8d3e2e84c2000dba3081 | [
"MIT"
] | null | null | null | makbe/expanders/tca9555.py | kazhida/makbe-py | b2840251118959a826fe8d3e2e84c2000dba3081 | [
"MIT"
] | 1 | 2021-11-29T08:23:50.000Z | 2021-11-29T08:23:50.000Z | makbe/expanders/tca9555.py | kazhida/makbe-py | b2840251118959a826fe8d3e2e84c2000dba3081 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2021 Kazuyuki HIDA
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .. key_switch import nop_switch
from .. import IoExpander, KeySwitch
class TCA9555(IoExpander):
    """Driver for the TCA9555 (and register-compatible PCA9555) I/O expander."""

    def __init__(self, dev_address: int):
        """Create an expander for the given address.

        The upper four address bits are fixed by the chip, so only the low
        three bits are passed in.

        :param dev_address: low 3 bits of the I2C device address
        """
        self.dev_address = 0x20 + dev_address
        # One slot per pin; unassigned pins get a no-op switch.
        self.switches = [nop_switch() for _ in range(16)]

    def init_device(self, i2c) -> bool:
        """Configure both 8-bit ports as inputs.

        :param i2c: I2C master
        :return: always True
        """
        # 0x06/0x07 are the two configuration registers; 0xFF = all inputs.
        for reg in (0x06, 0x07):
            i2c.writeto(self.dev_address, bytes([reg, 0xFF]))
        return True

    def read_device(self, i2c) -> [bool]:
        """Read both input registers and return per-pin states.

        :param i2c: I2C master
        :return: list of 16 booleans, True where the pin is ON
        """
        buffer = bytearray(2)
        i2c.writeto_then_readfrom(self.dev_address, bytes([0x00]), buffer)
        states = []
        for byte in buffer:
            for bit in range(8):
                # A cleared register bit is reported as True (pin ON).
                states.append(byte & (1 << bit) == 0)
        return states

    def assign(self, pin: int, switch: KeySwitch):
        """Attach a key switch to a pin (0-origin)."""
        self.switches[pin] = switch

    def switch(self, pin: int) -> KeySwitch:
        """Return the key switch attached to a pin (0-origin)."""
        return self.switches[pin]
| 34.759494 | 80 | 0.643117 |
from .. key_switch import nop_switch
from .. import IoExpander, KeySwitch
class TCA9555(IoExpander):
    """Driver for the TCA9555 / PCA9555 16-bit I2C I/O expander."""

    def __init__(self, dev_address: int):
        """Store the device address (0x20 + low 3 address bits) and
        pre-fill all 16 pin slots with no-op switches."""
        self.dev_address = dev_address + 0x20
        self.switches = []
        for i in range(16):
            self.switches.append(nop_switch())

    def init_device(self, i2c) -> bool:
        """Configure both 8-bit ports as inputs; always returns True."""
        # 0x06/0x07 are the configuration registers; 0xFF = all inputs.
        i2c.writeto(self.dev_address, bytes([0x06, 0xFF]))
        i2c.writeto(self.dev_address, bytes([0x07, 0xFF]))
        return True

    def read_device(self, i2c) -> [bool]:
        """Read both input registers; True where the register bit is 0 (ON)."""
        buffer = bytearray(2)
        i2c.writeto_then_readfrom(self.dev_address, bytes([0x00]), buffer)
        result = []
        for i, b in enumerate(buffer):
            for p in range(8):
                mask = 1 << p
                if buffer[i] & mask != 0:
                    result.append(False)
                else:
                    result.append(True)
        return result

    def assign(self, pin: int, switch: KeySwitch):
        """Attach `switch` to pin number `pin` (0-origin)."""
        self.switches[pin] = switch

    def switch(self, pin: int) -> KeySwitch:
        """Return the switch attached to pin number `pin` (0-origin)."""
        return self.switches[pin]
| true | true |
f71cab3e710d8cc552a1054d037bb361fdbacb7d | 2,386 | py | Python | old/test_reverse_linked_list.py | kurtrm/data_structures_rev | 58f425a877898a45595de9c57c7eb8e087a0c3a2 | [
"MIT"
] | null | null | null | old/test_reverse_linked_list.py | kurtrm/data_structures_rev | 58f425a877898a45595de9c57c7eb8e087a0c3a2 | [
"MIT"
] | null | null | null | old/test_reverse_linked_list.py | kurtrm/data_structures_rev | 58f425a877898a45595de9c57c7eb8e087a0c3a2 | [
"MIT"
] | null | null | null | """Test of the reversed linked list."""
import pytest
@pytest.fixture
def linked_list():
    """Provide a three-element LinkedList for the tests."""
    from linked_list import LinkedList
    return LinkedList([1, 2, 3])
def test_empty_linked_list(linked_list):
    """Reversing an emptied list raises IndexError."""
    from reverse_linked_list import reverse_linked_list
    for _ in range(3):
        linked_list.pop()
    with pytest.raises(IndexError):
        reverse_linked_list(linked_list)
def test_one_in_linked_list(linked_list):
    """A single-element list is unchanged by reversal."""
    from reverse_linked_list import reverse_linked_list
    for _ in range(2):
        linked_list.pop()
    reverse_linked_list(linked_list)
    assert linked_list.head.data == 1
def test_two_in_linked_list(linked_list):
    """Reversal works with exactly two items."""
    from reverse_linked_list import reverse_linked_list
    linked_list.pop()
    reverse_linked_list(linked_list)
    assert linked_list.head.data == 1
def test_reverse_linked_list(linked_list):
    """Reversal turns head order 3->2->1 into 1->2->3."""
    from reverse_linked_list import reverse_linked_list
    reverse_linked_list(linked_list)
    node = linked_list.head
    for expected in (1, 2, 3):
        assert node.data == expected
        node = node.next_node
def test_long_reverse_linked_list(linked_list):
    """A five-item list reverses and reverses back correctly."""
    from reverse_linked_list import reverse_linked_list

    def assert_chain(expected):
        # Walk the chain and check every node, including the terminator.
        node = linked_list.head
        for value in expected:
            assert node.data == value
            node = node.next_node
        assert node is None

    linked_list.push(4)
    linked_list.push(5)
    reverse_linked_list(linked_list)
    assert_chain([1, 2, 3, 4, 5])
    reverse_linked_list(linked_list)
    assert_chain([5, 4, 3, 2, 1])
| 34.57971 | 85 | 0.754401 |
import pytest
@pytest.fixture
def linked_list():
    """Build a three-element LinkedList for the tests below."""
    from linked_list import LinkedList
    linked_list = LinkedList([1, 2, 3])
    return linked_list


def test_empty_linked_list(linked_list):
    """Reversing an emptied list raises IndexError."""
    from reverse_linked_list import reverse_linked_list
    linked_list.pop()
    linked_list.pop()
    linked_list.pop()
    with pytest.raises(IndexError):
        reverse_linked_list(linked_list)


def test_one_in_linked_list(linked_list):
    """A one-element list reverses to itself."""
    from reverse_linked_list import reverse_linked_list
    linked_list.pop()
    linked_list.pop()
    reverse_linked_list(linked_list)
    assert linked_list.head.data == 1


def test_two_in_linked_list(linked_list):
    """Reversal works with two items."""
    from reverse_linked_list import reverse_linked_list
    linked_list.pop()
    reverse_linked_list(linked_list)
    assert linked_list.head.data == 1


def test_reverse_linked_list(linked_list):
    """Reversal yields head order 1 -> 2 -> 3."""
    from reverse_linked_list import reverse_linked_list
    reverse_linked_list(linked_list)
    assert linked_list.head.data == 1
    assert linked_list.head.next_node.data == 2
    assert linked_list.head.next_node.next_node.data == 3


def test_long_reverse_linked_list(linked_list):
    """A five-item list reverses and reverses back correctly."""
    from reverse_linked_list import reverse_linked_list
    linked_list.push(4)
    linked_list.push(5)
    reverse_linked_list(linked_list)
    assert linked_list.head.data == 1
    assert linked_list.head.next_node.data == 2
    assert linked_list.head.next_node.next_node.data == 3
    assert linked_list.head.next_node.next_node.next_node.data == 4
    assert linked_list.head.next_node.next_node.next_node.next_node.data == 5
    assert linked_list.head.next_node.next_node.next_node.next_node.next_node is None
    reverse_linked_list(linked_list)
    assert linked_list.head.data == 5
    assert linked_list.head.next_node.data == 4
    assert linked_list.head.next_node.next_node.data == 3
    assert linked_list.head.next_node.next_node.next_node.data == 2
    assert linked_list.head.next_node.next_node.next_node.next_node.data == 1
    assert linked_list.head.next_node.next_node.next_node.next_node.next_node is None
f71cabbacd1f7c032bc3b010d748f5f29a9c6426 | 442 | py | Python | form.py | GreciaFlores1996/CursoPython | b81edad009ea36786d28ca5781c63df0f5376ac5 | [
"MIT"
] | null | null | null | form.py | GreciaFlores1996/CursoPython | b81edad009ea36786d28ca5781c63df0f5376ac5 | [
"MIT"
] | 1 | 2019-08-20T22:20:45.000Z | 2019-08-20T22:21:38.000Z | form.py | GreciaFlores1996/CursoPython | b81edad009ea36786d28ca5781c63df0f5376ac5 | [
"MIT"
] | null | null | null | from wtforms import Form
from wtforms import StringField
from wtforms import IntegerField
from wtforms.validators import DataRequired
class EmailForm(Form):
    """WTForms form collecting a required name and e-mail address."""
    # NOTE(review): `email` is a plain StringField with only DataRequired(),
    # so any non-empty string passes — add an Email() validator if address
    # syntax should be checked (confirm intent).
    name = StringField('name', validators=[DataRequired()])
    email = StringField('email', validators=[DataRequired()])
class LoginForm(Form):
    """WTForms login form with required username and password."""
    username = StringField('username', validators=[DataRequired()])
    # NOTE(review): StringField renders as a visible text input; a
    # PasswordField would mask the typed password in the browser — confirm.
    password = StringField('password', validators=[DataRequired()])
| 29.466667 | 67 | 0.757919 | from wtforms import Form
from wtforms import StringField
from wtforms import IntegerField
from wtforms.validators import DataRequired
class EmailForm(Form):
    """WTForms form collecting a required name and e-mail address."""
    # NOTE(review): no Email() validator — any non-empty string passes.
    name = StringField('name', validators=[DataRequired()])
    email = StringField('email', validators=[DataRequired()])
class LoginForm(Form):
    """WTForms login form with required username and password."""
    username = StringField('username', validators=[DataRequired()])
    # NOTE(review): StringField shows the password in clear text when
    # rendered; PasswordField is the usual choice — confirm intent.
    password = StringField('password', validators=[DataRequired()])
| true | true |
f71cabef85002e6d78fa7bf1e3356fe2e5b10593 | 2,601 | py | Python | src/DSGRN/Query/Database.py | yingxinac/DSGRN | b5bc64e5a99e6d266f6ac5ba7ac9d04954f12d32 | [
"MIT"
] | 9 | 2017-10-15T20:49:36.000Z | 2022-02-24T19:26:39.000Z | src/DSGRN/Query/Database.py | yingxinac/DSGRN | b5bc64e5a99e6d266f6ac5ba7ac9d04954f12d32 | [
"MIT"
] | 19 | 2015-07-02T15:59:06.000Z | 2020-06-09T18:13:05.000Z | src/DSGRN/Query/Database.py | yingxinac/DSGRN | b5bc64e5a99e6d266f6ac5ba7ac9d04954f12d32 | [
"MIT"
] | 21 | 2015-11-06T16:28:34.000Z | 2019-09-20T09:26:54.000Z | import sqlite3
import graphviz
from DSGRN._dsgrn import *
from functools import reduce
from DSGRN.Query.Logging import LogToSTDOUT
class Database:
    """Read-only helper around a DSGRN SQLite results database.

    Opens the database, rebuilds the DSGRN Network / ParameterGraph from the
    stored network specification, and exposes convenience queries.
    """

    def __init__(self, database_name):
        """Initialize a DSGRN database object from the SQLite file *database_name*."""
        self.dbname = database_name
        self.conn = sqlite3.connect(database_name)
        self.cursor = self.conn.cursor()
        # Load network spec from database
        sqlexpression = "select Specification from Network"
        self.cursor.execute(sqlexpression)
        network_spec = self.cursor.fetchone()[0]
        # construct network
        self.network = Network(network_spec)
        self.parametergraph = ParameterGraph(self.network)
        # D is the number of network nodes
        self.D = self.parametergraph.dimension()
        self.names = [self.network.name(i) for i in range(0, self.D)]
        # DSGRN uses a mixed-radix indexing scheme for parameters: the digit
        # bases are (least significant first) the factor-graph sizes for each
        # node, then the number of out-edge permutations for each node. We
        # precompute the place value for each digit.
        self.indexing_place_bases = [self.parametergraph.logicsize(i) for i in range(0, self.D)] + [self.parametergraph.ordersize(i) for i in range(0, self.D)]
        self.indexing_place_values = reduce(lambda x, y: x + [x[-1] * y], self.indexing_place_bases[:-1], [1])

    def execute(self, expression, parameters=None):
        """Perform an SQL query and return the sqlite3 cursor.

        Bug fix: ``sqlite3.Cursor.execute`` rejects an explicit ``None``
        parameters argument ("parameters are of unsupported type"), so the
        previous version failed for every call that supplied no parameters.
        Forward the parameters only when they were actually given.
        """
        if parameters is None:
            return self.cursor.execute(expression)
        return self.cursor.execute(expression, parameters)

    def __call__(self, pi):
        """Return the MorseGraphIndex recorded for parameter index *pi*."""
        c = self.conn.cursor()
        sqlexpression = "select MorseGraphIndex from Signatures where ParameterIndex = ?"
        c.execute(sqlexpression, (pi,))
        mgi = c.fetchone()[0]
        return mgi

    def __del__(self):
        """Commit and close upon destruction."""
        # Robustness: __init__ can fail before self.conn exists (bad path,
        # malformed spec); guard so interpreter shutdown stays quiet.
        conn = getattr(self, "conn", None)
        if conn is not None:
            conn.commit()
            conn.close()

    def _repr_svg_(self):
        """Render the network itself as SVG in Jupyter."""
        return graphviz.Source(self.network.graphviz())._repr_svg_()

    def DrawMorseGraph(self, morsegraphindex):
        """Return an object which renders to a graphviz representation in Jupyter."""
        c = self.conn.cursor()
        sqlexpression = "select Graphviz from MorseGraphViz where MorseGraphIndex = ?"
        c.execute(sqlexpression, (morsegraphindex,))
        gv = c.fetchone()[0]
        return graphviz.Source(gv)
| 38.820896 | 153 | 0.708958 | import sqlite3
import graphviz
from DSGRN._dsgrn import *
from functools import reduce
from DSGRN.Query.Logging import LogToSTDOUT
class Database:
    """Read-only helper around a DSGRN SQLite results database."""

    def __init__(self, database_name):
        """Open *database_name* and rebuild the DSGRN network / parameter graph."""
        self.dbname = database_name
        self.conn = sqlite3.connect(database_name)
        self.cursor = self.conn.cursor()
        sqlexpression = "select Specification from Network"
        self.cursor.execute(sqlexpression)
        network_spec = self.cursor.fetchone()[0]
        self.network = Network(network_spec)
        self.parametergraph = ParameterGraph(self.network)
        # D is the number of network nodes.
        self.D = self.parametergraph.dimension()
        self.names = [self.network.name(i) for i in range(0, self.D)]
        # Mixed-radix parameter indexing: digit bases are the per-node factor
        # graph sizes followed by the per-node order sizes; precompute place values.
        self.indexing_place_bases = [self.parametergraph.logicsize(i) for i in range(0, self.D)] + [self.parametergraph.ordersize(i) for i in range(0, self.D)]
        self.indexing_place_values = reduce(lambda x, y: x + [x[-1] * y], self.indexing_place_bases[:-1], [1])

    def execute(self, expression, parameters=None):
        """Run an SQL query; only pass parameters through when supplied.

        Bug fix: sqlite3 rejects an explicit ``parameters=None`` argument, so
        the old body broke every call made without parameters.
        """
        if parameters is None:
            return self.cursor.execute(expression)
        return self.cursor.execute(expression, parameters)

    def __call__(self, pi):
        """Return the MorseGraphIndex for parameter index *pi*."""
        c = self.conn.cursor()
        sqlexpression = "select MorseGraphIndex from Signatures where ParameterIndex = ?"
        c.execute(sqlexpression, (pi,))
        mgi = c.fetchone()[0]
        return mgi

    def __del__(self):
        """Commit and close on destruction; tolerate a half-constructed instance."""
        conn = getattr(self, "conn", None)
        if conn is not None:
            conn.commit()
            conn.close()

    def _repr_svg_(self):
        """Render the network as SVG in Jupyter."""
        return graphviz.Source(self.network.graphviz())._repr_svg_()

    def DrawMorseGraph(self, morsegraphindex):
        """Return a graphviz object for the given Morse graph index."""
        c = self.conn.cursor()
        sqlexpression = "select Graphviz from MorseGraphViz where MorseGraphIndex = ?"
        c.execute(sqlexpression, (morsegraphindex,))
        gv = c.fetchone()[0]
        return graphviz.Source(gv)
| true | true |
f71cacb71c497b993580e8b6ab79d5b35f0c8185 | 7,853 | py | Python | lit_nlp/examples/sst_pytorch_demo.py | johnson7788/lit | 3eb824b01e0f72a5486124b16056bf912465debc | [
"Apache-2.0"
] | 1 | 2021-04-12T22:57:04.000Z | 2021-04-12T22:57:04.000Z | lit_nlp/examples/sst_pytorch_demo.py | johnson7788/lit | 3eb824b01e0f72a5486124b16056bf912465debc | [
"Apache-2.0"
] | 4 | 2022-02-14T19:37:07.000Z | 2022-02-27T20:24:08.000Z | lit_nlp/examples/sst_pytorch_demo.py | haaami01/lit | 3eb824b01e0f72a5486124b16056bf912465debc | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
r"""Code example for a custom model, using PyTorch.
This demo shows how to use a custom model with LIT, in just a few lines of code.
We'll use a transformers model, with a minimal amount of code to implement the
LIT API. Compared to models/glue_models.py, this has fewer features, but the
code is more readable.
This demo is similar in functionality to simple_tf2_demo.py, but uses PyTorch
instead of TensorFlow 2.
The transformers library can load weights from either,
so you can use any saved model compatible with the underlying model class
(AutoModelForSequenceClassification). To train something for this demo, you can:
- Use quickstart_sst_demo.py, and set --model_path to somewhere durable
- Or: Use tools/glue_trainer.py
- Or: Use any fine-tuning code that works with transformers, such as
https://github.com/huggingface/transformers#quick-tour-of-the-fine-tuningusage-scripts
To run locally:
python -m lit_nlp.examples.sst_pytorch_demo \
    --port=5432 --model_path=/path/to/saved/model
Then navigate to localhost:5432 to access the demo UI.
NOTE: this demo still uses TensorFlow Datasets (which depends on TensorFlow) to
load the data. However, the output of glue.SST2Data is just NumPy arrays and
plain Python data, and you can easily replace this with a different library or
directly loading from CSV.
"""
import re
from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.examples.datasets import glue
from lit_nlp.lib import utils
import torch
import transformers
# NOTE: additional flags defined in server_flags.py
FLAGS = flags.FLAGS

# --model_path: directory (or .tar.gz archive, unpacked in main()) holding a
# trained transformers sequence-classification model.
flags.DEFINE_string(
    "model_path", None,
    "Path to trained model, in standard transformers format, e.g. as "
    "saved by model.save_pretrained() and tokenizer.save_pretrained()")
def _from_pretrained(cls, *args, **kw):
    """Load a transformers model in PyTorch, with fallback to TF2/Keras weights."""
    try:
        model = cls.from_pretrained(*args, **kw)
    except OSError as err:
        # No PyTorch weights found; retry converting from a TF checkpoint.
        logging.warning("Caught OSError loading model: %s", err)
        logging.warning(
            "Re-trying to convert from TensorFlow checkpoint (from_tf=True)")
        model = cls.from_pretrained(*args, from_tf=True, **kw)
    return model
class SimpleSentimentModel(lit_model.Model):
    """Simple sentiment analysis model wrapping a transformers classifier."""

    LABELS = ["0", "1"]  # negative, positive
    compute_grads: bool = True  # if True, compute and return gradients.

    def __init__(self, model_name_or_path):
        """Load tokenizer and sequence-classification model from *model_name_or_path*."""
        self.tokenizer = transformers.AutoTokenizer.from_pretrained(
            model_name_or_path)
        model_config = transformers.AutoConfig.from_pretrained(
            model_name_or_path,
            num_labels=2,
            output_hidden_states=True,
            output_attentions=True,
        )
        # This is just a regular PyTorch model.
        self.model = _from_pretrained(
            transformers.AutoModelForSequenceClassification,
            model_name_or_path,
            config=model_config)
        self.model.eval()  # inference mode; gradients are still requested explicitly below

    ##
    # LIT API implementation
    def max_minibatch_size(self):
        """Batch size used by lit_model.Model.predict() to chunk inputs.

        Alternately, you can just override predict() and handle batching yourself.
        """
        return 32

    def predict_minibatch(self, inputs):
        """Run the model on a minibatch and yield one output record per example.

        Each input is a dict with a "sentence" key; each yielded record holds
        tokens, class probabilities, the first-token embedding, per-layer
        attention and (when compute_grads) token gradients.
        """
        # Preprocess to ids and masks, and make the input batch.
        encoded_input = self.tokenizer.batch_encode_plus(
            [ex["sentence"] for ex in inputs],
            return_tensors="pt",
            add_special_tokens=True,
            max_length=128,
            padding="longest",
            truncation="longest_first")

        # Check and send to cuda (GPU) if available
        if torch.cuda.is_available():
            self.model.cuda()
            for tensor in encoded_input:
                encoded_input[tensor] = encoded_input[tensor].cuda()

        # Run a forward pass; gradients are only tracked when requested.
        with torch.set_grad_enabled(self.compute_grads):
            out: transformers.modeling_outputs.SequenceClassifierOutput = \
                self.model(**encoded_input)

        # Post-process outputs.
        batched_outputs = {
            "probas": torch.nn.functional.softmax(out.logits, dim=-1),
            "input_ids": encoded_input["input_ids"],
            "ntok": torch.sum(encoded_input["attention_mask"], dim=1),
            "cls_emb": out.hidden_states[-1][:, 0],  # last layer, first token
        }

        # Add attention layers to batched_outputs
        assert len(out.attentions) == self.model.config.num_hidden_layers
        for i, layer_attention in enumerate(out.attentions):
            batched_outputs[f"layer_{i}/attention"] = layer_attention

        # Request gradients after the forward pass.
        # Note: hidden_states[0] includes position and segment encodings, as well as
        # subword embeddings.
        if self.compute_grads:
            # <torch.float32>[batch_size, num_tokens, emb_dim]
            scalar_pred_for_gradients = torch.max(
                batched_outputs["probas"], dim=1, keepdim=False, out=None)[0]
            batched_outputs["input_emb_grad"] = torch.autograd.grad(
                scalar_pred_for_gradients,
                out.hidden_states[0],
                grad_outputs=torch.ones_like(scalar_pred_for_gradients))[0]

        # Post-process outputs: return as NumPy for further processing.
        detached_outputs = {
            k: v.cpu().detach().numpy() for k, v in batched_outputs.items()}

        # Unbatch outputs so we get one record per input example.
        for output in utils.unbatch_preds(detached_outputs):
            ntok = output.pop("ntok")
            output["tokens"] = self.tokenizer.convert_ids_to_tokens(
                output.pop("input_ids")[:ntok])

            # set token gradients
            if self.compute_grads:
                output["token_grad_sentence"] = output["input_emb_grad"][:ntok]

            # Process attention.
            for key in output:
                if not re.match(r"layer_(\d+)/attention", key):
                    continue
                # Select only real tokens, since most of this matrix is padding.
                # <float32>[num_heads, max_seq_length, max_seq_length]
                # -> <float32>[num_heads, num_tokens, num_tokens]
                output[key] = output[key][:, :ntok, :ntok].transpose((0, 2, 1))
                # Make a copy of this array to avoid memory leaks, since NumPy otherwise
                # keeps a pointer around that prevents the source array from being GCed.
                output[key] = output[key].copy()

            yield output

    def input_spec(self) -> lit_types.Spec:
        """LIT input spec: a sentence plus an optional gold label."""
        return {
            "sentence": lit_types.TextSegment(),
            "label": lit_types.CategoryLabel(vocab=self.LABELS, required=False)
        }

    def output_spec(self) -> lit_types.Spec:
        """LIT output spec matching the fields yielded by predict_minibatch()."""
        ret = {
            "tokens": lit_types.Tokens(),
            "probas": lit_types.MulticlassPreds(parent="label", vocab=self.LABELS),
            "cls_emb": lit_types.Embeddings()
        }
        # Gradients, if requested.
        if self.compute_grads:
            ret["token_grad_sentence"] = lit_types.TokenGradients(
                align="tokens")
        # Attention heads, one field for each layer.
        for i in range(self.model.config.num_hidden_layers):
            ret[f"layer_{i}/attention"] = lit_types.AttentionHeads(
                align_in="tokens", align_out="tokens")
        return ret
return ret
def main(_):
    """Build the model and dataset, then launch the LIT dev server."""
    # Normally path is a directory; if it's an archive file, download and
    # extract to the transformers cache.
    model_path = FLAGS.model_path
    if model_path.endswith(".tar.gz"):
        model_path = transformers.file_utils.cached_path(
            model_path, extract_compressed_file=True)
    # Load the model we defined above.
    models = {"sst": SimpleSentimentModel(model_path)}
    # Load SST-2 validation set from TFDS.
    datasets = {"sst_dev": glue.SST2Data("validation")}
    # Start the LIT server. See server_flags.py for server options.
    lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
    lit_demo.serve()


if __name__ == "__main__":
    app.run(main)
import re
from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.examples.datasets import glue
from lit_nlp.lib import utils
import torch
import transformers
FLAGS = flags.FLAGS

# --model_path: directory or .tar.gz archive of a trained transformers model.
flags.DEFINE_string(
    "model_path", None,
    "Path to trained model, in standard transformers format, e.g. as "
    "saved by model.save_pretrained() and tokenizer.save_pretrained()")
def _from_pretrained(cls, *args, **kw):
    """Load a transformers model in PyTorch, falling back to TF2/Keras weights."""
    try:
        return cls.from_pretrained(*args, **kw)
    except OSError as load_error:
        # PyTorch weights missing; retry converting from a TF checkpoint.
        logging.warning("Caught OSError loading model: %s", load_error)
        logging.warning(
            "Re-trying to convert from TensorFlow checkpoint (from_tf=True)")
    return cls.from_pretrained(*args, from_tf=True, **kw)
class SimpleSentimentModel(lit_model.Model):
    """Simple sentiment analysis model wrapping a transformers classifier."""

    LABELS = ["0", "1"]  # negative, positive
    compute_grads: bool = True  # if True, compute and return gradients

    def __init__(self, model_name_or_path):
        """Load tokenizer and sequence-classification model from *model_name_or_path*."""
        self.tokenizer = transformers.AutoTokenizer.from_pretrained(
            model_name_or_path)
        model_config = transformers.AutoConfig.from_pretrained(
            model_name_or_path,
            num_labels=2,
            output_hidden_states=True,
            output_attentions=True,
        )
        self.model = _from_pretrained(
            transformers.AutoModelForSequenceClassification,
            model_name_or_path,
            config=model_config)
        self.model.eval()  # inference mode

    def max_minibatch_size(self):
        """Batch size used by lit_model.Model.predict() to chunk inputs."""
        return 32

    def predict_minibatch(self, inputs):
        """Run the model on a minibatch and yield one output record per example."""
        # Tokenize the batch of sentences into ids + attention masks.
        encoded_input = self.tokenizer.batch_encode_plus(
            [ex["sentence"] for ex in inputs],
            return_tensors="pt",
            add_special_tokens=True,
            max_length=128,
            padding="longest",
            truncation="longest_first")
        # Move model and inputs to GPU when available.
        if torch.cuda.is_available():
            self.model.cuda()
            for tensor in encoded_input:
                encoded_input[tensor] = encoded_input[tensor].cuda()
        # Forward pass; track gradients only when requested.
        with torch.set_grad_enabled(self.compute_grads):
            out: transformers.modeling_outputs.SequenceClassifierOutput = \
                self.model(**encoded_input)
        batched_outputs = {
            "probas": torch.nn.functional.softmax(out.logits, dim=-1),
            "input_ids": encoded_input["input_ids"],
            "ntok": torch.sum(encoded_input["attention_mask"], dim=1),
            "cls_emb": out.hidden_states[-1][:, 0],  # last layer, first token
        }
        # One attention tensor per transformer layer.
        assert len(out.attentions) == self.model.config.num_hidden_layers
        for i, layer_attention in enumerate(out.attentions):
            batched_outputs[f"layer_{i}/attention"] = layer_attention
        # Gradients of the max class probability w.r.t. the input embeddings.
        if self.compute_grads:
            scalar_pred_for_gradients = torch.max(
                batched_outputs["probas"], dim=1, keepdim=False, out=None)[0]
            batched_outputs["input_emb_grad"] = torch.autograd.grad(
                scalar_pred_for_gradients,
                out.hidden_states[0],
                grad_outputs=torch.ones_like(scalar_pred_for_gradients))[0]
        # Detach to NumPy for post-processing.
        detached_outputs = {
            k: v.cpu().detach().numpy() for k, v in batched_outputs.items()}
        # Unbatch: one record per input example, trimmed to real tokens.
        for output in utils.unbatch_preds(detached_outputs):
            ntok = output.pop("ntok")
            output["tokens"] = self.tokenizer.convert_ids_to_tokens(
                output.pop("input_ids")[:ntok])
            if self.compute_grads:
                output["token_grad_sentence"] = output["input_emb_grad"][:ntok]
            for key in output:
                if not re.match(r"layer_(\d+)/attention", key):
                    continue
                # Drop padding positions and transpose; copy to release the
                # reference into the larger source array.
                output[key] = output[key][:, :ntok, :ntok].transpose((0, 2, 1))
                output[key] = output[key].copy()
            yield output

    def input_spec(self) -> lit_types.Spec:
        """LIT input spec: a sentence plus an optional gold label."""
        return {
            "sentence": lit_types.TextSegment(),
            "label": lit_types.CategoryLabel(vocab=self.LABELS, required=False)
        }

    def output_spec(self) -> lit_types.Spec:
        """LIT output spec matching the fields yielded by predict_minibatch()."""
        ret = {
            "tokens": lit_types.Tokens(),
            "probas": lit_types.MulticlassPreds(parent="label", vocab=self.LABELS),
            "cls_emb": lit_types.Embeddings()
        }
        if self.compute_grads:
            ret["token_grad_sentence"] = lit_types.TokenGradients(
                align="tokens")
        for i in range(self.model.config.num_hidden_layers):
            ret[f"layer_{i}/attention"] = lit_types.AttentionHeads(
                align_in="tokens", align_out="tokens")
        return ret
def main(_):
    """Build the model and dataset, then launch the LIT dev server."""
    # If --model_path is an archive file, download and
    # extract to the transformers cache.
    model_path = FLAGS.model_path
    if model_path.endswith(".tar.gz"):
        model_path = transformers.file_utils.cached_path(
            model_path, extract_compressed_file=True)
    # Load the model we defined above.
    models = {"sst": SimpleSentimentModel(model_path)}
    # Load SST-2 validation set from TFDS.
    datasets = {"sst_dev": glue.SST2Data("validation")}
    # Start the LIT server. See server_flags.py for server options.
    lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
    lit_demo.serve()


if __name__ == "__main__":
    app.run(main)
f71cae616991607462e2bfde3a5cc705076fafbc | 6,982 | py | Python | geektime_ebook_maker/spider/mini_spider.py | fakeYanss/geektime_ebook_maker | b536f3bdaf84f8180aac1d2601be8058e0e91115 | [
"MIT"
] | 33 | 2018-08-13T02:52:15.000Z | 2018-10-16T03:38:11.000Z | geektime_ebook_maker/spider/mini_spider.py | fakeYanss/geektime_ebook_maker | b536f3bdaf84f8180aac1d2601be8058e0e91115 | [
"MIT"
] | null | null | null | geektime_ebook_maker/spider/mini_spider.py | fakeYanss/geektime_ebook_maker | b536f3bdaf84f8180aac1d2601be8058e0e91115 | [
"MIT"
] | 4 | 2018-08-13T05:26:11.000Z | 2018-09-06T09:59:52.000Z | # coding=utf8
import os
from threading import Thread
try:
from queue import Queue, Empty as QueueEmpty
except ImportError:
from Queue import Queue, Empty as QueueEmpty
import requests
import logging
import traceback
# Logger used to record tracebacks swallowed by error_catch().
error_logger = logging.getLogger('error')
error_logger.setLevel(logging.ERROR)

# Sentinel returned by wrapped functions when they raise.
ERROR_STATUS = -1


def error_catch(func):
    """Decorator: log any exception from *func* and return ERROR_STATUS instead.

    Bug fix: the previous bare ``except:`` also swallowed KeyboardInterrupt
    and SystemExit, making the spider impossible to stop cleanly; catching
    Exception lets those propagate.
    """
    def wrap(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            error_logger.error(traceback.format_exc())
            return ERROR_STATUS
    return wrap
def fetch(url, method='GET', **kwargs):
    """Fetch *url* and return the HTTP response body as text.

    Implements the same interface as ``requests.request``: *method* is the
    HTTP verb, and any keyword argument accepted by requests (``params``,
    ``data``, ``headers``, ``cookies``, ``auth``, ``timeout``, ``proxies``,
    ``verify``, ``stream``, ``cert``, ...) is forwarded unchanged.

    :param url: the URL to retrieve
    :param method: HTTP method for the request (default ``'GET'``)
    :return: the response body as str (decoded text)
    """
    response = requests.request(method, url, **kwargs)
    return response.text
class Spider(object):
    """Small multi-threaded crawl pipeline: fetch -> parse -> save.

    Three queues connect three pools of five worker threads each. Every stage
    wraps its callable with error_catch, so a failure on one URL is logged
    and the worker keeps running.

    NOTE(review): workers exit after their input queue has been empty for 5
    seconds — that timeout is the only shutdown signal, so a single fetch
    slower than 5s could let downstream workers exit early. Confirm this
    best-effort behavior is acceptable.
    """

    def __init__(self, parse_func, save_func):
        # Queue element shapes (third member, when present, is a payload dict):
        self.q_fetch = Queue()  # element (url, request_params_dict)
        self.q_parse = Queue()  # element (url, request_params_dict, {'html_content': ...})
        self.q_save = Queue()  # element (url, request_params_dict, {'content_to_save': ...})
        # Each stage callable is wrapped so it returns ERROR_STATUS on failure.
        self._fetch = error_catch(fetch)
        self._parse = error_catch(parse_func)
        self._save = error_catch(save_func)

    def set_start_url(self, url, **kw):
        """Seed the crawl with *url*; **kw are request params forwarded to fetch().

        :param url:
        :param kw:
        :return: None
        """
        self.q_fetch.put_nowait((url, kw))

    def add_url(self, url, **kw):
        """Enqueue another URL to fetch (same contract as set_start_url)."""
        self.q_fetch.put_nowait((url, kw))

    def start_fetch(self):
        """Fetch worker loop: download queued URLs, hand bodies to the parse queue."""
        while True:
            try:
                url, params = self.q_fetch.get(block=True, timeout=5)
                print('----- fetch start: url={} -----\n'.format(url))
                result = self._fetch(url, **params)
                if result == ERROR_STATUS:
                    # error_catch already logged the traceback; drop this URL.
                    continue
                html_content = result
                print('----- fetch end: url={} -----\n'.format(url))
                self.q_parse.put_nowait((url, params, {'html_content': html_content}))
            except QueueEmpty:
                break

    def start_parse(self):
        """Parse worker loop: run the user parser, requeue new URLs, queue saves."""
        while True:
            try:
                url, params, content = self.q_parse.get(block=True, timeout=5)
                print('----- parse start: url={} -----\n'.format(url))
                result = self._parse(url, params, html_content=content['html_content'])
                if result == ERROR_STATUS:
                    continue
                url_to_fetch_list, content_to_save = result
                print('----- parse end: url={} -----\n'.format(url))
                # put new url to q_fetch
                for item in url_to_fetch_list:
                    self.q_fetch.put_nowait(item)
                # put to q_save
                self.q_save.put_nowait((url, params, {'content_to_save': content_to_save}))
            except QueueEmpty:
                break

    def start_save(self):
        """Save worker loop: hand parsed content to the user-supplied saver."""
        while True:
            try:
                url, params, content = self.q_save.get(block=True, timeout=5)
                print('----- save start: url={} -----\n'.format(url))
                result = self._save(url, params, content=content['content_to_save'])
                if result == ERROR_STATUS:
                    continue
                print('----- save end: url={} -----\n'.format(url))
            except QueueEmpty:
                break

    @error_catch
    def start_crawl(self):
        """Run the whole pipeline: start 5 threads per stage and join them all."""
        thread_pool_fetch = [Thread(target=self.start_fetch, args=()) for i in range(5)]
        thread_pool_parse = [Thread(target=self.start_parse, args=()) for i in range(5)]
        thread_pool_save = [Thread(target=self.start_save, args=()) for i in range(5)]
        for td in thread_pool_fetch:
            td.start()
        for td in thread_pool_parse:
            td.start()
        for td in thread_pool_save:
            td.start()
        for td in thread_pool_fetch:
            if td.is_alive():
                td.join()
        for td in thread_pool_parse:
            if td.is_alive():
                td.join()
        for td in thread_pool_save:
            if td.is_alive():
                td.join()
def parse(url, request_params, html_content):
    """Parse content in *html_content* based on *url* (user hook; override it).

    :param url:
    :param html_content: http response body of url
    :return: tuple (new_url_to_fetch_list, parsed_content_to_save)
             or ERROR_STATUS if parsing failed
    :raises NotImplementedError: always, until overridden.
    """
    # Bug fix: `raise NotImplemented` raises a TypeError at runtime because
    # NotImplemented is a value, not an exception class; the intended
    # exception is NotImplementedError.
    raise NotImplementedError
def save(url, request_params, content):
    """Save *content* based on *url* (user hook; override it).

    :param url:
    :param content:
    :return: anything, or ERROR_STATUS if saving failed
    :raises NotImplementedError: always, until overridden.
    """
    # Bug fix: `raise NotImplemented` raises TypeError; use the proper
    # NotImplementedError exception class.
    raise NotImplementedError
raise NotImplemented
if __name__ == '__main__':
    # Smoke-test drive: crawl a tiny hard-coded graph of three sites.
    def parse(url, request_params, html_content):
        # Demo parser: print the body; for the seed page also emit two
        # follow-up URLs for the fetch queue.
        print(html_content)
        result = ([], '')
        if url == 'http://www.baidu.com':
            result = ([('http://www.sina.com', {}), ('http://www.qq.com', {})], 'welcome to baidu')
        if url == 'http://www.sina.com':
            result = ([], 'welcome to sina')
        if url == 'http://www.qq.com':
            result = ([], 'welcome to qq')
        return result

    def save(url, request_params, content):
        # Demo saver: just print what would be persisted.
        print(content)

    spider = Spider(parse, save)
    spider.set_start_url('http://www.baidu.com')
    spider.start_crawl()
| 32.474419 | 136 | 0.596534 |
import os
from threading import Thread
try:
from queue import Queue, Empty as QueueEmpty
except ImportError:
from Queue import Queue, Empty as QueueEmpty
import requests
import logging
import traceback
# Logger used to record tracebacks swallowed by error_catch().
error_logger = logging.getLogger('error')
error_logger.setLevel(logging.ERROR)

# Sentinel returned by wrapped functions when they raise.
ERROR_STATUS = -1


def error_catch(func):
    """Decorator: log any exception from *func* and return ERROR_STATUS.

    Bug fix: a bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    catching Exception lets those propagate so the spider can be stopped.
    """
    def wrap(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            error_logger.error(traceback.format_exc())
            return ERROR_STATUS
    return wrap
def fetch(url, method='GET', **kwargs):
    """Fetch *url* with ``requests.request`` and return the response body text.

    :param url: URL to retrieve
    :param method: HTTP method (default 'GET'); **kwargs are forwarded to requests
    :return: decoded response body (str)
    """
    resp = requests.request(method, url, **kwargs)
    html_content = resp.text
    return html_content
class Spider(object):
    """Multi-threaded crawl pipeline (fetch -> parse -> save) over three queues.

    NOTE(review): each worker exits once its queue has been empty for 5
    seconds; that timeout is the only shutdown signal — confirm a stage
    slower than 5s cannot starve the downstream pools in your workload.
    """

    def __init__(self, parse_func, save_func):
        # (url, params) / (url, params, {'html_content'}) / (url, params, {'content_to_save'})
        self.q_fetch = Queue()
        self.q_parse = Queue()
        self.q_save = Queue()
        # Wrap every stage so a failure returns ERROR_STATUS instead of raising.
        self._fetch = error_catch(fetch)
        self._parse = error_catch(parse_func)
        self._save = error_catch(save_func)

    def set_start_url(self, url, **kw):
        """Seed the crawl; **kw are request params forwarded to fetch()."""
        self.q_fetch.put_nowait((url, kw))

    def add_url(self, url, **kw):
        """Enqueue another URL to fetch."""
        self.q_fetch.put_nowait((url, kw))

    def start_fetch(self):
        """Fetch worker loop: download URLs and pass bodies to the parse queue."""
        while True:
            try:
                url, params = self.q_fetch.get(block=True, timeout=5)
                print('----- fetch start: url={} -----\n'.format(url))
                result = self._fetch(url, **params)
                if result == ERROR_STATUS:
                    # Already logged by error_catch; skip this URL.
                    continue
                html_content = result
                print('----- fetch end: url={} -----\n'.format(url))
                self.q_parse.put_nowait((url, params, {'html_content': html_content}))
            except QueueEmpty:
                break

    def start_parse(self):
        """Parse worker loop: requeue discovered URLs, queue parsed content."""
        while True:
            try:
                url, params, content = self.q_parse.get(block=True, timeout=5)
                print('----- parse start: url={} -----\n'.format(url))
                result = self._parse(url, params, html_content=content['html_content'])
                if result == ERROR_STATUS:
                    continue
                url_to_fetch_list, content_to_save = result
                print('----- parse end: url={} -----\n'.format(url))
                for item in url_to_fetch_list:
                    self.q_fetch.put_nowait(item)
                self.q_save.put_nowait((url, params, {'content_to_save': content_to_save}))
            except QueueEmpty:
                break

    def start_save(self):
        """Save worker loop: hand parsed content to the user-supplied saver."""
        while True:
            try:
                url, params, content = self.q_save.get(block=True, timeout=5)
                print('----- save start: url={} -----\n'.format(url))
                result = self._save(url, params, content=content['content_to_save'])
                if result == ERROR_STATUS:
                    continue
                print('----- save end: url={} -----\n'.format(url))
            except QueueEmpty:
                break

    @error_catch
    def start_crawl(self):
        """Run the pipeline: start 5 threads per stage, then join them all."""
        thread_pool_fetch = [Thread(target=self.start_fetch, args=()) for i in range(5)]
        thread_pool_parse = [Thread(target=self.start_parse, args=()) for i in range(5)]
        thread_pool_save = [Thread(target=self.start_save, args=()) for i in range(5)]
        for td in thread_pool_fetch:
            td.start()
        for td in thread_pool_parse:
            td.start()
        for td in thread_pool_save:
            td.start()
        for td in thread_pool_fetch:
            if td.is_alive():
                td.join()
        for td in thread_pool_parse:
            if td.is_alive():
                td.join()
        for td in thread_pool_save:
            if td.is_alive():
                td.join()
def parse(url, request_params, html_content):
    """User hook: parse *html_content* fetched from *url*; must be overridden.

    :raises NotImplementedError: always, until overridden.
    """
    # Bug fix: `raise NotImplemented` raises TypeError (it is a value, not an
    # exception class); NotImplementedError is the intended exception.
    raise NotImplementedError
def save(url, request_params, content):
    """User hook: persist *content* for *url*; must be overridden.

    :raises NotImplementedError: always, until overridden.
    """
    # Bug fix: NotImplementedError, not the NotImplemented constant.
    raise NotImplementedError
if __name__ == '__main__':
    # Smoke-test drive: crawl a tiny hard-coded graph of three sites.
    def parse(url, request_params, html_content):
        # Demo parser: print the body; the seed page also yields two new URLs.
        print(html_content)
        result = ([], '')
        if url == 'http://www.baidu.com':
            result = ([('http://www.sina.com', {}), ('http://www.qq.com', {})], 'welcome to baidu')
        if url == 'http://www.sina.com':
            result = ([], 'welcome to sina')
        if url == 'http://www.qq.com':
            result = ([], 'welcome to qq')
        return result

    def save(url, request_params, content):
        # Demo saver: just print what would be persisted.
        print(content)

    spider = Spider(parse, save)
    spider.set_start_url('http://www.baidu.com')
    spider.start_crawl()
| true | true |
f71caea71cfc518c2ef4111293c2ff14384cf596 | 1,255 | py | Python | src/sentry/api/endpoints/project_environments.py | apragacz/sf-sentry | 2fdd6c1195c29a1d401d1cd538c22ea68556699a | [
"BSD-3-Clause"
] | 1 | 2018-03-05T15:40:12.000Z | 2018-03-05T15:40:12.000Z | src/sentry/api/endpoints/project_environments.py | pitchin/sentry | ff6f260e9edb726374d2e4f455ff8b3d0ecd551e | [
"BSD-3-Clause"
] | 1 | 2018-08-22T16:49:48.000Z | 2018-08-22T16:49:48.000Z | src/sentry/api/endpoints/project_environments.py | pitchin/sentry | ff6f260e9edb726374d2e4f455ff8b3d0ecd551e | [
"BSD-3-Clause"
] | 1 | 2018-07-02T09:46:44.000Z | 2018-07-02T09:46:44.000Z | from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.models import EnvironmentProject
environment_visibility_filter_options = {
'all': lambda queryset: queryset,
'hidden': lambda queryset: queryset.filter(is_hidden=True),
'visible': lambda queryset: queryset.exclude(is_hidden=True),
}
class ProjectEnvironmentsEndpoint(ProjectEndpoint):
def get(self, request, project):
queryset = EnvironmentProject.objects.filter(
project=project,
).select_related('environment').order_by('environment__name')
visibility = request.GET.get('visibility', 'visible')
if visibility not in environment_visibility_filter_options:
return Response({
'detail': 'Invalid value for \'visibility\', valid values are: {!r}'.format(
environment_visibility_filter_options.keys(),
),
}, status=400)
add_visibility_filters = environment_visibility_filter_options[visibility]
queryset = add_visibility_filters(queryset)
return Response(serialize(list(queryset), request.user))
| 35.857143 | 92 | 0.713147 | from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.models import EnvironmentProject
environment_visibility_filter_options = {
'all': lambda queryset: queryset,
'hidden': lambda queryset: queryset.filter(is_hidden=True),
'visible': lambda queryset: queryset.exclude(is_hidden=True),
}
class ProjectEnvironmentsEndpoint(ProjectEndpoint):
def get(self, request, project):
queryset = EnvironmentProject.objects.filter(
project=project,
).select_related('environment').order_by('environment__name')
visibility = request.GET.get('visibility', 'visible')
if visibility not in environment_visibility_filter_options:
return Response({
'detail': 'Invalid value for \'visibility\', valid values are: {!r}'.format(
environment_visibility_filter_options.keys(),
),
}, status=400)
add_visibility_filters = environment_visibility_filter_options[visibility]
queryset = add_visibility_filters(queryset)
return Response(serialize(list(queryset), request.user))
| true | true |
f71caf4a0f239065a54f05daee5fc3a53ea19433 | 696 | py | Python | tarefas-poo/lista-03/tribo/view/paineis/painel_cria_tribo.py | victoriaduarte/POO_UFSC | 0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a | [
"MIT"
] | null | null | null | tarefas-poo/lista-03/tribo/view/paineis/painel_cria_tribo.py | victoriaduarte/POO_UFSC | 0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a | [
"MIT"
] | null | null | null | tarefas-poo/lista-03/tribo/view/paineis/painel_cria_tribo.py | victoriaduarte/POO_UFSC | 0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a | [
"MIT"
] | null | null | null | # --------------------------
# UFSC - CTC - INE - INE5663
# Exercício da Tribo
# --------------------------
# Classe responsável por criar uma tribo
#
from model.tribo import Tribo
from view.paineis.painel_abstrato import PainelAbstrato
class PainelCriaTribo(PainelAbstrato):
    """Interactive panel that creates a tribe and stores it in the UI."""

    def __init__(self, iu):
        super().__init__('Criar Tribo', iu)

    def _interaja(self):
        # Prompt for the tribe's parameters, then register the new tribe.
        nome = input('Nome da tribo: ')
        max_guerreiros = int(input('Quantidade máxima de guerreiros: '))
        max_vidas = int(input('Quantidade máxima de vidas de cada guerreiro: '))
        self._iu.armazene_tribo(Tribo(nome, max_guerreiros, max_vidas))
        print('Tribo criada!')
| 30.26087 | 80 | 0.627874 |
from model.tribo import Tribo
from view.paineis.painel_abstrato import PainelAbstrato
class PainelCriaTribo(PainelAbstrato):
def __init__(self, iu):
super().__init__('Criar Tribo', iu)
def _interaja(self):
nome = input('Nome da tribo: ')
qtd_guerreiros = int(input('Quantidade máxima de guerreiros: '))
qtd_vidas = int(input('Quantidade máxima de vidas de cada guerreiro: '))
tribo = Tribo(nome, qtd_guerreiros, qtd_vidas)
self._iu.armazene_tribo(tribo)
print('Tribo criada!')
| true | true |
f71cb0c1773a3937199f2475478d123c6d026639 | 3,726 | py | Python | src/lupuxt2py/constants.py | ChrisKeck/lupuxt2py | 73dc0c636c81fc7007044d9e6c2d34a1794ebae3 | [
"MIT"
] | null | null | null | src/lupuxt2py/constants.py | ChrisKeck/lupuxt2py | 73dc0c636c81fc7007044d9e6c2d34a1794ebae3 | [
"MIT"
] | null | null | null | src/lupuxt2py/constants.py | ChrisKeck/lupuxt2py | 73dc0c636c81fc7007044d9e6c2d34a1794ebae3 | [
"MIT"
] | null | null | null | # Used in setup.py
# -*- coding: utf-8 -*-
VERSION = "0.1.1"
PROJECT_PACKAGE_NAME = "lupupy"
PROJECT_LICENSE = "MIT"
PROJECT_URL = "http://www.github.com/majuss/lupupy"
PROJECT_DESCRIPTION = "A python cli for Lupusec alarm panels."
PROJECT_LONG_DESCRIPTION = (
"lupupy is a python3 interface for"
" the Lupus Electronics alarm panel."
" Its intented to get used in various"
" smart home services to get a full"
" integration of all you devices."
)
PROJECT_AUTHOR = "Majuss"
MODE_AWAY = "Arm"
MODE_HOME = "Home"
MODE_DISARMED = "Disarm"
MODE_ALARM_TRIGGERED = "Einbruch"
ALL_MODES = [MODE_DISARMED, MODE_HOME, MODE_AWAY]
MODE_TRANSLATION_XT1 = {"Disarm": 2, "Home": 1, "Arm": 0}
MODE_TRANSLATION_XT2 = {"Disarm": 0, "Arm": 1, "Home": 2}
XT2_MODES_TO_TEXT = {
"{AREA_MODE_0}": "Disarm",
"{AREA_MODE_1}": "Arm",
"{AREA_MODE_2}": "Home",
"{AREA_MODE_3}": "Home",
"{AREA_MODE_4}": "Home",
}
STATE_ALARM_DISARMED = "disarmed"
STATE_ALARM_ARMED_HOME = "armed_home"
STATE_ALARM_ARMED_AWAY = "armed_away"
STATE_ALARM_TRIGGERED = "alarm_triggered"
MODE_TRANSLATION_GENERIC = {
"Disarm": "disarmed",
"Home": "armed_home",
"Arm": "armed_away",
}
DEFAULT_MODE = MODE_AWAY
HISTORY_REQUEST = "historyGet"
HISTORY_ALARM_COLUMN = "a"
HISTORY_HEADER = "hisrows"
HISTORY_CACHE_NAME = ".lupusec_history_cache"
STATUS_ON_INT = 0
STATUS_ON = "on"
STATUS_OFF_INT = 1
STATUS_OFF = "off"
STATUS_OFFLINE = "offline"
STATUS_CLOSED = "Geschlossen"
STATUS_CLOSED_INT = 0
STATUS_OPEN = "Offen"
STATUS_OPEN_INT = 1
ALARM_NAME = "Lupusec Alarm"
ALARM_DEVICE_ID = "0"
ALARM_TYPE = "Alarm"
# GENERIC Lupusec DEVICE TYPES
TYPE_WINDOW = "Fensterkontakt"
TYPE_DOOR = "Türkontakt"
TYPE_CONTACT_XT2 = 4
TYPE_WATER_XT2 = 5
TYPE_SMOKE_XT2 = 11
TYPE_POWER_SWITCH_1_XT2 = 24
TYPE_POWER_SWITCH_2_XT2 = 25
TYPE_POWER_SWITCH = "Steckdose"
TYPE_SWITCH = [TYPE_POWER_SWITCH, TYPE_POWER_SWITCH_1_XT2, TYPE_POWER_SWITCH_2_XT2]
TYPE_OPENING = [TYPE_DOOR, TYPE_WINDOW, TYPE_CONTACT_XT2]
BINARY_SENSOR_TYPES = TYPE_OPENING
TYPE_SENSOR = ["Rauchmelder", "Wassermelder", TYPE_WATER_XT2, TYPE_SMOKE_XT2]
TYPE_TRANSLATION = {
"Fensterkontakt": "window",
"Türkontakt": "door",
TYPE_CONTACT_XT2: "Fenster-/Türkontakt",
TYPE_WATER_XT2: "Wassermelder",
TYPE_SMOKE_XT2: "Rauchmelder",
}
DEVICES_API_XT1 = "sensorListGet"
DEVICES_API_XT2 = "deviceListGet"
urlTokenGet: str = '/action/tokenGet'
urlLogoutPost = '/action/logout'
urlDeviceListGet = '/action/deviceListGet'
urlDevicePSSListGet = '/action/deviceListPSSGet'
urlDeviceGet = '/action/deviceGet'
urlPanelCondGet = '/action/panelCondGet'
urlPanelCondPost = '/action/panelCondPost'
urlDeviceSwitchPSSPost = '/action/deviceSwitchPSSPost'
urlHaExecutePost = '/action/haExecutePost'
urlDeviceEditGet = '/action/deviceEditGet'
urlDeviceEditPost = '/action/deviceEditPost'
urlDeviceSwitchDimmerPost = '/action/deviceSwitchDimmerPost'
urlDeviceHueColorControl = '/action/deviceHueColorControl'
urlDeviceEditThermoPost = '/action/deviceEditThermoPost'
urlDeviceEditThermoGet = '/action/deviceEditThermoGet'
urlDeviceEditShutterPost = '/action/deviceEditShutterPost'
urlDeviceEditShutterGet = '/action/deviceEditShutterGet'
urlDeviceEditMeterGet = '/action/deviceEditMeterGet'
urlDeviceEditMeterPost = '/action/deviceEditMeterPost'
urlDeviceNukiCmd = '/action/nukiCmd'
urlIpcamGet = '/action/ipcamGet'
urlPasthru = '/action/passthru'
urlDeviceListUPICGet = '/action/deviceListUPICGet'
urlDeviceDoUPICPost = '/action/deviceDoUPICPost'
urlSendSMSPost = '/action/sendSMSPost'
urlSmsgwTestPost = '/action/smsgwTestPost'
urlSystemGet = '/action/systemGet'
urlLogsGet = '/action/logsGet'
urlrecordListGet = '/action/recordListGet'
urlwelcomeGet = '/action/welcomeGet'
| 32.4 | 83 | 0.766774 |
VERSION = "0.1.1"
PROJECT_PACKAGE_NAME = "lupupy"
PROJECT_LICENSE = "MIT"
PROJECT_URL = "http://www.github.com/majuss/lupupy"
PROJECT_DESCRIPTION = "A python cli for Lupusec alarm panels."
PROJECT_LONG_DESCRIPTION = (
"lupupy is a python3 interface for"
" the Lupus Electronics alarm panel."
" Its intented to get used in various"
" smart home services to get a full"
" integration of all you devices."
)
PROJECT_AUTHOR = "Majuss"
MODE_AWAY = "Arm"
MODE_HOME = "Home"
MODE_DISARMED = "Disarm"
MODE_ALARM_TRIGGERED = "Einbruch"
ALL_MODES = [MODE_DISARMED, MODE_HOME, MODE_AWAY]
MODE_TRANSLATION_XT1 = {"Disarm": 2, "Home": 1, "Arm": 0}
MODE_TRANSLATION_XT2 = {"Disarm": 0, "Arm": 1, "Home": 2}
XT2_MODES_TO_TEXT = {
"{AREA_MODE_0}": "Disarm",
"{AREA_MODE_1}": "Arm",
"{AREA_MODE_2}": "Home",
"{AREA_MODE_3}": "Home",
"{AREA_MODE_4}": "Home",
}
STATE_ALARM_DISARMED = "disarmed"
STATE_ALARM_ARMED_HOME = "armed_home"
STATE_ALARM_ARMED_AWAY = "armed_away"
STATE_ALARM_TRIGGERED = "alarm_triggered"
MODE_TRANSLATION_GENERIC = {
"Disarm": "disarmed",
"Home": "armed_home",
"Arm": "armed_away",
}
DEFAULT_MODE = MODE_AWAY
HISTORY_REQUEST = "historyGet"
HISTORY_ALARM_COLUMN = "a"
HISTORY_HEADER = "hisrows"
HISTORY_CACHE_NAME = ".lupusec_history_cache"
STATUS_ON_INT = 0
STATUS_ON = "on"
STATUS_OFF_INT = 1
STATUS_OFF = "off"
STATUS_OFFLINE = "offline"
STATUS_CLOSED = "Geschlossen"
STATUS_CLOSED_INT = 0
STATUS_OPEN = "Offen"
STATUS_OPEN_INT = 1
ALARM_NAME = "Lupusec Alarm"
ALARM_DEVICE_ID = "0"
ALARM_TYPE = "Alarm"
TYPE_WINDOW = "Fensterkontakt"
TYPE_DOOR = "Türkontakt"
TYPE_CONTACT_XT2 = 4
TYPE_WATER_XT2 = 5
TYPE_SMOKE_XT2 = 11
TYPE_POWER_SWITCH_1_XT2 = 24
TYPE_POWER_SWITCH_2_XT2 = 25
TYPE_POWER_SWITCH = "Steckdose"
TYPE_SWITCH = [TYPE_POWER_SWITCH, TYPE_POWER_SWITCH_1_XT2, TYPE_POWER_SWITCH_2_XT2]
TYPE_OPENING = [TYPE_DOOR, TYPE_WINDOW, TYPE_CONTACT_XT2]
BINARY_SENSOR_TYPES = TYPE_OPENING
TYPE_SENSOR = ["Rauchmelder", "Wassermelder", TYPE_WATER_XT2, TYPE_SMOKE_XT2]
TYPE_TRANSLATION = {
"Fensterkontakt": "window",
"Türkontakt": "door",
TYPE_CONTACT_XT2: "Fenster-/Türkontakt",
TYPE_WATER_XT2: "Wassermelder",
TYPE_SMOKE_XT2: "Rauchmelder",
}
DEVICES_API_XT1 = "sensorListGet"
DEVICES_API_XT2 = "deviceListGet"
urlTokenGet: str = '/action/tokenGet'
urlLogoutPost = '/action/logout'
urlDeviceListGet = '/action/deviceListGet'
urlDevicePSSListGet = '/action/deviceListPSSGet'
urlDeviceGet = '/action/deviceGet'
urlPanelCondGet = '/action/panelCondGet'
urlPanelCondPost = '/action/panelCondPost'
urlDeviceSwitchPSSPost = '/action/deviceSwitchPSSPost'
urlHaExecutePost = '/action/haExecutePost'
urlDeviceEditGet = '/action/deviceEditGet'
urlDeviceEditPost = '/action/deviceEditPost'
urlDeviceSwitchDimmerPost = '/action/deviceSwitchDimmerPost'
urlDeviceHueColorControl = '/action/deviceHueColorControl'
urlDeviceEditThermoPost = '/action/deviceEditThermoPost'
urlDeviceEditThermoGet = '/action/deviceEditThermoGet'
urlDeviceEditShutterPost = '/action/deviceEditShutterPost'
urlDeviceEditShutterGet = '/action/deviceEditShutterGet'
urlDeviceEditMeterGet = '/action/deviceEditMeterGet'
urlDeviceEditMeterPost = '/action/deviceEditMeterPost'
urlDeviceNukiCmd = '/action/nukiCmd'
urlIpcamGet = '/action/ipcamGet'
urlPasthru = '/action/passthru'
urlDeviceListUPICGet = '/action/deviceListUPICGet'
urlDeviceDoUPICPost = '/action/deviceDoUPICPost'
urlSendSMSPost = '/action/sendSMSPost'
urlSmsgwTestPost = '/action/smsgwTestPost'
urlSystemGet = '/action/systemGet'
urlLogsGet = '/action/logsGet'
urlrecordListGet = '/action/recordListGet'
urlwelcomeGet = '/action/welcomeGet'
| true | true |
f71cb55ac21dc79bb494db37b62d30dc5c9b3af6 | 597 | py | Python | multilstm_tensorpack/tensorpack/utils/globvars.py | neale/A4C | acbbb3cf14e31a19c12f27306971b4db4feafe09 | [
"MIT"
] | 1 | 2017-03-11T23:10:00.000Z | 2017-03-11T23:10:00.000Z | multilstm_tensorpack/tensorpack/utils/globvars.py | neale/A4C | acbbb3cf14e31a19c12f27306971b4db4feafe09 | [
"MIT"
] | null | null | null | multilstm_tensorpack/tensorpack/utils/globvars.py | neale/A4C | acbbb3cf14e31a19c12f27306971b4db4feafe09 | [
"MIT"
] | 1 | 2021-04-30T15:34:24.000Z | 2021-04-30T15:34:24.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: globvars.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import argparse
import sys

import six
__all__ = ['globalns', 'use_global_argument']
# Namespace type: a bare class on Python 2, SimpleNamespace on Python 3.
if sys.version_info[0] == 2:
    class NS:
        pass
else:
    import types
    NS = types.SimpleNamespace

# Module-wide mutable namespace used to share parsed arguments/config.
globalns = NS()


def use_global_argument(args):
    """
    Add the content of :class:`argparse.Namespace` to globalns.

    Args:
        args (argparse.Namespace): arguments
    """
    assert isinstance(args, argparse.Namespace), type(args)
    # vars(args).items() behaves identically on Python 2 and 3, so the
    # third-party six.iteritems() dependency is unnecessary here.
    for k, v in vars(args).items():
        setattr(globalns, k, v)
| 19.258065 | 63 | 0.649916 |
import six
import argparse
__all__ = ['globalns', 'use_global_argument']
if six.PY2:
class NS:
pass
else:
import types
NS = types.SimpleNamespace
globalns = NS()
def use_global_argument(args):
assert isinstance(args, argparse.Namespace), type(args)
for k, v in six.iteritems(vars(args)):
setattr(globalns, k, v)
| true | true |
f71cb812c630d4ea90200d9a5c076f1b4590a71e | 1,220 | py | Python | iotbx/command_line/sort_atoms.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z | iotbx/command_line/sort_atoms.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | iotbx/command_line/sort_atoms.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-02-04T15:39:06.000Z | 2020-02-04T15:39:06.000Z | # LIBTBX_SET_DISPATCHER_NAME iotbx.pdb.sort_atoms
from __future__ import absolute_import, division, print_function
from libtbx.utils import Usage
import sys
import iotbx.pdb
import mmtbx.model
master_phil_str = """
file_name = None
.type = path
.multiple = False
.optional = False
.style = hidden
"""
def show_usage():
  """Raise libtbx's Usage exception carrying the command-line help text."""
  raise Usage("""\
iotbx.pdb.sort_atoms model.pdb
Sort atoms in residues so they will be in the same order in all residues.
Also renumbers atoms (atom serial number field 7-11 columns).""")
def run(args):
  """Read a PDB/mmCIF model, sort its atoms, and write a *_sorted copy.

  The output keeps the input's format (mmCIF in -> .cif out, else .pdb)
  and is written next to the current working directory as
  <input-stem>_sorted.<ext>.
  """
  if not args:
    show_usage()
    return
  input_name = args[0]
  model = mmtbx.model.manager(
      model_input=iotbx.pdb.input(
          file_name=input_name,
          source_info=None,
          raise_sorry_if_format_error=True))
  # Strip a known 4-character extension; otherwise keep the full name.
  if input_name.endswith(".pdb") or input_name.endswith(".cif"):
    prefix = input_name[:-4]
  else:
    prefix = input_name
  if model.input_format_was_cif():
    output_name = prefix + "_sorted.cif"
    text = model.model_as_mmcif()
  else:
    output_name = prefix + "_sorted.pdb"
    text = model.model_as_pdb()
  with open(output_name, 'w') as f:
    f.write(text)
if (__name__ == "__main__"):
run(sys.argv[1:])
| 22.181818 | 73 | 0.685246 |
from __future__ import absolute_import, division, print_function
from libtbx.utils import Usage
import sys
import iotbx.pdb
import mmtbx.model
master_phil_str = """
file_name = None
.type = path
.multiple = False
.optional = False
.style = hidden
"""
def show_usage():
help_msg = """\
iotbx.pdb.sort_atoms model.pdb
Sort atoms in residues so they will be in the same order in all residues.
Also renumbers atoms (atom serial number field 7-11 columns)."""
raise Usage(help_msg)
def run(args):
if len(args) == 0:
show_usage()
return
inp_fn = args[0]
pdb_input = iotbx.pdb.input(
file_name=inp_fn,
source_info=None,
raise_sorry_if_format_error=True)
model = mmtbx.model.manager(
model_input = pdb_input)
out_fn_prefix = inp_fn
if inp_fn.endswith(".pdb") or inp_fn.endswith(".cif"):
out_fn_prefix = inp_fn[:-4]
out_fn = out_fn_prefix + "_sorted"
txt = ""
if model.input_format_was_cif():
out_fn += ".cif"
txt = model.model_as_mmcif()
else:
out_fn += ".pdb"
txt = model.model_as_pdb()
with open(out_fn, 'w') as f:
f.write(txt)
if (__name__ == "__main__"):
run(sys.argv[1:])
| true | true |
f71cb926199d235645c93f0a046fc2b7260452e8 | 1,138 | py | Python | machine-learning-pipeline/airflow/dags/train_simple_model.py | dataength/automating-your-data-pipeline-with-apache-airflow | 19b7fe4a41874708c5927b7c32f9840f4285090c | [
"MIT"
] | 30 | 2020-07-09T17:37:47.000Z | 2022-01-19T04:17:02.000Z | machine-learning-pipeline/airflow/dags/train_simple_model.py | mizzony/automating-your-data-pipeline-with-apache-airflow | 90a1351de6de78c0f0a6fb2e778e2ba3b7c78f5e | [
"MIT"
] | 38 | 2021-08-12T08:01:47.000Z | 2022-03-29T22:29:27.000Z | machine-learning-pipeline/airflow/dags/train_simple_model.py | mizzony/automating-your-data-pipeline-with-apache-airflow | 90a1351de6de78c0f0a6fb2e778e2ba3b7c78f5e | [
"MIT"
] | 22 | 2020-07-10T02:41:39.000Z | 2022-03-23T22:08:52.000Z | import pickle
from airflow import DAG
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils import timezone
from sklearn.ensemble import RandomForestClassifier
# Defaults applied to every task in this DAG.
default_args = {
    'owner': 'ODDS',
}

# Runs every 15 minutes; catchup=False skips back-filling the intervals
# between start_date and "now" on first deployment.
dag = DAG(
    'train_simple_model',
    schedule_interval='*/15 * * * *',
    default_args=default_args,
    start_date=timezone.datetime(2020, 8, 1),
    catchup=False
)

# No-op marker task for the start of the pipeline.
start = DummyOperator(task_id='start', dag=dag)
def train_func():
    """Fit a toy random-forest on a fixed 2-sample dataset and pickle it."""
    features = [[1, 2, 3],
                [11, 12, 13]]
    labels = [0, 1]
    model = RandomForestClassifier(random_state=0)
    model.fit(features, labels)

    # NOTE(review): absolute, machine-specific path — this only works on the
    # original author's machine; confirm before running elsewhere.
    model_dir = ('/Users/zkan/Projects/dataength/'
                 'automating-your-data-pipeline-with-apache-airflow/'
                 'machine-learning-pipeline/airflow/dags')
    with open(f'{model_dir}/models/clf.model', 'wb') as outfile:
        pickle.dump(model, outfile)
# Runs train_func inside the Airflow worker on each scheduled interval.
train = PythonOperator(
    task_id='train',
    python_callable=train_func,
    dag=dag,
)

# No-op marker task for the end of the pipeline.
end = DummyOperator(task_id='end', dag=dag)

# Linear pipeline: start -> train -> end.
start >> train >> end
| 22.76 | 65 | 0.692443 | import pickle
from airflow import DAG
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils import timezone
from sklearn.ensemble import RandomForestClassifier
default_args = {
'owner': 'ODDS',
}
dag = DAG(
'train_simple_model',
schedule_interval='*/15 * * * *',
default_args=default_args,
start_date=timezone.datetime(2020, 8, 1),
catchup=False
)
start = DummyOperator(task_id='start', dag=dag)
def train_func():
clf = RandomForestClassifier(random_state=0)
X = [[ 1, 2, 3],
[11, 12, 13]]
y = [0, 1]
clf.fit(X, y)
MODEL_PATH = '/Users/zkan/Projects/dataength/' \
'automating-your-data-pipeline-with-apache-airflow/' \
'machine-learning-pipeline/airflow/dags'
with open(f'{MODEL_PATH}/models/clf.model', 'wb') as outfile:
pickle.dump(clf, outfile)
train = PythonOperator(
task_id='train',
python_callable=train_func,
dag=dag,
)
end = DummyOperator(task_id='end', dag=dag)
start >> train >> end
| true | true |
f71cba9b88574b1dfb171079ea67df5863e28a5e | 1,843 | py | Python | nighteen_cpc.py | toddlerya/AnalyzeNPC | 5d16f994ec34300a3050463aad08ad3a1ec1eaba | [
"MIT"
] | 4 | 2018-09-15T02:43:04.000Z | 2022-02-11T01:56:49.000Z | nighteen_cpc.py | toddlerya/AnalyzeNPC | 5d16f994ec34300a3050463aad08ad3a1ec1eaba | [
"MIT"
] | null | null | null | nighteen_cpc.py | toddlerya/AnalyzeNPC | 5d16f994ec34300a3050463aad08ad3a1ec1eaba | [
"MIT"
] | 5 | 2018-03-12T10:01:48.000Z | 2021-11-05T05:34:48.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: toddler
import jieba
import re
import os
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
def cut_analyze(input_file):
    """Segment a Chinese text file and rank the tokens by frequency.

    :param input_file: path of the text file to segment
    :return: (tokens, ranked) where ``tokens`` is the filtered token list
        and ``ranked`` is a list of (token, count) tuples sorted by count.
    """
    cpc_dict_path = u'user_dict/cpc_dictionary.txt'
    stop_words_path = u'user_dict/stopword.txt'
    with open(input_file) as f:
        content = f.read()
    with open(stop_words_path) as sf:
        st_content = sf.readlines()
    # Custom dictionary tuned for National People's Congress vocabulary.
    jieba.load_userdict(cpc_dict_path)
    # Decode stop words to unicode so the membership test below matches.
    stop_words = [line.strip().decode('utf-8') for line in st_content]
    segments = jieba.cut(content, cut_all=False)  # accurate (non-full) mode
    filtered = []
    for segment in segments:
        # Keep only runs of CJK characters; drops punctuation/latin noise.
        token = ''.join(re.findall(u'[\u4e00-\u9fa5]+', segment)).strip()
        if token and token not in stop_words:
            filtered.append(token)
    ranked = Counter(filtered).most_common()
    return filtered, ranked
def main():
    """Segment the 19th CPC report and display its word-frequency cloud."""
    source_path = u'input_file/nighteen-cpc.txt'
    _tokens, frequency_ranking = cut_analyze(input_file=source_path)
    # msyh.ttf supplies CJK glyphs; the default font cannot render them.
    font = os.path.abspath('assets/msyh.ttf')
    cloud = WordCloud(collocations=False, font_path=font, width=3600,
                      height=3600, margin=2)
    cloud.generate_from_frequencies(dict(frequency_ranking))
    plt.figure()
    plt.imshow(cloud)
    plt.axis('off')
    plt.show()
if __name__ == '__main__':
main() | 29.725806 | 96 | 0.688009 |
import jieba
import re
import os
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
def cut_analyze(input_file):
cpc_dict_path = u'user_dict/cpc_dictionary.txt'
stop_words_path = u'user_dict/stopword.txt'
with open(input_file) as f:
content = f.read()
with open(stop_words_path) as sf:
st_content = sf.readlines()
jieba.load_userdict(cpc_dict_path)
stop_words = [line.strip().decode('utf-8') for line in st_content]
seg_list = jieba.cut(content, cut_all=False)
filter_seg_list = list()
for seg in seg_list:
goal_word = ''.join(re.findall(u'[\u4e00-\u9fa5]+', seg)).strip()
if len(goal_word) != 0 and not stop_words.__contains__(goal_word):
            filter_seg_list.append(goal_word)
seg_counter_all = Counter(filter_seg_list).most_common()
return filter_seg_list, seg_counter_all
def main():
input_file_path = u'input_file/nighteen-cpc.txt'
cut_data, sort_data = cut_analyze(input_file=input_file_path)
font = os.path.abspath('assets/msyh.ttf')
wc = WordCloud(collocations=False, font_path=font, width=3600, height=3600, margin=2)
wc.generate_from_frequencies(dict(sort_data))
plt.figure()
plt.imshow(wc)
plt.axis('off')
plt.show()
if __name__ == '__main__':
main() | true | true |
f71cbafafa7b775082fc935301d70d2a60767f9b | 6,977 | py | Python | models/render.py | RichTeaMan/duck-game | b47db72e30767411251a43000a9afad7ee11f822 | [
"MIT"
] | null | null | null | models/render.py | RichTeaMan/duck-game | b47db72e30767411251a43000a9afad7ee11f822 | [
"MIT"
] | null | null | null | models/render.py | RichTeaMan/duck-game | b47db72e30767411251a43000a9afad7ee11f822 | [
"MIT"
] | null | null | null | import sys
import math
import pathlib
import bpy
import mathutils
from PIL import Image
modelDir = pathlib.Path(__file__).parent.absolute()
scn = bpy.context.scene
images_created = 0
def update_camera(camera, focus_point=mathutils.Vector((0.0, 0.0, 0.0)), distance=10.0):
    """
    Aim *camera* at *focus_point* and pitch it for an isometric projection.

    Only the camera's rotation is changed; its location is left untouched.

    :param camera: the camera object
    :type camera: bpy.types.object
    :param focus_point: the point to aim at (default=``mathutils.Vector((0.0, 0.0, 0.0))``)
    :type focus_point: mathutils.Vector
    :param distance: unused; retained for backward compatibility with callers
        that still pass it (default=``10.0``)
    :type distance: float
    """
    looking_direction = camera.location - focus_point
    rot_quat = looking_direction.to_track_quat('Z', 'Y')
    camera.rotation_euler = rot_quat.to_euler()
    # Fixed pitch of 54.736 degrees: the classic isometric projection angle.
    camera.rotation_euler[0] = math.radians(54.736)
# update_camera(bpy.data.objects['Camera'])
def render_direction(direction_name, camera_x, camera_y):
    """Render one still from the named compass direction; return its path.

    A per-direction orthographic camera is created lazily and reused on
    later calls. Each call writes a new sequentially numbered PNG under
    renders/ and bumps the module-level frame counter.
    """
    global images_created
    output_path = f"{modelDir}/renders/{images_created}.png"
    camera_object_name = f"CameraObj-{direction_name}"
    camera_object = bpy.data.objects.get(camera_object_name)
    if not camera_object:
        # First render from this direction: build an isometric ortho camera.
        camera_data = bpy.data.cameras.new(f"Camera-{direction_name}")
        camera_data.lens = 18
        camera_data.type = 'ORTHO'
        camera_data.ortho_scale = 1.4
        camera_object = bpy.data.objects.new(camera_object_name, camera_data)
        camera_object.location = (camera_x, camera_y, 0.5)
        camera_object.rotation_euler = (0, 0, 0)
        scn.collection.objects.link(camera_object)
    update_camera(camera_object)
    scn.camera = camera_object
    bpy.context.scene.render.filepath = output_path
    bpy.ops.render.render(animation=False, write_still=True,
                          use_viewport=False, layer='', scene='')
    images_created += 1
    return output_path
def render_frames(files):
    """Render the current pose from all eight compass directions.

    Appends the eight output file paths to *files*, always in the same
    W, NW, N, NE, E, SE, S, SW order so sprite-sheet slots stay stable.
    """
    offset = 0.4
    directions = (
        ("W", -offset, 0),
        ("NW", -offset, -offset),
        ("N", 0, -offset),
        ("NE", offset, -offset),
        ("E", offset, 0),
        ("SE", offset, offset),
        ("S", 0, offset),
        ("SW", -offset, offset),
    )
    for name, cam_x, cam_y in directions:
        files.append(render_direction(name, cam_x, cam_y))
def _link_skin_texture(material_name, image_name):
    """Wire the named image into the material's Principled BSDF base color."""
    material = bpy.data.materials.get(material_name)
    texture_node = material.node_tree.nodes.new('ShaderNodeTexImage')
    texture_node.image = bpy.data.images[image_name]
    bsdf = material.node_tree.nodes["Principled BSDF"]
    material.node_tree.links.new(bsdf.inputs['Base Color'], texture_node.outputs['Color'])


def _set_shape_key(shape_keys_name, block_name, value):
    """Set one shape-key block value; shape keys drive the duck's poses."""
    bpy.data.shape_keys[shape_keys_name].key_blocks[block_name].value = value


def renderDuck(skin_name):
    """Render the full animation sprite sheet for one duck skin.

    Applies the skin's body/wing textures, renders every animation pose from
    all eight compass directions (8 frames per pose via render_frames), then
    stitches the frames top-to-bottom, left-to-right into a padded 32x8 grid
    of 512px tiles saved as public/assets/duck-<skin>-spritesheet.png.
    """
    _link_skin_texture("duck-body", f"duck-texture-{skin_name}")
    _link_skin_texture("duck-wing", f"duck-wing-texture-{skin_name}")

    files = []

    # Tail wagging: sweep right -> centre, then centre -> left.
    for value in (1.0, 0.5, 0.0):
        _set_shape_key("Key.001", "tail-right", value)
        render_frames(files)
    for value in (0.5, 1.0):
        _set_shape_key("Key.001", "tail-left", value)
        render_frames(files)
    _set_shape_key("Key.001", "tail-left", 0.0)

    # Feeding. The first render happens before any key is set: a deliberate
    # neutral frame that keeps the sprite-sheet slots aligned.
    render_frames(files)
    for value in (0.5, 1.0):
        _set_shape_key("Key.001", "feed", value)
        render_frames(files)
    _set_shape_key("Key.001", "feed", 0.0)

    # Mouth open/close (same leading neutral frame).
    render_frames(files)
    for value in (0.5, 1.0):
        _set_shape_key("Key.001", "mouth", value)
        render_frames(files)
    _set_shape_key("Key.001", "mouth", 0.0)

    # Standing wing flap (same leading neutral frame). "Key.001" poses the
    # body, "Key" poses the wings.
    render_frames(files)
    for value in (0.5, 1.0):
        _set_shape_key("Key.001", "standing", value)
        _set_shape_key("Key", "wing-standing", value)
        _set_shape_key("Key", "standing-flap", value)
        render_frames(files)
    # The fully-standing pose is rendered a second time, matching the
    # original frame plan.
    _set_shape_key("Key.001", "standing", 1.0)
    _set_shape_key("Key", "wing-standing", 1.0)
    _set_shape_key("Key", "standing-flap", 1.0)
    render_frames(files)
    _set_shape_key("Key", "standing-flap-up", 1.0)
    render_frames(files)
    _set_shape_key("Key", "standing-flap-up", 0.0)
    _set_shape_key("Key", "standing-flap-down", 1.0)
    render_frames(files)
    # Reset every standing/flap key back to the rest pose.
    _set_shape_key("Key.001", "standing", 0.0)
    _set_shape_key("Key", "wing-standing", 0.0)
    _set_shape_key("Key", "standing-flap", 0.0)
    _set_shape_key("Key", "standing-flap-up", 0.0)
    _set_shape_key("Key", "standing-flap-down", 0.0)

    # Stitch the rendered frames into the padded sheet: 32 columns x 8 rows
    # of 512px tiles, filled top-to-bottom then left-to-right.
    images = [Image.open(path) for path in files]
    sheet = Image.new('RGBA', (32 * 512, 8 * 512))
    x_offset = 0
    y_offset = 0
    for count, image in enumerate(images, start=1):
        sheet.paste(image, (x_offset, y_offset))
        if count % 8 == 0:
            y_offset = 0
            x_offset += image.size[0]
        else:
            y_offset += image.size[1]
    sheet.save(f"{modelDir}/../public/assets/duck-{skin_name}-spritesheet.png")
renderDuck("white")
renderDuck("mallard")
renderDuck("brown")
renderDuck("mandarin")
renderDuck("duckling")
print(f"Render complete. {images_created} images rendered.") | 39.642045 | 118 | 0.694138 | import sys
import math
import pathlib
import bpy
import mathutils
from PIL import Image
modelDir = pathlib.Path(__file__).parent.absolute()
scn = bpy.context.scene
images_created = 0
def update_camera(camera, focus_point=mathutils.Vector((0.0, 0.0, 0.0)), distance=10.0):
looking_direction = camera.location - focus_point
rot_quat = looking_direction.to_track_quat('Z', 'Y')
camera.rotation_euler = rot_quat.to_euler()
camera.rotation_euler[0] = math.radians(54.736)
def render_direction(direction_name, camera_x, camera_y):
global images_created
filepath = f"{modelDir}/renders/{images_created}.png"
camera_object_name = f"CameraObj-{direction_name}"
cam_obj = bpy.data.objects.get(camera_object_name)
if (not cam_obj):
cam = bpy.data.cameras.new(f"Camera-{direction_name}")
cam.lens = 18
cam.type = 'ORTHO'
cam.ortho_scale = 1.4
cam_obj = bpy.data.objects.new(camera_object_name, cam)
cam_obj.location = (camera_x, camera_y, 0.5)
cam_obj.rotation_euler = (0, 0, 0)
scn.collection.objects.link(cam_obj)
update_camera(cam_obj)
scn.camera = cam_obj
bpy.context.scene.render.filepath = filepath
bpy.ops.render.render(animation=False, write_still=True,
use_viewport=False, layer='', scene='')
images_created = images_created + 1
return filepath
def render_frames(files):
offset = 0.4
files.append(render_direction("W", -offset, 0))
files.append(render_direction("NW", -offset, -offset))
files.append(render_direction("N", 0, -offset))
files.append(render_direction("NE", offset, -offset))
files.append(render_direction("E", offset, 0))
files.append(render_direction("SE", offset, offset))
files.append(render_direction("S", 0, offset))
files.append(render_direction("SW", -offset, offset))
def renderDuck(skin_name):
body_texture_image = bpy.data.images[f"duck-texture-{skin_name}"]
body_material = bpy.data.materials.get("duck-body")
body_bsdf = body_material.node_tree.nodes["Principled BSDF"]
body_shader_node_texture_image = body_material.node_tree.nodes.new('ShaderNodeTexImage')
body_shader_node_texture_image.image = body_texture_image
body_material.node_tree.links.new(body_bsdf.inputs['Base Color'], body_shader_node_texture_image.outputs['Color'])
wing_texture_image = bpy.data.images[f"duck-wing-texture-{skin_name}"]
wing_material = bpy.data.materials.get("duck-wing")
wing_bsdf = wing_material.node_tree.nodes["Principled BSDF"]
wing_shader_node_texture_image = wing_material.node_tree.nodes.new('ShaderNodeTexImage')
wing_shader_node_texture_image.image = wing_texture_image
wing_material.node_tree.links.new(wing_bsdf.inputs['Base Color'], wing_shader_node_texture_image.outputs['Color'])
files = []
bpy.data.shape_keys["Key.001"].key_blocks["tail-right"].value = 1.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["tail-right"].value = 0.5
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["tail-right"].value = 0.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["tail-left"].value = 0.5
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["tail-left"].value = 1.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["tail-left"].value = 0.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["feed"].value = 0.5
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["feed"].value = 1.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["feed"].value = 0.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["mouth"].value = 0.5
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["mouth"].value = 1.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["mouth"].value = 0.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["standing"].value = 0.5
bpy.data.shape_keys["Key"].key_blocks["wing-standing"].value = 0.5
bpy.data.shape_keys["Key"].key_blocks["standing-flap"].value = 0.5
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["standing"].value = 1.0
bpy.data.shape_keys["Key"].key_blocks["wing-standing"].value = 1.0
bpy.data.shape_keys["Key"].key_blocks["standing-flap"].value = 1.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["standing"].value = 1.0
bpy.data.shape_keys["Key"].key_blocks["wing-standing"].value = 1.0
bpy.data.shape_keys["Key"].key_blocks["standing-flap"].value = 1.0
render_frames(files)
bpy.data.shape_keys["Key"].key_blocks["standing-flap-up"].value = 1.0
render_frames(files)
bpy.data.shape_keys["Key"].key_blocks["standing-flap-up"].value = 0.0
bpy.data.shape_keys["Key"].key_blocks["standing-flap-down"].value = 1.0
render_frames(files)
bpy.data.shape_keys["Key.001"].key_blocks["standing"].value = 0.0
bpy.data.shape_keys["Key"].key_blocks["wing-standing"].value = 0.0
bpy.data.shape_keys["Key"].key_blocks["standing-flap"].value = 0.0
bpy.data.shape_keys["Key"].key_blocks["standing-flap-up"].value = 0.0
bpy.data.shape_keys["Key"].key_blocks["standing-flap-down"].value = 0.0
images = [Image.open(x) for x in files]
widths, heights = zip(*(i.size for i in images))
total_width = 32 * 512
total_height = 8 * 512
new_im = Image.new('RGBA', (total_width, total_height))
x_offset = 0
y_offset = 0
count = 0
for im in images:
new_im.paste(im, (x_offset, y_offset))
count = count + 1
if count % 8 == 0:
y_offset = 0
x_offset += im.size[0]
else:
y_offset += im.size[1]
new_im.save(f"{modelDir}/../public/assets/duck-{skin_name}-spritesheet.png")
renderDuck("white")
renderDuck("mallard")
renderDuck("brown")
renderDuck("mandarin")
renderDuck("duckling")
print(f"Render complete. {images_created} images rendered.") | true | true |
f71cbc5a7db50b299b464568fe69775d801e45e9 | 1,650 | py | Python | concept_formation/tests/benchmark_cobweb.py | ThomasHoppe/concept_formation | 2468fea78ba46804bf44228519eb33ebc5780d31 | [
"MIT"
] | 47 | 2015-06-08T20:34:18.000Z | 2021-09-26T17:59:06.000Z | concept_formation/tests/benchmark_cobweb.py | ThomasHoppe/concept_formation | 2468fea78ba46804bf44228519eb33ebc5780d31 | [
"MIT"
] | 65 | 2015-07-27T18:16:31.000Z | 2021-10-04T14:02:51.000Z | concept_formation/tests/benchmark_cobweb.py | ThomasHoppe/concept_formation | 2468fea78ba46804bf44228519eb33ebc5780d31 | [
"MIT"
] | 13 | 2015-07-27T13:27:03.000Z | 2022-03-15T02:18:10.000Z | from random import randint
from timeit import timeit
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
def generate_dataset(n_inst, n_attr, n_val):
    """Build a list of ``n_inst`` random training instances.

    Each instance is a dict mapping attribute names "0".."n_attr-1"
    (as strings) to a random integer value in [1, n_val].
    """
    # Note: the original reused the loop variable ``i`` for the instance
    # dict, shadowing the counter; a comprehension avoids that entirely.
    return [
        {str(attr): randint(1, n_val) for attr in range(n_attr)}
        for _ in range(n_inst)
    ]
def time(n_inst, n_attr, n_val):
    """Time a single CobwebTree.fit() call on a fresh random dataset.

    Returns wall-clock seconds for one fit over ``n_inst`` instances with
    ``n_attr`` attributes of ``n_val`` possible values each.
    """
    setup_stmts = (
        'from __main__ import generate_dataset; '
        'from concept_formation.cobweb import CobwebTree; '
        'tree = CobwebTree(); '
        'x = generate_dataset(%i, %i, %i)' % (n_inst, n_attr, n_val)
    )
    return timeit('tree.fit(x)', setup=setup_stmts, number=1)
if __name__ == "__main__":
    # Benchmark CobwebTree.fit() runtime as the number of training
    # instances grows, for three attribute counts, and plot the curves.
    # 5 attributes
    sizes = [10, 30, 60, 120, 180, 220]
    times = [time(i, 5, 5) for i in sizes]
    plt.plot(sizes, times, 'ro')  # red markers + line: 5 attributes
    plt.plot(sizes, times, 'r-')
    # 10 attributes
    times = [time(i, 10, 5) for i in sizes]
    plt.plot(sizes, times, 'bo')  # blue: 10 attributes
    plt.plot(sizes, times, 'b-')
    # 20 attributes
    times = [time(i, 20, 5) for i in sizes]
    plt.plot(sizes, times, 'go')  # green: 20 attributes
    plt.plot(sizes, times, 'g-')
    red_patch = mpatches.Patch(color='red', label='# attr=5')
    blue_patch = mpatches.Patch(color='blue', label='# attr=10')
    green_patch = mpatches.Patch(color='green', label='# attr=20')
    plt.legend(handles=[red_patch, blue_patch, green_patch], loc=2)  # loc=2: upper left
    plt.xlabel('Number of training instances (5 possible values / attr)')
    plt.ylabel('Runtime in Seconds')
    plt.show()
| 31.132075 | 78 | 0.569697 | from random import randint
from timeit import timeit
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
def generate_dataset(n_inst, n_attr, n_val):
    """Create ``n_inst`` random instances with ``n_attr`` attributes each.

    Attribute names are the stringified indices "0".."n_attr-1"; each
    value is drawn uniformly from [1, n_val].
    """
    dataset = []
    for _ in range(n_inst):
        instance = {str(attr): randint(1, n_val) for attr in range(n_attr)}
        dataset.append(instance)
    return dataset
def time(n_inst, n_attr, n_val):
    """Return wall-clock seconds for one CobwebTree.fit() call on a
    freshly generated random dataset of the requested size and shape.

    NOTE: this module-level name shadows the stdlib ``time`` module.
    """
    # number=1: a single fit; the dataset is rebuilt inside the timeit setup
    # so dataset generation itself is excluded from the measured time.
    return timeit('tree.fit(x)',
                  setup=('from __main__ import generate_dataset; '
                         'from concept_formation.cobweb import CobwebTree; '
                         'tree = CobwebTree(); '
                         'x = generate_dataset(%i, %i, %i)' % (n_inst, n_attr,
                                                               n_val)),
                  number=1)
if __name__ == "__main__":
    # Benchmark fit() runtime vs. training-set size for 5/10/20 attributes
    # and plot one colored curve per attribute count.
    sizes = [10, 30, 60, 120, 180, 220]
    # 5 attributes (red)
    times = [time(i, 5, 5) for i in sizes]
    plt.plot(sizes, times, 'ro')
    plt.plot(sizes, times, 'r-')
    # 10 attributes (blue)
    times = [time(i, 10, 5) for i in sizes]
    plt.plot(sizes, times, 'bo')
    plt.plot(sizes, times, 'b-')
    # 20 attributes (green)
    times = [time(i, 20, 5) for i in sizes]
    plt.plot(sizes, times, 'go')
    plt.plot(sizes, times, 'g-')
    red_patch = mpatches.Patch(color='red', label='# attr=5')
    blue_patch = mpatches.Patch(color='blue', label='# attr=10')
    green_patch = mpatches.Patch(color='green', label='# attr=20')
    plt.legend(handles=[red_patch, blue_patch, green_patch], loc=2)  # upper left
    plt.xlabel('Number of training instances (5 possible values / attr)')
    plt.ylabel('Runtime in Seconds')
    plt.show()
| true | true |
f71cbc803e6c23ac267127d39b3cacff5df2afb2 | 1,759 | py | Python | ddtrace/contrib/starlette/patch.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 308 | 2016-12-07T16:49:27.000Z | 2022-03-15T10:06:45.000Z | ddtrace/contrib/starlette/patch.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1,928 | 2016-11-28T17:13:18.000Z | 2022-03-31T21:43:19.000Z | ddtrace/contrib/starlette/patch.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 311 | 2016-11-27T03:01:49.000Z | 2022-03-18T21:34:03.000Z | import starlette
from starlette.middleware import Middleware
from starlette.routing import Match
from ddtrace import config
from ddtrace.contrib.asgi.middleware import TraceMiddleware
from ddtrace.internal.logger import get_logger
from ddtrace.internal.utils.wrappers import unwrap as _u
from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
# Module-level logger for this integration.
log = get_logger(__name__)

# Register default settings for the "starlette" integration with the
# global ddtrace config.
config._add(
    "starlette",
    dict(
        _default_service="starlette",
        request_span_name="starlette.request",
        distributed_tracing=True,
        aggregate_resources=True,
    ),
)
def get_resource(scope):
    """Return the Starlette route path matching this ASGI ``scope``.

    A full route match wins immediately; otherwise the first partial
    match is used.  Returns None when no route matches.
    """
    partial_path = None
    for route in scope["app"].routes:
        match, _ = route.matches(scope)
        if match == Match.FULL:
            # Exact match — no need to keep scanning.
            return route.path
        if match == Match.PARTIAL and partial_path is None:
            partial_path = route.path
    return partial_path
def span_modifier(span, scope):
    """Rewrite the span resource to "<METHOD> <route path>" so spans
    aggregate by route rather than by raw URL."""
    route_path = get_resource(scope)
    if not route_path:
        return
    if config.starlette["aggregate_resources"]:
        span.resource = "{} {}".format(scope["method"], route_path)
def traced_init(wrapped, instance, args, kwargs):
    """Wrapper for ``Starlette.__init__`` that prepends the Datadog
    TraceMiddleware to the application's middleware stack."""
    middlewares = kwargs.pop("middleware", [])
    # insert(0, ...) keeps the trace middleware outermost; mutating the
    # caller's list in place matches the original behavior.
    middlewares.insert(
        0,
        Middleware(TraceMiddleware, integration_config=config.starlette, span_modifier=span_modifier),
    )
    kwargs["middleware"] = middlewares
    wrapped(*args, **kwargs)
def patch():
    """Instrument ``Starlette.__init__`` (idempotent)."""
    already_patched = getattr(starlette, "_datadog_patch", False)
    if already_patched:
        return
    starlette._datadog_patch = True
    _w("starlette.applications", "Starlette.__init__", traced_init)
def unpatch():
    """Remove the instrumentation installed by :func:`patch`, if any."""
    currently_patched = getattr(starlette, "_datadog_patch", False)
    if not currently_patched:
        return
    starlette._datadog_patch = False
    _u(starlette.applications.Starlette, "__init__")
| 25.867647 | 111 | 0.69585 | import starlette
from starlette.middleware import Middleware
from starlette.routing import Match
from ddtrace import config
from ddtrace.contrib.asgi.middleware import TraceMiddleware
from ddtrace.internal.logger import get_logger
from ddtrace.internal.utils.wrappers import unwrap as _u
from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
# Module-level logger for this integration.
log = get_logger(__name__)

# Register default settings for the "starlette" integration.
config._add(
    "starlette",
    dict(
        _default_service="starlette",
        request_span_name="starlette.request",
        distributed_tracing=True,
        aggregate_resources=True,
    ),
)
def get_resource(scope):
    """Return the Starlette route path matching this ASGI ``scope``.

    Prefers the first full match; otherwise falls back to the first
    partial match.  Returns None when no route matches.
    """
    path = None
    routes = scope["app"].routes
    for route in routes:
        match, _ = route.matches(scope)
        if match == Match.FULL:
            path = route.path
            break
        elif match == Match.PARTIAL and path is None:
            path = route.path
    return path
def span_modifier(span, scope):
    """Set the span resource to "<METHOD> <route path>" when the
    ``aggregate_resources`` setting is enabled."""
    resource = get_resource(scope)
    if config.starlette["aggregate_resources"] and resource:
        span.resource = "{} {}".format(scope["method"], resource)
def traced_init(wrapped, instance, args, kwargs):
    """Wrapper for ``Starlette.__init__`` that prepends the Datadog
    TraceMiddleware to the user's middleware stack."""
    mw = kwargs.pop("middleware", [])
    # insert(0, ...) keeps the trace middleware outermost.
    mw.insert(0, Middleware(TraceMiddleware, integration_config=config.starlette, span_modifier=span_modifier))
    kwargs.update({"middleware": mw})
    wrapped(*args, **kwargs)
def patch():
    """Instrument ``Starlette.__init__`` (idempotent: a module flag
    prevents double-patching)."""
    if getattr(starlette, "_datadog_patch", False):
        return
    setattr(starlette, "_datadog_patch", True)
    _w("starlette.applications", "Starlette.__init__", traced_init)
def unpatch():
    """Remove the instrumentation installed by ``patch``, if present."""
    if not getattr(starlette, "_datadog_patch", False):
        return
    setattr(starlette, "_datadog_patch", False)
    _u(starlette.applications.Starlette, "__init__")
| true | true |
f71cbe052a1401c87b58ad7ee12061265e925398 | 3,707 | py | Python | locations/spiders/mcdonalds_hu.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | 1 | 2019-08-19T10:00:55.000Z | 2019-08-19T10:00:55.000Z | locations/spiders/mcdonalds_hu.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | null | null | null | locations/spiders/mcdonalds_hu.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class McDonaldsHUSpider(scrapy.Spider):
    """Scrape Hungarian McDonald's locations from www.mcdonalds.hu.

    Crawls the restaurant index, follows each store's detail page, and
    yields one GeojsonPointItem per store with address, phone,
    coordinates and opening hours in OSM ``opening_hours`` syntax.
    """

    name = "mcdonalds_hu"
    allowed_domains = ["www.mcdonalds.hu"]
    start_urls = (
        'https://www.mcdonalds.hu/ettermeink',
    )

    def store_hours(self, data):
        """Build an OSM ``opening_hours`` string from a store detail page.

        The page lists one "HH:MM–HH:MM" row per weekday, Monday first;
        rows without a time range are treated as closed ("off").
        Returns None when no day rows are found.
        """
        # BUGFIX: the original listed ['Mo', 'Th', 'We', 'Tu', ...],
        # swapping Tuesday and Thursday; OSM day order is Mo,Tu,We,Th.
        weekdays = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
        day_hours = data.xpath('.//div[@class="grid__item one-half text--right"]//text()').extract()

        day_groups = []
        this_day_group = {}
        for index, day_hour in enumerate(day_hours):
            if index == 7:
                # Only the first seven rows are weekday hours.
                break
            day_hour = day_hour.strip()

            # Times on the page are separated by an en dash.
            match = re.search(r'([0-9]{1,2}):([0-9]{1,2})–([0-9]{1,2}):([0-9]{1,2})', day_hour)
            if not match:
                hours = "off"
            else:
                sh, sm, eh, em = match.groups()
                # Closing hours below 12 are shifted into the PM range
                # (e.g. "8:00" closing becomes 20:00) — presumably the site
                # shows 12h closing times; TODO confirm against live pages.
                hours = '{}:{}-{}:{}'.format(sh, sm, int(eh) + 12 if int(eh) < 12 else int(eh), em)

            short_day = weekdays[index]
            # Coalesce consecutive days that share identical hours.
            if not this_day_group:
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
            elif hours == this_day_group['hours']:
                this_day_group['to_day'] = short_day
            else:
                day_groups.append(this_day_group)
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
        if this_day_group:
            # Robustness fix: only append a non-empty trailing group, so an
            # empty page yields None instead of raising KeyError below.
            day_groups.append(this_day_group)

        if not day_groups:
            return None

        if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
            return '24/7'

        opening_hours = ''
        for day_group in day_groups:
            if day_group['from_day'] == day_group['to_day']:
                opening_hours += '{from_day} {hours}; '.format(**day_group)
            else:
                opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
        # Drop the trailing '; ' separator.
        return opening_hours[:-2]

    def parse_latlon(self, data):
        """Extract (lat, lon) strings from the "show on map" link.

        The href embeds coordinates after a "loc:" marker: "...loc:LAT,LON".
        """
        map_url = data.xpath('//a[@title="Mutatás a térképen"]/@href').extract_first().strip()
        lat_lon = map_url.split("loc:")[1]
        lat = lat_lon.split(",")[0]
        lon = lat_lon.split(",")[1]
        return lat, lon

    def parse_store(self, response):
        """Parse one store detail page into a GeojsonPointItem."""
        address = response.xpath('//h1[@class="text--uppercase"]/text()').extract_first()
        phone = response.xpath('//a[@title="Telefonszám"]/text()').extract_first()
        lat, lon = self.parse_latlon(response)
        properties = {
            'ref': response.meta['ref'],
            'phone': phone.strip() if phone else "",
            'lon': lon,
            'lat': lat,
            'name': "McDonald's",
            'addr_full': address.strip() if address else ""
        }
        opening_hours = self.store_hours(response)
        if opening_hours:
            properties['opening_hours'] = opening_hours
        yield GeojsonPointItem(**properties)

    def parse(self, response):
        """Parse the restaurant index; request each store's detail page."""
        for item in response.xpath('//article'):
            # The store id is the third path segment of the footer link.
            ref_id = item.xpath('.//footer/a/@href').extract_first().strip()
            ref_id = ref_id.split("/")[2]
            yield scrapy.Request(
                response.urljoin('https://www.mcdonalds.hu/ettermeink/' + ref_id),
                meta={'ref': ref_id},
                callback=self.parse_store,
            )
| 34.324074 | 147 | 0.514702 |
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class McDonaldsHUSpider(scrapy.Spider):
    """Spider for Hungarian McDonald's locations (www.mcdonalds.hu)."""

    name = "mcdonalds_hu"
    allowed_domains = ["www.mcdonalds.hu"]
    start_urls = (
        'https://www.mcdonalds.hu/ettermeink',
    )

    def store_hours(self, data):
        """Convert the store page's per-day hour rows into an OSM
        ``opening_hours`` string (e.g. "Mo-Fr 08:00-22:00")."""
        day_groups = []
        this_day_group = {}
        # NOTE(review): 'Th' and 'Tu' appear swapped here (expected
        # Mo,Tu,We,Th order) — confirm against the site's day ordering.
        weekdays = ['Mo', 'Th', 'We', 'Tu', 'Fr', 'Sa', 'Su']
        day_hours = data.xpath('.//div[@class="grid__item one-half text--right"]//text()').extract()
        index = 0
        for day_hour in day_hours:
            day_hour = day_hour.strip()
            # Only the first seven rows are weekday hours.
            if index == 7:
                break
            hours = ''
            # Times on the page are separated by an en dash.
            match = re.search(r'([0-9]{1,2}):([0-9]{1,2})–([0-9]{1,2}):([0-9]{1,2})', day_hour)
            if not match:
                hours = "off"
            else:
                sh, sm, eh, em = match.groups()
                # Closing hours below 12 are shifted into the PM range.
                hours = '{}:{}-{}:{}'.format(sh, sm, int(eh) + 12 if int(eh) < 12 else int(eh), em)
            short_day = weekdays[index]
            # Coalesce consecutive days that share the same hours.
            if not this_day_group:
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
            elif hours == this_day_group['hours']:
                this_day_group['to_day'] = short_day
            elif hours != this_day_group['hours']:
                day_groups.append(this_day_group)
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
            index = index + 1
        day_groups.append(this_day_group)
        if not day_groups:
            return None
        opening_hours = ''
        if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
            opening_hours = '24/7'
        else:
            for day_group in day_groups:
                if day_group['from_day'] == day_group['to_day']:
                    opening_hours += '{from_day} {hours}; '.format(**day_group)
                else:
                    opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
            # Drop the trailing '; ' separator.
            opening_hours = opening_hours[:-2]
        return opening_hours

    def parse_latlon(self, data):
        """Pull (lat, lon) strings out of the "show on map" link href.

        The href embeds coordinates after a "loc:" marker: "...loc:LAT,LON".
        """
        map_url = data.xpath('//a[@title="Mutatás a térképen"]/@href').extract_first().strip()
        lat_lon = map_url.split("loc:")[1]
        lat = lat_lon.split(",")[0]
        lon = lat_lon.split(",")[1]
        return lat, lon

    def parse_store(self, response):
        """Parse one store detail page into a GeojsonPointItem."""
        address = response.xpath('//h1[@class="text--uppercase"]/text()').extract_first()
        phone = response.xpath('//a[@title="Telefonszám"]/text()').extract_first()
        lat, lon = self.parse_latlon(response)
        properties = {
            'ref': response.meta['ref'],
            'phone': phone.strip() if phone else "",
            'lon': lon,
            'lat': lat,
            'name': "McDonald's",
            'addr_full': address.strip() if address else ""
        }
        opening_hours = self.store_hours(response)
        if opening_hours:
            properties['opening_hours'] = opening_hours
        yield GeojsonPointItem(**properties)

    def parse(self, response):
        """Parse the index page and follow each store's detail link."""
        results = response.xpath('//article')
        for item in results:
            # Store id is the third path segment of the footer link.
            ref_id = item.xpath('.//footer/a/@href').extract_first().strip()
            ref_id = ref_id.split("/")[2]
            yield scrapy.Request(response.urljoin('https://www.mcdonalds.hu/ettermeink/' + ref_id), meta={'ref':ref_id}, callback=self.parse_store)
| true | true |
f71cbe39c1107e8c3db2f02071238dd85d13bb46 | 8,346 | py | Python | src/ml_rasa/scripts/preprocessors/check_english.py | GrigalashviliT/spoilerBlocker | 18a5e9689099d3b631a15ed20cc84a043f324055 | [
"MIT"
] | 5 | 2020-05-20T16:59:04.000Z | 2021-08-22T18:30:47.000Z | src/ml_rasa/scripts/preprocessors/check_english.py | GrigalashviliT/spoilerBlocker | 18a5e9689099d3b631a15ed20cc84a043f324055 | [
"MIT"
] | 10 | 2020-05-20T16:07:04.000Z | 2020-07-22T19:21:16.000Z | src/ml_rasa/scripts/preprocessors/check_english.py | GrigalashviliT/spoilerBlocker | 18a5e9689099d3b631a15ed20cc84a043f324055 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
from re import sub
from typing import Any, List, Text
from functools import reduce
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import Message
from rasa.nlu.training_data import TrainingData
import string
class CheckEnglish(Component):
provides = ["text"]
alphabet = ['a', 'b', 'c', 'd']
stopwords = ["a","about","above","after","again","against","ain","all","am","an","and","any","are","aren","aren't","as","at","be","because","been","before","being","below","between","both","but","by","can","couldn","couldn't","d","did","didn","didn't","do","does","doesn","doesn't","doing","don","don't","down","during","each","few","for","from","further","had","hadn","hadn't","has","hasn","hasn't","have","haven","haven't","having","he","her","here","hers","herself","him","himself","his","how","i","if","in","into","is","isn","isn't","it","it's","its","itself","just","ll","m","ma","me","mightn","mightn't","more","most","mustn","mustn't","my","myself","needn","needn't","no","nor","not","now","o","of","off","on","once","only","or","other","our","ours","ourselves","out","over","own","re","s","same","shan","shan't","she","she's","should","should've","shouldn","shouldn't","so","some","such","t","than","that","that'll","the","their","theirs","them","themselves","then","there","these","they","this","those","through","to","too","under","until","up","ve","very","was","wasn","wasn't","we","were","weren","weren't","what","when","where","which","while","who","whom","why","will","with","won","won't","wouldn","wouldn't","y","you","you'd","you'll","you're","you've","your","yours","yourself","yourselves","could","he'd","he'll","he's","here's","how's","i'd","i'll","i'm","i've","let's","ought","she'd","she'll","that's","there's","they'd","they'll","they're","they've","we'd","we'll","we're","we've","what's","when's","where's","who's","why's","would","able","abst","accordance","according","accordingly","across","act","actually","added","adj","affected","affecting","affects","afterwards","ah","almost","alone","along","already","also","although","always","among","amongst","announce","another","anybody","anyhow","anymore","anyone","anything","anyway","anyways","anywhere","apparently","approximately","arent","arise","around","aside","ask","asking","auth","available","away","awfully","b","back
","became","become","becomes","becoming","beforehand","begin","beginning","beginnings","begins","behind","believe","beside","besides","beyond","biol","brief","briefly","c","ca","came","cannot","can't","cause","causes","certain","certainly","co","com","come","comes","contain","containing","contains","couldnt","date","different","done","downwards","due","e","ed","edu","effect","eg","eight","eighty","either","else","elsewhere","end","ending","enough","especially","et","etc","even","ever","every","everybody","everyone","everything","everywhere","ex","except","f","far","ff","fifth","first","five","fix","followed","following","follows","former","formerly","forth","found","four","furthermore","g","gave","get","gets","getting","give","given","gives","giving","go","goes","gone","got","gotten","h","happens","hardly","hed","hence","hereafter","hereby","herein","heres","hereupon","hes","hi","hid","hither","home","howbeit","however","hundred","id","ie","im","immediate","immediately","importance","important","inc","indeed","index","information","instead","invention","inward","itd","it'll","j","k","keep","keeps","kept","kg","km","know","known","knows","l","largely","last","lately","later","latter","latterly","least","less","lest","let","lets","like","liked","likely","line","little","'ll","look","looking","looks","ltd","made","mainly","make","makes","many","may","maybe","mean","means","meantime","meanwhile","merely","mg","might","million","miss","ml","moreover","mostly","mr","mrs","much","mug","must","n","na","name","namely","nay","nd","near","nearly","necessarily","necessary","need","needs","neither","never","nevertheless","new","next","nine","ninety","nobody","non","none","nonetheless","noone","normally","nos","noted","nothing","nowhere","obtain","obtained","obviously","often","oh","ok","okay","old","omitted","one","ones","onto","ord","others","otherwise","outside","overall","owing","p","page","pages","part","particular","particularly","past","per","perhaps","placed","please","pl
us","poorly","possible","possibly","potentially","pp","predominantly","present","previously","primarily","probably","promptly","proud","provides","put","q","que","quickly","quite","qv","r","ran","rather","rd","readily","really","recent","recently","ref","refs","regarding","regardless","regards","related","relatively","research","respectively","resulted","resulting","results","right","run","said","saw","say","saying","says","sec","section","see","seeing","seem","seemed","seeming","seems","seen","self","selves","sent","seven","several","shall","shed","shes","show","showed","shown","showns","shows","significant","significantly","similar","similarly","since","six","slightly","somebody","somehow","someone","somethan","something","sometime","sometimes","somewhat","somewhere","soon","sorry","specifically","specified","specify","specifying","still","stop","strongly","sub","substantially","successfully","sufficiently","suggest","sup","sure","take","taken","taking","tell","tends","th","thank","thanks","thanx","thats","that've","thence","thereafter","thereby","thered","therefore","therein","there'll","thereof","therere","theres","thereto","thereupon","there've","theyd","theyre","think","thou","though","thoughh","thousand","throug","throughout","thru","thus","til","tip","together","took","toward","towards","tried","tries","truly","try","trying","ts","twice","two","u","un","unfortunately","unless","unlike","unlikely","unto","upon","ups","us","use","used","useful","usefully","usefulness","uses","using","usually","v","value","various","'ve","via","viz","vol","vols","vs","w","want","wants","wasnt","way","wed","welcome","went","werent","whatever","what'll","whats","whence","whenever","whereafter","whereas","whereby","wherein","wheres","whereupon","wherever","whether","whim","whither","whod","whoever","whole","who'll","whomever","whos","whose","widely","willing","wish","within","without","wont","words","world","wouldnt","www","x","yes","yet","youd","youre","z","zero","a's","ain't","a
llow","allows","apart","appear","appreciate","appropriate","associated","best","better","c'mon","c's","cant","changes","clearly","concerning","consequently","consider","considering","corresponding","course","currently","definitely","described","despite","entirely","exactly","example","going","greetings","hello","help","hopefully","ignored","inasmuch","indicate","indicated","indicates","inner","insofar","it'd","keep","keeps","novel","presumably","reasonably","second","secondly","sensible","serious","seriously","sure","t's","third","thorough","thoroughly","three","well","wonder"]
def train(self, training_data, config, **kwargs):
# type: (TrainingData, RasaNLUModelConfig, **Any) -> None
for example in training_data.training_examples:
example.text = self.preprocess(example.text)
example.set("text", example.text)
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
message.text = self.preprocess(message.get('text'))
message.set("text", message.text)
def english_word_count(self, word):
alph = list(string.ascii_lowercase)
count = 0
for ch in word:
if ch in alph:
count += 1
return count
def preprocess(self, text):
text = text.lower()
alph = list(string.ascii_lowercase)
new_text = ''
for word in text.split():
count = self.english_word_count(word)
if word in self.stopwords:
continue
if count / len(word) > 0.6:
new_text += word + ' '
return new_text[:-1] | 107 | 6,589 | 0.619458 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
from re import sub
from typing import Any, List, Text
from functools import reduce
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import Message
from rasa.nlu.training_data import TrainingData
import string
class CheckEnglish(Component):
provides = ["text"]
alphabet = ['a', 'b', 'c', 'd']
stopwords = ["a","about","above","after","again","against","ain","all","am","an","and","any","are","aren","aren't","as","at","be","because","been","before","being","below","between","both","but","by","can","couldn","couldn't","d","did","didn","didn't","do","does","doesn","doesn't","doing","don","don't","down","during","each","few","for","from","further","had","hadn","hadn't","has","hasn","hasn't","have","haven","haven't","having","he","her","here","hers","herself","him","himself","his","how","i","if","in","into","is","isn","isn't","it","it's","its","itself","just","ll","m","ma","me","mightn","mightn't","more","most","mustn","mustn't","my","myself","needn","needn't","no","nor","not","now","o","of","off","on","once","only","or","other","our","ours","ourselves","out","over","own","re","s","same","shan","shan't","she","she's","should","should've","shouldn","shouldn't","so","some","such","t","than","that","that'll","the","their","theirs","them","themselves","then","there","these","they","this","those","through","to","too","under","until","up","ve","very","was","wasn","wasn't","we","were","weren","weren't","what","when","where","which","while","who","whom","why","will","with","won","won't","wouldn","wouldn't","y","you","you'd","you'll","you're","you've","your","yours","yourself","yourselves","could","he'd","he'll","he's","here's","how's","i'd","i'll","i'm","i've","let's","ought","she'd","she'll","that's","there's","they'd","they'll","they're","they've","we'd","we'll","we're","we've","what's","when's","where's","who's","why's","would","able","abst","accordance","according","accordingly","across","act","actually","added","adj","affected","affecting","affects","afterwards","ah","almost","alone","along","already","also","although","always","among","amongst","announce","another","anybody","anyhow","anymore","anyone","anything","anyway","anyways","anywhere","apparently","approximately","arent","arise","around","aside","ask","asking","auth","available","away","awfully","b","back
","became","become","becomes","becoming","beforehand","begin","beginning","beginnings","begins","behind","believe","beside","besides","beyond","biol","brief","briefly","c","ca","came","cannot","can't","cause","causes","certain","certainly","co","com","come","comes","contain","containing","contains","couldnt","date","different","done","downwards","due","e","ed","edu","effect","eg","eight","eighty","either","else","elsewhere","end","ending","enough","especially","et","etc","even","ever","every","everybody","everyone","everything","everywhere","ex","except","f","far","ff","fifth","first","five","fix","followed","following","follows","former","formerly","forth","found","four","furthermore","g","gave","get","gets","getting","give","given","gives","giving","go","goes","gone","got","gotten","h","happens","hardly","hed","hence","hereafter","hereby","herein","heres","hereupon","hes","hi","hid","hither","home","howbeit","however","hundred","id","ie","im","immediate","immediately","importance","important","inc","indeed","index","information","instead","invention","inward","itd","it'll","j","k","keep","keeps","kept","kg","km","know","known","knows","l","largely","last","lately","later","latter","latterly","least","less","lest","let","lets","like","liked","likely","line","little","'ll","look","looking","looks","ltd","made","mainly","make","makes","many","may","maybe","mean","means","meantime","meanwhile","merely","mg","might","million","miss","ml","moreover","mostly","mr","mrs","much","mug","must","n","na","name","namely","nay","nd","near","nearly","necessarily","necessary","need","needs","neither","never","nevertheless","new","next","nine","ninety","nobody","non","none","nonetheless","noone","normally","nos","noted","nothing","nowhere","obtain","obtained","obviously","often","oh","ok","okay","old","omitted","one","ones","onto","ord","others","otherwise","outside","overall","owing","p","page","pages","part","particular","particularly","past","per","perhaps","placed","please","pl
us","poorly","possible","possibly","potentially","pp","predominantly","present","previously","primarily","probably","promptly","proud","provides","put","q","que","quickly","quite","qv","r","ran","rather","rd","readily","really","recent","recently","ref","refs","regarding","regardless","regards","related","relatively","research","respectively","resulted","resulting","results","right","run","said","saw","say","saying","says","sec","section","see","seeing","seem","seemed","seeming","seems","seen","self","selves","sent","seven","several","shall","shed","shes","show","showed","shown","showns","shows","significant","significantly","similar","similarly","since","six","slightly","somebody","somehow","someone","somethan","something","sometime","sometimes","somewhat","somewhere","soon","sorry","specifically","specified","specify","specifying","still","stop","strongly","sub","substantially","successfully","sufficiently","suggest","sup","sure","take","taken","taking","tell","tends","th","thank","thanks","thanx","thats","that've","thence","thereafter","thereby","thered","therefore","therein","there'll","thereof","therere","theres","thereto","thereupon","there've","theyd","theyre","think","thou","though","thoughh","thousand","throug","throughout","thru","thus","til","tip","together","took","toward","towards","tried","tries","truly","try","trying","ts","twice","two","u","un","unfortunately","unless","unlike","unlikely","unto","upon","ups","us","use","used","useful","usefully","usefulness","uses","using","usually","v","value","various","'ve","via","viz","vol","vols","vs","w","want","wants","wasnt","way","wed","welcome","went","werent","whatever","what'll","whats","whence","whenever","whereafter","whereas","whereby","wherein","wheres","whereupon","wherever","whether","whim","whither","whod","whoever","whole","who'll","whomever","whos","whose","widely","willing","wish","within","without","wont","words","world","wouldnt","www","x","yes","yet","youd","youre","z","zero","a's","ain't","a
llow","allows","apart","appear","appreciate","appropriate","associated","best","better","c'mon","c's","cant","changes","clearly","concerning","consequently","consider","considering","corresponding","course","currently","definitely","described","despite","entirely","exactly","example","going","greetings","hello","help","hopefully","ignored","inasmuch","indicate","indicated","indicates","inner","insofar","it'd","keep","keeps","novel","presumably","reasonably","second","secondly","sensible","serious","seriously","sure","t's","third","thorough","thoroughly","three","well","wonder"]
    def train(self, training_data, config, **kwargs):
        """Preprocess the text of every training example in place."""
        for example in training_data.training_examples:
            example.text = self.preprocess(example.text)
            example.set("text", example.text)
    def process(self, message, **kwargs):
        """Replace the incoming message text with its preprocessed form."""
        message.text = self.preprocess(message.get('text'))
        message.set("text", message.text)
    def english_word_count(self, word):
        """Count how many characters of ``word`` are ASCII lowercase
        letters (a crude English-ness signal for a lower-cased token)."""
        alph = list(string.ascii_lowercase)
        count = 0
        for ch in word:
            if ch in alph:
                count += 1
        return count
    def preprocess(self, text):
        """Lower-case ``text``, drop stopwords, and keep only tokens whose
        characters are >60% ASCII lowercase letters; returns the kept
        tokens joined by single spaces."""
        text = text.lower()
        # NOTE(review): ``alph`` is never used in this method.
        alph = list(string.ascii_lowercase)
        new_text = ''
        for word in text.split():
            count = self.english_word_count(word)
            if word in self.stopwords:
                continue
            if count / len(word) > 0.6:
                new_text += word + ' '
        # Strip the trailing space appended after the last kept token.
        return new_text[:-1]
f71cbe9a6893b097ff92eef32e8a4f740fdc19a0 | 13,481 | py | Python | wheat/util/streamable.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | null | null | null | wheat/util/streamable.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | null | null | null | wheat/util/streamable.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
# pylint: disable
from __future__ import annotations
import dataclasses
import io
import pprint
import sys
from enum import Enum
from typing import Any, BinaryIO, Dict, List, Tuple, Type, Callable, Optional, Iterator
from blspy import G1Element, G2Element, PrivateKey
from wheat.types.blockchain_format.program import Program, SerializedProgram
from wheat.types.blockchain_format.sized_bytes import bytes32
from wheat.util.byte_types import hexstr_to_bytes
from wheat.util.hash import std_hash
from wheat.util.ints import int64, int512, uint32, uint64, uint128
from wheat.util.type_checking import is_type_List, is_type_SpecificOptional, is_type_Tuple, strictdataclass
if sys.version_info < (3, 8):
    # typing.get_args was added in Python 3.8; on older interpreters fall
    # back to reading __args__ directly off the typing object.
    def get_args(t: Type[Any]) -> Tuple[Any, ...]:
        return getattr(t, "__args__", ())

else:
    from typing import get_args
# Shared pretty-printer used when rendering streamable objects.
pp = pprint.PrettyPrinter(indent=1, width=120, compact=True)

# TODO: Remove hack, this allows streaming these objects from binary
# Fixed serialized byte sizes keyed by type name.
size_hints = {
    "PrivateKey": PrivateKey.PRIVATE_KEY_SIZE,
    "G1Element": G1Element.SIZE,
    "G2Element": G2Element.SIZE,
    "ConditionOpcode": 1,
}
# Types that are (de)serialized via from_bytes/hex rather than hashed/JSON'd
# directly.
unhashable_types = [
    PrivateKey,
    G1Element,
    G2Element,
    Program,
    SerializedProgram,
]
# JSON does not support big ints, so these types must be serialized differently in JSON
big_ints = [uint64, int64, uint128, int512]
def dataclass_from_dict(klass, d):
    """
    Instantiate ``klass`` from plain JSON-style data ``d``.

    Recursively handles Optionals, Tuples, dataclasses, Lists, bytes
    (from hex strings), bls-style unhashable types, and primitives.
    """
    if is_type_SpecificOptional(klass):
        # Optional[T]: falsy data maps to None, otherwise recurse on T.
        return dataclass_from_dict(get_args(klass)[0], d) if d else None
    if is_type_Tuple(klass):
        # Heterogeneous tuple: convert each element with its own type arg.
        return tuple(
            dataclass_from_dict(klass.__args__[index], item)
            for index, item in enumerate(d)
        )
    if dataclasses.is_dataclass(klass):
        # Dataclass: convert each field that is present in the dict.
        field_types = {field.name: field.type for field in dataclasses.fields(klass)}
        converted = {name: dataclass_from_dict(field_types[name], d[name]) for name in d}
        return klass(**converted)
    if is_type_List(klass):
        # Homogeneous list: recurse with the single element type.
        element_type = get_args(klass)[0]
        return [dataclass_from_dict(element_type, item) for item in d]
    if issubclass(klass, bytes):
        # bytes-like: data is a hex string.
        return klass(hexstr_to_bytes(d))
    if klass in unhashable_types:
        # bls-style type: cast from its hex representation.
        return klass.from_bytes(hexstr_to_bytes(d))
    # Primitive: cast with the target class.
    return klass(d)
def _jsonify_one(value):
    """
    Convert a single value to its JSON-friendly form (helper for recurse_jsonify).

    The checks run in sequence on purpose: a value turned into a "0x…" hex
    string by the first step cannot match any of the later isinstance checks,
    so at most one conversion applies per value.
    """
    if type(value) in unhashable_types or issubclass(type(value), bytes):
        value = f"0x{bytes(value).hex()}"
    if isinstance(value, dict):
        value = recurse_jsonify(value)
    if isinstance(value, list):
        value = recurse_jsonify(value)
    if isinstance(value, tuple):
        value = recurse_jsonify(value)
    if isinstance(value, Enum):
        value = value.name
    if isinstance(value, int) and type(value) in big_ints:
        # Downcast to plain int so encoders don't see the custom int subclass.
        value = int(value)
    return value


def recurse_jsonify(d):
    """
    Recursively converts a structure to JSON-friendly values: bytes objects and
    registered unhashable (bls) types become "0x…" hex strings, Enums become
    their names, and big-int subclasses become plain ints.

    Lists and tuples are returned as new lists; dicts are updated in place and
    returned, matching the original behaviour.  (The previous implementation
    duplicated the whole conversion ladder in both branches; the old docstring
    also wrongly claimed big ints were serialized as strings.)
    """
    if isinstance(d, (list, tuple)):
        return [_jsonify_one(item) for item in d]
    for key, value in d.items():
        d[key] = _jsonify_one(value)
    return d
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS = {}
def streamable(cls: Any):
    """
    Class decorator that turns ``cls`` into a Streamable type.

    It applies ``strictdataclass`` (type-checked construction), generates a new
    class inheriting from both the strict dataclass and ``Streamable`` (which
    supplies parse/from_bytes, stream/__bytes__ and ``get_hash()``, a sha256 of
    the serialization), and pre-computes one parse function per annotated
    field, registered in ``PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS``.

    Serialization format:
      - fields are serialized in declaration order via from_bytes/__bytes__;
      - lists get a 4 byte length prefix;
      - optionals get a 1 byte presence prefix (1 = present, 0 = absent).

    Every constituent must be fixed-size (e.g. uint32, not int) so it can be
    parsed and streamed deterministically.  This is used for consensus critical
    objects such as the block header.

    Use the ``Streamable`` class as a parent and ``@dataclass(frozen=True)``
    as well, so linters recognize the generated methods and constructor.
    """
    strict_cls = strictdataclass(cls)
    generated = type(cls.__name__, (strict_cls, Streamable), {})
    try:
        annotations = strict_cls.__annotations__  # pylint: disable=no-member
    except Exception:
        annotations = {}
    PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[generated] = [
        cls.function_to_parse_one_item(field_type) for field_type in annotations.values()
    ]
    return generated
def parse_bool(f: BinaryIO) -> bool:
    """Read one byte from ``f`` and decode it as a bool (0 or 1)."""
    raw = f.read(1)
    # A short read means EOF was reached before the value was complete.
    assert raw is not None and len(raw) == 1
    if raw == b"\x00":
        return False
    if raw == b"\x01":
        return True
    raise ValueError("Bool byte must be 0 or 1")
def parse_optional(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> Optional[Any]:
    """Read a one-byte presence flag; if it is 1, parse and return the inner value."""
    flag = f.read(1)
    # A short read means EOF was reached before the value was complete.
    assert flag is not None and len(flag) == 1
    if flag == b"\x00":
        return None
    if flag == b"\x01":
        return parse_inner_type_f(f)
    raise ValueError("Optional must be 0 or 1")
def parse_bytes(f: BinaryIO) -> bytes:
    """Parse a byte string prefixed by a 4-byte big-endian length from ``f``."""
    prefix = f.read(4)
    # A short read means EOF was reached before the prefix was complete.
    assert prefix is not None and len(prefix) == 4
    size: uint32 = uint32(int.from_bytes(prefix, "big"))
    payload = f.read(size)
    assert payload is not None and len(payload) == size
    return payload
def parse_list(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> List[Any]:
    """Parse a 4-byte big-endian element count, then that many inner items."""
    prefix = f.read(4)
    # A short read means EOF was reached before the prefix was complete.
    assert prefix is not None and len(prefix) == 4
    count = uint32(int.from_bytes(prefix, "big"))
    return [parse_inner_type_f(f) for _ in range(count)]
def parse_tuple(f: BinaryIO, list_parse_inner_type_f: List[Callable[[BinaryIO], Any]]) -> Tuple[Any, ...]:
    """Parse one value per supplied parser, in order, and return them as a tuple."""
    return tuple(parse_one(f) for parse_one in list_parse_inner_type_f)
def parse_size_hints(f: BinaryIO, f_type: Type, bytes_to_read: int) -> Any:
    """Read exactly ``bytes_to_read`` bytes and decode them via ``f_type.from_bytes``."""
    raw = f.read(bytes_to_read)
    # A short read means EOF was reached before the value was complete.
    assert raw is not None and len(raw) == bytes_to_read
    return f_type.from_bytes(raw)
def parse_str(f: BinaryIO) -> str:
    """Parse a 4-byte big-endian length prefix followed by UTF-8 encoded text."""
    prefix = f.read(4)
    # A short read means EOF was reached before the prefix was complete.
    assert prefix is not None and len(prefix) == 4
    size: uint32 = uint32(int.from_bytes(prefix, "big"))
    encoded = f.read(size)
    # A short read here also means EOF was reached early.
    assert encoded is not None and len(encoded) == size
    return encoded.decode("utf-8")
class Streamable:
    """
    Base class supplying the serialization protocol used together with the
    ``@streamable`` decorator: ``parse``/``from_bytes`` for deserialization,
    ``stream``/``__bytes__`` for serialization, ``get_hash`` (sha256 of the
    serialization) and JSON helpers.  Per-field parse functions are looked up
    in the module-level ``PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS`` registry,
    which the decorator populates.
    """

    @classmethod
    def function_to_parse_one_item(cls: Type[cls.__name__], f_type: Type):  # type: ignore
        """
        This function returns a function taking one argument `f: BinaryIO` that parses
        and returns a value of the given type.
        """
        inner_type: Type
        # Dispatch order matters: bool and Optional must be handled before the
        # generic hasattr(..., "parse") / from_bytes fallbacks below.
        if f_type is bool:
            return parse_bool
        if is_type_SpecificOptional(f_type):
            inner_type = get_args(f_type)[0]
            parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
            return lambda f: parse_optional(f, parse_inner_type_f)
        if hasattr(f_type, "parse"):
            # Nested Streamable (or anything else exposing a parse classmethod).
            return f_type.parse
        if f_type == bytes:
            return parse_bytes
        if is_type_List(f_type):
            inner_type = get_args(f_type)[0]
            parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
            return lambda f: parse_list(f, parse_inner_type_f)
        if is_type_Tuple(f_type):
            inner_types = get_args(f_type)
            list_parse_inner_type_f = [cls.function_to_parse_one_item(_) for _ in inner_types]
            return lambda f: parse_tuple(f, list_parse_inner_type_f)
        if hasattr(f_type, "from_bytes") and f_type.__name__ in size_hints:
            # Fixed-size binary types (see the size_hints table): read a known
            # number of bytes and decode with from_bytes.
            bytes_to_read = size_hints[f_type.__name__]
            return lambda f: parse_size_hints(f, f_type, bytes_to_read)
        if f_type is str:
            return parse_str
        raise NotImplementedError(f"Type {f_type} does not have parse")

    @classmethod
    def parse(cls: Type[cls.__name__], f: BinaryIO) -> cls.__name__:  # type: ignore
        """
        Deserialize an instance of ``cls`` from the binary stream ``f`` using the
        per-field parse functions registered by the ``@streamable`` decorator.
        Raises ValueError if the field list and parse-function list disagree.
        """
        # Create the object without calling __init__() to avoid unnecessary post-init checks in strictdataclass
        obj: Streamable = object.__new__(cls)
        fields: Iterator[str] = iter(getattr(cls, "__annotations__", {}))
        values: Iterator = (parse_f(f) for parse_f in PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[cls])
        for field, value in zip(fields, values):
            object.__setattr__(obj, field, value)
        # Use -1 as a sentinel value as it's not currently serializable
        if next(fields, -1) != -1:
            raise ValueError("Failed to parse incomplete Streamable object")
        if next(values, -1) != -1:
            raise ValueError("Failed to parse unknown data in Streamable object")
        return obj

    def stream_one_item(self, f_type: Type, item, f: BinaryIO) -> None:
        """
        Serialize a single ``item`` of declared type ``f_type`` to ``f``; the
        inverse of function_to_parse_one_item, with a matching dispatch order.
        """
        inner_type: Type
        if is_type_SpecificOptional(f_type):
            # One byte presence prefix: 1 = present, 0 = absent.
            inner_type = get_args(f_type)[0]
            if item is None:
                f.write(bytes([0]))
            else:
                f.write(bytes([1]))
                self.stream_one_item(inner_type, item, f)
        elif f_type == bytes:
            # 4-byte big-endian length prefix followed by the raw bytes.
            f.write(uint32(len(item)).to_bytes(4, "big"))
            f.write(item)
        elif hasattr(f_type, "stream"):
            item.stream(f)
        elif hasattr(f_type, "__bytes__"):
            f.write(bytes(item))
        elif is_type_List(f_type):
            assert is_type_List(type(item))
            f.write(uint32(len(item)).to_bytes(4, "big"))
            inner_type = get_args(f_type)[0]
            # wjb assert inner_type != get_args(List)[0]  # type: ignore
            for element in item:
                self.stream_one_item(inner_type, element, f)
        elif is_type_Tuple(f_type):
            # Tuples have no length prefix; arity is fixed by the type.
            inner_types = get_args(f_type)
            assert len(item) == len(inner_types)
            for i in range(len(item)):
                self.stream_one_item(inner_types[i], item[i], f)
        elif f_type is str:
            # 4-byte big-endian length prefix followed by the UTF-8 bytes.
            str_bytes = item.encode("utf-8")
            f.write(uint32(len(str_bytes)).to_bytes(4, "big"))
            f.write(str_bytes)
        elif f_type is bool:
            f.write(int(item).to_bytes(1, "big"))
        else:
            raise NotImplementedError(f"can't stream {item}, {f_type}")

    def stream(self, f: BinaryIO) -> None:
        """Serialize every annotated field, in declaration order, to ``f``."""
        try:
            fields = self.__annotations__  # pylint: disable=no-member
        except Exception:
            fields = {}
        for f_name, f_type in fields.items():
            self.stream_one_item(f_type, getattr(self, f_name), f)

    def get_hash(self) -> bytes32:
        """Return the sha256 hash of this object's serialization."""
        return bytes32(std_hash(bytes(self)))

    @classmethod
    def from_bytes(cls: Any, blob: bytes) -> Any:
        """Deserialize from ``blob``; asserts the entire blob is consumed."""
        f = io.BytesIO(blob)
        parsed = cls.parse(f)
        assert f.read() == b""
        return parsed

    def __bytes__(self: Any) -> bytes:
        """Serialize this object to bytes via stream()."""
        f = io.BytesIO()
        self.stream(f)
        return bytes(f.getvalue())

    def __str__(self: Any) -> str:
        """Pretty-printed JSON-friendly representation."""
        return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))

    def __repr__(self: Any) -> str:
        """Same pretty-printed form as __str__."""
        return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))

    def to_json_dict(self) -> Dict:
        """Return a JSON-serializable dict (hex strings for binary fields)."""
        return recurse_jsonify(dataclasses.asdict(self))

    @classmethod
    def from_json_dict(cls: Any, json_dict: Dict) -> Any:
        """Rebuild an instance from a dict produced by to_json_dict()."""
        return dataclass_from_dict(cls, json_dict)
| 37.551532 | 111 | 0.649952 |
from __future__ import annotations
import dataclasses
import io
import pprint
import sys
from enum import Enum
from typing import Any, BinaryIO, Dict, List, Tuple, Type, Callable, Optional, Iterator
from blspy import G1Element, G2Element, PrivateKey
from wheat.types.blockchain_format.program import Program, SerializedProgram
from wheat.types.blockchain_format.sized_bytes import bytes32
from wheat.util.byte_types import hexstr_to_bytes
from wheat.util.hash import std_hash
from wheat.util.ints import int64, int512, uint32, uint64, uint128
from wheat.util.type_checking import is_type_List, is_type_SpecificOptional, is_type_Tuple, strictdataclass
if sys.version_info < (3, 8):
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
return getattr(t, "__args__", ())
else:
from typing import get_args
pp = pprint.PrettyPrinter(indent=1, width=120, compact=True)
size_hints = {
"PrivateKey": PrivateKey.PRIVATE_KEY_SIZE,
"G1Element": G1Element.SIZE,
"G2Element": G2Element.SIZE,
"ConditionOpcode": 1,
}
unhashable_types = [
PrivateKey,
G1Element,
G2Element,
Program,
SerializedProgram,
]
big_ints = [uint64, int64, uint128, int512]
def dataclass_from_dict(klass, d):
if is_type_SpecificOptional(klass):
if not d:
return None
return dataclass_from_dict(get_args(klass)[0], d)
elif is_type_Tuple(klass):
i = 0
klass_properties = []
for item in d:
klass_properties.append(dataclass_from_dict(klass.__args__[i], item))
i = i + 1
return tuple(klass_properties)
elif dataclasses.is_dataclass(klass):
fieldtypes = {f.name: f.type for f in dataclasses.fields(klass)}
return klass(**{f: dataclass_from_dict(fieldtypes[f], d[f]) for f in d})
elif is_type_List(klass):
return [dataclass_from_dict(get_args(klass)[0], item) for item in d]
elif issubclass(klass, bytes):
return klass(hexstr_to_bytes(d))
elif klass in unhashable_types:
return klass.from_bytes(hexstr_to_bytes(d))
else:
return klass(d)
def recurse_jsonify(d):
if isinstance(d, list) or isinstance(d, tuple):
new_list = []
for item in d:
if type(item) in unhashable_types or issubclass(type(item), bytes):
item = f"0x{bytes(item).hex()}"
if isinstance(item, dict):
item = recurse_jsonify(item)
if isinstance(item, list):
item = recurse_jsonify(item)
if isinstance(item, tuple):
item = recurse_jsonify(item)
if isinstance(item, Enum):
item = item.name
if isinstance(item, int) and type(item) in big_ints:
item = int(item)
new_list.append(item)
d = new_list
else:
for key, value in d.items():
if type(value) in unhashable_types or issubclass(type(value), bytes):
d[key] = f"0x{bytes(value).hex()}"
if isinstance(value, dict):
d[key] = recurse_jsonify(value)
if isinstance(value, list):
d[key] = recurse_jsonify(value)
if isinstance(value, tuple):
d[key] = recurse_jsonify(value)
if isinstance(value, Enum):
d[key] = value.name
if isinstance(value, int) and type(value) in big_ints:
d[key] = int(value)
return d
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS = {}
def streamable(cls: Any):
cls1 = strictdataclass(cls)
t = type(cls.__name__, (cls1, Streamable), {})
parse_functions = []
try:
fields = cls1.__annotations__
except Exception:
fields = {}
for _, f_type in fields.items():
parse_functions.append(cls.function_to_parse_one_item(f_type))
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[t] = parse_functions
return t
def parse_bool(f: BinaryIO) -> bool:
bool_byte = f.read(1)
assert bool_byte is not None and len(bool_byte) == 1
if bool_byte == bytes([0]):
return False
elif bool_byte == bytes([1]):
return True
else:
raise ValueError("Bool byte must be 0 or 1")
def parse_optional(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> Optional[Any]:
is_present_bytes = f.read(1)
assert is_present_bytes is not None and len(is_present_bytes) == 1
if is_present_bytes == bytes([0]):
return None
elif is_present_bytes == bytes([1]):
return parse_inner_type_f(f)
else:
raise ValueError("Optional must be 0 or 1")
def parse_bytes(f: BinaryIO) -> bytes:
list_size_bytes = f.read(4)
assert list_size_bytes is not None and len(list_size_bytes) == 4
list_size: uint32 = uint32(int.from_bytes(list_size_bytes, "big"))
bytes_read = f.read(list_size)
assert bytes_read is not None and len(bytes_read) == list_size
return bytes_read
def parse_list(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> List[Any]:
full_list: List = []
list_size_bytes = f.read(4)
assert list_size_bytes is not None and len(list_size_bytes) == 4
list_size = uint32(int.from_bytes(list_size_bytes, "big"))
for list_index in range(list_size):
full_list.append(parse_inner_type_f(f))
return full_list
def parse_tuple(f: BinaryIO, list_parse_inner_type_f: List[Callable[[BinaryIO], Any]]) -> Tuple[Any, ...]:
full_list = []
for parse_f in list_parse_inner_type_f:
full_list.append(parse_f(f))
return tuple(full_list)
def parse_size_hints(f: BinaryIO, f_type: Type, bytes_to_read: int) -> Any:
bytes_read = f.read(bytes_to_read)
assert bytes_read is not None and len(bytes_read) == bytes_to_read
return f_type.from_bytes(bytes_read)
def parse_str(f: BinaryIO) -> str:
str_size_bytes = f.read(4)
assert str_size_bytes is not None and len(str_size_bytes) == 4
str_size: uint32 = uint32(int.from_bytes(str_size_bytes, "big"))
str_read_bytes = f.read(str_size)
assert str_read_bytes is not None and len(str_read_bytes) == str_size
return bytes.decode(str_read_bytes, "utf-8")
class Streamable:
@classmethod
def function_to_parse_one_item(cls: Type[cls.__name__], f_type: Type):
inner_type: Type
if f_type is bool:
return parse_bool
if is_type_SpecificOptional(f_type):
inner_type = get_args(f_type)[0]
parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
return lambda f: parse_optional(f, parse_inner_type_f)
if hasattr(f_type, "parse"):
return f_type.parse
if f_type == bytes:
return parse_bytes
if is_type_List(f_type):
inner_type = get_args(f_type)[0]
parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
return lambda f: parse_list(f, parse_inner_type_f)
if is_type_Tuple(f_type):
inner_types = get_args(f_type)
list_parse_inner_type_f = [cls.function_to_parse_one_item(_) for _ in inner_types]
return lambda f: parse_tuple(f, list_parse_inner_type_f)
if hasattr(f_type, "from_bytes") and f_type.__name__ in size_hints:
bytes_to_read = size_hints[f_type.__name__]
return lambda f: parse_size_hints(f, f_type, bytes_to_read)
if f_type is str:
return parse_str
raise NotImplementedError(f"Type {f_type} does not have parse")
@classmethod
def parse(cls: Type[cls.__name__], f: BinaryIO) -> cls.__name__:
obj: Streamable = object.__new__(cls)
fields: Iterator[str] = iter(getattr(cls, "__annotations__", {}))
values: Iterator = (parse_f(f) for parse_f in PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[cls])
for field, value in zip(fields, values):
object.__setattr__(obj, field, value)
if next(fields, -1) != -1:
raise ValueError("Failed to parse incomplete Streamable object")
if next(values, -1) != -1:
raise ValueError("Failed to parse unknown data in Streamable object")
return obj
def stream_one_item(self, f_type: Type, item, f: BinaryIO) -> None:
inner_type: Type
if is_type_SpecificOptional(f_type):
inner_type = get_args(f_type)[0]
if item is None:
f.write(bytes([0]))
else:
f.write(bytes([1]))
self.stream_one_item(inner_type, item, f)
elif f_type == bytes:
f.write(uint32(len(item)).to_bytes(4, "big"))
f.write(item)
elif hasattr(f_type, "stream"):
item.stream(f)
elif hasattr(f_type, "__bytes__"):
f.write(bytes(item))
elif is_type_List(f_type):
assert is_type_List(type(item))
f.write(uint32(len(item)).to_bytes(4, "big"))
inner_type = get_args(f_type)[0]
# wjb assert inner_type != get_args(List)[0] # type: ignore
for element in item:
self.stream_one_item(inner_type, element, f)
elif is_type_Tuple(f_type):
inner_types = get_args(f_type)
assert len(item) == len(inner_types)
for i in range(len(item)):
self.stream_one_item(inner_types[i], item[i], f)
elif f_type is str:
str_bytes = item.encode("utf-8")
f.write(uint32(len(str_bytes)).to_bytes(4, "big"))
f.write(str_bytes)
elif f_type is bool:
f.write(int(item).to_bytes(1, "big"))
else:
raise NotImplementedError(f"can't stream {item}, {f_type}")
def stream(self, f: BinaryIO) -> None:
try:
fields = self.__annotations__
except Exception:
fields = {}
for f_name, f_type in fields.items():
self.stream_one_item(f_type, getattr(self, f_name), f)
def get_hash(self) -> bytes32:
return bytes32(std_hash(bytes(self)))
@classmethod
def from_bytes(cls: Any, blob: bytes) -> Any:
f = io.BytesIO(blob)
parsed = cls.parse(f)
assert f.read() == b""
return parsed
def __bytes__(self: Any) -> bytes:
f = io.BytesIO()
self.stream(f)
return bytes(f.getvalue())
def __str__(self: Any) -> str:
return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
def __repr__(self: Any) -> str:
return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
def to_json_dict(self) -> Dict:
return recurse_jsonify(dataclasses.asdict(self))
@classmethod
def from_json_dict(cls: Any, json_dict: Dict) -> Any:
return dataclass_from_dict(cls, json_dict)
| true | true |
f71cbf4460d98bc10c011e9a945b70eb738776be | 853 | py | Python | setup.py | rwindsor1/DICOMcat | 1f6549882cce93f270ad24d4c4c4140d51536789 | [
"MIT"
] | 1 | 2021-08-09T15:50:53.000Z | 2021-08-09T15:50:53.000Z | setup.py | rwindsor1/DICOMcat | 1f6549882cce93f270ad24d4c4c4140d51536789 | [
"MIT"
] | null | null | null | setup.py | rwindsor1/DICOMcat | 1f6549882cce93f270ad24d4c4c4140d51536789 | [
"MIT"
] | null | null | null | from setuptools import setup
from os import path

# Load the long description for PyPI from the project's README.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(name='dicomcat',
      version='0.1',
      description='A simple python-based tool based on imgcat for displaying DICOM files in iTerm2.',
      long_description_content_type='text/markdown',
      long_description=long_description,
      url='https://github.com/rwindsor1/DICOMcat',
      author='Rhydian Windsor',
      author_email='windsorrhydian@gmail.com',
      license='MIT',
      packages=['dicomcat'],
      test_suite='nose.collector',
      tests_require=['nose'],
      entry_points={
          'console_scripts': ['dicomcat=dicomcat.cli:show_dicom']
      },
      include_package_data=True,
      # Fix: this keyword was misspelled "ip_safe", which setuptools treats as
      # an unknown distribution option and silently ignores.
      zip_safe=False)
| 34.12 | 101 | 0.681125 | from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name= 'dicomcat',
version= '0.1',
description='A simple python-based tool based on imgcat for displaying DICOM files in iTerm2.',
long_description_content_type='text/markdown',
long_description=long_description,
url='https://github.com/rwindsor1/DICOMcat',
author ='Rhydian Windsor',
author_email= 'windsorrhydian@gmail.com',
license= 'MIT',
packages=['dicomcat'],
test_suite='nose.collector',
tests_require=['nose'],
entry_points={
'console_scripts': ['dicomcat=dicomcat.cli:show_dicom']
},
include_package_data=True,
ip_safe=False)
| true | true |
f71cbfad5d23526173717cefa4699e471cc4b889 | 522 | py | Python | 16 Exception Handling/finallydemo.py | Himanshu44626748/Learn-Python | f3a4d997f2d29b146e5f7434f4801ae94bc3483f | [
"MIT"
] | 2 | 2020-03-16T14:57:44.000Z | 2020-11-29T07:45:54.000Z | 16 Exception Handling/finallydemo.py | Himanshu44626748/Learn-Python | f3a4d997f2d29b146e5f7434f4801ae94bc3483f | [
"MIT"
] | null | null | null | 16 Exception Handling/finallydemo.py | Himanshu44626748/Learn-Python | f3a4d997f2d29b146e5f7434f4801ae94bc3483f | [
"MIT"
] | 1 | 2020-08-13T07:59:02.000Z | 2020-08-13T07:59:02.000Z | try:
f = open("myfile","w")
a,b = [int(x) for x in input("Enter two numbers:").split()]
c = a/b
f.write("Writing %d into file" %c)
except ZeroDivisionError:
print("Division by zero is not allowed")
print("Please enter a non zero number")
finally:
f.close() # Writing f.close() in finally block because whether the error appears or not, we always want to close the file, so we will use f.close() in finally.
print("File Closed")
print("Code after that exception") | 37.285714 | 189 | 0.632184 | try:
f = open("myfile","w")
a,b = [int(x) for x in input("Enter two numbers:").split()]
c = a/b
f.write("Writing %d into file" %c)
except ZeroDivisionError:
print("Division by zero is not allowed")
print("Please enter a non zero number")
finally:
f.close()
print("File Closed")
print("Code after that exception") | true | true |
f71cbfefe5963b92d2e1699d24dfdedb87ab4f03 | 1,191 | py | Python | benchmarks/python/microbench.py | cyntsh/dex-lang | 88a647c4b7347cc4124d9b03b90b4348c8125698 | [
"BSD-Source-Code"
] | 1,223 | 2019-10-25T12:35:46.000Z | 2022-03-30T02:08:54.000Z | benchmarks/python/microbench.py | cyntsh/dex-lang | 88a647c4b7347cc4124d9b03b90b4348c8125698 | [
"BSD-Source-Code"
] | 425 | 2019-10-27T21:12:15.000Z | 2022-03-31T17:47:57.000Z | benchmarks/python/microbench.py | cyntsh/dex-lang | 88a647c4b7347cc4124d9b03b90b4348c8125698 | [
"BSD-Source-Code"
] | 87 | 2019-10-26T17:41:23.000Z | 2022-02-05T23:32:04.000Z | # Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import json
from functools import partial
import time
import jax.numpy as np
import jax.random as random
from jax import jit
from jax.config import config
config.enable_omnistaging()
# warm up
np.dot(1.0, 1.0)
def benchit(bench_name, x, f):
    """
    JIT-compile ``f``, time a cold call and a warm call on ``x``, and print a
    one-line JSON record with the estimated compile and run times.
    """
    compiled = jit(f)
    start = time.time()
    compiled(x).block_until_ready()  # first call includes tracing/compilation
    after_first = time.time()
    compiled(x).block_until_ready()  # second call measures steady-state runtime
    after_second = time.time()
    run_time = after_second - after_first
    compile_time = (after_first - start) - run_time
    record = {"bench_name" : bench_name,
              "compile_time" : compile_time,
              "run_time" : run_time}
    print(json.dumps(record))
# NOTE: applying benchit via @partial runs the benchmark at import time and
# rebinds the name to benchit's return value (None).
@partial(benchit, "sum", 0)
def sum_bench(key):
    # Broadcasted pairwise sums of a 10k-element vector, reduced over axis 0.
    xs = random.normal(random.PRNGKey(key), shape=(10000,))
    return np.sum(xs[:, None] + xs[None, :], axis=0)
# Benchmarks drawing 100M standard normals (runs at import time via benchit,
# which also rebinds the name to None).
@partial(benchit, "gaussian", 0)
def gaussian_bench(key):
    return random.normal(random.PRNGKey(key), shape=(100000000,))
# Benchmarks a 1000x1000 matrix product (runs at import time via benchit,
# which also rebinds the name to None).
@partial(benchit, "matmul", 0)
def matmul_bench(key):
    mat = random.normal(random.PRNGKey(key), shape=(1000, 1000))
    return np.dot(mat, mat)
| 24.8125 | 63 | 0.699412 |
import json
from functools import partial
import time
import jax.numpy as np
import jax.random as random
from jax import jit
from jax.config import config
config.enable_omnistaging()
np.dot(1.0, 1.0)
def benchit(bench_name, x, f):
f_jitted = jit(f)
t0 = time.time()
f_jitted(x).block_until_ready()
t1 = time.time()
f_jitted(x).block_until_ready()
t2 = time.time()
run_time = t2 - t1
compile_time = t1 - t0 - run_time
print(json.dumps(
{"bench_name" : bench_name,
"compile_time" : compile_time,
"run_time" : run_time}))
@partial(benchit, "sum", 0)
def sum_bench(key):
xs = random.normal(random.PRNGKey(key), shape=(10000,))
return np.sum(xs[:, None] + xs[None, :], axis=0)
@partial(benchit, "gaussian", 0)
def gaussian_bench(key):
return random.normal(random.PRNGKey(key), shape=(100000000,))
@partial(benchit, "matmul", 0)
def matmul_bench(key):
mat = random.normal(random.PRNGKey(key), shape=(1000, 1000))
return np.dot(mat, mat)
| true | true |
f71cc027fdd19119fb0399b5df5021a92a9837ac | 2,248 | py | Python | e3d/plugin_management/PluginHandlers.py | jr-garcia/Engendro3D | 93a6a6c26be2b9a8c1520e9d83516c39532ab1ed | [
"MIT"
] | 8 | 2017-04-19T03:59:43.000Z | 2020-04-29T00:29:12.000Z | e3d/plugin_management/PluginHandlers.py | jr-garcia/Engendro3D | 93a6a6c26be2b9a8c1520e9d83516c39532ab1ed | [
"MIT"
] | null | null | null | e3d/plugin_management/PluginHandlers.py | jr-garcia/Engendro3D | 93a6a6c26be2b9a8c1520e9d83516c39532ab1ed | [
"MIT"
] | 3 | 2018-04-26T16:57:46.000Z | 2021-03-01T05:48:06.000Z | from os import path
from shutil import make_archive
import os
from json import load, dump
PLUGINEXTENSION = '.epf'
DESCRIPTIONNAME = 'description'
def packPluginFromFolder(folderPath):
    """
    Pack the plugin at ``folderPath`` into a gzipped archive named
    ``<folderPath>.epf`` next to the folder.

    Raises:
        FileNotFoundError: if the folder or its required description.json is missing.
        NotADirectoryError: if ``folderPath`` is not a directory.
    """
    folderPath = path.abspath(folderPath)
    if not path.exists(folderPath):
        raise FileNotFoundError('the folder does not exist.')
    if not path.isdir(folderPath):
        raise NotADirectoryError('folderPath must be a directory with files.')
    # (An unused `parentFolder` local was removed here.)
    descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json'))
    if not path.exists(descriptionPath):
        raise FileNotFoundError('required plugin description file not found.')
    zipTitle = folderPath
    finalName = zipTitle + PLUGINEXTENSION
    make_archive(zipTitle, 'gztar', folderPath, './')
    # os.replace overwrites an existing archive, where os.rename would raise
    # on Windows if the target file already exists.
    os.replace(zipTitle + '.tar.gz', finalName)
class PluginDescription(object):
    """
    Plugin metadata (name, description, author) persisted as a
    ``description.json`` file inside a plugin folder.
    """

    def __init__(self, name='', description='', authorName='', authorEmail=''):
        self.name = name
        self.description = description
        self.authorName = authorName
        self.authorEmail = authorEmail

    def __repr__(self):
        return self.name

    def _toDict(self):
        # Collect every public, non-callable attribute via reflection, so new
        # fields are serialized without touching this method.
        attrs = dir(self)
        return {a: getattr(self, a) for a in attrs
                if not a.startswith('_') and not callable(getattr(self, a))}

    def saveToDisk(self, destFolder):
        """Write this description as ``description.json`` inside ``destFolder``."""
        # Fix: the previous ``try: ... except: raise`` wrapper was a no-op
        # (it re-raised everything unchanged) and has been removed.
        finalPath = path.abspath(path.join(destFolder, DESCRIPTIONNAME + '.json'))
        with open(finalPath, 'w') as dest:
            dump(self._toDict(), dest, indent=4)

    @staticmethod
    def fromDisk(folderPath):
        """
        Load a PluginDescription from ``description.json`` in ``folderPath``.

        Raises:
            FileNotFoundError: if the description file does not exist.
        """
        descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json'))
        if not path.exists(descriptionPath):
            raise FileNotFoundError('required plugin description file not found.')
        with open(descriptionPath) as desc:
            data = load(desc)
        return PluginDescription(**data)
class _Plugin(object):
def __init__(self, description, mainClass, pluginPath):
self.description = description
self.mainClass = mainClass
self.pluginPath = pluginPath
| 32.57971 | 105 | 0.666815 | from os import path
from shutil import make_archive
import os
from json import load, dump
PLUGINEXTENSION = '.epf'
DESCRIPTIONNAME = 'description'
def packPluginFromFolder(folderPath):
folderPath = path.abspath(folderPath)
if not path.exists(folderPath):
raise FileNotFoundError('the folder does not exist.')
if not path.isdir(folderPath):
raise NotADirectoryError('folderPath must be a directory with files.')
parentFolder = path.abspath(path.join(folderPath, path.pardir))
descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json'))
if not path.exists(descriptionPath):
raise FileNotFoundError('required plugin description file not found.')
zipTitle = folderPath
finalName = zipTitle + PLUGINEXTENSION
make_archive(zipTitle, 'gztar', folderPath, './')
os.rename(zipTitle + '.tar.gz', finalName)
class PluginDescription(object):
def __init__(self, name='', description='', authorName='', authorEmail=''):
self.name = name
self.description = description
self.authorName = authorName
self.authorEmail = authorEmail
def __repr__(self):
return self.name
def _toDict(self):
d = dir(self)
dd = {v: getattr(self, v) for v in d if not v.startswith('_') and not callable(getattr(self, v))}
return dd
def saveToDisk(self, destFolder):
try:
finalPath = path.abspath(path.join(destFolder, DESCRIPTIONNAME + '.json'))
with open(finalPath, 'w') as dest:
dump(self._toDict(), dest, indent=4)
except:
raise
@staticmethod
def fromDisk(folderPath):
descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json'))
if not path.exists(descriptionPath):
raise FileNotFoundError('required plugin description file not found.')
with open(descriptionPath) as desc:
data = load(desc)
description = PluginDescription(**data)
return description
class _Plugin(object):
def __init__(self, description, mainClass, pluginPath):
self.description = description
self.mainClass = mainClass
self.pluginPath = pluginPath
| true | true |
f71cc05cd87321ac0280e9c1dac9a793ff504e60 | 6,120 | py | Python | tests/test_enums.py | eerimoq/sython | 90937bf44b798b9c1ae0d18e31e11e95967b46c6 | [
"MIT"
] | null | null | null | tests/test_enums.py | eerimoq/sython | 90937bf44b798b9c1ae0d18e31e11e95967b46c6 | [
"MIT"
] | null | null | null | tests/test_enums.py | eerimoq/sython | 90937bf44b798b9c1ae0d18e31e11e95967b46c6 | [
"MIT"
] | null | null | null | from .utils import TestCase
from .utils import build_and_test_module
class Test(TestCase):
def test_enums(self):
build_and_test_module('enums')
def test_invalid_string_enum_member_value(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = "s"\n',
' File "", line 3\n'
' A = "s"\n'
' ^\n'
"CompileError: invalid enum member value\n")
def test_invalid_enum_member_name(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' V1, V2 = 1\n',
' File "", line 3\n'
' V1, V2 = 1\n'
' ^\n'
"CompileError: invalid enum member syntax\n")
def test_invalid_enum_member_value_plus_sign(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = +1\n',
' File "", line 3\n'
' A = +1\n'
' ^\n'
"CompileError: invalid enum member value\n")
def test_invalid_enum_member_value_variable(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = b\n',
' File "", line 3\n'
' A = b\n'
' ^\n'
"CompileError: invalid enum member value\n")
def test_non_pascal_case_enum_member_name(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' aB = 1\n',
' File "", line 3\n'
' aB = 1\n'
' ^\n'
"CompileError: enum member names must be pascal case\n")
def test_invalid_enum_member_syntax(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' 1 + 1\n',
' File "", line 3\n'
' 1 + 1\n'
' ^\n'
"CompileError: invalid enum member syntax\n")
def test_empty_enum_type(self):
self.assert_transpile_raises(
'@enum()\n'
'class Foo:\n'
' Ab = 1\n',
' File "", line 1\n'
' @enum()\n'
' ^\n'
"CompileError: one parameter expected, got 0\n")
def test_bad_enum_type_f32(self):
self.assert_transpile_raises(
'@enum(f32)\n'
'class Foo:\n'
' Ab = 1\n',
' File "", line 1\n'
' @enum(f32)\n'
' ^\n'
"CompileError: integer type expected, not 'f32'\n")
def test_enum_float_value(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 1\n'
'func foo():\n'
' print(Foo(0.0))\n',
' File "", line 5\n'
' print(Foo(0.0))\n'
' ^\n'
"CompileError: cannot convert float to 'i64'\n")
def test_enum_too_many_parameters(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 1\n'
'func foo():\n'
' print(Foo(1, 2))\n',
' File "", line 5\n'
' print(Foo(1, 2))\n'
' ^\n'
"CompileError: expected 1 parameter, got 2\n")
def test_not_enum(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 1\n'
'func foo():\n'
' print(not Foo.A)\n',
' File "", line 5\n'
' print(not Foo.A)\n'
' ^\n'
"CompileError: expected a 'bool', got a 'foo.lib.Foo'\n")
def test_enum_member_value_lower_than_previous_1(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 0\n'
' B = -1\n',
' File "", line 4\n'
' B = -1\n'
' ^\n'
"CompileError: enum member value lower than for previous member\n")
def test_enum_member_value_lower_than_previous_2(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A\n'
' B\n'
' C = 0\n',
' File "", line 5\n'
' C = 0\n'
' ^\n'
"CompileError: enum member value lower than for previous member\n")
def test_enum_pascal_case(self):
self.assert_transpile_raises(
'@enum\n'
'class foo:\n'
' A\n',
' File "", line 2\n'
' class foo:\n'
' ^\n'
"CompileError: enum names must be pascal case\n")
def test_enum_bad_member_syntax(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' func a(self):\n'
' pass\n',
' File "", line 3\n'
' func a(self):\n'
' ^\n'
"CompileError: invalid enum member syntax\n")
def test_use_missing_enum_value_in_print(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' Apa = 1\n'
'func foo():\n'
' print(Foo.APA)\n',
' File "", line 5\n'
' print(Foo.APA)\n'
' ^\n'
"CompileError: enum has no member 'APA'\n")
def test_use_missing_enum_value_in_comparision(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' Apa = 1\n'
'func foo():\n'
' if Foo.APA == Foo.Apa:\n'
' pass\n',
' File "", line 5\n'
' if Foo.APA == Foo.Apa:\n'
' ^\n'
"CompileError: enum has no member 'APA'\n")
| 31.546392 | 79 | 0.427288 | from .utils import TestCase
from .utils import build_and_test_module
class Test(TestCase):
def test_enums(self):
build_and_test_module('enums')
def test_invalid_string_enum_member_value(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = "s"\n',
' File "", line 3\n'
' A = "s"\n'
' ^\n'
"CompileError: invalid enum member value\n")
def test_invalid_enum_member_name(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' V1, V2 = 1\n',
' File "", line 3\n'
' V1, V2 = 1\n'
' ^\n'
"CompileError: invalid enum member syntax\n")
def test_invalid_enum_member_value_plus_sign(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = +1\n',
' File "", line 3\n'
' A = +1\n'
' ^\n'
"CompileError: invalid enum member value\n")
def test_invalid_enum_member_value_variable(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = b\n',
' File "", line 3\n'
' A = b\n'
' ^\n'
"CompileError: invalid enum member value\n")
def test_non_pascal_case_enum_member_name(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' aB = 1\n',
' File "", line 3\n'
' aB = 1\n'
' ^\n'
"CompileError: enum member names must be pascal case\n")
def test_invalid_enum_member_syntax(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' 1 + 1\n',
' File "", line 3\n'
' 1 + 1\n'
' ^\n'
"CompileError: invalid enum member syntax\n")
def test_empty_enum_type(self):
self.assert_transpile_raises(
'@enum()\n'
'class Foo:\n'
' Ab = 1\n',
' File "", line 1\n'
' @enum()\n'
' ^\n'
"CompileError: one parameter expected, got 0\n")
def test_bad_enum_type_f32(self):
self.assert_transpile_raises(
'@enum(f32)\n'
'class Foo:\n'
' Ab = 1\n',
' File "", line 1\n'
' @enum(f32)\n'
' ^\n'
"CompileError: integer type expected, not 'f32'\n")
def test_enum_float_value(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 1\n'
'func foo():\n'
' print(Foo(0.0))\n',
' File "", line 5\n'
' print(Foo(0.0))\n'
' ^\n'
"CompileError: cannot convert float to 'i64'\n")
def test_enum_too_many_parameters(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 1\n'
'func foo():\n'
' print(Foo(1, 2))\n',
' File "", line 5\n'
' print(Foo(1, 2))\n'
' ^\n'
"CompileError: expected 1 parameter, got 2\n")
def test_not_enum(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 1\n'
'func foo():\n'
' print(not Foo.A)\n',
' File "", line 5\n'
' print(not Foo.A)\n'
' ^\n'
"CompileError: expected a 'bool', got a 'foo.lib.Foo'\n")
def test_enum_member_value_lower_than_previous_1(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 0\n'
' B = -1\n',
' File "", line 4\n'
' B = -1\n'
' ^\n'
"CompileError: enum member value lower than for previous member\n")
def test_enum_member_value_lower_than_previous_2(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A\n'
' B\n'
' C = 0\n',
' File "", line 5\n'
' C = 0\n'
' ^\n'
"CompileError: enum member value lower than for previous member\n")
def test_enum_pascal_case(self):
self.assert_transpile_raises(
'@enum\n'
'class foo:\n'
' A\n',
' File "", line 2\n'
' class foo:\n'
' ^\n'
"CompileError: enum names must be pascal case\n")
def test_enum_bad_member_syntax(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' func a(self):\n'
' pass\n',
' File "", line 3\n'
' func a(self):\n'
' ^\n'
"CompileError: invalid enum member syntax\n")
def test_use_missing_enum_value_in_print(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' Apa = 1\n'
'func foo():\n'
' print(Foo.APA)\n',
' File "", line 5\n'
' print(Foo.APA)\n'
' ^\n'
"CompileError: enum has no member 'APA'\n")
def test_use_missing_enum_value_in_comparision(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' Apa = 1\n'
'func foo():\n'
' if Foo.APA == Foo.Apa:\n'
' pass\n',
' File "", line 5\n'
' if Foo.APA == Foo.Apa:\n'
' ^\n'
"CompileError: enum has no member 'APA'\n")
| true | true |
f71cc0cae73f084599395e8d8ba1c44ef7ba93fe | 1,764 | py | Python | LogFileSetup.py | skw32/DefectCorrectionsNotebook | 7342bc6cafa4c19c774d48c4f68b02db7d2e2eb1 | [
"BSD-3-Clause"
] | 4 | 2019-03-05T01:04:30.000Z | 2020-05-19T13:07:20.000Z | LogFileSetup.py | lxf-gzu/DefectCorrectionsNotebook | fef2ede0afb27e35d8e69c1d8aa759df284dc149 | [
"BSD-3-Clause"
] | 1 | 2019-06-01T18:07:53.000Z | 2019-06-01T18:07:53.000Z | LogFileSetup.py | lxf-gzu/DefectCorrectionsNotebook | fef2ede0afb27e35d8e69c1d8aa759df284dc149 | [
"BSD-3-Clause"
] | 6 | 2019-03-26T18:38:23.000Z | 2020-05-21T07:07:33.000Z | import logging
def configure_logging(logfile_path):
''' Initialize logging defaults for in-notebook messages and
'log.info' file written to store intermediate results during analysis of each defect
To use, the following lines must be added to the code:
import LogFileSetup as lfs
logger = lfs.configure_logging(os.path.join(PATH-TO-LOGFILE-DIR, "log"))
Usage example in notebook: logger.info("MESSAGE")
'''
# Set default format for each line of log messages within notebook
notebook_formatter = logging.Formatter("[%(levelname)s] [Cell line num: %(lineno)s] %(message)s")
# Set default format for each line in log.info file (look into methods to outputt cell num, not just line num in cell)
# info_file_formatter = logging.Formatter("[%(levelname)s] [Notebook cell num: %(???)s] [Cell line num: %(lineno)s] %(message)s")
# Initialise log.info for defect processing information
defect_logger = logging.getLogger()
# For log.info file
info_file_handler = logging.FileHandler(logfile_path + ".info", mode='w')
info_file_handler.setLevel(logging.INFO)
# info_file_handler.setFormatter(info_file_formatter)
# For messages within notebook
notebook_handler = logging.StreamHandler()
notebook_handler.setLevel(logging.INFO)
notebook_handler.setFormatter(notebook_formatter)
# Remove default handlers and add custom ones (for log.info file and messages in notebooks)
list(map(defect_logger.removeHandler, defect_logger.handlers[:]))
list(map(defect_logger.removeFilter, defect_logger.filters[:]))
defect_logger.setLevel(logging.INFO)
defect_logger.addHandler(info_file_handler)
defect_logger.addHandler(notebook_handler)
return defect_logger
| 46.421053 | 132 | 0.740363 | import logging
def configure_logging(logfile_path):
notebook_formatter = logging.Formatter("[%(levelname)s] [Cell line num: %(lineno)s] %(message)s")
defect_logger = logging.getLogger()
info_file_handler = logging.FileHandler(logfile_path + ".info", mode='w')
info_file_handler.setLevel(logging.INFO)
notebook_handler = logging.StreamHandler()
notebook_handler.setLevel(logging.INFO)
notebook_handler.setFormatter(notebook_formatter)
list(map(defect_logger.removeHandler, defect_logger.handlers[:]))
list(map(defect_logger.removeFilter, defect_logger.filters[:]))
defect_logger.setLevel(logging.INFO)
defect_logger.addHandler(info_file_handler)
defect_logger.addHandler(notebook_handler)
return defect_logger
| true | true |
f71cc1efb366d21efb50b72a9d38ce6d8c3b520d | 1,439 | py | Python | workers/clustering_worker/setup.py | hsh3n3/augur | bb65774a0884fd82ec7799f33ac87997268d5a5f | [
"MIT"
] | 1 | 2020-12-21T23:39:27.000Z | 2020-12-21T23:39:27.000Z | workers/clustering_worker/setup.py | hsh3n3/augur | bb65774a0884fd82ec7799f33ac87997268d5a5f | [
"MIT"
] | 2 | 2021-12-10T01:45:26.000Z | 2021-12-10T01:58:04.000Z | workers/clustering_worker/setup.py | hsh3n3/augur | bb65774a0884fd82ec7799f33ac87997268d5a5f | [
"MIT"
] | 1 | 2019-05-20T15:30:40.000Z | 2019-05-20T15:30:40.000Z | import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="clustering_worker",
version="0.0.1",
url="https://github.com/chaoss/augur",
license='MIT',
author="Sarit Adhikari",
author_email="sarit.adhikari@gmail.com",
description="worker to cluster repository based on messages on issues and pull requests ",
packages=find_packages(),
install_requires=[
'Flask==1.1.4',
'Flask-Cors==3.0.10',
'Flask-Login==0.5.0',
'Flask-WTF==0.14.3',
'requests==2.22.0',
'psycopg2-binary==2.8.6',
'sklearn==0.0',
'numpy==1.19.5',
'nltk==3.5',
'seaborn==0.11.1',
'pandas==1.1.3',
'matplotlib==3.3.4'
],
entry_points={
'console_scripts': [
'clustering_worker_start=workers.clustering_worker.runtime:main',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
]
)
| 28.78 | 94 | 0.579569 | import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="clustering_worker",
version="0.0.1",
url="https://github.com/chaoss/augur",
license='MIT',
author="Sarit Adhikari",
author_email="sarit.adhikari@gmail.com",
description="worker to cluster repository based on messages on issues and pull requests ",
packages=find_packages(),
install_requires=[
'Flask==1.1.4',
'Flask-Cors==3.0.10',
'Flask-Login==0.5.0',
'Flask-WTF==0.14.3',
'requests==2.22.0',
'psycopg2-binary==2.8.6',
'sklearn==0.0',
'numpy==1.19.5',
'nltk==3.5',
'seaborn==0.11.1',
'pandas==1.1.3',
'matplotlib==3.3.4'
],
entry_points={
'console_scripts': [
'clustering_worker_start=workers.clustering_worker.runtime:main',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
]
)
| true | true |
f71cc343a74ac1d719f7021173dac1e468df922f | 2,529 | py | Python | src/main/python/baselines/dsl/search/indexer.py | sgottsch/Tab2KG | 5c749ae6056a8c9b6a23674a7bf9d8a3cc7b8530 | [
"MIT"
] | null | null | null | src/main/python/baselines/dsl/search/indexer.py | sgottsch/Tab2KG | 5c749ae6056a8c9b6a23674a7bf9d8a3cc7b8530 | [
"MIT"
] | null | null | null | src/main/python/baselines/dsl/search/indexer.py | sgottsch/Tab2KG | 5c749ae6056a8c9b6a23674a7bf9d8a3cc7b8530 | [
"MIT"
] | null | null | null | # This is an edited version of https://github.com/minhptx/iswc-2016-semantic-labeling, which was edited to use it as a baseline for Tab2KG (https://github.com/sgottsch/Tab2KG).
import logging
from elasticsearch.exceptions import RequestError
from elasticsearch.helpers import scan, bulk
from lib.utils import get_index_name
__author__ = "minh"
class Indexer:
def __init__(self, es):
self.es = es
def init_analyzers(self, index_config):
print("init_analyzers")
print(index_config)
print(get_index_name(index_config))
if(self.es.indices.exists(get_index_name(index_config))):
self.es.indices.delete(index=get_index_name(index_config))
self.es.indices.create(index=get_index_name(index_config))
def index_column(self, column, source_name, index_config):
body = column.to_json()
body['source'] = source_name
try:
self.es.index(index=get_index_name(index_config), doc_type="service",
body=body)
return True
except RequestError:
print("Error")
return False
def index_source(self, source, index_config):
# self.es.indices.put_mapping(index=get_index_name(index_config), doc_type="service", body={
# "service": {
# "properties": {
# "source": {
# "type": "string",
# "index": "not_analyzed"
# }
# }
# }
# })
for column in source.column_map.values():
if column.semantic_type:
if len(column.value_list) > 0:
successful = self.index_column(column, source.index_name, index_config)
if(not successful):
return False
else:
logging.warning("Indexer: IGNORE COLUMN `%s` in source `%s` because of empty values", column.name, source.name)
return True
def delete_column(self, attr_name, source_name, index_config):
bulk_deletes = []
for result in scan(self.es, query={
"query": {
"match": {
"name": attr_name,
}
}
}, index=get_index_name(index_config), doc_type="service", _source=False,
track_scores=False, scroll='5m'):
result['_op_type'] = 'delete'
bulk_deletes.append(result)
bulk(self.es, bulk_deletes)
| 35.125 | 176 | 0.572558 |
import logging
from elasticsearch.exceptions import RequestError
from elasticsearch.helpers import scan, bulk
from lib.utils import get_index_name
__author__ = "minh"
class Indexer:
def __init__(self, es):
self.es = es
def init_analyzers(self, index_config):
print("init_analyzers")
print(index_config)
print(get_index_name(index_config))
if(self.es.indices.exists(get_index_name(index_config))):
self.es.indices.delete(index=get_index_name(index_config))
self.es.indices.create(index=get_index_name(index_config))
def index_column(self, column, source_name, index_config):
body = column.to_json()
body['source'] = source_name
try:
self.es.index(index=get_index_name(index_config), doc_type="service",
body=body)
return True
except RequestError:
print("Error")
return False
def index_source(self, source, index_config):
for column in source.column_map.values():
if column.semantic_type:
if len(column.value_list) > 0:
successful = self.index_column(column, source.index_name, index_config)
if(not successful):
return False
else:
logging.warning("Indexer: IGNORE COLUMN `%s` in source `%s` because of empty values", column.name, source.name)
return True
def delete_column(self, attr_name, source_name, index_config):
bulk_deletes = []
for result in scan(self.es, query={
"query": {
"match": {
"name": attr_name,
}
}
}, index=get_index_name(index_config), doc_type="service", _source=False,
track_scores=False, scroll='5m'):
result['_op_type'] = 'delete'
bulk_deletes.append(result)
bulk(self.es, bulk_deletes)
| true | true |
f71cc45987d40f97d2107f51052d959e0ffc1f6c | 10,055 | py | Python | crawler/crawler.py | thelumberjhack/corpusgen | 8ff1045e5b884991903697e2567a2ba67f37060f | [
"MIT"
] | 2 | 2021-01-01T12:20:39.000Z | 2021-05-10T23:33:27.000Z | crawler/crawler.py | thelumberjhack/corpusgen | 8ff1045e5b884991903697e2567a2ba67f37060f | [
"MIT"
] | null | null | null | crawler/crawler.py | thelumberjhack/corpusgen | 8ff1045e5b884991903697e2567a2ba67f37060f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# This code greatly inspires itself from http://aosabook.org/en/500L/a-web-crawler-with-asyncio-coroutines.html
import cgi
from collections import namedtuple
import os
import re
import logging
import urllib
import asyncio
import aiohttp
from asyncio import Queue
import time
LOGGER = logging.getLogger(__name__)
FetchStatistic = namedtuple(
'FetchStatistic', [
'url',
'next_url',
'status',
'exception',
'size',
'content_type',
'encoding',
'num_urls',
'num_new_urls'
]
)
class Crawler(object):
""" Crawls a set of urls.
"""
def __init__(self, roots, exclude=None, strict=True, max_redirect=10, max_tries=3, max_tasks=10, *, loop=None,
max_size=1024**2, file_type=None):
self.loop = loop or asyncio.get_event_loop()
self.roots = roots
self.exclude = exclude
self.strict = strict
self.max_redirect = max_redirect
self.max_tries = max_tries
self.max_tasks = max_tasks
self.queue = Queue(loop=self.loop)
self.seen_urls = set()
self.done = []
self.session = aiohttp.ClientSession(loop=self.loop)
self.root_domains = set()
self.max_file_size = max_size
if file_type.startswith("."):
self.file_type = file_type
else:
self.file_type = "." + file_type
for root in roots:
parts = urllib.parse.urlparse(root)
host, port = urllib.parse.splitport(parts.netloc)
if not host:
continue
if re.match(r'\A[\d\.]*\Z', host):
self.root_domains.add(host)
else:
host = host.lower()
if self.strict:
self.root_domains.add(host)
else:
self.root_domains.add(self.lenient_host(host))
for root in roots:
self.add_url(root)
self.t0 = time.time()
self.t1 = None
@staticmethod
def lenient_host(host):
parts = host.split('.')[-2:]
return ''.join(parts)
@staticmethod
def is_redirect(response):
return response.status in (300, 301, 302, 303, 307)
def close(self):
""" Close resources
:return: None
"""
self.session.close()
def host_ok(self, host):
""" Can this host be crawled?
:param host:
:return:
"""
host = host.lower()
if host in self.root_domains:
return True
if re.match(r'\A[\d\.]*\Z', host):
return False
if self.strict:
return self.host_ok_strict(host)
else:
return self.host_ok_lenient(host)
def host_ok_strict(self, host):
if host.startswith("www."):
host = host[4:]
else:
host = "www." + host
return host in self.root_domains
def host_ok_lenient(self, host):
return self.lenient_host(host) in self.root_domains
def record_statistic(self, fetch_statistic):
self.done.append(fetch_statistic)
@asyncio.coroutine
def parse_links(self, response):
""" Return a FetchStatistic and list of links.
:param response:
:return: FetchStatistic and links.
"""
links = set()
content_type = None
encoding = None
body = yield from response.read()
if response.status == 200:
content_type = response.headers.get("content-type")
pdict = {}
if content_type:
content_type, pdict = cgi.parse_header(content_type)
encoding = pdict.get("charset", "utf-8")
if content_type in ("text/html", "application/xml"):
text = yield from response.text()
# get all urls links
urls = set(re.findall(r'''(?i)href=["']([^\s"'<>]+)''', text))
if urls:
LOGGER.info("got {} distinct urls from {}".format(len(urls), response.url))
for url in urls:
normalized = urllib.parse.urljoin(response.url, url)
defragmented, frag = urllib.parse.urldefrag(normalized)
if self.url_allowed(defragmented):
links.add(defragmented)
stat = FetchStatistic(
url=response.url,
next_url=None,
status=response.status,
exception=None,
size=len(body),
content_type=content_type,
encoding=encoding,
num_urls=len(links),
num_new_urls=len(links - self.seen_urls)
)
return stat, links
@asyncio.coroutine
def fetch(self, url, max_redirect):
""" Fetch one url.
:param url:
:param max_redirect:
:return:
"""
tries = 0
exception = None
while tries < self.max_tries:
try:
response = yield from self.session.get(url, allow_redirects=False)
if tries > 1:
LOGGER.info("try {} for {} success".format(tries, url))
break
except aiohttp.ClientError as client_error:
LOGGER.info("try {} for {} raised {}".format(tries, url, client_error))
exception = client_error
tries += 1
else:
# we never broke out of the loop: all tries failed
LOGGER.error("{} failed after {} tries".format(url, self.max_tries))
self.record_statistic(
FetchStatistic(
url=url,
next_url=None,
status=None,
exception=exception,
size=0,
content_type=None,
encoding=None,
num_urls=0,
num_new_urls=0
)
)
return
try:
if self.is_redirect(response):
location = response.headers['location']
next_url = urllib.parse.urljoin(url, location)
self.record_statistic(
FetchStatistic(
url=url,
next_url=next_url,
status=response.status,
exception=None,
size=0,
content_type=None,
encoding=None,
num_urls=0,
num_new_urls=0
)
)
if next_url in self.seen_urls:
return
if max_redirect > 0:
LOGGER.info("redirect to {} from {}".format(next_url, url))
self.add_url(next_url, max_redirect - 1)
else:
LOGGER.error("redirect limit reached for {} from {}".format(next_url, url))
else:
stat, links = yield from self.parse_links(response)
self.record_statistic(stat)
for link in links.difference(self.seen_urls):
self.queue.put_nowait((link, self.max_redirect))
self.seen_urls.update(links)
finally:
yield from response.release()
@asyncio.coroutine
def work(self):
""" Process Queue items forever.
:return: None
"""
try:
while True:
url, max_redirect = yield from self.queue.get()
assert url in self.seen_urls
yield from self.fetch(url, max_redirect)
self.queue.task_done()
except asyncio.CancelledError as cancelled:
pass
def url_allowed(self, url):
""" Is url http or https format. Also checks the pointed url file type and size.
:param url: given url
:return: True if all conditions are met. False otherwise.
"""
if self.exclude and re.search(self.exclude, url):
return False
parts = urllib.parse.urlparse(url)
if parts.scheme not in ("http", "https"):
LOGGER.debug("skipping non-http scheme in {}".format(url))
return False
host, port = urllib.parse.splitport(parts.netloc)
if not self.host_ok(host):
LOGGER.debug("skipping non-root host in {}".format(url))
return False
# check file type
if not self.file_ok(url):
LOGGER.debug("skipping non {} files".format(self.file_type))
return False
return True
def add_url(self, url, max_redirect=None):
""" Adds url to the queue if not seen before.
:param url:
:param max_redirect:
:return: None
"""
if max_redirect is None:
max_redirect = self.max_redirect
LOGGER.debug("adding {} {}".format(url, max_redirect))
self.seen_urls.add(url)
self.queue.put_nowait((url, max_redirect))
@asyncio.coroutine
def crawl(self):
""" Run the crawler until all finished.
:return: None
"""
workers = [asyncio.Task(self.work(), loop=self.loop) for _ in range(self.max_tasks)]
self.t0 = time.time()
yield from self.queue.join()
self.t1 = time.time()
for w in workers:
w.cancel()
def file_ok(self, url):
""" Is the url pointing to the correct file type? Is its size OK?
:param url:
:return: True if file is from a type the user requested. False otherwise.
"""
href_path = urllib.parse.urlparse(url).path
extension = os.path.splitext(href_path)[1]
return extension == self.file_type
def size_ok(self, response):
""" Check if file size <= MAX_SIZE before downloading.
:param response:
:return:
"""
raise NotImplementedError
| 30.014925 | 114 | 0.529687 |
import cgi
from collections import namedtuple
import os
import re
import logging
import urllib
import asyncio
import aiohttp
from asyncio import Queue
import time
LOGGER = logging.getLogger(__name__)
FetchStatistic = namedtuple(
'FetchStatistic', [
'url',
'next_url',
'status',
'exception',
'size',
'content_type',
'encoding',
'num_urls',
'num_new_urls'
]
)
class Crawler(object):
def __init__(self, roots, exclude=None, strict=True, max_redirect=10, max_tries=3, max_tasks=10, *, loop=None,
max_size=1024**2, file_type=None):
self.loop = loop or asyncio.get_event_loop()
self.roots = roots
self.exclude = exclude
self.strict = strict
self.max_redirect = max_redirect
self.max_tries = max_tries
self.max_tasks = max_tasks
self.queue = Queue(loop=self.loop)
self.seen_urls = set()
self.done = []
self.session = aiohttp.ClientSession(loop=self.loop)
self.root_domains = set()
self.max_file_size = max_size
if file_type.startswith("."):
self.file_type = file_type
else:
self.file_type = "." + file_type
for root in roots:
parts = urllib.parse.urlparse(root)
host, port = urllib.parse.splitport(parts.netloc)
if not host:
continue
if re.match(r'\A[\d\.]*\Z', host):
self.root_domains.add(host)
else:
host = host.lower()
if self.strict:
self.root_domains.add(host)
else:
self.root_domains.add(self.lenient_host(host))
for root in roots:
self.add_url(root)
self.t0 = time.time()
self.t1 = None
@staticmethod
def lenient_host(host):
parts = host.split('.')[-2:]
return ''.join(parts)
@staticmethod
def is_redirect(response):
return response.status in (300, 301, 302, 303, 307)
def close(self):
self.session.close()
def host_ok(self, host):
host = host.lower()
if host in self.root_domains:
return True
if re.match(r'\A[\d\.]*\Z', host):
return False
if self.strict:
return self.host_ok_strict(host)
else:
return self.host_ok_lenient(host)
def host_ok_strict(self, host):
if host.startswith("www."):
host = host[4:]
else:
host = "www." + host
return host in self.root_domains
def host_ok_lenient(self, host):
return self.lenient_host(host) in self.root_domains
def record_statistic(self, fetch_statistic):
self.done.append(fetch_statistic)
@asyncio.coroutine
def parse_links(self, response):
links = set()
content_type = None
encoding = None
body = yield from response.read()
if response.status == 200:
content_type = response.headers.get("content-type")
pdict = {}
if content_type:
content_type, pdict = cgi.parse_header(content_type)
encoding = pdict.get("charset", "utf-8")
if content_type in ("text/html", "application/xml"):
text = yield from response.text()
urls = set(re.findall(r'''(?i)href=["']([^\s"'<>]+)''', text))
if urls:
LOGGER.info("got {} distinct urls from {}".format(len(urls), response.url))
for url in urls:
normalized = urllib.parse.urljoin(response.url, url)
defragmented, frag = urllib.parse.urldefrag(normalized)
if self.url_allowed(defragmented):
links.add(defragmented)
stat = FetchStatistic(
url=response.url,
next_url=None,
status=response.status,
exception=None,
size=len(body),
content_type=content_type,
encoding=encoding,
num_urls=len(links),
num_new_urls=len(links - self.seen_urls)
)
return stat, links
@asyncio.coroutine
def fetch(self, url, max_redirect):
tries = 0
exception = None
while tries < self.max_tries:
try:
response = yield from self.session.get(url, allow_redirects=False)
if tries > 1:
LOGGER.info("try {} for {} success".format(tries, url))
break
except aiohttp.ClientError as client_error:
LOGGER.info("try {} for {} raised {}".format(tries, url, client_error))
exception = client_error
tries += 1
else:
LOGGER.error("{} failed after {} tries".format(url, self.max_tries))
self.record_statistic(
FetchStatistic(
url=url,
next_url=None,
status=None,
exception=exception,
size=0,
content_type=None,
encoding=None,
num_urls=0,
num_new_urls=0
)
)
return
try:
if self.is_redirect(response):
location = response.headers['location']
next_url = urllib.parse.urljoin(url, location)
self.record_statistic(
FetchStatistic(
url=url,
next_url=next_url,
status=response.status,
exception=None,
size=0,
content_type=None,
encoding=None,
num_urls=0,
num_new_urls=0
)
)
if next_url in self.seen_urls:
return
if max_redirect > 0:
LOGGER.info("redirect to {} from {}".format(next_url, url))
self.add_url(next_url, max_redirect - 1)
else:
LOGGER.error("redirect limit reached for {} from {}".format(next_url, url))
else:
stat, links = yield from self.parse_links(response)
self.record_statistic(stat)
for link in links.difference(self.seen_urls):
self.queue.put_nowait((link, self.max_redirect))
self.seen_urls.update(links)
finally:
yield from response.release()
@asyncio.coroutine
def work(self):
try:
while True:
url, max_redirect = yield from self.queue.get()
assert url in self.seen_urls
yield from self.fetch(url, max_redirect)
self.queue.task_done()
except asyncio.CancelledError as cancelled:
pass
def url_allowed(self, url):
if self.exclude and re.search(self.exclude, url):
return False
parts = urllib.parse.urlparse(url)
if parts.scheme not in ("http", "https"):
LOGGER.debug("skipping non-http scheme in {}".format(url))
return False
host, port = urllib.parse.splitport(parts.netloc)
if not self.host_ok(host):
LOGGER.debug("skipping non-root host in {}".format(url))
return False
if not self.file_ok(url):
LOGGER.debug("skipping non {} files".format(self.file_type))
return False
return True
def add_url(self, url, max_redirect=None):
if max_redirect is None:
max_redirect = self.max_redirect
LOGGER.debug("adding {} {}".format(url, max_redirect))
self.seen_urls.add(url)
self.queue.put_nowait((url, max_redirect))
@asyncio.coroutine
def crawl(self):
workers = [asyncio.Task(self.work(), loop=self.loop) for _ in range(self.max_tasks)]
self.t0 = time.time()
yield from self.queue.join()
self.t1 = time.time()
for w in workers:
w.cancel()
def file_ok(self, url):
href_path = urllib.parse.urlparse(url).path
extension = os.path.splitext(href_path)[1]
return extension == self.file_type
def size_ok(self, response):
raise NotImplementedError
| true | true |
f71cc717d2a50c2a2eac3e063f01eef3d43d7dc5 | 1,914 | py | Python | code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/hourly_usage_attribution_response.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/hourly_usage_attribution_response.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/hourly_usage_attribution_response.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
from datadog_api_client.v1.model.hourly_usage_attribution_body import HourlyUsageAttributionBody
from datadog_api_client.v1.model.hourly_usage_attribution_metadata import HourlyUsageAttributionMetadata
globals()["HourlyUsageAttributionBody"] = HourlyUsageAttributionBody
globals()["HourlyUsageAttributionMetadata"] = HourlyUsageAttributionMetadata
class HourlyUsageAttributionResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
lazy_import()
return {
"metadata": (HourlyUsageAttributionMetadata,),
"usage": ([HourlyUsageAttributionBody],),
}
attribute_map = {
"metadata": "metadata",
"usage": "usage",
}
read_only_vars = {}
def __init__(self, *args, **kwargs):
"""HourlyUsageAttributionResponse - a model defined in OpenAPI
Keyword Args:
metadata (HourlyUsageAttributionMetadata): [optional]
usage ([HourlyUsageAttributionBody]): [optional] Get the hourly usage attribution by tag(s).
"""
super().__init__(kwargs)
self._check_pos_args(args)
@classmethod
def _from_openapi_data(cls, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(HourlyUsageAttributionResponse, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
return self
| 29.90625 | 108 | 0.698537 |
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
from datadog_api_client.v1.model.hourly_usage_attribution_body import HourlyUsageAttributionBody
from datadog_api_client.v1.model.hourly_usage_attribution_metadata import HourlyUsageAttributionMetadata
globals()["HourlyUsageAttributionBody"] = HourlyUsageAttributionBody
globals()["HourlyUsageAttributionMetadata"] = HourlyUsageAttributionMetadata
class HourlyUsageAttributionResponse(ModelNormal):
validations = {}
@cached_property
def openapi_types():
lazy_import()
return {
"metadata": (HourlyUsageAttributionMetadata,),
"usage": ([HourlyUsageAttributionBody],),
}
attribute_map = {
"metadata": "metadata",
"usage": "usage",
}
read_only_vars = {}
def __init__(self, *args, **kwargs):
super().__init__(kwargs)
self._check_pos_args(args)
@classmethod
def _from_openapi_data(cls, *args, **kwargs):
self = super(HourlyUsageAttributionResponse, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
return self
| true | true |
f71cc725c05458f3a9369d780bd91d3992785579 | 5,283 | py | Python | ecommerce/admin.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 32 | 2016-03-25T01:03:13.000Z | 2022-01-15T19:35:42.000Z | ecommerce/admin.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 4,858 | 2016-03-03T13:48:30.000Z | 2022-03-29T22:09:51.000Z | ecommerce/admin.py | umarmughal824/micromasters | ea92d3bcea9be4601150fc497302ddacc1161622 | [
"BSD-3-Clause"
] | 20 | 2016-08-18T22:07:44.000Z | 2021-11-15T13:35:35.000Z | """
Admin views for ecommerce models
"""
from django.contrib import admin
from ecommerce.models import (
Coupon,
CouponAudit,
CouponInvoice,
CouponInvoiceAudit,
Line,
Order,
OrderAudit,
Receipt,
RedeemedCoupon,
RedeemedCouponAudit,
UserCoupon,
UserCouponAudit,
)
from micromasters.utils import get_field_names
class LineAdmin(admin.ModelAdmin):
    """Read-only admin view for :class:`Line` records."""

    model = Line
    # Order lines are immutable history; expose every field read-only.
    readonly_fields = get_field_names(Line)

    def has_add_permission(self, request):
        """Lines are created by the purchase flow, never by hand."""
        return False

    def has_delete_permission(self, request, obj=None):
        """Lines may not be deleted from the admin."""
        return False
class OrderAdmin(admin.ModelAdmin):
    """Admin for Order.

    Orders are immutable purchase records: everything except ``status`` is
    read-only, creation/deletion are blocked, and every save is audited.
    """
    model = Order
    list_filter = ('status',)
    list_display = ('id', 'user', 'status', 'created_at', 'course_key',)
    search_fields = (
        'user__username',
        'user__email',
    )
    # Only "status" may be edited; all other fields are read-only.
    readonly_fields = [name for name in get_field_names(Order) if name != 'status']

    def course_key(self, obj):
        """
        Returns the course key of the first line associated with the order,
        or None when the order has no lines (previously this raised
        AttributeError in the changelist for line-less orders).
        """
        line = obj.line_set.first()
        return line.course_key if line is not None else None

    def has_add_permission(self, request):
        """Orders are created by the purchase flow, never by hand."""
        return False

    def has_delete_permission(self, request, obj=None):
        """Orders are permanent records and may not be deleted."""
        return False

    def save_model(self, request, obj, form, change):
        """
        Saves object and logs the change, attributed to the admin user.
        """
        obj.save_and_log(request.user)
# Audit tables are append-only: all fields read-only, no add/delete from admin.
class OrderAuditAdmin(admin.ModelAdmin):
    """Admin for OrderAudit"""
    model = OrderAudit
    readonly_fields = get_field_names(OrderAudit)
    def has_add_permission(self, request):
        # Audit rows are written by save_and_log, never created by hand.
        return False
    def has_delete_permission(self, request, obj=None):
        # Audit history must not be deleted.
        return False
class ReceiptAdmin(admin.ModelAdmin):
    """Admin for Receipt"""
    model = Receipt
    # Receipts come from the payment processor and are read-only here.
    readonly_fields = get_field_names(Receipt)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class CouponInvoiceAdmin(admin.ModelAdmin):
    """Admin for CouponInvoice"""
    model = CouponInvoice
    def save_model(self, request, obj, form, change):
        """
        Saves object and logs change to object
        """
        # save_and_log writes an audit entry attributed to the admin user.
        obj.save_and_log(request.user)
class CouponInvoiceAuditAdmin(admin.ModelAdmin):
    """Admin for CouponInvoiceAudit"""
    model = CouponInvoiceAudit
    readonly_fields = get_field_names(CouponInvoiceAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class CouponAdmin(admin.ModelAdmin):
    """Admin for Coupon"""
    model = Coupon
    # Allow lookup by code or by the invoice the coupon was issued under.
    search_fields = (
        'coupon_code',
        'invoice__invoice_number',
        'invoice__description',
    )
    list_filter = [
        'invoice',
        'enabled',
        'coupon_type',
        'amount_type',
    ]
    def save_model(self, request, obj, form, change):
        """
        Saves object and logs change to object
        """
        obj.save_and_log(request.user)
class CouponAuditAdmin(admin.ModelAdmin):
    """Admin for CouponAudit"""
    model = CouponAudit
    readonly_fields = get_field_names(CouponAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class RedeemedCouponAdmin(admin.ModelAdmin):
    """Admin for RedeemedCoupon"""
    model = RedeemedCoupon
    # Redemptions are immutable facts; fields are read-only, but saves are
    # still routed through save_and_log for auditing.
    readonly_fields = get_field_names(RedeemedCoupon)
    def save_model(self, request, obj, form, change):
        """
        Saves object and logs change to object
        """
        obj.save_and_log(request.user)
class RedeemedCouponAuditAdmin(admin.ModelAdmin):
    """Admin for RedeemedCouponAudit"""
    model = RedeemedCouponAudit
    readonly_fields = get_field_names(RedeemedCouponAudit)
    def has_add_permission(self, request):
        # Audit rows are written programmatically only.
        return False
    def has_delete_permission(self, request, obj=None):
        # Audit history must not be deleted.
        return False
class UserCouponAdmin(admin.ModelAdmin):
    """Admin for UserCoupon"""
    model = UserCoupon
    readonly_fields = get_field_names(UserCoupon)
    def save_model(self, request, obj, form, change):
        """
        Saves object and logs change to object
        """
        obj.save_and_log(request.user)
class UserCouponAuditAdmin(admin.ModelAdmin):
    """Admin for UserCouponAudit"""
    model = UserCouponAudit
    readonly_fields = get_field_names(UserCouponAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
# Register every ecommerce model with its admin configuration.
for _model, _model_admin in (
    (CouponInvoice, CouponInvoiceAdmin),
    (CouponInvoiceAudit, CouponInvoiceAuditAdmin),
    (Coupon, CouponAdmin),
    (CouponAudit, CouponAuditAdmin),
    (Line, LineAdmin),
    (Order, OrderAdmin),
    (OrderAudit, OrderAuditAdmin),
    (RedeemedCoupon, RedeemedCouponAdmin),
    (RedeemedCouponAudit, RedeemedCouponAuditAdmin),
    (Receipt, ReceiptAdmin),
    (UserCoupon, UserCouponAdmin),
    (UserCouponAudit, UserCouponAuditAdmin),
):
    admin.site.register(_model, _model_admin)
| 25.157143 | 83 | 0.68692 |
from django.contrib import admin
from ecommerce.models import (
Coupon,
CouponAudit,
CouponInvoice,
CouponInvoiceAudit,
Line,
Order,
OrderAudit,
Receipt,
RedeemedCoupon,
RedeemedCouponAudit,
UserCoupon,
UserCouponAudit,
)
from micromasters.utils import get_field_names
class LineAdmin(admin.ModelAdmin):
    """Admin for Line (read-only)."""
    model = Line
    readonly_fields = get_field_names(Line)
    def has_add_permission(self, request):
        # Lines are created by the purchase flow only.
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class OrderAdmin(admin.ModelAdmin):
    """Admin for Order; only the status field is editable."""
    model = Order
    list_filter = ('status',)
    list_display = ('id', 'user', 'status', 'created_at', 'course_key',)
    search_fields = (
        'user__username',
        'user__email',
    )
    readonly_fields = [name for name in get_field_names(Order) if name != 'status']
    def course_key(self, obj):
        """Return the course key of the first line on the order."""
        # NOTE(review): .first() returns None for an order with no lines,
        # which would raise AttributeError here - confirm orders always
        # have at least one line.
        line = obj.line_set.first()
        return line.course_key
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
    def save_model(self, request, obj, form, change):
        """Save the order and record an audit entry for the admin user."""
        obj.save_and_log(request.user)
class OrderAuditAdmin(admin.ModelAdmin):
    """Admin for OrderAudit (read-only audit history)."""
    model = OrderAudit
    readonly_fields = get_field_names(OrderAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class ReceiptAdmin(admin.ModelAdmin):
    """Admin for Receipt (read-only)."""
    model = Receipt
    readonly_fields = get_field_names(Receipt)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class CouponInvoiceAdmin(admin.ModelAdmin):
    """Admin for CouponInvoice."""
    model = CouponInvoice
    def save_model(self, request, obj, form, change):
        """Save the invoice and record an audit entry for the admin user."""
        obj.save_and_log(request.user)
class CouponInvoiceAuditAdmin(admin.ModelAdmin):
    """Admin for CouponInvoiceAudit (read-only audit history)."""
    model = CouponInvoiceAudit
    readonly_fields = get_field_names(CouponInvoiceAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class CouponAdmin(admin.ModelAdmin):
    """Admin for Coupon."""
    model = Coupon
    # Searchable by code or by the invoice the coupon was issued under.
    search_fields = (
        'coupon_code',
        'invoice__invoice_number',
        'invoice__description',
    )
    list_filter = [
        'invoice',
        'enabled',
        'coupon_type',
        'amount_type',
    ]
    def save_model(self, request, obj, form, change):
        """Save the coupon and record an audit entry for the admin user."""
        obj.save_and_log(request.user)
class CouponAuditAdmin(admin.ModelAdmin):
    """Admin for CouponAudit (read-only audit history)."""
    model = CouponAudit
    readonly_fields = get_field_names(CouponAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class RedeemedCouponAdmin(admin.ModelAdmin):
    """Admin for RedeemedCoupon (read-only fields, audited saves)."""
    model = RedeemedCoupon
    readonly_fields = get_field_names(RedeemedCoupon)
    def save_model(self, request, obj, form, change):
        """Save the redemption and record an audit entry."""
        obj.save_and_log(request.user)
class RedeemedCouponAuditAdmin(admin.ModelAdmin):
    """Admin for RedeemedCouponAudit (read-only audit history)."""
    model = RedeemedCouponAudit
    readonly_fields = get_field_names(RedeemedCouponAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class UserCouponAdmin(admin.ModelAdmin):
    """Admin for UserCoupon (read-only fields, audited saves)."""
    model = UserCoupon
    readonly_fields = get_field_names(UserCoupon)
    def save_model(self, request, obj, form, change):
        """Save the user coupon and record an audit entry."""
        obj.save_and_log(request.user)
class UserCouponAuditAdmin(admin.ModelAdmin):
    """Admin for UserCouponAudit (read-only audit history)."""
    model = UserCouponAudit
    readonly_fields = get_field_names(UserCouponAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
# Register every ecommerce model with its admin configuration.
admin.site.register(CouponInvoice, CouponInvoiceAdmin)
admin.site.register(CouponInvoiceAudit, CouponInvoiceAuditAdmin)
admin.site.register(Coupon, CouponAdmin)
admin.site.register(CouponAudit, CouponAuditAdmin)
admin.site.register(Line, LineAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderAudit, OrderAuditAdmin)
admin.site.register(RedeemedCoupon, RedeemedCouponAdmin)
admin.site.register(RedeemedCouponAudit, RedeemedCouponAuditAdmin)
admin.site.register(Receipt, ReceiptAdmin)
admin.site.register(UserCoupon, UserCouponAdmin)
admin.site.register(UserCouponAudit, UserCouponAuditAdmin)
| true | true |
f71cc7626802c7caa73aac783baedbb65798da02 | 3,272 | py | Python | pylint_plugins/api_models.py | FairwindsOps/st2 | 2b76ca740c4af0d6b2c1d1ba5534ce4133fd16fa | [
"Apache-2.0"
] | 1 | 2021-04-08T03:21:49.000Z | 2021-04-08T03:21:49.000Z | pylint_plugins/api_models.py | FairwindsOps/st2 | 2b76ca740c4af0d6b2c1d1ba5534ce4133fd16fa | [
"Apache-2.0"
] | null | null | null | pylint_plugins/api_models.py | FairwindsOps/st2 | 2b76ca740c4af0d6b2c1d1ba5534ce4133fd16fa | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plugin which tells Pylint how to handle classes which define attributes using jsonschema
in "schema" class attribute.
Those classes dyamically assign attributes defined in the schema on the class inside the
constructor.
"""
import six
from astroid import MANAGER
from astroid import nodes
from astroid import scoped_nodes
# A list of class names for which we want to skip the checks
CLASS_NAME_BLACKLIST = [
'ExecutionSpecificationAPI'
]
def register(linter):
    """Required pylint plugin entry point; this plugin needs no checkers."""
    pass
def transform(cls):
    """Teach pylint about attributes that *API classes assign dynamically.

    Classes whose name ends in "API" (or that define a "schema" local)
    declare their attributes via jsonschema and assign them at run time in
    the constructor; here we register those attributes on the astroid class
    node so pylint does not report them as undefined.
    """
    if cls.name in CLASS_NAME_BLACKLIST:
        return
    if not (cls.name.endswith('API') or 'schema' in cls.locals):
        return

    # Import the real class so the actual "schema" dict can be inspected.
    fqdn = cls.qname()
    module_name, class_name = fqdn.rsplit('.', 1)
    module = __import__(module_name, fromlist=[class_name])
    actual_cls = getattr(module, class_name)

    schema = actual_cls.schema
    if not isinstance(schema, dict):
        # Not a jsonschema-driven class after all; nothing to register.
        return

    # jsonschema scalar types mapped to the builtin used as a stand-in node.
    builtin_name_for_type = {
        'integer': 'int',
        'number': 'float',
        'string': 'str',
        'boolean': 'bool',
        'null': 'None',
    }

    for property_name, property_data in six.iteritems(schema.get('properties', {})):
        # Mirror the runtime attribute naming ("-" becomes "_").
        property_name = property_name.replace('-', '_')
        property_type = property_data.get('type', None)

        if isinstance(property_type, (list, tuple)):
            # Multiple allowed types (e.g. ["string", "null"]); use the first.
            property_type = property_type[0]

        if property_type == 'object':
            node = nodes.Dict()
        elif property_type == 'array':
            node = nodes.List()
        elif property_type in builtin_name_for_type:
            node = scoped_nodes.builtin_lookup(builtin_name_for_type[property_type])[1][0]
        else:
            node = scoped_nodes.Class(property_name, None)

        cls.locals[property_name] = [node]


MANAGER.register_transform(scoped_nodes.Class, transform)
| 36.764045 | 98 | 0.663814 |
import six
from astroid import MANAGER
from astroid import nodes
from astroid import scoped_nodes
CLASS_NAME_BLACKLIST = [
'ExecutionSpecificationAPI'
]
def register(linter):
    """Required pylint plugin entry point; this plugin needs no checkers."""
    pass
def transform(cls):
    """Register jsonschema-declared attributes on *API classes for pylint."""
    if cls.name in CLASS_NAME_BLACKLIST:
        return
    if cls.name.endswith('API') or 'schema' in cls.locals:
        # This class defines attributes via a jsonschema "schema" dict and
        # assigns them dynamically in its constructor; import the real class
        # to inspect that schema.
        fqdn = cls.qname()
        module_name, class_name = fqdn.rsplit('.', 1)
        module = __import__(module_name, fromlist=[class_name])
        actual_cls = getattr(module, class_name)
        schema = actual_cls.schema
        if not isinstance(schema, dict):
            # Not a class we are interested in.
            return
        properties = schema.get('properties', {})
        for property_name, property_data in six.iteritems(properties):
            # Mirror the runtime attribute naming ("-" becomes "_").
            property_name = property_name.replace('-', '_')
            property_type = property_data.get('type', None)
            if isinstance(property_type, (list, tuple)):
                # Multiple allowed types (e.g. string, null); use the first.
                property_type = property_type[0]
            if property_type == 'object':
                node = nodes.Dict()
            elif property_type == 'array':
                node = nodes.List()
            elif property_type == 'integer':
                node = scoped_nodes.builtin_lookup('int')[1][0]
            elif property_type == 'number':
                node = scoped_nodes.builtin_lookup('float')[1][0]
            elif property_type == 'string':
                node = scoped_nodes.builtin_lookup('str')[1][0]
            elif property_type == 'boolean':
                node = scoped_nodes.builtin_lookup('bool')[1][0]
            elif property_type == 'null':
                node = scoped_nodes.builtin_lookup('None')[1][0]
            else:
                # Fall back to a synthetic class node for unknown types.
                node = scoped_nodes.Class(property_name, None)
            cls.locals[property_name] = [node]
MANAGER.register_transform(scoped_nodes.Class, transform)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.